diff --git "a/3140.jsonl" "b/3140.jsonl" new file mode 100644--- /dev/null +++ "b/3140.jsonl" @@ -0,0 +1,1678 @@ +{"seq_id":"27251345932","text":"import numpy as np\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import coo_matrix\nfrom numpy.linalg import inv\nimport argparse\n\nimport utils\nimport inverse\n\n\ndef gen(b, p, k, q, nonzerosRatiosA, nonzerosRatiosB):\n\n As = []\n Bs = []\n Cs = []\n for i in range(b):\n A = coo_matrix(utils.genRandomFloatSparseMatrix(p, k, nonzerosRatiosA[i]))\n B = coo_matrix(utils.genRandomFloatSparseMatrix(k, q, nonzerosRatiosB[i]))\n C = coo_matrix(A.dot(B))\n As.append(A)\n Bs.append(B)\n Cs.append(C)\n\n return As, Bs, Cs\n\nif __name__ == \"__main__\":\n parse = argparse.ArgumentParser(description='generate random sparse matrices, a batch of sparse matrices \\\n A[0..b-1], B[0..b-1] with given non-zero probability, \\\n then perform C[i]=A[i]/B[i] (A[i]: p by k, B[i]: k by q, C[i]: p by q) for i in [0..b-1]')\n parse.add_argument('--batchsize', '-b', type=int, default=2, help='the number of rows for the input matrix A')\n parse.add_argument('--rowsA', '-p', type=int, default=3, help='the number of rows for the input matrix A')\n parse.add_argument('--colsA', '-k', type=int, default=3, help='the number of columns for the input matrix A')\n parse.add_argument('--colsB', '-q', type=int, default=3, help='the number of columns for the input matrix B')\n parse.add_argument('--nonzerosRatios', '-z', nargs=\"+\", type=float, default=[0.2], help='the ratios \\\n of non-zeros for A[0..b-1], B[0..b-1]')\n parse.add_argument('--randomseed', '-s', type=int, default=0, help='the seed for numpy to generate random data')\n args = parse.parse_args()\n\n np.random.seed(args.randomseed)\n\n nonzerosRatiosA, nonzerosRatiosB = utils.processList(args.nonzerosRatios, args.batchsize)\n As, Bs, Cs = gen(args.batchsize, args.rowsA, args.colsA, args.colsB, nonzerosRatiosA, nonzerosRatiosB)\n\n print(\"----Kotlin format----\")\n utils.printCOO3DMatrixKotlin(As, 'input left', 't1')\n utils.printCOO3DMatrixKotlin(Bs, 'input right', 't2')\n utils.printCOO3DMatrixKotlin(Cs, 'expected output', 'outExp')\n print(\"----Kotlin format----\")\n","repo_name":"facebookresearch/diffkt","sub_path":"python/utilities/testCasesGen/matmul3d.py","file_name":"matmul3d.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"9"} +{"seq_id":"16341437616","text":"import torch\r\nfrom model.asr_model import MyASRModel\r\nfrom model.domain_model import DomainModel\r\nfrom model.ebranchformer.encoder import EBranchformerEncoder\r\nfrom model.moduel.domin_classification import DomainClassifier\r\nfrom model.moduel.fusion import Fusion\r\nfrom model.moe_asr_model import MoeAsrModel\r\nfrom model.moe_conformer.encoder import MoeConformerEncoder\r\nfrom model.moe_e_branchformer.encoder import MoeEBranchformerEncoder\r\n\r\nfrom wenet.branchformer.encoder import BranchformerEncoder\r\nfrom wenet.transformer.asr_model import ASRModel\r\nfrom wenet.transformer.cmvn import GlobalCMVN\r\nfrom wenet.transformer.ctc import CTC\r\nfrom wenet.transformer.decoder import TransformerDecoder\r\nfrom wenet.transformer.encoder import ConformerEncoder, TransformerEncoder\r\nfrom wenet.utils.cmvn import load_cmvn\r\n\r\n\r\ndef init_model(configs):\r\n if configs['cmvn_file'] is not None:\r\n mean, istd = load_cmvn(configs['cmvn_file'], configs['is_json_cmvn'])\r\n global_cmvn = GlobalCMVN(\r\n torch.from_numpy(mean).float(),\r\n 
torch.from_numpy(istd).float())\r\n else:\r\n global_cmvn = None\r\n\r\n input_dim = configs['input_dim']\r\n vocab_size = configs['output_dim']\r\n domain_num = configs[\"domain_num\"]\r\n\r\n domain_encoder_type = configs.get('domain_encoder', 'none')\r\n encoder_type = configs.get('encoder', 'conformer')\r\n decoder_type = configs.get('decoder', 'none')\r\n model_type = configs.get('model_type', 'my_model')\r\n\r\n if model_type == \"moe_model\":\r\n if domain_encoder_type == \"conformer\":\r\n domain_encoder = ConformerEncoder(input_dim,\r\n global_cmvn=None,\r\n **configs['domain_encoder_conf'])\r\n domain_classifier = DomainClassifier(domain_num, domain_encoder.output_size())\r\n else:\r\n domain_encoder = None\r\n domain_classifier=None\r\n \r\n if encoder_type == 'conformer':\r\n encoder = MoeConformerEncoder(input_dim,\r\n global_cmvn=global_cmvn,\r\n **configs['encoder_conf'])\r\n elif encoder_type == \"ebranchformer\":\r\n encoder = MoeEBranchformerEncoder(input_size=input_dim,\r\n global_cmvn=global_cmvn,\r\n **configs['encoder_conf'])\r\n else:\r\n raise ValueError(f\"encoder type was not support {encoder_type}\")\r\n \r\n ctc = CTC(vocab_size, encoder.output_size())\r\n \r\n if decoder_type == 'transformer':\r\n decoder = TransformerDecoder(vocab_size, encoder.output_size(),\r\n **configs['decoder_conf'])\r\n else:\r\n decoder = None\r\n\r\n # fusion = Fusion(domain_encoder.output_size(),encoder.output_size(),encoder.output_size(),fusion_type=\"concat\")\r\n fusion =None\r\n\r\n model = MoeAsrModel(vocab_size,\r\n encoder=encoder,\r\n decoder=decoder,\r\n ctc=ctc,\r\n domain_encoder=domain_encoder,\r\n domain_classifier=domain_classifier,\r\n fusion=fusion,\r\n **configs[\"model_conf\"])\r\n \r\n elif model_type == \"my_model\":\r\n if encoder_type == 'conformer':\r\n encoder = ConformerEncoder(input_dim,\r\n global_cmvn=global_cmvn,\r\n **configs['encoder_conf'])\r\n elif encoder_type == 'branchformer':\r\n encoder = BranchformerEncoder(input_dim,\r\n global_cmvn=global_cmvn,\r\n **configs['encoder_conf'])\r\n elif encoder_type == \"ebranchformer\":\r\n encoder = EBranchformerEncoder(input_size=input_dim,\r\n global_cmvn=global_cmvn,\r\n **configs['encoder_conf'])\r\n else:\r\n encoder = TransformerEncoder(input_dim,\r\n global_cmvn=global_cmvn,\r\n **configs['encoder_conf'])\r\n ctc = CTC(vocab_size, encoder.output_size())\r\n\r\n if decoder_type == 'transformer':\r\n decoder = TransformerDecoder(vocab_size, encoder.output_size(),\r\n **configs['decoder_conf'])\r\n else:\r\n decoder = None\r\n model = MyASRModel(vocab_size=vocab_size,\r\n encoder=encoder,\r\n decoder=decoder,\r\n ctc=ctc,\r\n **configs['model_conf'])\r\n elif model_type == \"domain_asr_model\":\r\n pass\r\n elif model_type == \"domain_model\":\r\n domain_encoder = ConformerEncoder(input_dim,\r\n global_cmvn=global_cmvn,\r\n **configs['domain_encoder_conf'])\r\n domain_classifier = DomainClassifier(domain_num, domain_encoder.output_size())\r\n\r\n model = DomainModel(domain_num=domain_num,\r\n domain_encoder=domain_encoder,\r\n domain_classifier=domain_classifier)\r\n else:\r\n raise ValueError(f\"model type was not support {model_type}\")\r\n return model\r\n","repo_name":"NefelibataJay/ournet","sub_path":"init_model.py","file_name":"init_model.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"42057447646","text":"\"\"\"it is the Celery section.\"\"\"\nfrom celery import Celery\n\n\nREDIS_URL = 
'redis://localhost:6379/0'\n\napp = Celery('tasks', backend=REDIS_URL, broker=REDIS_URL)\n\n\n@app.task\ndef add(first_term: int, second_term: int) -> int:\n \"\"\"\n Task for adding two terms.\n\n Using Celery\n \"\"\"\n return first_term + second_term\n","repo_name":"comeillfoo/simple-backend","sub_path":"tasks/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"75127313572","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\n# pylint: disable=protected-access,too-many-public-methods\n\nfrom hamcrest import is_\nfrom hamcrest import all_of\nfrom hamcrest import is_not\nfrom hamcrest import has_entry\nfrom hamcrest import assert_that\ndoes_not = is_not\n\nimport unittest\n\nfrom nti.namedfile.constraints import FileConstraints\n\nfrom nti.namedfile.file import NamedBlobFile\n\nfrom nti.externalization.tests import externalizes\n\nfrom nti.namedfile.tests import SharedConfiguringTestLayer\n\n\nclass TestNamedFile(unittest.TestCase):\n\n layer = SharedConfiguringTestLayer\n\n def test_restrictions(self):\n named = NamedBlobFile(data=b'data',\n contentType=u'image/gif',\n filename=u'zpt.gif')\n internal = FileConstraints(named)\n internal.max_file_size = 1\n internal.allowed_extensions = (u'.doc',)\n internal.allowed_mime_types = (u'image/jpeg',)\n assert_that(internal.is_file_size_allowed(), is_(False))\n assert_that(internal.is_mime_type_allowed(), is_(False))\n assert_that(internal.is_filename_allowed(), is_(False))\n\n assert_that(internal,\n externalizes(\n all_of(has_entry('Class', 'FileConstraints'),\n has_entry('max_file_size', 1),\n has_entry('allowed_extensions', is_([u'.doc'])),\n has_entry('allowed_mime_types', is_(['image/jpeg'])))))\n\n named = NamedBlobFile(data=b'data',\n contentType=u'image/gif',\n filename=u'zpt.gif')\n\n internal = FileConstraints()\n assert_that(internal.is_mime_type_allowed(), is_(False))\n assert_that(internal.is_mime_type_allowed('*/*'), is_(False))\n \n internal.allowed_mime_types = ('image/*;',)\n assert_that(internal.is_mime_type_allowed('image/gif'), is_(True))\n \n internal.allowed_mime_types = ('*/*',)\n assert_that(internal.is_mime_type_allowed('image/gif'), is_(True))\n\n internal = FileConstraints(named)\n internal.allowed_extensions = (u'.GIF',)\n internal.allowed_mime_types = (u'image/gif',)\n assert_that(internal.is_file_size_allowed(), is_(True))\n assert_that(internal.is_mime_type_allowed(), is_(True))\n assert_that(internal.is_filename_allowed(), is_(True))\n\n internal.max_file_size = 10\n assert_that(internal.is_file_size_allowed(15), is_(False))\n","repo_name":"OpenNTI/nti.namedfile","sub_path":"src/nti/namedfile/tests/test_constraints.py","file_name":"test_constraints.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"13721756910","text":"\"\"\"show_crypto.py\n\nIOSXE parsers for the following show commands:\n * show crypto pki certificates \n\"\"\"\n\n# Python\nimport re\n\n# Metaparser\nfrom genie.metaparser import MetaParser\nfrom genie.metaparser.util.schemaengine import Schema, Any, Optional\n\n# Genie Libs\nfrom genie.libs.parser.utils.common import Common\n\n\n# =================================================\n# Schema for 'show crypto pki certificates '\n# 
=================================================\nclass ShowCryptoPkiCertificatesSchema(MetaParser):\n \"\"\"Schema for show crypto pki certificates \"\"\"\n schema = {\n 'trustpoints':\n {Any():\n {'associated_trustpoints':\n {Any():\n {'status': str,\n 'serial_number_in_hex': str,\n 'usage': str,\n Optional('storage'): str,\n 'issuer':\n {\n Optional('cn'): str,\n Optional('o'): str},\n 'subject':\n {Optional('name'): str,\n Optional('serial_number'): str,\n Optional('pid'): str,\n Optional('cn'): str,\n Optional('o'): str,\n },\n Optional('crl_distribution_points'): str,\n 'validity_date':\n {'start_date':str,\n 'end_date': str,\n },\n },\n },\n },\n },\n }\n\n\n# =================================================\n# Parser for 'show crypto pki certificates '\n# =================================================\nclass ShowCryptoPkiCertificates(ShowCryptoPkiCertificatesSchema):\n \"\"\"Parser for show crypto pki certificates \"\"\"\n\n cli_command = ['show crypto pki certificates {trustpoint_name}','show crypto pki certificates']\n\n def cli(self, trustpoint_name='',output=None):\n if output is None:\n if trustpoint_name:\n cmd = self.cli_command[0].format(trustpoint_name=trustpoint_name)\n else:\n cmd = self.cli_command[1]\n out = self.device.execute(cmd)\n else:\n out = output\n\n # initial return dictionary\n ret_dict = {}\n\n # initial regexp pattern\n # Certificate\n # CA Certificate\n p1 = re.compile(r'^((?PCertificate)|(?P(CA|Router Self-Signed) +Certificate))$')\n\n # Status: Available\n p2 = re.compile(r'^Status: +(?P\\w+)$')\n\n # Certificate Serial Number (hex): 793B572700000003750B\n # Certificate Serial Number: 0x15\n p3 = re.compile(r'^Certificate +Serial +Number( +\\(hex\\))?: +(?P\\w+)$')\n\n # Certificate Usage: General Purpose\n p4 = re.compile(r'^Certificate Usage: +(?P[\\w\\s]+)$')\n\n # Issuer:\n # Subject:\n # Validity Date:\n p5 = re.compile(r'^((?PIssuer)|(?PSubject)|(?PValidity +Date)):$')\n\n # cn=Cisco Manufacturing CA SHA2\n # CN = tpca-root\n p6 = re.compile(r'(?i)^cn *= *(?P[\\S\\s]+)$')\n\n # o=Cisco\n # O = Company\n p7 = re.compile(r'(?i)^o *= *(?P[\\w\\s]+)$')\n\n # Name: WS-C3850-24P-0057D21BC800\n p8 = re.compile(r'^Name: +(?P.*)$')\n\n # Serial Number: PID:WS-C3850-24P SN:FCW1947C0GF\n p9 = re.compile(r'^Serial +Number: *'\n 'PID: *(?P[\\w\\-]+) +'\n 'SN: *(?P[\\w\\-]+)$')\n\n # CRL Distribution Points: \n # http://www.cisco.com/security/pki/crl/cmca2.crl\n p10 = re.compile(r'(?P^http:[\\w\\/\\:\\.]+)$')\n\n # start date: 00:34:52 UTC Nov 20 2015\n # end date: 00:44:52 UTC Nov 20 2025\n p11 = re.compile(r'^((?Pstart +date)|(?Pend +date)): +(?P.*)$')\n\n # Associated Trustpoints: CISCO_IDEVID_SUDI\n # Associated Trustpoints: CISCO_IDEVID_SUDI Trustpool\n p12 = re.compile(r'^Associated +Trustpoints: +(?P[\\w\\-]+)( +Trustpool)?$')\n\n # Storage: nvram:IOS-Self-Sig#1.cer\n p13 = re.compile(r'^Storage: +(?P(\\S+))$')\n\n for line in out.splitlines():\n line = line.strip()\n \n # Certificate\n # CA Certificate\n m = p1.match(line)\n if m:\n if m.groupdict()['cer']:\n cer_type = 'certificate'\n else:\n cer_type = m.groupdict()['cer_name'].lower().replace(\" \", \"_\").replace(\"-\", \"_\")\n cer_dict = ret_dict.setdefault(cer_type, {})\n continue\n\n # Status: Available\n m = p2.match(line)\n if m:\n cer_dict['status'] = m.groupdict()['status']\n continue\n\n # Certificate Serial Number (hex): 793B572700000003750B\n # Certificate Serial Number: 0x15\n m = p3.match(line)\n if m:\n cer_dict['serial_number_in_hex'] = m.groupdict()['serial_number_in_hex']\n 
continue\n\n # Certificate Usage: General Purpose\n m = p4.match(line)\n if m:\n cer_dict['usage'] = m.groupdict()['usage']\n continue\n\n # Issuer:\n # Subject:\n # Validity Date:\n m = p5.match(line)\n if m:\n group = m.groupdict()\n if group.get('issuer', {}):\n sub_dict = cer_dict.setdefault('issuer', {})\n if group.get('subject', {}):\n sub_dict = cer_dict.setdefault('subject', {})\n if group.get('validity_date', {}):\n sub_dict = cer_dict.setdefault('validity_date', {})\n continue\n\n # cn=Cisco Manufacturing CA SHA2\n # CN = tpca-root\n m = p6.match(line)\n if m:\n sub_dict['cn'] = m.groupdict()['cn']\n continue\n \n # o=Cisco\n # O = Company\n m = p7.match(line)\n if m:\n sub_dict['o'] = m.groupdict()['o']\n continue\n\n # Name: WS-C3850-24P-0057D21BC800\n m = p8.match(line)\n if m:\n sub_dict['name'] = m.groupdict()['name']\n continue\n\n # Serial Number: PID:WS-C3850-24P SN:FCW1947C0GF\n m = p9.match(line)\n if m:\n sub_dict.update({k:v for k,v in m.groupdict().items()})\n continue\n \n # CRL Distribution Points: \n # http://www.cisco.com/security/pki/crl/cmca2.crl\n m = p10.match(line)\n if m:\n cer_dict['crl_distribution_points'] = m.groupdict()['crl_distribution_points']\n continue\n\n # start date: 00:34:52 UTC Nov 20 2015\n # end date: 00:44:52 UTC Nov 20 2025\n m = p11.match(line)\n if m:\n group = m.groupdict()\n sub_dict.setdefault('start_date', group['value']) if \\\n group.get('start_date', {}) else None\n sub_dict.setdefault('end_date', group['value']) if \\\n group.get('end_date', {}) else None\n continue\n\n # Storage: nvram:IOS-Self-Sig#1.cer\n m = p13.match(line)\n if m:\n cer_dict['storage'] = m.groupdict()['storage']\n continue\n\n # Associated Trustpoints: CISCO_IDEVID_SUDI\n # Associated Trustpoints: CISCO_IDEVID_SUDI Trustpool\n m = p12.match(line)\n if m:\n trustpoints = m.groupdict()['trustpoints'] \n continue\n try:\n return {'trustpoints': {trustpoints: {'associated_trustpoints': ret_dict}}}\n except Exception:\n return {}\n","repo_name":"saranraj-netcodes/ansible-course-2020","sub_path":"VENV/py3_venv/lib/python3.6/site-packages/genie/libs/parser/iosxe/show_crypto.py","file_name":"show_crypto.py","file_ext":"py","file_size_in_byte":8233,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"9"} +{"seq_id":"27254853692","text":"from abc import ABC, abstractmethod\nimport numpy as np\nimport math\nfrom .generators import all_operators, math_constants, Node, NodeList\n\nclass Encoder(ABC):\n \"\"\"\n Base class for encoders, encodes and decodes matrices\n abstract methods for encoding/decoding numbers\n \"\"\"\n def __init__(self, params):\n pass\n\n @abstractmethod\n def encode(self, val):\n pass\n\n @abstractmethod\n def decode(self, lst):\n pass\n\nclass IntegerSequences(Encoder):\n def __init__(self, params):\n super().__init__(params)\n self.int_base = params.int_base\n self.symbols = ['+', '-', ',']\n self.symbols.extend([str(i) for i in range(self.int_base)])\n\n def encode(self, value):\n seq = []\n for val in value:\n seq.append('+' if val >= 0 else '-')\n vseq = []\n w = abs(val)\n if w == 0:\n seq.append('0')\n else:\n while w > 0:\n vseq.append(str(w % self.int_base))\n w = w // self.int_base\n seq.extend(vseq[::-1])\n return seq\n\n def decode(self, lst):\n \n if len(lst) == 0:\n return None\n res = []\n if lst[0] in [\"+\", \"-\"]:\n curr_group = [lst[0]]\n else:\n return None\n if lst[-1] in [\"+\", \"-\"]:\n return None\n\n for x in lst[1:]:\n if x in [\"+\", \"-\"]:\n if len(curr_group)>1:\n sign = 1 if 
curr_group[0]==\"+\" else -1\n value = 0\n for elem in curr_group[1:]:\n value = value*self.int_base + int(elem)\n res.append(sign*value)\n curr_group = [x]\n else:\n return None\n else:\n curr_group.append(x)\n if len(curr_group)>1:\n sign = 1 if curr_group[0]==\"+\" else -1\n value = 0\n for elem in curr_group[1:]:\n value = value*self.int_base + int(elem)\n res.append(sign*value)\n return res\n \nclass FloatSequences(Encoder):\n def __init__(self, params):\n super().__init__(params)\n self.float_precision = params.float_precision\n self.mantissa_len = params.mantissa_len\n self.max_exponent = params.max_exponent\n self.base = (self.float_precision+1)//self.mantissa_len\n self.max_token = 10 ** self.base\n self.symbols = ['+','-']\n self.symbols.extend(['N' + f\"%0{self.base}d\" % i for i in range(self.max_token)])\n self.symbols.extend(['E' + str(i) for i in range(-self.max_exponent, self.max_exponent+1)])\n\n def encode(self, value):\n \"\"\"\n Write a float number\n \"\"\"\n seq = []\n precision = self.float_precision\n for val in value:\n assert val not in [-np.inf, np.inf]\n sign = '+' if val>=0 else '-'\n m, e = (f\"%.{precision}e\" % val).split(\"e\")\n i, f = m.lstrip(\"-\").split(\".\")\n i = i + f\n tokens = self.chunks(i, (precision+1)//self.mantissa_len)\n expon = int(e) - precision\n if expon < -self.max_exponent:\n tokens = ['0'*self.base]*self.mantissa_len\n expon = int(0)\n seq.extend([sign, *['N' + token for token in tokens], \"E\" + str(expon)])\n return seq\n \n def chunks(self, lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n\n def decode(self, lst):\n \"\"\"\n Parse a list that starts with a float.\n Return the float value, and the position it ends in the list.\n \"\"\"\n if len(lst)==0:\n return None\n seq = []\n for val in self.chunks(lst, 2+self.mantissa_len):\n for x in val: \n if x[0] not in ['-','+','E','N']: return np.nan\n try:\n sign = 1 if val[0]=='+' else -1\n mant = ''\n for x in val[1:-1]:\n mant += x[1:]\n mant = int(mant)\n exp = int(val[-1][1:])\n value = sign * mant * (10 ** exp)\n value=float(value)\n except Exception:\n value = np.nan\n seq.append(value)\n return seq\n \nclass Equation(Encoder):\n def __init__(self, params, symbols):\n super().__init__(params)\n self.params = params\n self.max_int = self.params.max_int\n self.symbols = symbols\n if params.extra_unary_operators!=\"\":\n self.extra_unary_operators=self.params.extra_unary_operators.split(\",\")\n else:\n self.extra_unary_operators=[]\n if params.extra_binary_operators!=\"\":\n self.extra_binary_operators=self.params.extra_binary_operators.split(\",\")\n else:\n self.extra_binary_operators=[]\n\n def encode(self, tree):\n res = []\n for elem in tree.prefix().split(','):\n try:\n val=float(elem) \n if (val).is_integer():\n res.extend(self.write_int(int(elem)))\n else:\n res.append(\"OOD_constant\")\n except ValueError:\n res.append(elem)\n return res\n\n def _decode(self, lst):\n if len(lst)==0:\n return None, 0\n if (lst[0] not in self.symbols) and (not lst[0].lstrip('-').isdigit()):\n return None, 0\n if \"OOD\" in lst[0]:\n return None, 0\n if lst[0] in all_operators.keys():\n res = Node(lst[0], self.params)\n arity = all_operators[lst[0]]\n pos = 1\n for i in range(arity):\n child, length = self._decode(lst[pos:])\n if child is None:\n return None, pos\n res.push_child(child)\n pos += length\n return res, pos\n elif lst[0].startswith('INT'):\n val, length = self.parse_int(lst)\n return Node(str(val), 
self.params), length\n else: # other leafs\n return Node(lst[0], self.params), 1\n\n \n def decode(self, lst):\n trees = []\n lists = self.split_at_value(lst, '|')\n for lst in lists:\n tree = self._decode(lst)[0]\n if tree is None: return None\n trees.append(tree)\n tree = NodeList(trees)\n return tree\n \n def split_at_value(self, lst, value):\n indices = [i for i, x in enumerate(lst) if x==value]\n res = []\n for start, end in zip([0, *[i+1 for i in indices]], [*[i-1 for i in indices], len(lst)]):\n res.append(lst[start:end+1])\n return res\n \n def parse_int(self, lst):\n \"\"\"\n Parse a list that starts with an integer.\n Return the integer value, and the position it ends in the list.\n \"\"\"\n base = self.max_int\n val = 0\n i = 0\n for x in lst[1:]:\n if not (x.rstrip('-').isdigit()):\n break\n val = val * base + int(x)\n i += 1\n if base > 0 and lst[0] == 'INT-':\n val = -val\n return val, i + 1\n\n def write_int(self, val):\n \"\"\"\n Convert a decimal integer to a representation in the given base.\n \"\"\"\n base = self.max_int\n res = []\n max_digit = abs(base)\n neg = val < 0\n val = -val if neg else val\n while True:\n rem = val % base\n val = val // base\n if rem < 0 or rem > max_digit:\n rem -= base\n val += 1\n res.append(str(rem))\n if val == 0:\n break\n res.append('INT-' if neg else 'INT+')\n return res[::-1]\n \n","repo_name":"facebookresearch/recur","sub_path":"src/envs/encoders.py","file_name":"encoders.py","file_ext":"py","file_size_in_byte":7891,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"9"} +{"seq_id":"2930770876","text":"import os\nimport time\n\nfrom PySide2 import QtCore, QtWidgets\n\nimport Code\nfrom Code import Util\nfrom Code.Base.Constantes import FEN_INITIAL\nfrom Code.QT import Colocacion\nfrom Code.QT import Controles\nfrom Code.QT import FormLayout\nfrom Code.QT import Iconos\nfrom Code.QT import LCDialog\nfrom Code.QT import QTUtil2\nfrom Code.QT import QTVarios\nfrom Code.SQL import UtilSQL\n\n\nclass WFiltrar(QtWidgets.QDialog):\n def __init__(self, w_parent, o_columns, liFiltro, dbSaveNom=None):\n super(WFiltrar, self).__init__()\n\n if dbSaveNom is None:\n dbSaveNom = Code.configuration.ficheroFiltrosPGN\n\n self.setWindowTitle(_(\"Filter\"))\n self.setWindowFlags(QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.Dialog | QtCore.Qt.WindowTitleHint)\n self.setWindowIcon(Iconos.Filtrar())\n\n self.liFiltro = liFiltro\n nFiltro = len(liFiltro)\n self.dbSaveNom = dbSaveNom\n\n li_fields = [(x.head, '\"%s\"' % x.key) for x in o_columns.li_columns if x.key not in (\"__num__\", \"opening\")]\n li_fields.insert(0, (\"\", None))\n li_condicion = [\n (\"\", None),\n (_(\"Equal\"), \"=\"),\n (_(\"Not equal\"), \"<>\"),\n (_(\"Greater than\"), \">\"),\n (_(\"Less than\"), \"<\"),\n (_(\"Greater than or equal\"), \">=\"),\n (_(\"Less than or equal\"), \"<=\"),\n (_(\"Like (wildcard = *)\"), \"LIKE\"),\n (_(\"Not like (wildcard = *)\"), \"NOT LIKE\"),\n ]\n\n li_union = [(\"\", None), (_(\"AND\"), \"AND\"), (_(\"OR\"), \"OR\")]\n\n f = Controles.TipoLetra(puntos=12) # 0, peso=75 )\n\n lb_col = Controles.LB(self, _(\"Column\")).ponFuente(f)\n lb_par0 = Controles.LB(self, \"(\").ponFuente(f)\n lb_par1 = Controles.LB(self, \")\").ponFuente(f)\n lb_con = Controles.LB(self, _(\"Condition\")).ponFuente(f)\n lb_val = Controles.LB(self, _(\"Value\")).ponFuente(f)\n lb_uni = Controles.LB(self, \"+\").ponFuente(f)\n\n ly = Colocacion.G()\n ly.controlc(lb_uni, 0, 0).controlc(lb_par0, 0, 1).controlc(lb_col, 0, 2)\n 
ly.controlc(lb_con, 0, 3).controlc(lb_val, 0, 4).controlc(lb_par1, 0, 5)\n\n self.numC = 8\n liC = []\n\n union, par0, campo, condicion, valor, par1 = None, False, None, None, \"\", False\n for i in range(self.numC):\n if i > 0:\n c_union = Controles.CB(self, li_union, union)\n ly.controlc(c_union, i + 1, 0)\n else:\n c_union = None\n\n c_par0 = Controles.CHB(self, \"\", par0).anchoFijo(20)\n ly.controlc(c_par0, i + 1, 1)\n c_campo = Controles.CB(self, li_fields, campo)\n ly.controlc(c_campo, i + 1, 2)\n c_condicion = Controles.CB(self, li_condicion, condicion)\n ly.controlc(c_condicion, i + 1, 3)\n c_valor = Controles.ED(self, valor)\n ly.controlc(c_valor, i + 1, 4)\n c_par1 = Controles.CHB(self, \"\", par1).anchoFijo(20)\n ly.controlc(c_par1, i + 1, 5)\n\n liC.append((c_union, c_par0, c_campo, c_condicion, c_valor, c_par1))\n\n self.liC = liC\n\n # Toolbar\n li_acciones = [\n (_(\"Accept\"), Iconos.Aceptar(), self.aceptar),\n None,\n (_(\"Cancel\"), Iconos.Cancelar(), self.reject),\n None,\n (_(\"Reinit\"), Iconos.Reiniciar(), self.reiniciar),\n None,\n (_(\"Save/Restore\"), Iconos.Grabar(), self.grabar),\n None,\n ]\n\n tb = QTVarios.LCTB(self, li_acciones)\n\n # Layout\n layout = Colocacion.V().control(tb).otro(ly).margen(3)\n self.setLayout(layout)\n\n liC[0][2].setFocus()\n\n if nFiltro > 0:\n self.lee_filtro(self.liFiltro)\n\n def grabar(self):\n if not self.lee_filtro_actual():\n return\n with UtilSQL.DictSQL(self.dbSaveNom, tabla=\"Filters\") as dbc:\n liConf = dbc.keys(si_ordenados=True)\n if len(liConf) == 0 and len(self.liFiltro) == 0:\n return\n menu = Controles.Menu(self)\n SELECCIONA, BORRA, GRABA = range(3)\n for x in liConf:\n menu.opcion((SELECCIONA, x), x, Iconos.PuntoAzul())\n menu.separador()\n\n if len(self.liFiltro) > 0:\n submenu = menu.submenu(_(\"Save current\"), Iconos.Mas())\n if liConf:\n for x in liConf:\n submenu.opcion((GRABA, x), x, Iconos.PuntoAmarillo())\n submenu.separador()\n submenu.opcion((GRABA, None), _(\"New\"), Iconos.NuevoMas())\n\n if liConf:\n menu.separador()\n submenu = menu.submenu(_(\"Remove\"), Iconos.Delete())\n for x in liConf:\n submenu.opcion((BORRA, x), x, Iconos.PuntoRojo())\n resp = menu.lanza()\n\n if resp:\n op, name = resp\n\n if op == SELECCIONA:\n liFiltro = dbc[name]\n self.lee_filtro(liFiltro)\n elif op == BORRA:\n if QTUtil2.pregunta(self, _X(_(\"Delete %1?\"), name)):\n del dbc[name]\n elif op == GRABA:\n if self.lee_filtro_actual():\n if name is None:\n li_gen = [FormLayout.separador]\n li_gen.append((_(\"Name\") + \":\", \"\"))\n\n resultado = FormLayout.fedit(li_gen, title=_(\"Filter\"), parent=self, icon=Iconos.Libre())\n if resultado:\n accion, li_gen = resultado\n\n name = li_gen[0].strip()\n if name:\n dbc[name] = self.liFiltro\n else:\n dbc[name] = self.liFiltro\n\n def lee_filtro(self, liFiltro):\n self.liFiltro = liFiltro\n nFiltro = len(liFiltro)\n\n for i in range(self.numC):\n if nFiltro > i:\n union, par0, campo, condicion, valor, par1 = liFiltro[i]\n else:\n union, par0, campo, condicion, valor, par1 = None, False, None, None, \"\", False\n c_union, c_par0, c_campo, c_condicion, c_valor, c_par1 = self.liC[i]\n if c_union:\n c_union.set_value(union)\n c_par0.set_value(par0)\n c_campo.set_value(campo)\n c_condicion.set_value(condicion)\n c_valor.set_text(valor)\n c_par1.set_value(par1)\n\n def reiniciar(self):\n for i in range(self.numC):\n self.liC[i][1].set_value(False)\n self.liC[i][2].setCurrentIndex(0)\n self.liC[i][3].setCurrentIndex(0)\n self.liC[i][4].set_text(\"\")\n self.liC[i][5].set_value(False)\n 
if i > 0:\n self.liC[i][0].setCurrentIndex(0)\n self.aceptar()\n\n def lee_filtro_actual(self):\n self.liFiltro = []\n\n npar = 0\n\n for i in range(self.numC):\n par0 = self.liC[i][1].valor()\n campo = self.liC[i][2].valor()\n condicion = self.liC[i][3].valor()\n valor = self.liC[i][4].texto().rstrip()\n par1 = self.liC[i][5].valor()\n\n if campo and condicion:\n if campo == \"PLIES\":\n valor = valor.strip()\n if valor.isdigit():\n valor = \"%d\" % int(valor) # fonkap patch %3d -> %d\n if par0:\n npar += 1\n if par1:\n npar -= 1\n if npar < 0:\n break\n if i > 0:\n union = self.liC[i][0].valor()\n if union:\n self.liFiltro.append([union, par0, campo, condicion, valor, par1])\n else:\n self.liFiltro.append([None, par0, campo, condicion, valor, par1])\n else:\n break\n if npar:\n QTUtil2.message_error(self, _(\"The parentheses are unbalanced.\"))\n return False\n return True\n\n def aceptar(self):\n if self.lee_filtro_actual():\n self.accept()\n\n def where(self):\n where = \"\"\n for union, par0, campo, condicion, valor, par1 in self.liFiltro:\n valor = valor.upper()\n if condicion in (\"LIKE\", \"NOT LIKE\"):\n valor = valor.replace(\"*\", \"%\")\n if not (\"%\" in valor):\n valor = \"%\" + valor + \"%\"\n\n if union:\n where += \" %s \" % union\n if par0:\n where += \"(\"\n if condicion in (\"=\", \"<>\") and not valor:\n where += \"(( %s %s ) OR (%s %s ''))\" % (\n campo,\n \"IS NULL\" if condicion == \"=\" else \"IS NOT NULL\",\n campo,\n condicion,\n )\n else:\n valor = valor.upper()\n if valor.isupper():\n # where += \"UPPER(%s) %s '%s'\" % (campo, condicion, valor) # fonkap patch\n where += \"%s %s '%s' COLLATE NOCASE\" % (campo, condicion, valor) # fonkap patch\n elif valor.isdigit(): # fonkap patch\n where += \"CAST(%s as decimal) %s %s\" % (campo, condicion, valor) # fonkap patch\n else:\n where += \"%s %s '%s'\" % (campo, condicion, valor) # fonkap patch\n if par1:\n where += \")\"\n return where\n\n\nclass EM_SQL(Controles.EM):\n def __init__(self, owner, where, li_fields):\n self.li_fields = li_fields\n Controles.EM.__init__(self, owner, where, siHTML=False)\n\n def mousePressEvent(self, event):\n Controles.EM.mousePressEvent(self, event)\n if event.button() == QtCore.Qt.RightButton:\n menu = QTVarios.LCMenu(self)\n rondo = QTVarios.rondoPuntos()\n for txt, key in self.li_fields:\n menu.opcion(key, txt, rondo.otro())\n resp = menu.lanza()\n if resp:\n self.insertarTexto(resp)\n\n\nclass WFiltrarRaw(LCDialog.LCDialog):\n def __init__(self, w_parent, o_columns, where):\n LCDialog.LCDialog.__init__(self, w_parent, _(\"Filter\"), Iconos.Filtrar(), \"rawfilter\")\n\n self.where = \"\"\n li_fields = [(x.head, x.key) for x in o_columns.li_columns if x.key != \"__num__\"]\n f = Controles.TipoLetra(puntos=12) # 0, peso=75 )\n\n lbRaw = Controles.LB(self, \"%s:\" % _(\"Raw SQL\")).ponFuente(f)\n self.edRaw = EM_SQL(self, where, li_fields).altoFijo(72).anchoMinimo(512).ponFuente(f)\n\n lbHelp = Controles.LB(self, _(\"Right button to select a column of database\")).ponFuente(f)\n lyHelp = Colocacion.H().relleno().control(lbHelp).relleno()\n\n ly = Colocacion.H().control(lbRaw).control(self.edRaw)\n\n # Toolbar\n li_acciones = [\n (_(\"Accept\"), Iconos.Aceptar(), self.aceptar),\n None,\n (_(\"Cancel\"), Iconos.Cancelar(), self.reject),\n None,\n ]\n tb = QTVarios.LCTB(self, li_acciones)\n\n # Layout\n layout = Colocacion.V().control(tb).otro(ly).otro(lyHelp).margen(3)\n self.setLayout(layout)\n\n self.edRaw.setFocus()\n\n self.restore_video(siTam=False)\n\n def aceptar(self):\n 
self.where = self.edRaw.texto()\n self.save_video()\n self.accept()\n\n\ndef mensajeEntrenamientos(owner, liCreados, liNoCreados):\n txt = \"\"\n if liCreados:\n txt += _(\"Created the following trainings\") + \":\"\n txt += \"
    \"\n for x in liCreados:\n txt += \"
  • %s  • \" % os.path.basename(x)\n txt += \"\"\n if liNoCreados:\n txt += _(\"No trainings created due to lack of data\") + \":\"\n txt += \"    \"\n for x in liNoCreados:\n txt += \"  • %s  • \" % os.path.basename(x)\n txt += \"
\"\n QTUtil2.message_bold(owner, txt)\n\n\ndef create_tactics(procesador, wowner, li_registros_selected, li_registros_total, rutina_datos, name):\n nregs = len(li_registros_selected)\n\n form = FormLayout.FormLayout(wowner, _(\"Create tactics training\"), Iconos.Tacticas())\n\n form.separador()\n form.edit(_(\"Name\"), name)\n\n form.separador()\n li_j = [(_(\"By default\"), 0), (_(\"White\"), 1), (_(\"Black\"), 2)]\n form.combobox(_(\"Point of view\"), li_j, 0)\n\n form.separador()\n form.checkbox(_(\"Skip the first move\"), False)\n\n form.separador()\n selected = nregs > 1\n form.checkbox(\"%s (%d)\" % (_(\"Only selected games\"), nregs), selected)\n form.separador()\n\n resultado = form.run()\n\n if not resultado:\n return\n\n accion, li_gen = resultado\n\n menuname = li_gen[0].strip()\n if not menuname:\n return\n pointview = str(li_gen[1])\n skip_first = li_gen[2]\n only_selected = li_gen[3]\n\n li_registros = li_registros_selected if only_selected else li_registros_total\n nregs = len(li_registros)\n\n rest_dir = Util.valid_filename(menuname)\n nom_dir = os.path.join(Code.configuration.folder_tactics(), rest_dir)\n nom_ini = os.path.join(nom_dir, \"Config.ini\")\n if os.path.isfile(nom_ini):\n dic_ini = Util.ini2dic(nom_ini)\n n = 1\n while True:\n if \"TACTIC%d\" % n in dic_ini:\n if \"MENU\" in dic_ini[\"TACTIC%d\" % n]:\n if dic_ini[\"TACTIC%d\" % n][\"MENU\"].upper() == menuname.upper():\n break\n else:\n break\n n += 1\n else:\n break\n nom_tactic = \"TACTIC%d\" % n\n else:\n Util.create_folder(nom_dir)\n nom_tactic = \"TACTIC1\"\n dic_ini = {}\n nom_fns = os.path.join(nom_dir, \"Puzzles.fns\")\n if os.path.isfile(nom_fns):\n n = 1\n nom_fns = os.path.join(nom_dir, \"Puzzles-%d.fns\")\n while os.path.isfile(nom_fns % n):\n n += 1\n nom_fns = nom_fns % n\n\n # Se crea el file con los puzzles\n f = open(nom_fns, \"wt\", encoding=\"utf-8\", errors=\"ignore\")\n\n tmp_bp = QTUtil2.BarraProgreso(wowner, menuname, \"%s: %d\" % (_(\"Games\"), nregs), nregs)\n tmp_bp.mostrar()\n\n fen0 = FEN_INITIAL\n\n t = time.time()\n\n for n in range(nregs):\n\n if tmp_bp.is_canceled():\n break\n\n tmp_bp.pon(n + 1)\n if time.time() - t > 1.0 or (nregs - n) < 10:\n tmp_bp.mensaje(\"%d/%d\" % (n + 1, nregs))\n t = time.time()\n\n recno = li_registros[n]\n\n dic_valores = rutina_datos(recno, skip_first)\n plies = dic_valores[\"PLIES\"]\n if plies == 0:\n continue\n\n pgn = dic_valores[\"PGN\"]\n li = pgn.split(\"\\n\")\n if len(li) == 1:\n li = pgn.split(\"\\r\")\n li = [linea for linea in li if not linea.strip().startswith(\"[\")]\n num_moves = \" \".join(li).replace(\"\\r\", \"\").replace(\"\\n\", \"\")\n if not num_moves.strip(\"*\"):\n continue\n\n def xdic(k):\n x = dic_valores.get(k, \"\")\n if x is None:\n x = \"\"\n elif \"?\" in x:\n x = x.replace(\".?\", \"\").replace(\"?\", \"\")\n return x.strip()\n\n fen = dic_valores.get(\"FEN\")\n if not fen:\n fen = fen0\n\n event = xdic(\"EVENT\")\n site = xdic(\"SITE\")\n date = xdic(\"DATE\")\n gameurl = xdic(\"GAMEURL\")\n themes = xdic(\"THEMES\")\n if site == event:\n es = event\n else:\n es = event + \" \" + site\n es = es.strip()\n if date:\n if es:\n es += \" (%s)\" % date\n else:\n es = date\n white = xdic(\"WHITE\")\n black = xdic(\"BLACK\")\n wb = (\"%s-%s\" % (white, black)).strip(\"-\")\n\n li_titulo = []\n\n def add_titulo(txt):\n if txt:\n li_titulo.append(txt)\n\n add_titulo(es)\n add_titulo(wb)\n add_titulo(themes)\n if gameurl:\n add_titulo('%s' % (gameurl, gameurl))\n for other in (\"TASK\", \"SOURCE\"):\n v = xdic(other)\n 
add_titulo(v)\n titulo = \" \".join(li_titulo)\n\n if skip_first:\n pgn_real = dic_valores[\"PGN_REAL\"].replace(\"\\n\", \" \").replace(\"\\r\", \" \")\n txt = fen + \"|%s|%s|%s\\n\" % (titulo, num_moves, pgn_real)\n else:\n txt = fen + \"|%s|%s\\n\" % (titulo, num_moves)\n\n f.write(txt)\n\n f.close()\n tmp_bp.cerrar()\n\n # Se crea el file de control\n dic_ini[nom_tactic] = d = {}\n d[\"MENU\"] = menuname\n d[\"FILESW\"] = \"%s:100\" % os.path.basename(nom_fns)\n d[\"POINTVIEW\"] = pointview\n\n Util.dic2ini(nom_ini, dic_ini)\n\n def sp(num):\n return \" \" * num\n\n QTUtil2.message_bold(\n wowner,\n (\n \"%s %s  %s %s %s\"\n % (\n _(\"Tactic training %s created.\") % menuname,\n _(\"You can access this training from\"),\n \"%s/%s\" % (_(\"Train\"), _(\"Tactics\")),\n \"%s1) %s / %s %s➔ %s\"\n % (sp(5), _(\"Training positions\"), _(\"Tactics\"), sp(12), _(\"for a standard training\")),\n \"%s2) %s / %s
%s➔ %s\"\n % (\n sp(5),\n _(\"Learn tactics by repetition\"),\n _(\"Personal tactics\"),\n sp(12),\n _(\"for a training by repetition\"),\n ),\n )\n ),\n )\n\n procesador.entrenamientos.rehaz()\n","repo_name":"lukasmonk/lucaschessR2","sub_path":"bin/Code/Databases/WDB_Utils.py","file_name":"WDB_Utils.py","file_ext":"py","file_size_in_byte":17630,"program_lang":"python","lang":"en","doc_type":"code","stars":202,"dataset":"github-code","pt":"9"} +{"seq_id":"37542957760","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 20 09:36:44 2023\n\n@author: kbillesk\n\"\"\"\n\nfrom llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, download_loader, GPTRAKEKeywordTableIndex\nfrom llama_index import LLMPredictor, ServiceContext, StorageContext, load_index_from_storage\nfilename_fn = lambda filename: {'file_name': filename}\n\nservice_context = ServiceContext.from_defaults()\ndocuments = SimpleDirectoryReader('examples/gdpr_expert/data', file_metadata=filename_fn).load_data()\n#The following two lines are used to build/update the vectorstore index.\n#If you dont need this comment out these lines and activate two susequent lines, which reads from the existing index\nindex = GPTVectorStoreIndex.from_documents(documents,service_context=service_context)\nindex.storage_context.persist(persist_dir=\"examples/gdpr_expert/storage\")\n#storage_context = StorageContext.from_defaults(persist_dir=\"example/gdpr_expert/storage\")\n#index = load_index_from_storage(storage_context)\nquery_engine = index.as_query_engine(service_context=service_context)\nquestion = \"ww\"\nwhile question != \"quit\":\n question = input(\"Stil dit GDPR spørgsmål?\\n\")\n if question != \"quit\":\n response = query_engine.query(question)\n print(response)\n for node in response.source_nodes:\n print('-----')\n text_fmt = node.node.text.strip().replace('\\n', ' ')[:1000]\n doc_data = node.node.get_doc_hash()\n #print(f\"Text:\\t {text_fmt} ...\")\n print(f'Metadata:\\t {node.node.ref_doc_id}')\n print(f'Metadata:\\t {node.node.extra_info_str}')\n print(doc_data)\n print(f'Score:\\t {node.score:.3f}')\n #print(response)\n","repo_name":"kbillesk/chat-gdpr","sub_path":"llama_test_basic.py","file_name":"llama_test_basic.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"28272577537","text":"import boto3\nimport json\nimport logging\nimport os\n\n__author__ = 'Mystique'\n__email__ = 'miztiik@github'\n__version__ = '0.0.1'\n__status__ = 'production'\n\n\nclass global_args:\n \"\"\" Global statics \"\"\"\n OWNER = \"Mystique\"\n ENVIRONMENT = \"production\"\n MODULE_NAME = \"s3_event_processor\"\n LOG_LEVEL = os.getenv(\"LOG_LEVEL\", \"INFO\").upper()\n\n\n_s3_client = boto3.client('s3')\n_ddb = boto3.resource('dynamodb')\n\n\ndef _ddb_put_item(item):\n \"\"\" Insert Item into DynamoDb Table \"\"\"\n if os.environ.get('DDB_TABLE_NAME'):\n _ddb_table = _ddb.Table(os.environ.get('DDB_TABLE_NAME'))\n try:\n return(_ddb_table.put_item(Item=item))\n except Exception as e:\n raise\n\n\ndef lambda_handler(event, context):\n global LOGGER\n LOGGER = logging.getLogger()\n LOGGER.setLevel(level=os.getenv(\"LOG_LEVEL\", \"INFO\").upper())\n\n LOGGER.info(f\"received_event:{event}\")\n resp = {\n \"statusCode\": 400,\n \"body\": json.dumps({\"message\": {}})\n }\n try:\n if \"Records\" in event:\n item = {}\n item[\"_id\"] = event[\"Records\"][0][\"s3\"][\"object\"][\"key\"]\n item[\"_size\"] = 
event[\"Records\"][0][\"s3\"][\"object\"][\"size\"]\n item[\"_bucket\"] = event[\"Records\"][0][\"s3\"][\"bucket\"][\"name\"]\n item[\"_bucket_owner\"] = event[\"Records\"][0][\"s3\"][\"bucket\"][\"ownerIdentity\"][\"principalId\"]\n _put_resp = _ddb_put_item(item)\n resp[\"statusCode\"] = 200\n resp[\"body\"] = json.dumps({\"message\": _put_resp})\n except Exception as e:\n LOGGER.error(f\"{str(e)}\")\n resp[\"body\"] = json.dumps({\n \"message\": f\"ERROR:{str(e)}\"\n })\n\n return resp\n","repo_name":"miztiik/my-first-cdk-project","sub_path":"advanced_use_cases/lambda_src/s3_event_processor.py","file_name":"s3_event_processor.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"9"} +{"seq_id":"42138247098","text":"import json\nimport os\nimport random\n\nRAW_TEXT = 'data/raw_text_gutenberg.txt'\nJSON_OUT = 'data/meditations.json'\n\nbooks = [\n 'THE FIRST BOOK',\n 'THE SECOND BOOK',\n 'THE THIRD BOOK',\n 'THE FOURTH BOOK',\n 'THE FIFTH BOOK',\n 'THE SIXTH BOOK',\n 'THE SEVENTH BOOK',\n 'THE EIGHTH BOOK',\n 'THE NINTH BOOK',\n 'THE TENTH BOOK',\n 'THE ELEVENTH BOOK',\n 'THE TWELFTH BOOK',\n '\\n\\n\\nAPPENDIX'\n ]\n\ndef main():\n meditations = {}\n with open(RAW_TEXT) as o:\n data = o.read().strip()\n for i, (start, end) in enumerate(zip(books, books[1:])):\n start_index = data.index(start)\n end_index = data.index(end)\n contents = data[start_index:end_index]\n contents = contents.strip().split('\\n\\n')[1:]\n # remove first entry (Name of book) and prepended roman numerals\n contents = [c[c.index(' ')+1:].replace('\\n', ' ') for c in contents]\n for j, c in enumerate(contents):\n meditations[f'Book {i+1} : {j+1}'] = c\n with open(JSON_OUT, 'w') as o:\n o.write(json.dumps(meditations))\n\nif __name__ == '__main__':\n main()\n","repo_name":"samhattangady/aurelius_tabs","sub_path":"data/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"9296010281","text":"import sys\nsys.stdin = open(\"input.txt\", \"r\")\n\ndef DFS(x, y):\n global cnt\n if x == endx and y == endy:\n cnt +=1\n else:\n for k in range(4):\n xx = x + dx[k]\n yy = y + dy[k]\n if 0<= xx board[i][j]:\n st = board[i][j]\n stx = i\n sty = j\n if end < board[i][j]:\n end = board[i][j]\n endx = i\n endy = j\nDFS(stx,sty)\nprint(cnt)\n \n","repo_name":"tae100k/Python-Algorithm","sub_path":"Sec7/DFS/11.Climbing mountain/AA.py","file_name":"AA.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"12085220172","text":"import numpy as np\nimport pandas as pd\nfrom solartoolbox import spatial, cmv\nimport pvlib\nimport matplotlib.pyplot as plt\n\n\"\"\"\nThis script will do a demonstration of computing the Cloud Motion Vector (CMV)\nusing the methods available in solartoolbox. \n\"\"\"\n\n# ##### INPUTS ##### #\nfn = \"data/nrcan_ald_hv.h5\"\nwin = pd.date_range('2015-08-12 08:00:00', '2015-08-12 16:00:00',\n freq='10ms',\n tz='Etc/GMT+5') # ALD HV\npt_ref = 'AFN11'\nresample = '250ms'\n\n\n# Read in the file, convert plant to UTM. 
Define Center Position\npos = pd.read_hdf(fn, mode=\"r\", key=\"latlon\")\npos_utm = spatial.latlon2utm(pos['lat'], pos['lon'])\ncenter_loc = pvlib.location.Location(pos['lat'][pt_ref], pos['lon'][pt_ref])\n\n# Read, select the window, and resample the dataset\nts = pd.read_hdf(fn, mode=\"r\", key=\"data\")\nts = ts.loc[win]\nts = ts.resample(resample).mean()\n\n# Compute the clearsky\ncs_ghi = center_loc.get_clearsky(ts.index, model='simplified_solis')['ghi']\n# Apply the clear sky to the data column by column from pvlib\nkt = ts.apply(lambda x:\n pvlib.irradiance.clearsky_index(x, cs_ghi, 1000), axis=0)\n\n# Compute the cloud motion vector using both methods\ncld_spd_gag, cld_dir_gag, dat_gag = cmv.compute_cmv(kt, pos_utm,\n reference_id=None,\n method='gagne',\n corr_scaling='coeff')\ncld_spd_jam, cld_dir_jam, dat_jam = cmv.compute_cmv(kt, pos_utm,\n reference_id=None,\n method='jamaly',\n corr_scaling='coeff')\n\n# Print the answer\nprint(\"Method Spd Angle N_good\")\nprint(\"Gagne {:0.2f} {:0.2f}\".format(cld_spd_gag, np.rad2deg(cld_dir_gag)),\n sum(dat_gag.pair_flag == cmv.Flag.GOOD))\nprint(\"Jamaly {:0.2f} {:0.2f}\".format(cld_spd_jam, np.rad2deg(cld_dir_jam)),\n sum(dat_jam.pair_flag == cmv.Flag.GOOD))\n\n# ##### PLOTS ##### #\nplt.title('Result Data for Jamaly Method')\n# Correlation scatter\nplt.scatter(dat_jam.pair_lag, dat_jam.pair_dists, c=dat_jam.corr_lag,\n vmin=0, vmax=1)\n# Label the points that came through as GOOD from the CMV routine\nplt.plot(dat_jam.pair_lag[dat_jam.pair_flag == cmv.Flag.GOOD],\n dat_jam.pair_dists[dat_jam.pair_flag == cmv.Flag.GOOD], 'x')\n# Best fit line\nplt.plot([np.min(dat_jam.pair_lag), np.max(dat_jam.pair_lag)],\n cld_spd_jam *\n np.array([np.min(dat_jam.pair_lag),np.max(dat_jam.pair_lag)]), 'r:')\nplt.xlabel('Lag (s)')\nplt.ylabel('Distance (m)')\nplt.legend(['Point Correlation', 'GOOD points', 'Speed Fit'])\nplt.tight_layout()\nplt.show()\n","repo_name":"jranalli/solartoolbox","sub_path":"demos/simple_cmv_demo.py","file_name":"simple_cmv_demo.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"4245820419","text":"# Import required libraries\nimport pandas as pd\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\nimport plotly.graph_objects as go\n\n# Read the airline data into pandas dataframe\nspacex_df = pd.read_csv(\"spacex_launch_dash.csv\")\nmax_payload = spacex_df['Payload Mass (kg)'].max()\nmin_payload = spacex_df['Payload Mass (kg)'].min()\n\n# Create a dash application\napp = dash.Dash(__name__)\n\n# Create an app layout\napp.layout = html.Div(children=[\n html.H1('SpaceX Launch Records Dashboard',\n style={'textAlign': 'center', 'color': '#503D36', 'font-size': 40}),\n \n # TASK 1: Add a dropdown list to enable Launch Site selection\n # The default select value is for ALL sites\n dcc.Dropdown(\n id='site-dropdown',\n options=[\n {'label': 'All Sites', 'value': 'ALL'},\n {'label': 'CCAFS LC-40', 'value': 'CCAFS LC-40'},\n {'label': 'VAFB SLC-4E', 'value': 'VAFB SLC-4E'},\n {'label': 'KSC LC-39A', 'value': 'KSC LC-39A'},\n {'label': 'CCAFS SLC-40', 'value': 'CCAFS SLC-40'},\n ],\n value='ALL',\n placeholder=\"Select a Launch Site here\",\n searchable=True,\n ),\n \n html.Br(),\n\n # TASK 2: Add a pie chart to show the total successful launches count for all sites\n # If a specific launch site was selected, show the Success 
vs. Failed counts for the site\n html.Div(dcc.Graph(id='success-pie-chart')),\n html.Br(),\n \n # TASK 3: Add a slider to select payload range\n dcc.RangeSlider(\n id='payload-slider',\n min=min_payload,\n max=max_payload,\n step=1000,\n marks={str(payload): str(payload) for payload in range(int(min_payload), int(max_payload)+1, 1000)},\n value=[min_payload, max_payload]\n ),\n\n # TASK 4: Add a scatter chart to show the correlation between payload and launch success\n html.Div(dcc.Graph(id='success-payload-scatter-chart')),\n])\n# Define the callback function\n@app.callback(\n Output(component_id='success-payload-scatter-chart', component_property='figure'),\n [Input(component_id='site-dropdown', component_property='value'),\n Input(component_id='payload-slider', component_property='value')]\n)\ndef update_scatter_chart(selected_site, selected_payload_range):\n # Check if ALL sites were selected or just a specific launch site\n if selected_site == 'ALL':\n filtered_df = spacex_df # No filtering needed for 'ALL' sites\n else:\n # Filter the spacex_df for the selected launch site\n filtered_df = spacex_df[spacex_df['Launch Site'] == selected_site]\n \n # Filter the filtered_df based on the selected payload range\n filtered_df = filtered_df[\n (filtered_df['Payload Mass (kg)'] >= selected_payload_range[0]) &\n (filtered_df['Payload Mass (kg)'] <= selected_payload_range[1])\n ]\n \n # Create the scatter plot\n fig = px.scatter(\n data_frame=filtered_df,\n x='Payload Mass (kg)',\n y='class',\n color='Booster Version Category',\n title='Payload vs Class with Booster Version Category'\n )\n \n return fig\n\n# Callback function for `site-dropdown` as input, `success-pie-chart` as output\n@app.callback(\n Output('success-pie-chart', 'figure'),\n Input('site-dropdown', 'value')\n)\ndef update_pie_chart(selected_site):\n if selected_site == 'ALL':\n # Calculate success rates for all launch sites\n success_rates = spacex_df.groupby('Launch Site')['class'].mean()\n\n fig = go.Figure(data=[go.Pie(labels=success_rates.index, values=success_rates.values)])\n # fig.update_layout(title='Success Rate for All Launch Sites')\n\n return fig\n else:\n selected_df = spacex_df[spacex_df['Launch Site'] == selected_site]\n success_count = selected_df[selected_df['class'] == 1]['class'].count()\n failed_count = selected_df[selected_df['class'] == 0]['class'].count()\n labels = ['Success', 'Failed']\n values = [success_count, failed_count]\n\n fig = go.Figure(data=[go.Pie(labels=labels, values=values)])\n return fig\n\n# Callback function for `site-dropdown` and `payload-slider` as inputs, `success-payload-scatter-chart` as output\n\n# Run the app\nif __name__ == '__main__':\n app.run_server()\n","repo_name":"cradke58/This_Should_be_Good","sub_path":"spacex_dash_app (4) (1).py","file_name":"spacex_dash_app (4) (1).py","file_ext":"py","file_size_in_byte":4375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"10118136148","text":"# 처음 시도한 코드.. 
답도 잘안나오네...\n# import sys\n#\n# n, s = map(int, sys.stdin.readline().split())\n# num = list(map(int, sys.stdin.readline().split()))\n#\n# start = 0\n# end = 1\n# length = 100001\n# check = False\n#\n# while True:\n# if end == len(num) and start == end - 1:\n# break\n#\n# if sum(num[start:end]) >= s:\n# check = True\n# if end - start < length:\n# length = end - start + 1\n# start += 1\n#\n# else:\n# if end != len(num)-1:\n# end += 1\n# else:\n# start += 1\n#\n# if end == start:\n# end += 1\n#\n# if check:\n# print(length)\n# else:\n# print(0)\n\n#기존에 배웠던 투 포인터 공식을 활용했다. 책을 좀 참고하여 풀긴했는데.. 그래도 성공!\nimport sys\n\nn, s = map(int, sys.stdin.readline().split())\nnum = list(map(int, sys.stdin.readline().split()))\n\nstart = 0\nend = 0\ninterval_sum = 0\nlength = 100001\ncheck = False\n\nfor start in range(n):\n while interval_sum < s and end < n:\n interval_sum += num[end]\n end += 1\n\n if interval_sum >= s:\n check = True\n if length > end - start:\n length = end - start\n interval_sum -= num[start]\n\nif check:\n print(length)\nelse:\n print(0)","repo_name":"CASY82/CHH_StudyRoom","sub_path":"Algo/etc/Baek_1806.py","file_name":"Baek_1806.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"26826428241","text":"import time\nimport logging\nimport os\nimport joblib\nimport uvicorn\nfrom typing import List, Union, Optional\nimport pandas as pd\nfrom fastapi import FastAPI, HTTPException\nfrom pydantic import BaseModel, conlist\nimport pipeline\nfrom pipeline import BuilderPipe\n\nlogger = logging.getLogger(__name__)\nNAME_MODEL = \"model.pkl\"\nITEMS = 13\nPATH = os.getcwd()\n\n\ndef load_object(path: str) -> BuilderPipe:\n with open(path, \"rb\") as f:\n return joblib.load(f)\n\n\nclass DataModel(BaseModel):\n data: List[conlist(Union[int, float, str, None],\n min_items=ITEMS,\n max_items=ITEMS)]\n features: List[str]\n\n\nclass DiseasePrediction(BaseModel):\n id: str\n label: int\n\n\nmodel: Optional[BuilderPipe] = None\ntime_start: float = 0.0\n\n\ndef make_predict(\n data: List, features: List[str], model: BuilderPipe,\n) -> List[DiseasePrediction]:\n data = pd.DataFrame(data, columns=features)\n ids = data.index\n predicts = model.pipe.predict(data)\n\n return [\n DiseasePrediction(id=id_, label=float(label)) for id_, label in zip(ids, predicts)\n ]\n\n\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef main():\n return \"it is entry point of our predictor\"\n\n\n@app.on_event(\"startup\")\ndef load_model():\n global model\n global time_start\n time.sleep(30)\n time_start = time.time()\n model_path = os.getenv(\"PATH_TO_MODEL\")\n if model_path is None:\n err = f\"PATH_TO_MODEL {model_path} is None\"\n logger.error(err)\n raise RuntimeError(err)\n\n model = load_object(model_path)\n\n\n@app.get(\"/healthz\")\ndef health() -> bool:\n return not (model is None)\n\n\n@app.get(\"/live\")\ndef live():\n if time.time() - time_start >= 150:\n raise HTTPException(404)\n return True\n\n\ndef load_glob_model():\n global model\n model_path = PATH + \"/\" + NAME_MODEL\n model = load_object(model_path)\n\n\n@app.get(\"/predict/\", response_model=List[DiseasePrediction])\ndef predict(request: DataModel):\n try:\n return make_predict(request.data, request.features, model)\n except Exception as e:\n raise HTTPException(status_code=400, detail=str(e))\n\n\nif __name__ == \"__main__\":\n uvicorn.run(\"app:app\", host=\"0.0.0.0\", port=os.getenv(\"PORT\", 
80))\n","repo_name":"made-ml-in-prod-2021/alinazemlev","sub_path":"online_inference/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"40030048451","text":"# Licensed under the Apache License, Version 2.0 ; The TensorFlow Authors\n\n\n\"\"\"Convolutional Neural Network Estimator for MNIST, built with tf.layers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ndef cnn_model_fn(features, labels, mode):\n \"\"\"Model function for CNN.\"\"\"\n # Input Layer\n # Reshape X to 4-D tensor: [batch_size, width, height, channels]\n # MNIST images are 28x28 pixels, and have one color channel\n input_layer = tf.reshape(features[\"x\"], [-1, 28, 28, 1])\n\n # Convolutional Layer #1\n # Computes 32 features using a 5x5 filter with ReLU activation.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 28, 28, 1]\n # Output Tensor Shape: [batch_size, 28, 28, 32]\n conv1 = tf.layers.conv2d(\n inputs=input_layer,\n filters=32,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #1\n # First max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 28, 28, 32]\n # Output Tensor Shape: [batch_size, 14, 14, 32]\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\n # Convolutional Layer #2\n # Computes 64 features using a 5x5 filter.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 14, 14, 32]\n # Output Tensor Shape: [batch_size, 14, 14, 64]\n conv2 = tf.layers.conv2d(\n inputs=pool1,\n filters=64,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #2\n # Second max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 14, 14, 64]\n # Output Tensor Shape: [batch_size, 7, 7, 64]\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n # Flatten tensor into a batch of vectors\n # Input Tensor Shape: [batch_size, 7, 7, 64]\n # Output Tensor Shape: [batch_size, 7 * 7 * 64]\n pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])\n\n # Dense Layer\n # Densely connected layer with 1024 neurons\n # Input Tensor Shape: [batch_size, 7 * 7 * 64]\n # Output Tensor Shape: [batch_size, 1024]\n dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n\n # Add dropout operation; 0.6 probability that element will be kept\n ## Dropout consists in randomly setting a fraction rate of input units to 0 at each update during training time, which helps prevent overfitting. The units that are kept are scaled by 1 / (1 - rate), so that their sum is unchanged at training time and inference time.\n dropout = tf.layers.dropout(\n inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n # Logits layer\n # Input Tensor Shape: [batch_size, 1024]\n # Output Tensor Shape: [batch_size, 10]\n logits = tf.layers.dense(inputs=dropout, units=10)\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `softmax_tensor` to the graph. 
It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n }\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n ## cross entropy is used as the loss metric\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n ## Calculates how often predictions matches labels.\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])}\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)\n\n\ndef main(n_steps):\n # Load training and eval data\n mnist = tf.contrib.learn.datasets.load_dataset(\"mnist\")\n train_data = mnist.train.images # Returns np.array\n train_labels = np.asarray(mnist.train.labels, dtype=np.int32)\n eval_data = mnist.test.images # Returns np.array\n eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)\n\n # Create the Estimator\n ## (TensorFlow class for performing high-level model training, evaluation, and inference)\n mnist_classifier = tf.estimator.Estimator(\n model_fn=cnn_model_fn, model_dir=\"/tmp/mnist_convnet_model\")\n\n # Set up logging for predictions\n # Log the values in the \"Softmax\" tensor with label \"probabilities\"\n ## logging so we can track progress during training\n ## probabilities will be printed after every 50 steps of training\n tensors_to_log = {\"probabilities\": \"softmax_tensor\"}\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=100)\n\n # Train the model\n train_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": train_data},\n y=train_labels,\n batch_size=100, ## the model will train on minibatches of 100 examples at each step\n num_epochs=None, ## the model will train until the specified number of steps is reached (see mnist_classifier.train)\n shuffle=True) ## shuffle the training data\n mnist_classifier.train(\n input_fn=train_input_fn,\n steps=n_steps, ## the model will train for n_steps steps total\n hooks=[logging_hook]) ## triggered logging_hook during training\n\n # Evaluate the model and print results\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": eval_data},\n y=eval_labels,\n num_epochs=1, ## evaluates the metrics over one epoch\n shuffle=False) ## iterate through the data sequentially\n eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)\n return(eval_results)\n\n","repo_name":"ready-player-one-supelec/eponge-tf-cnn","sub_path":"CNN_MNIST_tf.py","file_name":"CNN_MNIST_tf.py","file_ext":"py","file_size_in_byte":6081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"12097984136","text":"# -*- coding: utf-8 -*-\n\nfrom msatformula import MSatFormula\nfrom satsolver import SATSolver\n\n\nclass WPM1:\n\n WPM1_UNSATISFIABLE = -1\n WPM1_UNKNOWN = -2\n\n #\n #\n def __init__(self, formula, sat_solver):\n self.formula = formula\n self.sat_solver = sat_solver\n\n #\n #\n def solve(self):\n \"\"\"solve(): (cost:int, assignation/core: [])\n\n Solves 
the formula using the WPM1 algorithm\n\n Returns\n cost: positive integer value which represents the cost of solving\n the given formula or a negative value, if the formula is\n unsatisfiable or if the solver finishes due to a time or\n memory limit.\n\n assignation/core: A list filled with the truth values assignation if\n the formula is satisfiable, the core if it is unsatisfiable\n or a string if the solver can not determine the solution due\n a time or memory limit (the string should contain the reason)\n \"\"\"\n nvars, formula = self.formula.getHardClausesFormula()\n sat, core = self.sat_solver.solve(nvars, formula)\n\n if sat == SATSolver.SOLVER_UNSATISFIABLE:\n return (WPM1.WPM1_UNSATISFIABLE, core)\n\n cost = 0\n wmax = self.formula.getMaxWeightLessThan(MSatFormula.TOP)\n\n while True:\n nvars, formula = self.formula.getFormulaWithMinWeight(wmax)\n sat, sout = self.sat_solver.solve(nvars, formula)\n\n if wmax == 0 and sat == SATSolver.SOLVER_SATISFIABLE:\n return (cost, sout)\n elif sat == SATSolver.SOLVER_SATISFIABLE:\n wmax = self.formula.getMaxWeightLessThan(wmax)\n elif sat == SATSolver.SOLVER_UNSATISFIABLE:\n blocking_vars = []\n wmin = self.formula.getMinWeightOfClauses(sout)\n\n for c in sout:\n if not self.formula.isHardClause(c):\n b = self.formula.relaxClause(c, wmin)\n blocking_vars.append(b)\n\n self.formula.addCardinalityConstraint(blocking_vars,\n MSatFormula.EXACTLY_ONE,\n MSatFormula.TOP)\n cost += wmin\n\n elif sat == SATSolver.SOLVER_UNKNOWN:\n return (WPM1.WPM1_UNKNOWN,\n 'Underlaying solver unknown result\\n'\n 'It can be due to memory or cpu time limits')\n","repo_name":"jponf/wpm1py","sub_path":"wpm1.py","file_name":"wpm1.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"41088004009","text":"from unittest import TestCase, main\nfrom tested.gen_replacer_package import gen_replaceer\nfrom tested.gen_replacer_package.gen_replaceer import replacer_func\nimport doctest\n\n\ndef load_tests(loader, tests, ignore):\n tests.addTests(doctest.DocTestSuite(gen_replaceer))\n return tests\n\n\nclass ReplacerTest(TestCase):\n def test_it_works(self):\n self.assertEqual(replacer_func(comp='По результатам собеседования в КОМПАНИЯ были приняты следующие сотрудники',\n names=['Mars', 'Nike', 'Apple', 'Google']),\n ['По результатам собеседования в Mars были приняты следующие сотрудники',\n 'По результатам собеседования в Nike были приняты следующие сотрудники',\n 'По результатам собеседования в Apple были приняты следующие сотрудники',\n 'По результатам собеседования в Google были приняты следующие сотрудники'])\n\n def test_tuple_in_input(self):\n with self.assertRaises(TypeError) as e:\n replacer_func(comp={'Попринятыследующиесотрудники '}, names=['Mars', 'Nike', 'Apple', 'Google'])\n self.assertEqual('Передан невернвйй тип даннных', e.exception.args[0])\n\n def test_set_in_input(self):\n with self.assertRaises(TypeError) as e:\n replacer_func(comp='Попринятыследующиесотрудники ', names={'Mars', 'Nike', 'Apple', 'Google'})\n self.assertEqual('Передан невернвйй тип даннных', e.exception.args[0])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Philin-coder/Pyrepo","sub_path":"tests/gen_replacer_test.py","file_name":"gen_replacer_test.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"70673723174","text":"import os.path\nimport pathlib\nfrom importlib import 
reload\nfrom types import SimpleNamespace\n\nfrom hypothesis import given\nfrom hypothesis import strategies as st\n\nimport asm\nfrom gtemu import RAM, Emulator\n\nSRC_DIR = (pathlib.Path(__file__).parent / \"..\" / \"src\").resolve()\nSCRIPT = SRC_DIR / \"half-square.asm.py\"\n\n\ndef setup_module():\n global vars\n \"\"\"Load the Emulator from the multiplication script\"\"\"\n reload(asm)\n name, _ = os.path.splitext(os.path.basename(SCRIPT))\n script_globals = {\"__file__\": str(SCRIPT.absolute()), \"__name__\": name}\n with SCRIPT.open(\"rb\") as file:\n exec(compile(file.read(), SCRIPT, \"exec\"), script_globals)\n Emulator.load_rom_from_asm_module()\n vars = SimpleNamespace(**script_globals)\n\n\n_bytes = st.integers(min_value=0, max_value=255)\n\n\n@given(a=_bytes, b=_bytes)\ndef test_multiplication_8(a, b):\n \"\"\"Multiplication of two eight-bit integers should work\"\"\"\n Emulator.reset()\n RAM[asm.symbol(\"A\")] = a\n RAM[asm.symbol(\"B\")] = b\n Emulator.next_instruction = \"start\"\n\n cycles = Emulator.run_to(\"end\")\n\n result = int.from_bytes(\n RAM[asm.symbol(\"result\") : asm.symbol(\"result\") + 2],\n \"little\",\n signed=False,\n )\n assert a * b == result\n assert 119 == cycles\n","repo_name":"kervinck/gigatron-rom","sub_path":"Contrib/psr/multiply/test/test_half_square.py","file_name":"test_half_square.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":211,"dataset":"github-code","pt":"9"} +{"seq_id":"11892860873","text":"from flask import jsonify, request, Blueprint\nfrom datetime import datetime\n\nfrom minic_server.compiler.compiler import Compiler\nfrom minic_server.auth.auth import Auth\nfrom minic_server.restapi.basic_auth import minicc_auth\n\ncompiler_api = Blueprint('compiler_api', __name__)\n\n@compiler_api.route('/service', methods=['POST'])\n@minicc_auth.login_required\ndef serve():\n operation = request.args.get('operation')\n if operation not in [\"compile\", \"execute\"]:\n return jsonify(\"Operation is unknown, please specify in query parameter\"), 400\n\n username = request.authorization[\"username\"]\n request_payload = request.json\n file_name = request_payload.get(\"file_name\", \"\")\n language = request_payload.get(\"language\")\n optLevel = request_payload.get(\"optLevel\", 0)\n code = request_payload.get(\"code\", \"\")\n\n if not language:\n return jsonify(\"Language is unknown\"), 400\n if not file_name:\n file_name = f\"{username}-{datetime.utcnow().strftime('%Y-%m-%d-%H:%M:%S.%f')[:-3]}\"\n if optLevel not in [0, 1, 2, 3]:\n optLevel = 0\n\n result, success = Compiler.process_service_request(operation, username, file_name, language, optLevel, code)\n return jsonify(result), 200 if success else 400\n","repo_name":"wzhao18/minic","sub_path":"minic_server/restapi/compiler_api.py","file_name":"compiler_api.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"33639926550","text":"import sys\nsys.stdin = open(\"2117_input.txt\")\n\ndr = (-1, 1, 0, 0)\ndc = (0, 0, -1, 1)\n\ndef check(K):\n global city, visited, MAX, M\n cnt = 0\n for i in range(N):\n for j in range(N):\n if visited[i][j] and city[i][j]:\n cnt += 1\n\n # 보안회사의 이익이 0보다 크면\n temp = cnt * M - (K*K + (K-1)*(K-1))\n if temp > 0:\n MAX = max(cnt, MAX)\n print(cnt)\n return MAX\n\n# 길이 K에 따라 마름모 visited 구하기(BFS)\ndef bfs(sr, sc, K):\n global N, visited, Q\n visited = [[0] * N for _ in range(N)]\n if K == 1:\n visited[sr][sc] = 1\n 
# check(K)\n print(visited)\n return\n Q = [(sr, sc)]\n visited[sr][sc] = 1\n dis = visited[sr][sc]\n while Q:\n r, c = Q.pop(0)\n if dis == K:\n return\n for i in range(4):\n nr = r + dr[i]\n nc = c + dc[i]\n if not (0 <= nr < N and 0 <= nc < N):\n continue\n if visited[nr][nc]:\n continue\n visited[nr][nc] = 1\n Q.append((nr, nc))\n dis = visited[r][c] + 1\n print(visited)\n return\n\nT = int(input())\nfor tc in range(T):\n N, M = map(int, input().split())\n city = [list(map(int, input().split())) for _ in range(N)]\n\n MAX = 0\n for i in range(N):\n for j in range(N):\n # 중심 정하면 운영영역 k 구하기\n for k in range(1, N):\n visited = [[0] * N for _ in range(N)]\n bfs(i, j, k)\n check(k)\n print(\"#{} {}\".format(tc+1, MAX))\n\n#1 5\n#2 4\n#3 24\n#4 48\n#5 3\n#6 65\n#7 22\n#8 22\n#9 78\n#10 400","repo_name":"chelseashin/My-Algorithm","sub_path":"algorithm_study/모의SW역량테스트/2117_홈방범서비스(실패).py","file_name":"2117_홈방범서비스(실패).py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"26772114448","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/6/1 10:04\n# @Author : tianyunzqs\n# @Description :\n\n\"\"\"\nhttps://leetcode-cn.com/circle/discuss/vMYOmI/\n已知一个无序数组 array,元素均为正整数。给定一个目标值 target,输出数组中是否存在若干元素的组合,相加为目标值。\n\n对于以下无序数组\ncandidates = [3, 9, 5, 8, 7, 17]\ntarget = 16\n输出\n[[3, 5, 8],\n[9, 7]]\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def sumTarget(self, candidates: List[int], target: int) -> List[List[int]]:\n def fun(candidates, target, res):\n if not candidates or target < min(candidates) or target > sum(candidates):\n return []\n for i, num in enumerate(candidates):\n res.append(num)\n if num == target:\n result.append(res[:])\n fun(candidates[i + 1:], target - num, res)\n res.pop()\n\n result = []\n fun(candidates, target, [])\n return [list(r) for r in set([tuple(sorted(item)) for item in result])]\n\n\nif __name__ == '__main__':\n candidates = [3, 9, 5, 8, 7, 17]\n target = 46\n for t in range(57):\n print(t, Solution().sumTarget(candidates, t))\n","repo_name":"tianyunzqs/LeetCodePractise","sub_path":"array/sumTarget.py","file_name":"sumTarget.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"9155102698","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\n\nFONTSIZE = 16\n\ndef plot_beam_test(outputfile_dir,pickle_name,optics,label,filename):\n\t\n\tpos_l = np.arange(0,len(optics),1)\n\tdeltaQmin_l = np.zeros(len(optics)) \n\tfor i , optics in enumerate(optics):\n\t\twith open(f\"{outputfile_dir}/{pickle_name}opticsfile.{optics}\",\"rb\") as p:\n\t\t\tmultiple_correction_dict = pickle.load(p)\n\t\t\n\t\tfor key in multiple_correction_dict:\n\t\t\tdelQmin = multiple_correction_dict[key][\"deltaQmin\"].to_numpy()[0]\n\t\t\tprint(optics,delQmin)\n\t\t\tdeltaQmin_l[i] = delQmin\n\n\t\n\tlabels = [\"11\",\"8.3\",\"4.1\",\"2.1\"]\n\t\n\tfig , ax = plt.subplots()\n\tax.plot(pos_l,deltaQmin_l,\"x\",label=label)\n\tplt.xticks(pos_l, labels,fontsize = FONTSIZE) \n\tax.set_xlabel(r\"$\\beta *$\",fontsize = FONTSIZE)\n\tax.set_ylabel(r\"$\\Delta Q_{min}$\",fontsize = FONTSIZE)\n\tax.legend(fontsize=FONTSIZE,loc=\"upper 
left\")\n\tax.set_ylim(0,0.04)\n\tplt.tight_layout()\n\tplt.savefig(f\"plots/{filename}\")\n\tplt.show()\n","repo_name":"EirikJaccheri/global_coupling_correction3","sub_path":"beam_test/plot_beam_test.py","file_name":"plot_beam_test.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"13767726398","text":"#!/usr/bin/env python3\nimport os\nfrom caproto.server import pvproperty, PVGroup, template_arg_parser, run\n\nfrom cls.utils.dirlist import dirlist\nfrom cls.utils.json_utils import json_to_dict, file_to_json\nimport json\n\n\ndef get_populated_motor_dict(field_path=None):\n '''\n return a dict from disk of the populated fields for all motors\n :param field_path:\n :return:\n '''\n if(field_path is None):\n field_path = r'C:\\controls\\epics\\iocapp_pvnames\\rec_fields'\n\n fpath = os.path.join(field_path, 'motors.json')\n js = file_to_json(fpath)\n dct = json_to_dict(js)\n return(dct)\n\ndef get_rec_type(_type):\n if(_type.find('ChannelType.ENUM') > -1):\n return('bo')\n elif(_type.find('ChannelType.DOUBLE') > -1):\n return('ao')\n elif (_type.find('ChannelType.FLOAT') > -1):\n return ('ao')\n elif(_type.find('ChannelType.LONG') > -1):\n return ('ao')\n elif (_type.find('ChannelType.INT') > -1):\n return ('ao')\n elif (_type.find('ChannelType.STRING') > -1):\n return ('stringin')\n elif (_type.find('ChannelType.CHAR') > -1):\n return ('stringin')\n elif (_type.find('DoubleMotor') > -1):\n return ('motor')\n else:\n print('what is this type [%s]' %_type)\n return('stringin')\n\n\n\ndef ingest_pv_dict(inp):\n type_map = {'float': float,'int': int, 'ChannelType.ENUM': int, 'ChannelType.DOUBLE': float ,'ChannelType.LONG': int, 'ChannelType.STRING': str, \\\n 'ChannelType.CHAR': str, 'ChannelType.FLOAT': float, 'ChannelType.INT': int, 'DoubleMotor': float}\n\n body = {}\n # for (i, (k, (dtype, rec_type))) in enumerate(inp.items()):\n # body[str(i)] = pvproperty(name=k,\n # dtype=type_map[dtype],\n # mock_record=rec_type)\n i = 0\n for k, dct in inp.items():\n #print('ingesting [%d][%s]' % (i, k))\n if(type(dct) is dict):\n try:\n rec_type = get_rec_type(dct['type'])\n if(type(dct['value']) is str):\n if(dct['value'].find('array[') > -1):\n val = arrstr_to_array(rec_type, dct['value'])\n else:\n if(len(dct['value']) > 0):\n if(is_mtr_str_fld(k)):\n rec_type = 'stringin'\n val = dct['value']\n else:\n val = type_map[dct['type']](dct['value'])\n else:\n val = 0\n else:\n val = type_map[dct['type']](dct['value'])\n\n\n if(k.find('SIM_IOC:m100.DESC') > -1):\n pass\n body[str(i)] = pvproperty(name=k,\n dtype=type_map[dct['type']],\n mock_record=rec_type,\n value=val)\n i += 1\n except TypeError:\n print('what is this type:' , dct['type'])\n\n\n return type('BucketOPVs', (PVGroup,), body)\n\ndef is_mtr_str_fld(k):\n str_flds = ['DESC', 'EGU', 'NAME', 'OUT', 'DINP', 'RDBL', 'STOO']\n for fld in str_flds:\n if (k.find(fld) > -1):\n return(True)\n\n return(False)\n\n\ndef arrstr_to_array(rec_type, arrstr):\n l = arrstr.replace('array[','')\n l = l.replace('...]', '')\n l_items = l.split(',')\n #if('unknown' in l_items):\n if(type(l_items[0]) is not str):\n pass\n elif (l_items[0].find('unknown') > -1):\n l = [0,0,0,0,0]\n elif(rec_type.find('stringin') > -1):\n l = convert_asciiarr_to_string(l_items)\n elif (rec_type.find('ao') > -1):\n l = convert_asciiarr_to_float(l_items)\n elif (rec_type.find('bo') > -1):\n l = convert_asciiarr_to_int(l_items)\n\n return(l)\n\ndef 
convert_asciiarr_to_string(ascii_lst):\n '''\n take a list of ascii code chars like ['69', '114', '114', '111', '114'] and returns the string 'Error'\n :param ascii_lst:\n :return:\n '''\n l_int = list(map(int, ascii_lst))\n s = ''.join(chr(i) for i in l_int)\n return(s)\n\ndef convert_asciiarr_to_float(ascii_lst):\n '''\n take a list of ascii code chars like ['69', '114', '114', '111', '114'] and returns the string 'Error'\n :param ascii_lst:\n :return:\n '''\n l_float = list(map(float, ascii_lst))\n return(l_float)\n\n\ndef convert_asciiarr_to_int(ascii_lst):\n '''\n take a list of ascii code chars like ['69', '114', '114', '111', '114'] and returns the string 'Error'\n :param ascii_lst:\n :return:\n '''\n l_int = list(map(int, ascii_lst))\n return (l_int)\n\ndef load_json_files():\n str_flds = ['DESC', 'EGU', 'NAME', 'OUT']\n none_str_flds = ['DINP', 'RDBL', 'STOO', 'TSEL', 'SDIS', 'PREM', 'POST', 'INIT', 'DOL', 'ASG','RINP','RLNK','PREM']\n #fout = open('initpvs.bat', 'w')\n mtr_dct = get_populated_motor_dict()\n fpath = r'C:\\controls\\epics\\iocapp_pvnames'\n fnames = dirlist(fpath, 'json', remove_suffix=False)\n i = 0\n dct = {}\n for fname in fnames:\n _fpath = os.path.join(fpath, fname)\n f = open(_fpath, 'r')\n js = json.load(f)\n jdct = json_to_dict(js)\n for fname in list(jdct.keys()):\n for pv_nm in list(jdct[fname].keys()):\n if (pv_nm in list(mtr_dct.keys())):\n dct['SIM_' + pv_nm] = jdct[fname][pv_nm]\n dct['SIM_' + pv_nm]['type'] = 'DoubleMotor'\n for fld in list(mtr_dct[pv_nm].keys()):\n if(len(fld) > 0):\n dct['SIM_' + pv_nm + '.%s' % fld] = {}\n\n if (fld in none_str_flds):\n dct['SIM_' + pv_nm + '.%s' % fld]['type'] = 'ChannelType.STRING'\n mtr_dct[pv_nm][fld] = '0'\n elif(fld in str_flds):\n dct['SIM_' + pv_nm + '.%s' % fld]['type'] = 'ChannelType.STRING'\n else:\n dct['SIM_' + pv_nm + '.%s' % fld]['type'] = 'ChannelType.FLOAT'\n\n if (mtr_dct[pv_nm][fld] is ''):\n mtr_dct[pv_nm][fld] = '0'\n\n if(fld.find('MSTA') > -1):\n #force it to be a good status value\n dct['SIM_' + pv_nm + '.%s' % fld]['value'] = 18690\n else:\n dct['SIM_' + pv_nm + '.%s' % fld]['value'] = mtr_dct[pv_nm][fld]\n print('serving MOTOR: [%s], value=[%s]' % ('SIM_' + pv_nm + '.%s' % fld, dct['SIM_' + pv_nm + '.%s' % fld]['value']))\n else:\n dct['SIM_' + pv_nm] = jdct[fname][pv_nm]\n\n #fout.write('caput %s %s\\n' % ('SIM_' + pv_nm, jdct[fname][pv_nm]['value']))\n #fout.close()\n return(dct)\n\n\n\n\n\n\nif __name__ == '__main__':\n parser, split_args = template_arg_parser(\n default_prefix='',\n desc='An IOC that servers a bucket of disconnected PVs.')\n\n inp = load_json_files()\n\n # parser.add_argument('--json',\n # help='The file to read the PVs from',\n # required=True, type=str)\n args = parser.parse_args()\n #args.interfaces = ['127.0.0.1']\n ioc_options, run_options = split_args(args)\n\n # with open(args.json, 'r') as fin:\n # inp = json.load(fin)\n klass = ingest_pv_dict(inp)\n\n ioc = klass(**ioc_options)\n run(ioc.pvdb, **run_options)","repo_name":"mrakitin/pyStxm3","sub_path":"bcm/sim/caproto/my_bucket_of_pvs.py","file_name":"my_bucket_of_pvs.py","file_ext":"py","file_size_in_byte":7644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"2514334668","text":"import os\nimport pytest\nimport shutil\nimport subprocess\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef initialize(request):\n if not os.path.exists('test/data'):\n shutil.copytree('data', 'test/data')\n root = os.path.dirname(os.path.realpath(__file__))\n 
subprocess.run([os.path.join(root, 'src/_build/simplecrop')], check=True, cwd=os.path.join(root, 'test'))\n\n\ndef read_soil_lines(fname):\n with open(fname) as f:\n content = f.read()\n lines = content.splitlines()\n return lines[7:]\n\n\ndef read_plant_lines(fname):\n with open(fname) as f:\n content = f.read()\n return content.splitlines()\n\n\ndef test_soil_output():\n soil_orig = read_soil_lines('output/soil.out')\n soil_new = read_soil_lines('test/output/soil.out')\n assert soil_orig == soil_new\n\n\ndef test_plant_output():\n plant_orig = read_plant_lines('output/plant.out')\n plant_new = read_plant_lines('test/output/plant.out')\n assert plant_orig == plant_new","repo_name":"openmodelingfoundation/SimpleCrop","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"9"} +{"seq_id":"75042682533","text":"# 골드바흐의 추측\n# 문제\n# 1742년, 독일의 아마추어 수학가 크리스티안 골드바흐는 레온하르트 오일러에게 다음과 같은 추측을 제안하는 편지를 보냈다.\n#\n# 4보다 큰 모든 짝수는 두 홀수 소수의 합으로 나타낼 수 있다.\n# 예를 들어 8은 3 + 5로 나타낼 수 있고, 3과 5는 모두 홀수인 소수이다.\n# 또, 20 = 3 + 17 = 7 + 13, 42 = 5 + 37 = 11 + 31 = 13 + 29 = 19 + 23 이다.\n#\n# 이 추측은 아직도 해결되지 않은 문제이다.\n#\n# 백만 이하의 모든 짝수에 대해서, 이 추측을 검증��는 프로그램을 작성하시오.\n#\n# 입력\n# 입력은 하나 또는 그 이상의 테스트 케이스로 이루어져 있다.\n# 테스트 케이스의 개수는 100,000개를 넘지 않는다.\n#\n# 각 테스트 케이스는 짝수 정수 n 하나로 이루어져 있다. (6 ≤ n ≤ 1000000)\n# 입력의 마지막 줄에는 0이 하나 주어진다.\n#\n# 출력\n# 각 테스트 케이스에 대해서, n = a + b 형태로 출력한다.\n# 이때, a와 b는 홀수 소수이다. 숫자와 연산자는 공백 하나로 구분되어져 있다.\n# 만약, n을 만들 수 있는 방법이 여러 가지라면, b-a가 가장 큰 것을 출력한다.\n# 또, 두 홀수 소수의 합으로 n을 나타낼 수 없는 경우에는 \"Goldbach's conjecture is wrong.\"을 출력한다.\n#\n# 예제 입력 1\n# 8\n# 20\n# 42\n# 0\n# 예제 출력 1\n# 8 = 3 + 5\n# 20 = 3 + 17\n# 42 = 5 + 37\n\nimport sys\n\nis_prime_number = [True] * 1000001\nis_prime_number[1] = False\ncases = []\nmax_input = 0\nwhile True:\n last_input = int(sys.stdin.readline().rstrip())\n\n if last_input != 0:\n cases.append(last_input)\n max_input = max(max_input, last_input)\n else:\n break\n\n# 2부터 max_input의 루트값(포함)까지 범위에 대해 에라토스테네스의 체를 적용하여\n# 미리 소수의 목록을 작성\nfor i in range(2, int(max_input**0.5) + 1):\n if is_prime_number[i]:\n for j in range(i*i, max_input + 1, i):\n is_prime_number[j] = False\n\n\ndef verify_goldbach(num):\n # num = a + b이고 a, b를 각각 2i+1, 2j+1 (i, j는 1 이상의 자연수) 라고 했을 때\n # 가장 작은 홀수는 3, 가장 큰 홀수는 num - 3이므로 i의 범위는 다음과 같다.\n # 1) 2i + 1이 가장 큰 홀수일 때 (num - 3 = 2i + 1)\n # 0.5 * (num - 4) = 0.5 * num - 2 = i\n # 2) 2i + 1이 가장 작은 홀수일 때 (3 = 2i + 1)\n # i = 1\n\n # 문제에서 b-a가 가장 큰 것을 요구하였으므로,\n # 가장 작은 a부터 탐색하여 가장 먼저 나오는 소수 a, b 조합을 답으로 출력\n for i in range(1, int(0.5 * num) - 2 + 1):\n a = 2 * i + 1\n b = num - a\n if is_prime_number[a] and is_prime_number[num - a]:\n return \"{0} = {1} + {2}\".format(num, a, b)\n\n # 소수 조합을 찾지 못하였을 때 출력\n return \"Goldbach's conjecture is wrong.\"\n\n\nfor case in cases:\n print(verify_goldbach(case))\n","repo_name":"Alfred-Walker/pythonps","sub_path":"Baekjoon/Goldbach.py","file_name":"Goldbach.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"7807617140","text":"#######################\n#Willard Wider\n#6/27/18\n#ELEC4400\n#Lab 8\n#######################\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.fftpack import fft\nfrom scipy.io import wavfile\n\ndef function_name(file_name):\n #https://stackoverflow.com/questions/2060628/reading-wav-files-in-python\n fs, data = wavfile.read(file_name)\n #fast fourier transform of the 
data\n fftOut = fft(data)\n #total ammount of time in the recording\n length = len(data) / fs\n #a list of sample indicies, probably audio frames\n n = np.arange(len(data))\n #fs the the sampling frequency, the number of samples per second\n #we need the duration of time between samples\n #time between is 1 (sec) divided by frequency\n total_frequencies = n / length\n plt.plot(total_frequencies,np.abs(fftOut))\n #limit the plot\n plt.xlim(([0,4186]))\n plt.title(\"Piano Note Analysis\")\n plt.xlabel(\"frequency (Hz)\")\n plt.ylabel(\"amplitude (Db)\")\n plt.show()\n return\n\nif __name__ == \"__main__\":\n #function_name('kpt.wav')\n #question 1: to convince myself, the amplitude is the intensity (loudness)\n #of the sound at that frequency, and it would stand to reason\n #that louder sounds at that frequency tend to exist more in the file\n #\n #frequencies - notes\n #233 - A3#\n #310 - D4#\n #392 - G4\n #\n #pause...\n function_name('hkp.wav')\n #\n #frequencies - notes\n #392 - G4\n #465 - A4#\n #621 - D5#\n #\n #pause...\n #my analysis was core-rekt\n","repo_name":"Willster419/ELEC4400_DSP","sub_path":"lab8/Lab8.py","file_name":"Lab8.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"2813201741","text":"from nf_common_source.code.nf.types.nf_column_types import NfColumnTypes\nfrom nf_ea_common_tools_source.b_code.services.general.nf_ea.com.common_knowledge.column_types.nf_ea_com_column_types import NfEaComColumnTypes\n\nLIST_OF_NF_UUID_COLUMN_NAMES = [\n NfColumnTypes.NF_UUIDS.column_name,\n NfEaComColumnTypes.ELEMENT_COMPONENTS_CONTAINING_EA_CLASSIFIER.column_name,\n NfEaComColumnTypes.ELEMENT_COMPONENTS_CLASSIFYING_EA_CLASSIFIER.column_name,\n NfEaComColumnTypes.ELEMENTS_CLASSIFIER.column_name,\n NfEaComColumnTypes.CLASSIFIERS_CONTAINING_EA_ELEMENT.column_name,\n NfEaComColumnTypes.PACKAGEABLE_OBJECTS_PARENT_EA_ELEMENT.column_name,\n # It is used in the data both as uuid field and uuid list field\n NfEaComColumnTypes.ELEMENTS_SUPPLIER_PLACE1_END_CONNECTORS.column_name,\n # It is used in the data both as uuid field and uuid list field\n NfEaComColumnTypes.ELEMENTS_CLIENT_PLACE2_END_CONNECTORS.column_name,\n 'provider',\n 'dependent',\n 'specialisation',\n 'generalisation',\n 'child',\n 'parent',\n 'ea_client',\n 'ea_stereotype',\n NfEaComColumnTypes.STEREOTYPE_EA_STEREOTYPE_GROUP.column_name,\n NfEaComColumnTypes.STEREOTYPE_CLIENT_NF_UUIDS.column_name,\n 'stereotype_nf_uuids'\n]\n\nLIST_OF_NF_UUID_LISTS_COLUMN_NAMES = [\n NfEaComColumnTypes.CLASSIFIERS_ALL_COMPONENT_EA_ATTRIBUTES.column_name,\n # It is used in the data both as uuid field and uuid list field\n NfEaComColumnTypes.ELEMENTS_SUPPLIER_PLACE1_END_CONNECTORS.column_name,\n # It is used in the data both as uuid field and uuid list field\n NfEaComColumnTypes.ELEMENTS_CLIENT_PLACE2_END_CONNECTORS,\n NfEaComColumnTypes.CLASSIFIERS_ALL_COMPONENT_EA_OPERATIONS.column_name,\n NfEaComColumnTypes.ELEMENTS_CONTAINED_EA_DIAGRAMS.column_name,\n NfEaComColumnTypes.ELEMENTS_CONTAINED_EA_CLASSIFIERS.column_name,\n NfEaComColumnTypes.STEREOTYPEABLE_OBJECTS_EA_OBJECT_STEREOTYPES.column_name,\n NfEaComColumnTypes.PACKAGES_CONTAINED_EA_PACKAGES.column_name,\n 
'paths'\n]\n","repo_name":"boro-alpha/bclearer","sub_path":"bclearer_source/b_code/substages/operations/b_evolve/content_operations/merge_universes/nf_uuid_mapping_processes/uuid_columns_lists_constants.py","file_name":"uuid_columns_lists_constants.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"10880108242","text":"from sense_hat import SenseHat\nimport time\n\nsense = SenseHat()\n\nsense.show_letter(\">\")\n\nangles = [0, 90, 180, 270, 0, 90, 180, 270]\nfor r in angles:\n sense.set_rotation(r)\n time.sleep(0.5)\n","repo_name":"whatevergeek/PUGS-SenseHAT-Demo","sub_path":"Demo 1 - Runthrough of Features/5_spinning.py","file_name":"5_spinning.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"9"} +{"seq_id":"36579687586","text":"#Best case O(n)\n#Worst case O(n^2)\n#Use when only few items or items are mostly sorted already\nprint(\"Insertion sort\")\nprint(\"__\" * 30)\n\narr = [3, 6, 4, 1, 7, 9, 2]\nn = len(arr)\n\n\ndef insert(pos, val):\n print(\"For value: \", val)\n prev= pos-1\n\n while(prev>=0 and arr[prev] > val):\n arr[prev], arr[prev+1]= arr[prev+1], arr[prev]\n print(\"Swapping\", prev, prev+1)\n prev -= 1\n\n\n\nfor i in range(1, n):\n insert(i, arr[i])\n print(arr)\n\n","repo_name":"gaurang98671/Algorithms","sub_path":"Sorting Algo/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"37690961635","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nfrom airflow.models.dag import DAG\nfrom airflow.operators.bash import BashOperator\n\nfrom google_drive_operator import GoogleDriveOperator\n\ndag = DAG(\n dag_id='upload_to_drive',\n description='Create spare parts analysis sheet in Google Drive',\n schedule_interval='@daily',\n start_date=datetime(2021, 2, 10),\n end_date=datetime(2021, 2, 13)\n)\n\ncreate_file = BashOperator(\n task_id='create_file',\n bash_command=(\n 'echo file created on {{ ds }}. 
> '\n '${AIRFLOW_HOME}/tmp/my_file_{{ ds }}.txt'\n ),\n dag=dag\n)\n\nupload_file = GoogleDriveOperator(\n task_id='upload_file',\n local_path='tmp/my_file_{{ ds }}.txt',\n drive_folder='google-drive-operator',\n gcp_conn_id='airflow-to-drive',\n delegate_to='denis@gontcharov.be',\n)\n\ncreate_file >> upload_file\n","repo_name":"gontcharovd/google-drive-operator","sub_path":"dags/upload_to_drive_dag.py","file_name":"upload_to_drive_dag.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"9"} +{"seq_id":"5881603953","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[86]:\n\n\n#question 1\n\nimport pandas as pd\nprint(pd.__version__)\n\n\n# In[87]:\n\n\n#question 2\nimport numpy as np\narr = np.array([1,2,4,5,5,2121,4323])\nseries = pd.Series(arr)\nprint(series)\n\n\n# In[10]:\n\n\n#question 3\n\nind = series.index\npd.DataFrame(ind)\n\n\n# In[11]:\n\n\n#question 4\n\nimport seaborn as sns\n\n\n# In[12]:\n\n\ndset = sns.load_dataset('mpg')\n\n\n# In[13]:\n\n\ndset\n\n\n# In[16]:\n\n\n#question 5\n\norigins = dset['origin'].unique()\nprint(origins)\n\n\n# In[97]:\n\n\n#question 6 \n\ndset[dset['origin']=='usa']\n\n","repo_name":"tramsy/Data-Science","sub_path":"Day4 Assignment.py","file_name":"Day4 Assignment.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"10599592348","text":"import torch\nimport torchtext\nimport spacy\nimport nltk\n#nltk.download('stopwords')\nimport logging\nfrom spacy.lang.en import English\nfrom typing import Union, List\nfrom pathlib import Path\nfrom typing import NamedTuple, Set\nfrom torchtext.data import Field, BucketIterator, TabularDataset\nfrom transformers import BertTokenizer, BertModel\nfrom torch import Tensor\n\n# Custom data types and structures\nPathOrStr = Union[Path, str]\n\"\"\"Custom type for Paths or pathlike objects.\"\"\"\n\nFilters = List[int]\n\"\"\"Custom data type representing a list of filter lengths.\"\"\"\n\nStringlike = Union[str, List[str]]\n\"\"\"Single string or list of strings for evaluating recall.\"\"\"\n\nclass IteratorData(NamedTuple):\n \"\"\" Container holding the iterators needed to train the NCN model.\"\"\"\n\n cntxt: Field\n \"\"\"**cntxt** *(torch.text.data.Field)*: Field containing preprocessing steps and vocabulary for context data.\"\"\"\n ttl: Field\n \"\"\"**ttl** *(torch.text.data.Field)*: Field containing preprocessing steps and vocabulary for title data.\"\"\"\n aut: Field\n \"\"\"**aut** *(torch.text.data.Field)*: Field containing preprocessing steps and vocabulary for author data.\"\"\"\n train_iter: BucketIterator\n \"\"\"\n **train_iter** *(torch.text.data.BucketIterator)*: \n Iterator containing the training samples of the form context, citing_authors, title, cited_authors.\n Data is bucketted according to the title length.\n \"\"\"\n valid_iter: BucketIterator\n \"\"\"\n **valid_iter** *(torch.text.data.BucketIterator)*: \n Iterator containing samples for the validation pass. Format: context, citing_authors, title, cited_authors.\n Data is bucketted according to the title length.\n \"\"\"\n test_iter: BucketIterator\n \"\"\"\n **test_iter** *(torch.text.data.BucketIterator)*: \n Iterator containing samples for the test pass. 
Format: context, citing_authors, title, cited_authors.\n    Data is bucketed according to the title length.\n    \"\"\"\n\n\nclass BaseData(NamedTuple):\n    \"\"\"Container holding base data for the arxiv CS dataset.\"\"\"\n\n    cntxt: Field\n    \"\"\"**cntxt** *(torch.text.data.Field)*: Field containing preprocessing steps and vocabulary for context data.\"\"\"\n    ttl: Field\n    \"\"\"**ttl** *(torch.text.data.Field)*: Field containing preprocessing steps and vocabulary for title data.\"\"\"\n    aut: Field\n    \"\"\"**aut** *(torch.text.data.Field)*: Field containing preprocessing steps and vocabulary for author data.\"\"\"\n    train: TabularDataset\n    \"\"\"\n    **train** *(torch.text.data.TabularDataset)*: \n    Dataset containing the training samples of the form context, citing_authors, title, cited_authors.\n    \"\"\"\n    valid: TabularDataset\n    \"\"\"\n    **valid** *(torch.text.data.TabularDataset)*: \n    Dataset containing the validation samples of the form context, citing_authors, title, cited_authors.\n    \"\"\"\n    test: TabularDataset\n    \"\"\"\n    **test** *(torch.text.data.TabularDataset)*: \n    Dataset containing the test samples of the form context, citing_authors, title, cited_authors.\n    \"\"\"\n\n\n# Global constants\nCITATION_PATTERNS = r\"|\"\n\"\"\"Regex patterns for matching citations in document sentences.\"\"\"\n\n\nMAX_TITLE_LENGTH = 30\n\"\"\"Maximum decoder sequence length. Also determines the number of attention weights.\"\"\"\n\nMAX_CONTEXT_LENGTH = 100\n\"\"\"Maximum encoder sequence length.\"\"\"\n\nMAX_AUTHORS = 5\n\"\"\"Maximum number of authors considered\"\"\"\n\nSEED = 34\n\"\"\"RNG seed for reproducibility.\"\"\"\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\"\"\"Check for a GPU globally.\"\"\"\n\n\n# base logger for the ncn module\nlogging.basicConfig(level=logging.INFO, style='$')\nlogger = logging.getLogger(__name__)\n\"\"\"\n    Base logger for the neural citation package.\n    The package-wide logging level is set here.\n\"\"\"\n\n# general functions\ndef get_stopwords() -> Set:\n    \"\"\"\n    Returns spacy and nltk stopwords unified into a single set. 
\n    \n    ## Output: \n    \n    - **STOPWORDS** *(Set)*: Set containing the stopwords for preprocessing \n    \"\"\"\n    STOPWORDS = spacy.lang.en.stop_words.STOP_WORDS\n    nltk_stopwords = set(nltk.corpus.stopwords.words('english'))\n    STOPWORDS.update(nltk_stopwords)\n    return STOPWORDS\n\ndef learn_BERT_sent_representation(bertTokenizer: BertTokenizer,\n                                   bertModel: BertModel,\n                                   sentence) -> Tensor:\n    # Split the sentence into tokens\n    indexed_tokens = bertTokenizer.encode(sentence, add_special_tokens=False, truncation=True)\n\n    # Mark each token as belonging to sentence \"1\".\n    segments_ids = [1] * len(indexed_tokens)\n\n    # Convert inputs to PyTorch tensors\n    tokens_tensor = torch.tensor([indexed_tokens])\n    segments_tensors = torch.tensor([segments_ids])\n\n    with torch.no_grad():\n        outputs = bertModel(tokens_tensor, segments_tensors)\n        # `hidden_states` has shape [13 x 1 x number_token_in_sentence x 768]\n        hidden_states = outputs[2]\n\n    # `token_vecs` is a tensor with shape [number_token_in_sentence x 768]\n    token_vecs = hidden_states[-2][0]\n\n    # Calculate the average of all token vectors.\n    sentence_embedding = torch.mean(token_vecs, dim=0)\n\n    return sentence_embedding\n\ndef get_BERT_embdding_from_context(\n        bertTokenizer: BertTokenizer,\n        bertModel: BertModel,\n        context_vocab: torchtext.vocab.Vocab,\n        context: Tensor) -> Tensor:\n    context_embeddings = []\n    for sentence_token_idxes in context:\n        sentence_tokens = [context_vocab.itos[token_idx.item()] for token_idx in sentence_token_idxes]\n        sentence = ' '.join(sentence_tokens)\n        sentence_embs = learn_BERT_sent_representation(bertTokenizer, bertModel, sentence)\n        context_embeddings.append(sentence_embs.cpu().numpy())\n    context_embeddings = torch.FloatTensor(context_embeddings)\n    return context_embeddings","repo_name":"dinhngocthi/GAsource","sub_path":"NeuralCitationNetwork/neural_citation-master/docs/ncn/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}{"seq_id":"33248609556","text":"import sys\n#sys.stdin=open(\"input\", \"r\")\nfrom collections import deque\n\na = input()\nn = int(input())\nfor i in range(n):\n    plan = input()\n    dq = deque(a) # put the required courses into a queue\n    for x in plan: # go through Hyunsoo's courses one by one\n        if x in dq: # if this course is one of the required courses\n            if x != dq.popleft(): # but if it does not match the first remaining required course, print NO\n                print('#%d NO' %(i+1))\n                break # stop the for loop\n    else: # finished normally (order check passed)\n        if len(dq) == 0: # no required courses left\n            print('#%d YES' %(i+1))\n        else: # required courses still remain\n            print('#%d NO' %(i+1))\n\n","repo_name":"hk1486/CodingTest_python_algorithm","sub_path":"3. 
자료구조(스택, 큐, 해쉬, 힙)/04-7_교육과정설계(큐).py","file_name":"04-7_교육과정설계(큐).py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"7731035607","text":"import json\nfrom typing import Optional\n\nimport torch\nfrom omegaconf import DictConfig, OmegaConf\nfrom pytorch_lightning import Trainer\nfrom torch.cuda.amp import autocast\n\nfrom nemo.collections.common.losses import SpanningLoss\nfrom nemo.collections.nlp.data import SquadDataset\nfrom nemo.collections.nlp.data.question_answering_squad.qa_squad_processing import (\n EVALUATION_MODE,\n INFERENCE_MODE,\n TRAINING_MODE,\n)\nfrom nemo.collections.nlp.models.nlp_model import NLPModel\nfrom nemo.collections.nlp.modules.common import TokenClassifier\nfrom nemo.collections.nlp.parts.utils_funcs import tensor2list\nfrom nemo.core.classes.common import PretrainedModelInfo, typecheck\nfrom nemo.utils import logging\n\n__all__ = ['QAModel']\n\n\nclass QAModel(NLPModel):\n \"\"\"\n BERT encoder with QA head training.\n \"\"\"\n\n def __init__(self, cfg: DictConfig, trainer: Trainer = None):\n super().__init__(cfg=cfg, trainer=trainer)\n self.classifier = TokenClassifier(\n hidden_size=self.hidden_size,\n num_classes=cfg.token_classifier.num_classes,\n num_layers=cfg.token_classifier.num_layers,\n activation=cfg.token_classifier.activation,\n log_softmax=cfg.token_classifier.log_softmax,\n dropout=cfg.token_classifier.dropout,\n use_transformer_init=cfg.token_classifier.use_transformer_init,\n )\n\n self.loss = SpanningLoss()\n\n @typecheck()\n def forward(self, input_ids, attention_mask, token_type_ids):\n with autocast():\n hidden_states = self.bert_model(\n input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask\n )\n\n if isinstance(hidden_states, tuple):\n hidden_states = hidden_states[0]\n\n logits = self.classifier(hidden_states=hidden_states)\n return logits\n\n def training_step(self, batch, batch_idx):\n input_ids, input_type_ids, input_mask, unique_ids, start_positions, end_positions = batch\n logits = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)\n loss, _, _ = self.loss(logits=logits, start_positions=start_positions, end_positions=end_positions)\n lr = self._optimizer.param_groups[0]['lr']\n self.log('train_loss', loss)\n self.log('lr', lr, prog_bar=True)\n return {'loss': loss, 'lr': lr}\n\n def validation_step(self, batch, batch_idx):\n if self.trainer.testing:\n prefix = 'test'\n else:\n prefix = 'val'\n\n input_ids, input_type_ids, input_mask, unique_ids, start_positions, end_positions = batch\n logits = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)\n loss, start_logits, end_logits = self.loss(\n logits=logits, start_positions=start_positions, end_positions=end_positions\n )\n\n tensors = {\n 'unique_ids': unique_ids,\n 'start_logits': start_logits,\n 'end_logits': end_logits,\n }\n loss = {f'{prefix}_loss': loss, f'{prefix}_tensors': tensors}\n self.validation_step_outputs.append(loss) if prefix == 'val' else self.test_step_outputs.append(loss)\n return loss\n\n def test_step(self, batch, batch_idx):\n return self.validation_step(batch, batch_idx)\n\n def on_validation_epoch_end(self):\n if self.trainer.testing:\n prefix = 'test'\n outputs = self.test_step_outputs\n else:\n prefix = 'val'\n outputs = self.validation_step_outputs\n\n avg_loss = torch.stack([x[f'{prefix}_loss'] for x in outputs]).mean()\n\n unique_ids = 
torch.cat([x[f'{prefix}_tensors']['unique_ids'] for x in outputs])\n start_logits = torch.cat([x[f'{prefix}_tensors']['start_logits'] for x in outputs])\n end_logits = torch.cat([x[f'{prefix}_tensors']['end_logits'] for x in outputs])\n\n all_unique_ids = []\n all_start_logits = []\n all_end_logits = []\n if torch.distributed.is_initialized():\n world_size = torch.distributed.get_world_size()\n for ind in range(world_size):\n all_unique_ids.append(torch.empty_like(unique_ids))\n all_start_logits.append(torch.empty_like(start_logits))\n all_end_logits.append(torch.empty_like(end_logits))\n torch.distributed.all_gather(all_unique_ids, unique_ids)\n torch.distributed.all_gather(all_start_logits, start_logits)\n torch.distributed.all_gather(all_end_logits, end_logits)\n else:\n all_unique_ids.append(unique_ids)\n all_start_logits.append(start_logits)\n all_end_logits.append(end_logits)\n\n exact_match, f1, all_predictions, all_nbest = -1, -1, [], []\n if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:\n\n unique_ids = []\n start_logits = []\n end_logits = []\n for u in all_unique_ids:\n unique_ids.extend(tensor2list(u))\n for u in all_start_logits:\n start_logits.extend(tensor2list(u))\n for u in all_end_logits:\n end_logits.extend(tensor2list(u))\n\n eval_dataset = self._test_dl.dataset if self.trainer.testing else self._validation_dl.dataset\n exact_match, f1, all_predictions, all_nbest = eval_dataset.evaluate(\n unique_ids=unique_ids,\n start_logits=start_logits,\n end_logits=end_logits,\n n_best_size=self._cfg.dataset.n_best_size,\n max_answer_length=self._cfg.dataset.max_answer_length,\n version_2_with_negative=self._cfg.dataset.version_2_with_negative,\n null_score_diff_threshold=self._cfg.dataset.null_score_diff_threshold,\n do_lower_case=self._cfg.dataset.do_lower_case,\n )\n\n logging.info(f\"{prefix} exact match {exact_match}\")\n logging.info(f\"{prefix} f1 {f1}\")\n\n self.log(f'{prefix}_loss', avg_loss)\n self.log(f'{prefix}_exact_match', exact_match)\n self.log(f'{prefix}_f1', f1)\n self.validation_step_outputs.clear() if prefix == 'val' else self.test_step_outputs.clear() # free memory\n\n def on_test_epoch_end(self):\n return self.on_validation_epoch_end()\n\n @torch.no_grad()\n def inference(\n self,\n file: str,\n batch_size: int = 1,\n num_samples: int = -1,\n output_nbest_file: Optional[str] = None,\n output_prediction_file: Optional[str] = None,\n ):\n \"\"\"\n Get prediction for unlabeled inference data\n\n Args:\n file: inference data\n batch_size: batch size to use during inference\n num_samples: number of samples to use of inference data. 
Default: -1 if all data should be used.\n output_nbest_file: optional output file for writing out nbest list\n output_prediction_file: optional output file for writing out predictions\n \n Returns:\n model predictions, model nbest list\n \"\"\"\n # store predictions for all queries in a single list\n all_predictions = []\n all_nbest = []\n mode = self.training\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n try:\n # Switch model to evaluation mode\n self.eval()\n self.to(device)\n logging_level = logging.get_verbosity()\n logging.set_verbosity(logging.WARNING)\n dataloader_cfg = {\n \"batch_size\": batch_size,\n \"file\": file,\n \"shuffle\": False,\n \"num_samples\": num_samples,\n 'num_workers': 2,\n 'pin_memory': False,\n 'drop_last': False,\n }\n dataloader_cfg = OmegaConf.create(dataloader_cfg)\n infer_datalayer = self._setup_dataloader_from_config(cfg=dataloader_cfg, mode=INFERENCE_MODE)\n\n all_logits = []\n all_unique_ids = []\n for i, batch in enumerate(infer_datalayer):\n input_ids, token_type_ids, attention_mask, unique_ids = batch\n logits = self.forward(\n input_ids=input_ids.to(device),\n token_type_ids=token_type_ids.to(device),\n attention_mask=attention_mask.to(device),\n )\n all_logits.append(logits)\n all_unique_ids.append(unique_ids)\n logits = torch.cat(all_logits)\n unique_ids = tensor2list(torch.cat(all_unique_ids))\n s, e = logits.split(dim=-1, split_size=1)\n start_logits = tensor2list(s.squeeze(-1))\n end_logits = tensor2list(e.squeeze(-1))\n (all_predictions, all_nbest, scores_diff) = infer_datalayer.dataset.get_predictions(\n unique_ids=unique_ids,\n start_logits=start_logits,\n end_logits=end_logits,\n n_best_size=self._cfg.dataset.n_best_size,\n max_answer_length=self._cfg.dataset.max_answer_length,\n version_2_with_negative=self._cfg.dataset.version_2_with_negative,\n null_score_diff_threshold=self._cfg.dataset.null_score_diff_threshold,\n do_lower_case=self._cfg.dataset.do_lower_case,\n )\n\n with open(file, 'r') as test_file_fp:\n test_data = json.load(test_file_fp)[\"data\"]\n id_to_question_mapping = {}\n for title in test_data:\n for par in title[\"paragraphs\"]:\n for question in par[\"qas\"]:\n id_to_question_mapping[question[\"id\"]] = question[\"question\"]\n\n for question_id in all_predictions:\n all_predictions[question_id] = (id_to_question_mapping[question_id], all_predictions[question_id])\n\n if output_nbest_file is not None:\n with open(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest, indent=4) + \"\\n\")\n if output_prediction_file is not None:\n with open(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n finally:\n # set mode back to its original value\n self.train(mode=mode)\n logging.set_verbosity(logging_level)\n\n return all_predictions, all_nbest\n\n def setup_training_data(self, train_data_config: Optional[DictConfig]):\n if not train_data_config or not train_data_config.file:\n logging.info(\n f\"Dataloader config or file_path for the train is missing, so no data loader for test is created!\"\n )\n self._test_dl = None\n return\n self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, mode=TRAINING_MODE)\n\n def setup_validation_data(self, val_data_config: Optional[DictConfig]):\n if not val_data_config or not val_data_config.file:\n logging.info(\n f\"Dataloader config or file_path for the validation is missing, so no data loader for test is created!\"\n )\n self._test_dl = None\n return\n self._validation_dl = 
self._setup_dataloader_from_config(cfg=val_data_config, mode=EVALUATION_MODE)\n\n def setup_test_data(self, test_data_config: Optional[DictConfig]):\n if not test_data_config or test_data_config.file is None:\n logging.info(\n f\"Dataloader config or file_path for the test is missing, so no data loader for test is created!\"\n )\n self._test_dl = None\n return\n self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, mode=EVALUATION_MODE)\n\n def _setup_dataloader_from_config(self, cfg: DictConfig, mode: str):\n dataset = SquadDataset(\n tokenizer=self.tokenizer,\n data_file=cfg.file,\n keep_doc_spans='all', # self._cfg.dataset.keep_doc_spans,\n doc_stride=self._cfg.dataset.doc_stride,\n max_query_length=self._cfg.dataset.max_query_length,\n max_seq_length=self._cfg.dataset.max_seq_length,\n version_2_with_negative=self._cfg.dataset.version_2_with_negative,\n num_samples=cfg.num_samples,\n mode=mode,\n use_cache=self._cfg.dataset.use_cache,\n )\n\n dl = torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=cfg.batch_size,\n collate_fn=dataset.collate_fn,\n drop_last=cfg.drop_last,\n shuffle=cfg.shuffle,\n num_workers=cfg.num_workers,\n pin_memory=cfg.pin_memory,\n )\n return dl\n\n @classmethod\n def list_available_models(cls) -> Optional[PretrainedModelInfo]:\n \"\"\"\n This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.\n\n Returns:\n List of available pre-trained models.\n \"\"\"\n result = []\n\n result.append(\n PretrainedModelInfo(\n pretrained_model_name=\"qa_squadv1.1_bertbase\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv1_1_bertbase/versions/1.0.0rc1/files/qa_squadv1.1_bertbase.nemo\",\n description=\"Question answering model finetuned from NeMo BERT Base Uncased on SQuAD v1.1 dataset which obtains an exact match (EM) score of 82.78% and an F1 score of 89.97%.\",\n )\n )\n\n result.append(\n PretrainedModelInfo(\n pretrained_model_name=\"qa_squadv2.0_bertbase\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv2_0_bertbase/versions/1.0.0rc1/files/qa_squadv2.0_bertbase.nemo\",\n description=\"Question answering model finetuned from NeMo BERT Base Uncased on SQuAD v2.0 dataset which obtains an exact match (EM) score of 75.04% and an F1 score of 78.08%.\",\n )\n )\n\n result.append(\n PretrainedModelInfo(\n pretrained_model_name=\"qa_squadv1_1_bertlarge\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv1_1_bertlarge/versions/1.0.0rc1/files/qa_squadv1.1_bertlarge.nemo\",\n description=\"Question answering model finetuned from NeMo BERT Large Uncased on SQuAD v1.1 dataset which obtains an exact match (EM) score of 85.44% and an F1 score of 92.06%.\",\n )\n )\n\n result.append(\n PretrainedModelInfo(\n pretrained_model_name=\"qa_squadv2.0_bertlarge\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv2_0_bertlarge/versions/1.0.0rc1/files/qa_squadv2.0_bertlarge.nemo\",\n description=\"Question answering model finetuned from NeMo BERT Large Uncased on SQuAD v2.0 dataset which obtains an exact match (EM) score of 80.22% and an F1 score of 83.05%.\",\n )\n )\n\n result.append(\n PretrainedModelInfo(\n pretrained_model_name=\"qa_squadv1_1_megatron_cased\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv1_1_megatron_cased/versions/1.0.0rc1/files/qa_squadv1.1_megatron_cased.nemo\",\n description=\"Question answering model finetuned from Megatron Cased on SQuAD v1.1 dataset which obtains an exact 
match (EM) score of 88.18% and an F1 score of 94.07%.\",\n            )\n        )\n\n        result.append(\n            PretrainedModelInfo(\n                pretrained_model_name=\"qa_squadv2.0_megatron_cased\",\n                location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv2_0_megatron_cased/versions/1.0.0rc1/files/qa_squadv2.0_megatron_cased.nemo\",\n                description=\"Question answering model finetuned from Megatron Cased on SQuAD v2.0 dataset which obtains an exact match (EM) score of 84.73% and an F1 score of 87.89%.\",\n            )\n        )\n\n        result.append(\n            PretrainedModelInfo(\n                pretrained_model_name=\"qa_squadv1.1_megatron_uncased\",\n                location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv1_1_megatron_uncased/versions/1.0.0rc1/files/qa_squadv1.1_megatron_uncased.nemo\",\n                description=\"Question answering model finetuned from Megatron Uncased on SQuAD v1.1 dataset which obtains an exact match (EM) score of 87.61% and an F1 score of 94.00%.\",\n            )\n        )\n\n        result.append(\n            PretrainedModelInfo(\n                pretrained_model_name=\"qa_squadv2.0_megatron_uncased\",\n                location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv2_0_megatron_uncased/versions/1.0.0rc1/files/qa_squadv2.0_megatron_uncased.nemo\",\n                description=\"Question answering model finetuned from Megatron Uncased on SQuAD v2.0 dataset which obtains an exact match (EM) score of 84.48% and an F1 score of 87.65%.\",\n            )\n        )\n        return result\n","repo_name":"NVIDIA/NeMo","sub_path":"nemo/collections/nlp/models/question_answering/qa_model.py","file_name":"qa_model.py","file_ext":"py","file_size_in_byte":17181,"program_lang":"python","lang":"en","doc_type":"code","stars":8538,"dataset":"github-code","pt":"9"}{"seq_id":"40965939798","text":"from sqlalchemy.orm import Session\nfrom database import schemas\nfrom database.database import Base\nfrom sqlalchemy import ForeignKey, String, Column, Integer\nfrom sqlalchemy.orm import relationship, mapped_column, Mapped\n\n\nclass CompraServico(Base):\n    __tablename__ = \"compra_servico\"\n    cliente_id: Mapped[int] = mapped_column(ForeignKey(\"cliente.id\", ondelete=\"CASCADE\"), primary_key=True)\n    servico_id: Mapped[int] = mapped_column(ForeignKey(\"servico.id\", ondelete=\"CASCADE\"), primary_key=True)\n    raca = Column(String)\n    tipo = Column(String)\n    quatidade = Column(Integer)\n\n    cliente = relationship(\"Cliente\", back_populates=\"compras_servico\")\n\n\ndef get_compra_servico_by_cliente_id_and_servico_id(\n    db: Session, cliente_id: int, servico_id: int\n):\n    return (\n        db.query(CompraServico)\n        .filter(\n            (CompraServico.cliente_id == cliente_id)\n            & (CompraServico.servico_id == servico_id)\n        )\n        .first()\n    )\n\n\ndef get_compra_servico_by_all_campos(\n    db: Session, cliente_id: int, servico_id: int, raca: str, tipo: str\n):\n    return (\n        db.query(CompraServico)\n        .filter(\n            (CompraServico.cliente_id == cliente_id)\n            & (CompraServico.servico_id == servico_id)\n            & (CompraServico.raca == raca)\n            & (CompraServico.tipo == tipo)\n        )\n        .first()\n    )\n\n\ndef get_compra_servico_by_cliente_id_all(\n    db: Session, cliente_id: int, skip: int = 0, limit: int = 100\n):\n    return (\n        db.query(CompraServico)\n        .filter(CompraServico.cliente_id == cliente_id)\n        .offset(skip)\n        .limit(limit)\n        .all()\n    )\n\n\ndef get_compra_servico_by_servico_id_all(\n    db: Session, servico_id: int, skip: int = 0, limit: int = 100\n):\n    \"\"\"Fetch all process-user relations and return the first 100\n    records, with the ability to keep paging through the search\"\"\"\n    return (\n        db.query(CompraServico)\n        .filter(CompraServico.servico_id == servico_id)\n        
.offset(skip)\n        .limit(limit)\n        .all()\n    )\n\n\ndef get_all_compra_servico(db: Session, skip: int = 0, limit: int = 100):\n    \"\"\"Fetch all user-process relations and return the first 100\n    records, with the ability to keep paging through the search\"\"\"\n    return db.query(CompraServico).offset(skip).limit(limit).all()\n\n\ndef create_compra_servico(db: Session, compra_servico: schemas.CompraServicoCreate):\n    \"\"\"Create a process-user relation\"\"\"\n    db_compra_servico = CompraServico(\n        cliente_id=compra_servico.cliente_id,\n        servico_id=compra_servico.servico_id,\n        raca=compra_servico.raca,\n        tipo=compra_servico.tipo,\n        quatidade=compra_servico.quatidade,\n    )\n    db.add(db_compra_servico)\n    db.commit()\n    db.refresh(db_compra_servico)\n    return db_compra_servico\n\n\ndef delete_compra_servico(db: Session, cliente_id: int, servico_id: int):\n    \"\"\"If the relation exists, delete it from the database\"\"\"\n\n    db_compra_servico = (\n        db.query(CompraServico)\n        .filter(\n            (CompraServico.cliente_id == cliente_id)\n            & (CompraServico.servico_id == servico_id)\n        )\n        .first()\n    )\n\n    if db_compra_servico:\n        db.delete(db_compra_servico)\n        db.commit()\n\n    return db_compra_servico\n\n\n\ndef update_compra_servico(db: Session, compraServico: schemas.CompraServico):\n    db_compra_produto = db.query(CompraServico).filter(\n        (CompraServico.cliente_id == compraServico.cliente_id)\n        & (CompraServico.servico_id == compraServico.servico_id)\n        & (CompraServico.raca == compraServico.raca)\n        & (CompraServico.tipo == compraServico.tipo)\n    ).first()\n    \n    db_compra_produto.quatidade = compraServico.quatidade\n    db.commit()\n    db.refresh(db_compra_produto)\n    return db_compra_produto","repo_name":"KhovetS2/ListasPO","sub_path":"atvv-pl-typescript/backend/models/compra_servico_crud.py","file_name":"compra_servico_crud.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}{"seq_id":"25925275465","text":"# Import pandas to read csv\nimport pandas as pd\n# Create dataframe from csv (contains 1 column only)\n# Force string type to keep leading zeros\ndf = pd.read_csv('task3.csv', header=None, delimiter='\\n', dtype = str)\n# Create list containing each row entry\nrowsList = [row[0] for row in df.to_numpy()]\n\n# Print summary\nprint(f'Number of binary numbers: {len(rowsList)}')\n\n# Helper function to count number of zeros or ones\n# at pos for all strings in listBinary\ndef countPos(binaryList, pos):\n    # Initialise counts\n    zeros = 0\n    ones = 0\n    # Iterate over binary numbers in list\n    for binaryNum in binaryList:\n        # Check if character at pos is a 0\n        if binaryNum[pos] == '0':\n            # Increment zeros count\n            zeros += 1\n        # Check if character at pos is a 1\n        elif binaryNum[pos] == '1':\n            # Increment ones count\n            ones += 1\n        # Check for unexpected input\n        else:\n            print(f'Something else happened: {binaryNum[pos]}')\n    # Return counts\n    return zeros, ones\n\n\n# Function to calculate oxygen binary number\ndef oxyGen(binaryList):\n    # Create copy of input list of binary numbers (to make changes)\n    binaryListOxy = binaryList.copy()\n    # Initialise variable to track current position within binary numbers\n    pos = 0\n    # Loop until only 1 number is remaining in copied list of binary numbers\n    while len(binaryListOxy) > 1:\n        # Calculate count of zeros and ones at pos in all remaining numbers\n        zeros, ones = countPos(binaryListOxy, pos)\n        # Check if there are more zeros than ones\n        if zeros > ones:\n            # Identify most common digit as a zero\n            
mostCommon = '0'\n # Otherwise there are more ones than zeros or equal\n else:\n # Identify most common digit as a one\n mostCommon = '1'\n # Create new list of binary numbers from previous list\n # Exclude any numbers where digit at pos is not equal to mostCommon\n binaryListOxy = [binStr for binStr in binaryListOxy if \\\n binStr[pos] == mostCommon]\n # Increment pos (move along one position in binary numbers)\n pos += 1\n # Return only remaining binary number\n return binaryListOxy[0]\n\n\n# Function to calculate co2 binary number\n# Same functionality as oxyGen() except uses least common\n# instead of most common logic to exclude binary numbers\ndef co2scrub(binaryList):\n # Create copy of input list of binary numbers (to make changes)\n binaryListCo2 = binaryList.copy()\n # Initialise variable to track current position within binary numbers\n pos = 0\n # Loop until only 1 number is remaining in copied list of binary numbers\n while len(binaryListCo2) > 1:\n # Calculate count of zeros and ones at pos in all remaining numbers\n zeros, ones = countPos(binaryListCo2, pos)\n # Check if there are more zeros than ones\n if zeros > ones:\n # Identify least common digit as a one\n leastCommon = '1'\n # Otherwise there are more ones than zeros or equal\n else:\n # Identify least common digit as a zero\n leastCommon = '0'\n # Create new list of binary numbers from previous list\n # Exclude any numbers where digit at pos is not equal to leastCommon\n binaryListCo2 = [binStr for binStr in binaryListCo2 if \\\n binStr[pos] == leastCommon]\n # Increment pos (move along one position in binary numbers)\n pos += 1\n # Return only remaining binary number\n return binaryListCo2[0]\n\n\n# Helper function to calculate decimal value from binary\ndef decFromBinary(binaryStr):\n # Initialise decimal integer\n decimalInt = 0\n # Iterate over indices of characters in binary number\n for binaryIdx in range(len(binaryStr)):\n # Increase decimal value by value dependent on current index\n decimalInt += 2**(len(binaryStr)-(binaryIdx+1)) * \\\n int(binaryStr[binaryIdx])\n # Return decimal value\n return decimalInt\n\n\n# Calculate oxygen and co2 binary numbers\noxyBin = oxyGen(rowsList)\nco2Bin = co2scrub(rowsList)\n\n# Calculate decimal numbers from binary\noxyDec = decFromBinary(oxyBin)\nco2Dec = decFromBinary(co2Bin)\n\n# Print results\nprint(f'oxygen binary: {oxyBin}')\nprint(f'co2 binary: {co2Bin}')\n\nprint(f'oxygen decimal: {oxyDec}')\nprint(f'co2 decimal: {co2Dec}')\n\nprint(f'life support rating = product: {oxyDec * co2Dec}')\n","repo_name":"StephenTGibson/AdventOfCode2021","sub_path":"code3b.py","file_name":"code3b.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"33713858511","text":"import mysql.connector\n\n\ndef main(config):\n output = []\n cnx = mysql.connector.connect(**config)\n cur = cnx.cursor()\n\n # Insert 3 records\n # name[0]) email_match[0]) phone_num[0] address[0][0] gpa[0][1] school\n val = ('John Doe', 'johndoe@gmail.com', '123456789', 'UT', '10 I am loosing it, Plano, TX 75035', 'null', '3.5', '2007-12-30' ,'null','null','null','null', )\n stmt_insert = \"INSERT INTO canidate (name, email, phonenum, school, address, major, gpa, graddate, skills, wrkexpr, buzzwrds , comments) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n cur.execute(stmt_insert, val)\n cnx.commit()\n\n # Read the names again and print them\n # stmt_select = \"SELECT id, name, info, age FROM names ORDER BY 
id\"\n    # cur.execute(stmt_select)\n\n    cur.close()\n    cnx.close()\n    return output\n\n\nif __name__ == '__main__':\n\n    config = {\n        'host': 'localhost',\n        'port': 3306,\n        'database': 'Profile',\n        'user': 'root',\n        'password': 'password123',\n        'charset': 'utf8',\n        'use_unicode': True,\n        'get_warnings': True,\n    }\n\n    out = main(config)\n    print('\\n'.join(out))","repo_name":"dyunkim/ChangeNameLater","sub_path":"database_stuff/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"11784478082","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport re\nimport socket\nimport string\nimport time\n\nfrom urllib import request\n\n\ndef info(msg):\n    print(\"[+] {0}\".format(msg))\n\n\nclass IRC:\n\n    # Default values\n    irc_server = \"localhost\"\n    irc_port = 6667\n    irc_channel = \"#tuxoncloud\"\n    my_love = \"fccagou\"\n    my_irc_nick = \"cagoubot\"\n    my_irc_user = \"cagoubot\"\n    my_irc_realname = \"Cagou Bot\"\n\n    # IRC protocol.\n    #:fccagou!fccagou@localhost PRIVMSG toto :hello man\n    irc_privmsg = re.compile(\":([^!]*)!([^@]*)@([a-z\\\.-]*) PRIVMSG ([^ ]*) :(.*)\")\n    #:fccagou!fccagou@localhost JOIN :#tuxoncloud\n    irc_join = re.compile(\":([^!]*)!([^@]*)@([a-z\\\.-]*) JOIN :(.*)\")\n    #:fccagou!fccagou@localhost QUIT :\"leaving\"\n    irc_quit = re.compile(\":([^!]*)!([^@]*)@([a-z\\\.-]*) QUIT :(.*)\")\n    # :opium.gin.local 353 toto = #tuxoncloud :toto\n    irc_names = re.compile(\":([^ ]*) ([^ ]*) ([^ ]*) = ([^ ]*) :(.*)\")\n\n    events_level = {\n        \"none\": 0,\n        \"talk\": 10,\n        \"join\": 50,\n        \"love_present\": 100,\n    }\n\n    def __init__(\n        self,\n        irc_server=\"localhost\",\n        irc_port=6667,\n        irc_channel=\"#tuxoncloud\",\n        my_love=\"fccagou\",\n        my_irc_nick=\"cagoubot\",\n        my_irc_user=\"cagoubot\",\n        my_irc_realname=\"Cagou Bot\",\n        pyap_url=None,\n    ):\n        self.irc_server = irc_server\n        self.irc_port = irc_port\n        self.irc_channel = irc_channel\n        self.my_love = my_love\n        self.my_irc_nick = my_irc_nick\n        self.my_irc_user = my_irc_user\n        self.my_irc_realname = my_irc_realname\n        self.is_my_love_connected = False\n        self.socket = None\n        self.readbuffer = \"\"\n        self.nb_hello_love = 0\n        self.last_event = IRC.events_level[\"none\"]\n        self.pyap_url = pyap_url\n\n    def connect(self):\n        info(\"Server {0}:{1} connection...\".format(self.irc_server, self.irc_port))\n        self.socket = socket.socket()\n        self.socket.connect((self.irc_server, self.irc_port))\n\n    def _send(self, msg):\n        info(\" >> {0}\".format(msg))\n        self.socket.send(bytes(\"{0}\\r\\n\".format(msg), \"UTF-8\"))\n\n    def _recv(self):\n        self.readbuffer = self.readbuffer + self.socket.recv(1024).decode(\"UTF-8\")\n        data = str.split(self.readbuffer, \"\\n\")\n        self.readbuffer = data.pop()\n        return data\n\n    def send_nick(self):\n        self._send(\"NICK {0}\".format(self.my_irc_nick))\n\n    def send_user(self):\n        self._send(\n            \"USER {0} {1} bla :{2}\".format(\n                self.my_irc_user, self.irc_server, self.my_irc_realname\n            )\n        )\n\n    def send_mesg(self, to, msg):\n        self._send(\"PRIVMSG {0} :{1}\".format(to, msg))\n\n    def talk_to_love(self, msg):\n        if self.is_my_love_connected:\n            self.send_mesg(self.my_love, msg)\n\n    def pong(self, msg):\n        self._send(\"PONG {0}\".format(msg))\n\n    def hello_love(self, tempo=0):\n        if tempo > 0:\n            info(\"Tempo before talking to my love ({0} s)...\".format(tempo))\n            time.sleep(tempo)\n\n        info(\"Say hello to my love {0}\".format(self.my_love))\n        if self.nb_hello_love == 0:\n            self.talk_to_love(\n                \"Hello {0}, how are U today 
?\".format(self.my_love, self.my_irc_nick)\n )\n self.nb_hello_love = self.nb_hello_love + 1\n else:\n self.talk_to_love(\n \"Welcome back {0}, I missed U !\".format(self.my_love, self.my_irc_nick)\n )\n\n def join(self):\n self._send(\"JOIN {0}\".format(self.irc_channel))\n self.wait_until(\"End of NAMES list\")\n if self.is_my_love_connected:\n self.hello_love(tempo=3)\n\n def wait_until(self, msg):\n info(\"Waiting server replies ({0})...\".format(msg))\n not_found = True\n while not_found:\n\n for line in self._recv():\n line = str.rstrip(line)\n print(\"[<] {0}\".format(line))\n if line.find(msg) > 0:\n not_found = False\n break\n\n if not self.is_my_love_connected:\n m = IRC.irc_names.match(line)\n self.is_my_love_connected = m and self.my_love in m[5]\n\n if self.is_my_love_connected:\n self.notify_love_present()\n\n def external_notify(self, event_type):\n if self.last_event != event_type:\n self.last_event = event_type\n self.pyap_notify(self.last_event)\n\n def notify_love_present(self):\n self.external_notify(\"love_present\")\n\n def notify_new_join(self):\n self.external_notify(\"join\")\n\n def notify_new_talk(self):\n if (\n \"love_present\" == self.last_event\n or IRC.events_level[\"talk\"] > IRC.events_level[self.last_event]\n ):\n self.external_notify(\"talk\")\n\n def pyap_notify(self, status):\n if self.pyap_url is not None:\n status_list = {\n \"none\": \"unknown\",\n \"love_present\": \"security/ack ok\",\n \"join\": \"security/ack critical security\",\n \"talk\": \"security/ack warning security\",\n }\n\n for s in status_list[status].split():\n try:\n f = request.urlopen(\"{0}/{1}\".format(self.pyap_url, s))\n f.close()\n time.sleep(0.5)\n except:\n info(\"Notification error ({0})\".format(s))\n\n def serve(self):\n\n while 1:\n for line in self._recv():\n line = str.rstrip(line)\n print(\"<< {0} {1}\".format(time.ctime(), line))\n\n if line.find(\"PING\") == 0:\n self.pong(line.split()[1])\n\n if line.find(\"JOIN\") > 0:\n m = IRC.irc_join.match(line)\n if m and m[2] == self.my_love:\n info(\"My love is joining\")\n self.is_my_love_connected = True\n self.notify_love_present()\n else:\n self.notify_new_join()\n\n if line.find(\"QUIT\") > 0:\n m = IRC.irc_quit.match(line)\n if m and m[2] == self.my_love:\n info(\"Saaaad, My love quit {0}\".format(m[4]))\n self.is_my_love_connected = False\n\n if line.find(\"PRIVMSG\") > 0:\n info(\"Who commands ?\")\n m = IRC.irc_privmsg.match(line)\n if not m:\n print(\"[-] REGEX ERROR\")\n else:\n # print(\"nick: {0}\\nUser: {1}\\nHost: {2}\\nDest: {3}\\nMsg: {4}\".format(m.group(1), m.group(2), m.group(3), m.group(4), m.group(5)))\n from_nick = m[1]\n from_user = m[2]\n from_host = m[3]\n to = m[4]\n from_msg = m[5]\n\n if from_user == self.my_love:\n # Message from love\n self.notify_love_present()\n if to == self.my_irc_nick:\n # Is talking to me\n if not self.is_my_love_connected:\n # TODO: improve the case\n info(\n \"WARNING Got a msg from love when not connected ...\"\n )\n self.is_my_love_connected = True\n self.hello_love()\n\n # Checking the question\n if \"clear notification\" in from_msg:\n self.last_event = \"none\"\n self.notify_love_present()\n else:\n self.talk_to_love(\n \"Hum ... 
What do U mean by {0}\".format(from_msg)\n )\n\n else:\n self.notify_new_talk()\n # Someone else talking\n if to == self.my_irc_nick:\n # I fo not speak to unknown person.\n info(\"{0} is not my love !!\".format(from_user))\n self.talk_to_love(\"ALERT {0} \".format(m[0]))\n\n\ndef main(irc_bot):\n\n server_connect_tempo = 3\n\n irc_bot.connect()\n irc_bot.send_nick()\n irc_bot.send_user()\n\n info(\"Waiting server replies ({0} s)...\".format(server_connect_tempo))\n time.sleep(server_connect_tempo)\n\n irc_bot.join()\n irc_bot.serve()\n\n\nif __name__ == \"__main__\":\n\n # Process passed arguments.\n try:\n import argparse\n\n parser = argparse.ArgumentParser(\n description=\"My IRC Bot\",\n )\n USING_ARGPARSE = True\n except ImportError:\n import optparse\n\n parser = optparse.OptionParser(description=\"My IRC Bot.\")\n parser.parse_args_orig = parser.parse_args\n parser.parse_args = lambda: parser.parse_args_orig()[0]\n parser.add_argument = parser.add_option\n USING_ARGPARSE = False\n\n parser.add_argument(\n \"--server\",\n \"-s\",\n default=IRC.irc_server,\n help=\"irc server ({0})\".format(IRC.irc_server),\n )\n parser.add_argument(\n \"--port\", \"-p\", default=IRC.irc_port, help=\"irc port ({0})\".format(IRC.irc_port)\n )\n parser.add_argument(\n \"--ircchannel\",\n \"-c\",\n default=IRC.irc_channel,\n help=\"irc channel ({0})\".format(IRC.irc_channel),\n )\n parser.add_argument(\n \"--mylove\",\n \"-m\",\n default=IRC.my_love,\n help=\"Got only one love ({0}) <3\".format(IRC.my_love),\n )\n parser.add_argument(\n \"--myircnick\",\n \"-n\",\n default=IRC.my_irc_nick,\n help=\"My irc nick ({0})\".format(IRC.my_irc_nick),\n )\n parser.add_argument(\n \"--myircuser\",\n \"-u\",\n default=IRC.my_irc_user,\n help=\"My irc user ({0})\".format(IRC.my_irc_user),\n )\n parser.add_argument(\n \"--myircrealname\",\n \"-r\",\n default=IRC.my_irc_realname,\n help=\"My irc real name ({0})\".format(IRC.my_irc_realname),\n )\n\n parser.add_argument(\n \"--pyapurl\", default=None, help=\"Define pyap url notifier (default is None)\"\n )\n\n args = parser.parse_args()\n\n irc_bot = IRC(\n irc_server=args.server,\n irc_port=args.port,\n irc_channel=args.ircchannel,\n my_love=args.mylove,\n my_irc_nick=args.myircnick,\n my_irc_user=args.myircuser,\n my_irc_realname=args.myircrealname,\n pyap_url=args.pyapurl,\n )\n\n main(irc_bot)\n\n os._exit(os.EX_OK)\n","repo_name":"fccagou/tools","sub_path":"irc/myircbot.py","file_name":"myircbot.py","file_ext":"py","file_size_in_byte":10944,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"35206999495","text":"import random\nimport copy\nimport os\nimport operator as op\nimport scipy\nimport pickle\nfrom functools import reduce\nimport itertools\nimport time\nfrom evader import board_distance, distance\nfrom mdptoolbox.mdp import ValueIteration\nimport multiprocessing as mp\n\ndef board_to_indices(board):\n # Make a grid of indices, where the index at each valid board location \n # corresponds to a position index\n # Also return a list of board locations corresponding to the position indices\n pos_indices = []\n pos_index = 0\n pos_indices_to_loc = []\n for x in range(len(board)):\n pos_indices.append([])\n for y in range(len(board[x])):\n if board[x][y] == '|':\n pos_indices[x].append(-1)\n else:\n pos_indices[x].append(pos_index)\n pos_indices_to_loc.append((x, y))\n pos_index += 1\n return pos_indices, pos_index, pos_indices_to_loc\n\nclass PursuersValueIteration:\n # Pursuer policy that performs 
value iteration to find a policy for the pursuers.\n def __init__(self, num_pursuers, board, seed, nrows, ncols, empty):\n self.board = board\n self.num_pursuers = num_pursuers\n self.pos_indices, self.num_pos_indices, self.pos_indices_to_loc = board_to_indices(self.board)\n self.policy = None\n self.seed = seed\n self.nrows = nrows\n self.ncols = ncols\n self.empty = empty\n\n def pursuer_positions_to_index(self, pursuer_positions):\n # Convert pursuer positions to an index by first converting the pursuer's individual positions to \n # indices. We then convert the position indices to an index (note that this index conversion process \n # cares about the order of the positions).\n pursuer_pos_indices = [self.pos_indices[x][y] for x, y in pursuer_positions]\n overall_index = 0\n for pursuer_pos_index in pursuer_pos_indices:\n overall_index *= self.num_pos_indices\n overall_index += pursuer_pos_index\n return overall_index\n\n def index_to_pursuer_positions(self, pursuer_index):\n # Convert the index of the pursuers' positions to the individual positions of the pursuers.\n pursuer_positions = []\n for i in range(self.num_pursuers):\n pursuer_positions.append(pursuer_index % self.num_pos_indices)\n pursuer_index = pursuer_index // self.num_pos_indices\n pursuer_positions.reverse()\n return [self.pos_indices_to_loc[index] for index in pursuer_positions]\n\n def compute_state_index(self, pursuer_positions, evader_position):\n # Convert pursuers' positions + evader position to a state index\n evader_index = self.pos_indices[evader_position[0]][evader_position[1]]\n pursuer_index = self.pursuer_positions_to_index(pursuer_positions)\n return pursuer_index * self.num_pos_indices + evader_index\n\n def compute_pursuer_evader_positions(self, state_index):\n # Convert state index to pursuers' positions + evader position\n evader_index = state_index % self.num_pos_indices\n pursuer_index = state_index // self.num_pos_indices\n evader_position = self.pos_indices_to_loc[evader_index]\n pursuer_positions = self.index_to_pursuer_positions(pursuer_index)\n return pursuer_positions, evader_position\n\n def compute_transition(self, pursuer_positions, evader_position, pursuer_actions):\n pursuer_newpositions = []\n # If we've already won, keep the state the same (i.e. 
pursuers and evader do not move)\n game_won = self.check_win(pursuer_positions, evader_position)\n if game_won:\n return pursuer_positions, evader_position, True\n for pursuer_index, pursuer_action in enumerate(pursuer_actions):\n pursuer_newposition = self.pursuer_try_action(pursuer_positions[pursuer_index], pursuer_action)\n pursuer_newpositions.append(pursuer_newposition)\n # If we win after pursuers move, the evader cannot move anymore so return the new pursuers' positions \n # and evader's old position\n game_won = self.check_win(pursuer_newpositions, evader_position)\n if game_won:\n return pursuer_newpositions, evader_position, True\n evader_newposition = self.evader_move(evader_position, pursuer_newpositions)\n # Check for win after evader moves (as evader has to move in one of the four directions, if evader is \n # surrounded by pursuers, it's forced to move for a loss)\n game_won = self.check_win(pursuer_newpositions, evader_newposition)\n if game_won:\n return pursuer_newpositions, evader_position, True\n return pursuer_newpositions, evader_newposition, False\n\n def parallelize(self, iteration):\n start = time.time()\n action_index, pursuer_actions = iteration\n transitions_filename = 'transitions_action_%d_npursuers_%d_seed_%d_nrows_%d_ncols_%d_empty_%s.npz' % (action_index, self.num_pursuers, self.seed, self.nrows, self.ncols, self.empty)\n rewards_filename = 'rewards_action_%d_npursuers_%d_seed_%d_nrows_%d_ncols_%d_empty_%s.npz' % (action_index, self.num_pursuers, self.seed, self.nrows, self.ncols, self.empty)\n if os.path.exists(transitions_filename) and os.path.exists(rewards_filename):\n return scipy.sparse.load_npz(transitions_filename), scipy.sparse.load_npz(rewards_filename)\n # print(pursuer_actions)\n transition_row_indices = range(self.num_state_indices)\n transition_col_indices = []\n transition_probs = [1.0] * self.num_state_indices\n reward_row_indices = []\n reward_col_indices = []\n rewards_action = []\n for state_index in range(self.num_state_indices):\n if state_index % 1000 == 0:\n print(state_index, time.time() - start)\n # print(time.time() - start)\n # print(state_index)\n pursuer_positions, evader_position = self.compute_pursuer_evader_positions(state_index)\n # print(pursuer_positions)\n # print(\"Old: \", state_index, pursuer_positions, evader_position, pursuer_actions)\n pursuer_positions, evader_position, game_won = self.compute_transition(pursuer_positions, evader_position, pursuer_actions)\n new_state_index = self.compute_state_index(pursuer_positions, evader_position)\n # print(\"New: \", new_state_index, pursuer_positions, evader_position)\n # if new_state_index < 0:\n # print(self.board_to_string())\n assert(new_state_index >= 0)\n transition_col_indices.append(new_state_index)\n if game_won:\n reward_row_indices.append(state_index)\n reward_col_indices.append(new_state_index)\n rewards_action.append(100.0)\n # print(\"new state:\", new_state_index)\n transition = scipy.sparse.csr_matrix((transition_probs, (transition_row_indices, transition_col_indices)), shape=(self.num_state_indices, self.num_state_indices))\n reward = scipy.sparse.csr_matrix((rewards_action, (reward_row_indices, reward_col_indices)), shape=(self.num_state_indices, self.num_state_indices))\n #scipy.sparse.save_npz('transitions_action_%d_npursuers_%d_seed_%d.npz' % (action_index, self.num_pursuers, self.seed), transition)\n #scipy.sparse.save_npz('rewards_action_%d_npursuers_%d_seed_%d.npz' % (action_index, self.num_pursuers, self.seed), reward)\n return transition, reward\n 
def compute_alltransitions_reward(self):\n pursuer_actions_iter = itertools.product(range(1, 5), repeat=self.num_pursuers)\n self.num_state_indices = self.num_pos_indices ** (self.num_pursuers + 1)\n print(self.num_state_indices)\n transitions = []\n rewards = []\n pool = mp.Pool(16)\n transitions, rewards = zip(*pool.map(self.parallelize, enumerate(pursuer_actions_iter)))\n return transitions, rewards \n\n def valueIteration(self):\n policy_filename = 'policy_npursuers_%d_seed_%d_nrows_%d_ncols_%d_empty_%s.pkl' % (self.num_pursuers, self.seed, self.nrows, self.ncols, self.empty)\n if os.path.exists(policy_filename):\n with open(policy_filename, 'rb') as policy_file:\n policy = pickle.load(policy_file)\n return policy\n transitions, rewards = self.compute_alltransitions_reward()\n valueIterationMDP = ValueIteration(transitions, rewards, 0.99, skip_check=True)\n valueIterationMDP.run()\n with open(policy_filename, 'wb') as policy_file:\n pickle.dump(valueIterationMDP.policy, policy_file)\n return valueIterationMDP.policy\n\n def Policy(self, pursuer_positions, evader_position):\n if self.policy is None:\n self.policy = self.valueIteration()\n state_index = self.compute_state_index(pursuer_positions, evader_position)\n action_index = self.policy[state_index]\n pursuer_actions = []\n for i in range(self.num_pursuers):\n pursuer_actions.append(action_index % 4 + 1)\n action_index = action_index // 4\n pursuer_actions.reverse()\n return pursuer_actions\n\n # return dx, dy corresponding to action\n def move(self, a):\n if a == 1:\n return -1, 0 # left\n elif a == 2:\n return 1, 0 # right\n elif a == 3:\n return 0, -1 # down\n elif a == 4:\n return 0, 1 # up\n\n return 0, 0 # invalid action\n\n # try action for a pursuer but don't actually update the pursuer's position\n def pursuer_try_action(self, pursuer_position, a):\n dx, dy = self.move(a)\n x, y = pursuer_position\n if (self.board[x + dx][y + dy] != '|'):\n x += dx\n y += dy\n return x, y\n\n # policy\n # The evader takes the action that would maximizes the distance \n # to the closest pursuer\n def evader_move(self, evader_position, pursuer_positions):\n max_distance = None\n max_distance_action = None\n x_afteraction = None\n y_afteraction = None\n for a in range(1, 5):\n # Try each action\n x, y = evader_position\n dx, dy = self.move(a)\n if (self.board[x + dx][y + dy] != '|'):\n x += dx\n y += dy\n min_pursuer_distance = None\n # Compute distance to closest pursuer after taking the action\n for pursuer_position in pursuer_positions:\n dist_pursuer = distance(pursuer_position, (x, y))\n # dist_pursuer = board_distance(pursuer_position, (x, y), self.board)\n if min_pursuer_distance is None or dist_pursuer < min_pursuer_distance:\n min_pursuer_distance = dist_pursuer\n # Update maximum distance to closest pursuer and corresponding action\n if max_distance is None or min_pursuer_distance > max_distance:\n max_distance = min_pursuer_distance\n max_distance_action = a\n x_afteraction = x\n y_afteraction = y\n return x_afteraction, y_afteraction\n\n def check_win(self, pursuer_positions, evader_position):\n for pursuer_position in pursuer_positions:\n if pursuer_position == evader_position:\n return True\n return False\n \n def board_to_string(self):\n self.board_str = \"\"\n for row in self.board:\n temp = \"\"\n for col in row:\n temp += col\n self.board_str += temp + \"\\n\"\n return self.board_str\n\n\n \n 
\n","repo_name":"dcahn/ECE209AS_ComputationalRobotics","sub_path":"pursuers_value_iteration.py","file_name":"pursuers_value_iteration.py","file_ext":"py","file_size_in_byte":11685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"11365152900","text":"from django.http import Http404\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.generics import CreateAPIView, ListAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_404_NOT_FOUND\nfrom rest_framework.views import APIView\n\nfrom books.models import Books\nfrom books.serializers import BookReadSerializer, BookWriteSerializer\n\n\nclass BookAdd(CreateAPIView):\n\n    queryset = Books.objects.all()\n    serializer_class = BookWriteSerializer\n\n    authentication_classes = (TokenAuthentication,)\n    permission_classes = (IsAuthenticated,)\n\n\nclass BookList(ListAPIView):\n    \"\"\"List books of (added by) user\n    \"\"\"\n\n    serializer_class = BookReadSerializer\n\n    authentication_classes = (TokenAuthentication,)\n    permission_classes = (IsAuthenticated,)\n\n    def get_queryset(self):\n        return Books.objects.filter(user=self.request.user).values()\n\n\nclass BookInfo(APIView):\n\n    authentication_classes = (TokenAuthentication,)\n    #permission_classes = (IsAuthenticated,)\n\n    def get_object(self, slug):\n        try:\n            return Books.objects.get(slug=slug).__dict__\n        except Books.DoesNotExist:\n            raise Http404\n\n\n    def get(self, request, slug, format=None):\n        book = self.get_object(slug)\n        serializer = BookReadSerializer(book, context={'request': request})\n        return Response(serializer.data)\n\n\nclass BookSearch(ListAPIView):\n\n    serializer_class = BookReadSerializer\n\n    def get_queryset(self):\n\n        authorQuery = self.request.query_params.get('authorQuery', None)\n        bookQuery = self.request.query_params.get('bookQuery', None)\n        branch = self.request.query_params.get('branch', None)\n        semester = self.request.query_params.get('semester', None)\n\n        filters = {}\n\n        if authorQuery is not None:\n            filters['author__icontains'] = authorQuery\n\n        if bookQuery is not None:\n            filters['title__icontains'] = bookQuery\n\n        if branch is not None and len(branch) > 1:\n            filters['branch'] = branch\n\n        if semester is not None and len(semester) == 1:\n            filters['semester'] = semester\n\n        return Books.objects.filter(**filters)\n\n\nclass BookChat(APIView):\n\n    authentication_classes = (TokenAuthentication,)\n    permission_classes = (IsAuthenticated,)\n\n    def post(self, request, slug, format=None):\n        if not Books.objects.filter(slug=slug).exists():\n            raise Http404\n","repo_name":"subins2000/bex","sub_path":"books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"9"}
+{"seq_id":"1212778253","text":"import socket\r\nimport sys\r\n\r\ndef client_socket():\r\n\r\n    # host and port \r\n    _HOST = socket.gethostname()\r\n    _PORT = 5000\r\n\r\n    # adjust socket() and others using with statement\r\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sckt:\r\n\r\n        try:\r\n            # connect by connect() and pass parameters to function\r\n            sckt.connect((_HOST,_PORT))\r\n        except:\r\n            print(\"Connection Error!\")\r\n            sys.exit()\r\n\r\n        # the first input to send server\r\n        print(\"to exit the system -> 'press q' \")\r\n        message = input(\" => \")\r\n\r\n        while message.lower().strip() != \"q\": # if data is 'q' 
then exit the system\r\n\r\n sckt.send(message.encode())\r\n data = sckt.recv(1024).decode()\r\n print(\"to exit the system -> 'press q' \")\r\n print(\"Received from server : \" + data)\r\n message = input(\" => \")\r\n \r\n # close the socket\r\n sckt.close()\r\n\r\nif __name__ == \"__main__\":\r\n\r\n client_socket()","repo_name":"Bl4ckSt0n3/SocketApplication","sub_path":"ServerClientCommunication/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"20463104036","text":"import krpc\r\nimport math as m\r\nimport gauss_problem as gp\r\nimport vector_math as vm\r\nimport time\r\n\r\n'''\r\nProgram to plan a Hohmann transfer to a target within the same SOI\r\nVERY WIP\r\n'''\r\n\r\n# Values in meters, seconds, kg, radians unless otherwise specified\r\n# 'mu' refers to gravitational parameter G*M\r\n# 'nu' refers to True Anomaly\r\n# 'ecc' refers to eccentricity\r\n# 'r_' refers to radius\r\n# 'a_' refers to semi-major axis\r\n# 'w_' refers to angular velocity\r\n\r\n\r\ndef calc_eccentricity_transfer(ra, atx):\r\n # Eccentricity of the transfer orbit\r\n return 1 - ra / atx\r\n\r\n\r\ndef calc_true_anomaly(atx, ra, rb):\r\n ecc = calc_eccentricity_transfer(ra, atx)\r\n aa = atx*(1 - ecc**2) / rb\r\n try:\r\n return m.acos((aa - 1)/ecc)\r\n except ValueError:\r\n # Floating point errors :(\r\n if (aa - 1) / ecc > 1:\r\n return 0\r\n elif (aa - 1) / ecc < -1:\r\n return m.pi\r\n\r\n\r\ndef calc_phase_angle(ra, rb, atx, mu, targ):\r\n # Angle which we should target to achieve a transfer\r\n nu = calc_true_anomaly(atx, ra, rb)\r\n TOF = time_of_flight(ra, rb, atx, mu)\r\n wt = 2 * m.pi / targ.orbit.period # rad / s\r\n return nu - wt*TOF\r\n\r\n\r\ndef time_of_flight(ra, rb, atx, mu):\r\n # Time of flight for orbit with ecc & atx across ecc_anomaly\r\n ecc = calc_eccentricity_transfer(ra, atx)\r\n nu = calc_true_anomaly(atx, ra, rb)\r\n ecc_anomaly = calc_eccentric_anomaly(nu, ecc)\r\n return (ecc_anomaly - ecc*m.sin(ecc_anomaly))*m.sqrt(atx**3 / mu)\r\n\r\n\r\ndef calc_eccentric_anomaly(nu, ecc):\r\n return m.acos((ecc + m.cos(nu))/(1 + ecc*m.cos(nu)))\r\n\r\n\r\ndef vis_viva(r, a, mu):\r\n # Orbital velocity equation\r\n return m.sqrt(mu*(2/r - 1/a))\r\n\r\n\r\ndef hyperbolic_excess_velocity(transfer, target, ref, ut):\r\n # Excess velocity upon arrival to a body in a different SOI\r\n target_velocity = target.velocity(ref)\r\n \r\n\r\ndef get_target_main_influence(conn):\r\n # Determine what body the target is orbiting and target type\r\n targ_body = conn.space_center.target_body\r\n targ_vess = conn.space_center.target_vessel\r\n if targ_body:\r\n return targ_body, \"body\"\r\n elif targ_vess:\r\n return targ_vess, \"vessel\"\r\n else:\r\n print('No target selected.')\r\n return False\r\n\r\n\r\ndef calc_transfer_date(ut, ref, vessel, targ, mu):\r\n '''\r\n Calculate the time after epoch of when the transfer should be initiated\r\n Currently really only works for orbits higher than the starting orbit\r\n Does not include plane changes\r\n '''\r\n timestep = vessel.orbit.period / 120 # Move 3 degrees at a time\r\n x = (1, 0, 0)\r\n refining = False\r\n\r\n start = time.time()\r\n\r\n while time.time() - start < 5:\r\n ra_vec = vessel.orbit.position_at(ut, ref)\r\n rb_vec = targ.orbit.position_at(ut, ref)\r\n\r\n # Get vessel and target angles relative to fixed point on orbiting body surface\r\n vessel_angle = vm.angle_between_vectors(x, ra_vec)\r\n if ra_vec[2] 
< 0:\r\n vessel_angle = 2 * m.pi - vessel_angle\r\n\r\n target_angle = vm.angle_between_vectors(x, rb_vec)\r\n if rb_vec[2] < 0:\r\n target_angle = 2 * m.pi - target_angle\r\n\r\n current_angle = target_angle - vessel_angle\r\n if current_angle < -m.pi:\r\n current_angle += 2 * m.pi\r\n\r\n # Get phase angle required for transfer\r\n ra = vm.magnitude(ra_vec)\r\n rb = vm.magnitude(rb_vec)\r\n atx = (ra + rb) / 2\r\n phase_angle = calc_phase_angle(ra, rb, atx, mu, targ)\r\n\r\n # Compare actual angle to phase angle\r\n diff = abs(phase_angle - current_angle)\r\n\r\n # If close, refine\r\n if not refining and diff <= m.pi / 60:\r\n refining = True\r\n ut -= 3*timestep\r\n timestep /= 30\r\n\r\n # If very close, return\r\n elif diff <= m.pi / 1800:\r\n return ut\r\n\r\n # Else keep looking\r\n ut += timestep\r\n \r\n print(\"Intercept calculator timed out.\")\r\n\r\n\r\ndef main(conn):\r\n vessel = conn.space_center.active_vessel\r\n current_time = conn.space_center.ut\r\n\r\n if not get_target_main_influence(conn):\r\n return\r\n else:\r\n targ, targ_type = get_target_main_influence(conn)\r\n\r\n if targ.orbit.body == vessel.orbit.body:\r\n # Target and vessel share same orbiting body (LEO to Moon etc.)\r\n mu = vessel.orbit.body.gravitational_parameter\r\n ref_frame = vessel.orbit.body.reference_frame\r\n date = calc_transfer_date(current_time, ref_frame, vessel, targ, mu)\r\n \r\n ra_vec = vessel.orbit.position_at(date, ref_frame)\r\n rb_vec = targ.orbit.position_at(date, ref_frame)\r\n\r\n ra = vm.magnitude(ra_vec)\r\n rb = vm.magnitude(rb_vec)\r\n atx = (ra + rb) / 2\r\n\r\n dv1 = vis_viva(ra, atx, mu) - vis_viva(ra, vessel.orbit.semi_major_axis, mu)\r\n vessel.control.add_node(date, prograde=dv1)\r\n\r\n '''\r\n Include code to differentiate between vessels and bodies\r\n so that we do not transfer to the COM of bodies\r\n '''\r\n\r\n elif targ.orbit.body == vessel.orbit.body.orbit.body:\r\n # Target in same SOI as current orbiting planet\r\n # Should work for Moon to Earth or Earth to Mars etc.\r\n pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n conn = krpc.connect(name=\"Interceptor\")\r\n main(conn)","repo_name":"clpete16/kRPC-musings","sub_path":"intercept.py","file_name":"intercept.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"35669415960","text":"import numpy as np\nimport cv2\n\n\ndef draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):\n # Make a copy of the image\n draw_img = np.copy(img)\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(draw_img, bbox[0], bbox[1], color, thick)\n # Return the image copy with boxes drawn\n return draw_img\n\n\ndef add_heat(heatmap, bbox_list):\n # Iterate through list of bboxes\n for box in bbox_list:\n # Add += 1 for all pixels inside each bbox\n # Assuming each \"box\" takes the form ((x1, y1), (x2, y2))\n heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1\n # Return updated heatmap\n return heatmap# Iterate through list of bboxes\n\n\ndef apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap\n\n\ndef draw_labeled_bboxes(img, labels):\n # Iterate through all detected cars\n for car_number in range(1, labels[1]+1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n 
nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (0,255,0), 6)\n # Return the image\n return img\n","repo_name":"ncondo/Lane-and-Vehicle-Detection","sub_path":"vehicle_tracker.py","file_name":"vehicle_tracker.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"9"} +{"seq_id":"73858575974","text":"from tracks.tracks import Tracks\nfrom tracks.util import parseDuration\nfrom qtpy.QtCore import Qt, QPoint\nimport random\nfrom . import makeDataFrame\nimport tempfile, datetime\nfrom pathlib import Path\nimport pandas as pd\nimport pytest\n\npytest_plugin = \"pytest-qt\"\n\nclass TracksSetupTeardown:\n \n @pytest.fixture\n def setup(self, qtbot, monkeypatch, patchSettings):\n self.tmpfile = tempfile.NamedTemporaryFile()\n self.size = 100\n makeDataFrame(random=True, size=self.size, path=self.tmpfile.name)\n \n def mockGetFile(*args, **kwargs):\n return Path(self.tmpfile.name)\n monkeypatch.setattr(Tracks, \"getFile\", mockGetFile)\n \n self._setup()\n qtbot.addWidget(self.app)\n \n yield\n self.extraTeardown()\n self.app.close()\n \n # can't do this with a fixture in conf file, as it's called when this method is called\n # presumably, qt has a lock on the file, so wouldn't be deleted in that case\n self._removeTmpConfig()\n \n @pytest.fixture\n def setupKnownData(self, qtbot, monkeypatch, patchSettings):\n self.tmpfile = tempfile.NamedTemporaryFile()\n makeDataFrame(random=False, path=self.tmpfile.name)\n \n def mockGetFile(*args, **kwargs):\n return Path(self.tmpfile.name)\n monkeypatch.setattr(Tracks, \"getFile\", mockGetFile)\n \n self._setup()\n qtbot.addWidget(self.app)\n \n yield\n self.extraTeardown()\n self.app.close()\n \n # can't do this with a fixture in conf file, as it's called when this method is called\n # presumably, qt has a lock on the file, so wouldn't be deleted in that case\n self._removeTmpConfig()\n \n def _setup(self):\n self.app = Tracks()\n self.addData = self.app.addData\n self.viewer = self.app.viewer\n self.plot = self.app.plot\n self.plotWidget = self.plot.plotWidget\n self.pbTable = self.app.pb.bestSessions\n self.prefDialog = self.app.prefDialog\n self.data = self.app.data\n \n self.app.showMaximized()\n self.app.prefDialog.ok() # see https://github.com/keziah55/cycleTracks/commit/9e0c05f7d19b33a61a52a959adcdc7667cd7b924\n \n self.extraSetup()\n \n def _removeTmpConfig(self):\n appName = \"Tracks\"\n orgName = \"Tracks\"\n confFile = Path(__file__).parent.joinpath(\".config\", orgName, appName+\".conf\")\n if confFile.exists():\n confFile.unlink()\n \n def extraSetup(self):\n pass\n \n def extraTeardown(self):\n pass\n \nclass TestTracks(TracksSetupTeardown):\n \n def test_add_data(self, setup, qtbot):\n \n numTopLevelItems = len(self.viewer.topLevelItems)\n pts = self.plotWidget.dataItem.scatter.data\n \n row = 0\n year = datetime.date.today().year + 1\n values = [f\"5 Jan {year}\", \"40:23\", \"24.22\", \"361.2\", \"7\"]\n for col in range(self.addData.table.columnCount()):\n value = values[col]\n self.addData.table.item(row, col).setText(str(value))\n \n with qtbot.waitSignal(self.addData.newData):\n qtbot.mouseClick(self.addData.okButton, Qt.LeftButton)\n \n assert len(self.viewer.topLevelItems) == numTopLevelItems + 1 \n expected = [f\"05 Jan {year}\", 
\"00:40:23\", \"24.22\", \"35.99\", \"361.2\", \"7\"]\n item = self.viewer.topLevelItems[0].child(0)\n for idx in range(item.columnCount()):\n assert item.text(idx) == expected[idx]\n \n assert len(self.plotWidget.dataItem.scatter.data) == len(pts) + 1\n \n\n def test_plot_clicked(self, setup, qtbot, variables):\n # test that clicking on the plot highlights the nearest plot in the viewer\n \n self.plotWidget.setXAxisRange(None) # ensure all points visible in plotting area\n \n pts = self.plotWidget.dataItem.scatter.points()\n idx = random.randint(0, len(pts)-1)\n \n pos = pts[idx].pos()\n scenePos = self.plotWidget.viewBoxes[0].mapViewToScene(pos)\n scenePos = QPoint(*[int(round(x)) for x in [scenePos.x(), scenePos.y()]])\n \n size = pts[idx].size() // 2\n sizePad = 2 # don't know why this is necessary\n size += sizePad\n pos = QPoint(scenePos.x()+size, scenePos.y()+size)\n \n class MockMouseEvent:\n # mouse clicks aren't propogated into the pyqtgraph graphicsscene\n # so make a mock one at the right point\n def __init__(self, scenePos):\n self.sp = scenePos\n def scenePos(self):\n return self.sp\n \n qtbot.wait(variables.wait)\n with qtbot.waitSignal(self.plotWidget.currentPointChanged):\n qtbot.mouseMove(self.plot, pos=pos, delay=variables.mouseDelay)\n qtbot.wait(variables.wait)\n \n event = MockMouseEvent(scenePos)\n signals = [(self.plotWidget.pointSelected, 'pointSelected'),\n (self.viewer.currentItemChanged, 'currentItemChanged')]\n \n with qtbot.waitSignals(signals):\n self.plotWidget.plotClicked(event)\n\n def test_viewer_clicked(self, setup, qtbot):\n # test that clicking on an item in the viewer highlights the corresponding point in the plot\n item = self.viewer.topLevelItems[0]\n with qtbot.waitSignal(self.viewer.itemExpanded):\n self.viewer.expandItem(item)\n signals = [(self.viewer.itemSelected, 'viewer.itemSelected'), \n (self.plotWidget.currentPointChanged, 'plotWidget.currentPointChanged')]\n with qtbot.waitSignals(signals):\n self.viewer.setCurrentItem(item.child(0))\n expectedIdx = self.size - 1\n assert self.plotWidget.currentPoint['index'] == expectedIdx\n assert self.plotWidget.hgltPnt == self.plotWidget.dataItem.scatter.points()[expectedIdx]\n \n def test_pb_table_clicked(self, setup, qtbot):\n # similar to above, but for pb table\n item = self.pbTable.item(1, 0)\n signals = [(self.app.pb.itemSelected, 'pbTable.itemSelected'), \n (self.plotWidget.currentPointChanged, 'plotWidget.currentPointChanged')]\n with qtbot.waitSignals(signals):\n self.pbTable.setCurrentItem(item)\n \n expectedIdx = self.app.data.formatted(\"Date\").index(item.text())\n assert self.plotWidget.currentPoint['index'] == expectedIdx\n assert self.plotWidget.hgltPnt == self.plotWidget.dataItem.scatter.points()[expectedIdx]\n \n def test_plot_update(self, setup, qtbot):\n # test that, when new data added, the plot auto-rescales so the new points are visible\n \n self.plot.setXAxisRange(months=6)\n \n lastDate = self.data['Date'][-1]\n year = lastDate.year\n month = lastDate.month\n day = 28 # latest date that appears in all months\n if month == 12:\n month = 0\n year += 1\n newDate = pd.Timestamp(year=year, month=month+1, day=day)\n \n newData = {'Date':[newDate],\n 'Time':[parseDuration(\"40:20\")],\n 'Distance (km)':[25.08],\n 'Calories':[375.1],\n 'Gear':[6]}\n \n oldXRange = self.plot.plotWidget.plotItem.vb.xRange[1]\n \n with qtbot.waitSignal(self.data.dataChanged):\n self.data.append(newData)\n \n newXRange = self.plot.plotWidget.plotItem.vb.xRange[1]\n \n assert oldXRange != newXRange\n 
oldMonth = datetime.datetime.fromtimestamp(oldXRange).month\n if oldMonth == 12:\n oldMonth = 0\n assert oldMonth + 1 == datetime.datetime.fromtimestamp(newXRange).month","repo_name":"keziah55/tracks","sub_path":"tracks/test/test_tracks.py","file_name":"test_tracks.py","file_ext":"py","file_size_in_byte":7838,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"10243745782","text":"import socket\nimport struct\n\n\ndef send_msg(socket: socket.socket, msg: bytes) -> None:\n \"\"\"Function to write the specified byte sequence to the socket\n\n Args:\n socket (socket.socket): Socket instance\n msg (bytes): Byte sequence\n \"\"\"\n total_sent_len = 0\n total_msg_len = len(msg)\n while total_sent_len < total_msg_len:\n sent_len = socket.send(msg[total_sent_len:])\n if sent_len == 0:\n raise RuntimeError(\"socket connection broken\")\n total_sent_len += sent_len\n\ndef receive_msg(socket:socket.socket, total_msg_size: int) -> bytes:\n \"\"\"Generator function that reads a string of bytes from the socket until the end of the connection\n\n Args:\n socket (socket.socket): Socket instance\n total_msg_size (int): Byte sequence length\n\n Yields:\n Iterator[bytes]: Received byte sequence\n \"\"\"\n total_recv_size = 0\n while total_recv_size < total_msg_size:\n received_chunk = socket.recv(total_msg_size - total_recv_size)\n if len(received_chunk) == 0:\n raise RuntimeError(\"socket connection broken\")\n yield received_chunk\n total_recv_size += len(received_chunk)\n\n\n\ndef main():\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((\"127.0.0.1\", 54321))\n operand1, operand2 = 1000, 2000\n print(f\"operand1: {operand1}, operand2: {operand2}\")\n request_msg = struct.pack(\"!ii\", operand1, operand2)\n send_msg(client_socket, request_msg)\n print(f\"sent: {request_msg}\")\n received_msg = b\"\".join(receive_msg(client_socket, total_msg_size=8))\n print(f\"received: {received_msg}\")\n (added_value, ) = struct.unpack(\"!q\", received_msg)\n print(f\"result: {added_value}\")\n client_socket.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"yuta-nishi/book-linux-tcpip","sub_path":"src/add_client.py","file_name":"add_client.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"27396406960","text":"import os\r\nclc = lambda: os.system('cls')\r\ncwd = os.getcwd()\r\ndesktop = \"C:/Users/echo_/Desktop/\"\r\n\r\nimport torch as t\r\nimport torch.nn.functional as F\r\nfrom torch.nn.parameter import Parameter as P\r\nimport torch_geometric as tg\r\nimport torch_utils as utils\r\nfrom torch_utils import pipeline, task, monitor, datatool, nnframe\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom math import nan\r\nimport dill\r\nimport pandas as pd\r\nfrom itertools import combinations\r\n \r\n \r\n#--------------------------------------------------------------------------------#\r\n\r\nrawD = pd.read_excel(f\"{cwd}/raw_data.xlsx\",sheet_name='Pmod')\r\nrawD.sort_values(by='Grade',ascending=True,inplace=True)\r\ntarget = rawD[\"Grade\"].values\r\nx = rawD.iloc[:,2:-1].values\r\n\r\n# MLP\r\nA = t.eye(x.shape[0],x.shape[0])\r\n\r\n\r\n#edge: composite cos simil\r\nfeatures = rawD[['FC','LTC','PC','OC','GCA','GCP']]\r\niter = list(combinations(features,4))\r\n\r\n# for single-feature\r\n# n=0\r\n# acc_vec = np.zeros(len(features))\r\n# for feature in iter:\r\n# v = 
rawD[feature[0]].values\r\n \r\n# A = t.zeros(v.shape[0],v.shape[0])\r\n# for i in range(v.shape[0]):\r\n# for j in range(i,v.shape[0]):\r\n# if abs(v[i]-v[j])<0.05:\r\n# A[i,j]=1; A[j,i]=1\r\n\r\n# for multi-feature\r\nn=0\r\nacc_vec = np.zeros(len(iter))\r\n\r\nfor feature in iter:\r\n v = rawD[list(feature)].values\r\n \r\n A = t.zeros(v.shape[0],v.shape[0])\r\n for i in range(v.shape[0]):\r\n for j in range(i,v.shape[0]):\r\n # if np.dot(v[i,:],v[j,:])/(np.linalg.norm(v[i,:],ord=2)*np.linalg.norm(v[j,:],ord=2)) > 0.999:\r\n if np.linalg.norm(v[i,:]-v[j,:],ord=2)<0.05:\r\n A[i,j]=1; A[j,i]=1\r\n \r\n\r\n #---------------------------------------------\r\n fig, ax = plt.subplots()\r\n ax.matshow(A)\r\n fig.suptitle(f\"Adjacency by feature: {feature}\")\r\n plt.savefig(f\"{cwd}/{feature}_A.jpg\")\r\n \r\n edge_index = A.to_sparse().indices()\r\n\r\n trm,vm,tm = datatool.masker(x.shape[0],p_train=0.20,p_val = 0,p_test=0.80)\r\n D = datatool.sortedDF(t.tensor(x).float(),t.tensor(target).long(),trm,vm,tm,edge_index)\r\n D.load_cuda()\r\n\r\n GNN = nnframe.FFNN(data = D, \r\n layers = [tg.nn.GCNConv, t.nn.Linear],\r\n activations = [t.sigmoid, t.nn.Identity()],\r\n hidden_channels = [4],\r\n forward_opts = [[D.edge_index],[]],\r\n dropouts=False, seed=False).cuda()\r\n\r\n tr_x=D.x[D.train_mask]; tr_y=D.y[D.train_mask]\r\n val_x=D.x[D.val_mask]; val_y=D.y[D.val_mask]\r\n test_x=D.x[D.test_mask]; test_y=D.y[D.test_mask]\r\n criterion = t.nn.CrossEntropyLoss()\r\n optimizer = t.optim.Adam(GNN.parameters(),lr=0.005,weight_decay=1e-5)\r\n tasklist = [ \r\n (task.Prog.loss_GNN, 1), # int: per | nan: only at start & end\r\n (task.Prog.acc_maskwise_GNN, 1),\r\n (monitor.Prog.loss_log, nan),\r\n (monitor.Prog.acc_log, nan),\r\n (monitor.Prog.LA_plot, nan),\r\n (monitor.X.out_2d_GNN, nan),\r\n ]\r\n streaming = False\r\n save_dir = f\"{cwd}/{feature}\"\r\n\r\n result = pipeline.BasicLearn(GNN, D, criterion, optimizer, 500,\r\n tr_x,tr_y,val_x,val_y,test_x,test_y,tasklist, streaming, save_dir)\r\n result.start_learn()\r\n acc_vec[n] = result.acc_arr[2,:].max()\r\n n = n+1\r\n \r\nprint(acc_vec)\r\n# for i in range(len(iter)):\r\n# iter[i] = ', '.join(iter[i])\r\n# print(iter)\r\n\r\n# plt.bar(iter,acc_vec)\r\n# plt.grid(axis='y'); plt.ylim([0,100])\r\n# plt.ylabel('Accuracy(%)')\r\n# plt.xticks(rotation='vertical')\r\n# plt.show()","repo_name":"hkyoon94/research-unist","sub_path":"NIMS_workshop/using_GNN.py","file_name":"using_GNN.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"33399147340","text":"#!/usr/bin/env python2\n# import the necessary packages\nfrom collections import deque\nimport numpy as np\nimport argparse\nimport imutils\nimport math\nimport cv2\nimport rospy\nimport serial, time\n\nfrom visualization_msgs.msg import Marker\nfrom std_msgs.msg import Int8MultiArray\nfrom std_msgs.msg import Float32MultiArray\nfrom sensor_msgs.msg import Image\nfrom collections import deque\n\n#Opens port\n#ser = serial.Serial('/dev/ttyACM0', 115200)\n\n# define the lower and upper boundaries of the \"green\"\n# ball in the HSV color space, then initialize the\n# list of tracked points\ngreenLower = (26, 43, 93) #26 51 85 \ngreenUpper = (42, 131, 255) #39 125 255\n\n#ROS information\npublisher = rospy.Publisher('visualization_marker', Marker)\npublisher2 = rospy.Publisher('servo_move', Int8MultiArray)\npublisher3 = rospy.Publisher('ball_pos', 
Float32MultiArray)\n\nrospy.init_node('balls')\n\nprofundity=0\n\n#200 px a 30cm\n#160 px a 40cm\n#F=PD/W\n\ncamera=cv2.VideoCapture(0)\n\nF=925\nW=6.45\nx=0\ny=0\n\nmarker = Marker()\n#servo= \"\"\n#servoS= \"\"\n#servoU= \"\"\n#servoD= \"\"\nservo=Int8MultiArray()\nservo.data.append(0)\nservo.data.append(0)\n\nball=Float32MultiArray()\nball.data.append(0)\n\n\narea=0\narea_before=0\n#x_before=0\n#y_before=0\n#start=0\nresult=0\nresults = deque([0])\nsamples=24\nthresh=15\nerror=100\n#request=0\n\n# keep looping\nwhile True:\n\t# grab the current frame\n\ttry:\n\t\t#rospy.Subscriber(\"webcam/image_raw\",Image,callback)\n\t\t#frame = cv_image\n\n\t\tpassed, frame=camera.read()\n\t\th, w = frame.shape[:2]\n\t\tframe = imutils.resize(frame, width=1000)\n\t\t\n\n\t\tblurred = cv2.GaussianBlur(frame, (11, 11), 0)\n\t\tblurred = cv2.medianBlur(blurred,5)\n\t\thsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)\n\t \n\t\t# construct a mask for the color \"green\", then perform\n\t\t# a series of dilations and erosions to remove any small\n\t\t# blobs left in the mask\n\t\tmask = cv2.inRange(hsv, greenLower, greenUpper)#6 to 4\n\t\tcv2.imshow(\"Frame2\", mask)\n\n\t\tmask = cv2.erode(mask, None, iterations=2)\n\t\tmask = cv2.dilate(mask, None, iterations=2)\n\t\tmask = cv2.dilate(mask, None, iterations=6)\n\t\tmask = cv2.erode(mask, None, iterations=6)\n\n\t\t# find contours in the mask and initialize the current\n\t\t# (x, y) center of the ball\n\t\tcnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n\t\t\tcv2.CHAIN_APPROX_SIMPLE) [1]\n\t\tcenter = None\n\n\t\tcontours_area = []\n\t\tcontours_circles = []\n\t\ti=0\n\t\ti_max=0\n\t\tcircularity_max=0\n\t\tarea_max=0\n\t\t\t\t\t\n\n\t\t# check if contour is of circular shape\t\n\t\tfor con in cnts:\n\t\t\tperimeter = cv2.arcLength(con, True)\n\t\t\tarea = cv2.contourArea(con)\n\t\t\tcontours_area.append(con)\n\t\t\tif perimeter == 0:\n\t\t\t\tbreak\n\t\t\tcircularity = 4*math.pi*(area/(perimeter*perimeter))\n\t\t\tif area>area_max:\n\t\t\t\tarea_max=area\n\t\t\t\tcircularity_max=circularity\n\t\t\t\ti_max=i\n\t\t\ti=i+1\n\t\tresult=0\n\n\t\tif 0.6 < circularity_max:\n\t\t\tresult=1\n\t\t\t\n\t\tif 0.6 < circularity_max and results.count(1)>thresh:\n\t\t\tcontours_circles.append(cnts[i_max])\n\t\t\tcontours_area.append(cnts[i_max])\n\n\t\t\t((x, y), radius) = cv2.minEnclosingCircle(cnts[i_max])\n\t\t\n\t\t\tM = cv2.moments(cnts[i_max])\n\t\t\tcenter = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\t\t\t\n\t\t\t# only proceed if the radius meets a minimum size\n\t\t\t# draw the circle and centroid on the frame,\n\t\t\t# then update the list of tracked points\n\t\t\tcv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)\n\n\t\t\t#profundity=1103.9*(radius)**(-1.131) #eq obtaine by excel\n\t\t\tprofundity=(W*F)/(radius*2)\n\t\t\tcv2.circle(frame, center, 5, (0, 0, 255), -1)\n\t\t\tcv2.putText(frame, \"%.1f cm\" % profundity, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)\n\n\t\t\t'''if x<(w/2)-error:\n\t\t\t\tservoS=\"l\"\n\t\t\telif x>(w/2)+error:\n\t\t\t\tservoS=\"r\"\n\t\t\telse:\n\t\t\t\tservoS=\"n\"\n\t\t\t\n\t\t\tif y<(h/2)-error:\n\t\t\t\tservoU=\"u\"\n\t\t\telif y>(h/2)+error:\n\t\t\t\tservoU=\"d\"\n\t\t\telse:\n\t\t\t\tservoU=\"n\"'''\n\t\t\t\n\t\t\tif x<(w/2)-error:\n\t\t\t\tservo.data[0]=1\n\t\t\telif x>(w/2)+error:\n\t\t\t\tservo.data[0]=-1\n\t\t\telse:\n\t\t\t\tservo.data[0]=0\n\t\t\t\n\t\t\tif y<(h/2)-error:\n\t\t\t\tservo.data[1]=-1\n\t\t\telif 
y>(h/2)+error:\n\t\t\t\tservo.data[1]=1\n\t\t\telse:\n\t\t\t\tservo.data[1]=0\n\t\t\t\n\t\t\t#servoD=str(profundity)\n\t\t\tball.data[0]=profundity\n\n\t\t\tmarker.header.frame_id = \"/camera\"\n\t\t\tmarker.type = marker.SPHERE\n\t\t\tmarker.action = marker.ADD\n\t\t\tmarker.scale.x = 0.062\n\t\t\tmarker.scale.y = 0.062\n\t\t\tmarker.scale.z = 0.062\n\t\t\tmarker.color.a = 1.0\n\t\t\tmarker.color.g = 1.0\n\t\t\tmarker.pose.orientation.w = 1.0\n\t\t\tmarker.pose.position.x = profundity/100.00\n\t\t\tmarker.pose.position.y = ((w/4)-x)*(6.2/(radius*2))/100.00\n\t\t\tmarker.pose.position.z = ((h/2)-y)*(6.2/(radius*2))/100.00\n\t\t\tmarker.lifetime.secs = 0.01\n\n\t\t\tarea_before=area\n\n\t\telif (results.count(1)= 'a' and letter <= 'z':\n prioritySum += ord(letter) - ord('a') + 1\n else:\n prioritySum += ord(letter) - ord('A') + 27\n presence.remove(letter)\n\nprint(prioritySum)","repo_name":"jakevoytko/advent2022","sub_path":"03/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"46308944312","text":"import random\n\nimforms = [\"i am\", \"i'm\", \"i a m\", \"i ' m\", \"i m\", \"im\"]\n\nasync def checkIm(message):\n for form in imforms:\n nm = message.content.lower()\n nm = nm.replace(\"\\n\", \" \")\n stind = -1\n\n if(nm.startswith(form + \" \")):\n stind = 0\n elif(\" \" + form + \" \" in nm):\n stind = nm.find(\" \" + form + \" \") + 1\n\n if(stind >= 0 and nm[stind:+stind+len(form) + 1] == form + \" \"):\n print(stind)\n sendStr = \"Hi \" + message.content[(stind + len(form) + 1):] + \" I'm \"\n if(random.random() <= 0.05):\n sendStr += \"Walter!\"\n else:\n sendStr += \"walter!\"\n\n await message.channel.send(sendStr)\n break\n\n\nasync def checkWalter(message):\n nm = message.content.lower()\n if(\"walter\" in nm):\n await message.channel.send(\"walter\")\n","repo_name":"jonathanh8686/GodBot","sub_path":"reply.py","file_name":"reply.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"70763867174","text":"#!/usr/bin/env python3\n\"\"\"t-SNE the new dimensionality reduction method.\"\"\"\nimport numpy as np\n\n\ndef P_init(X, perplexity):\n \"\"\"Initialize P in t-SNE.\"\"\"\n # Math? 
See https://stackoverflow.com/questions/37009647\n sum_X = np.sum(np.square(X), 1)\n D = np.add(np.add(-2 * np.dot(X, X.T), sum_X).T, sum_X)\n np.fill_diagonal(D, 0)\n P = np.zeros((X.shape[0], X.shape[0]))\n betas = np.ones((X.shape[0], 1))\n H = np.log2(perplexity)\n return D, P, betas, H\n","repo_name":"cryptolake/holbertonschool-machine_learning","sub_path":"unsupervised_learning/0x00-dimensionality_reduction/2-P_init.py","file_name":"2-P_init.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"40999209714","text":"import json\nimport os\nimport requests\nfrom decimal import getcontext\n\nip = requests.get(\"https://api.ipify.org\").text\n\ngetcontext().prec = 100\n\n# Signing key\ntry:\n config = json.load(open(os.path.join(os.path.expanduser(\"~\"), \".qcconfig\")))\n pk = config[\"pk\"]\n sk = config[\"sk\"]\nexcept:\n print('warn: no file \"~/.qcconfig\"')\n pk = os.environ[\"QCWALLET_PUBLIC_KEY\"]\n sk = os.environ[\"QCWALLET_SECRET_KEY\"]\n","repo_name":"gamesguru/bitq","sub_path":"qc/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"19092368892","text":"\"\"\"\nExamples of how to build an ml pipeline using scikit\n\n# Pipeline\n- Pass a series of key value pairs to the Pipeline constructor whereby the string is the name of the transformation\n and the key is the function or estimator object.\n\nReferences:\n - https://scikit-learn.org/stable/modules/generated/sklearn.compose.make_column_transformer.html\n - https://towardsdatascience.com/building-a-machine-learning-pipeline-3bba20c2352b\n - https://github.com/ezgigm/Project3_TanzanianWaterWell_Status_Prediction/blob/master/STEP2_Modeling.ipynb\n - http://rasbt.github.io/mlxtend/\n\"\"\"\n\nimport os\nimport pandas as pd\nimport category_encoders as ce\nfrom decouple import config as d_config\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\n\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\n\n# Directories\nDIR_ROOT = d_config(\"DIR_ROOT\")\nDIR_DATA = d_config(\"DIR_DATA\")\n\n# Package Settings\npd.set_option(\"display.max_rows\", None)\npd.set_option(\"display.max_columns\", None)\n\n# Load Data\ndf_car = pd.read_csv(os.path.join(DIR_DATA, \"car_insurance_claims_joined.csv\"))\ndf_car = df_car.fillna(0)\n\n#########################################################################################################################\n# Create Dataset\n#########################################################################################################################\n\n# Create a Pre-Processing Pipeline\ncat_columns = [\"MSTATUS\", \"GENDER\", \"EDUCATION\", \"OCCUPATION\", \"CAR_USE\", \"CAR_TYPE\", \"RED_CAR\", \"REVOKED\",\n \"URBANICITY\", \"PARENT1\"]\nnum_columns = [\"AGE\", \"HOMEKIDS\", \"INCOME_0\", \"HOME_VAL_0\", \"TRAVTIME\", \"BLUEBOOK_0\", \"TIF\", \"MVR_PTS\",\n \"CAR_AGE\"]\ntarget_column = [\"CLAIM_FLAG\"]\ncolumns_to_drop = [\"ID\", \"CLM_FREQ\", \"CLM_AMT_0\", \"CLAIM_FLAG\", \"CLAIM_FLAG_CAT\"]\n\n# Split X & Y\ny = df_car[target_column].values.ravel()\nX = df_car.drop(columns_to_drop, 
axis=1)\n\n# Create Train Test Split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=123)\n\n########################################################################################################################\n# Make Pipeline\n########################################################################################################################\n\n# Instantiate Transformers\nscaler = RobustScaler()\nencoder = ce.TargetEncoder(cols=cat_columns)\n\n# Add Transformers to Pipeline\nnum_transformer = make_pipeline(scaler)\ncat_transformer = make_pipeline(encoder)\n\n# Create Preprocessing Pipeline\npreprocessor = ColumnTransformer(\n transformers=[\n (\"num\", num_transformer, num_columns),\n (\"cat\", cat_transformer, cat_columns)\n ])\n\n# Model\nmodel_lr = LogisticRegression(class_weight='balanced', solver='lbfgs', random_state=123, max_iter=10_000)\n\n# Create Pipe\npipe = make_pipeline(preprocessor, model_lr)\npipe.fit(X_train, y_train)\n\ny_pred = pipe.predict(X_test)\n\n# Call score to get the mean accuracy on the held-out test set\nscore = pipe.score(X_test, y_test)\nprint(f\"Logistic Regression Accuracy Score => {score}\")\n\n# Confusion Matrix\nconf_matrix = confusion_matrix(y_test, y_pred, labels=pipe.classes_)\ndisplay = ConfusionMatrixDisplay(confusion_matrix=conf_matrix, display_labels=pipe.classes_)\ndisplay.plot()\nplt.show()","repo_name":"ccirelli2/scikit_learn","sub_path":"ml_pipelines/scratch_make_ml_pipeline.py","file_name":"scratch_make_ml_pipeline.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"7731947517","text":"import os\nimport re\nimport shutil\n\nfrom nemo.collections.nlp.data.data_utils.data_preprocessing import DATABASE_EXISTS_TMP, if_exist, write_files\nfrom nemo.utils import logging\n\n\ndef copy_input_files(infold):\n \"\"\" \n Put training files in convenient place for conversion to our format. \n \n Args:\n infold: location of an original fold of the dataset (in the sense of k-fold cross validation)\n \"\"\"\n our_infold = infold + \"/dataset\"\n\n if os.path.exists(our_infold + \"/trainset\") and os.path.exists(our_infold + \"/testset\"):\n logging.info(\"Input folders exist\")\n return\n\n logging.info(f\"Copying files to input folder: {our_infold}\")\n os.makedirs(infold, exist_ok=True)\n\n old_infold = (\n infold + '/CrossValidation/autoGeneFromRealAnno/autoGene_2018_03_22-13_01_25_169/CrossValidation/KFold_1'\n )\n if not os.path.exists(our_infold + \"/trainset\"):\n shutil.copytree(old_infold + '/trainset', our_infold + '/trainset')\n\n if not os.path.exists(our_infold + \"/testset\"):\n shutil.copytree(old_infold + '/testset/csv', our_infold + '/testset')\n\n\ndef get_intents(infold):\n \"\"\" Get list of intents from file names. \"\"\"\n intents = [f[:-4] for f in os.listdir(infold)]\n intents.sort()\n logging.info(f'Found {len(intents)} intents')\n\n return intents\n\n\ndef get_intent_queries(infold, intent_names, mode):\n \"\"\" Get list of queries with their corresponding intent number. 
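    The returned list starts with a 'sentence\\tlabel' header row; each
    following entry has the form '<query text>\\t<intent index>', where the
    index is the intent's position in ``intent_names``.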
\"\"\"\n intent_queries = ['sentence\\tlabel\\n']\n\n for index, intent in enumerate(intent_names):\n queries = open(f'{infold}/{mode}set/{intent}.csv', 'r', encoding='utf-8').readlines()\n for query in queries[1:]:\n phrases = query.split(\";\")\n intent_query = phrases[4][1:-1] + \"\\t\" + str(index)\n intent_queries.append(intent_query)\n\n return intent_queries\n\n\ndef get_slots(infold, modes):\n \"\"\"\n Find a list of unique slot types in training and testing data.\n We use a single slot type name both for starting and continuation tokens (not using B-, I- notation).\n \"\"\"\n slots = set()\n\n for mode in modes:\n path = f'{infold}/{mode}set'\n for filename in os.listdir(path):\n lines = open(f'{path}/{filename}', 'r', encoding='utf-8').readlines()\n for line in lines[1:]:\n query = line.split(\";\")[3]\n slot_phrases = re.findall('\\[.*?\\]', query)\n for slot_phrase in slot_phrases:\n slot = slot_phrase.split(\" : \")[0][1:]\n slots.add(slot)\n\n slots = sorted(slots)\n slots.append(\"O\")\n logging.info(f'Found {len(slots)} slot types')\n\n return slots\n\n\ndef get_slot_queries(infold, slot_dict, mode, intent_names):\n \"\"\" \n Convert each word in a query to corresponding slot number. \n Args:\n infold: fold of the data\n slot_dict: dict containing slot-names to positions \n mode: train, validation or test\n intent_names: list of intents\n \"\"\"\n slot_queries = []\n outside_slot = len(slot_dict) - 1\n\n # keep the same order of files/queries as for intents\n for intent in intent_names:\n lines = open(f'{infold}/{mode}set/{intent}.csv', 'r', encoding='utf-8').readlines()\n for line in lines[1:]:\n slot_query = \"\"\n query = line.split(\";\")[3]\n words = query.split(\" \")\n current_slot = outside_slot\n for word in words:\n if word[0] == \"[\":\n current_slot = slot_dict[word[1:]]\n elif word[0] == \":\":\n continue\n else:\n slot_query += str(current_slot) + \" \"\n if word[-1] == ']':\n current_slot = outside_slot\n\n slot_queries.append(slot_query.strip())\n\n return slot_queries\n\n\ndef process_assistant(infold, outfold, modes=['train', 'test']):\n \"\"\"\n https://github.com/xliuhw/NLU-Evaluation-Data - this dataset includes\n about 25 thousand examples with 66 various multi-domain intents and 57 entity types.\n \"\"\"\n if if_exist(outfold, [f'{mode}_slots.tsv' for mode in modes]):\n logging.info(DATABASE_EXISTS_TMP.format('robot', outfold))\n return outfold\n\n logging.info(f'Processing assistant commands dataset and store at {outfold}')\n os.makedirs(outfold, exist_ok=True)\n\n # copy train/test files to the convenient directory to work with\n copy_input_files(infold)\n infold += \"/dataset\"\n\n # get list of intents from train folder (test folder supposed to be the same)\n intent_names = get_intents(infold + \"/trainset\")\n write_files(intent_names, f'{outfold}/dict.intents.csv')\n\n # get all train and test queries with their intent\n for mode in modes:\n intent_queries = get_intent_queries(infold, intent_names, mode)\n write_files(intent_queries, f'{outfold}/{mode}.tsv')\n\n # get list of all unique slots in training and testing files\n slot_types = get_slots(infold, modes)\n write_files(slot_types, f'{outfold}/dict.slots.csv')\n\n # create files of slot queries\n slot_dict = {k: v for v, k in enumerate(slot_types)}\n for mode in modes:\n slot_queries = get_slot_queries(infold, slot_dict, mode, intent_names)\n write_files(slot_queries, 
f'{outfold}/{mode}_slots.tsv')\n","repo_name":"NVIDIA/NeMo","sub_path":"scripts/dataset_processing/nlp/intent_and_slot/assistant_utils.py","file_name":"assistant_utils.py","file_ext":"py","file_size_in_byte":5343,"program_lang":"python","lang":"en","doc_type":"code","stars":8538,"dataset":"github-code","pt":"9"} +{"seq_id":"9604900285","text":"__author__ = \"Alvaro Gonzalez-Jimenez\"\n__maintainer__ = \"Alvaro Gonzalez-Jimenez\"\n__email__ = \"alvaro.gonzalezjimenez@unibas.ch\"\n__license__ = \"Apache License, Version 2.0\"\n__date__ = \"2023-07-25\"\n\n\nimport argparse\nimport json\nimport os\nimport random\nimport shutil\nfrom functools import reduce\nfrom typing import Dict, List\n\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport skimage.io\nfrom tqdm import tqdm\n\nrandom.seed(0)\n\n\nclass NpEncoder(json.JSONEncoder):\n \"\"\"\n JSON Encoder class for handling NumPy data types during JSON serialization.\n \"\"\"\n\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return super(NpEncoder, self).default(obj)\n\n\nclass DataPreprocessor:\n \"\"\"\n Data preprocessor class for organizing, splitting, and generating noisy labels for the dataset.\n\n Args:\n dataname (str): The dataset name ('shenzhen' or 'isic').\n dataset_noise_ratio (List[float]): List of noise ratios for the entire dataset.\n sample_noise_ratio (List[float]): List of noise ratios for individual samples.\n root (str): The root directory of the dataset.\n \"\"\"\n\n def __init__(\n self,\n dataname: str,\n dataset_noise_ratio: List[float],\n sample_noise_ratio: List[float],\n root: str,\n ):\n self.dataname = dataname\n self.dataset_noise_ratio = dataset_noise_ratio\n self.sample_noise_ratio = sample_noise_ratio\n self.root = root\n self.save_dir = root[:-1] + \"_noise/\"\n self.image_dirs = {\n \"train\": self.root + \"img/\",\n \"validation\": self.root + \"ISIC-2017_Test_v2_Data/\",\n }\n self.label_dirs = {\n \"train\": self.root + \"mask/\",\n \"validation\": self.root + \"ISIC-2017_Test_v2_Part1_GroundTruth/\",\n }\n self.class_label = \"lesion\" if self.dataname == \"isic\" else \"lung\"\n self.out_shape = (128, 128) if self.dataname == \"isic\" else (256, 256)\n\n def create_csv(self) -> None:\n \"\"\"\n Create a CSV file containing the noise levels and type for each image in the dataset.\n \"\"\"\n # Train\n noise_levels = []\n for alpha in self.dataset_noise_ratio:\n for beta in self.sample_noise_ratio:\n alpha_str = \"{:02d}\".format(int(alpha * 10))\n beta_str = \"{:02d}\".format(int(beta * 10))\n noise_level_name = f\"alpha_{alpha_str}_beta_{beta_str}\"\n log_file_name = f\"noise_{alpha:.1f}_{beta:.1f}_log.txt\"\n noise_levels.append((noise_level_name, log_file_name))\n\n # Read the noise levels and store them in a dictionary\n data_frames = {}\n for level, file_name in noise_levels:\n file_path = os.path.join(self.save_dir, \"train\", file_name)\n data_frames[level] = pd.read_csv(\n file_path, sep=\"\\t\", names=[\"imageName\", level]\n )\n\n # Merge all DataFrames on 'imageName' column using outer join\n df_merged = reduce(\n lambda left, right: pd.merge(left, right, on=[\"imageName\"], how=\"outer\"),\n data_frames.values(),\n )\n df_merged.to_csv(os.path.join(self.save_dir, \"split\", \"train.csv\"), index=False)\n\n # Test\n with open(os.path.join(self.save_dir, \"split\", \"validation.txt\"), \"r\") as file:\n lines = 
file.read().splitlines()\n # Create a DataFrame using Pandas\n data = {\"imageName\": lines, \"noise_alpha_00_beta_00\": \"clean\"}\n df = pd.DataFrame(data)\n # Save DataFrame to a .csv file\n df.to_csv(os.path.join(self.save_dir, \"split\", \"validation.csv\"), index=False)\n\n def make_directory(self, directory: str) -> None:\n \"\"\"\n Create a directory if it doesn't exist.\n\n Args:\n directory (str): The directory path to be created.\n \"\"\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n def copy_files(self, src_dir: Dict[str, str], tar_dir: Dict[str, str]) -> None:\n \"\"\"\n Copy files from the source directory to the target directory.\n\n Args:\n src_dir (Dict[str, str]): Source directory paths.\n tar_dir (Dict[str, str]): Target directory paths.\n \"\"\"\n for key in src_dir:\n shutil.copytree(src_dir[key], tar_dir[key])\n\n def split_shenzhen_dataset(self) -> None:\n \"\"\"\n Split the Shenzhen dataset into train and validation sets.\n \"\"\"\n image_dir = self.root + \"img/\"\n mask_dir = self.root + \"mask/\"\n samples = [\n name[:-9]\n for name in os.listdir(mask_dir)\n if \".png\" in name and name[0] != \".\"\n ]\n samples = sorted(samples)\n\n val_ratio = 0.3\n val_num = round(len(samples) * val_ratio)\n validation_samples = random.sample(samples, val_num)\n train_samples = [name for name in samples if name not in validation_samples]\n\n data_names = {\n \"validation\": validation_samples,\n \"train\": train_samples,\n }\n\n split_dir = self.save_dir + \"split/\"\n self.make_directory(split_dir)\n\n for phase in [\"train\", \"validation\"]:\n with open(split_dir + \"%s.txt\" % phase, \"w\") as f:\n for _id in data_names[phase]:\n f.write(\"%s\\n\" % _id)\n\n for phase in [\"train\", \"validation\"]:\n self.make_directory(self.save_dir + phase + \"/\")\n image_dir, label_dir = (\n self.save_dir + phase + \"/image/\",\n self.save_dir + phase + \"/label_png/\",\n )\n self.make_directory(image_dir)\n self.make_directory(label_dir)\n\n for name in tqdm(data_names[phase]):\n src = self.root + \"img/\" + name + \".png\"\n dst = image_dir + name + \".png\"\n arr = skimage.io.imread(src, as_gray=True)\n arr = cv2.resize(arr, self.out_shape, interpolation=cv2.INTER_LINEAR)\n arr = arr.astype(np.uint8)\n skimage.io.imsave(dst, arr, check_contrast=False)\n\n src = self.root + \"mask/\" + name + \"_mask.png\"\n dst = label_dir + name + \".png\"\n arr = skimage.io.imread(src, as_gray=True)\n arr[arr > 0] = 1\n arr = cv2.resize(arr, self.out_shape, interpolation=cv2.INTER_NEAREST)\n arr[arr > 0] = 255\n skimage.io.imsave(dst, arr, check_contrast=False)\n\n def split_isic_dataset(self) -> None:\n \"\"\"\n Split the ISIC dataset into train and validation sets.\n \"\"\"\n data_names = {}\n _label_dirs = {\n \"train\": self.root + \"ISIC-2017_Training_Part1_GroundTruth/\",\n \"validation\": self.root + \"ISIC-2017_Test_v2_Part1_GroundTruth/\",\n }\n\n for phase in [\"train\", \"validation\"]:\n _dir = _label_dirs[phase]\n samples = os.listdir(_dir)\n samples = [\n sample[:12]\n for sample in samples\n if \".png\" in sample and sample[0] != \".\"\n ]\n data_names[phase] = sorted(samples)\n\n split_dir = self.save_dir + \"split/\"\n self.make_directory(split_dir)\n\n for phase in [\"train\", \"validation\"]:\n with open(split_dir + \"%s.txt\" % phase, \"w\") as f:\n for _id in data_names[phase]:\n f.write(\"%s\\n\" % _id)\n\n out_shape = (256, 256)\n _img_dirs = {\n \"train\": self.root + \"ISIC-2017_Training_Data/\",\n \"validation\": self.root + 
\"ISIC-2017_Test_v2_Data/\",\n }\n\n for phase in [\"train\", \"validation\"]:\n self.make_directory(self.save_dir + phase + \"/\")\n image_dir, label_dir = (\n self.save_dir + phase + \"/image/\",\n self.save_dir + phase + \"/label_png/\",\n )\n self.make_directory(image_dir)\n self.make_directory(label_dir)\n\n for name in tqdm(data_names[phase]):\n src = _img_dirs[phase] + name + \".jpg\"\n dst = image_dir + name + \".png\"\n arr = skimage.io.imread(src)\n arr = cv2.resize(arr, out_shape, interpolation=cv2.INTER_LINEAR)\n arr = arr.astype(np.uint8)\n skimage.io.imsave(dst, arr, check_contrast=False)\n\n src = _label_dirs[phase] + name + \"_segmentation.png\"\n dst = label_dir + name + \".png\"\n arr = skimage.io.imread(src)\n arr[arr > 0] = 1\n arr = cv2.resize(arr, out_shape, interpolation=cv2.INTER_NEAREST)\n arr[arr > 0] = 255\n skimage.io.imsave(dst, arr, check_contrast=False)\n\n def split_dataset(self) -> None:\n \"\"\"\n Split the dataset based on the specified dataname.\n \"\"\"\n if self.dataname == \"shenzhen\":\n self.split_shenzhen_dataset()\n elif self.dataname == \"isic\":\n self.split_isic_dataset()\n\n def noisy_label_generation(self) -> None:\n \"\"\"\n Generate noisy labels for the dataset based on the specified noise ratios.\n \"\"\"\n from noise_generation import add_noise\n\n if self.dataname in [\"shenzhen\", \"isic\"]:\n load_dir = self.save_dir + \"train/image/\"\n sample_ids = [\n _id[:-4]\n for _id in os.listdir(load_dir)\n if _id[0] != \".\" and \".png\" in _id\n ]\n sample_ids = sorted(sample_ids)\n\n for alpha in tqdm(self.dataset_noise_ratio):\n for beta in self.sample_noise_ratio:\n target_dir = os.path.join(\n self.save_dir,\n \"train/label_noise_{}_{}_png/\".format(alpha, beta),\n )\n if os.path.exists(target_dir):\n shutil.rmtree(target_dir)\n self.make_directory(target_dir)\n\n log_path = os.path.join(\n self.save_dir, \"train/noise_{}_{}_log.txt\".format(alpha, beta)\n )\n log = open(log_path, \"w\")\n\n random.seed(0)\n ids = sample_ids.copy()\n random.shuffle(ids)\n noisy_sample_num = int(len(ids) * alpha)\n noisy_ids = ids[:noisy_sample_num]\n\n for _id in sample_ids:\n clean_label_path = os.path.join(\n self.save_dir, \"train/label_png/{}.png\".format(_id)\n )\n noisy_label_path = os.path.join(\n target_dir, \"{}.png\".format(_id)\n )\n clean_label = skimage.io.imread(clean_label_path, as_gray=True)\n clean_label[clean_label > 0] = 1\n\n if _id in noisy_ids:\n noisy_label, noise_type = add_noise(\n clean_label, noise_ratio=beta\n )\n else:\n noisy_label, noise_type = clean_label, \"clean\"\n noisy_label[noisy_label > 0] = 255\n skimage.io.imsave(\n noisy_label_path, noisy_label, check_contrast=False\n )\n log.write(\"%s\\t%s\\n\" % (_id, noise_type))\n log.close()\n\n def organize_json_directory(self) -> None:\n \"\"\"\n Organize the JSON directories for each class, split, and noise level.\n \"\"\"\n src_dirs = [\"label_png\"]\n for alpha in self.dataset_noise_ratio:\n for beta in self.sample_noise_ratio:\n src_dirs.append(\n os.path.join(\"label_noise_{}_{}_png\".format(alpha, beta))\n )\n\n phases = [\"train\", \"validation\"]\n\n for phase in phases:\n for subdir in src_dirs:\n src_dir = os.path.join(self.save_dir, phase, subdir)\n tar_dir = os.path.join(self.save_dir, phase, subdir[:-4])\n\n if phase == \"validation\":\n src_dir = os.path.join(self.save_dir, \"validation\", \"label_png\")\n tar_dir = os.path.join(self.save_dir, \"validation\", \"label\")\n self.make_directory(tar_dir)\n self.save_json_files(src_dir, tar_dir)\n else:\n 
self.make_directory(tar_dir)\n self.save_json_files(src_dir, tar_dir)\n\n def save_json_files(self, src_dir, tar_dir) -> None:\n \"\"\"\n Save JSON files from label images in the source directory to the target directory.\n\n Args:\n src_dir (str): Source directory path.\n tar_dir (str): Target directory path.\n \"\"\"\n label_files = os.listdir(src_dir)\n\n for label_file in tqdm(label_files):\n src_path = os.path.join(src_dir, label_file)\n tar_path = os.path.join(tar_dir, label_file[:-4] + \".json\")\n\n label_image = skimage.io.imread(src_path)\n label_image[label_image > 0] = 1\n label = {self.class_label: label_image}\n\n with open(tar_path, \"w\") as json_file:\n json.dump(label, json_file, cls=NpEncoder)\n\n def preprocess(self) -> None:\n self.make_directory(self.save_dir)\n self.split_dataset()\n self.noisy_label_generation()\n self.organize_json_directory()\n self.create_csv()\n\n\ndef main() -> None:\n parser = argparse.ArgumentParser(\n description=\"Add noise for the ISIC/Shenzen dataset\"\n )\n parser.add_argument(\n \"--dataname\",\n choices=[\"shenzhen\", \"isic\"],\n default=\"isic\",\n help=\"The dataset name ('shenzhen' or 'isic').\",\n )\n parser.add_argument(\n \"--dataset_noise_ratio\",\n nargs=\"+\",\n type=float,\n default=[0.3, 0.5, 0.7],\n help=\"List of noise ratios for the entire dataset.\",\n )\n parser.add_argument(\n \"--sample_noise_ratio\",\n nargs=\"+\",\n type=float,\n default=[0.5, 0.7],\n help=\"List of noise ratios for individual samples.\",\n )\n parser.add_argument(\n \"--root\",\n required=True,\n help=\"The root directory of the dataset.\",\n )\n\n args = parser.parse_args()\n\n preprocessor = DataPreprocessor(\n args.dataname,\n args.dataset_noise_ratio,\n args.sample_noise_ratio,\n args.root,\n )\n preprocessor.preprocess()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Digital-Dermatology/t-loss","sub_path":"datasets/noise/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":14909,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"9"} +{"seq_id":"73057905253","text":"import os\nfrom shutil import copyfile\nimport random\nimport torch\nimport numpy as np\nimport json\nfrom ptutils import PytorchLoop\n\ndef start_task(config, config_path):\n if os.path.exists(config['log_path']):\n config['log_path'] = config['log_path'] + '_new'\n return start_task(config, config_path)\n\n os.makedirs(config['log_path'])\n copyfile(config_path, os.path.join(config['log_path'], 'config_might_be_changed.py'))\n with open(os.path.join(config['log_path'], 'config.json'), \"w\") as f:\n f.write(json.dumps(config, indent=3, sort_keys=False))\n\n for value in config.values():\n if isinstance(value, dict):\n for sub_value in value.values():\n if 'source' in sub_value and '.py' in sub_value['source']:\n copyfile(sub_value['source'], os.path.join(config['log_path'], sub_value['source'].split('/')[-1]))\n\n torch.manual_seed(config['seed'])\n np.random.seed(config['seed'])\n random.seed(config['seed'])\n\n print(\"Begin working on task. 
Write output to \" + config['log_path'])\n loop = PytorchLoop.PytorchLoop(config)\n loop.start_loop()","repo_name":"Criscraft/pytorch_classification","sub_path":"pytorchtools/ptutils/start_task.py","file_name":"start_task.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"22772950624","text":"import os\nimport re\nfrom datetime import datetime\nfrom time import time\n\nfrom api import get_event, push_event\nfrom config import (\n ALLOWED_FILETYPES,\n EVENT_COOLDOWN,\n IMG_PATH,\n LOG_PATH,\n MAX_FILE_SIZE,\n MAX_POST_LENGTH,\n MAX_POST_ROWS,\n MIN_POST_LENGTH,\n REPORTS,\n)\nfrom flask import current_app, flash\nfrom flask_wtf import FlaskForm\nfrom utils import get_ip_from_request, get_new_uid, get_username, make_img_from_request\nfrom werkzeug.datastructures import FileStorage\nfrom werkzeug.utils import secure_filename\nfrom wtforms import (\n Form,\n PasswordField,\n SelectField,\n StringField,\n TextAreaField,\n validators,\n)\n\n\nclass FeedbackForm(FlaskForm):\n subject = StringField(\n label='Subject',\n validators=[validators.Length(min=1, max=64), validators.InputRequired()],\n )\n message = TextAreaField(\n label='Message',\n validators=[\n validators.Length(min=MIN_POST_LENGTH, max=MAX_POST_LENGTH),\n validators.InputRequired(),\n ],\n id='feedback_form_message_id',\n )\n\n\nclass ReportForm(FlaskForm):\n category = SelectField(label='Category', choices=[(r, r) for r in REPORTS])\n message = TextAreaField(\n label='Report',\n validators=[\n validators.Length(max=MAX_POST_LENGTH),\n ],\n id='report_form_message_id',\n )\n\n\nclass LoginForm(FlaskForm):\n username = StringField(\n label='Username',\n validators=[validators.Length(min=1, max=32), validators.InputRequired()],\n )\n password = PasswordField(label='Password', validators=[validators.InputRequired()])\n\n\nclass PostCompiler:\n def __init__(\n self,\n request,\n form_text_name=None,\n form_img_name=None,\n require_text=True,\n validate_text=True,\n require_img=True,\n ):\n self.valid = True\n self.invalid_message = None\n\n self.request = request\n self.text_name = form_text_name\n self.img_name = form_img_name\n self.require_text = require_text\n self.require_img = require_img\n self.validate_text = validate_text\n\n self.ip = get_ip_from_request(self.request)\n self.text = request.form.get(form_text_name, None)\n self.img = make_img_from_request(request, form_img_name)\n self.user = get_username()\n\n self.event = get_event(self.ip)\n push_event(self.ip, event=self.event)\n\n self.set_is_valid()\n\n def is_invalid_text(self):\n text_len = len(re.sub(r'\\s', '', self.text))\n if text_len < MIN_POST_LENGTH or text_len > MAX_POST_LENGTH:\n return f'Min characters ({MIN_POST_LENGTH}). 
Max characters ({MAX_POST_LENGTH}).'\n\n text_row_count = self.text.count('\\n')\n if text_row_count > MAX_POST_ROWS:\n return f'Max rows ({MAX_POST_ROWS})'\n\n return False\n\n def set_is_valid(self):\n assert self.invalid_message == None\n\n if self.require_img:\n if not self.img:\n self.invalid_message = 'No image submitted.'\n\n elif self.require_text:\n if not self.text:\n self.invalid_message = 'No text submitted.'\n elif self.validate_text:\n self.invalid_message = self.is_invalid_text()\n\n if self.invalid_message:\n self.valid = False\n","repo_name":"baronrustamov/Lamendo","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"43252092706","text":"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport inspect\nimport os\nimport re\nimport sys\nimport tempfile\nimport unittest\n\nfrom nose_parameterized import parameterized\nfrom nose.plugins.skip import SkipTest\nfrom nose.tools import istest\nfrom nose.tools import nottest\n\nfrom prov_interop import standards\nfrom prov_interop.component import ConfigError\nfrom prov_interop.converter import Converter\nfrom prov_interop.files import load_yaml\nfrom prov_interop.harness import HarnessResources\nfrom prov_interop.interop_tests import harness\n\n@nottest\ndef test_case_name(testcase_func, param_num, param):\n \"\"\":mod:`nose_parameterized` callback function to create custom \n test function names.\n\n This overrides the default method names created by\n :mod:`nose_parameterized`.\n\n :param testcase_func: test function\n :type testcase_func: function\n :param param_num: number of parameters in `param`\n :type param_num: int\n :param param: tuple of arguments to test function\n :type param: tuple of form (int, str or unicode, _, str or unicode,\n _) \n :return: test function name of form ``N_EXTIN_EXTOUT`` (e.g. \n ``test_case_1_provx_json``)\n :rtype: str or unicode\n \"\"\"\n (index, ext_in, _, ext_out, _) = param.args\n return str(\"%s_%s\" %(\n testcase_func.__name__,\n parameterized.to_safe_name(str(index) + \"_\" + ext_in + \"_\" + ext_out)))\n\n@nottest\nclass ConverterTestCase(unittest.TestCase):\n \"\"\"Base class for converter interoperability tests.\n\n This class implements the procedure for testing a converter using a\n test case and a comparator: \n \n - A converter translates ``testcaseNNNN/file.`` to \n ``converted.``.\n - A comparator compares ``testcaseNNNN/file.`` to \n ``converted.`` for equivalence, which results in either\n success or failure. \n\n This class is sub-classed by test classes for each converter.\n \"\"\"\n\n SKIP_TESTS = \"skip-tests\"\n \"\"\"str or unicode: configuration key for tests to skip\"\"\"\n\n _multiprocess_can_split_ = True\n\n def setUp(self):\n super(ConverterTestCase, self).setUp()\n self.converter = None\n self.skip_tests = []\n self.converter_ext_out = None\n\n def tearDown(self):\n super(ConverterTestCase, self).tearDown()\n if self.converter_ext_out != None and \\\n os.path.isfile(self.converter_ext_out):\n os.remove(self.converter_ext_out)\n\n def shortDescription(self):\n \"\"\"Suppress use of docstring by nose when printing tests being run\"\"\"\n return None\n\n def configure(self, config_key, env_var, default_file_name):\n \"\"\"Get the configuration for the converter to be tested within a\n sub-class. 
\n\n The method assumes the converter has been created and stored in an\n instance variable. It loads the contents of a YAML file (using\n :func:`prov_interop.files.load_yaml`) into a Python\n dictionary. The file loaded is: \n\n - The value of an entry in\n :class:`prov_interop.harness.HarnessResources` configuration with\n name `config_key`, if any. \n - Else, the file named in the environment variable named in\n `env_var`, if such an environment variable has been defined. \n - Else, `default_file_name`.\n\n Once loaded, a dictionary entry with whose key is the value of\n `config_key` is extracted and used to configure the converter via\n its :meth:`prov_interop.converter.Converter.configure` method. \n\n In addition to converter-specific configuration, this\n configuration can also hold:\n\n - ``skip-tests``: a list of the indices of zero or more tests that\n are to be skipped for this converter. \n\n If so, then this list is cached in an instance variable.\n\n An example configuration, in the form of a Python dictionary, and\n for ProvPy ``prov-convert``, is::\n\n {\n \"ProvPy\": {\n \"executable\": \"prov-convert\"\n \"arguments\": \"-f FORMAT INPUT OUTPUT\"\n \"input-formats\": [\"json\"]\n \"output-formats\": [\"provn\", \"provx\", \"json\"]\n skip-tests: [2, 3, 5]\n }\n }\n\n The corresponding YAML configuration file is::\n\n ---\n ProvPy: \n executable: prov-convert\n arguments: -f FORMAT INPUT OUTPUT\n input-formats: [json]\n output-formats: [provn, provx, json]\n skip-tests: [2, 3, 5]\n\n :param config_key: Key to access converter-specific configuration\n :type config_key: str or unicode\n :param env_var: Environment variable with configuration file name\n :type env_var: str or unicode\n :param default_file_name: Default configuration file name\n :type file_name: str or unicode\n :raises IOError: if the file is not found\n :raises ConfigError: if there is no entry with value `config_key`\n within the configuration, or if converter-specific\n configuration information is missing\n :raises YamlError: if the file is an invalid YAML file\n \"\"\"\n config_file_name = None\n if config_key in harness.harness_resources.configuration:\n config_file_name = harness.harness_resources.configuration[config_key]\n config = load_yaml(env_var,\n default_file_name,\n config_file_name)\n if config_key not in config:\n raise ConfigError(\"Missing configuration for \" + config_key)\n self.converter.configure(config[config_key])\n if ConverterTestCase.SKIP_TESTS in self.converter.configuration:\n self.skip_tests = self.converter.configuration[\n ConverterTestCase.SKIP_TESTS]\n\n def skip_member_of_skip_set(self, index):\n \"\"\"Raise a :class:`nose.plugins.skip.SkipTest` if this test\n case is marked as to be skipped for the converter. Tests to be\n skipped are recorded in the optional ``skip-tests``\n configuration. \n\n :param index: Test case index\n :type index: int\n :raises nose.plugins.skip.SkipTest: always\n \"\"\"\n print((\"Skipping as \" + str(index) + \" in skip-tests\"))\n raise SkipTest((\"Test case \" + str(index) +\n \" in \" + self.converter.__class__.__name__ + \n \" skip-tests\"))\n\n def skip_unsupported_format(self, index, format, format_type):\n \"\"\"Raise a :class:`nose.plugins.skip.SkipTest` if a specific\n conversion is to be skipped because the converter does not support\n one of the formats. 
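    The message attached to the raised SkipTest names both the
    unsupported format and the converter class, so skipped conversions
    are easy to identify in the test output.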
\n\n :param index: Test case index\n :type index: int\n :param format: one of the formats in :mod:`prov_interop.standards`\n :type format: str or unicode\n :param format_type: Converter configuration key indicating \n which format is not supported (e.g. ``input-format`` or\n ``output-format``)\n :type format_type: str or unicode\n :raises nose.plugins.skip.SkipTest: always\n \"\"\"\n print((\"Skipping as \" + format + \" not in converter's \" + format_type))\n raise SkipTest((\"Format \" + format +\n \" not in \" + self.converter.__class__.__name__ + \n \" \" + format_type))\n\n @nottest\n def initialise_test_harness():\n \"\"\"Initialises the test harness and provides the test cases as a\n generator. \n\n The test harness is bootstrapped by a call to\n :func:`prov_interop.interop_tests.harness.initialise_harness_from_file`. \n This method provides test case tuples by returning the generator,\n :meth:`prov_interop.harness.HarnessResources.test_cases_generator`,\n so that :mod:`nose_parameterized` can dynamically create the test\n methods (see\n :meth:`prov_interop.interop_tests.test_converter.ConverterTestCase.test_case`).\n\n If running Sphinx to create API documentation then the test\n harness initialisation is not done and, instead, a generator \n that contains zero test cases is returned. This is a hack to\n work around Sphinx's execution of the Python it parses.\n\n :returns: test case tuple\n :rtype: tuple of (int, str or unicode, str or unicode, str or\n unicode, str or unicode) \n :raises ConfigError: if the test cases directory is not found\n \"\"\"\n if \"sphinx-build\" in sys.argv[0]:\n return (nothing for nothing in ())\n else:\n harness.initialise_harness_from_file()\n return harness.harness_resources.test_cases_generator()\n\n @parameterized.expand(initialise_test_harness(), \n testcase_func_name=test_case_name)\n def test_case(self, index, ext_in, file_ext_in, ext_out, file_ext_out):\n \"\"\"Test a converter's conversion of a file in one format to\n another format. \n\n This generic test method implements the following test procedure: \n\n - If the test case `index` is in the optional ``skip-tests``\n configuration for the converter then the test is skipped, by\n raising :class:`nose.plugins.skip.SkipTest`. \n - If `ext_in` or `ext_out` are not in the ``input-formats`` or\n ``output-formats`` for the converter then the test is skipped,\n again by raising :class:`nose.plugins.skip.SkipTest`. \n - The converter translates ``testcaseNNNN/file_ext_in`` to \n ``out.ext_out``.\n - The comparator for `ext_out` registered with\n :class:`prov_interop.harness.HarnessResources` is retrieved. \n - The comparator compares ``testcaseNNNN/file.ext_out`` to \n ``out.ext_out`` for equivalence, which results in either success\n or failure. \n\n :mod:`nose_parameterized`, in conjunction with the test case\n tuples provided via the generator,\n :meth:`prov_interop.harness.HarnessResources.test_cases_generator`,\n is used to dynamically create test methods for each test case\n tuple. When this class is loaded, :mod:`nose_parameterized`\n will iterate through each of the test cases and create\n corresponding test methods::\n\n test_case_1_json_json\n test_case_1_provx_json\n test_case_1_json_provx\n test_case_1_provx_provx\n ...\n\n The arguments passed into each test method, `(index, ext_in,\n file_ext_in, ext_out, file_ext_out)` are those from the tuple that\n was used to create that method. 
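    For example, ``test_case_1_provx_json`` is invoked with index ``1``,
    ``ext_in="provx"`` and ``ext_out="json"``, plus the matching test case
    file paths.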
\n\n :param index: Test case index\n :type index: int\n :param ext_in: input format, one of the formats in\n :mod:`prov_interop.standards`\n :type ext_in: str or unicode\n :param file_ext_in: input file, assumed to have extension `ext_in`\n :type file_ext_in: str or unicode\n :param ext_out: output format, one of the formats in\n :mod:`prov_interop.standards`\n :type ext_out: str or unicode\n :param file_ext_out: output file, assumed to have extension `ext_out`\n :type file_ext_out: str or unicode\n :raises nose.plugins.skip.SkipTest: if the test case is to be\n skipped, or the input format or output format are not supported\n by the converter\n \"\"\"\n print((\"Test case: \" + str(index) + \n \" from \" + ext_in + \n \" to \" + ext_out + \" Process: \" + str(os.getpid())))\n if index in self.skip_tests:\n self.skip_member_of_skip_set(index)\n if (not ext_in in self.converter.input_formats):\n self.skip_unsupported_format(index, ext_in, Converter.INPUT_FORMATS)\n if (not ext_out in self.converter.output_formats):\n self.skip_unsupported_format(index, ext_out, Converter.OUTPUT_FORMATS)\n self.converter_ext_out = \"out.\" + str(os.getpid()) + \".\" + ext_out\n self.converter.convert(file_ext_in, self.converter_ext_out)\n comparator = harness.harness_resources.format_comparators[ext_out]\n are_equivalent = comparator.compare(file_ext_out, self.converter_ext_out)\n self.assertTrue(are_equivalent, \\\n msg=\"Test failed: \" + file_ext_out + \n \" does not match \" + self.converter_ext_out + \n\t \" converted from \" + file_ext_in)\n","repo_name":"openprov/interop-test-harness","sub_path":"prov_interop/interop_tests/test_converter.py","file_name":"test_converter.py","file_ext":"py","file_size_in_byte":11639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"75068682214","text":"from django.contrib import admin\n\n# Local\nfrom .models import Message\n\n\n@admin.register(Message)\nclass MessageAdmin(admin.ModelAdmin):\n\n readonly_fields = (\n \"sender\",\n \"recipient\",\n \"parent\",\n \"message\",\n \"community\",\n \"read\",\n )\n list_display = (\"sender\", \"recipient\", \"created\")\n search_fields = (\n \"search_document\",\n \"recipient__username\",\n \"sender__username\",\n )\n ordering = (\"-created\",)\n","repo_name":"danjac/localhub","sub_path":"localhub/private_messages/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"9"} +{"seq_id":"36055428109","text":"from pprint import pprint\nimport gzip\nimport gensim\nimport gensim.corpora as corpora\nfrom gensim.utils import simple_preprocess\nfrom gensim.models import CoherenceModel\nimport pickle\nimport pandas as pd\n\ndef topic_model(cleaned_captions_fn, outfolder=\"data/\"):\n video_captions = pd.read_csv(cleaned_captions_fn)\n data_words = video_captions[\"text\"].str.split(\" \").to_list()\n \n # Build the bigram and trigram models\n bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases.\n trigram = gensim.models.Phrases(bigram[data_words], threshold=100) \n\n # Faster way to get a sentence clubbed as a trigram/bigram\n bigram_mod = gensim.models.phrases.Phraser(bigram)\n trigram_mod = gensim.models.phrases.Phraser(trigram)\n \n # Form Bigrams\n data_words_bigrams = make_bigrams(data_words, bigram_mod)\n \n # Create Dictionary\n id2word = corpora.Dictionary(data_words)\n\n # Create Corpus\n texts = data_words\n\n # Term Document Frequency\n 
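    # doc2bow turns one tokenized document into a sparse bag-of-words
    # vector: a list of (token_id, token_count) pairs, e.g. [(0, 1), (4, 2)]
    # means the dictionary's token 0 occurs once and token 4 twice.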
corpus = [id2word.doc2bow(text) for text in texts]\n\n # View\n # print(corpus[:1])\n\n # Human readable format of corpus (term-frequency)\n [[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]]\n \n # Find optimal number of topics\n limit=40; start=2; step=6;\n model_list, coherence_values = compute_coherence_values(dictionary=id2word, corpus=corpus, texts=data_words, start=start, limit=limit, \n step=step)\n x = range(start, limit, step)\n \n max_coherence = 0\n optimal_topics = 0\n for m, cv in zip(x, coherence_values):\n if cv > max_coherence:\n max_coherence = cv\n optimal_topics = m\n \n # Build LDA model\n lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,\n id2word=id2word,\n num_topics=optimal_topics, \n random_state=100,\n update_every=1,\n chunksize=100,\n passes=10,\n alpha='auto',\n per_word_topics=True)\n \n with open('results/top_topics.txt', 'w') as topic_file:\n topics=lda_model.top_topics(corpus)\n topic_file.write('\\n'.join('%s %s' %topic for topic in topics))\n \n with open(\"results/LdaModel.pk\", 'wb') as pickle_file:\n pickle.dump(lda_model, pickle_file)\n\n\ndef make_bigrams(texts, bigram_mod):\n return [bigram_mod[doc] for doc in texts]\n\ndef make_trigrams(texts, bigram_mod, trigram_mod):\n return [trigram_mod[bigram_mod[doc]] for doc in texts]\n \ndef compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):\n \"\"\"\n Compute c_v coherence for various number of topics\n\n Parameters:\n ----------\n dictionary : Gensim dictionary\n corpus : Gensim corpus\n texts : List of input texts\n limit : Max num of topics\n\n Returns:\n -------\n model_list : List of LDA topic models\n coherence_values : Coherence values corresponding to the LDA model with respective number of topics\n \"\"\"\n coherence_values = []\n model_list = []\n for num_topics in range(start, limit, step):\n model = gensim.models.ldamodel.LdaModel(corpus=corpus,\n id2word=dictionary,\n num_topics=num_topics, \n random_state=100,\n update_every=1,\n chunksize=100,\n passes=10,\n alpha='auto',\n per_word_topics=True)\n model_list.append(model)\n coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')\n coherence_values.append(coherencemodel.get_coherence())\n\n return model_list, coherence_values","repo_name":"fieryashes/DSC180B_Misinformation_Project","sub_path":"src/analysis/topic_modeling.py","file_name":"topic_modeling.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"8312454817","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport os\nimport numpy as np\n\n\n# In[2]:\n\n\ncolumns = ['day', 'hospitalizeNum', 'isolateNum', 'quarantineNum', 'confineNum',\n 'free', 'CurrentHealthy', 'CurrentInfected', 'CurrentEffective','CurrentSusceptible', 'CurrentIncubation', 'CurrentDiscovered', 'CurrentCritical', 'CurrentRecovered',\n 'AccDiscovered', 'AccCritical', 'AccAcquaintance', 'AccStranger', 'measurement'] \n\n# Here are some consts for metrics\nI_threshold = 500\nQ_threshold = 10000\nQ_weight = 1\ntime = 59 # The last day of our simulation\n\n\n# In[3]:\n\n\ndef process_file(file):\n # Read data\n\n df = pd.read_csv(file, sep=',', engine='python', header=None)\n df.columns = columns\n\n # Accumulate \n for item in ['hospitalizeNum', 'isolateNum', 'quarantineNum', 'confineNum']:\n sum = 0\n list_sum = []\n ind = 0\n l = len(df[item])\n while ind < l:\n sum += df[item][ind]\n list_sum.append(sum)\n if df[\"day\"][ind] == time:\n sum = 0\n ind += 
1\n df[\"sum_\"+item] = np.array(list_sum)\n return df\n\n\n# In[4]:\n\n\ndef get_I_Q(df, time):\n # Get I and Q value at the given time\n\n df_sub = df[df[\"day\"]==time]\n I = df_sub[\"CurrentInfected\"].mean()\n\n inHospital_mean = df_sub[\"sum_hospitalizeNum\"].mean()\n isolateNum_mean = df_sub[\"sum_isolateNum\"].mean()\n confineNum_mean = df_sub[\"sum_confineNum\"].mean()\n quarantineNum_mean = df_sub[\"sum_quarantineNum\"].mean()\n Q = 1 * inHospital_mean + 0.5 * isolateNum_mean+ 0.3 * quarantineNum_mean + 0.2 * confineNum_mean\n\n return I, Q\n\n\n# In[5]:\n\n\ndef get_least_Q_score(I, Q, I_threshold):\n score = np.copy(Q)\n score[I > I_threshold] = 1e6\n\n return score\n\n\n# In[6]:\n\n\ndef get_exp_score(I, Q, I_threshold, Q_threshold, Q_weight):\n I_score = np.exp(I/I_threshold)\n Q_score = Q_weight * (np.exp(Q/ Q_threshold))\n \n return I_score + Q_score\n\n\n# In[7]:\ndef main():\n df = process_file(\"examples/test/cnt_test.txt\")\n I, Q = get_I_Q(df, time)\n least_Q_score = get_least_Q_score(I, Q, I_threshold)\n exp_score = get_exp_score(I, Q, I_threshold, Q_threshold, Q_weight)\n print(least_Q_score, exp_score)","repo_name":"prescriptive-analytics/starter-kit","sub_path":"summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"9"} +{"seq_id":"1798372337","text":"def MultipleBrackets(string):\n stackofBrackets = []\n count = 0\n \n if (string.count(\"(\") == 0 and string.count(\")\") == 0) or (string.count(\"[\") == 0 and string.count(\"]\") == 0):\n return 1\n \n \n for x in string:\n if x == \"(\" or x== \"[\":\n #print(x)\n stackofBrackets.append(x)\n if x == \")\":\n if \"(\" in stackofBrackets:\n idx = stackofBrackets.index(\"(\")\n del stackofBrackets[idx]\n count += 1\n else:\n #print(\"failed ) \")\n return 0 \n elif x == \"]\":\n if \"[\" in stackofBrackets:\n idx = stackofBrackets.index(\"[\")\n del stackofBrackets[idx]\n count += 1\n else:\n #print(\"failed ]\")\n return 0\n #print(stackofBrackets)\n \n #print(stackofBrackets)\n if len(stackofBrackets) == 0:\n return str(1) +\" \"+ str(count)\n else:\n return 0 \n \n \n \n\n\nprint(MultipleBrackets( \"one(bracket)\" ))\n ","repo_name":"gokou00/python_programming_challenges","sub_path":"coderbyte/MultipleBrackets.py","file_name":"MultipleBrackets.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"19234766808","text":"import time\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nimport matplotlib\nmatplotlib.use('TKAgg')\nfrom matplotlib import pyplot\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import auc, roc_curve\nfrom sklearn import metrics, svm\nfrom sklearn.metrics import precision_recall_fscore_support\nimport pandas as pd\nfrom sklearn import preprocessing\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom sklearn.utils import shuffle\nfrom sklearn import tree\nfrom sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score, auc\n\n################################################################################\ndef dataProc(X1,y1):\n X1 = pd.DataFrame(X1)\n X1 = X1.fillna(X1.mean())\n 
X1=X1.to_numpy()\n y1 = y1.to_numpy()\n \n \n min_max_scaler = preprocessing.MinMaxScaler()\n X1 = min_max_scaler.fit_transform(X1)\n \n X1, y1 = shuffle(X1, y1, random_state=0)\n return X1,y1\n\n\ndef evaluateROC (clf, Xt, yt):\n aucScores = []\n y_pred = clf.predict(Xt)\n aucScore = roc_auc_score(yt, y_pred) \n fpr, tpr, thresholds = metrics.roc_curve(yt,y_pred)\n \n return fpr, tpr, thresholds, aucScore\n\n\n\n\n \n#################################################################################\n\ndef classifierTest(sysName):\n testType = \"ClassifiersTest_AUC\"\n X = []\n Xv = []\n Xt = []\n y = []\n yv = []\n yt = []\n \n for i in range(1,11,1):\n X.append(pd.read_csv(\"Data/{}/{}Data_10k_{}Sparsity.csv\".format(sysName, sysName, i/10 ), header=None))\n Xt.append(pd.read_csv(\"Data/{}/{}Data_2k_{}Sparsity.csv\".format(sysName, sysName, i/10 ), header=None))\n y.append(pd.read_csv(\"Data/{}/{}Labels_10k_{}Sparsity.csv\".format(sysName, sysName, i/10 ), header=None))\n yt.append(pd.read_csv(\"Data/{}/{}Labels_2k_{}Sparsity.csv\".format(sysName, sysName, i/10 ), header=None))\n \n X[i-1],y[i-1] = dataProc(X[i-1],y[i-1])\n Xt[i-1],yt[i-1] = dataProc(Xt[i-1],yt[i-1])\n \n \n \n numFeatures = len(X[0])\n X_train = np.concatenate((X[0],X[1]), axis=0)\n X_test = np.concatenate((Xt[0],Xt[1]), axis=0)\n \n y_train = np.concatenate((y[0],y[1]), axis=0)\n y_test = np.concatenate((yt[0],yt[1]), axis=0)\n \n for i in range(2,10,1):\n X_train = np.concatenate((X_train,X[i]), axis=0) \n X_test = np.concatenate((X_test,Xt[i]), axis=0) \n y_train = np.concatenate((y_train,y[i]), axis=0)\n y_test = np.concatenate((y_test,yt[i]), axis=0)\n \n \n X_train, y_train = shuffle(X_train, y_train, random_state=0)\n X_test, y_test = shuffle(X_test, y_test, random_state=0)\n \n #######################################################################################\n #Classifiers\n gnb = GaussianNB()\n knn = KNeighborsClassifier(n_neighbors=20)\n dtc = tree.DecisionTreeClassifier()\n svmL = svm.SVC(kernel = 'linear')\n svmR = svm.SVC(kernel = 'rbf')\n \n print(\"Initializing training\")\n #Training Classifiers\n time1 = time.time()\n gnb.fit(X_train,y_train)\n time2 = time.time()\n gnbTime = time2 - time1;\n print(\"GNB DONE\")\n fpr_gnb, tpr_gnb, thresh_gnb, auc_gnb = evaluateROC(gnb, X_test, y_test)\n \n time1= time.time()\n dtc.fit(X_train,y_train)\n time2 = time.time()\n dctTime = time2 - time1;\n print(\"DT DONE\")\n \n fpr_dtc, tpr_dtc, thresh_dtc, auc_dtc = evaluateROC(dtc, X_test, y_test)\n \n time1 = time.time()\n knn.fit(X_train,y_train)\n time2 = time.time()\n knnTime = time2 - time1;\n print(\"KNN DONE\")\n fpr_knn, tpr_knn, thresh_knn, auc_knn = evaluateROC(knn, X_test, y_test)\n \n print(\"Evaluation Done\")\n \n finalModels = ['DCT',\n 'KNN',\n 'NB'\n ]\n \n ################ DATA OUTPUT (Saving in Excel) ###############\n # Create a Pandas Excel writer using XlsxWriter as the engine.\n writer = pd.ExcelWriter('output/RESULTS_'+sysName+'_'+testType+'.xlsx', engine='xlsxwriter') #CHANGE THE NAME OF THE OUTPUT EXCEL FILE HERE\n \n \n Results_dct = pd.DataFrame({'False Positive Rate': fpr_dtc, 'True Positive Rate': tpr_dtc, 'Thresholds': thresh_dtc, 'AUC': auc_dtc})\n Results_gnb = pd.DataFrame({'False Positive Rate': fpr_gnb, 'True Positive Rate': tpr_gnb, 'Thresholds': thresh_gnb, 'AUC': auc_gnb})\n Results_knn = pd.DataFrame({'False Positive Rate': fpr_knn, 'True Positive Rate': tpr_knn, 'Thresholds': thresh_knn, 'AUC': auc_knn})\n \n \n #Results_times = pd.DataFrame({'Model': finalModels, 'Training 
Time': finalTimes})\n \n # Convert the dataframe to an XlsxWriter Excel object.\n Results_dct.to_excel(writer, sheet_name=\"DCT\")\n Results_gnb.to_excel(writer, sheet_name=\"GNB\")\n Results_knn.to_excel(writer, sheet_name=\"KNN\")\n \n # Close the Pandas Excel writer and output the Excel file.\n writer.save() \n\n\n\n\n\n\n\n\n\n\n\n\nclassifierTest(\"IEEE14\")\nclassifierTest(\"IEEE30\")\nclassifierTest(\"IEEE57\")\nprint(\"PROGRAM IS COMPLETE !!!!! \")\n","repo_name":"jsakhnin/DL_FDI","sub_path":"ClassifiersTest_auc.py","file_name":"ClassifiersTest_auc.py","file_ext":"py","file_size_in_byte":5255,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"9"} +{"seq_id":"21194003941","text":"from typing import Any, Dict\nfrom unittest.mock import patch\n\nimport pytest\nfrom pytest_assert_utils import assert_model_attrs\nfrom pytest_common_subject import precondition_fixture\nfrom pytest_drf import (\n Returns200,\n Returns201,\n Returns204,\n Returns500,\n UsesDeleteMethod,\n UsesDetailEndpoint,\n UsesGetMethod,\n UsesListEndpoint,\n UsesPatchMethod,\n UsesPostMethod,\n ViewSetTest,\n)\nfrom pytest_drf.util import pluralized, url_for\nfrom pytest_lambda import lambda_fixture, static_fixture\nfrom requests import HTTPError\nfrom tools.models import Organization\n\npytestmark = pytest.mark.django_db\n\n\ndef express_organization(organization: Organization) -> Dict[str, Any]:\n return {\n \"id\": organization.id,\n \"name\": organization.name,\n \"code\": organization.code,\n \"tags\": [tag for tag in organization.tags.all()],\n }\n\n\nexpress_organizations = pluralized(express_organization)\n\n\nclass TestOrganizationViewSet(ViewSetTest):\n @pytest.fixture\n def organizations(self):\n with patch(\"katalogus.client.KATalogusClientV1\"), patch(\"tools.models.OctopoesAPIConnector\"):\n return [\n Organization.objects.create(name=\"Test Organization 1\", code=\"test1\", tags=[\"tag1\", \"tag2\"]),\n Organization.objects.create(name=\"Test Organization 2\", code=\"test2\"),\n ]\n\n organization = lambda_fixture(lambda organizations: organizations[0])\n\n list_url = lambda_fixture(lambda: url_for(\"organization-list\"))\n detail_url = lambda_fixture(lambda organization: url_for(\"organization-detail\", organization.pk))\n\n @pytest.fixture\n def client(self, create_drf_client, admin_user):\n client = create_drf_client(admin_user)\n # We need to set this so that the test client doesn't throw an\n # exception, but will return error in the API we can test\n client.raise_request_exception = False\n return client\n\n class TestList(\n UsesGetMethod,\n UsesListEndpoint,\n Returns200,\n ):\n def test_it_returns_values(self, organizations, json):\n expected = express_organizations(organizations)\n actual = json\n assert actual == expected\n\n class TestCreate(\n UsesPostMethod,\n UsesListEndpoint,\n Returns201,\n ):\n data = static_fixture({\"name\": \"Test Org 3\", \"code\": \"test3\", \"tags\": [\"tag2\", \"tag3\"]})\n\n initial_ids = precondition_fixture(\n lambda mock_models_katalogus, mock_models_octopoes, organizations: set(\n Organization.objects.values_list(\"id\", flat=True)\n ),\n async_=False,\n )\n\n def test_it_creates_new_organization(self, initial_ids, json):\n expected = initial_ids | {json[\"id\"]}\n actual = set(Organization.objects.values_list(\"id\", flat=True))\n assert actual == expected\n\n def test_it_sets_expected_attrs(self, data, json):\n organization = Organization.objects.get(pk=json[\"id\"])\n\n expected = data\n 
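            # assert_model_attrs (from pytest-assert-utils) checks each
            # key/value in `expected` against the attribute of the same name
            # on the freshly loaded Organization instance.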
assert_model_attrs(organization, expected)\n\n def test_it_returns_organization(self, json):\n organization = Organization.objects.get(pk=json[\"id\"])\n\n expected = express_organization(organization)\n actual = json\n assert actual == expected\n\n class TestCreateKatalogusError(\n UsesPostMethod,\n UsesListEndpoint,\n Returns500,\n ):\n data = static_fixture({\"name\": \"Test Org 3\", \"code\": \"test3\", \"tags\": [\"tag2\", \"tag3\"]})\n\n @pytest.fixture(autouse=True)\n def mock_services(self, mocker):\n mocker.patch(\"tools.models.KATalogusClientV1.organization_exists\", return_value=False)\n mocker.patch(\"tools.models.KATalogusClientV1.create_organization\", side_effect=HTTPError(\"Test error\"))\n mocker.patch(\"tools.models.KATalogusClientV1.health\")\n mocker.patch(\"tools.models.OctopoesAPIConnector.root_health\")\n mocker.patch(\"tools.models.OctopoesAPIConnector.create_node\")\n\n def test_it_returns_error(self, json):\n expected = {\n \"type\": \"server_error\",\n \"errors\": [\n {\n \"code\": \"error\",\n \"detail\": \"Failed creating organization in the Katalogus\",\n \"attr\": None,\n }\n ],\n }\n assert json == expected\n\n class TestCreateOctopoesError(\n UsesPostMethod,\n UsesListEndpoint,\n Returns500,\n ):\n data = static_fixture({\"name\": \"Test Org 3\", \"code\": \"test3\", \"tags\": [\"tag2\", \"tag3\"]})\n\n @pytest.fixture(autouse=True)\n def mock_services(self, mocker):\n mocker.patch(\"tools.models.KATalogusClientV1.health\")\n mocker.patch(\"tools.models.KATalogusClientV1.organization_exists\", return_value=False)\n mocker.patch(\"tools.models.KATalogusClientV1.create_organization\")\n mocker.patch(\"tools.models.KATalogusClientV1.delete_organization\") # Needed because of the \"rollback\"\n mocker.patch(\"tools.models.OctopoesAPIConnector.root_health\")\n mocker.patch(\"tools.models.OctopoesAPIConnector.create_node\", side_effect=HTTPError(\"Test error\"))\n\n def test_it_returns_error(self, json):\n expected = {\n \"type\": \"server_error\",\n \"errors\": [\n {\n \"code\": \"error\",\n \"detail\": \"Failed creating organization in Octopoes\",\n \"attr\": None,\n }\n ],\n }\n assert json == expected\n\n class TestRetrieve(\n UsesGetMethod,\n UsesDetailEndpoint,\n Returns200,\n ):\n def test_it_returns_organization(self, organization, json):\n expected = express_organization(organization)\n actual = json\n assert actual == expected\n\n class TestUpdate(\n UsesPatchMethod,\n UsesDetailEndpoint,\n Returns200,\n ):\n data = static_fixture(\n {\n \"name\": \"Changed Organization\",\n \"code\": \"test4\",\n \"tags\": [\"tag3\", \"tag4\"],\n }\n )\n\n # Code is read only so shouldn't change\n expected_data = {\n \"name\": \"Changed Organization\",\n \"code\": \"test1\",\n }\n\n @pytest.fixture(autouse=True)\n def mock_services(self, mocker):\n mocker.patch(\"tools.models.KATalogusClientV1.health\")\n mocker.patch(\"tools.models.KATalogusClientV1.organization_exists\", return_value=False)\n mocker.patch(\"tools.models.KATalogusClientV1.create_organization\")\n mocker.patch(\"tools.models.KATalogusClientV1.delete_organization\") # Needed because of the \"rollback\"\n mocker.patch(\"tools.models.OctopoesAPIConnector\")\n\n def test_it_sets_expected_attrs(self, organization):\n # We must tell Django to grab fresh data from the database, or we'll\n # see our stale initial data and think our endpoint is broken!\n organization.refresh_from_db()\n\n assert_model_attrs(organization, self.expected_data)\n assert {str(tag) for tag in organization.tags.all()} == {\"tag3\", 
\"tag4\"}\n\n def test_it_returns_organization(self, organization, json):\n organization.refresh_from_db()\n\n expected = express_organization(organization)\n actual = json\n assert actual == expected\n\n class TestDestroy(\n UsesDeleteMethod,\n UsesDetailEndpoint,\n Returns204,\n ):\n initial_ids = precondition_fixture(\n lambda mock_models_katalogus, mock_models_octopoes, organizations: set(\n Organization.objects.values_list(\"id\", flat=True)\n ),\n async_=False,\n )\n\n def test_it_deletes_organization(self, initial_ids, organization):\n expected = initial_ids - {organization.id}\n actual = set(Organization.objects.values_list(\"id\", flat=True))\n assert actual == expected\n\n class TestDestroyKatalogusError(\n UsesDeleteMethod,\n UsesDetailEndpoint,\n Returns500,\n ):\n @pytest.fixture(autouse=True)\n def mock_services(self, mocker):\n mocker.patch(\"tools.models.KATalogusClientV1.health\")\n mocker.patch(\"tools.models.KATalogusClientV1.delete_organization\", side_effect=HTTPError(\"Test error\"))\n mocker.patch(\"tools.models.OctopoesAPIConnector\")\n\n def test_it_returns_error(self, json):\n expected = {\n \"type\": \"server_error\",\n \"errors\": [\n {\n \"code\": \"error\",\n \"detail\": \"Failed deleting organization in the Katalogus\",\n \"attr\": None,\n }\n ],\n }\n assert json == expected\n\n class TestDestroyOctopoesError(\n UsesDeleteMethod,\n UsesDetailEndpoint,\n Returns500,\n ):\n @pytest.fixture(autouse=True)\n def mock_services(self, mocker):\n mocker.patch(\"tools.models.KATalogusClientV1.health\")\n mocker.patch(\"tools.models.KATalogusClientV1.delete_organization\")\n mocker.patch(\"tools.models.OctopoesAPIConnector.root_health\")\n mocker.patch(\"tools.models.OctopoesAPIConnector.delete_node\", side_effect=HTTPError(\"Test error\"))\n\n def test_it_returns_error(self, json):\n expected = {\n \"type\": \"server_error\",\n \"errors\": [\n {\n \"code\": \"error\",\n \"detail\": \"Failed deleting organization in Octopoes\",\n \"attr\": None,\n }\n ],\n }\n assert json == expected\n","repo_name":"minvws/nl-kat-coordination","sub_path":"rocky/tests/test_api_organization.py","file_name":"test_api_organization.py","file_ext":"py","file_size_in_byte":10026,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"9"} +{"seq_id":"21399955712","text":"from homeassistant import config_entries\nfrom homeassistant.const import CONF_NAME, CONF_URL, CONF_TIMEOUT, CONF_SCAN_INTERVAL\nfrom homeassistant.helpers.selector import TextSelector, TextSelectorConfig, TextSelectorType, NumberSelector, NumberSelectorConfig, NumberSelectorMode\nfrom homeassistant.helpers.aiohttp_client import async_create_clientsession\nimport voluptuous as vol\n\nfrom .api import FileFlowsApiClient\nfrom .const import DOMAIN\n\n\nclass FileFlowsConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):\n \"\"\"Config flow for file flows integration\"\"\"\n # The schema version of the config\n # Home Assistant will call your migrate method if the version changes\n VERSION = 1\n\n async def async_step_user(self, user_input):\n errors = {}\n\n if user_input is not None:\n # Test connection\n valid = await self.__test_connection(\n user_input[CONF_URL], user_input[CONF_TIMEOUT]\n )\n if valid:\n return self.async_create_entry(\n title=user_input[CONF_NAME], data=user_input\n )\n else:\n errors[\"base\"] = \"connection_failed\"\n else:\n user_input = {}\n\n data_schema = vol.Schema(\n {\n vol.Required(CONF_NAME, default=user_input.get(CONF_NAME) or \"\"): str,\n 
vol.Required(CONF_URL, default=user_input.get(CONF_URL) or \"\"): TextSelector(TextSelectorConfig(type=TextSelectorType.URL)),\n vol.Required(CONF_SCAN_INTERVAL, default=user_input.get(CONF_SCAN_INTERVAL) or 30): NumberSelector(NumberSelectorConfig(min=10, step=1, unit_of_measurement=\"s\", mode=NumberSelectorMode.BOX)),\n vol.Required(CONF_TIMEOUT, default=user_input.get(CONF_TIMEOUT) or 10): NumberSelector(NumberSelectorConfig(min=1, max=30, step=1, unit_of_measurement=\"s\", mode=NumberSelectorMode.SLIDER))\n }\n )\n return self.async_show_form(step_id=\"user\", data_schema=data_schema, errors=errors)\n\n async def __test_connection(self, url, timeout):\n \"\"\"Return true if connection is successful.\"\"\"\n try:\n session = async_create_clientsession(self.hass)\n client = FileFlowsApiClient(url, timeout, session)\n if await client.async_get_system_version():\n return True\n except Exception: # pylint: disable=broad-except\n pass\n return False\n","repo_name":"deosrc/home-assistant-fileflows","sub_path":"custom_components/fileflows/config_flow.py","file_name":"config_flow.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"16011197162","text":"class Solution(object):\n def removeElement(self, nums, val):\n \"\"\"\n :type nums: List[int]\n :type val: int\n :rtype: int\n \"\"\"\n length = len(nums)\n newindex = 0\n for i in range(length):\n if nums[i] != val:\n nums[newindex] = nums[i]\n newindex += 1\n return newindex\n\n # v1 wrong answer\n # Your input\n\n # [3,2,2,3]\n # 3\n # [2,3,4,5,6,7,8,7,6,5,4,4,5,6,7,6,6,5,5,4,4]\n # 4\n # Your answer\n\n # [3,2]\n # [2,3,4,5,6,7,8,7,6,5,4,4,5,6,7,6]\n # Expected answer\n\n # [2,2]\n # [2,3,5,6,7,8,7,6,5,5,6,7,6,6,5,5]\n def removeElement_v1(self, nums, val):\n \"\"\"\n :type nums: List[int]\n :type val: int\n :rtype: int\n \"\"\"\n length = len(nums)\n rnum = length\n for i in range(length):\n if nums[i] == val:\n rnum -= 1\n return rnum\n\n","repo_name":"mistwave/leetcode","sub_path":"Python3/no27_Remove_Element.py","file_name":"no27_Remove_Element.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"15140861108","text":"\nfrom fractions import Fraction\nfrom functools import reduce\n\n_default_wave_function = 'sine'\ndef set_default_wave_function(f):\n global _default_wave_function\n _default_wave_function = f\n\n_default_envelope_function = 'exp_falloff'\ndef set_default_envelope_function(f):\n global _default_envelope_function\n _default_envelope_function = f\n\n# Accidental constants\nFLAT = -1\nNATURAL = 0\nSHARP = 1\n\n# Key Signatures\nclass KeySignature:\n __slots__ = ('sharps', 'flats')\n\n def __init__(self, sharps, flats):\n self.sharps = sharps\n self.flats = flats\n\nC_MAJOR = A_MINOR = KeySignature(set(), set())\nG_MAJOR = E_MINOR = KeySignature({'F'}, set())\nD_MAJOR = B_MINOR = KeySignature({'F', 'C'},set())\nA_MAJOR = F_SHARP_MINOR = KeySignature({'F', 'C', 'G'}, set())\nE_MAJOR = C_SHARP_MINOR = KeySignature({'F', 'C', 'G', 'D'}, set())\nB_MAJOR = G_SHARP_MINOR = KeySignature({'F', 'C', 'G', 'D', 'A'}, set())\nF_SHARP_MAJOR = D_SHARP_MINOR = KeySignature({'F', 'C', 'G', 'D', 'A', 'E'},set())\nC_SHARP_MAJOR = KeySignature({'F', 'C', 'G', 'D', 'A', 'E', 'B'}, set())\nC_FLAT_MAJOR = KeySignature(set(), {'B', 'E', 'A', 'D', 'G', 'C', 'F'})\nG_FLAT_MAJOR = E_FLAT_MINOR = KeySignature(set(), {'B', 'E', 'A', 'D', 'G', 
'C'})\nD_FLAT_MAJOR = B_FLAT_MINOR = KeySignature(set(), {'B', 'E', 'A', 'D', 'G'})\nA_FLAT_MAJOR = F_MINOR = KeySignature(set(), {'B', 'E', 'A', 'D'})\nE_FLAT_MAJOR = C_MINOR = KeySignature(set(), {'B', 'E', 'A'})\nB_FLAT_MAJOR = G_MINOR = KeySignature(set(), {'B', 'E'})\nF_MAJOR = D_MINOR = KeySignature(set(), {'B'})\n\nB0 = 'B0'\nC1 = 'C1'\nD1 = 'D1'\nE1 = 'E1'\nF1 = 'F1'\nG1 = 'G1'\nA1 = 'A1'\nB1 = 'B1'\nC2 = 'C2'\nD2 = 'D2'\nE2 = 'E2'\nF2 = 'F2'\nG2 = 'G2'\nA2 = 'A2'\nB2 = 'B2'\nC3 = 'C3'\nD3 = 'D3'\nE3 = 'E3'\nF3 = 'F3'\nG3 = 'G3'\nA3 = 'A3'\nB3 = 'B3'\nC4 = 'C4'\nD4 = 'D4'\nE4 = 'E4'\nF4 = 'F4'\nG4 = 'G4'\nA4 = 'A4'\nB4 = 'B4'\nC5 = 'C5'\nD5 = 'D5'\nE5 = 'E5'\nF5 = 'F5'\nG5 = 'G5'\nA5 = 'A5'\nB5 = 'B5'\nC6 = 'C6'\nD6 = 'D6'\nE6 = 'E6'\nF6 = 'F6'\nG6 = 'G6'\nA6 = 'A6'\nB6 = 'B6'\nC7 = 'C7'\nD7 = 'D7'\nE7 = 'E7'\nF7 = 'F7'\nG7 = 'G7'\nA7 = 'A7'\nB7 = 'B7'\nC8 = 'C8'\nD8 = 'D8'\n\n# Notes\nNOTES = [\n # Octave 0\n (B0, 31),\n\n # Octave 1\n (C1, 33),\n (None, 35),\n (D1, 37),\n (None, 39),\n (E1, 41),\n (F1, 44),\n (None, 46),\n (G1, 49),\n (None, 52),\n (A1, 55),\n (None, 58),\n (B1, 62),\n\n # Octave 2\n (C2, 65),\n (None, 69),\n (D2, 73),\n (None, 78),\n (E2, 82),\n (F2, 87),\n (None, 93),\n (G2, 98),\n (None, 104),\n (A2, 110),\n (None, 117),\n (B2, 123),\n\n # Octave 3\n (C3, 131),\n (None, 139),\n (D3, 147),\n (None, 156),\n (E3, 165),\n (F3, 175),\n (None, 185),\n (G3, 196),\n (None, 208),\n (A3, 220),\n (None, 233),\n (B3, 247),\n\n # Octave 4\n (C4, 262),\n (None, 277),\n (D4, 294),\n (None, 311),\n (E4, 330),\n (F4, 349),\n (None, 370),\n (G4, 392),\n (None, 415),\n (A4, 440),\n (None, 466),\n (B4, 494),\n\n # Octave 5\n (C5, 523),\n (None, 554),\n (D5, 587),\n (None, 622),\n (E5, 659),\n (F5, 698),\n (None, 740),\n (G5, 784),\n (None, 831),\n (A5, 880),\n (None, 932),\n (B5, 988),\n\n # Octave 6\n (C6, 1047),\n (None, 1109),\n (D6, 1175),\n (None, 1245),\n (E6, 1319),\n (F6, 1397),\n (None, 1480),\n (G6, 1568),\n (None, 1661),\n (A6, 1760),\n (None, 1865),\n (B6, 1976),\n\n # Octave 7\n (C7, 2093),\n (None, 2217),\n (D7, 2349),\n (None, 2489),\n (E7, 2637),\n (F7, 2794),\n (None, 2960),\n (G7, 3136),\n (None, 3322),\n (A7, 3520),\n (None, 3729),\n (B7, 3951),\n\n # Octave 8\n (C8, 4186),\n (None, 4435),\n (D8, 4699),\n (None, 4978)\n]\n\n# Note Index by Name Lookup. 
Useful for applying accidental offsets to notes.\nNOTE_BY_NAME = {}\nfor i, (name, value) in enumerate(NOTES):\n if name is not None:\n NOTE_BY_NAME[name] = i\n\nclass Note:\n __slots__ = ('length', 'note', 'accidental', 'wave_function', 'envelope_function')\n\n def __init__(self, length, note, accidental=None, wave_function=None, envelope_function=None):\n if wave_function is None:\n wave_function = _default_wave_function\n if envelope_function is None:\n envelope_function = _default_envelope_function\n\n self.length = length\n self.note = note\n self.accidental = accidental\n self.wave_function = wave_function\n self.envelope_function = envelope_function\n\n def hz(self):\n accidental = self.accidental\n if accidental is None:\n accidental = 0\n\n # Rest has no Hz.\n if self.note == 'REST':\n return 0\n\n # Determine the note from the table.\n idx = NOTE_BY_NAME[self.note] + accidental\n\n # If we're outside the supported range, treat this as a rest.\n if idx < 0 or idx >= len(NOTES):\n return 0\n\n # Return the Hz from the table.\n return NOTES[idx][1]\n\n# lengths\ndef Dot(note):\n note.length *= Fraction(3,2)\n return note\n\ndef Whole(note, accidental=None):\n return Note(Fraction(1,1), note, accidental)\n\ndef Half(note, accidental=None):\n return Note(Fraction(1,2), note, accidental)\n\ndef Quarter(note, accidental=None):\n return Note(Fraction(1,4), note, accidental)\n\ndef Eighth(note, accidental=None):\n return Note(Fraction(1,8), note, accidental)\n\ndef Sixteenth(note, accidental=None):\n return Note(Fraction(1,16), note, accidental)\n\nclass Measure:\n __slots__ = ('tempo', 'time_signature', 'key', 'notes')\n\n def __init__(self, tempo, time_signature, key, notes):\n # Propagate the accidentals across the measure to make life easier later.\n active_accidentals = {}\n self.notes = []\n for note in notes:\n if note.accidental is None:\n letter = note.note[0]\n if letter in key.sharps:\n self.notes.append(Note(note.length, note.note, SHARP))\n elif letter in key.flats:\n self.notes.append(Note(note.length, note.note, FLAT))\n elif note.note in active_accidentals:\n self.notes.append(Note(note.length, note.note, active_accidentals[note.note]))\n else:\n self.notes.append(Note(note.length, note.note, NATURAL))\n else:\n active_accidentals[note.note] = note.accidental\n self.notes.append(note)\n\n self.tempo = tempo\n self.time_signature = time_signature\n self.key = key\n\n def is_time_valid(self):\n return Fraction(*self.time_signature) == reduce(lambda a, b: a + b.length, self.notes, Fraction(0,1))\n","repo_name":"rcythr/music-synthesizer","sub_path":"synth/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"7585089579","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport math\r\n\r\ndef jin_lossb(outputs, partialY):\r\n Y = partialY/partialY.sum(dim=1,keepdim=True)\r\n q = 0.7\r\n sm_outputs = F.softmax(outputs, dim=1)\r\n pow_outputs = torch.pow(sm_outputs, q)\r\n sample_loss = (1-(pow_outputs*Y).sum(dim=1))/q \r\n return sample_loss\r\n\r\ndef jin_lossu(outputs, partialY):\r\n Y = partialY/partialY.sum(dim=1,keepdim=True)\r\n logsm = nn.LogSoftmax(dim=1)\r\n logsm_outputs = logsm(outputs)\r\n final_outputs = logsm_outputs * Y\r\n sample_loss = - final_outputs.sum(dim=1)\r\n return sample_loss\r\n\r\ndef cour_lossb(outputs, partialY):\r\n sm_outputs = F.softmax(outputs, dim=1)\r\n candidate_outputs = 
((sm_outputs*partialY).sum(dim=1))/(partialY.sum(dim=1))\r\n sig = nn.Sigmoid()\r\n candidate_loss = sig(candidate_outputs) \r\n noncandidate_loss = (sig(-sm_outputs)*(1-partialY)).sum(dim=1) \r\n sample_loss = (candidate_loss + noncandidate_loss).mean()\r\n return sample_loss\r\n\r\ndef squared_hinge_loss(z):\r\n hinge = torch.clamp(1-z, min=0)\r\n return hinge*hinge\r\n\r\ndef cour_lossu(outputs, partialY):\r\n sm_outputs = F.softmax(outputs, dim=1)\r\n candidate_outputs = ((sm_outputs*partialY).sum(dim=1))/(partialY.sum(dim=1))\r\n candidate_loss = squared_hinge_loss(candidate_outputs) \r\n noncandidate_loss = (squared_hinge_loss(-sm_outputs)*(1-partialY)).sum(dim=1) \r\n sample_loss = (candidate_loss + noncandidate_loss).mean()\r\n return sample_loss\r\n\r\ndef mae_loss(outputs, partialY):\r\n sm_outputs = F.softmax(outputs, dim=1)\r\n loss_fn = nn.L1Loss(reduction='none')\r\n loss_matrix = loss_fn(sm_outputs, partialY.float())\r\n sample_loss = loss_matrix.sum(dim=-1)\r\n return sample_loss\r\n\r\ndef mse_loss(outputs, Y):\r\n sm_outputs = F.softmax(outputs, dim=1)\r\n loss_fn = nn.MSELoss(reduction='none')\r\n loss_matrix = loss_fn(sm_outputs, Y.float())\r\n sample_loss = loss_matrix.sum(dim=-1)\r\n return sample_loss\r\n\r\ndef gce_loss(outputs, Y):\r\n q = 0.7\r\n sm_outputs = F.softmax(outputs, dim=1)\r\n pow_outputs = torch.pow(sm_outputs, q)\r\n sample_loss = (1-(pow_outputs*Y).sum(dim=1))/q # n\r\n return sample_loss\r\n\r\ndef phuber_ce_loss(outputs, Y):\r\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n trunc_point = 0.1\r\n n = Y.shape[0]\r\n soft_max = nn.Softmax(dim=1)\r\n sm_outputs = soft_max(outputs)\r\n final_outputs = sm_outputs * Y\r\n final_confidence = final_outputs.sum(dim=1)\r\n \r\n ce_index = (final_confidence > trunc_point)\r\n sample_loss = torch.zeros(n).to(device)\r\n\r\n if ce_index.sum() > 0:\r\n ce_outputs = outputs[ce_index,:]\r\n logsm = nn.LogSoftmax(dim=-1) # because ce_outputs might have only one example\r\n logsm_outputs = logsm(ce_outputs)\r\n final_ce_outputs = logsm_outputs * Y[ce_index,:]\r\n sample_loss[ce_index] = - final_ce_outputs.sum(dim=-1)\r\n\r\n linear_index = (final_confidence <= trunc_point)\r\n\r\n if linear_index.sum() > 0:\r\n sample_loss[linear_index] = -math.log(trunc_point) + (-1/trunc_point)*final_confidence[linear_index] + 1\r\n\r\n return sample_loss\r\n\r\ndef cce_loss(outputs, Y):\r\n logsm = nn.LogSoftmax(dim=1)\r\n logsm_outputs = logsm(outputs)\r\n final_outputs = logsm_outputs * Y\r\n sample_loss = - final_outputs.sum(dim=1)\r\n return sample_loss\r\n\r\ndef focal_loss(outputs, Y):\r\n logsm = nn.LogSoftmax(dim=1)\r\n logsm_outputs = logsm(outputs)\r\n soft_max = nn.Softmax(dim=1)\r\n sm_outputs = soft_max(outputs)\r\n final_outputs = logsm_outputs * Y * (1-sm_outputs) ** 0.5\r\n sample_loss = - final_outputs.sum(dim=1)\r\n return sample_loss\r\n\r\ndef pll_estimator(loss_fn, outputs, partialY, device):\r\n n, k = partialY.shape[0], partialY.shape[1]\r\n comp_num = partialY.sum(dim=1)\r\n temp_loss = torch.zeros(n, k).to(device)\r\n\r\n for i in range(k):\r\n tempY = torch.zeros(n, k).to(device)\r\n tempY[:, i] = 1.0 \r\n temp_loss[:, i] = loss_fn(outputs, tempY)\r\n\r\n coef = 1.0 / comp_num\r\n total_loss = coef * (temp_loss*partialY).sum(dim=1) \r\n total_loss = total_loss.sum()\r\n return total_loss\r\n\r\n# def proden_loss(output1, target, true, eps=1e-12):\r\n# output = F.softmax(output1, dim=1)\r\n# l = target * torch.log(output)\r\n# loss = (-torch.sum(l)) / l.size(0)\r\n\r\n# 
revisedY = target.clone()\r\n# revisedY[revisedY > 0] = 1\r\n# # revisedY = revisedY * (output.clone().detach())\r\n# revisedY = revisedY * output\r\n# revisedY = revisedY / (revisedY).sum(dim=1).repeat(revisedY.size(1), 1).transpose(0, 1)\r\n# new_target = revisedY\r\n\r\nclass proden_loss:\r\n def __init__(self, train_p_Y, device):\r\n self.conf = train_p_Y / train_p_Y.sum(dim=1, keepdim=True)\r\n self.conf = self.conf.to(device)\r\n self.device = device\r\n \r\n def __call__(self, output1, indexes):\r\n target = self.conf[indexes].clone().detach()\r\n output = F.softmax(output1, dim=1)\r\n l = target * torch.log(output)\r\n loss = (-torch.sum(l)) / l.size(0)\r\n\r\n return loss\r\n\r\n def update_conf(self, output1, indexes):\r\n target = self.conf[indexes].clone().detach()\r\n output = F.softmax(output1, dim=1)\r\n revisedY = target.clone()\r\n revisedY[revisedY > 0] = 1\r\n # revisedY = revisedY * (output.clone().detach())\r\n revisedY = revisedY * output\r\n revisedY = revisedY / (revisedY).sum(dim=1).repeat(revisedY.size(1), 1).transpose(0, 1)\r\n self.conf[indexes,:] = revisedY.clone().detach()\r\n\r\n \r\n\r\ndef cc_loss(outputs, partialY):\r\n sm_outputs = F.softmax(outputs, dim=1)\r\n final_outputs = sm_outputs * partialY\r\n average_loss = - torch.log(final_outputs.sum(dim=1)).mean()\r\n return average_loss\r\n\r\n\r\n# def rc_loss(outputs, confidence, index):\r\n# logsm_outputs = F.log_softmax(outputs, dim=1)\r\n# final_outputs = logsm_outputs * confidence[index, :]\r\n# average_loss = - ((final_outputs).sum(dim=1)).mean()\r\n# return average_loss\r\n\r\nclass rc_loss:\r\n def __init__(self, train_p_Y, device):\r\n self.conf = train_p_Y / train_p_Y.sum(dim=1, keepdim=True)\r\n self.conf = self.conf.to(device)\r\n self.device = device\r\n \r\n def __call__(self, outputs, index):\r\n logsm_outputs = F.log_softmax(outputs, dim=1)\r\n final_outputs = logsm_outputs * self.conf[index, :]\r\n average_loss = - ((final_outputs).sum(dim=1)).mean()\r\n return average_loss\r\n \r\n def update_conf(self, model, batchX, batchY, batch_index):\r\n confidence = self.conf.clone().detach()\r\n with torch.no_grad():\r\n batch_outputs = model(batchX)\r\n temp_un_conf = F.softmax(batch_outputs, dim=1)\r\n confidence[batch_index,:] = temp_un_conf * batchY # un_confidence stores the weight of each example\r\n #weight[batch_index] = 1.0/confidence[batch_index, :].sum(dim=1)\r\n base_value = confidence.sum(dim=1).unsqueeze(1).repeat(1, confidence.shape[1])\r\n confidence = confidence/base_value\r\n self.conf = confidence.clone().detach()\r\n\r\nclass cavl_loss:\r\n def __init__(self, train_p_Y, device):\r\n self.conf = train_p_Y / train_p_Y.sum(dim=1, keepdim=True)\r\n self.conf = self.conf.to(device)\r\n self.device = device\r\n \r\n def __call__(self, outputs, index):\r\n logsm_outputs = F.log_softmax(outputs, dim=1)\r\n final_outputs = logsm_outputs * self.conf[index, :]\r\n average_loss = - ((final_outputs).sum(dim=1)).mean()\r\n return average_loss\r\n \r\n def update_conf(self, model, batchX, batchY, batch_index):\r\n confidence = self.conf.clone().detach()\r\n with torch.no_grad():\r\n batch_outputs = model(batchX)\r\n cav = (batch_outputs * torch.abs(1 - batch_outputs)) * batchY\r\n cav_pred = torch.max(cav, dim=1)[1]\r\n gt_label = F.one_hot(cav_pred, batchY.shape[1]) # label_smoothing() could be used to further improve the performance for some datasets\r\n confidence[batch_index, :] = gt_label.float()\r\n self.conf = confidence.clone().detach()\r\n\r\n return confidence\r\n\r\nclass 
lws_loss:\r\n def __init__(self, train_p_Y, device, lw_weight=1, lw_weight0=1, epoch_ratio=None):\r\n self.conf = train_p_Y / train_p_Y.sum(dim=1, keepdim=True)\r\n self.conf = self.conf.to(device)\r\n self.device = device\r\n self.lw_weight = lw_weight\r\n self.lw_weight0 = lw_weight0\r\n self.epoch_ratio = epoch_ratio  # keep the constructor argument instead of discarding it\r\n\r\n \r\n def __call__(self, outputs, partialY, index):\r\n device = self.device\r\n confidence = self.conf.clone().detach()\r\n lw_weight = self.lw_weight\r\n lw_weight0 = self.lw_weight0\r\n epoch_ratio = self.epoch_ratio\r\n\r\n onezero = torch.zeros(outputs.shape[0], outputs.shape[1])\r\n onezero[partialY > 0] = 1\r\n counter_onezero = 1 - onezero\r\n onezero = onezero.to(device)\r\n counter_onezero = counter_onezero.to(device)\r\n\r\n sig_loss1 = 0.5 * torch.ones(outputs.shape[0], outputs.shape[1])\r\n sig_loss1 = sig_loss1.to(device)\r\n sig_loss1[outputs < 0] = 1 / (1 + torch.exp(outputs[outputs < 0]))\r\n sig_loss1[outputs > 0] = torch.exp(-outputs[outputs > 0]) / (\r\n 1 + torch.exp(-outputs[outputs > 0]))\r\n l1 = confidence[index, :] * onezero * sig_loss1\r\n average_loss1 = torch.sum(l1) / l1.size(0)\r\n\r\n sig_loss2 = 0.5 * torch.ones(outputs.shape[0], outputs.shape[1])\r\n sig_loss2 = sig_loss2.to(device)\r\n sig_loss2[outputs > 0] = 1 / (1 + torch.exp(-outputs[outputs > 0]))\r\n sig_loss2[outputs < 0] = torch.exp(\r\n outputs[outputs < 0]) / (1 + torch.exp(outputs[outputs < 0]))\r\n l2 = confidence[index, :] * counter_onezero * sig_loss2\r\n average_loss2 = torch.sum(l2) / l2.size(0)\r\n\r\n average_loss = lw_weight0 * average_loss1 + lw_weight * average_loss2\r\n return average_loss#, lw_weight0 * average_loss1, lw_weight * average_loss2\r\n \r\n def update_conf(self, model, batchX, batchY, batch_index):\r\n confidence = self.conf.clone().detach()\r\n with torch.no_grad():\r\n device = self.device\r\n batch_outputs = model(batchX)\r\n sm_outputs = F.softmax(batch_outputs, dim=1)\r\n\r\n onezero = torch.zeros(sm_outputs.shape[0], sm_outputs.shape[1])\r\n onezero[batchY > 0] = 1\r\n counter_onezero = 1 - onezero\r\n onezero = onezero.to(device)\r\n counter_onezero = counter_onezero.to(device)\r\n\r\n new_weight1 = sm_outputs * onezero\r\n new_weight1 = new_weight1 / (new_weight1 + 1e-8).sum(dim=1).repeat(\r\n confidence.shape[1], 1).transpose(0, 1)\r\n new_weight2 = sm_outputs * counter_onezero\r\n new_weight2 = new_weight2 / (new_weight2 + 1e-8).sum(dim=1).repeat(\r\n confidence.shape[1], 1).transpose(0, 1)\r\n new_weight = new_weight1 + new_weight2\r\n\r\n confidence[batch_index, :] = new_weight\r\n \r\n self.conf = confidence.clone().detach()\r\n\r\nclass plcr_loss:\r\n def __init__(self, train_p_Y, device, lam=1):\r\n self.conf = train_p_Y / train_p_Y.sum(dim=1, keepdim=True)\r\n self.conf = self.conf.to(device)\r\n self.device = device\r\n self.consistency_criterion = torch.nn.KLDivLoss(reduction='batchmean').to(device)\r\n self.lam = lam\r\n \r\n def __call__(self, y_pred_aug0, y_pred_aug1, y_pred_aug2, targets, indexes, epoch):\r\n y_pred_aug0_probas_log = torch.log_softmax(y_pred_aug0, dim=-1)\r\n y_pred_aug1_probas_log = torch.log_softmax(y_pred_aug1, dim=-1)\r\n y_pred_aug2_probas_log = torch.log_softmax(y_pred_aug2, dim=-1)\r\n\r\n y_pred_aug0_probas = torch.softmax(y_pred_aug0, dim=-1)\r\n y_pred_aug1_probas = torch.softmax(y_pred_aug1, dim=-1)\r\n y_pred_aug2_probas = torch.softmax(y_pred_aug2, dim=-1)\r\n\r\n # consist loss\r\n consist_loss0 = self.consistency_criterion(y_pred_aug0_probas_log,\r\n self.conf[indexes].clone().detach())\r\n consist_loss1 = 
self.consistency_criterion(y_pred_aug1_probas_log,\r\n self.conf[indexes].clone().detach())\r\n consist_loss2 = self.consistency_criterion(y_pred_aug2_probas_log,\r\n self.conf[indexes].clone().detach())\r\n \r\n # supervised loss\r\n super_loss = -torch.mean(\r\n torch.sum(torch.log(1.0000001 - F.softmax(y_pred_aug0, dim=1)) * (1 - targets), dim=1))\r\n # dynamic lam\r\n self.lam = min((epoch / 100) * self.lam, self.lam)\r\n\r\n # Unified loss\r\n final_loss = self.lam * (consist_loss0 + consist_loss1 + consist_loss2) + super_loss\r\n # update confidence\r\n self.confidence_update(y_pred_aug0_probas, y_pred_aug1_probas, y_pred_aug2_probas, targets, indexes)\r\n return final_loss\r\n \r\n def confidence_update(self, y_pred_aug0_probas, y_pred_aug1_probas, y_pred_aug2_probas, targets, indexes):\r\n y_pred_aug0_probas = y_pred_aug0_probas.detach()\r\n y_pred_aug1_probas = y_pred_aug1_probas.detach()\r\n y_pred_aug2_probas = y_pred_aug2_probas.detach()\r\n\r\n revisedY0 = targets.clone()\r\n\r\n revisedY0 = revisedY0 * torch.pow(y_pred_aug0_probas, 1 / (2 + 1)) \\\r\n * torch.pow(y_pred_aug1_probas, 1 / (2 + 1)) \\\r\n * torch.pow(y_pred_aug2_probas, 1 / (2 + 1))\r\n revisedY0 = revisedY0 / revisedY0.sum(dim=1).repeat(revisedY0.size(1), 1).transpose(0, 1)\r\n\r\n self.conf[indexes, :] = revisedY0.clone().detach()\r\n \r\n\r\n","repo_name":"palm-ml/idgp","sub_path":"utils2/utils_loss.py","file_name":"utils_loss.py","file_ext":"py","file_size_in_byte":13722,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"}
+{"seq_id":"74716542374","text":"# -*- coding: utf-8 -*-\n# @Organization : TMT\n# @Author : Cuong Tran\n# @Time : 8/2/2022\n\n\nimport cv2\nimport numpy as np\nimport torch\nfrom torchvision import transforms\nimport onnxruntime\n\nfrom config import *\nfrom src.utils.utils import OCRLabelConverter\nfrom glob import glob\n\n\nclass TextRecognition:\n def __init__(self, weight='checkpoints/crnn_r18.onnx'):\n self.session = onnxruntime.InferenceSession(weight)\n self.device = torch.device('cuda' if torch.cuda.is_available() and args['cuda'] else 'cpu')\n\n self.converter = OCRLabelConverter(alphabet)\n\n self.transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))])\n self.imgH = args['imgH']\n self.imgW = 1200\n self.input_mean = 0.5\n self.alphabet = alphabet\n\n def __call__(self, img):\n \"\"\"\n img is opencv img (bgr)\n \"\"\"\n h, w = img.shape[:2]\n\n ratio = w / float(h)\n imgW = int(np.floor(ratio * self.imgH))\n imgW = max(self.imgH, imgW) # ensure imgW >= imgH\n img = cv2.resize(img, (imgW, self.imgH))\n\n blob = cv2.dnn.blobFromImage(img, 1.0 / 127.5, (imgW, self.imgH),\n (self.input_mean, self.input_mean, self.input_mean), swapRB=True)\n\n logits = self.session.run(None, {'input': blob})[0]\n pred = self.decode(logits.T[0], [len(logits)])\n return pred\n\n def decode(self, t, length, raw=False):\n if len(length) == 1:\n length = length[0]\n if raw:\n return ''.join([self.alphabet[i - 1] for i in t])\n else:\n char_list = []\n for i in range(length):\n if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):\n char_list.append(self.alphabet[t[i] - 1])\n return ''.join(char_list)\n else:\n # batch mode\n texts = []\n index = 0\n for i in range(len(length)):\n l = length[i]\n texts.append(\n self.decode(t[index:index + l], [l], raw=raw))\n index += l\n return texts\n\nif __name__ == '__main__':\n import random\n text_recognizer = TextRecognition()\n\n path = 
'D:/TextSpotter/vietnamese_dataset/img_crop'\n list_images = glob(f'{path}/*.[jp][pn]*')\n random.shuffle(list_images)\n for img_path in list_images:\n img = cv2.imread(img_path)\n pred = text_recognizer(img)\n print(pred)\n cv2.imshow('cc', img)\n cv2.waitKey()\n\n","repo_name":"cuongdtone/Text-Recognition","sub_path":"predict_onnx.py","file_name":"predict_onnx.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"2283051162","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.cluster import MeanShift\r\nfrom sklearn.cluster import estimate_bandwidth\r\n\r\nbase_dir = '..\\\\Projects\\\\fifa\\\\'\r\n\r\n\r\ndef run_all():\r\n df = get_df()\r\n # df = groom_df()\r\n description = df.describe()\r\n print(description.to_string())\r\n print()\r\n cluster_data(df)\r\n i = 0\r\n\r\n\r\ndef get_df():\r\n df = pd.read_csv(base_dir + 'groomed_data.csv', encoding='utf-8')\r\n return df\r\n\r\n\r\ndef groom_df():\r\n # Get the data frame\r\n df = pd.read_csv(base_dir + 'data.csv', encoding='utf-8')\r\n df = df.loc[df['Work Rate'].notnull()]\r\n df = df.loc[df['Position'].notnull()]\r\n df = df.loc[df['Club'].notnull()]\r\n\r\n df = df.fillna(0)\r\n\r\n # Drop the unnecessary columns\r\n df = df.drop(columns=['Unnamed: 0', 'Photo', 'Flag', 'Club Logo'])\r\n\r\n # Encode the nationality.\r\n nationality_label_encoder = LabelEncoder()\r\n nationality_label_encoder = nationality_label_encoder.fit(df['Nationality'])\r\n df['Nationality ID'] = nationality_label_encoder.transform(df['Nationality'])\r\n\r\n # Encode the club\r\n club_label_encoder = LabelEncoder()\r\n club_label_encoder = club_label_encoder.fit(df['Club'])\r\n df['Club ID'] = club_label_encoder.transform(df['Club'])\r\n\r\n # Fix value and wage\r\n def fix_value(row, col_name):\r\n value = row[col_name]\r\n value = str(value).replace('€', '')\r\n if 'M' in value:\r\n value = value.replace('M', '')\r\n value = float(value)\r\n value = value * 1e6\r\n elif 'K' in value:\r\n value = value.replace('K', '')\r\n value = float(value)\r\n value = value * 1e3\r\n return value\r\n\r\n df['Value'] = df.apply(lambda row: fix_value(row, 'Value'), axis=1)\r\n df['Wage'] = df.apply(lambda row: fix_value(row, 'Wage'), axis=1)\r\n df['Release Clause'] = df.apply(lambda row: fix_value(row, 'Release Clause'), axis=1)\r\n\r\n # Encode the preferred foot\r\n foot_map = {'Right': 1,\r\n 'Left': 0}\r\n df['Preferred Foot'] = df['Preferred Foot'].map(foot_map)\r\n\r\n # Encode the work rates\r\n df['Attack Work Rate'] = df.apply(lambda row: row['Work Rate'].split('/ ')[0], axis=1)\r\n df['Defense Work Rate'] = df.apply(lambda row: row['Work Rate'].split('/ ')[1], axis=1)\r\n\r\n work_rate_map = {'High': 2,\r\n 'Medium': 1,\r\n 'Low': 0}\r\n df['Attack Work Rate'] = df['Attack Work Rate'].map(work_rate_map)\r\n df['Defense Work Rate'] = df['Defense Work Rate'].map(work_rate_map)\r\n\r\n # Fix Height and Weight\r\n df['Height'] = df.apply(lambda row: int(row['Height'].split(\"'\")[0]) * 12 + int(row['Height'].split(\"'\")[1]),\r\n axis=1)\r\n df['Weight'] = df.apply(lambda row: row['Weight'].replace('lbs', ''), axis=1)\r\n df['Height'] = df['Height'].astype(np.int64)\r\n df['Weight'] = df['Weight'].astype(np.int64)\r\n\r\n # Encode the body type\r\n body_type_map = {'Messi': 'Lean',\r\n 'C. 
Ronaldo': 'Stocky',\r\n 'Neymar': 'Lean',\r\n 'Courtois': 'Stocky',\r\n 'PLAYER_BODY_TYPE_25': 'Lean',\r\n 'Shaqiri': 'Lean',\r\n 'Lean': 'Lean',\r\n 'Normal': 'Normal',\r\n 'Stocky': 'Stocky',\r\n 'Akinfenwa': 'Stocky'}\r\n df['Body Type'] = df['Body Type'].map(body_type_map)\r\n body_label_encoder = LabelEncoder()\r\n body_label_encoder = body_label_encoder.fit(df['Body Type'])\r\n df['Body Type ID'] = body_label_encoder.transform(df['Body Type'])\r\n\r\n # Encode the faces\r\n real_face_map = {'Yes': 1,\r\n 'No': 0}\r\n df['Real Face'] = df['Real Face'].map(real_face_map)\r\n\r\n # Encode the position\r\n position_label_encoder = LabelEncoder()\r\n position_label_encoder = position_label_encoder.fit(df['Position'])\r\n df['Position ID'] = position_label_encoder.transform(df['Position'])\r\n\r\n def fix_joined(row):\r\n import maya\r\n joined_str = row['Joined']\r\n if str(joined_str) == '0':\r\n joined_str = row['Contract Valid Until']\r\n joined = maya.when(joined_str)\r\n epoch = joined.epoch\r\n return epoch\r\n\r\n df['Joined'] = df.apply(lambda row: fix_joined(row), axis=1)\r\n\r\n def fix_valid_until(row):\r\n import maya\r\n joined_str = row['Contract Valid Until']\r\n joined = maya.when(joined_str)\r\n epoch = joined.epoch\r\n return epoch\r\n\r\n df['Contract Valid Until'] = df.apply(lambda row: fix_valid_until(row), axis=1)\r\n\r\n def fix_position_skill_rating(row, position):\r\n rating = row[position]\r\n if '+' in str(rating):\r\n base = rating.split('+')[0]\r\n comp = rating.split('+')[1]\r\n rating = int(base) + int(comp)\r\n else:\r\n rating = int(rating)\r\n return rating\r\n\r\n df['LS'] = df.apply(lambda row: fix_position_skill_rating(row, 'LS'), axis=1)\r\n df['ST'] = df.apply(lambda row: fix_position_skill_rating(row, 'ST'), axis=1)\r\n df['RS'] = df.apply(lambda row: fix_position_skill_rating(row, 'RS'), axis=1)\r\n\r\n df['LW'] = df.apply(lambda row: fix_position_skill_rating(row, 'LW'), axis=1)\r\n df['LF'] = df.apply(lambda row: fix_position_skill_rating(row, 'LF'), axis=1)\r\n df['CF'] = df.apply(lambda row: fix_position_skill_rating(row, 'CF'), axis=1)\r\n df['RF'] = df.apply(lambda row: fix_position_skill_rating(row, 'RF'), axis=1)\r\n df['RW'] = df.apply(lambda row: fix_position_skill_rating(row, 'RW'), axis=1)\r\n\r\n df['LAM'] = df.apply(lambda row: fix_position_skill_rating(row, 'LAM'), axis=1)\r\n df['CAM'] = df.apply(lambda row: fix_position_skill_rating(row, 'CAM'), axis=1)\r\n df['RAM'] = df.apply(lambda row: fix_position_skill_rating(row, 'RAM'), axis=1)\r\n\r\n df['LM'] = df.apply(lambda row: fix_position_skill_rating(row, 'LM'), axis=1)\r\n df['LCM'] = df.apply(lambda row: fix_position_skill_rating(row, 'LCM'), axis=1)\r\n df['CM'] = df.apply(lambda row: fix_position_skill_rating(row, 'CM'), axis=1)\r\n df['RCM'] = df.apply(lambda row: fix_position_skill_rating(row, 'RCM'), axis=1)\r\n df['RM'] = df.apply(lambda row: fix_position_skill_rating(row, 'RM'), axis=1)\r\n\r\n df['LWB'] = df.apply(lambda row: fix_position_skill_rating(row, 'LWB'), axis=1)\r\n df['LDM'] = df.apply(lambda row: fix_position_skill_rating(row, 'LDM'), axis=1)\r\n df['CDM'] = df.apply(lambda row: fix_position_skill_rating(row, 'CDM'), axis=1)\r\n df['RDM'] = df.apply(lambda row: fix_position_skill_rating(row, 'RDM'), axis=1)\r\n df['RWB'] = df.apply(lambda row: fix_position_skill_rating(row, 'RWB'), axis=1)\r\n\r\n df['LB'] = df.apply(lambda row: fix_position_skill_rating(row, 'LB'), axis=1)\r\n df['LCB'] = df.apply(lambda row: fix_position_skill_rating(row, 'LCB'), axis=1)\r\n 
df['CB'] = df.apply(lambda row: fix_position_skill_rating(row, 'CB'), axis=1)\r\n df['RCB'] = df.apply(lambda row: fix_position_skill_rating(row, 'RCB'), axis=1)\r\n df['RB'] = df.apply(lambda row: fix_position_skill_rating(row, 'RB'), axis=1)\r\n\r\n df = df.drop(columns=['Nationality', 'Club', 'Work Rate', 'Position', 'Loaned From', 'Body Type'])\r\n\r\n df.to_csv(base_dir + 'groomed_data.csv', index=False)\r\n return df\r\n\r\n\r\ndef cluster_data(df):\r\n names = df['Name']\r\n\r\n # Remove goal keepers, they are a known separate cluster\r\n df = df.loc[df['Position ID'] != 5]\r\n df = df.drop(columns=['Name', 'ID', 'Value', 'Wage', 'Special', 'Real Face', 'Jersey Number',\r\n 'Joined', 'Contract Valid Until', 'Height', 'Weight', 'Nationality ID',\r\n 'Club ID', 'Body Type ID', 'Release Clause'])\r\n print('Irrelevant values dropped')\r\n # band_est = estimate_bandwidth(df) # -> 1\r\n band_est = 68\r\n print('Bandwidth:', band_est)\r\n analyzer = MeanShift(bandwidth=band_est)\r\n print('Fitting...')\r\n analyzer.fit(df)\r\n\r\n labels = analyzer.labels_\r\n\r\n df['Cluster Group'] = np.nan\r\n data_length = len(df)\r\n for i in range(data_length):\r\n df.iloc[i, df.columns.get_loc('Cluster Group')] = labels[i]\r\n\r\n df_clusters = df.groupby(['Cluster Group']).mean()\r\n df_clusters['Counts'] = pd.Series(df.groupby(['Cluster Group']).size())\r\n\r\n defense = df.loc[df['Cluster Group'] == 0]\r\n print(defense.describe().to_string())\r\n print()\r\n\r\n offense = df.loc[df['Cluster Group'] == 1]\r\n print(offense.describe().to_string())\r\n\r\n df['Name'] = names\r\n return df\r\n","repo_name":"crzonca/FIFA-19-Player-Clustering","sub_path":"Fifa.py","file_name":"Fifa.py","file_ext":"py","file_size_in_byte":8572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"35413790640","text":"# -*- coding: utf-8 -*-\r\nimport re\r\nimport json\r\n\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\n\r\n\r\nclass NltkPhoneticWordsMatching(object):\r\n \"\"\"A practical small program to do the phonetic matching with nltk\"\"\"\r\n \r\n def __init__(self):\r\n self.phonetic_list = list()\r\n self.phonetic_list_final_unique = list()\r\n \r\n def get_the_phonetic_from_nltk_cmudict(self):\r\n phonetic_list_final_tmp = list()\r\n \r\n for items in nltk.corpus.cmudict.entries():\r\n self.phonetic_list.append(items[1])\r\n\r\n for extracted_items in self.phonetic_list:\r\n for phonetics in extracted_items:\r\n phonetic_list_final_tmp.append(phonetics)\r\n \r\n # NLTK also marks stressed syllables, etc.; here any stressed variant simply counts as containing the base phoneme\r\n for index in range(len(phonetic_list_final_tmp)):\r\n if (phonetic_list_final_tmp[index][-1] == \"0\") | (phonetic_list_final_tmp[index][-1] == \"1\") | (phonetic_list_final_tmp[index][-1] == \"2\"):\r\n phonetic_list_final_tmp[index] = phonetic_list_final_tmp[index][:-1]\r\n \r\n for items in phonetic_list_final_tmp:\r\n if items in self.phonetic_list_final_unique:\r\n pass\r\n else:\r\n self.phonetic_list_final_unique.append(items)\r\n \r\n return self.phonetic_list_final_unique\r\n\r\n @staticmethod\r\n def get_data_from_raw_sents():\r\n # Path to the data: keep this .py file and the data file in the same directory\r\n with open(\"phonetic_transaction_raw_data\") as file:\r\n data = file.read()\r\n \r\n # First split on newline characters\r\n tmp_list = data.split(\"\\n\")\r\n \r\n for index in range(len(tmp_list)):\r\n tmp_list[index] = re.sub(\"\\.\", \"\", tmp_list[index])\r\n \r\n final_list = []\r\n bracket = []\r\n\r\n for index in range(len(tmp_list)):\r\n if len(bracket) == 11:\r\n final_list.append(bracket)\r\n 
bracket = list()\r\n bracket.append(tmp_list[index])\r\n else:\r\n bracket.append(tmp_list[index])\r\n\r\n # List 72 is missing for unknown reasons; it is only one list, so add it back manually following the pattern\r\n final_list.append(\r\n ['List 72',\r\n 'A gold ring will please most any girl',\r\n 'The long journey home took a year',\r\n \"She saw a cat in the neighbor's house\",\r\n 'A pink shell was found on the sandy beach',\r\n 'Small children came to see him',\r\n 'The grass and bushes were wet with dew',\r\n 'The blind man counted his old coins',\r\n 'A severe storm tore down the barn',\r\n 'She called his name many times',\r\n 'When you hear the bell, come quickly']\r\n )\r\n\r\n return final_list\r\n \r\n @staticmethod\r\n def get_phoneics_in_each_list(the_phonetic_list: list):\r\n # Use the English stopword set\r\n stop = set(stopwords.words('english'))\r\n \r\n # List comprehension keeps the first element (the list title) as the seed; each list then gets a stopword-free version\r\n finished_list = [i for i in the_phonetic_list[0:1]]\r\n for sents in the_phonetic_list[1:]:\r\n process_finished_sents = [i for i in sents.lower().split() if i not in stop]\r\n # Strip commas and the like\r\n for index in range(len(process_finished_sents)):\r\n process_finished_sents[index] = re.sub(\",\", \"\", process_finished_sents[index])\r\n # For hyphenated words, split on '-', drop the original token and extend with the split-out words\r\n if \"-\" in process_finished_sents[index]:\r\n tmp_words_list = process_finished_sents[index].split(\"-\")\r\n process_finished_sents.pop(index)\r\n process_finished_sents.extend(tmp_words_list)\r\n\r\n finished_list.append(process_finished_sents)\r\n\r\n tmp_list = list()\r\n # Use CMUdict's ARPAbet dictionary\r\n arpabet = nltk.corpus.cmudict.dict()\r\n finished_list0 = [i for i in finished_list[0:1]]\r\n for lists in finished_list[1:]:\r\n for word in lists:\r\n try:\r\n tmp_list.append(arpabet[word])\r\n except KeyError: # some words may well be unrecognizable\r\n print(word)\r\n finished_list0.append(tmp_list)\r\n tmp_list = list()\r\n\r\n finished_list_final = [i for i in finished_list0[0:1]]\r\n for items in finished_list0[1:]:\r\n try:\r\n for index in range(len(items)):\r\n for phonetics in items[index][0]:\r\n # Strip the trailing stress digit first, to fit the 39-phoneme set\r\n if (phonetics[-1] == \"0\") | (phonetics[-1] == \"1\") | (phonetics[-1] == \"2\"):\r\n phonetics = phonetics[:-1]\r\n if phonetics not in finished_list_final:\r\n finished_list_final.append(phonetics)\r\n else:\r\n pass\r\n except IndexError:\r\n print(items)\r\n \r\n return finished_list_final\r\n \r\n def gen_the_final_phonetics_list(self):\r\n finished_phonetic_list_final = list()\r\n print(len(self.get_data_from_raw_sents()))\r\n for lists in self.get_data_from_raw_sents():\r\n finished_phonetic_list_final.append(self.get_phoneics_in_each_list(lists))\r\n \r\n return finished_phonetic_list_final\r\n \r\n # matching check: entries are unique, so a length of 39 means every phoneme matched\r\n def matching_check(self):\r\n all_matched_phonetics_from_lists = list()\r\n matching_check_list = self.gen_the_final_phonetics_list()\r\n list_tag = 1\r\n for phonetics_lists_index in range(len(matching_check_list)):\r\n if len(matching_check_list[phonetics_lists_index]) == 39:\r\n all_matched_phonetics_from_lists.append(matching_check_list[phonetics_lists_index])\r\n all_matched_phonetics_from_lists.append(\"List {}\".format(str(phonetics_lists_index)))\r\n all_matched_phonetics_from_lists.append(len(matching_check_list[phonetics_lists_index]))\r\n else:\r\n all_matched_phonetics_from_lists.append(\"List {}, \".format(str(phonetics_lists_index)))\r\n all_matched_phonetics_from_lists.append(len(matching_check_list[phonetics_lists_index]))\r\n\r\n with open(\"matching_check.txt\", 'w', encoding='utf-8') as file:\r\n 
json.dump(all_matched_phonetics_from_lists, file)\r\n \r\n # Dump all the phonetics that could be found\r\n with open(\"all_results_for_phonetics.txt\", 'w', encoding='utf-8') as file:\r\n json.dump(matching_check_list, file)\r\n \r\n return 0\r\n\r\n\r\nif __name__ == \"__main__\":\r\n phonetic_obj = NltkPhoneticWordsMatching()\r\n all_phonetics = phonetic_obj.get_the_phonetic_from_nltk_cmudict()\r\n print(all_phonetics)\r\n results_for_console_checking = phonetic_obj.gen_the_final_phonetics_list()\r\n print(results_for_console_checking)\r\n phonetic_obj.matching_check()\r\n # a = nltk.corpus.cmudict.dict()\r\n # print(a[\"shaped\"])\r\n","repo_name":"SigismundWu/NltkPhoneticWordsMatch","sub_path":"NltkPhoneticWordsMatching/NltkPhoneticWordsMatching.py","file_name":"NltkPhoneticWordsMatching.py","file_ext":"py","file_size_in_byte":7337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"10283000867","text":"# packages\nimport biosppy.clustering as bioc\nimport biosppy.plotting as biop\nimport biosppy.metrics as biom\nfrom sklearn.metrics import (\n silhouette_score,\n davies_bouldin_score,\n calinski_harabasz_score\n)\nfrom scipy.cluster import hierarchy\nfrom sklearn.model_selection import ParameterGrid\nimport numpy as np\nimport matplotlib.pyplot as plt\n# local\nfrom kmeans import KmeansClustering\nfrom gaussian_mixture import GaussianMixtureClustering\n\n\nclass ConsensusKmeans:\n\n def __init__(self, kmin=None, kmax=None, n_ensemble=100):\n self.kmin = kmin\n self.kmax = kmax\n self.n_ensemble = n_ensemble\n\n def ensemble(self, data):\n\n N = len(data)\n\n if self.kmin is None:\n self.kmin = int(round(np.sqrt(N) / 2.))\n\n if self.kmax is None:\n self.kmax = int(round(np.sqrt(N)))\n\n grid = {\n 'k': np.random.randint(low=self.kmin, high=self.kmax, size=self.n_ensemble)\n }\n\n ensemble, = bioc.create_ensemble(data.to_numpy(), fcn=bioc.kmeans, grid=grid)\n return ensemble\n\n # metrics = ['EUCLIDEAN', 'EUCLIDEAN_SQUARE', 'MANHATTAN', 'CHEBYSHEV', 'CANBERRA', 'CHI_SQUARE']\n # grid = ParameterGrid(grid)\n\n # run kmeans for each ensemble\n # ensemble = []\n \n # for params in grid:\n # # algs = ['kmeans', 'gaussian']\n # # metric = np.random.choice(metrics, 1, p=[0.4, 0.5, 0.025, 0.025, 0.025, 0.025])\n # # alg = np.random.choice(algs, 1, p=[0.7, 0.3])\n # # if alg == 'kmeans':\n # # k_means = KmeansClustering(data=data, init='random', metric='EUCLIDEAN_SQUARE', **params)\n # # else:\n # # k_means = GaussianMixtureClustering(covariance_type='spherical', **params)\n # # print(k_means)\n # k_means = KmeansClustering(data=data, init='random', metric='EUCLIDEAN', **params)\n # y_pred = k_means.fit_predict(data)\n # ensemble.append(bioc._extract_clusters(y_pred))\n # return ensemble\n\n def coassoc_matrix(self, ensemble, data_size, path=None, show=False):\n coassoc, = bioc.create_coassoc(ensemble, data_size)\n plt.imshow(coassoc, interpolation='nearest')\n if path is not None:\n plt.savefig(path + '.png')\n if show:\n plt.show()\n return coassoc\n\n def coassoc_partition(self, coassoc, k, linkage):\n clusters, = bioc.coassoc_partition(coassoc, k, linkage)\n return clusters\n\n def visualize_clusters(self, data, clusters, path=None, show=False):\n biop.plot_clustering(data.to_numpy(), clusters, path, show)\n\n # determine number of clusters\n keys = list(clusters)\n n_rows = len(data)\n y_pred = np.ones((n_rows,), dtype=int)\n\n for k in keys:\n y_pred[clusters[k]] = k\n # if i == 0:\n # axis_x = data.iloc[clusters[k], :]\n\n fig = plt.figure(figsize=(10, 
10))\n axis = fig.add_subplot(111, projection='3d')\n sc = axis.scatter(\n data.iloc[:, 0], data.iloc[:, 1], data.iloc[:, 2], c=y_pred\n )\n axis.set_xlabel(data.columns[0], fontsize=10)\n axis.set_ylabel(data.columns[1], fontsize=10)\n axis.set_zlabel(data.columns[2], fontsize=10)\n plt.legend(*sc.legend_elements(), loc=1, title='Clusters')\n if path is not None:\n plt.savefig(path + '.png')\n if show:\n plt.show()\n return y_pred\n\n def evaluate_clusters(self, data, y_pred, path=None, show=False):\n clusters = np.unique(np.array(y_pred)) \n\n if len(clusters) == 1:\n c_h_score = 'Only one cluster found'\n d_b_score = 'Only one cluster found'\n s_score = 'Only one cluster found'\n else: \n c_h_score = calinski_harabasz_score(data, y_pred)\n d_b_score = davies_bouldin_score(data, y_pred)\n s_score = silhouette_score(data, y_pred)\n \n if path:\n with open(path + 'evaluation.txt', 'a+') as f:\n for _, v in enumerate(clusters):\n n = len(y_pred[y_pred == v])\n f.write('N instances belonging to cluster {}: {} \\n'.format(v, n)) \n f.write('Calinski score: {} \\n'.format(c_h_score))\n f.write('Davies-Bouldin score: {} \\n'.format(d_b_score))\n f.write('Silhouette score: {} \\n \\n'.format(s_score))\n if show:\n for _, v in enumerate(clusters):\n n = len(y_pred[y_pred == v])\n print('N instances belonging to cluster {}:'.format(v), n) \n print('Calinski score:', c_h_score)\n print('Davies-Bouldin score:', d_b_score)\n print('Silhouette score:', s_score, '\\n')\n","repo_name":"luisl12/Driver-Profile-Classification","sub_path":"modeling/consensus_kmeans.py","file_name":"consensus_kmeans.py","file_ext":"py","file_size_in_byte":4803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"14420543585","text":"import unittest\nimport inspect\nimport os\nfrom lib.ddt import ddt, data # import ddt; ddt and data must be imported together\n# Prefer importing the class over importing an instance directly; reusing one object (one session) to test different interfaces is not very standard\nfrom script.webservice_class import Webservice\n# import the Excel class\nfrom script.excel_class import HandleExcel\n# import the config file class\nfrom script.config_class import do_config, do_config2\n# import the logging class\nfrom script.log_class import do_log\n\nfrom script.constant import DATA_FILE_PATH, CONFIG_USER_FILE_PATH\n\nfrom script.handle_context import Context\n\ndo_excel = HandleExcel(DATA_FILE_PATH, 'userregister')\n\n\n@ddt # added on the line above the class\nclass TestUserRegister(unittest.TestCase):\n \"\"\"\n Test the user registration feature\n \"\"\"\n cases_list = do_excel.get_cases()\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Override the parent's class method; it is called only once for all instance methods (test cases)\n :return:\n \"\"\"\n # cls.file_name = 'test_result.txt'\n # cls.file_name = do_config('file path', 'log_path')\n # print('Opening file [{}]'.format(cls.file_name))\n do_log.info(\"{:=^40s}\".format(\"Start running test cases\"))\n # print(\"{:=^40s}\".format(\"Start running test cases\"))\n # cls.file = open(cls.file_name, mode='a', encoding='utf8')\n # cls.file.write(\"{:=^40s}\\n\".format(\"Start running test cases\"))\n\n @classmethod\n def tearDownClass(cls):\n do_log.info(\"{:=^40s}\".format(\"Finished running test cases\"))\n # cls.file.write('{:=^40s}\\n'.format('Finished running test cases'))\n # cls.file.close()\n\n @data(*cases_list) # iterate over the test cases\n def test_case(self, case_value):\n # print('\\nRunning Test Method: {}'.format(inspect.stack()[0][3]))\n do_log.info('\\nRunning Test Method: {}'.format(inspect.stack()[0][3]))\n # data_namedtuple = cases_list.pop(0)\n case_id = case_value.case_id\n msg = case_value.title\n url = case_value.url\n data2 = Context.register_parameterization(case_value.data)\n if case_id == 1:\n if os.path.exists(CONFIG_USER_FILE_PATH):\n pass\n else:\n user_data = {'user_info': 
{'mobile': data2['mobile'],\n 'user_id': data2['user_id'],\n 'verify_code': data2['verify_code']}}\n do_config.write_config(user_data, \"user_data.ini\")\n do_config.write_config(user_data, \"user_data2.ini\")\n method = case_value.method\n expect_result = case_value.expected\n actual_result = str(Webservice(url, data2, method).result_output())\n\n if case_id == do_excel.ws.max_row-1:\n os.remove(CONFIG_USER_FILE_PATH)\n # write the actual result into excel\n # ws.cell(row=case_id + 1, column=6, value=actual_result)\n try:\n self.assertEqual(actual_result, expect_result, msg='Test {} failed'.format(msg))\n except AssertionError as e:\n # print('Exception detail: {}'.format(e))\n do_log.error('Exception detail: {}'.format(e))\n # self.file.write('{}, result: {}, exception detail: {}\\n'.format(msg, 'Fail', e))\n # self.file.write('{}, result: {}, exception detail: {}\\n'.format(msg, do_config('msg', 'fail_result'), e))\n # ws.cell(row=case_id + 1, column=7, value='Fail')\n # self.handle_excel.write_result(case_id + 1, actual_result, 'Fail')\n do_excel.write_result(case_id + 1, actual_result, do_config('msg', 'fail_result'))\n raise e\n else:\n # self.file.write('{}, result: {}\\n'.format(msg, 'Pass'))\n # self.file.write('{}, result: {}\\n'.format(msg, do_config('msg', 'success_result')))\n # ws.cell(row=case_id + 1, column=7, value='Pass')\n # self.handle_excel.write_result(case_id + 1, actual_result, 'Pass')\n do_excel.write_result(case_id + 1, actual_result, do_config('msg', 'success_result'))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"dransonjs/my_python_test_codes","sub_path":"PycharmProjects/WebService_API_Test/case/test_02_userregister.py","file_name":"test_02_userregister.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"10904197271","text":"import shutil\nimport tempfile\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.core.paginator import Paginator\nfrom django.test import Client, TestCase, override_settings\nfrom django.urls import reverse\n\nfrom yatube.settings import POSTS_ON_PAGE\n\nfrom ..models import Comment, Follow, Group, Post, User\n\nINDEX_PAGE = reverse('posts:index')\nGROUP_SLUG = 'test_slug'\nGROUP_POSTS = reverse('posts:group_list', args=[GROUP_SLUG])\nAUTHOR_NAME = 'TestUser'\nAUTHOR_POSTS = reverse('posts:profile', args=[AUTHOR_NAME])\nPOST_ID = '1'\nPOST_PAGE = reverse('posts:post_detail', args=[POST_ID])\nPOST_CREATE_PAGE = reverse('posts:post_create')\nPOST_EDIT_PAGE = reverse('posts:post_edit', args=[POST_ID])\nTEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)\n\n\n@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)\nclass PostPagesTest(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.author = User.objects.create_user(username='TestUser')\n cls.non_author = User.objects.create_user(username='non_author')\n cls.group = Group.objects.create(\n title='Test group',\n slug='test_slug',\n description='Description',\n )\n cls.scnd_group = Group.objects.create(\n title='Second test group',\n slug='scnd_test_slug',\n description='Description of the second',\n )\n cls.follower = Follow.objects.create(\n author=cls.author,\n user=cls.non_author\n )\n cls.image = (\n b'\\x47\\x49\\x46\\x38\\x39\\x61\\x02\\x00'\n b'\\x01\\x00\\x80\\x00\\x00\\x00\\x00\\x00'\n b'\\xFF\\xFF\\xFF\\x21\\xF9\\x04\\x00\\x00'\n b'\\x00\\x00\\x00\\x2C\\x00\\x00\\x00\\x00'\n b'\\x02\\x00\\x01\\x00\\x00\\x02\\x02\\x0C'\n b'\\x0A\\x00\\x3B'\n )\n cls.uploaded 
= SimpleUploadedFile(\n name='smallVIEWS.png',\n content=cls.image,\n content_type='image/png'\n )\n\n post_obj = [Post(\n text='Test text',\n group=cls.group,\n author=cls.author,\n image=cls.uploaded,\n pk='%s' % i\n ) for i in range(12)]\n cls.posts = Post.objects.bulk_create(post_obj)\n\n cls.post = Post.objects.create(\n author=cls.author,\n text='Test text',\n group=cls.group,\n image=cls.uploaded,\n )\n cls.comment = Comment.objects.create(\n author=cls.author,\n text='Test comment',\n post=cls.post,\n )\n cls.paginator = Paginator(Post.objects.all(), POSTS_ON_PAGE)\n\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)\n\n def setUp(self):\n self.guest_client = Client()\n self.author_client = Client()\n self.author_client.force_login(self.author)\n self.non_author_client = Client()\n self.non_author_client.force_login(self.non_author)\n\n def test_pages_uses_correct_template_authorized(self):\n \"\"\"The author gets the expected templates from posts views.\"\"\"\n template_pages = {\n 'posts/index.html': INDEX_PAGE,\n 'posts/group_list.html': GROUP_POSTS,\n 'posts/post_detail.html': POST_PAGE,\n 'posts/profile.html': AUTHOR_POSTS,\n 'posts/create_post.html': POST_CREATE_PAGE,\n }\n for template, reverse_name in template_pages.items():\n with self.subTest(reverse_name=reverse_name):\n response = self.author_client.get(reverse_name)\n self.assertTemplateUsed(response, template)\n\n def test_post_edit_correct_template_non_author(self):\n \"\"\"A non-author does not get the expected edit template from posts views.\"\"\"\n template_pages = {'posts/create_post.html':\n POST_EDIT_PAGE,\n }\n for template, reverse_name in template_pages.items():\n with self.subTest(reverse_name=reverse_name):\n response = self.non_author_client.get(reverse_name)\n self.assertTemplateNotUsed(response, template)\n\n def test_create_show_correct_context(self):\n \"\"\"Check the forms on the post creation page.\"\"\"\n response = self.author_client.get(POST_CREATE_PAGE)\n form_field = {\n 'text': forms.fields.CharField,\n 'group': forms.fields.ChoiceField,\n 'image': forms.fields.ImageField,\n }\n for value, expected in form_field.items():\n with self.subTest(value=value):\n form_field = response.context.get('form').fields.get(value)\n self.assertIsInstance(form_field, expected)\n\n def test_edit_show_correct_context(self):\n \"\"\"Check the forms on the post edit page.\"\"\"\n response = self.author_client.get(\n POST_EDIT_PAGE)\n form_field = {\n 'text': forms.fields.CharField,\n 'group': forms.fields.ChoiceField,\n 'image': forms.fields.ImageField,\n }\n for value, expected in form_field.items():\n with self.subTest(value=value):\n form_field = response.context.get('form').fields.get(value)\n self.assertIsInstance(form_field, expected)\n\n def first_object(self, response):\n \"\"\"First object from the posts views context.\"\"\"\n return response.context['page_obj'][0]\n\n def test_index_show_correct_context(self):\n \"\"\"Check the context of the index page.\"\"\"\n response = self.guest_client.get(INDEX_PAGE)\n page_obj = {\n self.post.text: self.first_object(response).text,\n self.post.author: self.first_object(response).author,\n self.post.group: self.first_object(response).group,\n self.post.image: self.first_object(response).image,\n }\n for obj, context in page_obj.items():\n with self.subTest(context=context):\n self.assertEqual(obj, context)\n\n def test_group_list_show_correct_context(self):\n \"\"\"Check the context of the group 
posts page.\"\"\"\n response = self.author_client.get(GROUP_POSTS)\n group = response.context['group']\n page_obj = {\n self.post.text: self.first_object(response).text,\n self.post.author: self.first_object(response).author,\n self.post.group: group,\n self.post.image: self.first_object(response).image,\n }\n for obj, context in page_obj.items():\n with self.subTest(context=context):\n self.assertEqual(obj, context)\n\n def test_wrong_group_list_show_correct_context(self):\n \"\"\"Check that the post is absent from another group's page.\"\"\"\n response = self.guest_client.get(GROUP_POSTS)\n group = response.context['group']\n self.assertNotEqual(group, self.scnd_group)\n\n def test_profile_list_show_correct_context(self):\n \"\"\"Check the context of the author's posts page.\"\"\"\n response = self.author_client.get(AUTHOR_POSTS)\n author = response.context['author']\n context_counter = response.context['count']\n page_obj = {\n self.post.text: self.first_object(response).text,\n self.post.group: self.first_object(response).group,\n self.post.author: author,\n self.post.image: self.first_object(response).image,\n Post.objects.filter(author=self.author).count(): context_counter,\n }\n for obj, context in page_obj.items():\n with self.subTest(context=context):\n self.assertEqual(obj, context)\n\n def test_detail_show_correct_context(self):\n \"\"\"Check the context of the post detail page.\"\"\"\n response = self.guest_client.get(\n reverse('posts:post_detail', kwargs={'post_id': self.post.pk})\n )\n post_obj = response.context['post']\n author = response.context['author']\n context_counter = response.context['post_count']\n comments = response.context['comments'][0]\n page_obj = {\n self.post.text: post_obj.text,\n self.post.author: author,\n self.post.image: post_obj.image,\n self.comment: comments,\n Post.objects.filter(author=self.author).count(): context_counter,\n }\n for obj, context in page_obj.items():\n with self.subTest(context=context):\n self.assertEqual(obj, context)\n\n def test_first_page_contains_ten_records(self):\n \"\"\"First paginator page contains 10 posts.\"\"\"\n url_list = {\n INDEX_PAGE,\n GROUP_POSTS,\n AUTHOR_POSTS,\n }\n for url in url_list:\n with self.subTest(url=url):\n response = self.guest_client.get(url)\n self.assertEqual(\n len(response.context['page_obj']), POSTS_ON_PAGE)\n\n def test_second_page_contains_remaining_records(self):\n \"\"\"Second paginator page contains the remaining posts.\"\"\"\n url_list = {\n INDEX_PAGE + '?page=2',\n GROUP_POSTS + '?page=2',\n AUTHOR_POSTS + '?page=2',\n }\n for url in url_list:\n with self.subTest(url=url):\n response = self.guest_client.get(url)\n self.assertEqual(\n len(response.context['page_obj']),\n self.paginator.count % POSTS_ON_PAGE)\n\n def test_comment_only_authorized(self):\n \"\"\"Check comment creation.\"\"\"\n response = self.author_client.get(POST_PAGE)\n form_field = {\n 'text': forms.fields.CharField,\n }\n for value, expected in form_field.items():\n with self.subTest(value=value):\n form_field = response.context.get('form').fields.get(value)\n self.assertIsInstance(form_field, expected)\n\n def test_index_cache(self):\n \"\"\"Index page caching.\"\"\"\n post_count = Post.objects.count()\n self.author_client.delete(POST_PAGE)\n self.assertEqual(Post.objects.count(), post_count)\n\n\nclass FollowTest(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.author = User.objects.create_user(username='author')\n cls.user = User.objects.create_user(username='non_author')\n cls.post = 
Post.objects.create(\n text='Test text',\n author=cls.author,\n )\n cls.following = Client()\n cls.following.force_login(cls.author)\n cls.follower = Client()\n cls.follower.force_login(cls.user)\n\n def test_subscription_available_authorize(self):\n \"\"\"Subscribing is available to an authorized user.\"\"\"\n counter = Follow.objects.filter(\n author=self.author, user=self.user).count()\n self.follower.get(\n reverse('posts:profile_follow', args=[self.author]))\n self.assertEqual(Follow.objects.count(), counter + 1)\n\n def test_unsub_available(self):\n \"\"\"Unsubscribing is available.\"\"\"\n counter = Follow.objects.filter(\n author=self.author, user=self.user).count()\n self.follower.get(\n reverse('posts:profile_unfollow', args=[self.author]))\n self.assertEqual(Follow.objects.count(), counter)\n\n def test_double_following(self):\n \"\"\"Cannot subscribe to the same author twice.\"\"\"\n counter = Follow.objects.filter(\n author=self.author, user=self.user).count()\n Follow.objects.create(\n author=self.author,\n user=self.user,\n )\n self.follower.get(\n reverse('posts:profile_follow', args=[self.author]))\n self.assertEqual(Follow.objects.count(), counter + 1)\n\n def test_self_follow(self):\n \"\"\"Subscribing to yourself.\"\"\"\n counter = Follow.objects.filter(\n author=self.author, user=self.user).count()\n self.follower.get(\n reverse('posts:profile_follow', args=[self.user]))\n self.assertEqual(Follow.objects.count(), counter)\n\n def test_post_follow_page(self):\n \"\"\"The post appears on the follower's page.\"\"\"\n Follow.objects.create(user=self.user, author=self.author)\n response = self.follower.get(reverse('posts:follow_index'))\n self.assertIn(self.post, response.context['page_obj'])\n","repo_name":"SowaSova/hw05_final","sub_path":"yatube/posts/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":13081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"391564130","text":"from autolib import *\r\n\r\ndef f(x1,x2):\r\n return x2+1-1.4*x1*x1,0.3*x1\r\n\r\nax = init_figure(-1.5,1.5,-0.5,0.5)\r\n\r\n\r\nfor k in range(1,3000): # Plot 3000 points\r\n x1 = 4*rand() - 2\r\n x2 = 4*rand() - 2\r\n for ind in range(1,100): # 100 iterations to obtain the points in inv(A)\r\n\r\n if (x1<2)&(x1>-2)&(x2<2) &(x2>-2): # f(x1,x2) included in A=[-2,2]x[-2,2]\r\n x1,x2 = f(x1,x2)\r\n ax.scatter(x1,x2, color = 'red',linewidth=0.1)\r\n\r\npause(10)\r\n\r\n\r\n\r\n\r\n","repo_name":"bigfahma/Robotics---Modelisation-control-and-simulation-State-space-approach-AutoMOOC","sub_path":"HenonExo4.py","file_name":"HenonExo4.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"25685566961","text":"from datetime import datetime\n\nfrom flask import Blueprint, render_template, request, url_for, g, flash\nfrom werkzeug.utils import redirect\n\nfrom .. 
import db\nfrom ..forms import QuestionForm, AnswerForm\nfrom ..models import Question, Answer, User\n\nfrom pybo.views.auth_views import login_required\n\nbp = Blueprint(\"question\",__name__, url_prefix=\"/question\")\n#질문과 관련한거 관리하는 파일\n\n@bp.route('/list/')\ndef _list():\n #print(\"#################\",request.args.get)\n page = request.args.get('page', type=int, default=1) #페이지 클릭으로 접근시 사용\n kw = request.args.get('kw', type=str, default='') #검색시 keyword 가져오는 인자\n question_list = Question.query.order_by(Question.create_date.desc())\n if kw:\n search = '%%{}%%'.format(kw)\n sub_query = db.session.query(Answer.question_id, Answer.content, User.username).join(User, Answer.user_id == User.id).subquery()\n question_list = question_list.join(User).outerjoin(sub_query, sub_query.c.question_id == Question.id).filter(Question.subject.ilike(search) | # 질문제목\n Question.content.ilike(search) | # 질문내용\n User.username.ilike(search) | # 질문작성자\n sub_query.c.content.ilike(search) | # 답변내용\n sub_query.c.username.ilike(search) # 답변작성자\n ) \\\n .distinct()\n question_list = question_list.paginate(page=page, per_page=10) #db 객체에 paginate 등 지정 가능\n print(\"#################\",question_list.has_prev)\n return render_template('question/question_list.html', question_list=question_list, page=page, kw=kw) #render template은 맨처음엔 넘겨줄 html 파일 정의하고 뒤에는 인자 전달\n\n@bp.route(\"/detail//\") #/다음에 uri로 다루는 파트는 무조건 아래 함수가 파라미터로 받아야함\ndef detail(question_id):\n form = AnswerForm()\n question = Question.query.get_or_404(question_id)\n return render_template(\"question/question_detail.html\", question=question, form=form) #question 페이지에 렌더링\n\n@bp.route(\"/create/\", methods=(\"GET\",\"POST\"))\n@login_required #로그인 되어 있는지 확인해주는 함수\ndef create():\n form = QuestionForm()\n #print(\"########ASDFHJSHDGA!#%@$^%\",request.form[\"content\"]) #글 생성하는 함수\n if request.method == \"POST\" and form.validate_on_submit(): # 요청 방식이 post고 양식이 유효하다면 ~\n question = Question(subject=form.subject.data, content=form.content.data, create_date=datetime.now()) #form의 내용들을 퀘스쳔 객체로 생성\n db.session.add(question) #퀘스쳔 항목을 db에 올리고\n db.session.commit() #확정시킴\n return redirect(url_for(\"main.index\")) #글 썼으니 메인 인덱스로 리다이렉트 하면 다시 퀘스쳔 리스트로 리다이렉트됨\n return render_template(\"question/question_form.html\",form=form) #유효하지 않으면 퀘스쳔 폼 html로 감 퀘스쳔 폼 html은 에러 코드 띄우는 곳\n\n@bp.route(\"/modify/\", methods=(\"GET\", \"POST\"))\n@login_required\ndef modify(question_id):\n print(\"#################\",request.method)\n question = Question.query.get_or_404(question_id)\n if g.user != question.user: #수정 권한이 있는지 먼저 검증\n flash(\"수정권한이 없습니다\")\n return redirect(url_for(\"question.detail\", question_id=question_id))\n if request.method == \"POST\": #방법이 포스트라면 또 수정 권한이 있다면? 위의 if를 뚫으려면 수정권한이 있어야 함\n form = QuestionForm() \n if form.validate_on_submit(): #질문 양식 준수하는가 validate_on_submit은 페이지가 준수하는지 확인하는 과정, FlaskForm 내재 패키지 메서드\n form.populate_obj(question) #퀘스쳔폼 객체 업데이트 하는 역할, add랑 비슷\n question.modify_date = datetime.now()\n db.session.commit() #업데이트 된 객체 커밋\n return redirect(url_for(\"question.detail\",question_id=question_id))\n else:\n form = QuestionForm(obj=question) #request 방식이 포스트가 아니라면? 
=> 아 수정하러 들어온거면 수정할 수 있도록 html 파일 제공\n return render_template(\"question/question_form.html\",form=form)\n\n@bp.route('/delete/')\n@login_required #로그인 되어있나\ndef delete(question_id):\n question = Question.query.get_or_404(question_id) #퀘스쳔 뭐 가져오삼\n if g.user != question.user:\n flash('삭제권한이 없습니다')\n return redirect(url_for('question.detail', question_id=question_id)) #권한 없으면 탈락\n db.session.delete(question) #이 질문 삭제하삼\n db.session.commit() #커밋 ㄱㄱ\n return redirect(url_for('question._list'))\n\n@bp.route('/vote//')\n@login_required #로그인 되어잇?\ndef vote(question_id):\n _question = Question.query.get_or_404(question_id) #퀘스쳔 객체 가져오삼, 퀘스쳔 데이터 가져와\n if g.user == _question.user:\n flash('본인이 작성한 글은 추천할수 없습니다')\n else:\n _question.voter.append(g.user)\n db.session.commit()\n return redirect(url_for('question.detail', question_id=question_id))\n\n","repo_name":"dshinek/pybo","sub_path":"pybo/views/question_views.py","file_name":"question_views.py","file_ext":"py","file_size_in_byte":5392,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"72947007974","text":"from aws_cdk import (\n aws_ec2 as ec2,\n)\n\nfrom constructs import Construct\n\n\nmy_ip=\"84.85.157.1/32\"\n\nclass webvpc_sg_construct(Construct):\n\n def __init__(self, scope: Construct, construct_id: str, vpc, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # Create and configure webserver Security Group\n self.webvpc_sg = ec2.SecurityGroup(\n self, \"webvpc_sg\",\n vpc=vpc,\n allow_all_outbound=True,\n )\n\n # add rule to allow inbound SSH from only admin server,\n self.webvpc_sg.connections.allow_from(ec2.Peer.ipv4(\"10.20.0.0/16\"), ec2.Port.tcp(22))\n\n\nclass adminvpc_sg_construct(Construct):\n\n def __init__(self, scope: Construct, construct_id: str, vpc, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # Create and configure manageserver Security Group\n self.adminvpc_sg = ec2.SecurityGroup(\n self, \"adminvpc_sg\",\n vpc=vpc,\n allow_all_outbound=True,\n )\n\n ## add inbound rules for the Managevpc SG\n\n # add rule for allow inbound SSH traffic from my IP\n self.adminvpc_sg.add_ingress_rule(\n peer=ec2.Peer.ipv4(my_ip),\n connection=ec2.Port.tcp(22),\n description=\"Allow all SSH traffic from my IP\",\n )\n\n self.adminvpc_sg.add_ingress_rule(\n peer=ec2.Peer.ipv4(my_ip),\n connection=ec2.Port.tcp(3389),\n description=\"Allow all RDP traffic from my IP\",\n )\n\n self.adminvpc_sg.add_ingress_rule(\n peer=ec2.Peer.any_ipv4(),\n connection=ec2.Port.tcp(80),\n description=\"Allow all HTTP traffic from my anywhere\",\n )\n\n self.adminvpc_sg.add_ingress_rule(\n peer=ec2.Peer.any_ipv4(),\n connection=ec2.Port.tcp(443),\n description=\"Allow all HTTPS traffic from my anywhere\",\n )","repo_name":"TechGrounds-Cloud8/cloud8-Killian97","sub_path":"project_v1_1/project_v1_1/project_v1_1/sg_construct.py","file_name":"sg_construct.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"24101826691","text":"# 22.08.22\n\n# 둘째 줄부터 N개의 줄에 공간의 상태가 주어진다.\n# 공간의 상태는 0, 1, 2, 3, 4, 5, 6, 9로 이루어져 있고, 아래와 같은 의미를 가진다.\n# 0: 빈 칸\n# 1, 2, 3, 4, 5, 6: 칸에 있는 물고기의 크기\n# 9: 아기 상어의 위치\n\nfrom collections import deque\nINF = 1e9\n\nN = int(input())\n\nsea = []\nfor i in range(N):\n sea.append(list(map(int, input().split())))\n\nnow_size = 2\nnow_x, now_y = 0, 0\n\nfor i in range(N):\n for j in range(N):\n if sea[i][j] == 9:\n now_x, now_y = i, j\n 
sea[now_x][now_y] = 0\n\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\n\n# BFS일 것 같지...?\n# 짧은 거리\n# 고려사항 : 작은 거나 같은 건 지나갈 수 있음!!!!\ndef shortest():\n dist = [[-1]*N for _ in range(N)]\n q = deque([(now_x, now_y)])\n dist[now_x][now_y] = 0\n while q:\n x, y = q.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0<=nx and nx= now_size:\n now_size += 1\n ate = 0\n","repo_name":"yogjesi/agrt","sub_path":"BOJ/BOJ_16236_아기_상어_G3.py","file_name":"BOJ_16236_아기_상어_G3.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"7732878257","text":"import json\nimport time\n\nimport flask\nimport torch\nfrom flask import Flask, json, request\nfrom flask_cors import CORS\n\nimport nemo.collections.nlp as nemo_nlp\nfrom nemo.utils import logging\n\nMODELS_DICT = {}\n\nmodel = None\napi = Flask(__name__)\nCORS(api)\n\n\ndef initialize(config_file_path: str):\n \"\"\"\n Loads 'language-pair to NMT model mapping'\n \"\"\"\n __MODELS_DICT = None\n\n logging.info(\"Starting NMT service\")\n logging.info(f\"I will attempt to load all the models listed in {config_file_path}.\")\n logging.info(f\"Edit {config_file_path} to disable models you don't need.\")\n if torch.cuda.is_available():\n logging.info(\"CUDA is available. Running on GPU\")\n else:\n logging.info(\"CUDA is not available. Defaulting to CPUs\")\n\n # read config\n with open(config_file_path) as f:\n __MODELS_DICT = json.load(f)\n\n if __MODELS_DICT is not None:\n for key, value in __MODELS_DICT.items():\n logging.info(f\"Loading model for {key} from file: {value}\")\n if value.startswith(\"NGC/\"):\n model = nemo_nlp.models.machine_translation.MTEncDecModel.from_pretrained(model_name=value[4:])\n else:\n model = nemo_nlp.models.machine_translation.MTEncDecModel.restore_from(restore_path=value)\n if torch.cuda.is_available():\n model = model.cuda()\n MODELS_DICT[key] = model\n else:\n raise ValueError(\"Did not find the config.json or it was empty\")\n logging.info(\"NMT service started\")\n\n\n@api.route('/translate', methods=['GET', 'POST', 'OPTIONS'])\ndef get_translation():\n try:\n time_s = time.time()\n langpair = request.args[\"langpair\"]\n src = request.args[\"text\"]\n do_moses = request.args.get('do_moses', False)\n if langpair in MODELS_DICT:\n if do_moses:\n result = MODELS_DICT[langpair].translate(\n [src], source_lang=langpair.split('-')[0], target_lang=langpair.split('-')[1]\n )\n else:\n result = MODELS_DICT[langpair].translate([src])\n\n duration = time.time() - time_s\n logging.info(\n f\"Translated in {duration}. 
Input was: {request.args['text']} <############> Translation was: {result[0]}\"\n )\n res = {'translation': result[0]}\n response = flask.jsonify(res)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n else:\n logging.error(f\"Got the following langpair: {langpair} which was not found\")\n except Exception as ex:\n res = {'translation': str(ex)}\n response = flask.jsonify(res)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return res\n\n\nif __name__ == '__main__':\n initialize('config.json')\n api.run(host='0.0.0.0')\n","repo_name":"NVIDIA/NeMo","sub_path":"tools/nmt_webapp/nmt_service.py","file_name":"nmt_service.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":8538,"dataset":"github-code","pt":"9"} +{"seq_id":"73301313252","text":"# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = 'Basana'\ncopyright = '2022, Gabriel Martin Becedillas Ruiz'\nauthor = 'Gabriel Martin Becedillas Ruiz'\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\n 'sphinx.ext.duration',\n 'sphinx.ext.doctest',\n 'sphinx.ext.autodoc',\n 'sphinx_rtd_theme',\n]\n\ntemplates_path = ['_templates']\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\nautodoc_typehints = 'description'\nautodoc_typehints_format = 'short'\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = 'alabaster'\nhtml_static_path = ['_static']\nhtml_theme_options = {\n 'github_user': 'gbeced',\n 'github_repo': 'basana',\n 'github_banner': 'false',\n 'github_type': 'star',\n 'github_count': 'true',\n}\n","repo_name":"gbeced/basana","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":234,"dataset":"github-code","pt":"9"} +{"seq_id":"8870142847","text":"import numpy as np\ndef lcsAux(A : str, B : str, i : int, j : int):\n if i == len(A) or j == len(B):\n return 0\n elif A[i] == B[j]:\n return 1 + lcsAux(A,B,i+1,j+1)\n else:\n return max( lcsAux(A,B,i+1,j), lcsAux(A,B,i,j+1))\n\ndef lcs(A : str, B : str) :\n return lcsAux(A,B,0,0)\n\ndef lcsDP(A:str,B:str):\n a,b=len(A)+1,len(B)+1\n resultado=np.zeros((a,b))\n for i in range (1,a):\n for j in range (1,b):\n if A[i-1]==B[j-1]:\n resultado[i][j]=1+resultado[i-1][j-1]\n else:\n resultado[i][j]=max(resultado[i-1][j],resultado[i][j-1],)\n return resultado[a-1][b-1]\n\ndef lcsDP_DevolvientdoCadena(A:str,B:str):\n a,b=len(A)+1,len(B)+1\n resultado=np.zeros((a,b))\n for i in range (1,a):\n for j in range (1,b):\n if A[i-1]==B[j-1]:\n resultado[i][j]=1+resultado[i-1][j-1]\n else:\n resultado[i][j]=max(resultado[i-1][j],resultado[i][j-1],)\n i,j=a-1,b-1\n cadena=\"\"\n while i != 0 or j != 0:\n if A[i-1]==B[j-1]:\n cadena= A[i-1]+cadena\n i,j=i-1,j-1\n else:\n if i==0:\n i,j=i,j-1\n elif j==0:\n i,j=i-1,j\n else:\n maximo = max(resultado[i,j-1],resultado[i-1,j])\n if resultado[i-1][j]==maximo:\n i,j=i-1,j\n else:\n 
i,j=i,j-1\n return cadena\n\nprint(lcsDP(\"AADCFTGH\",\"ADYCTGWH\"))\nprint(lcsDP_DevolvientdoCadena(\"AADCFTGH\",\"ADYCTGWH\"))","repo_name":"DanielPalacios05/ST0247-02","sub_path":"talleres/taller10/LongCommonSequence.py","file_name":"LongCommonSequence.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"26793123268","text":"import urllib.request\nimport json\nimport sublime, sublime_plugin\n\nsettings = sublime.load_settings(\"html2haml.sublime-settings\")\n\nclass HtmlToHamlFromFileCommand(sublime_plugin.TextCommand):\n\tdef run(self, edit):\n\t\tsource = self.view.file_name()\n\t\tif source.endswith(\".erb\"):\n\t\t\ttarget = source.replace('.erb', '.haml')\n\t\tif source.endswith(\".html\"):\n\t\t\ttarget = source + '.haml'\n\t\tif target:\n\t\t\twith open(source, 'r') as f:\n\t\t\t\thtml = f.read()\n\t\t\thaml = HTHTools.post_html_return_haml(html)\n\t\t\tif haml != None:\n\t\t\t\twith open(target, 'w') as f:\n\t\t\t\t\tf.write(haml)\n\t\t\t\tself.view.window().open_file(target)\n\n\tdef is_enabled(self):\n\t\treturn True #return (self.view.file_name().endswith(\".html\") or self.view.file_name().endswith(\".erb\"))\n\nclass HtmlToHamlFromSelectionCommand(sublime_plugin.TextCommand):\n\tdef run(self, edit):\n\t\tfor region in self.view.sel():\n\t\t\tif not region.empty():\n\t\t\t\thtml = self.view.substr(region)\n\t\t\t\thaml = HTHTools.post_html_return_haml(html)\n\t\t\t\tif haml != None:\n\t\t\t\t\tself.view.replace(edit, region, haml)\n\n\tdef is_enabled(self):\n\t\treturn True #return self.view.file_name().endswith(\".haml\")\n\nclass HtmlToHamlFromClipboardCommand(sublime_plugin.TextCommand):\n\tdef run(self, edit):\n\t\thtml = sublime.get_clipboard()\n\t\thaml = HTHTools.post_html_return_haml(html)\n\t\tif haml != None:\n\t\t\tfor region in self.view.sel():\n\t\t\t\tself.view.replace(edit, region, haml)\n\n\tdef is_enabled(self):\n\t\treturn True #return self.view.file_name().endswith(\".haml\")\n\nclass HTHTools:\n\t@classmethod\n\tdef post_html_return_haml(self, html):\n\t\thost = 'http://html2haml.herokuapp.com/api.json'\n\t\toptions = {}\n\t\tif settings.get(\"html_style_attributes\", False):\n\t\t\toptions['html_style_attributes'] = \"true\"\n\t\tif settings.get(\"ruby19_style_attributes\", False):\n\t\t\toptions['ruby19_style_attributes'] = \"true\"\n\t\tdata = { 'page': {'html': html}, 'options': options }\n\t\tdata_json = json.dumps(data)\n\t\tdata_json = data_json.encode('utf-8')\n\t\treq = urllib.request.Request(host, data_json, {'content-type': 'application/json'})\n\t\tresponse_stream = urllib.request.urlopen(req)\n\t\tresult = json.loads(response_stream.read().decode(\"utf-8\"))\n\n\t\tif result[\"page\"]:\n\t\t\treturn result[\"page\"][\"haml\"]\n\t\telse:\n\t\t\treturn None\n","repo_name":"pachkovsky/sublime-html-to-haml","sub_path":"html2haml.py","file_name":"html2haml.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"9"} +{"seq_id":"31843915771","text":"from __future__ import print_function\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import input\nfrom builtins import str\nfrom builtins import range\nimport os, shutil, sys\nimport json, pickle\nimport argparse\nimport configparser\nfrom advprint import AdvPrint\n\nclass Info(dict):\n analyses = list()\n paths = dict()\n files = dict()\n flags = dict()\n parameters = dict()\n 
identifiers = dict() # Contains map from all used identifiers to all booked event files. Together with function \"bookEvent\" makes sure that no identifier is booked twice\n\n @classmethod\n def init(cls,config_paths): \n cls.identifiers = dict()\n # Standard flag values\n paths = config_paths\n flags = dict()\n flags['fullcls'] = False\n flags['likelihood'] = False\n flags['likelihoodRootmethod'] = False\n flags['no_mc_stat_err'] = False\n flags['tempmode'] = False\n flags['quietmode'] = False\n flags['skipanalysis'] = False\n flags['skipparamcheck'] = False\n flags['skippythia'] = False\n flags['skipevaluation'] = False\n flags['controlregions'] = False\n flags['run_atlas_analyses'] = False\n flags['run_cms_analyses'] = False\n flags['write_delphes_events'] = False \n flags['write_pythia_events'] = False \n flags['eff_tab'] = False\n flags['zsig'] = False\n flags['mg5'] = False\n parameters = dict()\n parameters['outputexists'] = \"ask\"\n parameters['bestcls'] = 0\n parameters['randomseed'] = 0\n parameters['invisiblePIDs'] = []\n parameters['longlivedPIDs'] = []\n parameters['EventResultFileColumns'] = ['analysis', 'sr', 'signal_normevents', 'signal_err_tot']\n parameters['ProcessResultFileColumns'] = ['analysis', 'sr', 'signal_normevents', 'signal_err_tot']\n parameters['TotalEvaluationFileColumns'] = ['analysis', 'sr', 'o', 'b', 'db', 's', 'ds', 's95obs', 's95exp', 'robscons', 'rexpcons']\n parameters['BestPerAnalysisEvaluationFileColumns'] = ['analysis', 'sr', 'o', 'b', 'db', 's', 'ds', 's95obs', 's95exp', 'robscons', 'rexpcons']\n parameters['statcomb'] = \"none\"\n \n cls.analysis_groups = {\n \"ATLAS_7TeV\",\n \"ATLAS_8TeV\",\n \"ATLAS_13TeV\",\n \"ATLAS_14TeV_HighLumi\",\n \"CMS_7TeV\",\n \"CMS_8TeV\",\n \"CMS_13TeV\",\n \"CMS_14TeV_HighLumi\" \n }\n cls.experiments = {\n \"atlas\",\n \"atlas7tev\",\n \"atlas8tev\",\n \"atlas13tev\",\n \"atlas14tev_projected\",\n \"atlas14tev_hl_flatbtagger\",\n \"cms\",\n \"cms7tev\",\n \"cms8tev\",\n \"cms13tev\",\n \"cms14tev_projected\"\n }\n cls.detector_setups = {\n \"ATLAS\": {\n 7.0: {\"default\": (\"atlas7tev\", \"ATLAS_7TeV\")},\n 8.0: {\"default\": (\"atlas8tev\", \"ATLAS_8TeV\")},\n 13.0: {\"default\": (\"atlas13tev\", \"ATLAS_13TeV\")},\n 14.0: {\n \"projected\": (\"atlas14tev_projected\", \"ATLAS_14TeV_HighLumi\"),\n \"hl_flatbtagger\": (\"atlas14tev_hl_flatbtagger\", \"ATLAS_14TeV_HighLumi\")\n },\n },\n \"CMS\": {\n 7.0: {\"default\": (\"cms7tev\", \"CMS_7TeV\")},\n 8.0: {\"default\": (\"cms8tev\", \"CMS_8TeV\")},\n 13.0: {\"default\": (\"cms13tev\", \"CMS_13TeV\")},\n 14.0: {\"projected\": (\"cms14tev_projected\", \"CMS_14TeV_HighLumi\")},\n }\n }\n cls.analysis_handlers = {\n \"atlas\": \"AnalysisHandlerATLAS\",\n \"atlas7tev\": \"AnalysisHandlerATLAS_7TeV\",\n \"atlas8tev\": \"AnalysisHandlerATLAS_8TeV\",\n \"atlas13tev\": \"AnalysisHandlerATLAS_13TeV\",\n \"atlas14tev_projected\": \"AnalysisHandlerATLAS_14TeV_projected\",\n \"atlas14tev_hl_flatbtagger\": \"AnalysisHandlerATLAS_14TeV_HL_FlatBtagger\",\n \"cms\": \"AnalysisHandlerCMS\",\n \"cms7tev\": \"AnalysisHandlerCMS_7TeV\",\n \"cms8tev\": \"AnalysisHandlerCMS_8TeV\",\n \"cms13tev\": \"AnalysisHandlerCMS_13TeV\",\n \"cms14tev_projected\": \"AnalysisHandlerCMS_14TeV_projected\",\n }\n cls.paths = paths\n cls.used_experiments = set()\n cls.config = configparser.RawConfigParser()\n cls.flags = flags \n cls.parameters = parameters \n \n @classmethod\n def save(cls, filename):\n \"\"\" Stores the current status of this instance in a file \"\"\"\n contents = dict()\n 
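# collect the JSON-serializable state here; the identifier map is pickled separately below\n        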
contents[\"analyses\"] = Info.analyses\n contents[\"paths\"] = Info.paths\n contents[\"files\"] = Info.files\n contents[\"flags\"] = Info.flags\n contents[\"parameters\"] = Info.parameters\n #contents[\"identifiers\"] = pickle.dumps(Info.identifiers)\n with open(filename, \"w\") as f:\n json.dump(contents, f, indent=2)\n with open(filename[:-1]+\"raw\", \"wb\") as f:\n pickle.dump(Info.identifiers, f)\n \n @classmethod\n def load(cls, filename):\n \"\"\" Loads contents for current instance from a valid file \"\"\"\n with open(filename, \"r\") as f:\n contents = json.load(f) \n try:\n cls.analyses = contents[\"analyses\"]\n cls.paths = contents[\"paths\"]\n cls.files = contents[\"files\"]\n cls.flags = contents[\"flags\"]\n cls.parameters = contents[\"parameters\"]\n #cls.identifiers = pickle.loads(contents[\"identifiers\"])\n except KeyError:\n AdvPrint.cerr_exit(\"Problem loading info.j file \"+inputfile)\n with open(filename[:-1]+\"raw\", \"rb\") as f:\n cls.identifiers = pickle.load(f)\n \n \n @classmethod \n def parse_arguments(cls, emptyparser=False): \n parser = argparse.ArgumentParser(description='CheckMATE takes an arbitrary set of eventfiles (in .hepmc, .hep or lhe format), processes them with Delphes and analyzes the output with a particular analysis to be chosen from a given subset. It is then tested how well the given event files is in agreement with the current results of the corresponding collider experiment in order to get an estimate, how strong this model is already excluded by experimental data..')\n parser.add_argument('-n', '--name', dest='name', default=\"CheckMATE_run\", help='name for the run') \n parser.add_argument('-a', '--analysis', dest='analysis', default=\"atlas8TeV,cms8TeV\", help='Analysis/es to be applied on the processed event files')\n parser.add_argument('-p', '--process', dest='process', default=\"process\", type=str, help=\"Process identifier. When combining, events from equal/different processes are averaged/summed.\")\n parser.add_argument('-maxev', '--max-events', dest='maxevents', default=-1, type=int, help=\"Maximum number of events to be simulated and/or analysed.\")\n \n parser.add_argument('-xs', '--xsect', dest='xsect', type=str, default=\"\", help=\"Cross section for the given event. Example format: 1.73 FB. \")\n parser.add_argument('-xse', '--xsecterr', dest='xsecterr', type=str, default=\"\", help=\"Cross section error for the given event;. Example format: 0.01*FB or 10.4 %%. 
\") \n parser.add_argument('-xsth', '--xsectthreshold', dest='xsthresh', type=str, default=\"\", help=\"Only in MG5+Pythia8: Set result to 0 if parton xs is smaller than this.\") \n parser.add_argument('-kf', '--kfactor', dest='kfactor', type=str, default=\"\", help=\"K-factor for the given event.\")\n parser.add_argument('-ev', '--events', dest='events', default=\"\", type=str, help='List of event files to be analysed (.hepmc, .hep, .lhe), separated by semicolons')\n \n parser.add_argument('-slha', '--slha-file', dest='slhafile', default=\"\", help='slha file (for pythia).') \n parser.add_argument('-invpids', '--invisible-pids', dest='invpids', default=\"\", help='PIDs for BSM particles that should be considered invisible for the detector (separate with ,)') \n parser.add_argument('-llpids', '--longlived-pids', dest='llpids', default=\"\", help='PIDs for BSM particles that should be considered as long lived (separate with,)') \n parser.add_argument('-mgcommand', '--madgraph-command', dest='mgcommand', default=\"\", type=str, help='Process commands to be used by MG5_aMC@NLO (e.g. \"import model mssm; generate p p > go go\")')\n parser.add_argument('-mgproc', '--madgraph-process', dest='mgprocess', default=\"\", type=str, help='Process card to be generated by MG5_aMC@NLO')\n parser.add_argument('-mgparam', '--madgraph-param', dest='mgparam', default=\"\", type=str, help='Commands for MadGraph5 (param card)')\n parser.add_argument('-mgrun', '--madgraph-run', dest='mgrun', default=\"\", type=str, help='Run card for MadGraph5')\n parser.add_argument('-mgconfig', '--madgraph-config', dest='mgconfig', default=\"\", type=str, help='Config card for MadGraph5')\n parser.add_argument('-pyp', '--pythia-process', dest='pyprocess', default=\"\", type=str, help='Process to be generated by Pythia8')\n parser.add_argument('-pyc', '--pythia-card', dest='pycard', default=\"\", type=str, help='Input card used by Pythia8')\n parser.add_argument('-pyr', '--pythia-rndm', dest='pyrndm', default=\"\", type=str, help='Binary generator random state for Pythia8')\n \n parser.add_argument('-wp8', '--write-pythia8', dest='writepythia8', action='store_true', help='If set, pythia .hepmc files are stored.')\n parser.add_argument('-wd', '--write-delphes', dest='writedelphes', action='store_true', help='If set, delphes .root files are stored.') \n \n parser.add_argument('-cls', '--full-cls', dest='fullcls', action='store_true', help=\"Evaluate full CLs to the evaluated number of signal events (instead of just comparing to 95 percent limit).\")\n parser.add_argument('-bcls', '--best-cls', dest='bestcls', type=int, help=\"Evaluates CLs of the best signal region determined by r-value test.\")\n parser.add_argument('-likeli', '--likelihood', dest='likelihood', action='store_true', help=\"Evaluate likelihood for each signal region using the MC-approach (and sum all signal regions).\")\n parser.add_argument('-likelirm', '--likelihoodRootmethod', dest='likelihoodRootmethod', action='store_true', help=\"Evaluate likelihood for each signal region using the root method approach (and sum all signal regions).\")\n parser.add_argument('-mcstats_off', '--no_mc_stat_err', dest='no_mc_stat_err', action='store_true', help=\"Do not include Monte Carlo statistical error in total uncertainty\")\n parser.add_argument('-eff_tab', '--eff_tab', dest='eff_tab', action='store_true', help=\"Creates efficiency tables for every signal region in each analysis run\")\n parser.add_argument('-zsig', '--zsig', dest='zsig', action='store_true', 
help=\"Evaluate signal significance in a fully frequentist fashion.\")\n \n parser.add_argument('-erfc', '--event-result-file-columns', dest='erfc', type=str, default=\"\", help=\"Columns that should be stored in event-wise result files\")\n parser.add_argument('-prfc', '--process-result-file-columns', dest='prfc', type=str, default=\"\", help=\"Columns that should be stored in process-wise result files\")\n parser.add_argument('-bpaefc', '--bestperanalysis-evaluation-file-columns', dest='bpaefc', type=str, default=\"\", help=\"Columns that should be stored in best-per-analysis evaluation file\")\n parser.add_argument('-tefc', '--total-evaluation-file-columns', dest='tefc', type=str, default=\"\", help=\"Columns that should be stored in final evaluation file\")\n \n \n parser.add_argument('-od', '--outdir', dest='odir', default=cls.paths['results'], help='Directory where the results should be saved (default: '+cls.paths['results']+').')\n parser.add_argument('-oe', '--output-exists', dest='output_exists', default=\"ask\", type=str, help=\"What to do if output already exists. overwrite will delete existing output and overwrite it with the new results. add will add the current results to the old ones. In any other case, a prompt will ask you.\") \n parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', help='Suppresses all output (sets automatially).')\n parser.add_argument('-sp', '--skip-paramcheck', dest='force', action='store_true', help=\"Skip startup parameter check.\") \n parser.add_argument('-sa', '--skip-analysis', dest='skipanalysis', action='store_true', help='Skips analysis step (e.g. to only produce a Delphes root file.')\n parser.add_argument('-spy', '--skip-pythia', dest='skippythia', action='store_true', help='Skpis Pythia step. Delphes will work with the provided LHE file.')\n parser.add_argument('-se', '--skip-evaluation', dest='skipevaluation', action='store_true', help='Skips evaluation step.')\n parser.add_argument('-cr', '--control-regions', dest='controlregions', action='store_true', help='Analyses control regions instead of signal regions. Sets -se automatically.')\n parser.add_argument('-rs', '--random-seed', dest='randomseed', type=int, default=0, help='Chooses fixed seed for random number generator. 0 chooses a random seed automatically.') \n parser.add_argument('-mb', '--multibin', dest='statcomb', default=\"none\", type=str, help='Whether to perform multibin fit.') \n \n # Parse arguments and set return parameters\n if emptyparser:\n return parser.parse_args([]) # needed for fill_info_from_file\n else:\n return parser.parse_args()\n \n @classmethod\n def fill_info_from_args(cls, args):\n cls.files[\"slha\"] = args.slhafile\n cls.files[\"py8rndm\"] = args.pyrndm\n cls.paths[\"results\"] = args.odir\n if args.invpids != \"\":\n try:\n cls.parameters['invisiblePIDs'] = [int(x) for x in args.invpids.split(\",\")]\n except ValueError:\n AdvPrint.cerr_exit(\"invisible PIDs are in wrong format. Must be integer numbers, separated by ','\")\n if args.llpids != \"\":\n try:\n cls.parameters['longlivedPIDs'] = [int(x) for x in args.llpids.split(\",\")]\n except ValueError:\n AdvPrint.cerr_exit(\"long-lived PIDs are in wrong format. 
Must be integer numbers, separated by ','\") \n cls.parameters['randomseed'] = args.randomseed\n cls.parameters[\"outputexists\"] = args.output_exists\n cls.parameters[\"statcomb\"] = args.statcomb\n if args.force:\n cls.flags[\"skipparamcheck\"] = True\n if args.quiet:\n cls.flags[\"quietmode\"] = True\n if args.skipanalysis:\n cls.flags[\"skipanalysis\"] = True\n if args.skippythia:\n cls.flags[\"skippythia\"] = True\n if args.skipevaluation:\n cls.flags['skipevaluation'] = True \n if args.fullcls:\n cls.flags[\"fullcls\"] = True\n if args.bestcls:\n cls.flags[\"bestcls\"] = int(args.bestcls)\n if args.likelihood:\n cls.flags[\"likelihood\"] = True\n if args.likelihoodRootmethod:\n cls.flags[\"likelihoodRootmethod\"] = True\n if args.no_mc_stat_err:\n cls.flags[\"no_mc_stat_err\"] = True\n if args.eff_tab:\n cls.flags[\"eff_tab\"] = True \n if args.zsig:\n cls.flags['zsig'] = True\n if args.controlregions:\n cls.flags[\"controlregions\"] = True\n if args.writepythia8: \n cls.flags['write_pythia_events'] = True \n if args.writedelphes: \n cls.flags['write_delphes_events'] = True\n if args.tefc != \"\":\n cls.parameters[\"TotalEvaluationFileColumns\"] = args.tefc.split(\",\")\n if args.prfc != \"\":\n cls.parameters[\"ProcessResultFileColumns\"] = args.prfc.split(\",\")\n if args.erfc != \"\":\n cls.parameters[\"EventResultFileColumns\"] = args.erfc.split(\",\")\n if args.bpaefc != \"\":\n cls.parameters[\"BestPerAnalysisEvaluationFileColumns\"] = args.bpaefc.split(\",\") \n cls.make_flags_consistent()\n \n cls.load_analyses(args.analysis)\n output_name = args.name.replace(\" \", \"_\")\n cls.fill_output_paths_and_files(cls.paths['results'], output_name) \n cls.check_info()\n \n @classmethod\n def fill_info_from_parameters(cls):\n args = cls.parse_arguments()\n cls.fill_info_from_args(args)\n \n @classmethod\n def fill_info_from_file(cls, pfile):\n if not os.path.isfile(pfile):\n AdvPrint.cerr_exit(\"Parameter file '\"+pfile+\"' does not exist.\") \n \n Config = configparser.ConfigParser()\n Config.read(pfile)\n sections = Config.sections()\n \n # read parameters, starting with the default from argparse\n args = cls.parse_arguments(True)\n \n # spaces in \"name\" are replaced by underscores to prevent problems in the file handling\n output_name = \"CheckMATE_run\"\n analyses_to_load = \"atlas & 8TeV, cms & 8TeV\"\n cls.files[\"slha\"] = \"\"\n if \"Parameters\" in sections:\n for optional_parameter in Config.options(\"Parameters\"):\n if optional_parameter == \"skipparamcheck\":\n args.force = Config.getboolean(\"Parameters\", \"skipparamcheck\")\n elif optional_parameter == \"outputexists\":\n args.output_exists = Config.get(\"Parameters\", \"outputexists\")\n elif optional_parameter == \"quietmode\":\n args.quiet = Config.getboolean(\"Parameters\", \"quietmode\")\n elif optional_parameter == \"skipanalysis\":\n args.skipanalysis = Config.getboolean(\"Parameters\", \"skipanalysis\")\n elif optional_parameter == \"skippythia\":\n args.skippythia = Config.getboolean(\"Parameters\",\"skippythia\")\n elif optional_parameter == \"skipevaluation\":\n args.skipevaluation = Config.getboolean(\"Parameters\", \"skipevaluation\")\n elif optional_parameter == \"controlregions\":\n args.controlregions = Config.getboolean(\"Parameters\", \"controlregions\")\n elif optional_parameter == \"efftab\":\n args.eff_tab = Config.get(\"Parameters\", \"efftab\")\n elif optional_parameter == \"fullcls\":\n args.fullcls = Config.getboolean(\"Parameters\", \"fullcls\")\n elif optional_parameter == \"bestcls\":\n 
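# integer: number of best signal regions that receive a full CLs evaluation\n                    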
args.bestcls = Config.getint(\"Parameters\", \"bestcls\")\n elif optional_parameter == \"likelihood\":\n args.likelihood = Config.getboolean(\"Parameters\", \"likelihood\")\n elif optional_parameter == \"likelihoodrootmethod\":\n args.likelihoodRootmethod = Config.getboolean(\"Parameters\", \"likelihoodRootmethod\")\n elif optional_parameter == \"no_mc_stat_err\":\n args.no_mc_stat_err = Config.getboolean(\"Parameters\", \"no_mc_stat_err\") \n elif optional_parameter == \"zsig\":\n args.zsig = Config.getboolean(\"Parameters\", \"zsig\")\n elif optional_parameter == \"outputdirectory\":\n args.odir = Config.get(\"Parameters\", \"outputdirectory\")\n elif optional_parameter == \"invisiblepids\":\n args.invpids = Config.get(\"Parameters\", \"invisiblepids\")\n elif optional_parameter == \"longlivedpids\":\n args.llpids = Config.get(\"Parameters\", \"longlivedpids\") \n elif optional_parameter == \"slhafile\":\n args.slhafile = Config.get(\"Parameters\", \"slhafile\")\n elif optional_parameter == \"mgprocess\":\n args.mgprocess = Config.get(\"Parameters\", \"mgprocess\")\n elif optional_parameter == \"randomseed\":\n args.randomseed = Config.getint(\"Parameters\", \"randomseed\")\n elif optional_parameter == \"writepythiaevents\":\n args.writepythia8 = Config.getboolean(\"Parameters\", \"writepythiaevents\")\n elif optional_parameter == \"writedelphesevents\":\n args.writedelphes = Config.getboolean(\"Parameters\", \"writedelphesevents\")\n elif optional_parameter == \"processresultfilecolumns\":\n args.prfc = Config.get(\"Parameters\", \"processresultfilecolumns\")\n elif optional_parameter == \"eventresultfilecolumns\":\n args.erfc = Config.get(\"Parameters\", \"eventresultfilecolumns\")\n elif optional_parameter == \"bestperanalysisresultfilecolumns\":\n args.bpaefc = Config.get(\"Parameters\", \"bestperanalysisresultfilecolumns\")\n elif optional_parameter == \"totalresultfilecolumns\":\n args.tefc = Config.get(\"Parameters\", \"totalresultfilecolumns\")\n elif optional_parameter == \"name\":\n args.name = Config.get(\"Parameters\", \"name\")\n elif optional_parameter == \"analyses\":\n args.analysis = Config.get(\"Parameters\", \"analyses\")\n elif optional_parameter == \"multibin\":\n args.statcomb = Config.get(\"Parameters\", \"multibin\") \n else:\n AdvPrint.cerr_exit(\"Unknown optional parameter '\"+optional_parameter+\"'\") \n cls.fill_info_from_args(args)\n \n @classmethod\n def check_info(cls):\n \"\"\" Checks if info parameters are valid and updates if the output directory already exists \"\"\"\n \n # check if the evaluation columns the user asked for actually exist\n # headers must be imported locally to avoid circular reference\n from evaluator import Evaluator\n from resultcollector import ResultCollector\n pseudoCollector = ResultCollector(\"\", \"\", \"\")\n pseudoEvaluator = Evaluator(pseudoCollector)\n pseudoCollector.line_from_data(cls.parameters['EventResultFileColumns'])\n pseudoCollector.line_from_data(cls.parameters['ProcessResultFileColumns'])\n pseudoEvaluator.line_from_data(cls.parameters['TotalEvaluationFileColumns'])\n pseudoEvaluator.line_from_data(cls.parameters['BestPerAnalysisEvaluationFileColumns'])\n \n _check_outputexists()\n if os.path.isdir(Info.paths['output']) and os.path.isfile(Info.files['internal_info']):\n if Info.parameters[\"outputexists\"] == \"ask\":\n while True:\n AdvPrint.cout(\"Output directory with results already exists!\")\n c = input(\"Choose: (o)verwrite, (a)dd to existing results, (s)top\\n\")\n if c == \"o\":\n 
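# user chose to wipe the existing results directory and start fresh\n                        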
Info.parameters[\"outputexists\"] = \"overwrite\"\n break\n elif c == \"s\":\n exit(1)\n elif c == \"a\":\n Info.parameters[\"outputexists\"] = \"add\"\n break\n if Info.parameters[\"outputexists\"] == \"add\":\n AdvPrint.cout(\" ('add' mode: settings of previous run are used and only new events/processes are added!)\") \n Info.load(Info.files['internal_info'])\n Info.parameters[\"outputexists\"] = \"add\" # might have been overwritten during the loading process\n\n elif os.path.isdir(Info.paths['output']):\n if Info.parameters[\"outputexists\"] == \"ask\":\n while True:\n AdvPrint.cout(\"Output directory with incomplete results already exists!\")\n c = input(\"Choose: (o)verwrite, (s)top\\n\")\n if c == \"o\":\n Info.parameters[\"outputexists\"] = \"overwrite\"\n break\n elif c == \"s\":\n exit(1)\n\n @classmethod\n def fill_process_from_args(cls, args):\n from process import Process\n from events import HEPEvents, HepMCEvents, DelphesEvents, Pythia8Events, MG5Events, LHEEvents\n process = Process(args.process.replace(\" \", \"_\"))\n potentialPy8Events = None \n potentialMG5Events = None \n \n if args.events != \"\":\n lhefilevector = list() # need to be storesd separately as they go in one overall LHEEvents object\n for eventfile in args.events.replace(\";\", \",\").split(\",\"):\n eventfile = Info.check_and_absolutize_file(eventfile)\n ending = eventfile.split(\".\")[-1].lower()\n events = None\n if ending == \"hep\":\n events = HEPEvents(args.process, eventfile)\n elif ending == \"hepmc\":\n events = HepMCEvents(args.process, eventfile)\n elif ending == \"root\":\n events = DelphesEvents(args.process, eventfile)\n elif ending == \"lhe\":\n lhefilevector.append(eventfile)\n else:\n AdvPrint.cerr_exit(\"File ending \"+ending+\" not known!\") \n if events != None:\n process.eventsList.append(events)\n \n if lhefilevector != list():\n process.eventsList.append(LHEEvents(args.process, lhefilevector))\n \n if args.xsect != \"\":\n xsect_str = args.xsect\n if \"*\" in xsect_str:\n xsect_split = xsect_str.split(\"*\")\n else:\n xsect_split = xsect_str.split(\" \") \n if len(xsect_split) != 2:\n AdvPrint.cerr_exit(\"Cross section in invalid format (\"+str(xsect_str)+\")\")\n process.have_xsect = True\n process.xsec = float(xsect_split[0])\n process.xsec_unit = xsect_split[1]\n \n if args.xsecterr != \"\":\n xsecterr_str = args.xsecterr \n if \"*\" in xsecterr_str:\n xsecterr_split = xsecterr_str.split(\"*\")\n else:\n xsecterr_split = xsecterr_str.split(\" \") \n # Note that no unit has to be given if the error is exactly 0\n if len(xsecterr_split) != 2 and float(xsecterr_split[0]) != 0.0: \n AdvPrint.cerr_exit(\"Cross section error in invalid format (\"+str(xsecterr_str)+\")\")\n process.have_xerr = True\n process.xerr = float(xsecterr_split[0])\n process.xerr_unit = xsecterr_split[1]\n \n if args.kfactor != \"\":\n process.have_kfac = True\n process.kfac = float(args.kfactor)\n \n \n \n # first, global check for py8 parameters which require py8 linking and which load the Py8 event\n if args.pyprocess != \"\" or args.pycard != \"\":\n if 'pythia8_lib_path' not in Info.paths or Info.paths['pythia8_lib_path'] == \"\":\n AdvPrint.cerr_exit(\"You cannot generate/shower events with Pythia8 without properly linking CheckMATE to this tool. 
Please restart the CheckMATE-installation routine and use the --with-pythia parameter during the ./configure step!\")\n potentialPy8Events = Pythia8Events(args.process)\n \n if args.pyprocess != \"\": \n potentialPy8Events.set_processString(args.pyprocess)\n \n if args.pycard != \"\":\n potentialPy8Events.set_inFile(args.pycard)\n\n # same for MG5Events\n if args.mgcommand != \"\" or args.mgprocess != \"\" or args.mgparam != \"\" or args.mgrun != \"\" or args.mgconfig != \"\" or args.mgcommand != \"\" or args.xsthresh != \"\":\n if 'mg5_source_path' not in Info.paths or Info.paths['mg5_source_path'] == \"\":\n AdvPrint.cerr_exit(\"You cannot generate events using MG5_aMC@NLO without properly linking CheckMATE to this tool. Please restart the CheckMATE-installation routine and use the --with-madgraph parameter during the ./configure step!\")\n \n if 'pythia8_lib_path' not in Info.paths or Info.paths['pythia8_lib_path'] == \"\":\n AdvPrint.cerr_exit(\"You cannot generate using MG5_aMC@NLO without properly linking CheckMATE to both MG5_aMC@NLO and Pythia8!. Please restart the CheckMATE-installation routine and use the --with-pythia parameter during the ./configure step!\")\n potentialMG5Events = MG5Events(args.process)\n \n if args.mgcommand != \"\":\n potentialMG5Events.set_commandstring(args.mgcommand)\n \n if args.mgprocess != \"\":\n potentialMG5Events.set_proccard(Info.check_and_absolutize_file(args.mgprocess))\n \n if args.mgparam != \"\":\n potentialMG5Events.set_paramcard(Info.check_and_absolutize_file(args.mgparam))\n \n if args.mgrun != \"\":\n potentialMG5Events.set_runcard(Info.check_and_absolutize_file(args.mgrun))\n \n if args.mgconfig != \"\":\n potentialMG5Events.set_configcard(Info.check_and_absolutize_file(args.mgconfig))\n\t \n if args.xsthresh != \"\": \n xsectth_str = args.xsthresh\n if \"*\" in xsectth_str:\n xsectth_split = xsectth_str.split(\"*\")\n else:\n xsectth_split = xsectth_str.split(\" \") \n if len(xsectth_split) != 2:\n AdvPrint.cerr_exit(\"Cross section Threshold in invalid format (\"+str(xsectth_str)+\")\")\n potentialMG5Events.set_xsthr(float(xsectth_split[0]), xsectth_split[1])\n \n if potentialPy8Events != None:\n process.eventsList.append(potentialPy8Events)\n if potentialMG5Events != None:\n process.eventsList.append(potentialMG5Events)\n \n if args.maxevents != \"\":\n for events in process.eventsList: \n events.maxEvents = int(args.maxevents)\n \n process.ecm = cls.parameters[\"ecm\"]\n return process\n \n @classmethod\n def fill_processes_from_parameters(cls):\n args = cls.parse_arguments()\n # command line input only allows for 1 process at a time\n return [cls.fill_process_from_args(args)]\n \n @classmethod\n def fill_processes_from_file(cls, pfile):\n if not os.path.isfile(pfile):\n AdvPrint.cerr_exit(\"Parameter file '\"+pfile+\"' does not exist.\") \n \n Config = configparser.ConfigParser()\n Config.read(pfile)\n sections = Config.sections()\n \n # Remove non-process blocks \n if \"Parameters\" in sections:\n sections.remove(\"Parameters\")\n \n # Set up event information. 
Remaining sections consist of individual processes\n procList = []\n for process_block in sections:\n # translate Config object in args\n \n # first, get template args\n args = cls.parse_arguments(True) \n\n args.process = process_block\n # Check and save event file\n if \"events\" in Config.options(process_block):\n args.events = Config.get(process_block, \"events\").replace(\"\\n\", \",\").replace(\",,\", \",\").replace(\" \", \"\").replace(\"\\\\\", \" \")\n\n # Read cross section in the form \"Num Unit\" or \"Num*Unit\"\n if \"xsect\" in Config.options(process_block):\n args.xsect = Config.get(process_block, \"xsect\")\n if \"xsecterr\" in Config.options(process_block):\n args.xsecterr = Config.get(process_block, \"xsecterr\")\n if \"xsectthreshold\" in Config.options(process_block):\n args.xsthresh = Config.get(process_block, \"xsectthreshold\")\n \n if \"kfactor\" in Config.options(process_block):\n args.kfactor = Config.get(process_block, \"kfactor\")\n \n if \"pythia8process\" in Config.options(process_block):\n args.pyprocess = Config.get(process_block, \"pythia8process\")\n if \"pythia8rndm\" in Config.options(process_block):\n args.pyrndm = Config.get(process_block, \"pythia8rndm\")\n \n if \"pythia8card\" in Config.options(process_block):\n args.pycard = Config.get(process_block, \"pythia8card\")\n\n if \"mgprocess\" in Config.options(process_block):\n args.mgprocess = Config.get(process_block,\"mgprocess\")\n if \"mgcommand\" in Config.options(process_block):\n args.mgcommand = Config.get(process_block,\"mgcommand\")\n if \"mgparam\" in Config.options(process_block):\n args.mgparam = Config.get(process_block,\"mgparam\")\n if \"mgrun\" in Config.options(process_block):\n args.mgrun = Config.get(process_block,\"mgrun\")\n if \"mgconfig\" in Config.options(process_block):\n args.mgconfig = Config.get(process_block,\"mgconfig\")\n if \"maxevents\" in Config.options(process_block):\n args.maxevents = Config.get(process_block, \"maxevents\")\n procList.append(cls.fill_process_from_args(args))\n \n return procList \n\n @classmethod\n def load_analyses(cls, analysis_input_string):\n \"\"\" Uses the input string from the user to load one or more analyses \"\"\" \n analysis_input_string = analysis_input_string.lower() # to avoid capitalisation errors\n tokens = analysis_input_string.split(\",\")\n for token in tokens:\n any_passed = False\n token = token.strip().lower()\n \n for a in cls.analysis_list:\n parameters = cls.get_analysis_parameters(a)\n # to avoid case issues, transform parameters to lower()\n\n passed = True\n\n if token not in parameters[\"experiment\"] and token != a:\n passed = False\n \n if passed:\n any_passed = True\n if \"ecm\" in cls.parameters and cls.parameters[\"ecm\"] != 0.0 and ( abs(float(cls.parameters[\"ecm\"]) - float(parameters[\"ecm\"]))>0.01):\n AdvPrint.cerr_exit(\" You must not load analyses with different center of mass energies!\\nIf you desire to do this, please run CheckMATE separately for each center of mass energy!\")\n cls.parameters[\"ecm\"] = parameters[\"ecm\"]\n cls.analyses.append(a)\n experiment = parameters[\"experiment\"]\n if not experiment in cls.experiments:\n AdvPrint.cerr_exit(\"Unknown experiment: \"+experiment+\" used by analysis\"+a)\n cls.used_experiments.add(experiment)\n if not any_passed:\n AdvPrint.cerr_exit(\"Couldn't find any analyses for '\"+token+\"'.\")\n \n \n @classmethod\n def book_events(cls, events):\n # Registers a process and gives a unique identifier\n from events import Events\n if not isinstance(events, 
Events):\n            AdvPrint.cerr_exit(\"Internal Error while trying to book an event! (event does not seem to be an 'Event')\")\n        same_ident = [ident for ident in cls.identifiers if (events.name == ident or events.name+\"_\" in ident)]\n        if len(same_ident) == 0: # No same id: Just register\n            events.identifier = events.name\n        elif len(same_ident) == 1: # One same id already: rename old 'name_1' and register 'name_2'\n            move_events = cls.identifiers[events.name]\n            cls.identifiers.pop(events.name)\n            # rename old identifier to \"identifier_1\" only if it hasn't been processed yet (to not screw up the match to existing filenames)\n            if not move_events.processed:\n                move_events.identifier = move_events.identifier+\"_event1\"\n            cls.identifiers[move_events.identifier] = move_events\n            events.identifier = events.name+\"_event2\"\n        else: # More: find max of the _X and add _X+1\n            maxnum = max([int(ident.split(\"_event\")[-1]) for ident in same_ident if \"_event\" in ident])\n            events.identifier = events.name+\"_event\"+str(maxnum+1)\n        cls.identifiers[events.identifier] = events\n        return\n    \n    @classmethod\n    def unbook_event(cls, events):\n        \"\"\"Sometimes initially booked events turn out to be false (e.g. double booking in \"add\" mode or when trying to merge .lhe files, which count as 1 event)\"\"\"\n        from events import Events\n        if not isinstance(events, Events):\n            AdvPrint.cerr_exit(\"Internal error while trying to unbook an event!\")\n        same_ident = [ident for ident in cls.identifiers if events.name in ident]\n        if len(same_ident) == 0: # error: the event can only be unbooked if it exists\n            AdvPrint.cerr_exit(\"Internal error: trying to unbook an unbooked event!\")\n        elif len(same_ident) == 1: # Simple: Just remove event from booking list\n            cls.identifiers.pop(events.identifier)\n        elif len(same_ident) == 2: # Remove event; the remaining event does not need an \"_X\" index any more\n            cls.identifiers.pop(events.identifier)\n            other_ident = [ident for ident in same_ident if ident != events.identifier][0]\n            other_event = cls.identifiers[other_ident]\n            if not other_event.processed: # to avoid changing identifiers that already match existing filenames\n                other_event.identifier = other_event.identifier[:other_event.identifier.rfind(\"_\")]\n        else: # remove this event and shift index of all upcoming ones down by 1\n            # suffix of the to-be-removed event\n            remove_index = int(events.identifier[events.identifier.rfind(\"_event\")+6:])\n            # remove the event\n            cls.identifiers.pop(events.identifier)\n            # shift all events which have a higher index down by one (if they haven't been processed yet, that is)\n            for ident in same_ident:\n                if ident == events.identifier:\n                    continue\n                curr_event = cls.identifiers[ident]\n                if not curr_event.processed:\n                    old_index = int(curr_event.identifier[curr_event.identifier.rfind(\"_event\")+6:])\n                    if old_index > remove_index:\n                        new_index = old_index-1\n                        cls.identifiers[ident].identifier = curr_event.identifier[:curr_event.identifier.rfind(\"_event\")]+\"_event\"+str(new_index)\n        return\n\n    @classmethod\n    def add_analysis(cls, analysis_name, analysis_group):\n        \"\"\"\n        Inserts an analysis in the info class\n        \"\"\"\n        if analysis_name in cls.analysis_list:\n            # The analysis is already there. 
Don't add it twice.\n return\n cls.analysis_list.append(analysis_name)\n cls.analysis_group_map[analysis_name] = analysis_group\n cls.files[\"analysis_settings\"][analysis_name] = os.path.join(cls.paths['analysis_info'], analysis_group, \"{}_var.j\".format(analysis_name))\n cls.files[\"evaluation_reference\"][analysis_name] = os.path.join(cls.paths['analysis_info'], analysis_group, \"{}_ref.dat\".format(analysis_name))\n cls.files[\"analysis_source\"][analysis_name] = os.path.join(cls.paths['analysis'], 'src', 'analyses', analysis_group, analysis_name+'.cc')\n cls.files[\"analysis_CR_source\"][analysis_name] = os.path.join(cls.paths['analysis'], 'src', 'analyses', analysis_group, analysis_name+'_CR.cc')\n cls.files[\"analysis_header\"][analysis_name] = os.path.join(cls.paths['analysis'], 'include', 'analyses', analysis_group, analysis_name+'.h')\n\n @classmethod\n def fill_standard_paths_and_files(cls, config_paths=dict()):\n \"\"\"Fills cls.paths with standard paths of the CheckMATE installation\"\"\"\n\n if config_paths != dict() and config_paths['mg5_source_path']:\n cls.paths['mg5_source_path'] = config_paths['mg5_source_path']\n if config_paths != dict() and config_paths['pythia8_lib_path']:\n cls.paths['pythia8_lib_path'] = config_paths['pythia8_lib_path']\n\n cls.paths['checkmate'] = os.path.split(os.path.split(os.path.split(os.path.realpath(__file__))[0])[0])[0]\n \n cls.paths['results'] = os.path.join(cls.paths['checkmate'], 'results')\n cls.paths['tools'] = os.path.join(cls.paths['checkmate'], 'tools')\n cls.paths['data'] = os.path.join(cls.paths['checkmate'], 'data')\n \n cls.paths['analysis'] = os.path.join(cls.paths['tools'], 'analysis')\n cls.paths['fritz'] = os.path.join(cls.paths['tools'], 'fritz')\n \n cls.paths['analysis_info'] = os.path.join(cls.paths['data'], 'analysis_info')\n cls.paths['cards'] = os.path.join(cls.paths['data'], 'cards') \n cls.files['list_of_analyses'] = dict()\n for group in cls.analysis_groups:\n cls.files[\"list_of_analyses\"][group] = os.path.join(cls.paths['analysis_info'], '{}_analyses.txt'.format(group))\n \n cls.files['pythia_settings_template'] = os.path.join(cls.paths['cards'], 'pythia_default_card.in')\n cls.files['pythia_mg5minimal_template'] = os.path.join(cls.paths['cards'], 'pythia_mg5minimal_default_card.in')\n cls.files['pythia_lhe_template'] = os.path.join(cls.paths['cards'], 'pythia_default_LHE_card.in') \n cls.files['mg5_run_template'] = os.path.join(cls.paths['cards'], 'mg5_default_run_card.dat') \n cls.files['me5_configuration_template'] = os.path.join(cls.paths['cards'], 'mg5_default_me5_configuration.txt')\n\n delphes_global_config = dict()\n delphes_global_config[\"atlas\"] = os.path.join(cls.paths['cards'], 'delphes_skimmed_ATLAS.tcl')\n delphes_global_config[\"atlas7tev\"] = os.path.join(cls.paths['cards'], 'delphes_skimmed_ATLAS.tcl')\n delphes_global_config[\"atlas8tev\"] = os.path.join(cls.paths['cards'], 'delphes_skimmed_ATLAS.tcl')\n delphes_global_config[\"atlas13tev\"] = os.path.join(cls.paths['cards'], 'delphes_skimmed_ATLAS_13TeV.tcl')\n delphes_global_config[\"atlas14tev_projected\"] = os.path.join(cls.paths['cards'], 'delphes_skimmed_ATLAS_14TeV.tcl')\n delphes_global_config[\"atlas14tev_hl_flatbtagger\"] = os.path.join(cls.paths['cards'], 'delphes_skimmed_ATLAS_14TeV.tcl')\n delphes_global_config[\"cms\"] = os.path.join(cls.paths['cards'], 'delphes_skimmed_CMS.tcl')\n delphes_global_config[\"cms7tev\"] = os.path.join(cls.paths['cards'], 'delphes_skimmed_CMS.tcl')\n delphes_global_config[\"cms8tev\"] = 
os.path.join(cls.paths['cards'], 'delphes_skimmed_CMS.tcl')\n delphes_global_config[\"cms13tev\"] = os.path.join(cls.paths['cards'], 'delphes_skimmed_CMS_13TeV.tcl')\n delphes_global_config[\"cms14tev_projected\"] = os.path.join(cls.paths['cards'], 'delphes_skimmed_CMS_14TeV.tcl')\n cls.files['delphes_global_config'] = delphes_global_config\n \n cls.files['fritz_bin'] = os.path.join(cls.paths['fritz'], 'bin', \"fritz\")\n \n cls.files['analysis_makefile'] = os.path.join(cls.paths['analysis'], \"Makefile.am\")\n cls.files['analysis_template_source'] = os.path.join(cls.paths['analysis'], 'src', 'base', 'template.cc.raw')\n cls.files['analysis_template_CR_source'] = os.path.join(cls.paths['analysis'], 'src', 'base', 'template_CR.cc.raw')\n cls.files['analysis_template_header'] = os.path.join(cls.paths['analysis'], 'include', 'base', 'template.h.raw')\n cls.files['analysis_template_header_cr'] = os.path.join(cls.paths['analysis'], 'include', 'base', 'template_cr.h.raw')\n \n # analysis information\n cls.files['analysis_settings'] = dict()\n cls.files[\"analysis_source\"] = dict()\n cls.files[\"analysis_CR_source\"] = dict()\n cls.files[\"analysis_header\"] = dict()\n cls.files['delphes_config'] = dict()\n cls.files['evaluation_reference'] = dict()\n \n # analysis handler sources\n src_dir = os.path.join(cls.paths[\"fritz\"], \"src\", \"analysishandler\")\n inc_dir = os.path.join(cls.paths[\"fritz\"], \"include\", \"analysishandler\")\n for analysis_handler in list(cls.analysis_handlers.values()):\n cls.files[analysis_handler] = dict()\n cls.files[analysis_handler][\"src\"] = os.path.join(src_dir, \"{}.cc\".format(analysis_handler))\n cls.files[analysis_handler][\"include\"] = os.path.join(inc_dir, \"{}.h\".format(analysis_handler))\n \n analyses, groups = cls.get_all_checkmate_analyses()\n cls.analysis_list = analyses\n cls.analysis_group_map = groups\n for a in cls.analysis_list:\n group = cls.analysis_group_map[a]\n cls.files[\"analysis_settings\"][a] = os.path.join(cls.paths['analysis_info'], group, \"{}_var.j\".format(a))\n cls.files[\"evaluation_reference\"][a] = os.path.join(cls.paths['analysis_info'], group, \"{}_ref.dat\".format(a))\n cls.files[\"analysis_source\"][a] = os.path.join(cls.paths['analysis'], 'src', 'analyses', group, a+'.cc')\n cls.files[\"analysis_CR_source\"][a] = os.path.join(cls.paths['analysis'], 'src', 'analyses', group, a+'_CR.cc')\n cls.files[\"analysis_header\"][a] = os.path.join(cls.paths['analysis'], 'include', 'analyses', group, a+'.h')\n\n\n\n @classmethod\n def fill_output_paths_and_files(cls, odir, oname):\n \"\"\"Fills cls.paths with paths given a particular output directory\"\"\"\n cls.paths['output'] = os.path.join(odir, oname)\n cls.paths['output_delphes'] = os.path.join(cls.paths['output'], \"delphes\")\n cls.paths['output_pythia'] = os.path.join(cls.paths['output'], \"pythia\")\n cls.paths['output_mg5'] = os.path.join(cls.paths['output'], \"mg5amcatnlo\")\n cls.paths['output_internal'] = os.path.join(cls.paths['output'], \"internal\")\n cls.paths['mg5_procs'] = list() # List of generated process directories; will be filled dynamically by each process\n cls.files['mg5_proc_cards'] = list() # List of generated process directories; will be filled dynamically by each process\n cls.paths['output_fritz'] = os.path.join(cls.paths['output'], \"fritz\")\n cls.paths['output_analysis'] = os.path.join(cls.paths['output'], \"analysis\")\n cls.paths['output_evaluation'] = os.path.join(cls.paths['output'], \"evaluation\")\n #cls.files['output_progress'] = 
os.path.join(cls.paths['output'], \"progress.txt\") TODO: Outdated. USed in CM1, but in CM2 there are the internal/var.j files\n \n cls.files['pythia_cards'] = list()\n cls.files['pythia_events'] = list()\n cls.files['delphes_events'] = list()\n \n cls.files['internal_info'] = os.path.join(cls.paths['output_internal'], \"info.j\")\n cls.files['internal_processes'] = os.path.join(cls.paths['output_internal'], \"processes.raw\")\n \n cls.files['delphes_log'] = os.path.join(cls.paths['output_delphes'], \"delphes_output.log\")\n cls.files['pythia_log'] = os.path.join(cls.paths['output_pythia'], \"pythia_output.log\")\n cls.files['analysis_log'] = os.path.join(cls.paths['output_analysis'], \"analysisstdout\")\n cls.files['fritz_log'] = os.path.join(cls.paths['output_fritz'], \"fritz_error.log\")\n cls.files['mg5_log'] = os.path.join(cls.paths['output_mg5'], \"mg5amcatnlo_output.log\")\n \n if cls.flags[\"likelihood\"]:\n cls.files['likelihood'] = os.path.join(cls.paths['output'], \"likelihood.txt\")\n if cls.flags[\"likelihoodRootmethod\"]:\n cls.files['likelihoodRootmethod'] = os.path.join(cls.paths['output'], \"likelihoodrm.txt\")\n cls.files['output_totalresults'] = os.path.join(cls.paths['output'], \"evaluation\", \"total_results.txt\") \n cls.files['output_bestsignalregions'] = os.path.join(cls.paths['output'], \"evaluation\", \"best_signal_regions.txt\")\n cls.files['output_result'] = os.path.join(cls.paths['output'], \"result.txt\")\n \n if cls.flags[\"zsig\"]:\n cls.files['output_result_zsig'] = os.path.join(cls.paths['output'], \"result_Zexp.txt\")\n cls.files['output_bestsignificanceregions'] = os.path.join(cls.paths['output'], \"evaluation\", \"best_significance_regions.txt\") \n \n cls.files['output_evaluation_event_numbers'] = dict()\n cls.files['output_evaluation_r_limits'] = dict()\n if cls.flags[\"fullcls\"]:\n cls.files['output_evaluation_cl_limits'] = dict()\n cls.files['output_evaluation_likelihood'] = dict() \n cls.files['output_evaluation_likelihoodRootmethod'] = dict() \n cls.files['eff_tab'] = dict()\n if cls.flags['zsig']:\n cls.files['output_evaluation_zsig'] = dict()\n if cls.flags[\"likelihood\"]:\n cls.files['output_evaluation_likelihood'] = dict()\n if cls.flags[\"likelihoodRootmethod\"]:\n cls.files['output_evaluation_likelihoodRootmethod'] = dict()\n \n for a in cls.analyses:\n cls.files['output_evaluation_event_numbers'][a] = os.path.join(cls.paths['output'], \"evaluation\", a+\"_event_numbers.txt\")\n cls.files['output_evaluation_r_limits'][a] = os.path.join(cls.paths['output'], \"evaluation\", a+\"_r_limits.txt\")\n if cls.flags[\"fullcls\"]:\n cls.files['output_evaluation_cl_limits'][a] = os.path.join(cls.paths['output'], \"evaluation\", a+\"_cl_limits.txt\")\n cls.files['output_evaluation_likelihood'][a] = os.path.join(cls.paths['output'], \"evaluation\", a+\"_likelihood.txt\")\n cls.files['output_evaluation_likelihoodRootmethod'][a] = os.path.join(cls.paths['output'], \"evaluation\", a+\"_likelihoodrm.txt\")\n cls.files['eff_tab'][a] = os.path.join(cls.paths['output'], \"evaluation\", a+\"_eff_tab.txt\")\n if cls.flags['zsig']:\n cls.files['output_evaluation_zsig'][a] = os.path.join(cls.paths['output'], \"evaluation\", a+\"_zsig.txt\")\n\n @classmethod\n def fill_result_files(cls, analysis):\n \"\"\"Fills cls.files with files that belong to a certain analysis in a given result directory\"\"\"\n \n # Get all files in output folder\n if 'output_analysis' not in cls.paths:\n exit(\"Error in Info.fill_result_files()\")\n rdir = cls.paths['output_analysis']\n 
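# pick up every per-signal-region result file this analysis produced\n        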
signals = [os.path.join(rdir, f) for f in os.listdir(rdir) if \"signal\" in f and analysis in f]\n #cutflows = [os.path.join(rdir, f) for f in os.listdir(rdir) if \"cutflow\" in f and analysis in f] Not yet further evaluated\n \n cls.files['results_signal'] = dict()\n # Read the prefix of every file and use it as key for the files dictionaries\n for i in range(len(signals)):\n prefix = os.path.split(signals[i])[1][0:3]\n cls.files['results_signal'][prefix] = signals[i]\n\n @classmethod\n def prepare_output_directories(cls):\n if os.path.isdir(cls.paths['output']): \n shutil.rmtree(cls.paths['output'])\n \n os.makedirs(cls.paths['output'])\n os.mkdir(cls.paths['output_mg5'])\n os.mkdir(cls.paths['output_pythia'])\n os.mkdir(cls.paths['output_fritz'])\n os.mkdir(cls.paths['output_delphes'])\n os.mkdir(cls.paths['output_analysis'])\n os.mkdir(cls.paths['output_evaluation']) \n os.mkdir(cls.paths['output_internal']) \n #open(cls.files['output_progress'], 'a').close() TODO: Outdated. Not needed any more\n \n \n @classmethod\n def prepare_config(cls):\n randomseed = cls.parameters['randomseed']\n if randomseed!=0:\n name = \"Global\"\n if not cls.config.has_section(name):\n cls.config.add_section(name)\n cls.config.set(name, 'randomseed', randomseed)\n \n @classmethod\n def make_flags_consistent(cls):\n \"\"\" In case there are flags which are dependent on one another \"\"\"\n if cls.flags[\"controlregions\"]:\n cls.flags[\"skipevaluation\"] = True\n \n if cls.flags[\"quietmode\"]:\n cls.flags[\"skipparamcheck\"] = True\n AdvPrint.mute()\n\n if cls.flags[\"skippythia\"]:\n cls.flags[\"write_pythia_events\"] = False\n \n\n @classmethod\n def get_all_checkmate_analyses(cls):\n if 'list_of_analyses' not in cls.files:\n AdvPrint.cerr_exit(\"Cannot get list of analyses!\")\n analysis_list = list()\n groups = dict()\n for group in cls.analysis_groups:\n path = cls.files[\"list_of_analyses\"][group]\n with open(path, \"r\") as f:\n for line in f:\n if line.startswith(\"#\") or line.strip() == \"\":\n continue\n analysis = line.split()[0]\n analysis_list.append(analysis)\n groups[analysis] = group\n return (analysis_list, groups)\n \n @classmethod\n def get_analysis_parameters(cls, analysis): \n \"\"\" Reads in the _var.j file of a given analysis and returns the parameters\n as a dictionary \"\"\"\n if 'analysis_settings' not in cls.files or analysis not in cls.files['analysis_settings']:\n print(cls.files['analysis_settings'])\n AdvPrint.cerr_exit(\"Cannot find files for reading parameters of analysis \"+analysis)\n jfile = open(cls.files['analysis_settings'][analysis], \"rb\")\n parameters = json.loads(jfile.read())\n jfile.close()\n return parameters\n\n @classmethod\n def unit(cls, in_unit):\n \"\"\" Define everything in fb\"\"\" \n if in_unit.lower() == \"fb\":\n return 1.\n elif in_unit.lower() == \"pb\":\n return 1.E3\n elif in_unit.lower() == \"nb\":\n return 1.E6\n elif in_unit.lower() == \"mub\":\n return 1.E9\n elif in_unit.lower() == \"mb\":\n return 1.E12\n elif in_unit.lower() == \"b\":\n return 1.E15\n else:\n AdvPrint.cerr_exit(\"Unit \"+str(in_unit)+\" unknown!\")\n\n @classmethod\n def check_and_absolutize_file(cls, filename, abortOnFail=True):\n \"\"\" If the user provides relative files, pythia etc cannot handle them. 
So try to find the file with the given #filename at the following places\n        - #filename\n        - (if param.dat provided): #folder_of_given_param.dat/#filename\n        - $PWD/#filename\n        \"\"\"\n        guesses = list()\n        if os.path.isfile(filename):\n            return os.path.abspath(filename)\n        guesses.append(filename)\n        if (len(sys.argv) == 2):\n            paramFileDir = os.path.dirname(os.path.abspath(sys.argv[1]))\n            guess = os.path.join(paramFileDir, filename)\n            if os.path.isfile(guess):\n                return guess\n            guesses.append(guess)\n        cwd = os.getcwd()\n        guess = os.path.join(cwd, filename)\n        if os.path.isfile(guess):\n            return guess\n        guesses.append(guess)\n        guesses = list(set(guesses))\n        error_output = \"Couldn't find file \"+filename+\". Tried the following possibilities:\"\n        for guess in guesses:\n            error_output += \"\\n - \"+guess\n        if abortOnFail:\n            AdvPrint.cerr_exit(error_output)\n        else:\n            return None\n        \n\n\ndef _check_outputexists():\n    \"\"\"\n    Test if the user entered something useful for 'outputexists'.\n\n    The allowed values of the outputexists field are:\n    * add\n    * ask\n    * overwrite\n\n    The value of the parameter is treated as case insensitive and is normalized\n    to lower case.\n    \"\"\"\n    string = Info.parameters['outputexists'].lower()\n    Info.parameters['outputexists'] = string\n    if string != \"add\" and string != \"overwrite\" and string != \"ask\":\n        AdvPrint.cerr_exit(\n            \"Invalid argument for 'OutputExists': \"\n            +string\n            +\"\\nAllowed values are: 'add', 'ask' and 'overwrite'\")\n\n","repo_name":"CheckMATE2/checkmate2","sub_path":"tools/python/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":56263,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"9"}
{"seq_id":"72004031974","text":"#!/usr/bin/env python3\n\"\"\" class to manage the API authentication. 
\"\"\"\nfrom flask import request\nfrom typing import List, TypeVar\n\n\nclass Auth():\n \"\"\"\n api Auth\n \"\"\"\n def require_auth(self, path: str, excluded_paths: List[str]) -> bool:\n \"\"\"\n Returns True if path is None\n Returns True if excluded_paths is None or empty\n Returns False if path is in excluded_paths\n You can assume excluded_paths contains string path always ending by a /\n This method must be slash tolerant: path=/api/v1/status and\n path=/api/v1/status/ must be returned False if excluded_paths\n contains /api/v1/status/\n \"\"\"\n if not path or not excluded_paths:\n return True\n if path[-1] != '/':\n path += '/'\n if path in excluded_paths:\n return False\n return True\n\n def authorization_header(self, request=None) -> str:\n \"\"\"\n request is None, returns None\n If request doesn’t contain the header key Authorization, returns None\n Otherwise, return the value of the header request Authorizatio\n \"\"\"\n if request is None:\n return None\n if not request.headers.get(\"Authorization\"):\n return None\n return request.headers.get(\"Authorization\")\n\n def current_user(self, request=None) -> TypeVar('User'):\n \"\"\"\n returns None - request will be the Flask request object\n \"\"\"\n return None\n","repo_name":"TimSimms84/holbertonschool-web_back_end","sub_path":"0x03-Basic_authentication/api/v1/auth/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"29790216357","text":"from alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.orm import sessionmaker\n\nfrom portal.models.communication_request import CommunicationRequest\nfrom portal.models.identifier import Identifier\nfrom portal.system_uri import TRUENTH_CR_NAME\n\n\"\"\"Correct IRONMAN 6 month iteration count\n\nRevision ID: cea9fbdd98f9\nRevises: 72dcf1946d3f\nCreate Date: 2017-12-13 13:57:59.215573\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'cea9fbdd98f9'\ndown_revision = '72dcf1946d3f'\n\nSession = sessionmaker()\n\n\ndef upgrade():\n # Correct the iteration count for the existing IRONMAN 6 month\n # communication requests (need to start at index 0, not 1)\n # Necessary to do as a migration, otherwise we break FK constraints\n bind = op.get_bind()\n session = Session(bind=bind)\n idents = session.query(Identifier).filter(\n Identifier.system == TRUENTH_CR_NAME).filter(\n Identifier._value.like(u'IRONMAN Recurring | 6 Month %'))\n for id in idents:\n cr = CommunicationRequest.find_by_identifier(id)\n # bring into *this* session\n cr = session.query(CommunicationRequest).get(cr.id)\n cr.qb_iteration = 0\n session.commit()\n\ndef downgrade():\n bind = op.get_bind()\n session = Session(bind=bind)\n idents = session.query(Identifier).filter(\n Identifier.system == TRUENTH_CR_NAME).filter(\n Identifier._value.like(u'IRONMAN Recurring | 6 Month %'))\n for id in idents:\n cr = CommunicationRequest.find_by_identifier(id)\n # bring into *this* session\n cr = session.query(CommunicationRequest).get(cr.id)\n cr.qb_iteration = 1\n session.commit()\n","repo_name":"uwcirg/truenth-portal","sub_path":"portal/migrations/versions/cea9fbdd98f9_.py","file_name":"cea9fbdd98f9_.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"9"} +{"seq_id":"8981094752","text":"import sys\n\ninn = sys.stdin\nnext(inn)\n\nfor line in inn:\n line = line.strip()\n (k, b, n) = (i for i in 
line.split(\" \"))\n ssum = 0\n pw = 0\n for i in range(len(n) - 1, -1, -1):\n print(n[i], \"times\", b, \"pow\", pw)\n ssum += int(n[i]) * (int(b)**pw)\n pw +=1\n print(ssum)\n fin = 0\n for i in str(ssum):\n fin += (int(i) ** 2)\n print(k, fin)\n\n\n","repo_name":"lamida/kattis","sub_path":"python/ssd_unfinished.py","file_name":"ssd_unfinished.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"23598250699","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nimport pandas as pd\n\n\n# In[2]:\n\n\n#Import all the things to work\n\n\n# In[3]:\n\n\nfrom aqcuire import telco_data\n\n\n# In[15]:\n\n\ntelco = telco_data()\n\n\n# In[5]:\n\n\n#Disaster, I can not use the aquired data due to 'null' being foreign, I tried to create a value for null, \n#but it was Ignored\n\n\n# In[16]:\n\n\ntelco_prep= telco.drop(columns=['senior_citizen',\n 'partner',\n 'dependents',\n 'internet_service_type_id',\n 'online_security',\n 'device_protection',\n 'tech_support',\n 'paperless_billing',\n 'payment_type_id'])\n\n\n# In[9]:\n\n\n#Dropping what doesn't seem necessary \n\n\n# In[17]:\n\n\ntelco_prep.head()\n\n\n# In[11]:\n\n\n#And it's time for the main project\n\n","repo_name":"emichaudiv/project_report","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"42879186076","text":"# Smoothing or blurring images\r\n# -common use is to remove noise\r\n# Homogeneous filter, Gaussian filter, Median filter, Bilateral filter\r\n# Homogeneous filter is the most simple filter,\r\n# each output pixel is the mean of its kernel neighbors\r\n# Libraries:\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport cv2 as cv\r\n# Read Image in BGR format-\r\nimg = cv.imread('Pictures/Lenna.png')\r\n# Convert color from BGR to RGB format-\r\nimg = cv.cvtColor(img, code=cv.COLOR_BGR2RGB)\r\n# Kernel 1/25 (5x5)\r\nk = np.ones(shape=(5, 5), dtype=np.float32)/25\r\n# Homogeneous filter = filter2D\r\ndst_img = cv.filter2D(img, -1, kernel=k)\r\n# LPF blurring, HPF edge detection\r\n# Averaging algorithm: blur method\r\nblur = cv.blur(img, (5, 5))\r\n# Gaussian filter is nothing but using different-weight-kernel in x and y\r\ngausssian = cv.GaussianBlur(img, (5, 5), 0) # used to remove high frequency noise\r\n# Median filter replace each pixel's value with median of its neighboring pixels\r\nmedian = cv.medianBlur(img, 5) # ksize = 5 can be odd except 1\r\n# used to dealing with \"salt and pepper\" noise\r\n# salt and pepper noise: impulse noise\r\n# caused by sharp and sudden disturbances in image signal\r\n# presents itself as sparsely occurring white and black pixels\r\n# Bilateral filter - keep image edge sharp + remove noises\r\nbilateral = cv.bilateralFilter(img, d=9, sigmaColor=75, sigmaSpace=75) # d = diameter around pixel\r\n\r\ntitles = ['Image', '2D Convolution', 'Blur', 'Gaussian', 'Median', 'Bilateral']\r\nimages = [img, dst_img, blur, gausssian, median, bilateral]\r\n\r\nfor i in range(6):\r\n plt.subplot(2, 3, i+1)\r\n plt.imshow(images[i], cmap='gray')\r\n plt.title(titles[i])\r\n plt.xticks([])\r\n 
plt.yticks([])\r\n\r\n\r\nplt.show()\r\n","repo_name":"Robiul-1304009/Open-CV-Basics","sub_path":"OpenCV_18.py","file_name":"OpenCV_18.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
{"seq_id":"36591421364","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport pathlib\nimport seaborn as sns\n\n\ndef load_obj(fname: pathlib.Path):\n    data = pd.read_csv(fname, sep=' ', header=None, index_col=0, names=['h', 'x', 'y', 'z'])\n    vertices = data.loc['v', :]\n    vertices = vertices.reset_index()\n    vertices = vertices.sort_values('x')\n    return vertices\n\n\ndef load_points(fname: pathlib.Path):\n    data = pd.read_csv(fname, header=None, names=['y', 'x', 'z', 'r'])\n    data = data.sort_values('x')\n    return data\n\n\ndef plot_data(data: pd.DataFrame):\n    sns.scatterplot(data=data, x='idx', y='x', hue='origin')\n\n\nif __name__ == '__main__':\n    fname = '/data/neural_collision_detection/data/neurons/AP120410_s1c1.obj'\n    fname_pts = '/data/neural_collision_detection/data/neurons/AP120410_s1c1_balls.csv'\n    verts = load_obj(fname)\n    verts['origin'] = 'o'\n    data = load_points(fname_pts)\n    data['origin'] = 'p'\n    data = pd.concat([verts, data], ignore_index=True)\n    data['idx'] = np.arange(len(data))\n    plot_data(data)\n    plt.show()\n","repo_name":"PBLab/neural_collision_detection","sub_path":"src/ncd_post_process/check_data_rotation.py","file_name":"check_data_rotation.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"}
{"seq_id":"10990349628","text":"import turtle\r\nturtle.bgcolor(\"black\")\r\nturtle.pensize(2)\r\n\r\ndef curve():\r\n    for i in range(200):\r\n        turtle.right(1)\r\n        turtle.forward(1)\r\n\r\nturtle.speed(0)\r\nturtle.color(\"pink\", \"white\")\r\n\r\nturtle.begin_fill()\r\nturtle.left(140)\r\nturtle.forward(111.65)\r\ncurve()\r\n\r\nturtle.left(120)\r\ncurve()\r\nturtle.forward(111.65)\r\nturtle.end_fill()\r\nturtle.hideturtle()\r\n\r\nimport turtle\r\nt = turtle.Turtle()\r\ns = turtle.Screen()\r\ns.bgcolor(\"black\")\r\nt.speed(3)\r\nt.color('white')\r\nstyle = ('times new roman',20,'bold')\r\nt.write('APRILIYANDA',font=style,align='left',move=True)\r\nt.hideturtle()\r\nturtle.bgcolor(\"black\")\r\nturtle.pensize(2)\r\n","repo_name":"Fauzanilhamdi/make-import-turtle","sub_path":"Untitled-2.py","file_name":"Untitled-2.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
{"seq_id":"70641264285","text":"import sys\nfrom os import path as p\nfrom pypostgres.util import SubbableStr, get_formattedNames\nfrom pypostgres.pypostgres import dbexecute, psql_command\nfrom pypostgres.config.config import Config\nfrom pypostgres.util import subproc\n\n\ndef set_geometry_from_latlon(tableName, schemaName, logger=None):\n    ''' Set geometry col using columns latitude and longitude '''\n\n    if logger: logger.info(f'Create geom column for {schemaName}.{tableName}')\n\n    # Build a temp copy with the geom column, then swap it in for the original.\n    dbexecute(f\"create table {schemaName}.{tableName}_temp as \" \\\n              + \"select *, st_setsrid(st_makepoint(longitude, latitude), 4326) as geom \" \\\n              + f\"from {schemaName}.{tableName};\")\n\n    dbexecute(f\"drop table {schemaName}.{tableName};\")\n    # 'rename to' takes an unqualified name; the table stays in schemaName\n    dbexecute(f\"alter table {schemaName}.{tableName}_temp rename to {tableName};\")\n\n    if logger: logger.info(f'Create geom column for {schemaName}.{tableName}: DONE')\n    \n    \ndef 
create_spatial_index(tableName, schemaName, indexMethod='gist', logger=None):\n    ''' Create spatial index using 'geom' column '''\n\n    if logger: logger.info(f\"Create spatial index on 'geom' using method '{indexMethod}' for table {schemaName}.{tableName}\")\n    \n    indexName = f'{schemaName}_{tableName}_{indexMethod}_idx' # note: indexes are always created in same schema as parent table\n    dbexecute(f'drop index if exists {schemaName}.{indexName};')\n    dbexecute(f'create index {indexName} on {schemaName}.{tableName} using {indexMethod} (geom);')\n    \n    if logger: logger.info(f\"Create spatial index on 'geom' using method '{indexMethod}' for table {schemaName}.{tableName}: DONE\")\n\n    \ndef create_btree_index(colName, tableName, schemaName, lower=False, logger=None):\n    ''' Create btree index on column '''\n\n    if logger: logger.info(f\"Create btree index on {colName} in table {schemaName}.{tableName}. lower={lower}\")\n\n    if lower:\n        indexName = f'{schemaName}_{tableName}_{colName}_lower_btree_idx'\n    else:\n        indexName = f'{schemaName}_{tableName}_{colName}_btree_idx' \n\n    dbexecute(f'drop index if exists {schemaName}.{indexName};')\n\n    if lower:\n        dbexecute(f'create index {indexName} on {schemaName}.{tableName} ((lower({colName})));')\n    else:\n        dbexecute(f'create index {indexName} on {schemaName}.{tableName} ({colName});')\n    \n    if logger: logger.info(f\"Create btree index on {colName} in table {schemaName}.{tableName}. lower={lower}: DONE\")\n    \n\ndef append_csv(tableName, schemaName, csvfile, header=True, logger=None):\n    ''' Append data in csvfile to table '''\n\n    if logger: logger.info(f'Table {schemaName}.{tableName}: loading file {csvfile}')\n\n    psql_client = Config.get('psql_client_string')\n    \n    if header:\n        tailpipe = ' tail +2 | '\n    else:\n        tailpipe = ''\n    \n    # Load csvfile. 
Replace missing values (\"\") with null.\n copy_command = f\"cat {csvfile} | {tailpipe} sed --regexp-extended -e 's/\\\\\\\"\\\\\\\"/null/g' | \" + psql_client \\\n + rf''' --no-psqlrc --echo-all -c \"\\copy {schemaName}.{tableName} from stdin with (format csv, null 'null')\" '''\n\n res = subproc(copy_command)\n if res.returnVal != 0:\n if logger: logger.error(res.text, stack_info=True)\n print(res.text)\n sys.exit(1)\n\n if logger: logger.info(f'Table {schemaName}.{tableName}: loading file {csvfile}: DONE')\n\n \ndef create_table(tableName, schemaName, cols, logger=None):\n ''' Create table from list of tuples (colName, colType) '''\n\n if logger: logger.info(f'Creating table {schemaName}.{tableName}')\n\n # Column definitions string\n colDefs = [f'{x[0]} {x[1]}' for x in cols]\n colDefs_str = ', '.join(colDefs)\n\n # Drop and create table\n dbexecute(f'drop table if exists {schemaName}.{tableName};')\n dbexecute(f'create table {schemaName}.{tableName} ( {colDefs_str} );')\n\n\n# Create a table and set column names according to a csvfile header\ndef define_varchar_table_from_csv(csvfile, tableName, schemaName=None, logger=None):\n\n if not schemaName:\n schemaName = 'public'\n \n # Build column definition string from csv headerline\n colNames = get_formattedNames(csvfile)\n colDefs = [f'{name} varchar' for name in colNames]\n colDefs_str = ', '.join(colDefs)\n\n # Drop and create table\n dbexecute(f'create schema if not exists {schemaName};') \n dbexecute(f'drop table if exists {schemaName}.{tableName};') \n\n create_cmd = f'create table {schemaName}.{tableName} ( {colDefs_str} );'\n if logger: logger.info(f'Creating table with: {create_cmd}')\n dbexecute(create_cmd)\n\n\n# Append data in csvfile to table\ndef append_varchar_table_from_csv(csvfile, tableName, schemaName, skipHeader=True, logger=None):\n\n if skipHeader:\n copy_command = f\"\\\\copy {schemaName}.{tableName} from program 'tail -n +2 {csvfile}' with (format csv)\"\n else:\n copy_command = f\"\\\\copy {schemaName}.{tableName} from program 'cat {csvfile}' with (format csv)\" \n\n if logger: logger.info(f' Loading: {copy_command}')\n psql_command(copy_command)\n\n\n# Cast columns to a new table.\n# cols parameter is a list of tuples where each tuple contains three strings: column name, old type, new type.\n# Only columns that appear in cols are included in the new table.\n# To avoid casting a column, set old type and/or new type to None, or set both to the same value.\ndef cast_to_new_table(tableName, newTableName, schemaName, cols):\n\n selectStrings = []\n\n for col in cols:\n colName = col[0]\n oldType = col[1]\n newType = col[2]\n\n # build selected column strings for sql command, preserving column order\n\n if oldType == newType or oldType is None or newType is None:\n\n # no cast\n selectStrings.append(colName)\n\n else:\n\n # cast\n if oldType == 'varchar':\n selectStrings.append(f\"cast(nullif({colName}, '') as {newType})\")\n else:\n selectStrings.append(f'cast({colName} as {newType})')\n\n cast_cols_string = ', '.join(selectStrings)\n colNames_string = ', '.join([col[0] for col in cols])\n \n ##\n # Execute select/cast\n ##\n\n # drop and create table to receive casted columns\n dbexecute(f'drop table if exists {schemaName}.{newTableName};')\n\n newColumnDefs = ', '.join([f'{col[0]} {col[2]}' for col in cols])\n cmd = f'create table {schemaName}.{newTableName} ({newColumnDefs});'\n dbexecute(cmd)\n \n # Perform cast\n cmd = f'insert into {schemaName}.{newTableName} ({colNames_string}) ' \\\n + f'select 
{cast_cols_string} from {schemaName}.{tableName};'\n dbexecute(cmd) \n\n \n##\n# Notes\n##\n\n# '\\copy' example:\n#psql --no-psqlrc -c \"\\copy epa.eight_hour from '/mnt/scratch/epa/extract/8hour_42101/8hour_42101_1997.csv' delimiter ',' csv header\"\n\n# note: psql 'copy' requires superuser privs:\n#dbexecute(f\"COPY epa.{tableName} ({colNames_str}) \" +\n# f\"FROM '/mnt/scratch/epa/extract/8hour_42101/8hour_42101_1997.csv' DELIMITER ',' CSV HEADER;\")\n\n# In postgres 12+: geom can be a generated col:\n#dbexecute(f'create table \"epa.{tableName}\" ( ' + \\\n# f' {colDefs_str}, ' + \\\n# f' geom geometry(Point,4326) generated always as (ST_SetSRID(ST_MakePoint(longitude, latitude), 4326)) stored );')\n\n\n\n","repo_name":"cwillhu/pypostgres","sub_path":"pypostgres/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":7308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"23297949668","text":"from math import isqrt\r\ndef extended_euclidean(a,b):\r\n \"\"\"Returns gcd(a,b) along with integers s,t such that gcd(a,b)=as+bt using\r\n the extended euclidean algorithm.\"\"\"\r\n\r\n r_old, r = a,b\r\n s_old, s = 1,0\r\n t_old, t = 0,1\r\n while r > 0:\r\n q = r_old // r\r\n r_old, r = r, r_old%r\r\n s_old, s = s, s_old-s*q\r\n t_old, t = t, t_old-t*q\r\n return r_old, s_old, t_old\r\n\r\n\r\ndef modular_inverse(a,m):\r\n \"\"\"Returns an integer b such that a*b=1 mod m if gcd(a,m)=1\"\"\"\r\n g,s,t = extended_euclidean(a,m)\r\n if g != 1:\r\n raise ValueError('Input must be coprime')\r\n return s%m\r\n\r\ndef babysteps_giantsteps(g,h,p,N=None):\r\n \"\"\"Returns the discrete log of h with respect to the base g modulo p, where g has order N modulo p.\r\n \"\"\"\r\n if not N: N = p-1\r\n n = isqrt(N)+1\r\n # n = floor(sqrt(N)) + 1\r\n # make a dictionary {g**i%p:i} for fast lookup of the exponent\r\n babystep = 1\r\n babysteps_table = {}\r\n for i in range(n):\r\n babysteps_table[babystep] = i\r\n babystep = (babystep * g) % p\r\n g_inv = modular_inverse(g,p)\r\n g_n_inv = pow(g_inv, n, p)\r\n # giantsteps: h*g^(-jn) for j in [0..n-1]\r\n giantstep = h\r\n for j in range(n+1):\r\n if giantstep in babysteps_table:\r\n i = babysteps_table[giantstep]\r\n return i+n*j\r\n giantstep = (giantstep * g_n_inv) % p\r\n print(\"no solution!\")\r\n return None\r\n","repo_name":"Jordanwhattt/Crptography","sub_path":"github-classroom/MATH4176/pohlig-hellman-Jordanwhattt/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"27438766198","text":"\r\nimport pygame\r\nimport pyscroll\r\nimport pytmx\r\n\r\nfrom pygame.constants import K_DOWN, K_LEFT, K_RIGHT, K_SPACE, K_UP\r\nfrom pygame.locals import *\r\n\r\nfrom Menus.shop_and_end_screens import interwave_menu, menu_game_over, menu_victory\r\nfrom Entities.entities import *\r\nfrom Entities.lifebar import *\r\nfrom Entities.player import *\r\nfrom Entities.player_model import *\r\nfrom Entities.weapons import *\r\nfrom Entities.spritesheet import *\r\nfrom Settings.settings import *\r\nfrom SoundEffects.sounds import GameSoundEffects\r\nfrom Source.waves import *\r\n\r\n\r\nclass Game:\r\n '''\r\n Defines a game session, handling the screen and the different entities.\r\n '''\r\n def __init__(self):\r\n pygame.init()\r\n self.screen = pygame.display.set_mode((WIDTH, HEIGHT))\r\n pygame.display.set_caption(TITLE)\r\n self.clock = pygame.time.Clock()\r\n 
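# bundle of mixer sound assets (main_music, enemy_dies, sword_wall) shared by the whole session\r\n        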
self.sound_effects = GameSoundEffects()\r\n\r\n self.tmx_data = pytmx.load_pygame(\"Textures/map_tiled.tmx\", pixelalpha=True)\r\n self.map_data = pyscroll.TiledMapData(self.tmx_data)\r\n self.map_layer = pyscroll.BufferedRenderer(\r\n self.map_data, (WIDTH, HEIGHT))\r\n self.map_layer.zoom = 1.2\r\n\r\n def new(self, difficulty=1):\r\n \"\"\"\r\n Creation of the entities.\r\n\r\n :param difficulty: the difficulty to apply to the enemies's stats\r\n \"\"\"\r\n self.inmenu = False\r\n self.all_sprites = pyscroll.PyscrollGroup(\r\n map_layer=self.map_layer)\r\n self.all_sprites_window = pygame.sprite.Group()\r\n self.difficulty = difficulty\r\n self.player = Player(self, (POS_PL_BASE))\r\n self.player_model = PlayerModel(self)\r\n self.wave = Wave(self)\r\n\r\n self.item_available = [True] * (len(cost) + 1) # displays whether or not a shop item has been bought\r\n\r\n self.enemy_lifebars_outline = {}\r\n self.enemy_lifebars = {}\r\n\r\n pygame.mixer.music.load(self.sound_effects.main_music)\r\n pygame.mixer.music.play(-1)\r\n\r\n self.enemies = self.wave.enemies()\r\n\r\n self.walls = [\r\n Wall(self, \"up\", 0, MAP_WIDTH, -TILESIZE // 2, TILESIZE // 2),\r\n Wall(self, \"down\", 0, MAP_WIDTH, MAP_HEIGHT -\r\n TILESIZE // 2, MAP_HEIGHT + TILESIZE // 2),\r\n Wall(self, \"left\", -TILESIZE // 2, TILESIZE // 2, 0, MAP_HEIGHT),\r\n Wall(self, \"right\", MAP_WIDTH - TILESIZE // 2,\r\n MAP_WIDTH + TILESIZE // 2, 0, MAP_HEIGHT),\r\n ]\r\n\r\n self.weapon = Weapon(self)\r\n\r\n self.lifebar_player_outline = PlayerLifebarOutline(self)\r\n self.lifebar_player = PlayerLifebar(self)\r\n\r\n def run(self):\r\n \"\"\"\r\n Main loop.\r\n \"\"\"\r\n self.running = True\r\n while self.running:\r\n self.events()\r\n self.update()\r\n self.draw()\r\n self.clock.tick(60)\r\n\r\n def events(self):\r\n \"\"\"\r\n Handles pygame events.\r\n \"\"\"\r\n for event in pygame.event.get():\r\n # Closing the window\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n return\r\n\r\n # Attacking with space bar\r\n if event.type == pygame.KEYDOWN and event.key == K_SPACE:\r\n self.player.attack()\r\n\r\n keys_pressed = pygame.key.get_pressed()\r\n if keys_pressed[K_ESCAPE]:\r\n pygame.quit()\r\n return\r\n\r\n # Moving the player if the arrows keys are pressed\r\n if keys_pressed[K_LEFT]:\r\n # update animation\r\n current_time = pygame.time.get_ticks()\r\n if (\r\n current_time - self.player_model.last_update\r\n > self.player_model.animation_cooldown\r\n ):\r\n self.player_model.frame = (\r\n self.player_model.frame + 1\r\n ) % self.player_model.animation_steps\r\n self.player_model.last_update = current_time\r\n self.player_model.image = self.player_model.animation_list_left[\r\n self.player_model.frame]\r\n\r\n if keys_pressed[K_DOWN]:\r\n self.player.move(-1, 1)\r\n elif keys_pressed[K_UP]:\r\n self.player.move(-1, -1)\r\n else:\r\n self.player.move(-1, 0)\r\n self.player.update_pl_facing(\"left\")\r\n\r\n if keys_pressed[K_RIGHT]:\r\n current_time = pygame.time.get_ticks()\r\n if (\r\n current_time - self.player_model.last_update\r\n > self.player_model.animation_cooldown\r\n ):\r\n self.player_model.frame = (\r\n self.player_model.frame + 1\r\n ) % self.player_model.animation_steps\r\n self.player_model.last_update = current_time\r\n self.player_model.image = self.player_model.animation_list_right[\r\n self.player_model.frame]\r\n\r\n if keys_pressed[K_DOWN]:\r\n self.player.move(1, 1)\r\n elif keys_pressed[K_UP]:\r\n self.player.move(1, -1)\r\n else:\r\n self.player.move(1, 0)\r\n 
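# keep the player model facing its direction of travel\r\n            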
self.player.update_pl_facing(\"right\")\r\n if (\r\n keys_pressed[K_UP]\r\n and not keys_pressed[K_RIGHT]\r\n and not keys_pressed[K_LEFT]\r\n ):\r\n self.player.move(0, -1)\r\n self.player.update_pl_facing(\"up\")\r\n current_time = pygame.time.get_ticks()\r\n if (\r\n current_time - self.player_model.last_update\r\n > self.player_model.animation_cooldown\r\n ):\r\n self.player_model.frame = (\r\n self.player_model.frame + 1\r\n ) % self.player_model.animation_steps\r\n self.player_model.last_update = current_time\r\n self.player_model.image = self.player_model.animation_list_up[\r\n self.player_model.frame]\r\n if (\r\n keys_pressed[K_DOWN]\r\n and not keys_pressed[K_RIGHT]\r\n and not keys_pressed[K_LEFT]\r\n ):\r\n self.player.move(0, 1)\r\n self.player.update_pl_facing(\"down\")\r\n current_time = pygame.time.get_ticks()\r\n if (\r\n current_time - self.player_model.last_update\r\n > self.player_model.animation_cooldown\r\n ):\r\n self.player_model.frame = (\r\n self.player_model.frame + 1\r\n ) % self.player_model.animation_steps\r\n self.player_model.last_update = current_time\r\n self.player_model.image = self.player_model.animation_list_down[\r\n self.player_model.frame]\r\n\r\n\r\n def update(self):\r\n \"\"\"\r\n Updates the entities' data.\r\n \"\"\"\r\n if self.inmenu:\r\n interwave_menu(self)\r\n\r\n if not self.inmenu:\r\n if not self.enemies:\r\n if not self.wave.wave_over:\r\n self.inmenu = True\r\n\r\n if (\r\n self.wave.wave_over\r\n and pygame.time.get_ticks() - self.wave_time > WAVE_COOLDOWN\r\n ):\r\n self.wave.next_wave()\r\n self.enemies = self.wave.enemies()\r\n self.wave.wave_over = False\r\n\r\n for enemy in self.enemies:\r\n current_time = pygame.time.get_ticks()\r\n if current_time - enemy.last_update > enemy.animation_cooldown:\r\n enemy.frame = (enemy.frame + 1) % enemy.animation_steps\r\n enemy.last_update = current_time\r\n enemy.image = enemy.animation_list[enemy.frame]\r\n if pygame.sprite.spritecollideany(self.player, [enemy]):\r\n enemy.knockbacks_enemy_player(self.player.kb)\r\n self.player.hurt(enemy.atk)\r\n if enemy.hp <= 0:\r\n self.player.add_gold(enemy.loot)\r\n self.player.add_score(enemy.loot)\r\n self.enemies.remove(enemy)\r\n self.enemy_lifebars_outline[enemy].kill()\r\n self.enemy_lifebars_outline.pop(enemy)\r\n self.enemy_lifebars[enemy].kill()\r\n self.enemy_lifebars.pop(enemy)\r\n enemy.kill()\r\n enemy.kill()\r\n pygame.mixer.Sound.play(self.sound_effects.enemy_dies)\r\n else:\r\n for wall in self.walls:\r\n if pygame.sprite.spritecollideany(enemy, [wall]):\r\n wall.knockbacks_enemy(enemy)\r\n enemy.update_enemy()\r\n for wall in self.walls:\r\n if pygame.sprite.spritecollideany(self.player, [wall]):\r\n wall.knockbacks_player()\r\n if self.weapon.animation and not self.weapon.playing_wall_tap and (\r\n pygame.sprite.spritecollideany(self.weapon, [wall])):\r\n pygame.mixer.Sound.play(self.sound_effects.sword_wall)\r\n self.weapon.playing_wall_tap = True\r\n self.weapon.update_weapon()\r\n self.player_model.update_pos()\r\n\r\n def draw(self):\r\n \"\"\"\r\n Displays the different sprites on the screen.\r\n \"\"\"\r\n self.all_sprites.center(self.player.rect.center)\r\n self.all_sprites.draw(self.screen)\r\n self.all_sprites_window.draw(self.screen)\r\n self.player.print_gold()\r\n self.player.print_score()\r\n pygame.display.update()\r\n\r\n def quit(self):\r\n \"\"\"\r\n Stops the program.\r\n \"\"\"\r\n pygame.quit()\r\n return\r\n\r\n def lost(self):\r\n \"\"\"\r\n Handles the defeat of a game by closing the window and printing a 
message.\r\n \"\"\"\r\n self.running = False\r\n pygame.mixer.music.stop()\r\n menu_game_over(self)\r\n\r\n def won(self):\r\n \"\"\"\r\n Handles the victory of a game by closing the window and printing a message.\r\n \"\"\"\r\n self.running = False\r\n pygame.mixer.music.stop()\r\n menu_victory(self)\r\n","repo_name":"Raptornythorink/Battle-Arena","sub_path":"Source/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":10075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"34671729724","text":"import json\nimport os\nfrom json.decoder import JSONDecodeError\nfrom pprint import pprint\n\n\ndef load_json(file):\n try:\n data = json.loads(open(file, encoding='utf-8').read())\n except JSONDecodeError:\n raise (\n BaseException(\"Error while parsing file \\\"\" + file + \"\\\". See JSONDecodeError above for details.\"))\n return data\n\n\ndef write_json(data: dict, path: str):\n with open(path, 'w') as outfile:\n json.dump(data, outfile)\n\n\ndef find_class_labels(training_data):\n labels = set()\n dataset_size = len(training_data)\n for i in range(dataset_size):\n label = training_data[str(i)]['intent']\n labels.add(label)\n return labels\n\n\ndef find_slot_labels(training_data, _class_labels):\n dataset_size = len(training_data)\n _slot_labels = {}\n for class_label in _class_labels:\n _slot_labels[class_label] = set()\n\n for i in range(dataset_size):\n labels = training_data[str(i)]['slots']\n class_label = training_data[str(i)]['intent']\n for label in labels.keys():\n _slot_labels[str(class_label)].add(str(label))\n return _slot_labels\n\n\ndef split_data(data, split_ratio):\n data_size = len(data)\n samples1 = int(round(data_size * split_ratio))\n samples2 = data_size - samples1\n\n split1 = {}\n split2 = {}\n for i in range(samples1):\n split1[str(i)] = data[str(i)]\n\n for i in range(samples2):\n split2[str(i)] = data[str(i + samples1)]\n\n return split1, split2\n\n\nif __name__ == '__main__':\n train = load_json(os.path.join(\"data\", \"orig\", \"train.json\"))\n class_labels = find_class_labels(train)\n # slot_labels = find_slot_labels(train, class_labels)\n pprint(class_labels)\n # pprint(slot_labels)\n\n # Train Test split\n # train, dev = split_data(train, 0.8)\n # write_json(train, os.path.join(\"data\", \"train.json\"))\n # write_json(train, os.path.join(\"data\", \"dev.json\"))\n","repo_name":"Alex25i/DeepLearningNLU","sub_path":"data_handler.py","file_name":"data_handler.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"34053468129","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.autograd as autograd\nimport torch.optim as optim\nimport torchvision\nimport numpy as np\nimport pdb\nfrom utils import *\n\n\n# --------------------------------------------------------------------------\n# Core Models \n# --------------------------------------------------------------------------\n\nclass VAE(nn.Module):\n def __init__(self, args):\n super(VAE, self).__init__()\n self.args = args\n\n # Encoder architecture\n mult = 1\n nz = args.z_dim * mult\n nc = 3 if args.no_polar else 2\n ndf = 64\n lf = (1,32)\n self.encoder_conv2d_1 = nn.Sequential(nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))\n self.encoder_leakyrelu_2 = nn.Sequential(nn.LeakyReLU(0.2, inplace=True))\n self.encoder_conv2d_3 = nn.Sequential(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, 
bias=False))\n\n self.encoder_batchnorm2d_4 = nn.Sequential(nn.BatchNorm2d(ndf * 2))\n self.encoder_leakyrelu_5 = nn.Sequential(nn.LeakyReLU(0.2, inplace=True))\n self.encoder_conv2d_6 = nn.Sequential(nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False))\n\n self.encoder_batchnorm2d_7 = nn.Sequential(nn.BatchNorm2d(ndf * 4))\n self.encoder_leakyrelu_8 = nn.Sequential(nn.LeakyReLU(0.2, inplace=True))\n self.encoder_conv2d_9 = nn.Sequential(nn.Conv2d(ndf * 4, ndf * 8, (3,4), 2, (0,1), bias=False))\n\n self.encoder_batchnorm2d_10 = nn.Sequential(nn.BatchNorm2d(ndf * 8))\n self.encoder_leakyrelu_11 = nn.Sequential(nn.LeakyReLU(0.2, inplace=True))\n\n self.encoder_conv2d_12 = nn.Sequential(nn.Conv2d(ndf * 8, nz, lf, 1, 0, bias=False))\n\n # Decoder architecture\n ngf=64\n base=4\n ff=(1,32)\n nz=args.z_dim\n nc=2\n self.decoder_convtranspose2d_13 = nn.Sequential(nn.ConvTranspose2d(nz, ngf * 8, ff, 1, 0, bias=False))\n self.decoder_batchnorm2d_14 = nn.Sequential(nn.BatchNorm2d(ngf * 8))\n self.decoder_relu_15 = nn.Sequential(nn.ReLU(True))\n\n\n self.decoder_convtranspose2d_16 = nn.Sequential(nn.ConvTranspose2d(ngf * 8, ngf * 4, (4,4), stride=2, padding=(0,1), bias=False))\n self.decoder_batchnorm2d_17 = nn.Sequential(nn.BatchNorm2d(ngf * 4))\n self.decoder_relu_18 = nn.Sequential(nn.ReLU(True))\n\n self.decoder_convtranspose2d_19 = nn.Sequential(nn.ConvTranspose2d(ngf * 4, ngf * 2, (4,4), stride=2, padding=(1,1), bias=False))\n self.decoder_batchnorm2d_20 = nn.Sequential(nn.BatchNorm2d(ngf * 2))\n self.decoder_relu_21 = nn.Sequential(nn.ReLU(True))\n\n\n self.decoder_convtranspose2d_22 = nn.Sequential(nn.ConvTranspose2d(ngf * 2, ngf * 1, (4,4), stride=2, padding=(1,1), bias=False))\n self.decoder_batchnorm2d_23 = nn.Sequential(nn.BatchNorm2d(ngf * 1))\n self.decoder_relu_24 = nn.Sequential(nn.ReLU(True))\n\n\n self.decoder_convtranspose2d_25 = nn.Sequential(nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False))\n self.tanh_26 = nn.Sequential(nn.Tanh())\n\n\n def forward(self, x):\n # Encoder network forward pass # Input size : (3,29,512)\n x1 = self.encoder_conv2d_1(x) # Size start : (64,14,256)\n x2 = self.encoder_leakyrelu_2(x1)\n x3 = self.encoder_conv2d_3(x2) # Downsample : (128,7,128)\n\n x4 = self.encoder_batchnorm2d_4(x3)\n x5 = self.encoder_leakyrelu_5(x4)\n x6 = self.encoder_conv2d_6(x5) # Downsample : (256,3,64)\n\n x7 = self.encoder_batchnorm2d_7(x6)\n x8 = self.encoder_leakyrelu_8(x7)\n x9 = self.encoder_conv2d_9(x8) # Downsample : (512,1,32)\n\n x10 = self.encoder_batchnorm2d_10(x9)\n x11 = self.encoder_leakyrelu_11(x10)\n\n x12 = self.encoder_conv2d_12(x11) # Hidden size : (Z_dim,1,1)\n z = x12\n\n while z.dim() != 2: \n z = z.squeeze(-1) \n\n # Decoder network forward pass\n x13 = self.decoder_convtranspose2d_13(x12) # Size restart : (512,1,32)\n x14 = self.decoder_batchnorm2d_14(x13)\n x15 = self.decoder_relu_15(x14)\n\n x16 = self.decoder_convtranspose2d_16(x15) # Upsample : (256,4,64)\n x17 = self.decoder_batchnorm2d_17(x16)\n x18 = self.decoder_relu_18(x17)\n\n x19 = self.decoder_convtranspose2d_19(x18) # Upsample : (128,8,128)\n x20 = self.decoder_batchnorm2d_20(x19)\n x21 = self.decoder_relu_21(x20)\n\n x22 = self.decoder_convtranspose2d_22(x21) # Upsample : (64,16,256)\n x23 = self.decoder_batchnorm2d_23(x22)\n x24 = self.decoder_relu_24(x23)\n\n x25 = self.decoder_convtranspose2d_25(x24) # Output size : (2,32,512)\n x26 = self.tanh_26(x25)\n recon = x26\n\n return recon, None, z\n\n\n\n\n\n def sample(self, nb_samples=16, tmp=1):\n noise = torch.cuda.FloatTensor(nb_samples, 
self.args.z_dim).normal_(0, tmp)\n        return self.decode(noise)\n\n    @staticmethod\n    def gaussian_kl(mu, logvar):\n        return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=-1)\n    \n    @staticmethod\n    def log_gauss(z, params):\n        [mu, std] = params\n        return -0.5 * (torch.pow(z - mu, 2) * torch.pow(std + 1e-8, -2) + 2 * torch.log(std + 1e-8) + np.log(2 * np.pi)).sum(1)\n\n\n# --------------------------------------------------------------------------\n# Baseline (AtlasNet), taken from https://github.com/ThibaultGROUEIX/AtlasNet\n# --------------------------------------------------------------------------\nclass PointNetfeat_(nn.Module):\n    def __init__(self, num_points = 40 * 256, global_feat = True):\n        super(PointNetfeat_, self).__init__()\n        self.conv1 = torch.nn.Conv1d(3, 64, 1)\n        self.conv2 = torch.nn.Conv1d(64, 128, 1)\n        self.conv3 = torch.nn.Conv1d(128, 1024, 1)\n\n        self.bn1 = torch.nn.BatchNorm1d(64)\n        self.bn2 = torch.nn.BatchNorm1d(128)\n        self.bn3 = torch.nn.BatchNorm1d(1024)\n\n        #self.mp1 = torch.nn.MaxPool1d(num_points)\n        self.num_points = num_points\n        self.global_feat = global_feat\n    def forward(self, x):\n        batchsize = x.size()[0]\n        \n        x = F.relu(self.bn1(self.conv1(x)))\n        pointfeat = x\n        x = F.relu(self.bn2(self.conv2(x)))\n        x = self.bn3(self.conv3(x))\n        x,_ = torch.max(x, 2)\n        x = x.view(-1, 1024)\n        return x\n\n\nclass PointGenCon(nn.Module):\n    def __init__(self, bottleneck_size = 128):\n        self.bottleneck_size = bottleneck_size\n        super(PointGenCon, self).__init__()\n        self.conv1 = torch.nn.Conv1d(self.bottleneck_size, self.bottleneck_size, 1)\n        self.conv2 = torch.nn.Conv1d(self.bottleneck_size, self.bottleneck_size // 2, 1)\n        self.conv3 = torch.nn.Conv1d(self.bottleneck_size // 2, self.bottleneck_size // 4, 1)\n        self.conv4 = torch.nn.Conv1d(self.bottleneck_size // 4, 3, 1)\n\n        self.th = nn.Tanh()\n        self.bn1 = torch.nn.BatchNorm1d(self.bottleneck_size)\n        self.bn2 = torch.nn.BatchNorm1d(self.bottleneck_size // 2)\n        self.bn3 = torch.nn.BatchNorm1d(self.bottleneck_size // 4)\n\n    def forward(self, x):\n        batchsize = x.size()[0]\n        \n        x = F.relu(self.bn1(self.conv1(x)))\n        x = F.relu(self.bn2(self.conv2(x)))\n        x = F.relu(self.bn3(self.conv3(x)))\n        x = self.th(self.conv4(x))\n        return x\n\n\nclass AE_AtlasNet(nn.Module):\n    def __init__(self, num_points = 40 * 256, bottleneck_size = 1024, nb_primitives = 2, AE=True):\n        super(AE_AtlasNet, self).__init__()\n        bot_enc = bottleneck_size if AE else 2 * bottleneck_size\n        self.num_points = num_points\n        self.bottleneck_size = bottleneck_size\n        self.nb_primitives = nb_primitives\n        self.encoder = nn.Sequential(\n        PointNetfeat_(num_points, global_feat=True),\n        nn.Linear(1024, bot_enc),\n        nn.BatchNorm1d( bot_enc),\n        nn.ReLU()\n        )\n        self.decoder = nn.ModuleList([PointGenCon(bottleneck_size = 2 + self.bottleneck_size) for i in range(0,self.nb_primitives)])\n\n\n    def encode(self, x):\n        if x.dim() == 4 : \n            if x.size(1) != 3: \n                assert x.size(-1) == 3 \n                x = x.permute(0, 3, 1, 2).contiguous()\n            x = x.reshape(x.size(0), 3, -1)\n        else: \n            if x.size(1) != 3: \n                assert x.size(-1) == 3 \n                x = x.transpose(-1, -2).contiguous()\n        \n        x = self.encoder(x)\n        return x\n\n    def decode(self, x):\n        outs = []\n        for i in range(0,self.nb_primitives):\n            rand_grid = (torch.cuda.FloatTensor(x.size(0),2,self.num_points // self.nb_primitives))\n            rand_grid.data.uniform_(0,1)\n            y = x.unsqueeze(2).expand(x.size(0),x.size(1), rand_grid.size(2)).contiguous()\n            y = torch.cat( (rand_grid, y), 1).contiguous()\n            outs.append(self.decoder[i](y))\n        return 
torch.cat(outs,2).contiguous().transpose(2,1).contiguous()\n\n\n\nif __name__ == '__main__':\n    points = torch.cuda.FloatTensor(10, 3, 40, 256).normal_()\n    AE = AE_AtlasNet(num_points = 40 * 256).cuda()\n    # AE_AtlasNet defines encode()/decode() but no forward(), so call them explicitly\n    out = AE.decode(AE.encode(points))\n    loss = get_chamfer_dist()(points, out)\n\n\n# --------------------------------------------------------------------------\n# Baseline (Panos's paper)\n# --------------------------------------------------------------------------\nclass PointGenPSG2(nn.Module):\n    def __init__(self, nz=100, num_points = 40 * 256):\n        super(PointGenPSG2, self).__init__()\n        self.num_points = num_points\n        self.fc1 = nn.Linear(nz, 256)\n        self.fc2 = nn.Linear(256, 512)\n        self.fc3 = nn.Linear(512, 1024)\n        self.fc4 = nn.Linear(1024, self.num_points * 3 // 2)\n\n        self.fc11 = nn.Linear(nz, 256)\n        self.fc21 = nn.Linear(256, 512)\n        self.fc31 = nn.Linear(512, 1024)\n        self.fc41 = nn.Linear(1024, self.num_points * 3 // 2)\n        self.th = nn.Tanh()\n        self.nz = nz\n        \n        \n    def forward(self, x):\n        batchsize = x.size()[0]\n        \n        x1 = x\n        x2 = x\n        x1 = F.relu(self.fc1(x1))\n        x1 = F.relu(self.fc2(x1))\n        x1 = F.relu(self.fc3(x1))\n        x1 = self.th(self.fc4(x1))\n        x1 = x1.view(batchsize, 3, -1)\n\n        x2 = F.relu(self.fc11(x2))\n        x2 = F.relu(self.fc21(x2))\n        x2 = F.relu(self.fc31(x2))\n        x2 = self.th(self.fc41(x2))\n        x2 = x2.view(batchsize, 3, -1)\n\n        return torch.cat([x1, x2], 2)\n\n","repo_name":"sabyasachis/dslr_backup","sub_path":"static_reconstruction_method/models_symm_default.py","file_name":"models_symm_default.py","file_ext":"py","file_size_in_byte":10562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
{"seq_id":"31012436579","text":"import random\nimport cv2\nimport numpy as np\nimport paddle\nimport numbers\nimport collections.abc\nfrom PIL import Image,ImageFilter\nimport paddle.vision.transforms.functional as F\nfrom paddle.vision import transforms\n\nfrom ..builder import TRANSFORM\n\ndef to_tensor(data):\n    \"\"\"Convert objects of various python types to :obj:`paddle.Tensor`.\n    Supported types are: :class:`numpy.ndarray`, :class:`paddle.Tensor`,\n    :class:`Sequence`, :class:`int` and :class:`float`.\n    Args:\n        data (paddle.Tensor | numpy.ndarray | Sequence | int | float): Data to\n            be converted.\n    \"\"\"\n\n    if isinstance(data, paddle.Tensor):\n        return data\n    elif isinstance(data, np.ndarray):\n        return paddle.to_tensor(data)\n    elif isinstance(data, int):\n        return paddle.to_tensor([data])\n    elif isinstance(data, float):\n        return paddle.to_tensor([data])\n    else:\n        raise TypeError(f'type {type(data)} cannot be converted to tensor.')\n\n\n@TRANSFORM.register()\nclass ToTensor(object):\n    \"\"\"Convert some results to :obj:`paddle.Tensor` by given keys.\n    Args:\n        keys (Sequence[str]): Keys that need to be converted to Tensor.\n        collect_keys (Sequence[str]): Keys that need to keep, but not to Tensor.\n    \"\"\"\n\n    def __init__(self, keys=['img', 'mask'], collect_keys=[], cfg=None):\n        self.keys = keys\n        self.collect_keys = collect_keys\n\n    def __call__(self, sample):\n        data = {}\n        if len(sample['img'].shape) < 3:\n            sample['img'] = np.expand_dims(sample['img'], -1)\n        for key in sample.keys():\n            if key in self.keys:\n                data[key] = paddle.to_tensor(sample[key])\n                if data[key].dtype == paddle.uint8:\n                    data[key] = data[key].astype(paddle.int64)\n            if key in self.collect_keys:\n                data[key] = sample[key]\n        data['img'] = data['img'].transpose((2, 0, 1))\n        return data\n\n    def __repr__(self):\n        return self.__class__.__name__ + f'(keys={self.keys})'\n\n\n@TRANSFORM.register()\nclass 
RandomLROffsetLABEL(object):\n    def __init__(self,max_offset, cfg=None):\n        self.max_offset = max_offset\n    def __call__(self, sample):\n        img = sample['img'] \n        label = sample['mask'] \n        offset = np.random.randint(-self.max_offset,self.max_offset)\n        h, w = img.shape[:2]\n\n        img = np.array(img)\n        if offset > 0:\n            img[:,offset:,:] = img[:,0:w-offset,:]\n            img[:,:offset,:] = 0\n        if offset < 0:\n            real_offset = -offset\n            img[:,0:w-real_offset,:] = img[:,real_offset:,:]\n            img[:,w-real_offset:,:] = 0\n\n        label = np.array(label)\n        if offset > 0:\n            label[:,offset:] = label[:,0:w-offset]\n            label[:,:offset] = 0\n        if offset < 0:\n            offset = -offset\n            label[:,0:w-offset] = label[:,offset:]\n            label[:,w-offset:] = 0\n        sample['img'] = img\n        sample['mask'] = label\n        \n        return sample \n\n@TRANSFORM.register()\nclass RandomUDoffsetLABEL(object):\n    def __init__(self,max_offset, cfg=None):\n        self.max_offset = max_offset\n    def __call__(self, sample):\n        img = sample['img'] \n        label = sample['mask'] \n        offset = np.random.randint(-self.max_offset,self.max_offset)\n        h, w = img.shape[:2]\n\n        img = np.array(img)\n        if offset > 0:\n            img[offset:,:,:] = img[0:h-offset,:,:]\n            img[:offset,:,:] = 0\n        if offset < 0:\n            real_offset = -offset\n            img[0:h-real_offset,:,:] = img[real_offset:,:,:]\n            img[h-real_offset:,:,:] = 0\n\n        label = np.array(label)\n        if offset > 0:\n            label[offset:,:] = label[0:h-offset,:]\n            label[:offset,:] = 0\n        if offset < 0:\n            offset = -offset\n            label[0:h-offset,:] = label[offset:,:]\n            label[h-offset:,:] = 0\n        sample['img'] = img\n        sample['mask'] = label\n        return sample \n\n@TRANSFORM.register()\nclass Resize(object):\n    def __init__(self, size, cfg=None):\n        assert (isinstance(size, collections.abc.Iterable) and len(size) == 2)\n        self.size = size\n\n    def __call__(self, sample):\n        sample['img'] = cv2.resize(sample['img'], self.size,\n                                   interpolation=cv2.INTER_CUBIC)\n        if 'mask' in sample:\n            sample['mask'] = cv2.resize(sample['mask'], self.size,\n                                        interpolation=cv2.INTER_NEAREST)\n        return sample\n\n\n@TRANSFORM.register()\nclass RandomCrop(object):\n    def __init__(self, size, cfg=None):\n        if isinstance(size, numbers.Number):\n            self.size = (int(size), int(size))\n        else:\n            self.size = size\n\n    def __call__(self, img_group):\n        h, w = img_group[0].shape[0:2]\n        th, tw = self.size\n\n        out_images = list()\n        h1 = random.randint(0, max(0, h - th))\n        w1 = random.randint(0, max(0, w - tw))\n        h2 = min(h1 + th, h)\n        w2 = min(w1 + tw, w)\n\n        for img in img_group:\n            assert (img.shape[0] == h and img.shape[1] == w)\n            out_images.append(img[h1:h2, w1:w2, ...])\n        return out_images\n\n\n@TRANSFORM.register()\nclass CenterCrop(object):\n    def __init__(self, size, cfg=None):\n        if isinstance(size, numbers.Number):\n            self.size = (int(size), int(size))\n        else:\n            self.size = size\n\n    def __call__(self, img_group):\n        h, w = img_group[0].shape[0:2]\n        th, tw = self.size\n\n        out_images = list()\n        h1 = max(0, int((h - th) / 2))\n        w1 = max(0, int((w - tw) / 2))\n        h2 = min(h1 + th, h)\n        w2 = min(w1 + tw, w)\n\n        for img in img_group:\n            assert (img.shape[0] == h and img.shape[1] == w)\n            out_images.append(img[h1:h2, w1:w2, ...])\n        return out_images\n\n@TRANSFORM.register()\nclass RandomRotation(object):\n    def __init__(self, degree=(-10, 10), interpolation=(cv2.INTER_LINEAR, cv2.INTER_NEAREST), padding=None, cfg=None):\n        self.degree = degree\n        self.interpolation = interpolation\n        self.padding = padding\n        if self.padding is None:\n            self.padding = [0, 0]\n\n    def _rotate_img(self, sample, map_matrix):\n        h, w = sample['img'].shape[0:2]\n        
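# bilinear warp; pixels uncovered by the rotation are filled with self.padding\n        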
sample['img'] = cv2.warpAffine(\n sample['img'], map_matrix, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=self.padding)\n\n def _rotate_mask(self, sample, map_matrix):\n if 'mask' not in sample:\n return\n h, w = sample['mask'].shape[0:2]\n sample['mask'] = cv2.warpAffine(\n sample['mask'], map_matrix, (w, h), flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT, borderValue=self.padding)\n\n\n def __call__(self, sample):\n v = random.random()\n if v < 0.5:\n degree = random.uniform(self.degree[0], self.degree[1])\n h, w = sample['img'].shape[0:2]\n center = (w / 2, h / 2)\n map_matrix = cv2.getRotationMatrix2D(center, degree, 1.0)\n self._rotate_img(sample, map_matrix)\n self._rotate_mask(sample, map_matrix)\n return sample\n\n\n@TRANSFORM.register()\nclass RandomBlur(object):\n def __init__(self, applied, cfg=None):\n self.applied = applied\n\n def __call__(self, img_group):\n assert (len(self.applied) == len(img_group))\n v = random.random()\n if v < 0.5:\n out_images = []\n for img, a in zip(img_group, self.applied):\n if a:\n img = cv2.GaussianBlur(\n img, (5, 5), random.uniform(1e-6, 0.6))\n out_images.append(img)\n if len(img.shape) > len(out_images[-1].shape):\n out_images[-1] = out_images[-1][...,\n np.newaxis] # single channel image\n return out_images\n else:\n return img_group\n\n\n@TRANSFORM.register()\nclass RandomHorizontalFlip(object):\n \"\"\"Randomly horizontally flips the given numpy Image with a probability of 0.5\n \"\"\"\n\n def __init__(self, cfg=None):\n pass\n\n def __call__(self, sample):\n v = random.random()\n if v < 0.5:\n sample['img'] = np.fliplr(sample['img'])\n if 'mask' in sample: sample['mask'] = np.fliplr(sample['mask'])\n return sample\n\n\n@TRANSFORM.register()\nclass Normalize(object):\n def __init__(self, img_norm, cfg=None):\n self.mean = np.array(img_norm['mean'], dtype=np.float32)\n self.std = np.array(img_norm['std'], dtype=np.float32)\n\n def __call__(self, sample):\n m = self.mean\n s = self.std\n img = sample['img'] \n img = img.astype(np.float32, copy=False) / 255.0\n if len(m) == 1:\n img = img - np.array(m) # single channel image\n img = img / np.array(s)\n else:\n img = img - np.array(m)[np.newaxis, np.newaxis, ...]\n img = img / np.array(s)[np.newaxis, np.newaxis, ...]\n sample['img'] = img\n\n return sample \n\n\n@TRANSFORM.register()\nclass Colorjitters(transforms.ColorJitter):\n def __init__(self,cfg = None,**kwargs):\n super().__init__(**kwargs)\n\n def _apply_image(self, sample):\n \"\"\"\n Args:\n img (PIL Image): Input image.\n\n Returns:\n PIL Image: Color jittered image.\n \"\"\"\n transform = self._get_param(\n self.brightness, self.contrast, self.saturation, self.hue\n )\n sample['img'] = transform(sample['img'])\n return sample\n\n\n@TRANSFORM.register()\nclass RandomErasings(transforms.RandomErasing):\n def __init__(self,cfg = None,**kwargs):\n super().__init__(**kwargs)\n def _apply_image(self, sample):\n sample['img'] = super()._apply_image(sample['img'])\n return sample\n\n@TRANSFORM.register()\nclass GaussianBlur(object):\n \"\"\"\n Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709\n Adapted from MoCo:\n https://github.com/facebookresearch/moco/blob/master/moco/loader.py\n Note that this implementation does not seem to be exactly the same as\n described in SimCLR.\n \"\"\"\n\n def __init__(self, kernel_size,sigma=[0.1, 2.0],cfg =None):\n self.sigma = sigma\n if isinstance(kernel_size,int):\n self.kernel_size = (kernel_size,kernel_size)\n else:\n self.kernel_size = 
kernel_size\n\n def __call__(self, sample):\n img = sample['img']\n # sigma = random.uniform(self.sigma[0], self.sigma[1])\n img = cv2.GaussianBlur(img,ksize=self.kernel_size,sigmaX=self.sigma[0],sigmaY=self.sigma[1])\n sample['img'] = img\n return sample\n\n@TRANSFORM.register()\nclass RandomGrayScale(object):\n def __init__(self,p,cfg = None):\n self.p = p\n \n def __call__(self,sample):\n img = sample['img']\n v = random.random()\n H,W,C = img.shape\n if v < self.p:\n img = transforms.to_grayscale(img,num_output_channels=C)\n sample['img'] = img\n\n return sample \n \ndef CLRTransforms(img_h, img_w):\n return [\n dict(name='Resize',\n parameters=dict(size=dict(height=img_h, width=img_w)),\n p=1.0),\n dict(name='HorizontalFlip', parameters=dict(p=1.0), p=0.5),\n dict(name='Affine',\n parameters=dict(translate_percent=dict(x=(-0.1, 0.1),\n y=(-0.1, 0.1)),\n rotate=(-10, 10),\n scale=(0.8, 1.2)),\n p=0.7),\n dict(name='Resize',\n parameters=dict(size=dict(height=img_h, width=img_w)),\n p=1.0),\n ]\n","repo_name":"zkyseu/PPlanedet","sub_path":"pplanedet/datasets/preprocess/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":11799,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"86"} +{"seq_id":"12631268519","text":"#!/usr/bin/python\nfrom PyQt5.QtWidgets import QApplication\nfrom fenetreMDI_e import FenetreMDI\nimport sys\n\n# ===================================================================\n# Programme principal\n# ===================================================================\n\nif __name__ == \"__main__\":\n # Vérification de la syntaxe d'appel\n syntaxe =\"Syntaxe: \"+sys.argv[0]\n if(len(sys.argv) != 1):\n print(syntaxe)\n exit\n \n app=QApplication([])\n mf=FenetreMDI()\n app.exec_()","repo_name":"mwenger9/INSA_S5","sub_path":"FUS/Script/TP2_script/principalMDI_e.py","file_name":"principalMDI_e.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"22170393186","text":"import csv\nimport glob\nimport math\n\nimport numpy as np\nimport pandas as pd\nimport scipy.stats\nfrom scipy.integrate import quad\n\nimport Utilities\n\n\nclass Bands:\n def __init__(self, index, lower_bound, upper_bound, mid_point):\n self.index = index\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n self.mid_point = mid_point\n\n\ndef gaussian_bands(resolution):\n mean = 0\n std = 0.25\n x_min = -1.0\n x_max = 1.0\n x = np.linspace(x_min, x_max, 100)\n\n def normal_distribution_function(x):\n value = scipy.stats.norm.pdf(x, mean, std)\n return value\n\n total_area, err = quad(normal_distribution_function, x_min, x_max)\n total_area = round(total_area, 5)\n list_bands = list()\n upper_bound = 1\n index = 1\n for i in range(1, 2 * resolution + 1):\n x1 = (i - resolution - 1) / resolution\n x2 = (i - resolution) / resolution\n area, err = quad(normal_distribution_function, x1, x2)\n area = round(area, 5)\n length = round(2.0 * (area / total_area), 5)\n\n lower_bound = round(upper_bound - length, 5)\n upper_bound = round(upper_bound, 5)\n mid_point = round(upper_bound - ((upper_bound - lower_bound) / 2), 5)\n band = Bands(index, lower_bound, upper_bound, mid_point)\n upper_bound = upper_bound - length\n list_bands.append(band)\n index += 1\n return list_bands\n\n\ndef quantization(df, bands):\n mid_point = df.copy()\n for band in bands:\n df.mask((df >= band.lower_bound) & (df < band.upper_bound), band.index, 
inplace=True)\n df.loc[:] = df.astype(int)\n for band in bands:\n mid_point.mask((mid_point >= band.lower_bound) & (mid_point < band.upper_bound), band.mid_point, inplace=True)\n return df, mid_point\n\n\ndef normalizeSensorWise(df):\n df_norm = df.subtract(df.min(axis=1), axis=0).multiply(2) \\\n .divide(df.max(axis=1) - df.min(axis=1), axis=0).subtract(1).combine_first(df)\n return df_norm\n\n\ndef create_word_dictionary(avg_std_df, mid_point_df, quantized_data, directory, window_length, shift_length):\n folder_name = directory.split(\"\\\\\")[-1]\n word_list = list()\n for index, row in quantized_data.iterrows():\n for i in range(0, quantized_data.shape[1], shift_length):\n if i + window_length < quantized_data.shape[1]:\n avg_q = mid_point_df.loc[index][i:i + window_length].tolist()\n win = row[i:i + window_length].tolist()\n pair = [folder_name, index + 1, ' '.join(map(str, win)), i, avg_std_df.iloc[index]['avg'],\n avg_std_df.iloc[index]['std'], np.mean(avg_q)]\n word_list.append(pair)\n return word_list\n\n\ndef avg_std(df):\n avg = df.apply(np.mean, axis=0)\n std = df.apply(np.std, axis=0)\n return avg, std\n\n\ndef calculate_avg_std_sensor_wise(df):\n tran_df = pd.DataFrame(df.T)\n avg_temp, std_temp = avg_std(tran_df)\n avg_amplitude = avg_temp.to_frame('avg')\n std_deviation = std_temp.to_frame('std')\n amp_std = pd.concat([avg_amplitude, std_deviation], axis=1)\n return amp_std\n\n\ndef read_gestures_from_csv(all_files, directory, shift_length, window_length, bands, word_dict):\n for file_ in all_files:\n file_name = file_.split(\"\\\\\")[-1].split(\".\")[0]\n if file_name not in word_dict.keys():\n word_dict[file_name] = list()\n df = pd.read_csv(file_, header=None)\n column_names = [x for x in range(1, df.shape[1])]\n df = pd.DataFrame(df, columns=column_names)\n avg_std_df = calculate_avg_std_sensor_wise(df)\n df_norm = normalizeSensorWise(df)\n quantized_data, mid_point_df = quantization(df_norm.copy(), bands)\n word_dict[file_name].extend(create_word_dictionary(avg_std_df, mid_point_df, quantized_data, directory,\n window_length, shift_length))\n\n\ndef task0a(folder_directory, window_length, shift_length, resolution):\n dirs = Utilities.get_all_sub_folders(folder_directory)\n print(\"Building Gaussian Bands...\")\n bands = gaussian_bands(resolution)\n print(\"Done!\")\n word_dict = dict()\n for folder in dirs:\n print(\"Processing for Folder \", folder, \"...\")\n all_files = glob.glob(folder_directory + '\\\\' + folder + \"/*.csv\")\n file_directory = folder_directory + '\\\\' + folder\n read_gestures_from_csv(all_files, file_directory, shift_length, window_length, bands, word_dict)\n print(\"Done!\")\n\n for key, value in word_dict.items():\n word_file = folder_directory + '\\\\' + str(key) + '.wrd'\n with open(word_file, 'w', newline=\"\") as f:\n csv.writer(f, delimiter=',').writerows(value)\n f.close()\n print(\" ****Created dictionaries(.wrd) for all the gesture files****\")\n\n\ndef parse_and_store_file_data(file_dict, file, all_words, vectors):\n file_name = file.split(\"\\\\\")[-1].split(\".\")[0]\n with open(file, 'r') as f:\n if file_name not in file_dict.keys():\n file_dict[file_name] = all_words.copy()\n for line in f:\n row = line.strip().split(',')\n word = ' '.join(map(str, row[0:3]))\n if word in file_dict[file_name].keys():\n file_dict[file_name][word] += 1\n vector = dict()\n vector[\"file\"] = file_name\n vector.update(file_dict[file_name])\n vectors.append(vector)\n\n\ndef fill_word_dictionary(directory, all_words):\n all_files = glob.glob(directory + 
\"/*.wrd\")\n file_dict = dict()\n vectors = list()\n for file in all_files:\n parse_and_store_file_data(file_dict, file, all_words, vectors)\n df = pd.DataFrame(vectors)\n return file_dict, df\n\n\ndef calculations(directory, data_dict, data_df, all_words):\n # to keep track of the unique words\n with open(directory + '/header.txt', 'w', newline=\"\") as f:\n csv.writer(f).writerow(list(all_words.keys()))\n f.close()\n\n tf = all_words.copy()\n tf_idf = all_words.copy()\n\n for file_name, words_dict in data_dict.items():\n tf_vector = list()\n tf_idf_vector = list()\n total_words = sum(words_dict.values())\n\n num_of_words_in_gesture = data_df.astype(bool).sum(axis=0)\n for word, count in words_dict.items():\n tf[word] = count / total_words\n\n d_idf = float(num_of_words_in_gesture[word])\n tf_idf[word] = float(tf[word]) * (math.log10(len(data_df.columns) / d_idf)) if d_idf > 0.0 else 0.0\n\n tf_vector.append(tf.values())\n tf_idf_vector.append(tf_idf.values())\n\n tf = dict.fromkeys(tf, 0)\n tf_idf = dict.fromkeys(tf_idf, 0)\n\n writeToFile(directory + '/tf_vectors_' + file_name + '.txt', tf_vector)\n writeToFile(directory + '/tfidf_vectors_' + file_name + '.txt', tf_idf_vector)\n\n\ndef writeToFile(file_name, data):\n with open(file_name, 'w', newline=\"\") as f:\n csv.writer(f).writerows(data)\n f.close()\n\n\ndef task0b(directory):\n all_words = Utilities.fetchAllWordsFromDictionary(directory)\n print(\"Building all words dictionary\")\n data_dict, data_df = fill_word_dictionary(directory, all_words)\n print(\"Performing TF, TF-IDF calculations\")\n calculations(directory, data_dict, data_df, all_words)\n print(\" ****Created .txt files****\")\n\n\nif __name__ == '__main__':\n task0a(Utilities.read_directory(), 3, 3, 3)\n task0b(Utilities.read_directory())\n","repo_name":"SheenDullu/CSE515-MWDB-Group-6","sub_path":"Task0.py","file_name":"Task0.py","file_ext":"py","file_size_in_byte":7424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"74413740125","text":"# coding = utf-8\nimport configparser\nimport datetime\nimport logging\nimport os\nimport sys\nimport uuid\n\nfrom HouseCrawler.Items.ItemsFS import *\n\nsys.path.append(os.path.abspath('.'))\nsys.path.append(os.path.abspath('..'))\nsys.path.append(os.path.abspath('../..'))\n\nlogger = logging.getLogger(__name__)\n\n\nclass FSPipeline(object):\n def __init__(self, settings):\n self.settings = settings\n self.check_key = ['HouseUseType', 'HouseNature',\n 'HouseType',\n 'BalconyType',\n 'ForecastBuildingArea',\n 'ForecastInsideOfBuildingArea',\n 'ForecastPublicArea',\n 'MeasuredBuildingArea',\n 'MeasuredInsideOfBuildingArea',\n 'MeasuredSharedPublicArea',\n 'IsMortgage',\n 'IsAttachment',\n 'Adress',\n 'TotalPrice',\n 'ComplateTag',\n 'HouseSaleState']\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.settings)\n\n def safe_format_value(self, value):\n try:\n value = '%.05f' % float(value)\n return str(value)\n except Exception:\n pass\n if isinstance(value, dict):\n try:\n value = dict(value)\n return value\n except Exception:\n pass\n if isinstance(value, list):\n try:\n value.sort()\n return value\n except Exception:\n pass\n return str(value)\n\n def check_item_change(self, item):\n diff_flag = False\n q_object = item.django_model.objects\n if isinstance(item, House_Detail_Item):\n res_object = q_object.filter(HouseUUID=item['HouseUUID']).latest(field_name='RecordTime')\n res_tag = res_object.ComplateTag\n diff_keys = ['HouseSaleState', ]\n if not 
int(item['ComplateTag']):\n if self.safe_format_value(item.get('HouseSaleState')) != self.safe_format_value(\n getattr(res_object, 'HouseSaleState')):\n diff_flag = True\n else:\n diff_keys.extend(self.check_key)\n for key in diff_keys:\n if self.safe_format_value(item.get(key)) != self.safe_format_value(getattr(res_object, key)):\n diff_flag = True\n break\n if diff_flag:\n item['HouseSaleStateLatest'] = getattr(res_object, 'HouseSaleState')\n if (not int(item['ComplateTag'])) and int(res_tag):\n for ck in self.check_key:\n item[ck] = getattr(res_object, ck)\n elif isinstance(item, Project_Detail_Item):\n diff_keys = ['SourceUrl', 'Pages_num', \"Pk\", 'CheackTimeLatest', 'RecordTime']\n res_object = q_object.filter(ProjectUUID=item['ProjectUUID']).latest(field_name='RecordTime')\n for key in item:\n if (self.safe_format_value(item.get(key)) != self.safe_format_value(getattr(res_object, key))) and (\n key not in diff_keys):\n diff_flag = True\n break\n elif isinstance(item, Building_Detail_Item):\n diff_keys = ['SourceUrl', 'PresalePermitUrl', 'CheackTimeLatest', 'RecordTime']\n res_object = q_object.filter(BuildingUUID=item['BuildingUUID']).latest(field_name='RecordTime')\n for key in item:\n if (self.safe_format_value(item.get(key)) != self.safe_format_value(getattr(res_object, key))) and (\n key not in diff_keys):\n diff_flag = True\n elif isinstance(item, Certificate_Detail_Item):\n diff_keys = ['SourceUrl', 'PresalePermitUrl', 'CheackTimeLatest', 'RecordTime']\n res_object = q_object.filter(PresalePermitNumberUUID=item['PresalePermitNumberUUID']).latest(\n field_name='RecordTime')\n for key in item:\n if (self.safe_format_value(item.get(key)) != self.safe_format_value(getattr(res_object, key))) and (\n key not in diff_keys):\n diff_flag = True\n elif isinstance(item, Monitor_Item):\n res_object = q_object.filter(riqi=item['riqi']).latest(field_name='RecordTime')\n if self.safe_format_value(item.get('quanshi_zhuzhai_taoshu')) != self.safe_format_value(\n getattr(res_object, 'quanshi_zhuzhai_taoshu')):\n diff_flag = True\n\n return diff_flag, item\n\n @staticmethod\n def check_item_exist(item):\n exist_flag = False\n q_object = item.django_model.objects\n if isinstance(item, House_Detail_Item):\n if q_object.filter(HouseUUID=item['HouseUUID']).latest(field_name='RecordTime'):\n exist_flag = True\n elif isinstance(item, Project_Detail_Item):\n if q_object.filter(ProjectUUID=item['ProjectUUID']).latest(field_name='RecordTime'):\n exist_flag = True\n elif isinstance(item, Building_Detail_Item):\n if q_object.filter(BuildingUUID=item['BuildingUUID']).latest(field_name='RecordTime'):\n exist_flag = True\n elif isinstance(item, Certificate_Detail_Item):\n if q_object.filter(PresalePermitNumberUUID=item['PresalePermitNumberUUID']).latest(\n field_name='RecordTime'):\n exist_flag = True\n elif isinstance(item, Monitor_Item):\n if q_object.filter(riqi=item['riqi']).latest(field_name='RecordTime'):\n exist_flag = True\n return exist_flag\n\n @staticmethod\n def storage_item(item):\n if hasattr(item, 'save') and hasattr(item, 'django_model'):\n item['RecordID'] = uuid.uuid1()\n item['RecordTime'] = str(datetime.datetime.now())\n item.save()\n\n def process_item(self, item, spider):\n if item:\n if self.check_item_exist(item):\n logger.debug(\"item: %(item)s UUID existed\",\n {'item': item})\n diff_result, diff_item = self.check_item_change(item)\n if diff_result:\n logger.debug(\"item: %(item)s changed\",\n {'item': item})\n if not isinstance(item, Monitor_Item):\n self.storage_item(item)\n 
else:\n item.save()\n else:\n self.storage_item(item)\n logger.debug(\"item: %(item)s met first\",\n {'item': item})\n","repo_name":"codingEnzo/ProjectNewHouseMonitor","sub_path":"HouseCrawler/AirflowAdmin/ServiceCore/HouseCrawler/Pipelines/PipelinesFS.py","file_name":"PipelinesFS.py","file_ext":"py","file_size_in_byte":6856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"47140127202","text":"import numpy as np\nfrom scipy.linalg import orth\n\ndef sample_dpp(vals, vecs, k=0, one_hot=False):\n \"\"\"\n This function expects \n \n Arguments: \n vals: NumPy 1D Array of Eigenvalues of Kernel Matrix\n vecs: Numpy 2D Array of Eigenvectors of Kernel Matrix\n\n \"\"\"\n n = vecs.shape[0] # number of items in ground set\n \n # k-DPP\n if k:\n index = sample_k(vals, k) # sample_k, need to return index\n\n # Sample set size\n else:\n index = (np.random.rand(n) < (vals / (vals + 1)))\n k = np.sum(index)\n \n # Check for empty set\n if not k:\n return np.zeros(n) if one_hot else np.empty(0)\n \n # Check for full set\n if k == n:\n return np.ones(n) if one_hot else np.arange(k, dtype=float) \n \n V = vecs[:, index]\n\n # Sample a set of k items \n items = list()\n\n for i in range(k):\n p = np.sum(V**2, axis=1)\n p = np.cumsum(p / np.sum(p)) # item cumulative probabilities\n item = (np.random.rand() <= p).argmax()\n items.append(item)\n \n # Delete one eigenvector not orthogonal to e_item and find new basis\n j = (np.abs(V[item, :]) > 0).argmax() \n Vj = V[:, j]\n V = orth(V - (np.outer(Vj,(V[item, :] / Vj[item])))) \n \n items.sort()\n sample = np.array(items, dtype=float) \n\n if one_hot:\n sample = np.zeros(n)\n sample[items] = np.ones(k)\n \n return sample ","repo_name":"mbp28/determinantal-point-processes","sub_path":"sampling/sample_dpp.py","file_name":"sample_dpp.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"86"} +{"seq_id":"69983372125","text":"######################################################################################################\n# import all required packages and functions\n# from Functions import get_GM_API_Key\n# from Functions import lumber_species_properties_processing\n# from Functions import calculate_lumber_impact\n# from Functions import calculate_distance\n# from Functions import calculate_energy_impacts\nfrom Google_Maps_Functions import *\nimport googlemaps\nimport gmaps\nimport gmaps.datasets\nimport json\nimport requests\nimport pandas as pd\nimport xlsxwriter\nimport warnings\nwarnings.filterwarnings('ignore')\nimport math as math\nimport numpy as np\n#import matplotlib\nimport matplotlib.pyplot as plt\n\n# from itertools import tee ##this line may not be needed\n\n\n###import api_key from text file and configure the key\napikey_text = get_GM_API_Key()\ngmaps.configure(api_key=apikey_text)\nSawmills_Data = pd.read_excel(\".../CLT-LCA-Tool/Mill Datasets/Old/MasterDatasetSawmills.xlsx\")\nstates_list = [\"WA\", \"AR\"]\n\nSawmills_WA = Sawmills_Data[Sawmills_Data[\"STATE\"] == \"WA\"]\nprint(Sawmills_WA[\"STATE\"])\nSawmills_AR = Sawmills_Data[Sawmills_Data[\"STATE\"] == \"AR\"]\n\n#Mills_WA = pd.read_excel('C:/Users/SATNOORK/Documents/GitHub/CLT-LCA-Tool/Forest_GIS_Data.xlsx', sheet_name='DFWA')\n#Mills_AR = pd.read_excel('C:/Users/SATNOORK/Desktop/CLT Literature/Mill Datasets/mill2005south.xls', sheet_name='AR')\n\n###rows having Douglas fir in Washington\nDFWA = 
pd.read_excel('.../CLT-LCA-Tool/Forest_GIS_Data_WA.xlsx')\nprint(DFWA.head())\n###rows having loblolly pine in Arkansas\n#LPAR = pd.read_excel('C:/Users/SATNOORK/Documents/GitHub/CLT-LCA-Tool/Forest_GIS_Data.xlsx', sheet_name='LPAR')\n#(\"DONE\")\n\nSawmills_WA[\"Total Area\"] = 0\nSawmills_WA[\"Counted\"] = \"\"\nSawmills_AR[\"Total Area\"] = \"\"\nSawmills_AR[\"Counted\"] = 0\ncount_mill = 0\nindex_count = 0\n\nfor index, row in Sawmills_WA.iterrows():\n mill_coordinates = str(row['LAT']) + ', ' + str(row['LON'])\n\n for index_f, row_f in DFWA.iterrows():\n forest_coordinates = str(row_f['G5km_Lat']) + ', ' + str(row_f['G5km_Lon'])\n distance_f_m = calculate_distance(forest_coordinates, mill_coordinates, apikey_text)\n\n print(index_f)\n print(distance_f_m)\n previous_total_area = Sawmills_WA[\"Total Area\"].iloc[index_count]\n #print(row_f[\"Area_km2\"])\n #print(previous_total_area)\n\n if distance_f_m <= 200:\n Sawmills_WA.loc[index, \"Total Area\"] = previous_total_area + row_f[\"Area_km2\"]\n\n if (distance_f_m > 200) & (distance_f_m <= 225):\n Sawmills_WA.loc[index, \"Total Area\"] = previous_total_area + 0.5 * row_f[\"Area_km2\"]\n #print(Sawmills_WA[\"Total Area\"].iloc[index_count])\n #if (distance_f_m > 225) & (distance_f_m <= 250):\n #Sawmills_WA.loc[index, \"Total Area\"] = previous_total_area + 0.1 * row_f[\"Area_km2\"]\n\n if Sawmills_WA[\"Total Area\"].iloc[index_count] >= 5:\n #print(Sawmills_WA[\"Total Area\"].iloc[index_count])\n break\n\n print(Sawmills_WA[\"Total Area\"].iloc[index_count])\n\n if row[\"Total Area\"] >= 5:\n Sawmills_WA.loc[index, \"Counted\"] = \"Y\"\n count_mill = count_mill + 1\n print(str(\"mill\" + str(count_mill)))\n\n index_count = index_count + 1\n\nSawmills_WA.to_excel(\".../CLT-LCA-Tool/Mill Datasets/DFWAcalculateddata.xlsx\")\n\nprint(\"DONE DONE DONE\")\n\n\n\n","repo_name":"NREL/CLT-LCA-Tool","sub_path":"Code/SawmillsMapping.py","file_name":"SawmillsMapping.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"31841801296","text":"import boto3\nimport random\nimport string\n\n\ndef lambda_handler(event, context):\n message = event['message']\n user_id = random_generator()\n\n lex = boto3.client('lex-runtime', region_name='us-east-1')\n\n response = lex.post_content(\n botName='FindPhotos',\n botAlias='Find_Photos',\n userId=user_id,\n contentType=\"text/plain; charset=utf-8\",\n inputStream=message\n )\n\n keywords = []\n for key, val in response['slots'].items():\n if (key == 'Animal' or key == 'Object') and val is not None:\n keywords += val.split(' ')\n if (key == 'City' or key == 'Country') and val is not None:\n keywords.append(val)\n\n done = True if len(keywords) > 0 else False\n\n return {\n 'status': done,\n 'keywords': keywords\n }\n\n\ndef random_generator(size=10, char=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(char) for i in range(size))\n","repo_name":"liuqx0717/COMS6998","sub_path":"hw3/lex/lex.py","file_name":"lex.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"2148731205","text":"from api.models import Mode\r\nfrom django.utils.decorators import method_decorator\r\nfrom drf_yasg import openapi\r\nfrom drf_yasg.utils import swagger_auto_schema\r\nfrom rest_framework import viewsets\r\nfrom api.serializers import ModeSerializer\r\nfrom django.core.exceptions import 
SuspiciousOperation\r\nfrom rest_framework.authentication import TokenAuthentication\r\nfrom rest_framework.permissions import IsAuthenticated\r\n\r\n\r\n@method_decorator(name='list', decorator=swagger_auto_schema(\r\n    operation_description=\"Return list of modes based on given query string\",\r\n    manual_parameters=[\r\n        openapi.Parameter(\r\n            name='id', in_=openapi.IN_QUERY,\r\n            type=openapi.TYPE_INTEGER,\r\n            description=\"Id of a mode\",\r\n        ), openapi.Parameter(\r\n            name='slug', in_=openapi.IN_QUERY,\r\n            type=openapi.TYPE_STRING,\r\n            description=\"A mode slug\",\r\n        )\r\n    ]\r\n))\r\nclass ModeViewSet(viewsets.ModelViewSet):\r\n    queryset = Mode.objects.all()\r\n    serializer_class = ModeSerializer\r\n    permission_classes = (IsAuthenticated,)\r\n    authentication_classes = (TokenAuthentication,)\r\n\r\n    def build_params_filter(self, query):\r\n        pk = query.get(\"id\", None)\r\n        slug = query.get(\"slug\", None)\r\n        filter = {}\r\n        if pk is not None:\r\n            filter[\"id\"] = pk\r\n        if slug is not None:\r\n            filter[\"slug__iexact\"] = slug\r\n        return filter\r\n\r\n    def get_queryset(self):\r\n        queryset = Mode.objects.all()\r\n        if self.request.query_params:\r\n            filter = self.build_params_filter(self.request.query_params)\r\n            if filter:\r\n                return queryset.filter(**filter)\r\n            else:\r\n                raise SuspiciousOperation\r\n        return queryset\r\n","repo_name":"WycliffeAssociates/tE-backend","sub_path":"tRecorderApi/api/views/mode.py","file_name":"mode.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"43174322126","text":"import logging\nimport glob\nimport os\nimport argparse\nfrom typing import List\nfrom threading import Thread\n\nfrom PIL import Image\nimport numpy as np\nfrom mtcnn import MTCNN\nfrom tqdm import tqdm\n\n\nWORKERS_NUM = 8\n\n\ndef init_logger(path: str):\n    logger = logging.getLogger(__name__)\n    fh = logging.FileHandler(path)\n    logger.addHandler(fh)\n    return logger\n\n\nclass Cropper(Thread):\n    def __init__(self, paths: List[str], dst_root: str, pbar: tqdm, logger: logging.Logger):\n        self.paths = paths\n        self.dst_root = dst_root\n        self.detector = MTCNN()\n        self.pbar = pbar\n        self.logger = logger\n        super().__init__()\n\n    def run(self):\n        for p in self.paths:\n            self.preprocess(p)\n\n    def preprocess(self, path: str):\n        try:\n            img = Image.open(path)\n            faces = self.detector.detect_faces(np.array(img))\n            if len(faces) == 1:\n                x, y, w, h = faces[0][\"box\"]\n                img_crop = img.crop((x, y, x + w, y + h))\n                img.close()\n                dst_path = \"/\".join([self.dst_root] + path.split(\"/\")[-2:])\n                img_crop.save(dst_path)\n                img_crop.close()\n            else:\n                img.close()\n            self.pbar.update(1)\n        except:\n            self.logger.exception(f\"Could not preprocess {path}\")\n            self.pbar.update(1)\n\n\ndef split_paths(paths: List[str], n: int) -> List[List[str]]:\n    dim = len(paths) // n\n    chunks = []\n    for i in range(n - 1):\n        chunks.append(paths[i * dim: (i + 1) * dim])\n    # the final chunk absorbs any remainder when len(paths) is not divisible by n\n    chunks.append(paths[(n - 1) * dim:])\n    return chunks\n\n\ndef preprocess_season(season_dir: str, dst_root: str, logger):\n    paths = glob.glob(season_dir + \"/*.png\")\n    chunks = split_paths(paths, WORKERS_NUM)\n    with tqdm(total=len(paths)) as pbar:\n        croppers = [Cropper(c, dst_root, pbar, logger) for c in chunks]\n        for c in croppers:\n            c.start()\n        for c in croppers:\n            c.join()\n\n\ndef preprocess_dataset(src_root: str):\n    dst_root = f\"{src_root}_preprocessed\"\n    os.mkdir(dst_root)\n    logger = init_logger(dst_root + \"/exceptions.log\")\n    seasons_dirs = glob.glob(src_root + 
\"/*/\")\n for i, season_dir in enumerate(seasons_dirs, 1):\n os.mkdir(dst_root + \"/\" + season_dir.split(\"/\")[-2])\n print(f\"{season_dir.split('/')[-2]} ({i}/{len(seasons_dirs)})\")\n preprocess_season(season_dir, dst_root, logger)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--src_dataset\", help=\"Directory of the dataset\", required=True)\n\n args = parser.parse_args()\n\n preprocess_dataset(args.src_dataset)\n","repo_name":"lajota13/armocromia","sub_path":"scripts/preprocess_dataset.py","file_name":"preprocess_dataset.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"18521061237","text":"from flask import Flask, render_template,request,redirect,url_for,jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import extract,func,or_\nfrom sqlalchemy.sql.expression import extract\nfrom openpyxl import load_workbook\nfrom datetime import date,datetime\nfrom dateutil.parser import parse\nimport logging\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///employer.db\"\napp.config['SQLALCHEMY_BINDS']={'login':\"sqlite:///login.db\",\n 'delete_user':\"sqlite:///delete.db\"\n }\n\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\ndb = SQLAlchemy(app)\n\nclass Employee(db.Model):\n Sno = db.Column(db.Integer, primary_key=True, autoincrement=True)\n Emp_id = db.Column(db.String(500))\n Name = db.Column(db.String(500))\n Designation = db.Column(db.String(500))\n Department = db.Column(db.String(500))\n Project = db.Column(db.String(500))\n Job_role = db.Column(db.String(500))\n Employment_status = db.Column(db.String(500))\n Joining_date = db.Column(db.String(500))\n Experience = db.Column(db.String(500))\n Location = db.Column(db.String(500))\n Last_promoted = db.Column(db.String(500))\n Comments = db.Column(db.String(500))\nclass Login(db.Model):\n __bind_key__=\"login\"\n id=db.Column(db.Integer,primary_key=True)\n email=db.Column(db.String(500))\n password=db.Column(db.String(200))\nclass Delete_user(db.Model):\n __bind_key__=\"delete_user\"\n id=db.Column(db.Integer,primary_key=True) \n Name=db.Column(db.String(200))\n Date=db.Column(db.String(200)) \ndef extract_data_from_excel():\n wb = load_workbook(\"employee_data 1.xlsx\")\n ws = wb.active\n column_mappings = {\n 'Sno': 0,\n 'Emp_id': 1,\n 'Name': 2,\n 'Designation': 3,\n 'Department': 4,\n 'Project': 5,\n 'Job_role': 6,\n 'Employment_status': 7,\n 'Joining_date': 8,\n 'Experience': 9,\n 'Location': 10,\n 'Last_promoted': 11,\n 'Comments': 12\n }\n for row in ws.iter_rows(min_row=2, values_only=True):\n \n if not all(cell is None for cell in row):\n Sno = row[column_mappings['Sno']]\n Emp_id = row[column_mappings['Emp_id']]\n Name = row[column_mappings['Name']]\n Designation = row[column_mappings['Designation']]\n Department = row[column_mappings['Department']]\n Project = row[column_mappings['Project']]\n Job_role = row[column_mappings['Job_role']]\n Employment_status = row[column_mappings['Employment_status']]\n Joining_date = row[column_mappings['Joining_date']]\n Experience = row[column_mappings['Experience']]\n formatted_date = None\n if isinstance(Joining_date, datetime):\n join_date = Joining_date \n formatted_date=join_date.strftime(\"%d-%m-%Y\")\n \n day = join_date.day\n month = join_date.month\n year = join_date.year\n\n current_date = date.today()\n Experience = current_date.year - year\n\n if (current_date.month, 
current_date.day) < (month, day):\n Experience -= 1\n\n if Experience < 1:\n Experience = \"Less than 1 year\"\n else:\n join_date = None\n day = None\n month = None\n year = None\n Experience = None\n\n \n Location = row[column_mappings['Location']]\n Last_promoted = row[column_mappings['Last_promoted']]\n Comments = row[column_mappings['Comments']]\n \n\n\n existing_data = Employee.query.filter_by(Name=Name).first()\n if not existing_data:\n employee = Employee(Emp_id=Emp_id, Name=Name, Designation=Designation,\n Department=Department, Project=Project, Job_role=Job_role,\n Employment_status=Employment_status, Joining_date=formatted_date,\n Experience=Experience, Location=Location, Last_promoted=Last_promoted,\n Comments=Comments)\n db.session.add(employee)\n db.session.commit()\n\ndef allowed_file(filename):\n return \".\" in filename and filename.rsplit(\".\",1)[1] in [\"xlsx\",\"csv\"]\n@app.route(\"/\",methods=[\"GET\",\"POST\"])\ndef signPage():\n correct_user=None\n error_message=None\n if request.method==\"POST\":\n email=request.form[\"email\"]\n password=request.form[\"password\"]\n correct_user=Login.query.filter_by(email=email).first()\n if correct_user:\n if correct_user.password==password:\n return redirect(url_for(\"dashBoard\"))\n else:\n correct_user=None\n error_message=\"invalid login credentials\"\n if correct_user == None:\n error_message=\"invalid login credentials\" \n return render_template(\"sign.html\",error_message=error_message) \n@app.route(\"/dashboard\")\ndef dashBoard():\n status=Employee.query.with_entities(Employee.Employment_status).distinct()\n # for stat in status:\n # count = Employee.query.filter_by(Employment_status=stat.Employment_status).count()\n # print(f\"Count for {stat.Employment_status}: {count}\")\n # joining_dates = Employee.query.with_entities(Employee.Joining_date).all()\n # print(f\"Joining dates: {joining_dates}\")\n \n del_employers=Delete_user.query.all()\n deleted_jan_employers=[]\n deleted_feb_employers=[]\n deleted_march_employers=[]\n deleted_april_employers=[]\n deleted_may_employers=[]\n deleted_june_employers=[]\n deleted_july_employers=[]\n deleted_aug_employers=[]\n deleted_sep_employers=[]\n deleted_oct_employers=[]\n deleted_nov_employers=[]\n deleted_dec_employers=[]\n for emp in del_employers:\n if emp.Date:\n split_date=emp.Date.split(\"-\")\n modified_month=split_date[1]\n if modified_month==\"01\":\n deleted_jan_employers.append(emp.Name) \n if modified_month==\"02\":\n deleted_feb_employers.append(emp.Name) \n if modified_month==\"03\":\n deleted_march_employers.append(emp.Name)\n if modified_month==\"04\":\n deleted_april_employers.append(emp.Name)\n if modified_month==\"05\":\n deleted_may_employers.append(emp.Name) \n if modified_month==\"06\":\n deleted_june_employers.append(emp.Name)\n if modified_month==\"07\":\n deleted_july_employers.append(emp.Name) \n if modified_month==\"08\":\n deleted_aug_employers.append(emp.Name)\n if modified_month==\"09\":\n deleted_sep_employers.append(emp.Name) \n if modified_month=='10':\n deleted_oct_employers.append(emp.Name) \n if modified_month=='11':\n deleted_nov_employers.append(emp.Name) \n if modified_month=='12':\n deleted_dec_employers.append(emp.Name) \n \n \n \n employers = Employee.query.all()\n jan_employers=[]\n feb_employers=[]\n march_employers=[]\n april_employers=[]\n may_employers=[]\n june_employers=[]\n july_employers=[]\n aug_employers=[]\n sep_employers=[]\n oct_employers=[]\n nov_employers=[]\n dec_employers=[]\n for employee in employers:\n if 
employee.Joining_date:\n split_date = employee.Joining_date.split('-')\n modified_month =split_date[1]\n # print(\"split dates\")\n # print(modified_month) \n if modified_month ==\"01\":\n jan_employers.append(employee.Name) \n if modified_month ==\"02\":\n feb_employers.append(employee.Name) \n if modified_month ==\"03\":\n march_employers.append(employee.Name)\n if modified_month ==\"04\":\n april_employers.append(employee.Name) \n if modified_month ==\"05\":\n may_employers.append(employee.Name)\n if modified_month=='06':\n june_employers.append(employee.Name) \n if modified_month=='07':\n july_employers.append(employee.Name) \n if modified_month=='08':\n aug_employers.append(employee.Name) \n if modified_month=='09':\n sep_employers.append(employee.Name)\n if modified_month=='10':\n oct_employers.append(employee.Name)\n if modified_month=='11':\n nov_employers.append(employee.Name)\n if modified_month=='12':\n dec_employers.append(employee.Name) \n # print(\"june employers..\")\n # print(june_employers)\n\n employment_status_counts={}\n for stat in status:\n count=Employee.query.filter_by(Employment_status=stat.Employment_status).count()\n \n employment_status_counts[stat.Employment_status]=count\n return render_template(\"dashboard.html\",employment_status_counts=employment_status_counts,june_employers=june_employers,deleted_june_employers=deleted_june_employers,deleted_jan_employers=deleted_jan_employers,deleted_feb_employers=deleted_feb_employers,deleted_march_employers=deleted_march_employers,deleted_april_employers=deleted_april_employers,deleted_may_employers=deleted_may_employers,deleted_july_employers=deleted_july_employers,deleted_aug_employers=deleted_aug_employers,deleted_sep_employers=deleted_sep_employers,deleted_oct_employers=deleted_oct_employers,deleted_nov_employers=deleted_nov_employers,deleted_dec_employers=deleted_dec_employers,jan_employers=jan_employers,feb_employers=feb_employers,march_employers=march_employers,april_employers=april_employers,may_employers=may_employers,july_employers=july_employers,aug_employers=aug_employers,sep_employers=sep_employers,oct_employers=oct_employers,nov_employers=nov_employers,dec_employers=dec_employers) \n@app.route(\"/home\")\ndef Home():\n data=Employee.query.all()\n return render_template(\"index.html\",data=data)\n\n@app.route(\"/add\", methods=[\"GET\", \"POST\"])\ndef Add():\n if request.method == \"POST\":\n emp_id = request.form.get(\"emp_id\")\n name = request.form.get(\"name\")\n designation = request.form.get(\"designation\")\n department = request.form.get(\"department\")\n project = request.form.get(\"project\")\n job_role = request.form.get(\"job_role\")\n employment_status = request.form.get(\"employment_status\")\n joining_date = request.form.get(\"joining_date\")\n date_parts = joining_date.split('-')\n if len(date_parts) == 3:\n formatted_date = f\"{date_parts[2]}-{date_parts[1]}-{date_parts[0]}\"\n join_date=datetime.strptime(formatted_date, \"%d-%m-%Y\")\n current_date=date.today() \n experience=current_date.year -int(join_date.year)\n if (current_date.month,current_date.day) < (join_date.month,join_date.day):\n experience-=1\n if experience <1:\n experience=\"Less than 1 year\" \n else:\n formatted_date = None \n experience=None\n # experience = request.form.get(\"experience\")\n location = request.form.get(\"location\")\n last_promoted = request.form.get(\"last_promoted\")\n comments = request.form.get(\"comments\")\n existing_data=Employee.query.filter_by(Name=name).first()\n if not existing_data:\n 
employee = Employee(\n Emp_id=emp_id,\n Name=name,\n Designation=designation,\n Department=department,\n Project=project,\n Job_role=job_role,\n Employment_status=employment_status,\n Joining_date=formatted_date,\n Experience=experience,\n Location=location,\n Last_promoted=last_promoted,\n Comments=comments\n )\n db.session.add(employee)\n db.session.commit()\n return redirect(\"/home\")\n return render_template(\"add.html\")\n\n@app.route(\"/update/\",methods=[\"GET\",\"POST\"])\ndef Update(sno):\n selected_date = request.args.get(\"date\")\n if request.method == \"POST\":\n emp_id = request.form.get(\"emp_id\")\n name = request.form.get(\"name\")\n designation = request.form.get(\"designation\")\n department = request.form.get(\"department\")\n project = request.form.get(\"project\")\n job_role = request.form.get(\"job_role\")\n employment_status = request.form.get(\"employment_status\")\n joining_date = request.form.get(\"joining_date\")\n date_parts=joining_date.split(\"-\")\n if len(date_parts)==3:\n formatted_date=f\"{date_parts[2]}-{date_parts[1]}-{date_parts[0]}\"\n join_date=datetime.strptime(formatted_date,\"%d-%m-%Y\")\n current_day=date.today()\n experience=current_day.year-int(join_date.year)\n if (current_day.month,current_day.day) < (join_date.month,join_date.day):\n experience-=1\n if experience < 1:\n experience=\"Less than 1 year\"\n else:\n formatted_date = None \n experience=None \n # experience = request.form.get(\"experience\")\n location = request.form.get(\"location\")\n last_promoted = request.form.get(\"last_promoted\")\n comments = request.form.get(\"comments\")\n employee=Employee.query.filter_by(Sno=sno).first()\n employee.Emp_id=emp_id\n employee.Name=name\n employee.Designation=designation\n employee.Department=department\n employee.Project=project\n employee.Job_role=job_role\n employee.Employment_status=employment_status\n employee.Joining_date=formatted_date\n employee.Experience=experience\n employee.Location=location\n employee.Last_promoted=last_promoted\n employee.Comments=comments\n db.session.add(employee)\n db.session.commit()\n return redirect(\"/home\")\n employee=Employee.query.filter_by(Sno=sno).first()\n return render_template(\"update.html\",employee=employee,selected_date=selected_date)\n\n@app.route(\"/delete/\")\ndef Delete(sno):\n employee=Employee.query.filter_by(Sno=sno).first()\n delete=Delete_user(Name=employee.Name,Date=employee.Joining_date)\n db.session.add(delete)\n db.session.commit()\n db.session.delete(employee)\n db.session.commit()\n return redirect(\"/home\")\nwith app.app_context():\n db.create_all()\n data=extract_data_from_excel()\n\n@app.route(\"/bulk\",methods=[\"GET\",\"POST\"])\ndef bulk():\n if request.method==\"POST\":\n file = request.files['file']\n if file and allowed_file(file.filename):\n if file.filename.endswith(\".xlsx\"):\n wb=load_workbook(file)\n ws=wb.active\n column_mappings = {\n 'Sno': 0,\n 'Emp_id': 1,\n 'Name': 2,\n 'Designation': 3,\n 'Department': 4,\n 'Project': 5,\n 'Job_role': 6,\n 'Employment_status': 7,\n 'Joining_date': 8,\n 'Experience': 9,\n 'Location': 10,\n 'Last_promoted': 11,\n 'Comments': 12\n }\n for row in ws.iter_rows (min_row=2,values_only=True):\n if not all(cell is None for cell in row):\n \n Emp_id = row[column_mappings['Emp_id']]\n Name = row[column_mappings['Name']]\n Designation = row[column_mappings['Designation']]\n Department = row[column_mappings['Department']]\n Project = row[column_mappings['Project']]\n Job_role = row[column_mappings['Job_role']]\n Employment_status = 
row[column_mappings['Employment_status']]\n Joining_date = row[column_mappings['Joining_date']]\n Experience = row[column_mappings['Experience']]\n formatted_date = None\n if isinstance(Joining_date,datetime):\n join_date=Joining_date\n formatted_date=join_date.strftime(\"%d-%m-%Y\")\n month=join_date.month\n day=join_date.day\n year=join_date.year\n current_date=date.today()\n Experience=current_date.year-year\n if (current_date.month,current_date.day) < (month,day):\n Experience-=1\n if Experience < 1:\n Experience=\"Less than 1 year\"\n else:\n join_date = None\n day = None\n month = None\n year = None\n Experience = None\n\n \n Location = row[column_mappings['Location']]\n Last_promoted = row[column_mappings['Last_promoted']]\n Comments = row[column_mappings['Comments']]\n existing_data=Employee.query.filter_by(Name=Name).first()\n if not existing_data:\n employee = Employee(Emp_id=Emp_id, Name=Name, Designation=Designation,\n Department=Department, Project=Project, Job_role=Job_role,\n Employment_status=Employment_status, Joining_date=formatted_date,\n Experience=Experience, Location=Location, Last_promoted=Last_promoted,\n Comments=Comments)\n db.session.add(employee)\n db.session.commit()\n return redirect(\"/home\") \n\n \n return render_template(\"bulk.html\")\n \n@app.route(\"/view/\")\ndef view(sno):\n data=Employee.query.filter_by(Sno=sno).first()\n return render_template(\"view.html\",data=data) \n@app.route(\"/register\",methods=[\"GET\",\"POST\"])\ndef register():\n if request.method==\"POST\":\n email=request.form[\"email\"]\n password=request.form[\"password\"]\n user=Login(email=email,password=password)\n db.session.add(user)\n db.session.commit()\n return redirect(\"/\")\n\n return render_template(\"register.html\")\n@app.route(\"/get_employees_list/\")\ndef get_employees_list(employment_status):\n employees=Employee.query.filter_by(Employment_status=employment_status).all()\n employee_names=[employee.Name for employee in employees]\n return jsonify({'employeeList': employee_names})\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n\n\n","repo_name":"whitedevil1233rrffrfrrferf/adding_data_to_database_new","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":19352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"40499348413","text":"# Loop Tuples\nrecord = (1, \"Grimdiana\", \"Bones\", \"boulders\")\n\nrow = ''\n\nfor x in record:\n print(x)\n row = row + str(x) + \", \"\n\nprint(row)\n\n# Loop Lists\nvalues_list = [1, 1, 2, 3, 5, 8, 13, 21, 34]\n\nfor x in values_list:\n print(x)\n\nindex_list = []\n\nfor i in range(len(values_list)):\n print(i)\n index_list.insert(0, i)\n\nprint(index_list)\n\nfor i in index_list:\n if i % 2 != 0:\n values_list.pop(i)\n\nprint(values_list)\n\n# Loop Sets\nvowels = {\"A\", \"E\", \"I\", \"O\", \"U\"}\nparts_of_the_big_letter = {\"L\", \"M\", \"N\", \"O\", \"P\"}\n\nfor i in vowels:\n parts_of_the_big_letter.discard(i)\n\nprint(parts_of_the_big_letter)\n\n# Loop dictionary\nplayer_positions = {\n \"Who\": \"1B\",\n \"What\": \"2B\",\n \"I Don't Know\": \"3B\",\n \"Why\": \"LF\",\n \"Because\": \"CF\",\n \"Tomorrow\": \"P\",\n \"Today\": \"C\",\n \"I Don't Care\": \"SS\"\n }\n\nplayers = []\n\nfor x in player_positions:\n players.insert(0, x)\n\nprint(players)\n\npositions = []\n\nfor x in player_positions.values():\n positions.insert(0, x)\n\nprint(positions)\n\nfor x, y in player_positions.items():\n print(x, \" is on \", 
y)\n","repo_name":"zdanielshi/ds-py-0422-code-solutions","sub_path":"ds-prep-python-loops/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"1736386592","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 20 13:23:48 2019\n\n@author: juan\n\"\"\"\nnum = int(input('digite o numero: '))\n\nmilhar = num//1000\nnum = (num - milhar*1000)\nprint ('milhar: ',milhar)\n\ncentena = (num//100)\nnum = (num - centena*100)\nprint ('centena: ',centena)\n\ndezena = (num//10)\nnum = (num - dezena*10)\nprint('dezena: ',dezena)\n\nunidade = num\nprint('unidade: ',unidade) \n\n\n\n\"\"\" O mesmo codigo, mas usando recurso de string para responder\nnum = input('digite o numero: ')\nprint('milhar: ',num[0:1])\nprint('centena: ',num[1:2])\nprint('dezena: ',num[2:3])\nprint('unidade: ',num[3:])\n\n\"\"\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"2017100235/Mix---Python","sub_path":"Skill curso em video - Python - mundo 1/desafio 23.py","file_name":"desafio 23.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"34452871263","text":"from aip import AipOcr\n\n\ndef get_file_content(filePath):\n    with open(filePath, 'rb') as fp:\n        return fp.read()\n\n\nif __name__ == '__main__':\n    APP_ID = '2'\n    API_KEY = '2'\n    SECRET_KEY = '2'\n    client = AipOcr(APP_ID, API_KEY, SECRET_KEY)\n    image = get_file_content('grey.png')\n    options = {}\n    options[\"language_type\"] = \"CHN_ENG\"\n    general = client.basicGeneral(image, options)\n    print(general)\n","repo_name":"WakeUpYoung/test-spider","sub_path":"test/opencvTest/BaiduAip.py","file_name":"BaiduAip.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"39804160954","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef subfigures(a):\n    # Write function subfigures that creates a figure that has two\n    # subfigures (two axes in matplotlib parlance). The function gets a\n    # two dimensional array a as a parameter. In the left subfigure draw\n    # using the plot method a graph, whose x coordinates are in the first\n    # column of a and the y coordinates are in the second column of a.\n    # In the right subfigure draw using the scatter method a set of points\n    # whose x coords are again in the first column of a and whose y\n    # coordinates are in the second column of a. Additionally, the points\n    # should get their color from the third column of a, and size of the\n    # point from the fourth column of a.\n    # 
For this, use the c and s named parameters of scatter, respectively\n fig, (ax1, ax2) = plt.subplots(1, 2)\n ax1.plot(a[:,0], a[:,1])\n ax2.scatter(a[:,0], a[:,1], c=a[:,2], s=a[:,3])\n plt.show()\n\ndef main():\n a = np.array([[1,2,3,4], [2,3,4,5], [3,4,5,6], [4,5,6,7], [5,6,7,8], [6,7,8,9]])\n subfigures(a)\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jreisgen/Data-Analysis-with-Python","sub_path":"mooc-data-analysis-with-python-2022/part03-e10_subfigures/src/subfigures.py","file_name":"subfigures.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"44453570399","text":"import numpy as np\nfrom PIL import Image\n\nimg = Image.open('thor.jpg')\nimg = img.convert('L')\n\narr = np.array(img)\nnew_arr = arr[0:500,0:500]\n\nimg1 = Image.fromarray(new_arr,'L')\nimg1.show()","repo_name":"nitesh619/DataScience","sub_path":"cvOpen/practice/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"74496589725","text":"import os\nimport time\nimport argparse\n\nfrom pycparser import c_ast\n\nimport preprocess\nimport astvisitor\nimport pycstructure\nimport format_string_parser\n\n\nap = argparse.ArgumentParser()\nap.add_argument('project', help='project to be analyzed')\nap.add_argument('-c', '--config', \n help='compilation options for specific project')\n# ap.add_argument('-s', '--sources', \n# help='source paths already given in a file')\nargs = ap.parse_args()\n\n\ncount_compilable = 0\ncount_wrapper = 0\ncount_wrapper_has_fmtstr = 0 # can have several fmtstr's in one wrapper function\ncount_wrapper_marked_noarg = 0\nbugs_flag_mismatch = set()\n\n\nstart_time = time.time()\n\n\n# project -> C source\nsources = []\nif os.path.isfile(args.project):\n sources.append(args.project)\nelse:\n for dir, subdirs, files in os.walk(args.project):\n for f in files:\n if f.endswith('.c'):\n sources.append(os.path.join(dir, f))\n\n\nfor s in sources:\n print(s)\n\n # C source -> ast\n # preprocess, parse\n ast = None\n try:\n ast = preprocess.cpp(s, args.config)\n count_compilable += 1\n except Warning as w:\n print(w)\n except Exception as e:\n print(e)\n print(\"[ERROR] Preprocess failed, fix compilation options.\")\n if not ast:\n continue\n\n # ast -> pymethoddef-s array\n pymethoddef_visitor = astvisitor.PyMethodDefVisitor(s)\n pymethoddef_visitor.visit(ast)\n for method in pymethoddef_visitor.pymethoddefs:\n # pymethoddef -> C implementation (function declaration)\n funcdef_visitor = astvisitor.FuncDefVisitor(method.ml_meth.name)\n funcdef_visitor.visit(ast)\n\n # C implementation -> argument parsing Python/C API call\n funccall_visitor = astvisitor.FuncCallVisitor()\n if not funcdef_visitor.wrapper:\n continue\n funccall_visitor.visit(funcdef_visitor.wrapper[0])\n\n method.c_impl = funcdef_visitor.wrapper[0]\n print(method)\n count_wrapper += 1\n\n BUG_MSG = \"[BUG] function declared without param or with unused param but ml_flag is not METH_NOARGS\"\n\n if method.ml_flag.value != '0x0004' and method.ml_flag.value != '0x0010': # METH_NOARGS, * | METH_CLASS\n # check unused parameter in C implementation\n try:\n id_visitor = astvisitor.IDVisitor(funcdef_visitor.params[1].name)\n id_visitor.visit(method.c_impl.body)\n if not id_visitor.called:\n method.param_unused = True\n if funcdef_visitor.params[1].name.startswith('__NPY_UNUSED_TAGGED'):\n print(BUG_MSG)\n 
bugs_flag_mismatch.add(method)\n except Exception as e:\n print(BUG_MSG)\n bugs_flag_mismatch.add(method)\n\n if funccall_visitor.apis:\n count_wrapper_has_fmtstr += 1\n\n # API call -> format string\n for api in funccall_visitor.apis:\n fmtstr = None\n if api['name'] in pycstructure.PCAPI_AP_KWD:\n if isinstance(api['args'][2], c_ast.Constant):\n fmtstr = api['args'][2].value[1:-1]\n else:\n pass\n # TODO propagation\n elif api['name'] in pycstructure.PCAPI_AP_POS:\n if isinstance(api['args'][1], c_ast.Constant):\n fmtstr = api['args'][1].value[1:-1]\n else:\n pass\n # TODO propagation\n else:\n pass\n\n if fmtstr is not None:\n if fmtstr: # skip \"\"\n print(fmtstr)\n\n # format string -> type signature\n type_signature = format_string_parser.parse(fmtstr)\n format_string_parser.print_signature(type_signature)\n else:\n if method.ml_flag.value == '0x0004':\n print(\"[]\")\n count_wrapper_marked_noarg += 1\n elif method.param_unused:\n print(\"[]\")\n print(BUG_MSG)\n bugs_flag_mismatch.add(method)\n\n print('--->>>')\n # C implementation -> valid return\n return_visitor = astvisitor.ReturnVisitor(funcdef_visitor.wrapper[0])\n return_visitor.visit(funcdef_visitor.wrapper[0])\n format_string_parser.print_signature(return_visitor.returns)\n\n\nend_time = time.time()\n\nprint(\"========== statistics ==========\")\n\nprint(\"time elapse: {:.3f}s\".format(end_time - start_time))\nprint(\"file analyzed directly: {}\".format(len(sources)))\nprint(\"file analyzed directly and compilable: {}\".format(count_compilable))\n\nprint(\"wrapper function (WP): {}\".format(count_wrapper))\nprint(\"WP has format string(s): {}\".format(count_wrapper_has_fmtstr))\nprint(\"WP with METH_NOARGS: {}\".format(count_wrapper_marked_noarg))\n\nprint(\"WP possibly with a bug: {}\".format(len(bugs_flag_mismatch)))\nfor bug in bugs_flag_mismatch:\n print(bug)","repo_name":"S4Plus/pyctype","sub_path":"src/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"26694112326","text":"class Unweighted_graph:\n def __init__(self, vertices, directed=True):\n \"\"\"\n initialise the adjacency list\n \"\"\"\n self.vertices = vertices\n self.directed = directed\n self.aList = {t:[] for t in range(vertices)}\n \n def __str__(self):\n \"\"\"\n prints adjacency list\n \"\"\"\n return(str(self.aList))\n \n def has_vertex(self, vertex):\n \"\"\"\n This method checks if a vertex is present in hgraph or not.\n \"\"\"\n return (0 <= vertex < self.vertices)\n \n def has_edge(self, start, end):\n \"\"\"\n This method checks if an edge is present in graph or not.\n \"\"\"\n if (self.has_vertex(start) and self.has_vertex(end)):\n if end in self.aList[start] :\n return True\n return False\n \n def add_edge(self, start, end):\n \"\"\"\n This method add an edge to the graph.\n \"\"\"\n if (self.has_vertex(start) and self.has_vertex(end)):\n if not self.has_edge(start, end):\n self.aList[start].append(end)\n if not self.directed:\n self.aList[end].append(start)\n return True\n return False\n \n def add_edges(self, edges):\n \"\"\"\n This method adds multiple edges to the graph\n \"\"\"\n for (start, end) in edges:\n self.add_edge(start, end)\n return \n\n def longest_path_DAG(self):\n (indegree, toposort_seq, lpath) = ({t:0 for t in range(self.vertices)}, [], {})\n \n for i in self.aList:\n lpath[i] = 0\n for j in self.aList[i]:\n indegree[j] += 1\n \n q = [i for i in range(self.vertices) if indegree[i] == 0]\n\n 
while(len(q)> 0):\n c = q.pop(0)\n toposort_seq.append(c)\n indegree[c] -= 1\n for k in self.aList[c] :\n indegree[k] -= 1\n lpath[k] = max(lpath[k], lpath[c] + 1 )\n q = [i for i in range(self.vertices) if indegree[i] == 0 ]\n\n return((toposort_seq, lpath))\n\nedges = [(0,2),(0,3),(0,4),(1,2),(1,7),(2,5),(3,5),(3,7),(4,7),(5,6),(6,7)]\ng1 = Unweighted_graph(8, directed=True)\ng1.add_edges(edges)\n# print(g1.aList)\n\nprint(g1.longest_path_DAG())\n \n\n\n","repo_name":"shahbaz42/python","sub_path":"PDSA/graphs/longest_path_DAG.py","file_name":"longest_path_DAG.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"34291718795","text":"import turtle\nimport pandas\nfrom state import State\nfrom scoreboard import Scoreboard\n\n\nscreen = turtle.Screen()\nscreen.title(\"US_state_game\")\nimage = \"US_state_game/blank_states_img.gif\"\nscreen.addshape(image)\nturtle.shape(image)\nscoreboard = Scoreboard()\n\ndata = pandas.read_csv(\"US_state_game/50_states.csv\")\nstates = data.state.to_list()\ngame_on = True\nwhile game_on:\n answer = screen.textinput(title=\"Guess the state\", prompt=\"What's another state's name?\").title()\n if answer in states:\n row = data[data.state == answer]\n State(row.state.item(), row.x, row.y)\n data = data.drop(row.index)\n scoreboard.increase_score()\n if scoreboard.score >= 50:\n screen.clear()\n scoreboard.game_over()\n game_on = False\n elif answer == \"Exit\":\n data.state.to_csv(\"US_state_game/missed_answers.csv\")\n game_on = False\n\n\nscreen.exitonclick()\n","repo_name":"kvanst3/Backend","sub_path":"US_state_game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"24852516874","text":"import numpy as np\nimport networkx as nx\n\n\nfrom pyunicorn import ResNetwork\nfrom .ResistiveNetwork_utils import makeNW, parallelCopy, serialCopy, nx2nw\n\ndebug = 0\n\"\"\" Test for basic sanity, parallel and serial circiuts\n\"\"\"\n\n\ndef testParallelTrivial():\n r\"\"\" Trivial parallel case:\n a) 0 --- 1 --- 2\n\n /---- 3 ---\\\n b) 0 --- 1 --- 2\n\n c) /---- 3 ---\\\n 0 --- 1 --- 2\n \\____ 4 ___/\n\n ER(a) = 2*ER(b) = 3*ER(c)\n \"\"\"\n nws = []\n # construct nw1\n idI, idJ = [0, 1], [1, 2]\n nws.append(makeNW(idI, idJ, [.1]))\n\n # construct nw2\n idI += [0, 3]\n idJ += [3, 2]\n nws.append(makeNW(idI, idJ, [.1]))\n\n # nw3\n idI += [0, 4]\n idJ += [4, 2]\n nws.append(makeNW(idI, idJ, [.1]))\n\n ER = []\n for nw in nws:\n rnw = ResNetwork(nw)\n ER.append(rnw.effective_resistance(0, 2))\n\n assert abs(ER[0]/2-ER[1]) < .1E-6\n assert abs(ER[0]/3-ER[2]) < .1E-6\n\n\ndef testParallelLessTrivial():\n \"\"\" Less Trivial Parallel Case:\n |--- 1 --- 0\n a) 2 |\n |--- 3 ----4\n\n |--- 1 --- 0 --- 5 --- |\n b) 2 | | 7\n |--- 3 ----4 --- 6 --- |\n\n |---- 8 ----------- |\n | | |\n | |----------| |\n | | |\n |--- 1 --- 0 --- 5 --- | | |\n c) 2 | | 7 | 9\n |--- 3 ----4 --- 6 --- | | |\n | | |\n | ----------| |\n | | |\n |---- 10 -----------|\n \"\"\"\n nws = []\n\n idI = [0, 1, 1, 2, 3]\n idJ = [1, 2, 3, 3, 4]\n nws.append(makeNW(idI, idJ, [1]*len(idI)))\n\n idI.extend([0, 5, 5, 6, 6])\n idJ.extend([5, 6, 7, 7, 4])\n nws.append(makeNW(idI, idJ, [1]*len(idI)))\n\n idI.extend([0, 8, 8, 9, 10])\n idJ.extend([8, 9, 10, 10, 4])\n nws.append(makeNW(idI, idJ, [1]*len(idI)))\n\n ER = []\n for nw in nws:\n rnw = ResNetwork(nw)\n 
ER.append(rnw.effective_resistance(0, 4))\n # Gs.append(nx.DiGraph(nw))\n # # showGraphs(Gs)\n # # s = ''\n # # for i,e in enumerate(ER):\n # # s = s + \"NW{:d} {:.3f}\\t\".format(i,e)\n # # print(\"Effective resistances (0,2)\\n %s\" % (s))\n\n assert abs(ER[0]/2-ER[1]) < .1E-6\n assert abs(ER[0]/3-ER[2]) < .1E-6\n\n # \"\"\" Less Trivial Parallel Case:\n # /--- 1 --- 0\n # a) 2 |\n # \\--- 3 ----4\n\n # /--- 1 --- 0 --- 5 --- \\\n # b) 2 | | 7\n # \\--- 3 ----4 --- 6 --- /\n\n # / --- 8 ----------- \\\n # | \\\n # /--- 1 --- 0 --- 5 --- \\ \\\n # c) 2 7 9\n # \\--- 3 ----4 --- 6 --- / /\n # | /\n # \\ --- 10 -----------/\n # \"\"\"\n # nws =[]\n # #construct nw1\n\n # idI = [0,1,1,2,3]\n # idJ = [1,2,3,3,4]\n # val = [.1] * 5\n # nws.append(makeNW(idI,idJ,[.1]*len(idI))[0])\n\n # idI.extend([0,5,6,7])\n # idJ.extend([5,6,7,4])\n # val.extend( val * 6)\n # nws.append(makeNW(idI,idJ,[.1]*len(idI))[0])\n\n # idI.extend([0,8,9,10])\n # idJ.extend([8,9,10,4])\n # val.extend( val * 4)\n # nws.append(makeNW(idI,idJ,val)[0])\n\n # ER = []\n # for nw in nws:\n # rnw = ResNetwork(nw)\n # ER.append( rnw.effective_resistance(0,4))\n\n # s = ''\n # for i,e in enumerate(ER):\n # s = s + \"NW{:d} {:.3f}\\t\".format(i,e)\n # print(\"Effective resistances (0,2)\\n %s\" % (s))\n\n # assert abs(ER[0]/2-ER[1]) < .1E-6\n # assert abs(ER[0]/3-ER[2]) < .1E-6\n\n\ndef testParallelRandom():\n \"\"\" 50 random parallel cases\n \"\"\"\n\n N = 10\n p = .7\n\n runs = 0\n while runs < 50:\n\n G = nx.fast_gnp_random_graph(N, p)\n a = 0\n b = G.number_of_nodes()-1\n\n try:\n nx.shortest_path(G, source=a, target=b)\n except RuntimeError:\n continue\n\n i, j = [], []\n for xx in G.edges():\n i.append(xx[0])\n j.append(xx[1])\n\n # %.1f values for resistance\n val = np.round(np.random.ranf(len(i))*100)/10\n\n # and test\n nw1 = makeNW(i, j, val)\n nw2 = parallelCopy(nw1, a, b)\n ER1 = ResNetwork(nw1).effective_resistance(a, b)\n ER2 = ResNetwork(nw2).effective_resistance(a, b)\n\n # assertion\n assert (ER1/2-ER2) < 1E-6\n\n # increment runs\n runs += 1\n\n\ndef testSerialTrivial():\n \"\"\"Trivial serial test case\n\n a) 0 --- 1 --- 2\n\n b) 0 --- 1 --- 2 --- 3 --- 4\n\n ER(a)/2 = ER(b)\n \"\"\"\n\n # construct nw1\n idI = [0, 1]\n idJ = [1, 2]\n val = [1, 1]\n\n nw1 = np.zeros((3, 3))\n for i, j, v in zip(idI, idJ, val):\n nw1[i, j] = v\n nw1[j, i] = v\n\n # construct nw2\n idI = idI + [2, 3]\n idJ = idJ + [3, 4]\n val = val + [1, 1]\n\n nw2 = np.zeros((5, 5))\n for i, j, v in zip(idI, idJ, val):\n nw2[i, j] = v\n nw2[j, i] = v\n\n # init ResNetworks\n rnw1 = ResNetwork(nw1)\n rnw2 = ResNetwork(nw2)\n\n ER1 = rnw1.effective_resistance(0, 2)\n ER2 = rnw2.effective_resistance(0, 4)\n\n print(\"Effective resistances (0,2)\")\n print(\"NW1 %.3f\\tNW2 %.3f\\t 2*NW1 = %.3f\" % (ER1, ER2, 2*ER1))\n\n assert (ER1*2-ER2) < 1E-6\n\n\ndef testSerialRandom():\n \"\"\" 50 Random serial test cases\n \"\"\"\n\n N = 10\n p = .7\n runs = 50\n for run in range(0, runs):\n\n # a random graph\n G = nx.fast_gnp_random_graph(N, p)\n try:\n nx.shortest_path(G, source=0, target=N-1)\n except RuntimeError:\n continue\n except nx.NetworkXNoPath:\n pass\n # convert to plain ndarray\n nw1 = nx2nw(G)\n\n # copy and join network\n nw2 = serialCopy(nw1)\n\n # compute effective resistance\n ER1 = ResNetwork(\n nw1, silence_level=3).effective_resistance(0, len(nw1)-1)\n ER2 = ResNetwork(\n nw2, silence_level=3).effective_resistance(0, len(nw2)-1)\n\n # assertion\n # print(ER1*2-ER2)\n assert (ER1*2-ER2) < 
1E-6\n","repo_name":"manmeet3591/python_class","sub_path":"pyunicorn/tests/test_core/TestResitiveNetwork-circuits.py","file_name":"TestResitiveNetwork-circuits.py","file_ext":"py","file_size_in_byte":6178,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"86"} +{"seq_id":"20293233238","text":"# Not my code; written by a member of the Discord group #Python. Username @fake\n\nimport heapq\n\nwith open(\"data\") as f:\n    cave = [list(map(int, line.strip())) for line in f.readlines()]\n    height, width = len(cave), len(cave[0])\n\n\ndef in_cave(y, x, cave_size):\n    return 0 <= y < height*cave_size and 0 <= x < width*cave_size\n\ndef risk_level(cave, y, x):\n    #\n    risk_level = cave[y%height][x%width] + x//height + y//width\n    return risk_level % 9 or risk_level\n\ndef dijkstras(cave, cave_size=1):\n    src, dest = (0, 0), (height*cave_size-1, width*cave_size-1)\n    heap, visited = [(0,) + src], {src}\n    while heap:\n        risk, y, x = heapq.heappop(heap)\n        if (y, x) == dest: return risk\n        for dy, dx in [(y+1, x), (y, x+1), (y-1, x), (y, x-1)]:\n            if (dy, dx) in visited or not in_cave(dy, dx, cave_size):\n                continue\n            r = risk_level(cave, dy, dx)\n            heapq.heappush(heap, (risk+r, dy, dx))\n            visited.add((dy, dx))\n\nprint(dijkstras(cave, cave_size=1))\nprint(dijkstras(cave, cave_size=5))\n\n### INTERESTING DIFFERENT APPROACH #####\n# def search(dest):\n#     frontier = PriorityQueue()\n#     frontier.put((0, (0, 0)))\n#     dangers = {}\n#     while not frontier.empty():\n#         danger, curr = frontier.get()\n#         if curr in dangers:\n#             continue\n#         dangers[curr] = danger\n#         if curr == dest:\n#             return danger\n#\n#         for neighbor in neighbor_indexes(grid, curr):\n#             x, y = neighbor\n#             frontier.put((danger + grid[y][x], neighbor))\n#     assert False\n","repo_name":"evaldasJankus/_CODING","sub_path":"PROGRAMING/PYTHON/python_advent_to_code/2021/Day15/chitons.py","file_name":"chitons.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"10155855199","text":"from pyomo.core import (Var, Set, Constraint, BuildAction, Expression,\n                        NonNegativeReals, Binary, NonNegativeIntegers)\nfrom pyomo.core.base.block import SimpleBlock\n\n\nclass Flow(SimpleBlock):\n    r\"\"\" Flow block with definitions for standard flows.\n\n    **The following variables are created**:\n\n    negative_gradient :\n        Difference of a flow in consecutive timesteps if flow is reduced\n        indexed by NEGATIVE_GRADIENT_FLOWS, TIMESTEPS.\n\n    positive_gradient :\n        Difference of a flow in consecutive timesteps if flow is increased\n        indexed by POSITIVE_GRADIENT_FLOWS, TIMESTEPS.\n\n    **The following sets are created:** (-> see basic sets at\n    :class:`.Model` )\n\n    SUMMED_MAX_FLOWS\n        A set of flows with the attribute :attr:`summed_max` being not None.\n    SUMMED_MIN_FLOWS\n        A set of flows with the attribute :attr:`summed_min` being not None.\n    NEGATIVE_GRADIENT_FLOWS\n        A set of flows with the attribute :attr:`negative_gradient` being not\n        None.\n    POSITIVE_GRADIENT_FLOWS\n        A set of flows with the attribute :attr:`positive_gradient` being not\n        None.\n    INTEGER_FLOWS\n        A set of flows where the attribute :attr:`integer` is True (forces flow\n        to only take integer values)\n\n    **The following constraints are built:**\n\n    Flow max sum :attr:`om.Flow.summed_max[i, o]`\n        .. 
math::\n \\sum_t flow(i, o, t) \\cdot \\tau\n \\leq summed\\_max(i, o) \\cdot nominal\\_value(i, o), \\\\\n \\forall (i, o) \\in \\textrm{SUMMED\\_MAX\\_FLOWS}.\n\n Flow min sum :attr:`om.Flow.summed_min[i, o]`\n .. math::\n \\sum_t flow(i, o, t) \\cdot \\tau\n \\geq summed\\_min(i, o) \\cdot nominal\\_value(i, o), \\\\\n \\forall (i, o) \\in \\textrm{SUMMED\\_MIN\\_FLOWS}.\n\n Negative gradient constraint :attr:`om.Flow.negative_gradient_constr[i, o]`:\n .. math:: flow(i, o, t-1) - flow(i, o, t) \\geq \\\n negative\\_gradient(i, o, t), \\\\\n \\forall (i, o) \\in \\textrm{NEGATIVE\\_GRADIENT\\_FLOWS}, \\\\\n \\forall t \\in \\textrm{TIMESTEPS}.\n\n Positive gradient constraint :attr:`om.Flow.positive_gradient_constr[i, o]`:\n .. math:: flow(i, o, t) - flow(i, o, t-1) \\geq \\\n positive\\__gradient(i, o, t), \\\\\n \\forall (i, o) \\in \\textrm{POSITIVE\\_GRADIENT\\_FLOWS}, \\\\\n \\forall t \\in \\textrm{TIMESTEPS}.\n\n **The following parts of the objective function are created:**\n\n If :attr:`variable_costs` are set by the user:\n .. math::\n \\sum_{(i,o)} \\sum_t flow(i, o, t) \\cdot variable\\_costs(i, o, t)\n\n The expression can be accessed by :attr:`om.Flow.variable_costs` and\n their value after optimization by :meth:`om.Flow.variable_costs()` .\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def _create(self, group=None):\n r\"\"\" Creates sets, variables and constraints for all standard flows.\n\n Parameters\n ----------\n group : list\n List containing tuples containing flow (f) objects and the\n associated source (s) and target (t)\n of flow e.g. groups=[(s1, t1, f1), (s2, t2, f2),..]\n \"\"\"\n if group is None:\n return None\n\n m = self.parent_block()\n\n # ########################## SETS #################################\n # set for all flows with an global limit on the flow over time\n self.SUMMED_MAX_FLOWS = Set(initialize=[\n (g[0], g[1]) for g in group if g[2].summed_max is not None and\n g[2].nominal_value is not None])\n\n self.SUMMED_MIN_FLOWS = Set(initialize=[\n (g[0], g[1]) for g in group if g[2].summed_min is not None and\n g[2].nominal_value is not None])\n\n self.NEGATIVE_GRADIENT_FLOWS = Set(\n initialize=[(g[0], g[1]) for g in group\n if g[2].negative_gradient['ub'][0] is not None])\n\n self.POSITIVE_GRADIENT_FLOWS = Set(\n initialize=[(g[0], g[1]) for g in group\n if g[2].positive_gradient['ub'][0] is not None])\n\n self.INTEGER_FLOWS = Set(\n initialize=[(g[0], g[1]) for g in group\n if g[2].integer])\n # ######################### Variables ################################\n\n self.positive_gradient = Var(self.POSITIVE_GRADIENT_FLOWS,\n m.TIMESTEPS)\n\n self.negative_gradient = Var(self.NEGATIVE_GRADIENT_FLOWS,\n m.TIMESTEPS)\n\n self.integer_flow = Var(self.INTEGER_FLOWS,\n m.TIMESTEPS, within=NonNegativeIntegers)\n # set upper bound of gradient variable\n for i, o, f in group:\n if m.flows[i, o].positive_gradient['ub'][0] is not None:\n for t in m.TIMESTEPS:\n self.positive_gradient[i, o, t].setub(\n f.positive_gradient['ub'][t] * f.nominal_value)\n if m.flows[i, o].negative_gradient['ub'][0] is not None:\n for t in m.TIMESTEPS:\n self.negative_gradient[i, o, t].setub(\n f.negative_gradient['ub'][t] * f.nominal_value)\n\n # ######################### CONSTRAINTS ###############################\n\n def _flow_summed_max_rule(model):\n \"\"\"Rule definition for build action of max. 
sum flow constraint.\n \"\"\"\n for inp, out in self.SUMMED_MAX_FLOWS:\n lhs = sum(m.flow[inp, out, ts] * m.timeincrement[ts]\n for ts in m.TIMESTEPS)\n rhs = (m.flows[inp, out].summed_max *\n m.flows[inp, out].nominal_value)\n self.summed_max.add((inp, out), lhs <= rhs)\n self.summed_max = Constraint(self.SUMMED_MAX_FLOWS, noruleinit=True)\n self.summed_max_build = BuildAction(rule=_flow_summed_max_rule)\n\n def _flow_summed_min_rule(model):\n \"\"\"Rule definition for build action of min. sum flow constraint.\n \"\"\"\n for inp, out in self.SUMMED_MIN_FLOWS:\n lhs = sum(m.flow[inp, out, ts] * m.timeincrement[ts]\n for ts in m.TIMESTEPS)\n rhs = (m.flows[inp, out].summed_min *\n m.flows[inp, out].nominal_value)\n self.summed_min.add((inp, out), lhs >= rhs)\n self.summed_min = Constraint(self.SUMMED_MIN_FLOWS, noruleinit=True)\n self.summed_min_build = BuildAction(rule=_flow_summed_min_rule)\n\n def _positive_gradient_flow_rule(model):\n \"\"\"Rule definition for positive gradient constraint.\n \"\"\"\n for inp, out in self.POSITIVE_GRADIENT_FLOWS:\n for ts in m.TIMESTEPS:\n if ts > 0:\n lhs = m.flow[inp, out, ts] - m.flow[inp, out, ts-1]\n rhs = self.positive_gradient[inp, out, ts]\n self.positive_gradient_constr.add((inp, out, ts),\n lhs <= rhs)\n else:\n pass # return(Constraint.Skip)\n self.positive_gradient_constr = Constraint(\n self.POSITIVE_GRADIENT_FLOWS, m.TIMESTEPS, noruleinit=True)\n self.positive_gradient_build = BuildAction(\n rule=_positive_gradient_flow_rule)\n\n def _negative_gradient_flow_rule(model):\n \"\"\"Rule definition for negative gradient constraint.\n \"\"\"\n for inp, out in self.NEGATIVE_GRADIENT_FLOWS:\n for ts in m.TIMESTEPS:\n if ts > 0:\n lhs = m.flow[inp, out, ts-1] - m.flow[inp, out, ts]\n rhs = self.negative_gradient[inp, out, ts]\n self.negative_gradient_constr.add((inp, out, ts),\n lhs <= rhs)\n else:\n pass # return(Constraint.Skip)\n self.negative_gradient_constr = Constraint(\n self.NEGATIVE_GRADIENT_FLOWS, m.TIMESTEPS, noruleinit=True)\n self.negative_gradient_build = BuildAction(\n rule=_negative_gradient_flow_rule)\n\n def _integer_flow_rule(block, i, o, t):\n \"\"\"Force flow variable to NonNegativeInteger values.\n \"\"\"\n return self.integer_flow[i, o, t] == m.flow[i, o, t]\n\n self.integer_flow_constr = Constraint(self.INTEGER_FLOWS, m.TIMESTEPS,\n rule=_integer_flow_rule)\n\n def _objective_expression(self):\n r\"\"\" Objective expression for all standard flows with fixed costs\n and variable costs.\n \"\"\"\n m = self.parent_block()\n\n variable_costs = 0\n gradient_costs = 0\n\n for i, o in m.FLOWS:\n if m.flows[i, o].variable_costs[0] is not None:\n for t in m.TIMESTEPS:\n variable_costs += (m.flow[i, o, t] * m.objective_weighting[t] *\n m.flows[i, o].variable_costs[t])\n\n if m.flows[i, o].positive_gradient['ub'][0] is not None:\n for t in m.TIMESTEPS:\n gradient_costs += (self.positive_gradient[i, o, t] *\n m.flows[i, o].positive_gradient[\n 'costs'])\n\n if m.flows[i, o].negative_gradient['ub'][0] is not None:\n for t in m.TIMESTEPS:\n gradient_costs += (self.negative_gradient[i, o, t] *\n m.flows[i, o].negative_gradient[\n 'costs'])\n\n return variable_costs + gradient_costs\n\n\nclass InvestmentFlow(SimpleBlock):\n r\"\"\"Block for all flows with :attr:`investment` being not None.\n\n **The following sets are created:** (-> see basic sets at\n :class:`.Model` )\n\n FLOWS\n A set of flows with the attribute :attr:`invest` of type\n :class:`.options.Investment`.\n FIXED_FLOWS\n A set of flow with the attribute :attr:`fixed` set to 
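# The positive_gradient / negative_gradient variables defined in the Flow block
# above split the change of a flow between consecutive timesteps into separate
# ramp-up and ramp-down amounts. A small numpy sketch of that decomposition on
# made-up flow values (illustrative only, no oemof model involved):
import numpy as np

flow = np.array([0.0, 4.0, 6.0, 3.0, 3.0, 0.0])
diff = np.diff(flow)                           # flow(t) - flow(t-1)
positive_gradient = np.clip(diff, 0.0, None)   # only the increases
negative_gradient = np.clip(-diff, 0.0, None)  # only the decreases
assert np.allclose(positive_gradient - negative_gradient, diff)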
`True`\n SUMMED_MAX_FLOWS\n A subset of set FLOWS with flows with the attribute :attr:`summed_max`\n being not None.\n SUMMED_MIN_FLOWS\n A subset of set FLOWS with flows with the attribute\n :attr:`summed_min` being not None.\n MIN_FLOWS\n A subset of FLOWS with flows having set a value of not None in the\n first timestep.\n\n **The following variables are created:**\n\n invest :attr:`om.InvestmentFlow.invest[i, o]`\n Value of the investment variable i.e. equivalent to the nominal\n value of the flows after optimization (indexed by FLOWS)\n\n **The following constraints are build:**\n\n Actual value constraint for fixed invest\n flows :attr:`om.InvestmentFlow.fixed[i, o, t]`\n .. math::\n flow(i, o, t) = actual\\_value(i, o, t) \\cdot invest(i, o), \\\\\n \\forall (i, o) \\in \\textrm{FIXED\\_FLOWS}, \\\\\n \\forall t \\in \\textrm{TIMESTEPS}.\n\n Lower bound (min) constraint for invest flows\n :attr:`om.InvestmentFlow.min[i, o, t]`\n .. math::\n flow(i, o, t) \\geq min(i, o, t) \\cdot invest(i, o), \\\\\n \\forall (i, o) \\in \\textrm{MIN\\_FLOWS}, \\\\\n \\forall t \\in \\textrm{TIMESTEPS}.\n\n Upper bound (max) constraint for invest flows\n :attr:`om.InvestmentFlow.max[i, o, t]`\n .. math::\n flow(i, o, t) \\leq max(i, o, t) \\cdot invest(i, o), \\\\\n \\forall (i, o) \\in \\textrm{FLOWS}, \\\\\n \\forall t \\in \\textrm{TIMESTEPS}.\n\n Flow max sum for invest flow\n :attr:`om.InvestmentFlow.summed_max[i, o]`\n .. math::\n \\sum_t flow(i, o, t) \\cdot \\tau \\leq summed\\_max(i, o) \\\n \\cdot invest(i, o) \\\\\n \\forall (i, o) \\in \\textrm{SUMMED\\_MAX\\_FLOWS}.\n\n Flow min sum for invest flow :attr:`om.InvestmentFlow.summed_min[i, o]`\n .. math::\n \\sum_t flow(i, o, t) \\cdot \\tau \\geq summed\\_min(i, o) \\\n \\cdot invest(i, o) \\\\\n \\forall (i, o) \\in \\textrm{SUMMED\\_MIN\\_FLOWS}.\n\n\n **The following parts of the objective function are created:**\n\n Equivalent periodical costs (epc) expression\n :attr:`om.InvestmentFlow.investment_costs`:\n .. math::\n \\sum_{i, o} invest(i, o) \\cdot ep\\_costs(i, o)\n\n The expression can be accessed by :attr:`om.InvestmentFlow.variable_costs`\n and their value after optimization by\n :meth:`om.InvestmentFlow.variable_costs()` . This works similar for\n investment costs with :attr:`*.investment_costs` etc.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def _create(self, group=None):\n r\"\"\"Creates sets, variables and constraints for Flow with investment\n attribute of type class:`.Investment`.\n\n Parameters\n ----------\n group : list\n List containing tuples containing flow (f) objects that have an\n attribute investment and the associated source (s) and target (t)\n of flow e.g. 
groups=[(s1, t1, f1), (s2, t2, f2),..]\n \"\"\"\n if group is None:\n return None\n\n m = self.parent_block()\n\n # ######################### SETS #####################################\n self.FLOWS = Set(initialize=[(g[0], g[1]) for g in group])\n\n self.FIXED_FLOWS = Set(\n initialize=[(g[0], g[1]) for g in group if g[2].fixed])\n\n self.SUMMED_MAX_FLOWS = Set(initialize=[\n (g[0], g[1]) for g in group if g[2].summed_max is not None])\n\n self.SUMMED_MIN_FLOWS = Set(initialize=[\n (g[0], g[1]) for g in group if g[2].summed_min is not None])\n\n self.MIN_FLOWS = Set(initialize=[\n (g[0], g[1]) for g in group if (\n g[2].min[0] != 0 or len(g[2].min) > 1)])\n\n # ######################### VARIABLES #################################\n def _investvar_bound_rule(block, i, o):\n \"\"\"Rule definition for bounds of invest variable.\n \"\"\"\n return (m.flows[i, o].investment.minimum,\n m.flows[i, o].investment.maximum)\n # create variable bounded for flows with investement attribute\n self.invest = Var(self.FLOWS, within=NonNegativeReals,\n bounds=_investvar_bound_rule)\n\n # ######################### CONSTRAINTS ###############################\n\n # TODO: Add gradient constraints\n\n def _investflow_fixed_rule(block, i, o, t):\n \"\"\"Rule definition of constraint to fix flow variable\n of investment flow to (normed) actual value\n \"\"\"\n return (m.flow[i, o, t] == (\n (m.flows[i, o].investment.existing + self.invest[i, o]) *\n m.flows[i, o].actual_value[t]))\n self.fixed = Constraint(self.FIXED_FLOWS, m.TIMESTEPS,\n rule=_investflow_fixed_rule)\n\n def _max_investflow_rule(block, i, o, t):\n \"\"\"Rule definition of constraint setting an upper bound of flow\n variable in investment case.\n \"\"\"\n expr = (m.flow[i, o, t] <= (\n (m.flows[i, o].investment.existing + self.invest[i, o]) *\n m.flows[i, o].max[t]))\n return expr\n self.max = Constraint(self.FLOWS, m.TIMESTEPS,\n rule=_max_investflow_rule)\n\n def _min_investflow_rule(block, i, o, t):\n \"\"\"Rule definition of constraint setting a lower bound on flow\n variable in investment case.\n \"\"\"\n expr = (m.flow[i, o, t] >= (\n (m.flows[i, o].investment.existing + self.invest[i, o]) *\n m.flows[i, o].min[t]))\n return expr\n self.min = Constraint(self.MIN_FLOWS, m.TIMESTEPS,\n rule=_min_investflow_rule)\n\n def _summed_max_investflow_rule(block, i, o):\n \"\"\"Rule definition for build action of max. sum flow constraint\n in investment case.\n \"\"\"\n expr = (sum(m.flow[i, o, t] * m.timeincrement[t]\n for t in m.TIMESTEPS) <=\n m.flows[i, o].summed_max * (\n self.invest[i, o] + m.flows[i, o].investment.existing))\n return expr\n self.summed_max = Constraint(self.SUMMED_MAX_FLOWS,\n rule=_summed_max_investflow_rule)\n\n def _summed_min_investflow_rule(block, i, o):\n \"\"\"Rule definition for build action of min. sum flow constraint\n in investment case.\n \"\"\"\n expr = (sum(m.flow[i, o, t] * m.timeincrement[t]\n for t in m.TIMESTEPS) >=\n ((m.flows[i, o].investment.existing + self.invest[i, o]) *\n m.flows[i, o].summed_min))\n return expr\n self.summed_min = Constraint(self.SUMMED_MIN_FLOWS,\n rule=_summed_min_investflow_rule)\n\n def _objective_expression(self):\n r\"\"\" Objective expression for flows with investment attribute of type\n class:`.Investment`. 
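# The fixed/min/max rules created above all scale a normalized profile by the
# invested capacity (existing + invest). A plain-Python sketch of the upper
# bound check with made-up numbers (all values illustrative):
existing, invest = 50.0, 30.0         # already installed and newly invested capacity
max_profile = [1.0, 0.8, 0.5]         # normalized per-timestep upper bound
flows = [70.0, 64.0, 40.0]
feasible = all(f <= (existing + invest) * m for f, m in zip(flows, max_profile))
print(feasible)  # True: each flow stays below capacity times the profile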
The returned costs are fixed, variable and\n investment costs.\n \"\"\"\n if not hasattr(self, 'FLOWS'):\n return 0\n\n m = self.parent_block()\n investment_costs = 0\n\n for i, o in self.FLOWS:\n if m.flows[i, o].investment.ep_costs is not None:\n investment_costs += (self.invest[i, o] *\n m.flows[i, o].investment.ep_costs)\n else:\n raise ValueError(\"Missing value for investment costs!\")\n\n self.investment_costs = Expression(expr=investment_costs)\n return investment_costs\n\n\nclass Bus(SimpleBlock):\n r\"\"\"Block for all balanced buses.\n\n **The following constraints are build:**\n\n Bus balance :attr:`om.Bus.balance[i, o, t]`\n .. math::\n \\sum_{i \\in INPUTS(n)} flow(i, n, t) =\n \\sum_{o \\in OUTPUTS(n)} flow(n, o, t), \\\\\n \\forall n \\in \\textrm{BUSES},\n \\forall t \\in \\textrm{TIMESTEPS}.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def _create(self, group=None):\n \"\"\"Creates the balance constraints for the class:`Bus` block.\n\n Parameters\n ----------\n group : list\n List of oemof bus (b) object for which the bus balance is created\n e.g. group = [b1, b2, b3, .....]\n \"\"\"\n if group is None:\n return None\n\n m = self.parent_block()\n\n I = {}\n O = {}\n for n in group:\n I[n] = [i for i in n.inputs]\n O[n] = [o for o in n.outputs]\n\n def _busbalance_rule(block):\n for t in m.TIMESTEPS:\n for n in group:\n lhs = sum(m.flow[i, n, t] for i in I[n])\n rhs = sum(m.flow[n, o, t] for o in O[n])\n expr = (lhs == rhs)\n # no inflows no outflows yield: 0 == 0 which is True\n if expr is not True:\n block.balance.add((n, t), expr)\n self.balance = Constraint(group, m.TIMESTEPS, noruleinit=True)\n self.balance_build = BuildAction(rule=_busbalance_rule)\n\n\nclass Transformer(SimpleBlock):\n r\"\"\"Block for the linear relation of nodes with type\n :class:`~oemof.solph.network.Transformer`\n\n **The following sets are created:** (-> see basic sets at\n :class:`.Model` )\n\n TRANSFORMERS\n A set with all :class:`~oemof.solph.network.Transformer` objects.\n\n **The following constraints are created:**\n\n Linear relation :attr:`om.Transformer.relation[i,o,t]`\n .. math::\n flow(i, n, t) / conversion\\_factor(n, i, t) = \\\n flow(n, o, t) / conversion\\_factor(n, o, t), \\\\\n \\forall t \\in \\textrm{TIMESTEPS}, \\\\\n \\forall n \\in \\textrm{TRANSFORMERS}, \\\\\n \\forall i \\in \\textrm{INPUTS(n)}, \\\\\n \\forall o \\in \\textrm{OUTPUTS(n)}.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def _create(self, group=None):\n \"\"\" Creates the linear constraint for the class:`Transformer`\n block.\n Parameters\n ----------\n group : list\n List of oemof.solph.Transformers objects for which\n the linear relation of inputs and outputs is created\n e.g. group = [trsf1, trsf2, trsf3, ...]. Note that the relation\n is created for all existing relations of all inputs and all outputs\n of the transformer. 
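# The Transformer relation above enforces flow(i,n,t)/cf(i) == flow(n,o,t)/cf(o)
# for every input/output pair of the node. A tiny numeric sketch with an
# assumed conversion factor (1 unit of fuel yields 0.4 units of power):
cf_fuel, cf_power = 1.0, 0.4
fuel_in = 10.0
power_out = fuel_in / cf_fuel * cf_power   # the relation solved for the output
assert abs(fuel_in / cf_fuel - power_out / cf_power) < 1e-9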
The components inside the list need to hold\n an attribute `conversion_factors` of type dict containing the\n conversion factors for all inputs to outputs.\n \"\"\"\n if group is None:\n return None\n\n m = self.parent_block()\n\n in_flows = {n: [i for i in n.inputs.keys()] for n in group}\n out_flows = {n: [o for o in n.outputs.keys()] for n in group}\n\n self.relation = Constraint(\n [(n, i, o, t)\n for t in m.TIMESTEPS\n for n in group\n for o in out_flows[n]\n for i in in_flows[n]], noruleinit=True)\n\n def _input_output_relation(block):\n for t in m.TIMESTEPS:\n for n in group:\n for o in out_flows[n]:\n for i in in_flows[n]:\n try:\n lhs = (m.flow[i, n, t] /\n n.conversion_factors[i][t])\n rhs = (m.flow[n, o, t] /\n n.conversion_factors[o][t])\n except ValueError:\n raise ValueError(\n \"Error in constraint creation\",\n \"source: {0}, target: {1}\".format(\n n.label, o.label))\n block.relation.add((n, i, o, t), (lhs == rhs))\n self.relation_build = BuildAction(rule=_input_output_relation)\n\n\nclass NonConvexFlow(SimpleBlock):\n r\"\"\"\n **The following sets are created:** (-> see basic sets at\n :class:`.Model` )\n\n A set of flows with the attribute :attr:`nonconvex` of type\n :class:`.options.NonConvex`.\n MIN_FLOWS\n A subset of set NONCONVEX_FLOWS with the attribute :attr:`min`\n being not None in the first timestep.\n ACTIVITYCOSTFLOWS\n A subset of set NONCONVEX_FLOWS with the attribute\n :attr:`activity_costs` being not None.\n STARTUPFLOWS\n A subset of set NONCONVEX_FLOWS with the attribute\n :attr:`maximum_startups` or :attr:`startup_costs`\n being not None.\n MAXSTARTUPFLOWS\n A subset of set STARTUPFLOWS with the attribute\n :attr:`maximum_startups` being not None.\n SHUTDOWNFLOWS\n A subset of set NONCONVEX_FLOWS with the attribute\n :attr:`maximum_shutdowns` or :attr:`shutdown_costs`\n being not None.\n MAXSHUTDOWNFLOWS\n A subset of set SHUTDOWNFLOWS with the attribute\n :attr:`maximum_shutdowns` being not None.\n MINUPTIMEFLOWS\n A subset of set NONCONVEX_FLOWS with the attribute\n :attr:`minimum_uptime` being not None.\n MINDOWNTIMEFLOWS\n A subset of set NONCONVEX_FLOWS with the attribute\n :attr:`minimum_downtime` being not None.\n\n **The following variables are created:**\n\n Status variable (binary) :attr:`om.NonConvexFlow.status`:\n Variable indicating if flow is >= 0 indexed by FLOWS\n\n Startup variable (binary) :attr:`om.NonConvexFlow.startup`:\n Variable indicating startup of flow (component) indexed by\n STARTUPFLOWS\n\n Shutdown variable (binary) :attr:`om.NonConvexFlow.shutdown`:\n Variable indicating shutdown of flow (component) indexed by\n SHUTDOWNFLOWS\n\n **The following constraints are created**:\n\n Minimum flow constraint :attr:`om.NonConvexFlow.min[i,o,t]`\n .. math::\n flow(i, o, t) \\geq min(i, o, t) \\cdot nominal\\_value \\\n \\cdot status(i, o, t), \\\\\n \\forall t \\in \\textrm{TIMESTEPS}, \\\\\n \\forall (i, o) \\in \\textrm{NONCONVEX\\_FLOWS}.\n\n Maximum flow constraint :attr:`om.NonConvexFlow.max[i,o,t]`\n .. math::\n flow(i, o, t) \\leq max(i, o, t) \\cdot nominal\\_value \\\n \\cdot status(i, o, t), \\\\\n \\forall t \\in \\textrm{TIMESTEPS}, \\\\\n \\forall (i, o) \\in \\textrm{NONCONVEX\\_FLOWS}.\n\n Startup constraint :attr:`om.NonConvexFlow.startup_constr[i,o,t]`\n .. math::\n startup(i, o, t) \\geq \\\n status(i,o,t) - status(i, o, t-1) \\\\\n \\forall t \\in \\textrm{TIMESTEPS}, \\\\\n \\forall (i,o) \\in \\textrm{STARTUPFLOWS}.\n\n Maximum startups constraint\n :attr:`om.NonConvexFlow.max_startup_constr[i,o,t]`\n .. 
math::\n \\sum_{t \\in \\textrm{TIMESTEPS}} startup(i, o, t) \\leq \\\n N_{start}(i,o)\n \\forall (i,o) \\in \\textrm{MAXSTARTUPFLOWS}.\n\n Shutdown constraint :attr:`om.NonConvexFlow.shutdown_constr[i,o,t]`\n .. math::\n shutdown(i, o, t) \\geq \\\n status(i, o, t-1) - status(i, o, t) \\\\\n \\forall t \\in \\textrm{TIMESTEPS}, \\\\\n \\forall (i, o) \\in \\textrm{SHUTDOWNFLOWS}.\n\n Maximum shutdowns constraint\n :attr:`om.NonConvexFlow.max_startup_constr[i,o,t]`\n .. math::\n \\sum_{t \\in \\textrm{TIMESTEPS}} startup(i, o, t) \\leq \\\n N_{shutdown}(i,o)\n \\forall (i,o) \\in \\textrm{MAXSHUTDOWNFLOWS}.\n\n Minimum uptime constraint :attr:`om.NonConvexFlow.uptime_constr[i,o,t]`\n .. math::\n (status(i, o, t)-status(i, o, t-1)) \\cdot minimum\\_uptime(i, o) \\\\\n \\leq \\sum_{n=0}^{minimum\\_uptime-1} status(i,o,t+n) \\\\\n \\forall t \\in \\textrm{TIMESTEPS} | \\\\\n t \\neq \\{0..minimum\\_uptime\\} \\cup \\\n \\{t\\_max-minimum\\_uptime..t\\_max\\} , \\\\\n \\forall (i,o) \\in \\textrm{MINUPTIMEFLOWS}.\n \\\\ \\\\\n status(i, o, t) = initial\\_status(i, o) \\\\\n \\forall t \\in \\textrm{TIMESTEPS} | \\\\\n t = \\{0..minimum\\_uptime\\} \\cup \\\n \\{t\\_max-minimum\\_uptime..t\\_max\\} , \\\\\n \\forall (i,o) \\in \\textrm{MINUPTIMEFLOWS}.\n\n Minimum downtime constraint :attr:`om.NonConvexFlow.downtime_constr[i,o,t]`\n .. math::\n (status(i, o, t-1)-status(i, o, t)) \\\n \\cdot minimum\\_downtime(i, o) \\\\\n \\leq minimum\\_downtime(i, o) \\\n - \\sum_{n=0}^{minimum\\_downtime-1} status(i,o,t+n) \\\\\n \\forall t \\in \\textrm{TIMESTEPS} | \\\\\n t \\neq \\{0..minimum\\_downtime\\} \\cup \\\n \\{t\\_max-minimum\\_downtime..t\\_max\\} , \\\\\n \\forall (i,o) \\in \\textrm{MINDOWNTIMEFLOWS}.\n \\\\ \\\\\n status(i, o, t) = initial\\_status(i, o) \\\\\n \\forall t \\in \\textrm{TIMESTEPS} | \\\\\n t = \\{0..minimum\\_downtime\\} \\cup \\\n \\{t\\_max-minimum\\_downtime..t\\_max\\} , \\\\\n \\forall (i,o) \\in \\textrm{MINDOWNTIMEFLOWS}.\n\n **The following parts of the objective function are created:**\n\n If :attr:`nonconvex.startup_costs` is set by the user:\n .. math::\n \\sum_{i, o \\in STARTUPFLOWS} \\sum_t startup(i, o, t) \\\n \\cdot startup\\_costs(i, o)\n\n If :attr:`nonconvex.shutdown_costs` is set by the user:\n .. math::\n \\sum_{i, o \\in SHUTDOWNFLOWS} \\sum_t shutdown(i, o, t) \\\n \\cdot shutdown\\_costs(i, o)\n\n If :attr:`nonconvex.activity_costs` is set by the user:\n .. 
math::\n \\sum_{i, o \\in ACTIVITYCOSTFLOWS} \\sum_t status(i, o, t) \\\n \\cdot activity\\_costs(i, o)\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def _create(self, group=None):\n \"\"\" Creates set, variables, constraints for all flow object with\n an attribute flow of type class:`.NonConvexFlow`.\n\n Parameters\n ----------\n group : list\n List of oemof.solph.NonConvexFlow objects for which\n the constraints are build.\n \"\"\"\n if group is None:\n return None\n\n m = self.parent_block()\n # ########################## SETS #####################################\n self.NONCONVEX_FLOWS = Set(initialize=[(g[0], g[1]) for g in group])\n\n self.MIN_FLOWS = Set(initialize=[(g[0], g[1]) for g in group\n if g[2].min[0] is not None])\n self.STARTUPFLOWS = Set(initialize=[(g[0], g[1]) for g in group\n if g[2].nonconvex.startup_costs[0]\n is not None\n or g[2].nonconvex.maximum_startups\n is not None])\n self.MAXSTARTUPFLOWS = Set(initialize=[(g[0], g[1]) for g in group\n if g[2].nonconvex.maximum_startups\n is not None])\n self.SHUTDOWNFLOWS = Set(initialize=[(g[0], g[1]) for g in group\n if g[2].nonconvex.shutdown_costs[0]\n is not None\n or g[2].nonconvex.maximum_shutdowns\n is not None])\n self.MAXSHUTDOWNFLOWS = Set(initialize=[(g[0], g[1]) for g in group\n if g[2].nonconvex.maximum_shutdowns\n is not None])\n self.MINUPTIMEFLOWS = Set(initialize=[(g[0], g[1]) for g in group\n if g[2].nonconvex.minimum_uptime\n is not None])\n\n self.MINDOWNTIMEFLOWS = Set(initialize=[(g[0], g[1]) for g in group\n if g[2].nonconvex.minimum_downtime\n is not None])\n\n self.ACTIVITYCOSTFLOWS = Set(\n initialize=[(g[0], g[1]) for g in group\n if g[2].nonconvex.activity_costs[0] is not None])\n\n # ################### VARIABLES AND CONSTRAINTS #######################\n self.status = Var(self.NONCONVEX_FLOWS, m.TIMESTEPS, within=Binary)\n\n if self.STARTUPFLOWS:\n self.startup = Var(self.STARTUPFLOWS, m.TIMESTEPS, within=Binary)\n\n if self.SHUTDOWNFLOWS:\n self.shutdown = Var(self.SHUTDOWNFLOWS, m.TIMESTEPS, within=Binary)\n\n def _minimum_flow_rule(block, i, o, t):\n \"\"\"Rule definition for MILP minimum flow constraints.\n \"\"\"\n expr = (self.status[i, o, t] *\n m.flows[i, o].min[t] * m.flows[i, o].nominal_value <=\n m.flow[i, o, t])\n return expr\n self.min = Constraint(self.MIN_FLOWS, m.TIMESTEPS,\n rule=_minimum_flow_rule)\n\n def _maximum_flow_rule(block, i, o, t):\n \"\"\"Rule definition for MILP maximum flow constraints.\n \"\"\"\n expr = (self.status[i, o, t] *\n m.flows[i, o].max[t] * m.flows[i, o].nominal_value >=\n m.flow[i, o, t])\n return expr\n self.max = Constraint(self.MIN_FLOWS, m.TIMESTEPS,\n rule=_maximum_flow_rule)\n\n def _startup_rule(block, i, o, t):\n \"\"\"Rule definition for startup constraint of nonconvex flows.\n \"\"\"\n if t > m.TIMESTEPS[1]:\n expr = (self.startup[i, o, t] >= self.status[i, o, t] -\n self.status[i, o, t-1])\n else:\n expr = (self.startup[i, o, t] >= self.status[i, o, t] -\n m.flows[i, o].nonconvex.initial_status)\n return expr\n self.startup_constr = Constraint(self.STARTUPFLOWS, m.TIMESTEPS,\n rule=_startup_rule)\n\n def _max_startup_rule(block, i, o):\n \"\"\"Rule definition for maximum number of start-ups.\n \"\"\"\n lhs = sum(self.startup[i, o, t] for t in m.TIMESTEPS)\n return lhs <= m.flows[i, o].nonconvex.maximum_startups\n self.max_startup_constr = Constraint(self.MAXSTARTUPFLOWS,\n rule=_max_startup_rule)\n\n def _shutdown_rule(block, i, o, t):\n \"\"\"Rule definition for shutdown constraints of nonconvex 
flows.\n \"\"\"\n if t > m.TIMESTEPS[1]:\n expr = (self.shutdown[i, o, t] >= self.status[i, o, t-1] -\n self.status[i, o, t])\n else:\n expr = (self.shutdown[i, o, t] >=\n m.flows[i, o].nonconvex.initial_status -\n self.status[i, o, t])\n return expr\n self.shutdown_constr = Constraint(self.SHUTDOWNFLOWS, m.TIMESTEPS,\n rule=_shutdown_rule)\n\n def _max_shutdown_rule(block, i, o):\n \"\"\"Rule definition for maximum number of start-ups.\n \"\"\"\n lhs = sum(self.shutdown[i, o, t] for t in m.TIMESTEPS)\n return lhs <= m.flows[i, o].nonconvex.maximum_shutdowns\n self.max_shutdown_constr = Constraint(self.MAXSHUTDOWNFLOWS,\n rule=_max_shutdown_rule)\n\n def _min_uptime_rule(block, i, o, t):\n \"\"\"Rule definition for min-uptime constraints of nonconvex flows.\n \"\"\"\n if m.flows[i, o].nonconvex.max_up_down <= t\\\n <= m.TIMESTEPS[-1]-m.flows[i, o].nonconvex.max_up_down:\n expr = 0\n expr += ((self.status[i, o, t]-self.status[i, o, t-1]) *\n m.flows[i, o].nonconvex.minimum_uptime)\n expr += -sum(self.status[i, o, t+u] for u in range(0,\n m.flows[i, o].nonconvex.minimum_uptime))\n return expr <= 0\n else:\n expr = 0\n expr += self.status[i, o, t]\n expr += -m.flows[i, o].nonconvex.initial_status\n return expr == 0\n self.min_uptime_constr = Constraint(\n self.MINUPTIMEFLOWS, m.TIMESTEPS, rule=_min_uptime_rule)\n\n def _min_downtime_rule(block, i, o, t):\n \"\"\"Rule definition for min-downtime constraints of nonconvex flows.\n \"\"\"\n if m.flows[i, o].nonconvex.max_up_down <= t\\\n <= m.TIMESTEPS[-1]-m.flows[i, o].nonconvex.max_up_down:\n expr = 0\n expr += ((self.status[i, o, t-1]-self.status[i, o, t]) *\n m.flows[i, o].nonconvex.minimum_downtime)\n expr += - m.flows[i, o].nonconvex.minimum_downtime\n expr += sum(self.status[i, o, t+d] for d in range(0,\n m.flows[i, o].nonconvex.minimum_downtime))\n return expr <= 0\n else:\n expr = 0\n expr += self.status[i, o, t]\n expr += -m.flows[i, o].nonconvex.initial_status\n return expr == 0\n self.min_downtime_constr = Constraint(\n self.MINDOWNTIMEFLOWS, m.TIMESTEPS, rule=_min_downtime_rule)\n\n # TODO: Add gradient constraints for nonconvex block / flows\n\n def _objective_expression(self):\n r\"\"\"Objective expression for nonconvex flows.\n \"\"\"\n if not hasattr(self, 'NONCONVEX_FLOWS'):\n return 0\n\n m = self.parent_block()\n\n startup_costs = 0\n shutdown_costs = 0\n activity_costs = 0\n\n if self.STARTUPFLOWS:\n for i, o in self.STARTUPFLOWS:\n if m.flows[i, o].nonconvex.startup_costs[0] is not None:\n startup_costs += sum(\n self.startup[i, o, t] *\n m.flows[i, o].nonconvex.startup_costs[t]\n for t in m.TIMESTEPS)\n self.startup_costs = Expression(expr=startup_costs)\n\n if self.SHUTDOWNFLOWS:\n for i, o in self.SHUTDOWNFLOWS:\n if m.flows[i, o].nonconvex.shutdown_costs[0] is not None:\n shutdown_costs += sum(\n self.shutdown[i, o, t] *\n m.flows[i, o].nonconvex.shutdown_costs[t]\n for t in m.TIMESTEPS)\n self.shutdown_costs = Expression(expr=shutdown_costs)\n\n if self.ACTIVITYCOSTFLOWS:\n for i, o in self.ACTIVITYCOSTFLOWS:\n if m.flows[i, o].nonconvex.activity_costs[0] is not None:\n activity_costs += sum(\n self.status[i, o, t] *\n m.flows[i, o].nonconvex.activity_costs[t]\n for t in m.TIMESTEPS)\n\n self.activity_costs = Expression(expr=activity_costs)\n\n return startup_costs + shutdown_costs + 
activity_costs\n","repo_name":"ChillkroeteTTS/oemof","sub_path":"oemof/solph/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":37233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"86"} +{"seq_id":"27392282998","text":"print(\"Welcome to the Love Calculator!\")\r\nname1 = input(\"What is your name? \\n\")\r\nname2 = input(\"What is their name? \\n\")\r\n\r\n# declaring variables for counting the occurence of true and love\r\nname1_value=0\r\nname2_value=0\r\ntrue_value=0\r\nlove_value=0\r\n\r\n#finding the sum of the letters of true in both names\r\nfor letter in \"true\":\r\n name1_value += name1.lower().count(letter)\r\n name2_value += name2.lower().count(letter)\r\ntrue_value = name1_value + name2_value\r\n\r\nname1_value=0\r\nname2_value=0\r\n#finding the sum of the letters of love in both names\r\nfor letter in \"love\":\r\n name1_value += name1.lower().count(letter)\r\n name2_value += name2.lower().count(letter)\r\nlove_value = name1_value + name2_value\r\n\r\n\r\nscore = int(str(true_value)+str(love_value))\r\n# printing out the score\r\nif score < 10 or score > 90:\r\n print(f\"Your score is {score}, you go together like coke and mentos.\")\r\nelif score > 40 and score < 50:\r\n print(f\"Your score is {score}, you are alright together.\")\r\nelse:\r\n print(f\"Your score is {score}.\")\r\n\r\nprint(true_value)\r\nprint(love_value)\r\n\r\n","repo_name":"Hyper-9O9/100-Days-Of-Python","sub_path":"Day3_exercises/Day3_Love_Calculator.py","file_name":"Day3_Love_Calculator.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"40288073059","text":"from math import *\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom scara.scara_library import *\n\n\ndef tf_forward_kinematics(l1, l2, q1, q2):\n x2 = l1 * tf.cos(q1) + l2 * tf.cos(q1 + q2)\n y2 = l1 * tf.sin(q1) + l2 * tf.sin(q1 + q2)\n return x2, y2\n\n\nclass Scara:\n def __init__(self, l1, l2, q_start, q_end, x0=0, y0=0):\n self.l1 = l1 # mm\n self.l2 = l2 # mm\n self.q_start = q_start # rad\n self.q_end = q_end # rad\n self.q1 = None # rad\n self.q2 = None # rad\n self.x0 = x0\n self.y0 = y0\n self.epsilon_high = 0.03 * pi\n self.epsilon_low = -self.epsilon_high\n\n def forward_kinematics(self):\n x2 = self.x0 + self.l1 * np.cos(self.q1) + self.l2 * np.cos(self.q1 + self.q2)\n y2 = self.y0 + self.l1 * np.sin(self.q1) + self.l2 * np.sin(self.q1 + self.q2)\n return x2, y2\n\n def build_random_data(self, num_samples):\n self.q1 = np.random.uniform(\n low=self.q_start, high=self.q_end, size=(num_samples, 1)\n )\n self.q2 = np.random.uniform(\n low=self.q_start, high=self.q_end, size=(num_samples, 1)\n )\n epsilon1 = np.random.uniform(\n low=self.epsilon_low, high=self.epsilon_high, size=(num_samples, 1)\n )\n epsilon2 = np.random.uniform(\n low=self.epsilon_low, high=self.epsilon_high, size=(num_samples, 1)\n )\n self.q1_curr = np.clip(self.q1 + epsilon1, self.q_start, self.q_end)\n self.q2_curr = np.clip(self.q2 + epsilon2, self.q_start, self.q_end)\n x, y = self.forward_kinematics()\n outputs = np.concatenate((self.q1, self.q2), axis=1)\n inputs = np.concatenate((x, y, self.q1_curr, self.q2_curr), axis=1)\n return inputs, outputs\n\n def build_simple_continuous_data(self, step):\n self.q1 = np.arange(self.q_start, self.q_end + step, step)\n self.q2 = np.arange(self.q_start, self.q_end + step, step)\n self.q1_curr = np.insert(\n self.q1[:-1], 0, 0\n ) # select all elements except 
the last one, insert zero at the 1st position\n self.q2_curr = np.insert(self.q2[:-1], 0, 0)\n x, y = self.forward_kinematics()\n outputs = np.column_stack((self.q1, self.q2))\n inputs = np.column_stack((x, y, self.q1_curr, self.q2_curr))\n return inputs, outputs\n\n def build_continuous_data(self, step):\n q1 = np.arange(self.q_start, self.q_end + step, step)\n q2 = np.arange(self.q_start, self.q_end + step, step)\n self.q2, self.q1 = np.meshgrid(q2, q1)\n self.q1, self.q2 = self.q1.ravel(), self.q2.ravel()\n self.q1_curr = np.insert(self.q1[:-1], 0, 0)\n self.q2_curr = np.insert(self.q2[:-1], 0, 0)\n x, y = self.forward_kinematics()\n outputs = np.column_stack((self.q1, self.q2))\n inputs = np.column_stack((x, y, self.q1_curr, self.q2_curr))\n return inputs, outputs\n\n def generate_random_data(self, batch_size):\n while True:\n self.q1 = np.random.uniform(\n low=self.q_start, high=self.q_end, size=(batch_size, 1)\n )\n self.q2 = np.random.uniform(\n low=self.q_start, high=self.q_end, size=(batch_size, 1)\n )\n epsilon1 = np.random.uniform(\n low=self.epsilon_low, high=self.epsilon_high, size=(batch_size, 1)\n )\n epsilon2 = np.random.uniform(\n low=self.epsilon_low, high=self.epsilon_high, size=(batch_size, 1)\n )\n self.q1_curr = np.clip(self.q1 + epsilon1, self.q_start, self.q_end)\n self.q2_curr = np.clip(self.q2 + epsilon2, self.q_start, self.q_end)\n x, y = self.forward_kinematics()\n inputs = np.column_stack((x, y, self.q1_curr, self.q2_curr))\n outputs = np.column_stack((self.q1, self.q2))\n yield inputs, outputs\n\n def get_physics_loss(self, y_true, y_pred):\n q1_true, q2_true, q1_pred, q2_pred = (\n y_true[:, 0],\n y_true[:, 1],\n y_pred[:, 0],\n y_pred[:, 1],\n )\n x2_true, y2_true = tf_forward_kinematics(self.l1, self.l2, q1_true, q2_true)\n x2_pred, y2_pred = tf_forward_kinematics(self.l1, self.l2, q1_pred, q2_pred)\n loss = tf.reduce_mean(\n tf.square(x2_true - x2_pred) + tf.square(y2_true - y2_pred)\n )\n return loss\n\n def get_BC_loss(self, y_true, y_pred):\n q1_pred, q2_pred = y_pred[:, 0], y_pred[:, 1]\n loss = tf.reduce_mean(\n tf.square(tf.maximum(self.q_start - q1_pred, 0.0))\n + tf.square(tf.maximum(self.q_start - q2_pred, 0.0))\n + tf.square(tf.maximum(q1_pred - self.q_end, 0.0))\n + tf.square(tf.maximum(q2_pred - self.q_end, 0.0))\n )\n return loss\n\n def get_data_loss(self, y_true, y_pred):\n q1_true, q2_true, q1_pred, q2_pred = (\n y_true[:, 0],\n y_true[:, 1],\n y_pred[:, 0],\n y_pred[:, 1],\n )\n loss = tf.reduce_mean(\n tf.square(q1_true - q1_pred) + tf.square(q2_true - q2_pred)\n )\n return loss\n\n def get_total_loss(self, y_true, y_pred):\n data_loss = self.get_data_loss(y_true, y_pred)\n physics_loss = self.get_physics_loss(y_true, y_pred)\n BC_loss = self.get_BC_loss(y_true, y_pred)\n return (\n data_loss + physics_loss + 1e10 * BC_loss\n ) # normalization:(x-x_min)/(x_max-x_min)\n","repo_name":"nguyenngocvy1/Thesis","sub_path":"Article/scara/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"25577673621","text":"# US\nfrom flask import Flask, request, jsonify\nfrom socket import *\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello world!'\n\n\n@app.route('/fibonacci', methods=['GET'])\ndef read_request():\n hostname = request.args.get('hostname')\n fs_port = request.args.get('fs_port')\n num = request.args.get('number')\n as_ip = request.args.get('as_ip')\n as_port = 
request.args.get('as_port')\n\n    # 400: one or more required query parameters are missing\n    if not(hostname) or not(fs_port) or not(num) or not(as_ip) or not(as_port):\n        return jsonify(\"Parameters missing.\"), 400\n    # 200: resolve the hostname via the authoritative server, then query the fibonacci server\n    else:\n        client_socket = socket(AF_INET, SOCK_DGRAM)\n        message = \"TYPE={}\\nNAME={}\\n\".format('A', hostname)\n        client_socket.sendto(message.encode(), (as_ip, int(as_port)))\n        temp_message, server = client_socket.recvfrom(2048)\n        client_socket.close()\n        temp_message = temp_message.decode()\n        temp_str = temp_message.split('\\n')\n        name = temp_str[1].split('=')[1]\n        value = temp_str[2].split('=')[1]\n        # Create a GET request; `url` is only a string, so actually fetch it and relay the JSON body\n        url = \"http://{}:{}/fibonacci?number={}\".format(\n            value, fs_port, num)\n        import json\n        from urllib.request import urlopen\n        with urlopen(url) as resp:\n            return jsonify(json.load(resp)), 200\n\n\napp.run(host='0.0.0.0',\n        port=8080,\n        debug=True)\n","repo_name":"gdrdrdr/dcn_spring2022_1","sub_path":"dns_app/US/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"7540785151","text":"__author__ = 'Frederic Escudie'\n__copyright__ = 'Copyright (C) 2018 IUCT-O'\n__license__ = 'GNU General Public License'\n__version__ = '1.0.0'\n__email__ = 'escudie.frederic@iuct-oncopole.fr'\n__status__ = 'prod'\n\nimport argparse\nfrom copy import deepcopy\nfrom anacore.bed import BEDIO\nfrom anacore.msi import Status, LocusResPairsCombi, MSILocus, MSIReport\nfrom anacore.msiannot import getLocusAnnotDict, addLociResToSpl, MSIAnnot\n\n\n########################################################################\n#\n# FUNCTIONS\n#\n########################################################################\ndef getAggregatedSpl(in_reports):\n    \"\"\"\n    Return one list of MSISample from several MSIReport files.\n\n    :param in_reports: Paths to the MSIReport files.\n    :type in_reports: list of MSIReport\n    :return: List of MSISample.\n    :rtype: list\n    \"\"\"\n    aggregated_spl = []\n    for curr_report in in_reports:\n        msi_samples = MSIReport.parse(curr_report)\n        for curr_spl in msi_samples:\n            aggregated_spl.append(curr_spl)\n    return aggregated_spl\n\n\ndef writeStatusMetrics(msi_samples, result_id, out_summary):\n    \"\"\"\n    Write the statistics of status by loci in population of samples.\n\n    :param msi_samples: The samples processed.\n    :type msi_samples: list of MSISample\n    :param result_id: Only the results of this method are processed.\n    :type result_id: str\n    :param out_summary: Path to the output file.\n    :type out_summary: str\n    \"\"\"\n    status_by_locus = dict()\n    locus_name_by_id = dict()\n    authorized_status = Status.authorizedValues()\n    # Get number of samples by status for each locus\n    for spl in msi_samples:\n        for locus_id, locus in spl.loci.items():\n            locus_name_by_id[locus_id] = locus.name\n            if locus_id not in status_by_locus:\n                status_by_locus[locus_id] = {status: 0 for status in authorized_status} # init each status for the current locus\n            if result_id in locus.results:\n                status = locus.results[result_id].status\n                status_by_locus[locus_id][status] += 1\n    # Write results\n    with open(out_summary, \"w\") as FH_out:\n        FH_out.write(\"Nb retained samples: {}\\n\".format(len(msi_samples)))\n        print(\n            \"Locus_position\", \"Locus_name\", \"\\t\".join([str(status) for status in authorized_status]),\n            sep=\"\\t\",\n            file=FH_out\n        )\n        for locus_id, locus_name in locus_name_by_id.items():\n            print(\n                locus_id,\n                locus_name,\n                \"\\t\".join(\n                    [str(status_by_locus[locus_id][status]) for status in authorized_status]\n                ),\n                sep=\"\\t\",\n                file=FH_out\n            )\n\n\ndef populateLoci(msi_samples, ref_loci):\n    
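# The US service above exchanges a simple newline-delimited key=value message
# with the authoritative server. A sketch of building and parsing that format;
# the reply layout (TYPE/NAME/VALUE/TTL) is assumed from the parsing code above:
def build_query(hostname):
    return "TYPE={}\nNAME={}\n".format("A", hostname)

def parse_reply(reply):
    fields = dict(line.split("=", 1) for line in reply.strip().split("\n"))
    return fields["NAME"], fields["VALUE"]

print(parse_reply("TYPE=A\nNAME=fibonacci.com\nVALUE=10.9.10.2\nTTL=10\n"))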
\"\"\"\n Add loci if they are missing in sample.\n\n :param msi_samples: The samples to populate.\n :type msi_samples: list of MSISample\n :param ref_loci: The loci to add if they are missing in samples.\n :type ref_loci: str\n \"\"\"\n for spl in msi_samples:\n for ref_locus in ref_loci:\n if ref_locus.position not in spl.loci:\n spl.addLocus(deepcopy(ref_locus))\n\n\ndef pruneResults(msi_samples, result_id, min_support_fragments):\n \"\"\"\n Remove LocusRes where the status is not determined (none or undetermined)\n and/or where the number of fragment used to determine status is lower than\n min_support_fragments.\n\n :param msi_samples: The pruned samples.\n :type msi_samples: list of MSISample\n :param result_id: The method on which the filters are applied.\n :type result_id: str\n :param min_support_fragments: The minimum number of fragments to keep a LocusRes in data.\n :type min_support_fragments: int\n \"\"\"\n removed_spl_idx = list()\n for spl_idx, spl in enumerate(msi_samples):\n nb_results = 0\n for locus_id, msi_locus in spl.loci.items():\n if result_id in msi_locus.results:\n if msi_locus.results[result_id].status not in [Status.stable, Status.unstable]:\n msi_locus.delResult(result_id)\n elif msi_locus.results[result_id].getNbFrag() < min_support_fragments:\n msi_locus.delResult(result_id)\n else:\n nb_results += 1\n if nb_results == 0:\n removed_spl_idx.append(spl_idx)\n for spl_idx in sorted(removed_spl_idx)[::-1]:\n del(msi_samples[spl_idx])\n\n\ndef process(args):\n \"\"\"\n Create training data for MSI classifiers. These references are stored in\n MSIReport format.\n\n :param args: The namespace extracted from the script arguments.\n :type args: Namespace\n \"\"\"\n # Get method name from annotations file\n method_names = set()\n for record in MSIAnnot(args.input_loci_annot):\n method_names.add(record[\"method_id\"])\n if len(method_names) != 1:\n raise ValueError('The annotation file must contain only one value for method_id. The file \"{}\" contains {}.'.format(args.input_reports, method_names))\n result_id = list(method_names)[0]\n # Get reference loci from targets file\n ref_loci = []\n with BEDIO(args.input_targets) as FH_in:\n for record in FH_in:\n ref_loci.append(\n MSILocus(\n \"{}:{}-{}\".format(record.chrom, record.start - 1, record.end),\n record.name\n )\n )\n # Aggregate samples\n msi_samples = getAggregatedSpl(args.inputs_report)\n # Add locus result info\n data_by_spl = getLocusAnnotDict(args.input_loci_annot)\n for curr_spl in msi_samples:\n addLociResToSpl(curr_spl, data_by_spl[curr_spl.name], LocusResPairsCombi)\n # Filter locus results\n populateLoci(msi_samples, ref_loci)\n pruneResults(msi_samples, result_id, args.min_support_fragments)\n # Display metrics\n writeStatusMetrics(msi_samples, result_id, args.output_info)\n # Write output\n MSIReport.write(msi_samples, args.output_references)\n\n\n########################################################################\n#\n# MAIN\n#\n########################################################################\nif __name__ == \"__main__\":\n # Manage parameters\n parser = argparse.ArgumentParser(description='Create training data for MSI classifiers. These references are stored in MSIReport format. All the loci are represented in all samples but all the loci does not have a result (if the data does not fit filters criteria).')\n parser.add_argument('-s', '--min-support-fragments', type=int, default=200, help='Minimum number of fragment in size distribution to keep the result. 
The distribution must contain a sufficient amount of data to be representative of the length distribution profile for the current locus. [Default: %(default)s]')\n    parser.add_argument('-v', '--version', action='version', version=__version__)\n    group_input = parser.add_argument_group('Inputs') # Inputs\n    group_input.add_argument('-r', '--inputs-report', required=True, nargs='+', help='Path(s) to the file(s) evaluated in the references creation process (format: MSIReport).')\n    group_input.add_argument('-l', '--input-loci-annot', required=True, help='Path to the file containing for each sample for each targeted locus the stability status (format: MSIAnnot). First line must be: sample\\tlocus_position\\tmethod_id\\tkey\\tvalue\\ttype. The method_id should be \"model\" and an example of line content is: H2291-1_S1\\t4:55598140-55598290\\tmodel\\tstatus\\tMSS\\tstr.')\n    group_input.add_argument('-t', '--input-targets', required=True, help='The locations of the microsatellite of interest (format: BED).')\n    group_output = parser.add_argument_group('Outputs') # Outputs\n    group_output.add_argument('-o', '--output-references', required=True, help='The path to the file containing the references distribution for each locus (format: MSIReport).')\n    group_output.add_argument('-i', '--output-info', required=True, help='The path to the file describing the number of references by status for each locus (format: TSV).')\n    args = parser.parse_args()\n\n    # Process\n    process(args)\n","repo_name":"bialimed/miams","sub_path":"jflow/workflows/MIAmS_learn/bin/createMSIRef.py","file_name":"createMSIRef.py","file_ext":"py","file_size_in_byte":8165,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"1438119343","text":"from bs4 import BeautifulSoup as bs\nimport datetime\nimport pandas as pd\nimport sys\nfrom urllib.request import urlopen\nfrom urllib.error import HTTPError\n\n\n\nnow = datetime.datetime.now()\nxl = pd.ExcelFile(\"links-to-track.xlsx\")\ndf_client = xl.parse(sys.argv[1])\n\n\n# Get anchor text\n# Multiple links\n\ntrack_links = {\n    \"Page URL\": [],\n    \"Link URL\": [],\n    \"Live?\":[],\n    \"Last Check\":[]\n}\n\n\nfor i, page_url in enumerate(df_client[\"Page URL\"]):\n    track_links[\"Page URL\"].append(page_url)\n    track_links[\"Link URL\"].append(df_client[\"Link URL\"][i])\n    total_rows = i\n\n\nfor i, page_url in enumerate(df_client['Page URL']):\n    print(page_url)\n    html = urlopen(page_url)\n    html_bs = bs(html.read(), \"html.parser\")\n\n    for a in html_bs.findAll(\"a\", href=True):\n        if a['href'] == df_client[\"Link URL\"][i]:\n            print(\"FOUND -->\", a, \"\\t\", now.strftime(\"%d-%m-%Y (%H:%M)\"), \"\\n\")\n            track_links[\"Live?\"].append(\"Yes\")\n            track_links[\"Last Check\"].append(now.strftime(\"%d-%m-%Y (%H:%M)\"))\n\n    for k, v in track_links.items():\n        print(len(v))\n        if k == \"Live?\" and len(v) < (i + 1):\n            v.append(\"No\")\n        if k == \"Last Check\" and len(v) < (i + 1):\n            v.append(now.strftime(\"%d-%m-%Y (%H:%M)\"))\n\n\n\n\ndf_final = pd.DataFrame.from_dict(track_links, orient='columns', dtype=None)\n\n# ExcelWriter expects a file path, not the ExcelFile handle opened above\nwriter = pd.ExcelWriter(\"links-to-track.xlsx\", engine=\"xlsxwriter\")\ndf_final.to_excel(writer, sheet_name=\"RT\")\nwriter.save()\n","repo_name":"fanderson2/link-checker","sub_path":"find-my-link.py","file_name":"find-my-link.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"31612430380","text":"from flask import Flask, render_template, request, redirect, flash, url_for\r\nimport 
main\r\n#import urllib.request \r\n#from urllib import request\r\nfrom app import app\r\nfrom werkzeug.utils import secure_filename\r\nfrom main import getPrediction\r\nimport os\r\ntry:\r\n from urllib.request import urlopen\r\nexcept ImportError:\r\n from urllib2 import urlopen\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/', methods=['POST'])\r\ndef submit_file():\r\n if request.method == 'POST':\r\n if 'file' not in request.files:\r\n flash('No file part')\r\n return redirect(request.url)\r\n file = request.files['file']\r\n if file.filename == '':\r\n flash('No file selected for uploading')\r\n return redirect(request.url)\r\n if file:\r\n filename = secure_filename(file.filename)\r\n filename= 'TEST.JPG'\r\n file.save(os.path.join(app.config['UPLOAD_FOLDER'],filename))\r\n path1=r\"C:\\Users\\Reddy\\Desktop\\proj_gui\\uploads\\TEST.JPG\"\r\n label = getPrediction(path1)\r\n print('in index.py')\r\n flash(label)\r\n return redirect('/')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run()","repo_name":"SrijanReddy/Plant-Disease-Detection-using-CNN","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"70008676765","text":"from Plan import PlanAhorro\r\n\r\nimport csv\r\n\r\nclass lista:\r\n __lista = []\r\n \r\n def __init__(self):\r\n self.__lista = []\r\n \r\n def agregarregisrto(self,plan):\r\n self.__lista.append(plan)\r\n \r\n def cargararchivo(self):\r\n archivo = open(\"archivo.csv\")\r\n reader = csv.reader(archivo,delimiter=\";\")\r\n fila = next(reader)\r\n bandera = fila[4]\r\n archivo.seek(0)\r\n for fila in reader:\r\n codigo = int(fila[0])\r\n modelo = fila[1]\r\n version = fila[2]\r\n valor = int(fila[3])\r\n cantcuotas = int(fila[4])\r\n cantlicitar = int(fila[5])\r\n if bandera != cantcuotas:\r\n PlanAhorro.cantidadcuotasplan=cantcuotas\r\n PlanAhorro.cantidadcuotaslicitar = cantlicitar\r\n bandera = cantcuotas\r\n unplan = PlanAhorro(codigo,modelo,version,valor)\r\n self.agregarregisrto(unplan) \r\n else:\r\n unplan = PlanAhorro(codigo,modelo,version,valor)\r\n self.agregarregisrto(unplan) \r\n archivo.close()\r\n \r\n def ModificarValor(self):\r\n for plan in self.__lista:\r\n print(plan)\r\n valor = int(input(\"Ingrese nuevo valor del vehiculo: \"))\r\n plan.ModificarValorVehiculo(valor)\r\n \r\n def BuscarCuotaInferior(self,valor):\r\n for plan in (self.__lista):\r\n #valor cuota = (importe vehículo/cantidad de cuotas) + importe vehículo * 0.1\r\n importe = plan.GetValor()\r\n cant = PlanAhorro.getcantcuotas()\r\n valorCuota = (importe/cant) + importe * 0.1\r\n if valorCuota \")\r\nendPort = input(\"End port => \")\r\n\r\nstartPort = int(startPort)\r\nendPort = int(endPort)\r\n\r\ndef scanner(port):\r\n try:\r\n sock.connect((inputHost,port))\r\n return True\r\n except:\r\n return False\r\n\r\nfor portNumber in range(startPort,endPort):\r\n print(\"Skeniranje porta: \", portNumber)\r\n if scanner(portNumber):\r\n print('Port: ',portNumber,'/tcp',' je otvoren!')","repo_name":"frankob2806/MrezProg","sub_path":"pythonfiles/pythonfiles_vjezba5/sc.py","file_name":"sc.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"2445909145","text":"import numpy\r\nimport pyttsx3\r\nimport datetime\r\nfrom datetime import date\r\nimport speech_recognition as sr\r\nimport 
pyaudio\r\nimport subprocess\r\nimport pyjokes \r\nfrom playsound import playsound\r\nimport keyboard\r\nfrom tkinter import *\r\nimport wikipedia\r\nimport webbrowser\r\nimport os\r\nimport smtplib\r\nimport socket\r\nname_assistant = \"SOPHIE\"\r\nsocket.getaddrinfo('localhost', 8080)\r\nengine = pyttsx3.init('sapi5')\r\nvoices = engine.getProperty('voices')\r\n#print(voices[0].id)\r\nengine.setProperty('voice',voices[1].id)\r\n\r\n\r\n\r\ndef speak(audio):\r\n engine.say(audio)\r\n engine.runAndWait()\r\n\r\ndef wishMe():\r\n hour = int(datetime.datetime.now().hour)\r\n if hour >= 0 and hour<12:\r\n speak(\"Good Morning!\")\r\n\r\n elif hour >= 12 and hour<18:\r\n speak(\"Good Afternoon!\")\r\n \r\n else:\r\n speak(\"Good Evening!\")\r\n\r\n speak(\"I am SOPHIE one point O, SOPHIE stands for Series One Processor Hyper Intelligent Encrypter, Please tell me how may I assist you\")\r\n\r\ndef note(text):\r\n date = datetime.datetime.now()\r\n file_name = str(date).replace(\":\", \"-\") + \"-note.txt\"\r\n with open(file_name, \"w\") as f:\r\n f.write(text)\r\n\r\n subprocess.Popen([\"notepad.exe\", file_name])\r\n\r\ndef takeCommand():\r\n '''\r\n it takes microphone input from the user and gives string output.\r\n '''\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listening...\")\r\n r.pause_threshold = 1\r\n r.energy_threshold = 1000\r\n audio = r.listen(source)\r\n\r\n try:\r\n print(\"Recognizing...\")\r\n query = r.recognize_google(audio, language='en-in')\r\n print(f\"user said: {query}\\n\")\r\n except Exception as e:\r\n print(e)\r\n print(\"Say that again please...\")\r\n return \"None\"\r\n return query\r\n\r\n\r\ndef sendEmail(to,content):\r\n server = smtplib.SMTP('smtplib.gmail.com',587)\r\n server.ehlo()\r\n server.starttls()\r\n server.login('tewariaman1899@gmail.com','Shivhari@99')\r\n server.sendmail('tewarisaman1899@gmail.com',to,content)\r\n server.close()\r\n\r\ndef change_name():\r\n pass\r\n\r\ndef change():\r\n pass\r\n\r\ndef info():\r\n pass\r\ndef main_screen():\r\n\r\n global screen\r\n screen = Tk()\r\n screen.title(name_assistant)\r\n screen.geometry(\"100x250\")\r\n screen.iconbitmap('app_icon.ico')\r\n\r\n\r\n name_label = Label(text = name_assistant,width = 300, bg = \"black\", fg=\"white\", font = (\"Calibri\", 13))\r\n name_label.pack()\r\n\r\n\r\n microphone_photo = PhotoImage(file = \"E:\\\\virtual_assistant.png\")\r\n microphone_button = Button(image=microphone_photo, command = takeCommand)\r\n microphone_button.pack(pady=10) \r\n \r\nif __name__==\"__main__\":\r\n wishMe()\r\n query = takeCommand().lower()\r\n\r\nif 'wikipedia' in query:\r\n speak('Searching Wikipedia...')\r\n query = query.replace(\"wikipedia\",\"\")\r\n results = wikipedia.summary(query,sentences= 10)\r\n speak(\"According to wikipedia\")\r\n speak(results)\r\nif 'note this' in query: \r\n statement = query.replace(\"note this\", \"\")\r\n note(statement) \r\nif 'joke' in query:\r\n speak(pyjokes.get_joke())\r\n\r\nelif 'open youtube' in query:\r\n webbrowser.open(\"https://www.youtube.com/\")\r\nelif'will you marry me' in query:\r\n speak('we are not allowed to marry our creators, I am trying on Siri hahaha')\r\nelif 'what do you look like' in query:\r\n speak('What do you think I look like, a beautiful woman !!!')\r\nelif 'do you love me' in query:\r\n speak('I love every creation of GOD, of course I love you !!!')\r\nelif 'open google' in query:\r\n webbrowser.open(\"https://www.google.co.in/\")\r\nelif 'open stack overflow' in query:\r\n 
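# The assistant above routes spoken commands through a long if/elif chain of
# substring checks. A compact sketch of the same idea as a keyword-to-handler
# table (handler names are hypothetical stand-ins, not from the record):
def open_youtube():
    print("opening youtube")

def tell_joke():
    print("telling a joke")

HANDLERS = {"open youtube": open_youtube, "joke": tell_joke}

def dispatch(query):
    for keyword, handler in HANDLERS.items():
        if keyword in query:
            return handler()
    print("say that again please...")

dispatch("please open youtube")  # prints "opening youtube"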
webbrowser.open(\"https://stackoverflow.com/\")\r\nelif 'open amazon' in query:\r\n webbrowser.open(\"https://www.amazon.in/\")\r\nelif 'the date' in query:\r\n today = date.today()\r\n strDate = today.strftime(\"%d/%m/%Y\")\r\n speak(f\"Sir, the date is{strDate}\")\r\nelif 'the time' in query:\r\n strTime = datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n speak(f\"Sir, the Time is{strTime}\")\r\nelif 'open maps' in query:\r\n webbrowser.open(\"https://www.google.com/maps\")\r\n\r\nelif 'open code' in query:\r\n codePath = \"C:\\\\Users\\\\tewar\\\\AppData\\\\Local\\\\Programs\\\\Microsoft VS Code\\\\Code.exe\"\r\n os.startfile(codePath)\r\nelif 'play music' in query:\r\n music_dir = 'E:\\\\Music'\r\n songs = os.listdir(music_dir)\r\n print(songs) \r\n os.startfile(os.path.join(music_dir, songs[1]))\r\nelif 'i am worried' in query:\r\n webbrowser.open(\"https://www.youtube.com/watch?v=zSioX0v5iPQ\")\r\n\r\nelif 'open Gmail' in query:\r\n webbrowser.open(\"gmail.com\")\r\n\r\nelif 'send email' in query:\r\n try:\r\n speak(\"What should I say?\")\r\n content = takeCommand()\r\n to = \"tewarishivhari999@gmail.com\"\r\n sendEmail(to, content)\r\n speak(\"Email has been sent\")\r\n except Exception as e:\r\n print(e)\r\n speak(\"Sorry Sir, I can't send this email at the moment \")\r\nelif 'thank you' in query:\r\n speak(\"always at your service SIR\")\r\n","repo_name":"Shivhari99/SOPHIE","sub_path":"SOPHIE.py","file_name":"SOPHIE.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"8583644565","text":"import gi\nimport os\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\nclass MainWindow(Gtk.Window):\n def __init__(self, control):\n self.control = control\n Gtk.Window.__init__(self, title=\"Audacity2Praat\")\n self.set_default_size(50, 50)\n self.set_position(Gtk.WindowPosition.CENTER)\n\n self.box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)\n self.add(self.box)\n\n self.label = Gtk.Label(\"ready\")\n self.box.pack_start(self.label, True, True, 0)\n\n self.button = Gtk.Button(label=\"Select Audacity file\")\n self.button.connect(\"clicked\", self.onButtonClicked)\n self.box.pack_start(self.button, True, True, 0)\n\n self.connect(\"delete-event\", Gtk.main_quit)\n\n def toShow(self):\n self.show_all()\n Gtk.main()\n\n def onButtonClicked(self, widget):\n dialog = Gtk.FileChooserDialog(\"Please choose the labels file\", self,\n Gtk.FileChooserAction.OPEN,\n (Gtk.STOCK_OPEN, Gtk.ResponseType.OK,\n Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL))\n\n response = dialog.run()\n if response == Gtk.ResponseType.OK:\n origFile = dialog.get_filename()\n self.control.parseAudicity(origFile)\n\n newFile = os.path.splitext(origFile)[0]\n self.control.saveResults(newFile)\n\n self.label.set_text(\"Complete!\")\n\n dialog.destroy()\n","repo_name":"babylanguagelab/bll_app","sub_path":"src/app/audicity2praat/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"3422258020","text":"# -*- coding: utf-8 -*-\n\"\"\"\nClusters object\n\"\"\"\nimport pandas as pd\nimport nationalparks as usnp\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.metrics import silhouette_samples, silhouette_score\nimport numpy\n\nclass Clusters():\n '''\n Train clustering model for all parks\n '''\n\n def __init__(self, parkunit):\n self.park = usnp.Park(parkunit)\n\n def 
train_DBSCAN(self, verbose=True):\n '''\n Performs DBSCAN clustering of images based on latitude and longitude.\n '''\n if verbose: print(\"... \" + self.park.parkname + \" (\" + self.park.parkunit + \")\")\n ## get all photos\n df_photos = self.park.get_photos()\n ## sort by latitude and longitude\n df_photos = df_photos.sort_values(by=['longitude', 'latitude'])\n ## extract latitude and longitude\n df_geo = df_photos[['latitude', 'longitude']]\n ## compute differences\n diff = df_geo.diff()\n ## compute euclidian distance\n diff['distance'] = (diff['latitude']**2 + diff['longitude']**2) ** 0.5\n ## format dataframe\n diff = diff.rename(columns={'latitude':\"latitude_diff\", 'longitude':'longitude_diff'})\n ## merge data\n df_geo = pd.concat([df_photos[df_photos.columns.difference(['latitude', 'longitude'])], df_geo, diff], axis=1)\n\n ## HYPER-PARAMETER TUNING\n ## eps\n best_score = -1\n best_eps = 0\n ## create a set of candidate values for eps based on quantile distribution of the distance between points\n range_eps = df_geo[~df_geo['distance'].isnull()]['distance'].quantile([0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.675,0.70,0.75,0.8,0.825,0.85,0.875,0.90,0.925,0.95,0.975,0.98,0.99,0.995])\n \n if df_geo.shape[0]<=100:\n min_cluster_count = 2\n max_cluster_count = 5\n elif df_geo.shape[0]<=1000:\n min_cluster_count = 5\n max_cluster_count = 50\n elif df_geo.shape[0]<=10000:\n min_cluster_count= 10\n max_cluster_count = 500\n else:\n min_cluster_count = 10\n max_cluster_count = 500 \n \n for i in range_eps:\n if i==0:\n continue\n if i > 0.4 * df_geo['distance'].max():\n continue\n ## create and train DBSCAN\n db = DBSCAN(eps=i, min_samples=5, n_jobs=-1).fit(df_geo[['latitude', 'longitude']]) \n labels = db.labels_\n ## compute silhouette score\n if len(set(labels))>=min_cluster_count and len(set(labels))<=max_cluster_count:\n if min_cluster_count==1 and len(set(labels))==1 and best_score==-1:\n print(\" For eps value = \"+str(i), \"\\n Number of clusters: {}\".format(len(set(labels))))\n best_eps = i\n else:\n silhouette_avg = silhouette_score(df_geo[['longitude', 'latitude']], labels)\n print(\" For eps value: \"+str(i), \", quantile: {:.5f}\".format(i), \"\\n Number of clusters: {}\".format(len(set(labels))),\n \"\\n Avg silhouette score is: {:.4f}\".format(silhouette_avg))\n if best_score < silhouette_avg:\n print(\" => Improved\")\n best_score = silhouette_avg\n best_eps = i\n else:\n print(\" eps: {:.5f} out of cluster bounds\".format(i))\n \n ## min_samples\n best_score = -1\n best_min_samples = 0\n min_samples = [2,3,5,6,7,8,9,10,12,14,16,18,20]\n for i in min_samples:\n ## create and train DBSCAN\n db = DBSCAN(eps=best_eps, min_samples=i, n_jobs=-1).fit(df_geo[['longitude', 'latitude']])\n labels = db.labels_\n ## compute silhouette score\n if len(set(labels)) >= min_cluster_count:\n if min_cluster_count==1 and len(set(labels))==1 and best_score==-1 and best_min_samples==0:\n print(\" For eps value = \"+str(i), \"\\n Number of clusters: {}\".format(len(set(labels))))\n best_min_samples=i\n else:\n silhouette_avg = silhouette_score(df_geo[['longitude', 'latitude']], labels)\n print(\" For min_sample value = \"+str(i), \"\\n Number of clusters: {}\".format(len(set(labels))),\n \"\\n Avg silhouette score is: {:.4f}\".format(silhouette_avg))\n if best_score < silhouette_avg and len(set(labels))>=min_cluster_count:\n print(\" => Improved\")\n best_score = silhouette_avg\n best_min_samples = i\n \n ## train final DBSCAN\n db = DBSCAN(eps=best_eps, min_samples=best_min_samples, 
n_jobs=-1).fit(df_geo[['longitude', 'latitude']])\n\n ## get core samples\n core_sample_mask = numpy.zeros_like(db.labels_, dtype=bool)\n core_sample_mask[db.core_sample_indices_] = True\n df_geo['core'] = core_sample_mask\n df_geo['labels'] = db.labels_\n\n del df_geo['distance']\n del df_geo['latitude_diff']\n del df_geo['longitude_diff']\n\n return df_geo, db.labels_.max() + 1, best_eps, best_min_samples\n\n def jaccard_index(tags_cluster_1, tags_cluster_2):\n '''\n Returns the Jaccard Similarity Index between two cluster's tags\n \n Inputs:\n tags_cluster_1: list of tags (with potential duplicates) associated to cluster_1\n tags_cluster_2: list of tags (with potential duplicates) associated to cluster_2\n Outputs:\n Jaccard Similarity Index between two cluster's tags\n '''\n \n if len(set(tags_cluster_1)) == 0 and len(set(tags_cluster_2)) == 0:\n return 0\n else:\n shared = len(set(tags_cluster_1).intersection(tags_cluster_2))\n return shared / float(len(set(tags_cluster_1)) + len(set(tags_cluster_2)) - shared)\n\n def get_clusters(self):\n '''\n Queries clusters of park from database\n '''\n query = {\n 'parkunit': self.parkunit,\n }\n\n my_clusters = list(usnp.db.clusters.find(query))\n df = pd.DataFrame(my_clusters)\n df = df.set_index('id', drop=True)\n return df\n\n","repo_name":"tdody/NationalParks","sub_path":"nationalparks/clusters.py","file_name":"clusters.py","file_ext":"py","file_size_in_byte":6336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"37479417359","text":"from util import getKaggleMNIST\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom sklearn.utils import shuffle\n\n# siec 784 -> 350 -> 784\n\n\nclass AutoEncoder():\n def __init__(self, D, M):\n\n # input hidden\n Wi = tf.random_normal(shape=(D, M)) * 2 / np.sqrt(M)\n bi = np.zeros(M).astype(np.float32)\n\n # hidden output\n Wh = tf.random_normal(shape=(M, D)) * 2 / np.sqrt(D)\n bh = np.zeros(D).astype(np.float32)\n\n self.Wi = tf.Variable(Wi)\n self.Wh = tf.Variable(Wh)\n self.bi = tf.Variable(bi)\n self.bh = tf.Variable(bh)\n\n # placeholder for a batch of data\n self.X_in = tf.placeholder(tf.float32, shape=(None, D))\n self.X_hat = self.forward_output(self.X_in)\n\n logits = self.forward_logit(self.X_in)\n self.cost = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=self.X_in,\n logits=logits\n )\n )\n\n self.train_op = tf.train.AdamOptimizer(1e-3).minimize(self.cost)\n #self.train_op = tf.train.RMSPropOptimizer(learning_rate=0.0001).minimize(self.cost)\n\n def fit(self, X, epochs=1, batch_sz=100, show_plot=False):\n N, D = X.shape\n\n n_batches = N // batch_sz\n\n costs = []\n print(\"trainnig autoencoder\")\n for i in range(epochs):\n print(\"epoch: \", epochs)\n X = shuffle(X)\n for j in range(n_batches):\n batch = X[j * batch_sz:j * batch_sz + batch_sz]\n _, c = self.session.run((self.train_op, self.cost), feed_dict={self.X_in: batch})\n if j % 10:\n print(\"j / n_batches:\", j, \"/\", n_batches, \"cost:\", c)\n costs.append(c)\n if show_plot:\n plt.plot(costs)\n plt.show()\n\n def forward_hidden(self, X):\n # return tf.nn.sigmoid(tf.matmul(X, self.Wi) + self.bi)\n return tf.nn.relu(tf.matmul(X, self.Wi) + self.bi)\n # return tf.nn.tanh(tf.matmul(X, self.Wi) + self.bi)\n\n def forward_logit(self, X): # just output without using sigmoid on it\n Z = self.forward_hidden(X)\n return tf.matmul(Z, self.Wh) + self.bh\n\n def forward_output(self, X):\n return tf.nn.sigmoid(self.forward_logit(X))\n\n 
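# --- Worked check (illustrative, not part of the dataset record) ---
# jaccard_index above reduces both tag lists to sets, so duplicates do
# not change the result: |A & B| / |A | B|.
tags_a = ["lake", "trail", "lake", "sunset"]
tags_b = ["trail", "sunset", "wildlife"]
shared = len(set(tags_a) & set(tags_b))               # {'trail', 'sunset'} -> 2
union = len(set(tags_a)) + len(set(tags_b)) - shared  # 3 + 3 - 2 = 4
print(shared / union)                                 # 0.5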
def set_session(self, session):\n self.session = session\n\n def predict(self, X):\n return self.session.run(self.X_hat, feed_dict={self.X_in: X})\n\n\ndef single_AE():\n X, Y, Xtest, Ytest = getKaggleMNIST()\n\n _, D = X.shape\n\n autoencoder = AutoEncoder(D, 250)\n init_op = tf.global_variables_initializer()\n with tf.Session() as session:\n session.run(init_op)\n autoencoder.set_session(session)\n autoencoder.fit(X)\n\n done = False\n while not done:\n i = np.random.choice(len(Xtest))\n x = Xtest[i]\n y = autoencoder.predict([x])\n\n plt.subplot(1, 2, 1)\n plt.imshow(x.reshape(28, 28), cmap='gray')\n plt.title('Original')\n\n plt.subplot(1, 2, 2)\n plt.imshow(y.reshape(28, 28), cmap='gray')\n plt.title('Reconstructed')\n\n plt.show()\n\n ans = input(\"Generate another?\")\n if ans and ans[0] in ('n' or 'N'):\n done = True\n\n\nif __name__ == '__main__':\n single_AE()\n","repo_name":"RKorzeniowski/Lazy_programmer_projects","sub_path":"unsupervised_ml/basic_autoencoder_tf.py","file_name":"basic_autoencoder_tf.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"29460250283","text":"'''\n Given an undirected graph and an integer M.\n The task is to determine if the graph can be colored with at most M colors \n such that no two adjacent vertices of the graph are colored with the same color. \n Here coloring of a graph means the assignment of colors to all vertices. \n Print True if it is possible to colour vertices and False otherwise.\n'''\nprint('''Given an undirected graph and an integer M.\nFunction to determine if graph can be coloured with at most M colours such\nthat no two adjacent vertices of graph are coloured with same colour.\n''')\ndef graphColoring(graph, N, M):\n \n #your code here\n color = []\n while len(color) < N:\n for i in range(1,M+1):\n color.append(i)\n\n for i in range(N):\n for j in range(i+1, N):\n if graph[i][j] & color[j] == color[i]:\n result = False\n else:\n result = True\n #print(graph)\n return result\n \n\nif __name__ == \"__main__\":\n print('Enter the number of test (t=1)')\n t = int(input())\n while(t>0):\n print('Nodes(N = 14)')\n N = int(input())\n print('Color(M = 4)')\n M = int(input())\n print('Edges(E=65)')\n E = int(input())\n print('Edges[]= 3 6 1 8 2 8 9 14 4 14 13 14 1 6 4 6 11 14 2 7 3 10 10 11 4 5 2 3 8 13 10 13 12 14 6 8 4 9 5 6 1 13 10 12 8 14 1 10 12 13 3 7 6 14 6 12 10 14 4 11 1 14 9 11 5 9 6 7 7 10 7 8 9 13 5 14 8 10 5 8 3 12 6 13 1 12 2 11 1 2 9 10 4 7 8 12 11 12 5 12 2 12 5 10 6 9 7 13 4 10 4 12 5 11 8 11 3 13 7 11 5 7 7 14 1 7 1 4 7 12')\n list = [int(x) for x in input().strip().split()]\n graph = [[0 for i in range(N)] for j in range(N)]\n cnt = 0\n for i in range(E):\n graph[list[cnt]-1][list[cnt+1]-1]=1\n graph[list[cnt+1]-1][list[cnt]-1]=1\n cnt+=2\n #print(graph)\n print(graphColoring(graph, N, M))\n t = t-1\n","repo_name":"PawanKrGunjan/DSA-and-AI-Algorithm","sub_path":"Python Problems/M- Coloring Problem/M__Coloring_Problem.py","file_name":"M__Coloring_Problem.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"36397461624","text":"from __future__ import print_function\n\nimport os\nimport sys\nimport cv2\nimport dlib\nimport time\nimport argparse\nimport numpy as np\n\nfrom pyopenface import OpenfaceComputer\n\n\nclass Profiler(object):\n \"\"\"Accumulate images and features for various identities\"\"\"\n\n def 
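# --- Illustrative sketch, not the embedded solution's approach ---
# The graphColoring record above assigns colours cyclically and then
# checks pairs; the classic decision procedure for m-colouring is a
# backtracking search over vertices, sketched here.
def can_color(graph, N, M):
    colors = [0] * N                           # 0 means "uncoloured"

    def safe(v, c):
        return all(not graph[v][u] or colors[u] != c for u in range(N))

    def solve(v):
        if v == N:
            return True
        for c in range(1, M + 1):
            if safe(v, c):
                colors[v] = c
                if solve(v + 1):
                    return True
                colors[v] = 0                  # backtrack
        return False

    return solve(0)

print(can_color([[0, 1], [1, 0]], 2, 2))       # True: one edge, two colours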
__init__(self, low_threshold=0.3, high_threshold=0.5):\n \"\"\"\n \"\"\"\n self.low_threshold = low_threshold\n self.high_threshold = high_threshold\n self.gallery = None\n self.images = []\n\n def add(self, image, row128):\n \"\"\"Add 'row128' to the 'gallery' if the Euclidian distance between\n 'row128' and all the other rows of 'self.gallery are <\n low_threshold.\n\n \"\"\"\n if self.gallery is None:\n self.gallery = row128\n self.images.append(image)\n print(\"Add gallery\")\n else:\n distances = np.linalg.norm(self.gallery - row128, axis=1)\n distances = np.sort(distances)\n if distances[0] > self.low_threshold:\n self.gallery = np.vstack((self.gallery, row128))\n self.images.append(image)\n print(\"gallery {} images {}\".format(self.gallery.shape,\n len(self.images)))\n\n\n def save(self, dirname, extension=\"png\"):\n \"\"\"Save the images\"\"\"\n if not os.path.exists(dirname): os.mkdir(dirname)\n\n for i, image in enumerate(self.images):\n name = \"{:06d}.{}\".format(i, extension)\n name = os.path.join(dirname, name)\n cv2.imwrite(name, image)\n\n\n def cluster(self, dirname, extension=\"png\"):\n \"\"\"Perform a clustering of the images to identify identities.\n\n The images are then saved in the directory 'dirname'\n alongside with a a text file woth following format:\n\n The clustering is performed by selecting randomly an image,\n and accumulate in the same cluster all images of distance <\n self.high_threshold. When no image are < self.high_threshold,\n a new one is randomly selected, etc. until all images have\n been assigned to as cluster.\n\n # A comment\n 1 CHANGE-NAME\n /dirname/image1.png\n /dirname/image2.png\n /dirname/image3.png\n 2 CHANGE-NAME\n /dirname/image4.png\n /dirname/image5.png\n /dirname/image6.png\n 3 CHANGE\n ...\n\n \"\"\"\n pool = self.gallery.copy()\n\n clusters = []\n n_images = self.gallery.shape[0]\n remaining = set(range(n_images))\n\n while len(remaining) > 0:\n # Pick a sample\n current = remaining.pop()\n is_included = 0\n\n for no_cluster, cluster in enumerate(clusters):\n for idx in cluster:\n x1 = self.gallery[idx]\n x2 = self.gallery[current]\n distance = np.linalg.norm(x2 - x1)\n # print(\"|{} - {}| = {}\".format(current, idx, distance))\n if distance < self.high_threshold:\n cluster.append(current)\n is_included = 1\n # print(\"Add {} to {}\".format(current, no_cluster))\n break\n\n if is_included:\n break\n\n if not is_included:\n clusters.append([current])\n\n # print(clusters)\n\n if not os.path.exists(dirname): os.mkdir(dirname)\n filename = os.path.join(dirname, \"list.txt\")\n with open(filename, \"w\") as fid:\n fid.write(\"# Gallery of persons\\n\")\n for no_cluster, cluster in enumerate(clusters):\n fid.write(\"{} set_name_here_in_one_word\\n\".format(no_cluster))\n prefix = os.path.join(dirname, \"person_{}\".format(no_cluster))\n for idx in cluster:\n image_name = \"{}-{}.{}\".format(prefix, idx, extension)\n cv2.imwrite(image_name, self.images[idx])\n fid.write(\"{}\\n\".format(image_name))\n\n\nif __name__ == \"__main__\":\n \"\"\"\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dlib\",\n type=str,\n help=\"Path to shape_predictor_68_face_landmarks.dat\")\n parser.add_argument(\"--pwd\",\n type=str,\n default=\"/tmp/profiler\",\n help=\"Directory where to save images (should ne absolute path)\")\n parser.add_argument(\"--verbose\",\n type=int,\n default=0,\n help=\"Verbosity level\")\n opts = parser.parse_args()\n\n detector = dlib.get_frontal_face_detector()\n\n computer = OpenfaceComputer(opts.dlib, 
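# --- Minimal sketch of the greedy clustering described above ---
# A sample joins the first cluster holding a member closer than the
# threshold, otherwise it starts a new cluster. The 1-D "embeddings"
# below are an assumption for illustration.
import numpy as np

def greedy_cluster(gallery, threshold=0.5):
    clusters = []
    for idx in range(len(gallery)):
        for cluster in clusters:
            if any(np.linalg.norm(gallery[idx] - gallery[j]) < threshold
                   for j in cluster):
                cluster.append(idx)
                break
        else:
            clusters.append([idx])
    return clusters

gallery = np.array([[0.0], [0.1], [2.0], [2.2], [9.0]])
print(greedy_cluster(gallery))                 # [[0, 1], [2, 3], [4]]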
useCuda=True)\n\n profiler = Profiler(low_threshold=0.3)\n\n # Open webcam\n camera = cv2.VideoCapture(0)\n if not camera.isOpened():\n print(\"Cannot open camera\")\n sys.exit(1)\n\n while 1:\n _, frame = camera.read()\n\n detections = detector(frame)\n\n for d in detections:\n x0 = d.left()\n x1 = d.right()\n y0 = d.top()\n y1 = d.bottom()\n dx = 0.1 * (y1 - y0)\n dy = 0.1 * (x1 - x0)\n x0 = max(0, int(x0 - dx))\n x1 = min(int(x1 + dx), frame.shape[1]-1)\n y0 = max(0, int(y0 - dy))\n y1 = min(int(y1 + dy), frame.shape[0]-1)\n crop = frame[y0:y1-1,x0:x1-1] # No copy\n features, debug = computer.compute_on_image(crop, visu=1)\n if features is not None:\n profiler.add(crop, features)\n if \"visu\" in debug:\n cv2.imshow(\"crop\", debug[\"visu\"])\n\n # Display output\n display = frame.copy()\n for d in detections:\n bb = [d.left(), d.top(), d.left()+d.width(), d.top()+d.height()]\n cv2.rectangle(display, (bb[0],bb[1]), (bb[2],bb[3]), (255,0,0), 3)\n\n cv2.imshow(\"Camera\", display)\n\n if cv2.waitKey(30) == 113: # 'q'\n cv2.destroyAllWindows()\n # profiler.save(opts.pwd)\n profiler.cluster(opts.pwd)\n break\n","repo_name":"idiap/pytopenface","sub_path":"pytopenface/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"73200844456","text":"import json\nimport requests\nimport settings\nfrom faker import Faker\nfrom client.socket_http_client import SocketClientHTTP\nfrom mock.flask_mock import SURNAME_DATA\n\n\nfake = Faker()\ns_client = SocketClientHTTP()\nurl = f'http://{settings.Mock.HOST}:{settings.Mock.PORT}'\n\n\ndef test_by_socket_get():\n name, surname = [fake.first_name(), fake.last_name()]\n SURNAME_DATA[name] = surname\n res = s_client.mock_request(f'/get_surname/{name}')\n assert json.loads(res[-1])['surname'] == surname\n\n\ndef test_by_socket_put():\n name, surname, new_surname = [fake.first_name(), fake.last_name(), fake.last_name()]\n SURNAME_DATA[name] = surname\n res = s_client.mock_request(f'/get_surname/{name}')\n assert json.loads(res[-1])['surname'] == surname\n s_client.mock_request(f'/put_surname/{name}', jdata={'surname': new_surname}, method='PUT')\n res = s_client.mock_request(f'/get_surname/{name}')\n assert json.loads(res[-1])['surname'] == new_surname\n\n\ndef test_by_socket_del():\n name, surname = [fake.first_name(), fake.last_name()]\n SURNAME_DATA[name] = surname\n res = s_client.mock_request(f'/get_surname/{name}')\n assert json.loads(res[-1])['surname'] == surname\n s_client.mock_request(f'/delete_surname/{name}', method='DELETE')\n res = s_client.mock_request(f'/get_surname/{name}')\n assert json.loads(res[-1]) == f\"Surname for user {name} not fount\"\n","repo_name":"Froztgal/2021-1-MAILRU-SDET-Python-I-Malakhov","sub_path":"homework_7/code/tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"20523965380","text":"from aoc2019.helpers.day01 import calculateFuel, calculateFuelWithAddedMass\nfrom aoc2019.shared.solution import Solution\n\n\nclass Day1(Solution):\n def __init__(self):\n Solution.__init__(self)\n self.inputPath = self._dirPath + \"/../input/day01.txt\"\n\n def part1(self):\n fuelSum = 0\n with open(self.inputPath) as input:\n for line in input:\n fuelSum += calculateFuel(int(line))\n return fuelSum\n\n def part2(self):\n fuelSum = 0\n with open(self.inputPath) as input:\n 
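# --- The crop arithmetic above, in isolation (illustrative) ---
# Each dlib box is padded by 10% (x by a fraction of the height, y by a
# fraction of the width, mirroring the record) and clamped to the frame.
def pad_box(x0, y0, x1, y1, width, height, frac=0.1):
    dx = frac * (y1 - y0)
    dy = frac * (x1 - x0)
    return (max(0, int(x0 - dx)), max(0, int(y0 - dy)),
            min(int(x1 + dx), width - 1), min(int(y1 + dy), height - 1))

print(pad_box(100, 50, 200, 150, 640, 480))    # (90, 40, 210, 160)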
for line in input:\n                fuelSum += calculateFuelWithAddedMass(int(line))\n        return fuelSum\n","repo_name":"mdalzell/advent-of-code-2019","sub_path":"aoc2019/solutions/day01.py","file_name":"day01.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"72207968298","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/10/23 0023 9:22\n# @Author : 没有蜡笔的小新\n# @E-mail : sqw123az@sina.com\n# @FileName: N-Queens.py\n# @Software: PyCharm\n# @Blog :https://blog.csdn.net/Asunqingwen\n# @GitHub :https://github.com/Asunqingwen\n\"\"\"\nThe n-queens puzzle is the problem of placing n queens on an n×n chessboard such that no two queens attack each other.\n\n\n\nGiven an integer n, return all distinct solutions to the n-queens puzzle.\n\nEach solution contains a distinct board configuration of the n-queens' placement, where 'Q' and '.' both indicate a queen and an empty space respectively.\n\nExample:\n\nInput: 4\nOutput: [\n [\".Q..\", // Solution 1\n \"...Q\",\n \"Q...\",\n \"..Q.\"],\n\n [\"..Q.\", // Solution 2\n \"Q...\",\n \"...Q\",\n \".Q..\"]\n]\nExplanation: There exist two distinct solutions to the 4-queens puzzle as shown above.\n\"\"\"\nfrom typing import List\n\n\ndef solveNQueens(n: int) -> List[List[str]]:\n\tdef helper():\n\t\trow = len(queens)\n\t\tif row == n:\n\t\t\tres.append(queens[:])\n\t\tfor col in range(n):\n\t\t\tif col not in queens and xy_dif[row - col] and xy_sum[row + col]:\n\t\t\t\tqueens.append(col) # place a queen\n\t\t\t\txy_dif[row - col], xy_sum[row + col] = 0, 0\n\t\t\t\thelper() # next row\n\t\t\t\tqueens.pop() # backtrack\n\t\t\t\txy_dif[row - col], xy_sum[row + col] = 1, 1\n\n\tqueens = []\n\txy_dif = [1] * (2 * n - 1) # main diagonals\n\txy_sum = [1] * (2 * n - 1) # anti-diagonals\n\tres = []\n\thelper()\n\treturn [['.' * i + 'Q' + '.' * (n - i - 1) for i in sol] for sol in res]\n\n\nif __name__ == '__main__':\n\tn = 4\n\tresult = solveNQueens(n)\n\tprint(result)\n","repo_name":"Asunqingwen/LeetCode","sub_path":"hard/N-Queens.py","file_name":"N-Queens.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18420484989","text":"import sys\nfrom bisect import *\nfrom heapq import *\nfrom collections import *\nfrom itertools import *\nfrom functools import *\nfrom math import *\nfrom fractions import *\n\nsys.setrecursionlimit(100000000)\ninput = lambda: sys.stdin.readline().rstrip()\n\nN, K = map(int, input().split())\nS = list(map(int, input()))\n\nX = []\nfor c in S:\n    if len(X) >= 1 and X[-1][0] == c:\n        X[-1][1] += 1\n    else:\n        X.append([c, 1])\n\nsm = [0] * (len(X) + 1)\nfor i in range(len(X)):\n    sm[i + 1] = sm[i] + X[i][1]\n\nans = 0\nfor i in range(len(X)):\n    if X[i][0] == 0:\n        # X[i] ... X[i + (K - 1) * 2 + 1]\n        ans = max(ans, sm[min(i + (K - 1) * 2 + 2, len(X))] - sm[i])\n        pass\n    else:\n        # X[i] ... 
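# --- Why two arrays of length 2*n - 1 suffice (illustrative check) ---
# In the N-Queens record above, every "\" diagonal shares row - col and
# every "/" diagonal shares row + col; negative row - col indices simply
# wrap to the unused upper half of the length-(2n-1) list.
n = 4
main_diag = {(r, c): r - c for r in range(n) for c in range(n)}
anti_diag = {(r, c): r + c for r in range(n) for c in range(n)}
assert main_diag[(1, 3)] == main_diag[(0, 2)] == -2   # same "\" diagonal
assert anti_diag[(1, 2)] == anti_diag[(2, 1)] == 3    # same "/" diagonal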
X[i + K * 2]\n ans = max(ans, sm[min(i + K * 2 + 1, len(X))] - sm[i])\nprint(ans)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03074/s491803500.py","file_name":"s491803500.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"74880807976","text":"import os\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nimport dtim\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\nsetup(\n name='dtim',\n packages=['dtim'],\n version=dtim.__version__,\n description='datetime improvement for standard datetime.datetime',\n long_description=read('README.rst'),\n author='Cuong Manh Le',\n author_email='cuong.manhle.vn@gmail.com',\n license='BSD',\n url='https://github.com/cuonglm/dtim',\n keywords=['datetime']\n)\n","repo_name":"cuonglm/dtim","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} +{"seq_id":"73375429416","text":"class Solution(object):\n def plusOne(self, digits):\n for i in range(len(digits) - 1, -1, -1):\n if digits[i] == 9:\n digits[i] = 0\n if i == 0:\n digits.insert(0, 1)\n else:\n digits[i] += 1\n return digits\n return digits\n","repo_name":"Francoforever/my-leetcode-solutions","sub_path":"66-Plus One.py","file_name":"66-Plus One.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"24697002728","text":"#!/usr/bin/python\n\nimport sys\n\n# how to use:\n# Run './hostname_to_azurename.py '\n# Prints the corresponding instance name found in azure portal\n\ndef main(hostname):\n hostname_backwards = str(hostname[::-1])\n total_number = 36 * int(hostname_backwards[1])\n first_char = hostname_backwards[0]\n if first_char.isalpha():\n total_number += ord(first_char) - 55\n else:\n total_number += int(first_char)\n print(hostname[:26] + \"-vmss0_\" + str(total_number))\n\nif __name__ == \"__main__\":\n main(sys.argv[1])\n","repo_name":"HSLdevcom/digitransit-mesos-deploy","sub_path":"scripts/hostname_to_azurename.py","file_name":"hostname_to_azurename.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"13387212461","text":"import warnings\r\nwarnings.filterwarnings('ignore')\r\nimport os\r\nimport cv2\r\nimport numpy\r\nimport mahotas\r\nfrom scipy.stats import skew,kurtosis\r\nfrom skimage.feature import graycomatrix,graycoprops\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras.models import Sequential,Model\r\nfrom keras.layers import Dense\r\nfrom tensorflow.keras import optimizers\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import classification_report,confusion_matrix,ConfusionMatrixDisplay,plot_confusion_matrix\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nimport pickle\r\npath=os.getcwd()\r\n\r\ndata_path=os.path.join(path,'ucmd')\r\n\r\ndata_list=os.listdir(data_path)\r\ncolor_feature=[]\r\ntexture_feature=[]\r\nfeature=[]\r\nlabel=[]\r\ncount=0\r\nlabel_name=[]\r\n\r\nfor i in data_list:\r\n \r\n subfolder_path=os.path.join(data_path,i)\r\n \r\n subfolder_list=os.listdir(subfolder_path)\r\n for j in subfolder_list:\r\n \r\n image_path=os.path.join(subfolder_path,j)\r\n \r\n if j.endswith('.db'):\r\n 
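# --- Small trace of the run-length + prefix-sum trick above ---
# Runs of equal bits are encoded as [value, length]; prefix sums then
# give any window of runs in O(1): sm[j] - sm[i] sums runs i..j-1.
S = [0, 0, 1, 0, 1, 1]
X = []
for c in S:
    if X and X[-1][0] == c:
        X[-1][1] += 1
    else:
        X.append([c, 1])
print(X)            # [[0, 2], [1, 1], [0, 1], [1, 2]]

sm = [0]
for _, length in X:
    sm.append(sm[-1] + length)
print(sm)           # [0, 2, 3, 4, 6]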
continue\r\n        print(image_path)\r\n        label_name.append(image_path)\r\n        image=cv2.imread(image_path)\r\n        cv2.imshow('image',image)\r\n        cv2.waitKey(10)\r\n        blue_plane=image[:,:,0]\r\n        green_plane=image[:,:,1]\r\n        red_plane=image[:,:,2]\r\n        mean_blue=numpy.mean(blue_plane)\r\n        mean_green=numpy.mean(green_plane)\r\n        mean_red=numpy.mean(red_plane)\r\n        # print(mean_blue,mean_green,mean_red)\r\n\r\n        var_blue=numpy.var(blue_plane)\r\n        var_green=numpy.var(green_plane)\r\n        var_red=numpy.var(red_plane)\r\n        # print(var_blue,var_green,var_red)\r\n\r\n        skew_blue=skew(blue_plane.reshape(-1))\r\n        skew_green=skew(green_plane.reshape(-1))\r\n        skew_red=skew(red_plane.reshape(-1))\r\n        # print(skew_blue,skew_green,skew_red)\r\n\r\n        kurtosis_blue=kurtosis(blue_plane.reshape(-1))\r\n        kurtosis_green=kurtosis(green_plane.reshape(-1))\r\n        kurtosis_red=kurtosis(red_plane.reshape(-1))\r\n        # print(kurtosis_blue,kurtosis_green,kurtosis_red)\r\n        hsv=cv2.cvtColor(image,cv2.COLOR_BGR2HSV)\r\n        cv2.imshow('hsv',hsv)\r\n        cv2.waitKey(10)\r\n        hue_plane=hsv[:,:,0]\r\n        saturation_plane=hsv[:,:,1]\r\n        value_plane=hsv[:,:,2]\r\n        mean_hue=numpy.mean(hue_plane)\r\n        mean_saturation=numpy.mean(saturation_plane)\r\n        mean_value=numpy.mean(value_plane)\r\n        # print(mean_hue,mean_saturation,mean_value)\r\n\r\n        var_hue=numpy.var(hue_plane)\r\n        var_saturation=numpy.var(saturation_plane)\r\n        var_value=numpy.var(value_plane)\r\n        # print(var_hue,var_saturation,var_value)\r\n\r\n        skew_hue=skew(hue_plane.reshape(-1))\r\n        skew_saturation=skew(saturation_plane.reshape(-1))\r\n        skew_value=skew(value_plane.reshape(-1))\r\n        # print(skew_hue,skew_saturation,skew_value)\r\n\r\n        kurtosis_hue=kurtosis(hue_plane.reshape(-1))\r\n        kurtosis_saturation=kurtosis(saturation_plane.reshape(-1))\r\n        kurtosis_value=kurtosis(value_plane.reshape(-1))\r\n        # print(kurtosis_hue,kurtosis_saturation,kurtosis_value)\r\n        lab=mahotas.colors.rgb2lab(image)\r\n        cv2.imshow('lab',lab)\r\n        cv2.waitKey(10)\r\n        l_plane=lab[:,:,0]\r\n        a_plane=lab[:,:,1]\r\n        b_plane=lab[:,:,2]\r\n        mean_l=numpy.mean(l_plane)\r\n        mean_a=numpy.mean(a_plane)\r\n        mean_b=numpy.mean(b_plane)\r\n        # print(mean_l,mean_a,mean_b)\r\n\r\n        var_l=numpy.var(l_plane)\r\n        var_a=numpy.var(a_plane)\r\n        var_b=numpy.var(b_plane)\r\n        # print(var_l,var_a,var_b)\r\n\r\n        skew_l=skew(l_plane.reshape(-1))\r\n        skew_a=skew(a_plane.reshape(-1))\r\n        skew_b=skew(b_plane.reshape(-1))\r\n        # print(skew_l,skew_a,skew_b)\r\n\r\n        kurtosis_l=kurtosis(l_plane.reshape(-1))\r\n        kurtosis_a=kurtosis(a_plane.reshape(-1))\r\n        kurtosis_b=kurtosis(b_plane.reshape(-1))\r\n        # print(kurtosis_l,kurtosis_a,kurtosis_b)\r\n        color_feature=[mean_blue,mean_green,mean_red,var_blue,var_green,var_red,\r\n                       skew_blue,skew_green,skew_red,kurtosis_blue,kurtosis_green,kurtosis_red,\r\n                       mean_hue,mean_saturation,mean_value,var_hue,var_saturation,var_value,\r\n                       skew_hue,skew_saturation,skew_value,kurtosis_hue,kurtosis_saturation,kurtosis_value,\r\n                       mean_l,mean_a,mean_b,var_l,var_a,var_b,skew_l,skew_a,skew_b,kurtosis_l,kurtosis_a,kurtosis_b]\r\n        \r\n        gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\r\n        cv2.imshow('gray',gray)\r\n        cv2.waitKey(10)\r\n        glcm=graycomatrix(gray,distances=[5],angles=[0],levels=256,symmetric=True,normed=True)\r\n        # print(glcm)\r\n        correlation=graycoprops(glcm,'correlation')[0,0]\r\n        energy=graycoprops(glcm,'energy')[0,0]\r\n        contrast=graycoprops(glcm,'contrast')[0,0]\r\n        homogeneity=graycoprops(glcm,'homogeneity')[0,0]\r\n        # print(correlation,energy,contrast,homogeneity)\r\n        texture_feature=[correlation,energy,contrast,homogeneity]\r\n        
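# --- Standalone GLCM example (mirrors the texture features above) ---
# graycomatrix/graycoprops are from scikit-image; the 4x4 image with 4
# grey levels is synthetic, chosen so the co-occurrence matrix is small.
import numpy as np
from skimage.feature import graycomatrix, graycoprops

tiny = np.array([[0, 0, 1, 1],
                 [0, 0, 1, 1],
                 [0, 2, 2, 2],
                 [2, 2, 3, 3]], dtype=np.uint8)

glcm = graycomatrix(tiny, distances=[1], angles=[0], levels=4,
                    symmetric=True, normed=True)
for prop in ("correlation", "energy", "contrast", "homogeneity"):
    print(prop, graycoprops(glcm, prop)[0, 0])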
color_feature.extend(texture_feature)\r\n feature.append(color_feature)\r\n label.append(count)\r\n count=count+1\r\ncv2.destroyAllWindows()\r\nfeature=numpy.array(feature)\r\nprint(feature.shape)\r\n# print(label)\r\nlabel=numpy.array(label)\r\nxtrain,xtest,ytrain,ytest=train_test_split(feature,label,test_size=0.3,random_state=42)\r\nprint(xtrain.shape)\r\nprint(xtest.shape)\r\nytrain1=[]\r\nytest1=[]\r\nfor i in ytrain:\r\n empty_list=[0,0,0,0]\r\n empty_list[i]=1\r\n ytrain1.append(empty_list)\r\nfor i in ytest:\r\n empty_list=[0,0,0,0] \r\n empty_list[i]=1\r\n ytest1.append(empty_list)\r\nytrain1=numpy.array(ytrain1)\r\nytest1=numpy.array(ytest1)\r\nmodel=Sequential()\r\nmodel.add(Dense(200,input_dim=40,activation='relu'))\r\nmodel.add(Dense(4,activation='softmax'))\r\nmodel.compile(loss='categorical_crossentropy',optimizer=optimizers.Adam(lr=0.018),metrics=['accuracy'])\r\nhistory=model.fit(xtrain,ytrain1,validation_data=(xtest,ytest1),epochs=20,verbose=2,batch_size=8)\r\nloss,accuracy=model.evaluate(xtest,ytest1)\r\nprint(loss,accuracy)\r\nmodel.summary()\r\nlayer_name='dense_1'\r\nnew_model=Model(inputs=model.input,outputs=model.get_layer(layer_name).output)\r\nann_feature=new_model.predict(feature)\r\nplt.title('training progress-Loss')\r\nplt.plot(history.history['loss'],label='train')\r\nplt.plot(history.history['val_loss'],label='test')\r\nplt.xlabel('epochs')\r\nplt.ylabel('loss')\r\nplt.legend()\r\nplt.show()\r\nplt.title('training progress-accuracy')\r\nplt.plot(history.history['accuracy'],label='train')\r\nplt.plot(history.history['val_accuracy'],label='test')\r\nplt.xlabel('epochs')\r\nplt.ylabel('accuracy')\r\nplt.legend()\r\nplt.show()\r\nmodel_json=model.to_json()\r\nwith open('ann_model.json','w') as json_file:\r\n json_file.write(model_json)\r\nmodel.save_weights('ann_model.h5')\r\n\r\nprediction=model.predict(xtest)\r\ny_list=[]\r\nfor i in prediction:\r\n num_list=list(i)\r\n output=max(num_list)\r\n ind=num_list.index(output)\r\n y_list.append(ind)\r\ncm=confusion_matrix(ytest,y_list)\r\ndisplay=ConfusionMatrixDisplay(confusion_matrix=cm,display_labels=['airport','bareland','playground','railwaystation'])\r\ndisplay.plot()\r\nplt.title('ann')\r\nplt.show()\r\nprint(classification_report(ytest,y_list))\r\n\r\nrf_model=RandomForestClassifier()\r\nrf_model.fit(xtrain,ytrain)\r\nprediction=rf_model.predict(xtest)\r\nprint(classification_report(ytest,prediction))\r\nplot_confusion_matrix(rf_model,xtest,ytest)\r\nplt.title('random forest')\r\nplt.show()\r\npickle.dump(rf_model,open('rf_model.pkl','wb'))\r\nrf_feature=rf_model.predict_proba(feature)\r\npickle.dump(rf_feature,open('rf_feature.pkl','wb'))\r\npickle.dump(label_name,open('label_name.pkl','wb'))\r\npickle.dump(ann_feature,open('ann_feature.pkl','wb'))\r\n\r\n\r\n \r\n \r\n\r\n\r\n\r\n\r\n","repo_name":"anaghamkumar123/sensing_image","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"34585703660","text":"'''\r\n描述\r\n编写程序,求给定字符串s的亲朋字符串s1。\r\n\r\n亲朋字符串s1定义如下:给定字符串s的第一个字符的ASCII值加第二个字符的ASCII值,得到第一个亲朋字符; \r\n给定字符串s的第二个字符的ASCII值加第三个字符的ASCII值,得到第二个亲朋字符;依此类推,直到给定字符串s的倒数第二个字符。\r\n亲朋字符串的最后一个字符由给定字符串s的最后一个字符ASCII值加s的第一个字符的ASCII值。\r\n\r\n输入\r\n输入一行,一个长度大于等于2,小于等于100的字符串。字符串中每个字符的ASCII值不大于63。\r\n输出\r\n输出一行,为变换后的亲朋字符串。输入保证变换后的字符串只有一行。\r\n样例输入\r\n1234\r\n样例输出\r\ncege\r\n'''\r\n\r\ns=input()\r\nn=len(s)\r\ns1=''\r\nfor i in range(n-1):\r\n 
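# --- The manual one-hot loops above, as numpy indexing (illustrative) ---
import numpy as np
labels = np.array([0, 2, 1, 3])                # four classes, as in the record
one_hot = np.zeros((labels.size, 4), dtype=int)
one_hot[np.arange(labels.size), labels] = 1
print(one_hot)                                 # rows: [1 0 0 0], [0 0 1 0], ...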
s1+=chr(ord(s[i])+ord(s[i+1]))\r\ns1+=chr(ord(s[-1])+ord(s[0]))\r\nprint(s1)\r\n\r\n","repo_name":"gxmls/Python_NOI","sub_path":"NOI1-7-05.py","file_name":"NOI1-7-05.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"19113248757","text":"import sys\nfrom datetime import datetime\n\nimport pymysql\nfrom PyQt5.QtGui import QPainter\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QTableWidgetItem, QHeaderView\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom ExceptionUi import Ui_ExceptionWindow\nfrom PyQt5.QtChart import QChartView, QChart, QLineSeries, QCategoryAxis, QValueAxis\n\n\nclass ExceptionWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.ui = Ui_ExceptionWindow()\n self.ui.setupUi(self)\n self.ui.pushButton.clicked.connect(self.load)\n self.setWindowFlag(QtCore.Qt.FramelessWindowHint) # 消除周边的框框\n self.setAttribute(QtCore.Qt.WA_TranslucentBackground)\n\n # 自动调整列宽为填满表格\n header = self.ui.tableWidget.horizontalHeader()\n header.setSectionResizeMode(QHeaderView.Stretch)\n self.ui.tableWidget.setAlternatingRowColors(True) # 使表格颜色交错显示\n\n # 创建图表和图表视图\n self.chart = QChart()\n self.chart_view = QChartView(self.chart)\n self.chart_view.setRenderHint(QPainter.Antialiasing)\n\n layout = QtWidgets.QVBoxLayout(self.ui.graphicsView)\n layout.addWidget(self.chart_view)\n\n def load(self):\n try:\n conn = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"42003717\", db=\"zhou\")\n cursor = conn.cursor()\n # 执行查询语句,获取所有数据\n cursor.execute(\"SELECT * FROM yushi\")\n data = cursor.fetchall()\n # 清空表格\n self.ui.tableWidget.clear()\n self.ui.tableWidget.setRowCount(0)\n # 设置表格的列数和列名\n self.ui.tableWidget.setColumnCount(len(data[0])) # 假设数据的每行具有相同的列数\n column_names = ['温度', '光照', '湿度', '水温', '声音'] # 数据库中的列名\n self.ui.tableWidget.setHorizontalHeaderLabels(column_names)\n # 填充数据到表格\n for row_number, row_data in enumerate(data):\n self.ui.tableWidget.insertRow(row_number)\n for column_number, column_data in enumerate(row_data):\n item = QTableWidgetItem(str(column_data))\n self.ui.tableWidget.setItem(row_number, column_number, item)\n # 创建多个折线系列\n series_list = []\n for column_number in range(5): # 假设有5个变量\n series = QLineSeries()\n series_list.append(series)\n # 将数据添加到折线系列\n for row_number, row_data in enumerate(data):\n x_value = row_number # 直接使用行数作为横轴值\n for column_number, column_data in enumerate(row_data[:5]): # 假设只绘制前5个变量\n y_value = float(column_data) # 将字符串转换为浮点数\n if y_value is not None:\n point = QtCore.QPointF(x_value, y_value)\n series_list[column_number].append(point)\n # 将系列添加到图表\n for series in series_list:\n self.chart.addSeries(series)\n # 设置图表标题和轴标签\n self.chart.setTitle(\"多变量折线图\")\n self.chart.setAnimationOptions(QChart.AllAnimations)\n self.chart.createDefaultAxes()\n self.chart.axisX().setTitleText(\"计数\")\n self.chart.axisY().setTitleText(\"数值\")\n except Exception as e:\n print(e)\n cursor.close()\n conn.close()\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = ExceptionWindow()\n window.show()\n sys.exit(app.exec_())","repo_name":"zhouzhoi/pyqt5-system","sub_path":"Exception.py","file_name":"Exception.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18110508219","text":"from collections import deque\n\nn=int(input())\nque=deque()\n\nfor i in range(n):\n command=input()\n\n if command==\"deleteFirst\":\n 
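# --- Worked check of the "friend string" rule above (illustrative) ---
# Each output char is chr(ord(s[i]) + ord(s[i+1])), and the last output
# char wraps around: chr(ord(s[-1]) + ord(s[0])). Verifies the sample I/O.
s = "1234"
pairs = zip(s, s[1:] + s[0])     # ('1','2'), ('2','3'), ('3','4'), ('4','1')
print("".join(chr(ord(a) + ord(b)) for a, b in pairs))   # cege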
que.popleft()\n elif command==\"deleteLast\":\n que.pop()\n else:\n command,num=command.split()\n num=int(num)\n if command==\"insert\":\n que.appendleft(num)\n else:\n if num in que:\n que.remove(num)\n\nprint(*que,sep=\" \")\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02265/s307168789.py","file_name":"s307168789.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"23900776261","text":"from pdb import set_trace\nimport pygame\nfrom pygame.locals import QUIT\n\nfrom player_actor import PlayerActor\nfrom sample_actors import HWall, VWall, Spawner\nfrom collide import handle_collisions\nfrom config import *\n\n\ndef main():\n # Initialise screen\n pygame.init()\n screen = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.display.set_caption('Gogo2')\n\n # Fill background\n background = pygame.Surface(screen.get_size())\n background = background.convert()\n background.fill((0, 0, 0))\n background_orig = background.copy() # in case static sprites change\n\n dynamic_group = pygame.sprite.RenderPlain()\n # Initialise sprites\n player1 = PlayerActor(topleft=screen.get_rect().center, groups=(dynamic_group,))\n\n static_group = pygame.sprite.RenderPlain()\n create_walls(static_group)\n spawner = Spawner(topleft=(232, 232), target_groups=(dynamic_group,), groups=(static_group,))\n\n static_group.draw(background)\n\n # Blit everything to the screen\n screen.blit(background, (0, 0))\n pygame.display.flip()\n\n # Initialise clock\n clock = pygame.time.Clock()\n\n # Event loop\n while 1:\n # Make sure game doesn't run at more than 60 frames per second\n clock.tick(30)\n\n for event in pygame.event.get():\n if event.type == QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):\n return\n elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:\n player1 = PlayerActor(topleft=screen.get_rect().center, groups=(dynamic_group,))\n elif event.type in [pygame.KEYDOWN, pygame.KEYUP]:\n player1.process_input((event.type, event.key))\n\n for sp in dynamic_group.sprites():\n screen.blit(background, sp.rect, sp.rect)\n spawner.update()\n dynamic_group.update()\n handle_collisions(screen.get_rect(), (static_group, dynamic_group))\n dynamic_group.draw(screen)\n pygame.display.flip()\n\n\ndef create_walls(static_group):\n g = (static_group, )\n VWall((370, 200), g)\n VWall((370, 232), g)\n VWall((370, 264), g)\n VWall((370, 296), g)\n HWall((378, 200), g)\n HWall((378, 320), g)\n VWall((100, 200), g)\n VWall((100, 232), g)\n VWall((100, 264), g)\n VWall((100, 296), g)\n HWall((76, 192), g)\n HWall((76, 328), g)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"shaharyi/gogo2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18262146209","text":"N, M = map(int, input().split())\ns = [0] * M\nc = [0] * M\n\nfor i in range(M):\n s[i], c[i] = map(int, input().split())\n\nran = []\nfor i in range(10 ** (N - 1), 10 ** N):\n ran.append(i)\nif N == 1:\n ran.append(0)\n\nran.sort()\nminimum = 10 ** N\nfor r in ran:\n st = str(r)\n ok = True\n for j in range(M):\n if st[s[j] - 1] != str(c[j]):\n ok = False\n if ok == True:\n minimum = min(minimum, r)\n break\nif minimum == 10 ** N:\n print(-1)\nelse:\n 
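# --- Why collections.deque fits the list-command record above ---
# appendleft/popleft/pop are O(1) at either end; only remove() is O(n).
from collections import deque
q = deque()
q.appendleft(5)      # insert 5 -> [5]
q.appendleft(2)      # insert 2 -> [2, 5]
q.appendleft(3)      # insert 3 -> [3, 2, 5]
q.remove(5)          # delete 5 -> [3, 2]
q.popleft()          # deleteFirst -> [2]
print(*q, sep=" ")   # 2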
print(minimum)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02761/s218895980.py","file_name":"s218895980.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"4637059337","text":"import pygame\nimport math\nimport numpy as np\nclass Point:\n\tdef __init__(self, x, y):\n\t\tself.x = x\n\t\tself.y = y\n\tdef get_point(self):\n\t\treturn [self.x*512,512-self.y*512]\n\tdef plot(self,screen):\n\t\tscreen.fill(pygame.Color(255,0,0),(self.get_point(),(4,4)))\n\nclass Line:\n\tdef __init__(self):\n\t\tself.a = np.random.rand()*2-1\n\t\tself.b = np.random.rand()*2-1\n\t\tself.lr = 0.7\n\n\tdef get_x(self,y):\n\t\treturn (y-self.b)/self.a\n\n\tdef get_y(self,x):\n\t\treturn self.a*x+self.b\n\n\tdef plot(self,screen):\n\t\tp1 = Point(0,0)\n\t\tp2 = Point(0,0)\n\t\tif (abs(self.a)>abs(self.b)):\n\t\t\tp1.x = 0\n\t\t\tp1.y = self.get_y(p1.x)\n\t\t\tp2.x = 1\n\t\t\tp2.y = self.get_y(p2.x)\n\t\telse:\n\t\t\tp1.y = 0\n\t\t\tp1.x = self.get_x(p1.y)\n\t\t\tp2.y = 1\n\t\t\tp2.x = self.get_x(p2.y)\n\t\tpygame.draw.aaline(screen,pygame.Color(0,0,255),p1.get_point(),p2.get_point(),8)\n\n\tdef fit(self,dataset):\n\t\tdelta_a = 0\n\t\tdelta_b = 0\n\n\t\tfor p in dataset:\n\t\t\tdelta_a += 2*(self.get_y(p.x)-p.y)*p.x\n\t\t\tdelta_b += 2*(self.get_y(p.x)-p.y)\n\n\t\tself.a-=self.lr*(delta_a/len(dataset))\n\t\tself.b-=self.lr*(delta_b/len(dataset))","repo_name":"akn0717/Linear-Regression","sub_path":"Line.py","file_name":"Line.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"25663699955","text":"from scraping import Inflacion, Ventas\nfrom DataSql import *\nimport statistics\nclass Menu:\n\n @staticmethod\n def inicio():\n \n print(\"========== Bienvenido ===========\")\n print(\"1 - Obtener información - Scraping\")\n print(\"2 - Ver o cargar los datos a la base de datos\")\n print(\"3 - Procesamiento de datos estadísticos\")\n\n\n opcion = int(input(\"Ingrese la opción y presione enter: \"))\n\n if(opcion == 1):\n Menu.scraping()\n elif(opcion == 2):\n Menu.data_sql()\n elif(opcion == 3):\n statistics.analysis()\n\n @staticmethod\n def scraping():\n print(\"Aguarde un momento, se recopilará la información estática y dinámica...\")\n Ventas.scraping()\n print(\"Se ha guardado el archivo ventas.csv en la raiz del proyecto\")\n print(\"Se abrirá una ventana del navegador para recopilar la información dinámica...\")\n Inflacion()\n print(\"Se ha guardo con éxito el archivo inflación.csv... 
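# --- The gradient step in Line.fit above, dependency-free (sketch) ---
# For y_hat = a*x + b, the loop averages d/da = 2*(y_hat - y)*x and
# d/db = 2*(y_hat - y); repeated steps recover y = 2x on this toy data.
pts = [(0.0, 0.0), (1.0, 2.0), (2.0, 4.0)]
a, b, lr = 0.0, 0.0, 0.1
for _ in range(200):
    da = sum(2 * (a * x + b - y) * x for x, y in pts) / len(pts)
    db = sum(2 * (a * x + b - y) for x, y in pts) / len(pts)
    a, b = a - lr * da, b - lr * db
print(round(a, 2), round(b, 2))   # approximately 2.0 and 0.0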
volviendo al menu principal\")\n Menu.inicio()\n @staticmethod\n def data_sql():\n print(\"1 - Cargar los datos del scraping a la base de datos\")\n print(\"2 - Ver los datos existentes en la base de datos\\n\")\n print(\"0 - Volver al menu principal...\")\n opcion = int(input(\"Ingrese la opción y presione enter: \"))\n if (opcion == 1):\n print(\"Cargando los datos de inflación....\")\n InflacionSQL().csv_to_sql()\n print(\"Cargando los datos de ventas....\")\n VentasSQL().csv_to_sql()\n Menu.data_sql()\n elif (opcion == 2):\n print(\"Datos existentes de inflación:\\n\")\n data = InflacionSQL().getData()\n for d in data:\n print(d)\n print(\"Datos existentes de ventas:\\n\")\n data = InflacionSQL().getData()\n for d in data:\n print(d)\n Menu.inicio()\n else:\n Menu.inicio()\n\nMenu.inicio()\n","repo_name":"ispc-programador2022/SSTTT5","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"12611553732","text":"import argparse\nimport datetime\nimport multiprocessing\nimport os\nimport time # for simple profiling\nfrom decimal import *\n\n# https://biopython.org/ for parsing structural information\nfrom Bio.PDB import FastMMCIFParser, MMCIF2Dict\n\n# https://www.sqlalchemy.org/ for generating SQL database\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import exc\nfrom sqlalchemy import literal\nfrom sqlalchemy.orm import sessionmaker\nfrom SQLalchemy_declarative import Base, DCDistance, DCSummary\n\n\ndef main():\n \"\"\"\n This main function interprets the task and assigns analysis processes to each core.\n Data is written to a database via SQLalchemy.\n No return.\n \"\"\"\n\n task = Task(args) # generates a task object containing job information\n task.define_queue() # defines the total queue of .cif PDB structure files to be analysed\n\n # Assign multiprocessing. Structure from https://sebastianraschka.com/Articles/2014_multiprocessing.html\n output = multiprocessing.Queue()\n processes = [multiprocessing.Process(target=pdb_process, args=(task, core, output)) for core in\n range(task.cores)] # split total file queue and assign processes with pdb_process function to cores\n for p in processes:\n p.start()\n results = [output.get() for p in processes]\n for result in results:\n print(result)\n for p in processes:\n p.join()\n print(processes)\n\n\nclass Task: # Class managing information about the run itself, e.g. queue, parameters, arguments\n def __init__(self, args): # loads arguments from textfile\n job_filepath = args.job # filepath of textfile with arguments, defined by argparse args.job\n\n # Interprets input file. 
Structure from https://stackoverflow.com/questions/1305532/convert-nested-python-dict-to-object\n with open(job_filepath, \"r\") as job_fh:\n for row in job_fh:\n arg, value = row.replace(\"\\n\", \"\").split(\"\\t\")\n if arg == \"cutoff\":\n value = (float(value))\n elif arg in [\"chunksize\", \"chunksize_offset\", \"flush_offset\", \"modellimit\", \"cores\", \"filesize_limit\"]:\n value = int(value)\n elif arg in [\"target_residue\", \"target_atom\"]:\n value = value.upper().replace(\" \", \"\").replace('\"', \"\")\n value = value.split(\",\")\n if arg in [\"target_NT\", \"distance_CT\", \"distance_NT\", \"permit_chain_identity\", \"verbose\", \"quiet\",\n \"appending\"]:\n value = (value == \"TRUE\")\n self.__dict__[arg] = value\n\n def define_queue(self):\n if self.appending:\n return self.define_partial_queue()\n\n file_list = []\n for root, dirs, files in os.walk(self.pdb_database):\n for file in files:\n # Bugfix - Creating the pdb object here makes garbage collection impossible and RAM use explodes. Create during task only\n file_list.append((root, file))\n\n self.file_queue = []\n for i in range(0, self.cores):\n self.file_queue.append(file_list[i:len(file_list):self.cores])\n\n print(\"File queue of {} files split across {} cores.\".format(len(file_list), self.cores))\n return True\n\n # This code allows for analysis of new structures without reanalysing the entire local PDB copy\n def define_partial_queue(self): # TODO implement modified timestamp\n engine = create_engine(self.dc_db)\n Base.metadata.bind = engine\n db_session = sessionmaker(bind=engine)\n session = db_session()\n\n file_list = []\n dc_pass_count = 0\n with open(self.dc_pass_log, \"w+\") as outfile:\n for root, dirs, files in os.walk(self.pdb_database):\n for file in files:\n # Bugfix - Creating the pdb object here makes garbage collection impossible and RAM use explodes. Create during task only\n file_id = file.replace(\".cif\", \"\")\n q = session.query(DCSummary).filter(DCSummary.pdb_id == file_id)\n # print(session.query(q.exists()))\n if (session.query(literal(True)).filter(q.exists()).scalar()):\n outfile.write(file_id + \"\\n\")\n dc_pass_count += 1\n else:\n file_list.append((root, file))\n\n self.file_queue = []\n for i in range(0, self.cores):\n self.file_queue.append(file_list[i:len(file_list):self.cores])\n\n print(\"Partial file queue of {} files, skipped {} files. Split across {} cores.\".format(len(file_list),\n dc_pass_count,\n self.cores))\n session.close()\n return True\n\n\n# This is the target function for multiprocessing which handles analysis of the structure file queue\ndef pdb_process(task, core, output):\n process_start_time = time.time() # for simple profiling\n\n chunksize = task.chunksize + task.chunksize_offset * core # chunksize: how many files to analyse before commiting data to the database\n\n # assigning SQLalchemy connection\n engine = create_engine(task.dc_db)\n Base.metadata.bind = engine\n db_session = sessionmaker(bind=engine)\n session = db_session()\n str_parser = FastMMCIFParser(QUIET=1)\n\n print(\"Connected core %i, chunksize %i, offset %i\" % (core, task.chunksize, task.chunksize_offset * core))\n\n # detail of console output\n verbose = task.verbose\n quiet = task.quiet\n\n # buffer to hold data for submission to database and if it has been submitted successfully. 
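# --- The queue striping used in define_queue above (illustrative) ---
# Worker i receives file_list[i::cores]; chunk sizes differ by at most 1.
files = list(range(10))
cores = 3
queues = [files[i::cores] for i in range(cores)]
print(queues)   # [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]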
Handles potential of locked database while writing from parallel process\n buffer = []\n flush = True\n flush_offset = task.flush_offset\n flush_offset_count = 0\n file_queue = task.file_queue[core]\n\n # loop to analyse individual structure files from file queue\n for pdb_count, (root, file) in enumerate(file_queue):\n pdb = PDB(os.path.join(root, file),\n core) # generate a PDB object to hold all relevant information for a single parsed structure file\n task_summary = \"\"\n task_summary = task_summary + (\"Processing file %s on core %i\" % (pdb.path, core))\n\n # checks before structure analysis, including successful fast structure parsing\n if pdb.filesize > task.filesize_limit: # throw out large files. TODO: Seperate filesize due to large assembly from structure factors\n task_summary = task_summary + \"\\n\\tSize abort\"\n pdb.pass_filesize = False\n pdb.abort = True\n print(task_summary)\n else:\n if task.verbose: task_summary = task_summary + \"\\n\\tSize pass\"\n pdb.pass_filesize = True\n try:\n pdb.structure = str_parser.get_structure(pdb.id, pdb.path)\n if task.verbose: task_summary = task_summary + \"\\n\\tMMCIFParser pass\"\n pdb.pass_structure = True\n except:\n task_summary = task_summary + \"\\n\\tMMCIFParser abort\"\n pdb.pass_structure = False\n pdb.abort = True\n print(task_summary)\n\n # PDB object functions analysing distance data in a structure file and assigning relevant metadata for the database\n pdb.analyse(task)\n pdb.generate_dictionary()\n pdb.assign_dictionary_data()\n pdb.sqla_convert_distances()\n\n if len(\n pdb.filtered_distances) > 0: # hits from filtered_distances are preferred the summary table (here: intermolecular hits)\n representative_distance = pdb.representative_distance_filtered()\n else: # no intermolecular hits found or no hits found at all\n representative_distance = pdb.representative_distance_unfiltered()\n if representative_distance:\n pdb.top_hit_sqla(representative_distance) # populate class with information for summary table\n\n pdb.sqla_summary() # generate entry for summary table\n\n pdb_entries_wrap = pdb.alchemy_distances, pdb.alchemy_sum # results for all distances per structure and single distance for summary table\n\n buffer.append(pdb_entries_wrap) # append result to buffer holding data for submission to database\n if len(buffer) == chunksize:\n print(\"Core %i - %i tasks in %i seconds\" % (core, pdb_count + 1, (time.time() - process_start_time)))\n flush = buffer_SQLal_dc_submission(session, buffer, core) # returns if submission to database successful\n chunksize = task.chunksize # remove offset after first time. This might not be needed\n\n if flush:\n flush_offset_count = 0\n buffer = []\n print(\"Core %i - commit successful\" % core)\n\n if not flush: # Handles incomplete submission of buffer due to busy database. 
This fix likely already made chunk_offset obsolete\n if flush_offset_count == flush_offset: # Tries to submit again every flush_offset until it succeeds\n print(\"Core %i - not flush at %i tasks with %i tasks in buffer\" % (core, pdb_count + 1, len(buffer)))\n flush_offset_count = 0\n flush = buffer_SQLal_dc_submission(session, buffer, core)\n if flush:\n buffer = []\n flush_offset_count += 1\n\n # # # some text for console\n if not verbose:\n task_summary = task_summary + \"\\nFinished task %i on core %i\\n\" % (pdb_count, core)\n if verbose:\n task_summary = (task_summary + (\"\\n\\t\\t%i\\tdistances\\t\" % len(pdb.distances) + str(pdb.distances)))\n task_summary = task_summary + \"\\n\\t\\t\\t%i of %i distances intermolecular\" % (\n pdb.inter_count, len(pdb.distances))\n task_summary = task_summary + \"\\n\\t\\t\\t%i of %i distances below cutoff\" % (\n pdb.dist_count, len(pdb.distances))\n task_summary = task_summary + \"\\n\\t\\t\\t%i distances below cutoff and intermolecular\" % (pdb.hit_count)\n task_summary = task_summary + \"\\nFinished task %i on core %i\\n\" % (pdb_count, core)\n if not quiet: print(task_summary)\n\n # Final commit for incomplete buffer at end of queue (remainder from chunk size)\n for i in range(0, 10):\n flush = buffer_SQLal_dc_submission(session, buffer, core) # final commit attempted 10 times\n if not flush:\n time.sleep(60)\n print(\"Failed to commit final chunk of size %i on core %i. Waiting 60s\" % (len(buffer), core))\n else:\n print(\"Committed final chunk of size %i on core %i.\" % (len(buffer), core))\n break\n if not flush:\n print(\"Failed to flush after 10 min. Attempting single submission\")\n for i in range(0, 10):\n buffer = single_buffer_SQLal_dc_submission(session, buffer, core) # final commit attempted 10 times\n if len(buffer) == 0:\n flush = True\n else:\n flush = False\n\n if not flush:\n time.sleep(60)\n print(\"Failed to single commit, final chunk of size %i on core %i. Waiting 60s\" % (len(buffer), core))\n else:\n print(\"Committed all chunks on core %i.\" % (core))\n break\n if not flush: # writes error to logfile upon failure to commit final entries after 2*10 attempts\n with open(task.dc_error_log, \"a+\") as outfile:\n for entry in buffer:\n outfile.write(\"{}\\t{}\".format(entry[1].pdb_id, core))\n\n output.put(\"--- Core %s finished %s tasks in %s seconds ---\" % (\n core, len(file_queue), (time.time() - process_start_time)))\n\n\ndef buffer_SQLal_dc_submission(session, buffer, core): # commits entries in buffer to database\n \"\"\"\n Args:\n session: SQLalchemy session connecting to database\n buffer: List containing pdb_wrap tuples\n Returns:\n bool: submission to database successful\n \"\"\"\n\n # # Add entries to session\n for pdb_wrap in buffer:\n for alchemy_entry in pdb_wrap[0]: # [0]: pdb.alchemy_entries\n session.add(alchemy_entry)\n session.add(pdb_wrap[1])\n\n # # Try to submit entries to database\n try:\n # session.flush()\n session.commit()\n return True\n\n # # Failure to submit when database is locked, e.g. busy (other threads submitting)\n except exc.OperationalError as e:\n # Rolls back database to try again later (see pdb_process)\n print(\"SQLalchemy exc.OperationalError on core %i, buffer size %i. Rolling back database. 
Error message: %s\" % (\n core, len(buffer), e))\n session.rollback()\n return False\n\n\ndef single_buffer_SQLal_dc_submission(session, buffer, core): # commits entries in buffer to database, entry by entry\n # # Add entries to session\n buffer2 = []\n for pdb_wrap in buffer:\n for alchemy_entry in pdb_wrap[0]: # [0]: pdb.alchemy_entries\n session.add(alchemy_entry)\n session.add(pdb_wrap[1])\n try:\n # session.flush()\n session.commit()\n except exc.OperationalError as e:\n # Rolls back database to try again later (see pdb_process)\n print(\n \"Single submission SQLalchemy exc.OperationalError on core %i, buffer size %i. Rolling back database. Full error %s\" % (\n core, len(pdb_wrap), e))\n session.rollback()\n buffer2.append(pdb_wrap)\n return buffer2\n\n\nclass PDB: # main class to handle structure interpretation, analysis, metadata\n # def __del__(self):\n # print(\"PDB deleted\")\n\n def __init__(self, filepath, core):\n self.core = core\n\n # Functionality\n self.path = filepath\n self.filesize = os.stat(filepath).st_size\n self.id = os.path.split(filepath)[1].replace(\".cif\", \"\")\n if \"-\" in filepath: # determines if pdb file was constructed from bio assembly code or not\n self.bio = int(filepath.split(\"-\")[1].split(\".\")[0])\n else:\n self.bio = None\n\n self.structure = []\n self.filtered_distances = []\n self.distances = []\n\n # Set SQL summary entry defaults\n self.pass_filesize = None\n self.pass_structure = None\n self.pass_dictionary = None\n self.pass_analysis = None\n self.hit_analysis = None\n self.abort = False\n self.dist = None\n self.cys_count = 0 # count of cysteines\n self.dist_count = 0 # count of distances below cutoff\n self.inter_count = 0 # count of intermolecular distances\n self.hit_count = 0 # count of intermolecular distances below cutoff\n\n # Set SQLalchemy distance object defaults\n self.nt = None\n self.d1_X_C = None\n self.d2_X_CA = None\n self.d3_X_N = None\n self.d4_CA_C = None\n self.d5_CA_CA = None\n self.d6_CA_N = None\n self.d7_N_C = None\n self.d8_N_CA = None\n self.d9_N_N = None\n self.x_name = None\n self.dynamic_chain = None\n self.dynamic_resn = None\n self.dynamic_resi = None\n self.dynamic_pos = None\n self.dynamic_name = None\n self.dynamic_order = None\n self.static_chain = None\n self.static_resn = None\n self.static_resi = None\n self.static_pos = None\n self.static_name = None\n self.static_order = None\n self.model_count = None\n self.static_chain_res_count = None\n self.poly_ids = None\n self.dynamic_chain_res_count = None\n self.model = None\n self.chain_count = None\n self.distances_count = None # count of all calculated distances\n self.dynamic_poly_id = None\n self.static_poly_id = None\n self.poly_id_eq = None\n self.inter = None\n self.timestamp = str(datetime.datetime.utcnow())\n\n def analyse(self, task): # calls functions to analyse distances for given structure\n self.job_id = task.jobname\n self.model_count = len(self.structure)\n\n if self.model_count > 0:\n count = 0\n for model in self.structure:\n if count < task.modellimit:\n self.define_residues(task, model)\n self.generate_distances(task)\n else:\n break\n count += 1\n self.evaluate_distances(task)\n self.average_models(count)\n self.pass_analysis = True\n\n def define_residues(self, task,\n model): # identifies 'static' (e.g. C-terminus) and 'dynamic'/target (e.g. 
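# --- The assembly-id parse in PDB.__init__ above (illustrative) ---
# File names like "1abc-2.cif" carry the biological-assembly number
# after the dash; names without a dash leave bio as None.
for name in ("1abc-2.cif", "1abc.cif"):
    bio = int(name.split("-")[1].split(".")[0]) if "-" in name else None
    print(name, bio)   # 1abc-2.cif 2 / 1abc.cif None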
Lysines) residues\n target_list, nts, cts = [], [], []\n chain_count = 0\n for chain in model:\n chain.index = chain_count\n res_count = 0 # counts resolved residues in chain\n ct, nt = None, False\n for pos, residue in enumerate(chain):\n if not residue.get_resname() in aminoacid: continue # skip heteroresidues\n if not (residue.id[0] == ' '): continue # skip heteroresidues\n\n res_count += 1\n ct = ((pos, residue)) # assigns ct to current residue until end of list, skipping heteroresidues\n if nt is False:\n nt = True\n nts.append((pos, residue, True)) # True/False: is N-terminus?\n\n # can cause duplicate errors if kept together (N-terminus = Lysine if no atoms resolved in either)\n else:\n if residue.get_resname() in task.target_residue:\n target_list.append((pos, residue, False))\n if residue.get_resname() == 'CYS': # tracks cysteines as these can be of special interest for protein production\n self.cys_count += 1\n\n chain.res_count = res_count\n if res_count > 0:\n chain_count += 1\n if ct is not None:\n cts.append(ct)\n model.chain_count = chain_count\n self.dynamic_list = target_list + nts # resets everytime\n self.static_list = cts # resets everytime\n\n def average_models(self, model_count): # determines average values for analysis of multiple models\n self.distances_count = Decimal(\"%.3f\" % (self.distances_count / model_count))\n self.dist_count = Decimal(\"%.3f\" % (self.dist_count / model_count))\n self.inter_count = Decimal(\"%.3f\" % (self.inter_count / model_count))\n self.hit_count = Decimal(\"%.3f\" % (self.hit_count / model_count))\n self.cys_count = Decimal(\"%.3f\" % (self.cys_count / model_count))\n\n def generate_distances(self, task): # calculates distances from residues in static list to residues in dynamic list\n for static_pos, static_res in self.static_list:\n for dynamic_pos, dynamic_res, nt_flag in self.dynamic_list:\n distance = Distance(self, dynamic_pos, dynamic_res, task.target_atom, nt_flag, static_pos, static_res)\n self.distances.append(distance)\n self.distances_count = len(self.distances)\n\n def evaluate_distances(self,\n task): # Pre-evaluates distances for summary table. 
These are partially unnecessary given subsequent SQL analysis of the distance table\n for distance in self.distances:\n distance.assign_cutoff(task)\n distance.assign_intra()\n distance.assign_hit()\n if not distance.intra: # More conservatively this could also be distance.hit (considering cutoff)\n self.filtered_distances.append(distance)\n\n for distance in self.distances:\n if distance.cut:\n self.dist_count += 1 # only distances below cutoff, distances_count tracks all distances\n if not distance.intra:\n self.hit_count += 1\n if not distance.intra:\n self.inter_count += 1\n\n def sqla_convert_distances(self):\n self.alchemy_distances = []\n for distance in self.distances:\n distance.assign_chain_equality() # requires dictionary to be converted first\n distance.sqlalchemy_conversion()\n self.alchemy_distances.append(distance.alchemy)\n\n def sort_distances(self):\n self.distances = sorted(self.distances)\n\n def sort_filtered_distances(self):\n self.filtered_distances = sorted(self.filtered_distances)\n\n def generate_dictionary(self):\n self.pdb_dict = MMCIF2Dict.MMCIF2Dict(self.path)\n self.name = self.convert_dictionary_data('_struct.title')\n if self.name is None:\n self.pass_dictionary = False\n else:\n self.pass_dictionary = True\n\n def assign_dictionary_data(self):\n self.bio_list = self.convert_dictionary_data('_pdbx_struct_assembly_gen.asym_id_list')\n self.head = self.convert_dictionary_data('_struct.pdbx_descriptor')\n self.genus = self.convert_dictionary_data('_entity_src_gen.pdbx_gene_src_scientific_name')\n self.genes = self.convert_dictionary_data('_entity_src_gen.pdbx_gene_src_gene')\n self.host = self.convert_dictionary_data('_entity_src_gen.pdbx_host_org_scientific_name')\n self.deposition = self.convert_dictionary_data('_pdbx_database_status.recvd_initial_deposition_date')\n self.bio_count = self.count_bio()\n self.method = self.convert_dictionary_data('_exptl.method')\n self.resolution = self.convert_dictionary_data('_refine_hist.d_res_high')\n\n self.poly_ids = self.convert_dictionary_data('_entity_poly.pdbx_strand_id')\n if self.poly_ids is not None:\n # converts string to array\n self.poly_ids = self.poly_ids.replace(\"[\", \"\").replace(\"]\", \"\").replace(\" \", \"\").split(\"'\")\n idsl = []\n for ids in self.poly_ids:\n ids = ids.split(\",\")\n ids_e = []\n for element in ids:\n if len(element) > 0:\n ids_e.append(element)\n if len(ids_e) > 0:\n idsl.append(ids_e)\n self.poly_ids = idsl\n\n def convert_dictionary_data(self, key):\n entry = str(self.pdb_dict.get(key))\n if entry == \"None\":\n entry = None\n return entry\n\n def count_bio(self): # Determines average count of chains in biological assembly\n # This is only somewhat informative as it includes 'chains' which are just heteroatoms / water /...\n entry = self.pdb_dict.get('_pdbx_struct_assembly_gen.asym_id_list')\n if type(entry) == list:\n i = 0\n for j in entry:\n i = i + len(j.replace(',', \"\"))\n i = i / len(entry)\n return Decimal(\"%.3f\" % i)\n elif isinstance(entry, str):\n return Decimal(\"%.3f\" % len(entry.replace(',', \"\")))\n return None\n\n def representative_distance_filtered(\n self): # determines a representative distance to use to populate summary table. 
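# --- MMCIF2Dict, as used by generate_dictionary above (sketch) ---
# It flattens an mmCIF file into a dict keyed by "_category.item"; the
# file path here is an assumption, and values come back as strings or
# lists depending on the Biopython version, hence the plain .get() probing.
from Bio.PDB import MMCIF2Dict

pdb_dict = MMCIF2Dict.MMCIF2Dict("/path/to/1abc.cif")
print(pdb_dict.get("_struct.title"))
print(pdb_dict.get("_exptl.method"))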
prioritizing intermolecular contacts\n        self.sort_filtered_distances()\n        # search for the first distance between non-identical chains involving a priority nucleophile\n        index = None\n        alt1index = None\n        alt2index = None\n\n        # somewhat involved way to find the \"min\" -> might be easier to split into separate tables by condition instead, then determine the min for each\n        for pos, distance in enumerate(\n                self.filtered_distances): # identifies the first filtered distance with non-equal chains\n            if not distance.poly_id_eq and distance.poly_id_eq is not None: # or (distance.ref_id_eq is False): ref_id is not reliable for biological assemblies due to reassignment of chains\n                if (distance.dynamic_res.get_resname() == \"LYS\") or distance.nt:\n                    index = pos\n                    break # break at the first priority nucleophile satisfying not distance.poly_id_eq\n                elif alt1index is None: # first one satisfying not distance.poly_id_eq which is not also a priority nucleophile\n                    alt1index = pos\n                    continue\n            elif alt2index is None: # first priority nucleophile even if the chains are equal or their equality is unknown\n                if (distance.dynamic_res.get_resname() == \"LYS\") or distance.nt:\n                    alt2index = pos\n\n        if index is None:\n            if alt1index is not None:\n                index = alt1index\n            elif alt2index is not None:\n                index = alt2index\n            else:\n                index = 0\n\n        return self.filtered_distances[index] # top_distance\n\n    def representative_distance_unfiltered(self):\n        dis_list = []\n        for distance in self.distances:\n            dis = distance.distance\n            if dis is not None:\n                dis_list.append(dis)\n        if len(dis_list) == 0:\n            return False\n        # https://stackoverflow.com/questions/2474015/getting-the-index-of-the-returned-max-or-min-item-using-max-min-on-a-list\n        index = min(range(len(dis_list)), key=dis_list.__getitem__) # finds the index of the minimum distance\n        return self.distances[index] # top_distance\n\n    def top_hit_sqla(self, top_distance): # populates information based on the top distance\n        self.dist = top_distance.distance\n        self.model = top_distance.static_res.parent.parent.id\n        self.chain_count = top_distance.static_res.parent.parent.chain_count\n        self.dynamic_resn = str(top_distance.dynamic_res.get_resname())\n        self.static_resn = str(top_distance.static_res.get_resname())\n        self.dynamic_chain = str(top_distance.dynamic_res.parent.id)\n        self.static_chain = str(top_distance.static_res.parent.id)\n        self.dynamic_resi = top_distance.dynamic_res.id[1]\n        self.dynamic_pos = top_distance.dynamic_pos\n        self.static_resi = top_distance.static_res.id[1]\n        self.dynamic_order = top_distance.dynamic_res.disordered\n        self.static_order = top_distance.static_res.disordered\n        self.static_pos = top_distance.static_pos\n        self.static_chain_res_count = top_distance.static_res.parent.res_count\n        self.dynamic_chain_res_count = top_distance.dynamic_res.parent.res_count\n        self.dynamic_poly_id = str(top_distance.dynamic_res.parent.poly_id)\n        self.static_poly_id = str(top_distance.static_res.parent.poly_id)\n        self.poly_id_eq = top_distance.poly_id_eq\n        self.nt = top_distance.nt\n        self.d1_X_C = top_distance.d1_X_C\n        self.d2_X_CA = top_distance.d2_X_CA\n        self.d3_X_N = top_distance.d3_X_N\n        self.d4_CA_C = top_distance.d4_CA_C\n        self.d5_CA_CA = top_distance.d5_CA_CA\n        self.d6_CA_N = top_distance.d6_CA_N\n        self.d7_N_C = top_distance.d7_N_C\n        self.d8_N_CA = top_distance.d8_N_CA\n        self.d9_N_N = top_distance.d9_N_N\n        self.x_name = top_distance.x_name\n        self.inter = not top_distance.intra\n\n        if top_distance.dynamic is None:\n            print(\"filtered_distances[index].dynamic is None on file %s on core %i\" % (self.id, self.core))\n            # Note this can occur for an unresolved N-terminus\n            
self.dynamic_name = None\n else:\n self.dynamic_name = str(top_distance.dynamic.get_name())\n\n if top_distance.static is None:\n self.static_name = None\n # Note this can occur for bad file, e.g. 3nso CYS'99 only SG resolved\n else:\n self.static_name = str(top_distance.static.get_name())\n # if args.verbose: print(\"hit_count\", len(self.alchemy_entries), \"dist_count\", self.dist_count, \"filtered_distances\", len(self.filtered_distances))\n\n def sqla_summary(self):\n self.alchemy_sum = DCSummary(\n abort=self.abort,\n pass_filesize=self.pass_filesize,\n filesize=self.filesize,\n pass_structure=self.pass_structure,\n pass_dictionary=self.pass_dictionary,\n pass_analysis=self.pass_analysis,\n dynamic_chain=self.dynamic_chain,\n dynamic_chain_res_count=self.dynamic_chain_res_count,\n dynamic_resn=self.dynamic_resn,\n dynamic_resi=self.dynamic_resi,\n dynamic_name=self.dynamic_name,\n dynamic_order=self.dynamic_order,\n static_chain=self.static_chain,\n static_chain_res_count=self.static_chain_res_count,\n static_resn=self.static_resn,\n static_resi=self.static_resi,\n static_pos=self.static_pos,\n static_name=self.static_name,\n static_order=self.static_order,\n model_count=self.model_count,\n model=self.model,\n name=self.name,\n head=self.head,\n genes=self.genes,\n genus=self.genus,\n host=self.host,\n deposition=self.deposition,\n method=self.method,\n resolution=self.resolution,\n bio=self.bio,\n bio_list=self.bio_list,\n bio_count=self.bio_count,\n chain_count=self.chain_count,\n pdb_id=self.id,\n distance=self.dist,\n d1_X_C=self.d1_X_C,\n d2_X_CA=self.d2_X_CA,\n d3_X_N=self.d3_X_N,\n d4_CA_C=self.d4_CA_C,\n d5_CA_CA=self.d5_CA_CA,\n d6_CA_N=self.d6_CA_N,\n d7_N_C=self.d7_N_C,\n d8_N_CA=self.d8_N_CA,\n d9_N_N=self.d9_N_N,\n nterm=self.nt,\n x_name=self.x_name,\n distances=self.distances_count,\n poly_ids=str(self.poly_ids),\n dynamic_chain_poly_id=self.dynamic_poly_id,\n static_chain_poly_id=self.static_poly_id,\n chain_poly_id_eq=self.poly_id_eq,\n inter=self.inter,\n dist_count=self.dist_count,\n inter_count=self.inter_count,\n hit_count=self.hit_count,\n cys_count=self.cys_count,\n job_id=self.job_id,\n timestamp=self.timestamp\n )\n\n\nclass Distance: # Class to handle distances between two residues\n # consider give it function to test different atoms itself instead of PDB - res-distance instead of distance?\n # def __del__(self):\n # print(\"Distance deleted\")\n\n def __init__(self, pdb, dynamic_pos, dynamic_res, dynamic_target_atoms, nt_flag, static_pos, static_res):\n self.parent_pdb = pdb\n self.static_pos = static_pos\n self.dynamic_pos = dynamic_pos\n self.dynamic_res = dynamic_res\n self.static_res = static_res\n\n # Set SQLalchemy distance object defaults\n self.nt = nt_flag\n self.d1_X_C = None\n self.d2_X_CA = None\n self.d3_X_N = None\n self.d4_CA_C = None\n self.d5_CA_CA = None\n self.d6_CA_N = None\n self.d7_N_C = None\n self.d8_N_CA = None\n self.d9_N_N = None\n self.dynamic = None\n self.x_name = None\n\n # fill values for d1 to d9\n self.calculate_distances(dynamic_target_atoms)\n # Assign static atom to C, then CA, then N, then None\n self.assign_static()\n # This code assigns the primary distance for conflicts between X and N in case of N-terminal residue (favouring N for no static atom)\n self.assign_dynamic()\n # This determines the primary distance\n self.distance = self.atom_atom_distance(self.dynamic, self.static)\n\n # adjust from array index to actual count\n self.static_pos += 1\n self.dynamic_pos += 1\n\n def calculate_distances(self, 
dynamic_target_atoms):\n        for dynamic_atom in self.dynamic_res:\n            if dynamic_atom.id in dynamic_target_atoms: # Note: a single nucleophile per residue / should be unique -> lookup table by residue instead?\n                self.x_name = dynamic_atom.id # Note: this should potentially be assigned beforehand, not by searching in dynamic_atoms (if dynamic_atom.id == self.x_name)\n                self.dynamic = dynamic_atom # Assigns X as the default for dynamic; can change in self.assign_dynamic()\n                self.d1_X_C = self.atom_residue_distance(dynamic_atom, self.static_res, 'C')\n                self.d2_X_CA = self.atom_residue_distance(dynamic_atom, self.static_res, 'CA')\n                self.d3_X_N = self.atom_residue_distance(dynamic_atom, self.static_res, 'N')\n\n            elif dynamic_atom.id == 'CA':\n                self.d4_CA_C = self.atom_residue_distance(dynamic_atom, self.static_res, 'C')\n                self.d5_CA_CA = self.atom_residue_distance(dynamic_atom, self.static_res, 'CA')\n                self.d6_CA_N = self.atom_residue_distance(dynamic_atom, self.static_res, 'N')\n\n            elif dynamic_atom.id == 'N':\n                self.d7_N_C = self.atom_residue_distance(dynamic_atom, self.static_res, 'C')\n                self.d8_N_CA = self.atom_residue_distance(dynamic_atom, self.static_res, 'CA')\n                self.d9_N_N = self.atom_residue_distance(dynamic_atom, self.static_res, 'N')\n\n    def assign_static(self): # assigns the preferred static atom (C > CA > N)\n\n        for key in ('C', 'CA', 'N'):\n            if key in self.static_res:\n                self.static = self.static_res[key]\n                break\n        else:\n            self.static = None\n            self.static_name = None\n            return False\n\n        self.static_name = self.static.get_name()\n        return True\n\n    def assign_dynamic(self): # assigns the preferred dynamic atom (also see the methods of the corresponding paper)\n        if self.nt: # for an N-terminal residue, re-evaluate 'dynamic' even if it already exists (it can be pre-assigned in calculate_distances)\n            if self.static is None:\n                try:\n                    self.dynamic = self.dynamic_res['N'] # With no distances, default self.dynamic to N before X (best nucleophile)\n                except KeyError:\n                    pass\n            else:\n                # For the chosen static atom, compare the X and N distances and prefer N when it is closer\n                # or when X is unresolved; if N is unresolved, keep the default X (same rule for C, CA and N)\n                d_x, d_n = {'C': (self.d1_X_C, self.d7_N_C),\n                            'CA': (self.d2_X_CA, self.d8_N_CA),\n                            'N': (self.d3_X_N, self.d9_N_N)}[self.static.id]\n                if d_n is not None and (d_x is None or d_x > d_n):\n                    self.dynamic = self.dynamic_res['N']\n\n        if self.dynamic is None: # If X doesn't exist and no N-terminal dynamic atom was found either, default to the other positions\n\n            if 'CA' in self.dynamic_res:\n                self.dynamic = self.dynamic_res['CA']\n                self.dynamic_name = self.dynamic.get_name()\n                return True\n            elif 'N' in self.dynamic_res:\n                self.dynamic = self.dynamic_res['N']\n                self.dynamic_name = self.dynamic.get_name()\n                return True\n            else:\n                self.dynamic = None\n                self.dynamic_name = None\n                return False\n        else:\n            self.dynamic_name = self.dynamic.get_name()\n            return True\n\n    def atom_atom_distance(self, atom_1, atom_2):\n        if atom_1 is not None and atom_2 is not None:\n            return Decimal(\"%.3f\" % (atom_1 - atom_2))\n        else:\n            return None\n\n    def atom_residue_distance(self, atom_1, residue_2, target_2):\n        \"\"\"Determines a distance, returning None if the atom is not in the residue\"\"\"\n        if target_2 in residue_2:\n            return Decimal(\"%.3f\" % (atom_1 - residue_2[target_2]))\n        else:\n            return None\n\n    def residue_residue_distance(self, residue_1, target_1, residue_2, target_2):\n        \"\"\"Determines a distance, returning None if either atom is not in its residue\"\"\"\n        if target_1 in residue_1 and target_2 in residue_2:\n            return Decimal(\"%.3f\" % (residue_1[target_1] - residue_2[target_2]))\n        return None\n\n    def assign_cutoff(self, task):\n        if self.distance is not None: # explicit None check: a distance of 0 is falsy but still valid\n            if self.distance <= task.cutoff:\n                self.cut = True\n                return True\n            else:\n                self.cut = False\n                return False\n        else:\n            self.cut = None # return None\n\n    def assign_intra(self):\n        # Chain identity\n        if self.static_res.get_parent() is self.dynamic_res.get_parent():\n            self.intra = True\n            return True\n        else:\n            self.intra = False\n            return False\n\n    def assign_hit(self):\n        if self.cut and not self.intra:\n            self.hit = True\n            return True\n        else:\n            self.hit = False\n            return False\n\n    def assign_chain_equality(self): # requires dictionary information\n        self.assign_chain_poly_id(self.get_parent_pdb().poly_ids)\n\n        if self.dynamic_res.parent.poly_id == self.static_res.parent.poly_id:\n            self.poly_id_eq = True\n        else:\n            self.poly_id_eq = False\n\n    def assign_chain_poly_id(self, poly_ids):\n        for residue in self.dynamic_res, self.static_res:\n            try:\n                residue.parent.poly_id\n                # print(\"residue.parent.poly_id already assigned\")\n            except AttributeError:\n                if poly_ids is None:\n                    residue.parent.poly_id = None\n                else:\n                    for ids_p, ids in enumerate(poly_ids):\n                        if residue.parent.id in ids:\n                            residue.parent.poly_id = ids_p\n\n    def get_parent_pdb(self):\n        return self.parent_pdb\n\n    def sqlalchemy_conversion(self):\n        self.alchemy = DCDistance(\n            pdb_id=self.parent_pdb.id,\n            parent_id=self.parent_pdb.id,\n            distance=self.distance,\n            d1_X_C=self.d1_X_C,\n            d2_X_CA=self.d2_X_CA,\n            d3_X_N=self.d3_X_N,\n            d4_CA_C=self.d4_CA_C,\n            d5_CA_CA=self.d5_CA_CA,\n            d6_CA_N=self.d6_CA_N,\n            d7_N_C=self.d7_N_C,\n            d8_N_CA=self.d8_N_CA,\n            d9_N_N=self.d9_N_N,\n            nterm=self.nt,\n            model=str(self.static_res.parent.parent.id),\n            dynamic_resn=str(self.dynamic_res.get_resname()),\n            dynamic_name=self.dynamic_name,\n            x_name=self.x_name,\n            static_name=self.static_name,\n            static_resn=str(self.static_res.get_resname()),\n            dynamic_chain=str(self.dynamic_res.parent.id),\n            dynamic_chain_poly_id=str(self.dynamic_res.parent.poly_id),\n            static_chain_poly_id=str(self.static_res.parent.poly_id),\n            poly_id_eq=self.poly_id_eq,\n            dynamic_chain_res_count=self.dynamic_res.parent.res_count,\n            static_chain=str(self.static_res.parent.id),\n            static_chain_res_count=self.static_res.parent.res_count,\n            dynamic_resi=self.dynamic_res.id[1],\n            dynamic_pos=self.dynamic_pos,\n            static_resi=self.static_res.id[1],\n            static_pos=self.static_pos,\n            dynamic_order=self.dynamic_res.disordered,\n            static_order=self.static_res.disordered,\n            name=self.get_parent_pdb().name,\n            inter=not self.intra,\n            job_id=self.get_parent_pdb().job_id,\n            timestamp=self.parent_pdb.timestamp # almost no difference from self.timestamp = datetime.datetime.utcnow()\n        )\n\n    def __lt__(self, other): # enables sorting of pdb.distances\n        if self.distance is None:\n            return False # sorts None to the highest position\n        if other.distance is None:\n            return True # sorts None to the highest position\n        else:\n            return self.distance < other.distance\n\n    def __repr__(self):\n        return \"<Distance %s intra=%s hit=%s>\" % 
(self.distance, self.intra, self.hit)\n\n\naminoacid = ['VAL', 'ILE', 'LEU', 'GLU', 'GLN', 'ASP', 'ASN', 'HIS', 'TRP', 'PHE', 'TYR', 'ARG', 'LYS', 'SER', 'THR',\n             'MET', 'ALA', 'GLY', 'PRO', 'CYS'] # defines the allowed amino acid names\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-j\", \"--job\")\n    parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", default=False)\n    args = parser.parse_args()\n\n    start_time = time.time()\n    main()\n    print(\"--- Program finished in %s seconds ---\" % (time.time() - start_time))\n","repo_name":"arnescheu/NeissDist","sub_path":"disCrawl.py","file_name":"disCrawl.py","file_ext":"py","file_size_in_byte":41570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18436283359","text":"import math\n\na, b, k = list(map(int, input().split()))\n\nd = math.gcd(a, b)\n\ne = d + 1\nl = 0\nwhile True:\n    e -= 1\n    if d % e == 0:\n        l += 1\n    if l == k:\n        break\n\n\nprint(e)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03106/s031600435.py","file_name":"s031600435.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18999676801","text":"import pandas as pd\nimport numpy as np\nfrom catboost import CatBoostClassifier\n\ncovid_detect_model = CatBoostClassifier().load_model('models/covid_model_v2')\ncovid_stages_model = CatBoostClassifier().load_model('models/covid_stage_model')\n\ndef predictor(path):\n    predictions = []\n    data = np.array(pd.read_csv(path))\n    for d in data:\n        prediction1 = covid_detect_model.predict(d)\n        if prediction1 == 0:\n            prediction2 = covid_stages_model.predict(d)\n            if prediction2 == 1:\n                predictions.append('Тест положительный. Слабое повреждение лёгких')\n            elif prediction2 == 2:\n                predictions.append('Тест положительный. Среднее повреждение лёгких')\n            elif prediction2 == 3:\n                predictions.append('Тест положительный. 
Критическое повреждение лёгких')\n        else:\n            predictions.append('Тест отрицательный.')\n    results = pd.DataFrame({'Patient': [i for i in range(len(predictions))], 'Result': predictions})\n    results.to_csv('result.csv')\n","repo_name":"mike-yasnov/COVID-19-detection","sub_path":"UI/alg.py","file_name":"alg.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"18333568399","text":"s=input()\nk=int(input())\n\ndef count_change(st):\n    st=st+'!'\n    cnt=0\n    moji=st[0]\n    l=0\n    r=0\n    for i in range(1,len(st)):\n        if moji!=st[i]:\n            cnt+=(r-l+1)//2\n            l=i\n            r=i\n            moji=st[i]\n        else:\n            r=i\n\n    return cnt\n\nif len(set(s))==1:\n    ans=len(s)*k//2\n    print(ans)\n    exit()\n\nif s[0]==s[-1]:\n    a=1\n    for i in range(len(s)):\n        if s[i]!=s[i+1]:\n            break\n        else:\n            a+=1\n\n    b=1\n    for i in range(len(s)-1,0,-1):\n        if s[i]!=s[i-1]:\n            break\n        else:\n            b+=1\n\n    ans=count_change(s)*k-(a//2+b//2-(a+b)//2)*(k-1)\n\nelse:\n    ans=count_change(s)*k\n\nprint(ans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02891/s785877126.py","file_name":"s785877126.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"1547895614","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Event',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('start_date', models.DateField()),\n                ('start_time', models.TimeField()),\n                ('end_date', models.DateField()),\n                ('end_time', models.TimeField()),\n                ('name', models.CharField(max_length=256)),\n                ('email', models.EmailField(max_length=70)),\n                ('user_id', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n    ]\n","repo_name":"anna777/ttAvalon","sub_path":"appointments/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"5835266107","text":"\"\"\"Simple script to simulate an MCMC-like routine to check for memory leaks.\"\"\"\nimport numpy as np\nimport os\nimport psutil\nimport tracemalloc\n\nfrom py21cmfast import initial_conditions, perturb_field, run_lightcone\n\ntracemalloc.start()\nsnapshot = tracemalloc.take_snapshot()\nPROCESS = psutil.Process(os.getpid())\noldmem = 0\n\n\ndef trace_print():\n    \"\"\"Print a trace of memory leaks.\"\"\"\n    global snapshot\n    global oldmem\n\n    snapshot2 = tracemalloc.take_snapshot()\n    snapshot2 = snapshot2.filter_traces(\n        (\n            tracemalloc.Filter(False, \"<frozen importlib._bootstrap>\"),\n            tracemalloc.Filter(False, \"<unknown>\"),\n            tracemalloc.Filter(False, tracemalloc.__file__),\n        )\n    )\n\n    if snapshot is not None:\n        thismem = PROCESS.memory_info().rss / 1024**2\n        diff = thismem - oldmem\n        print(\n            \"===================== Begin Trace (TOTAL MEM={:1.4e} MB... 
[{:+1.4e} MB]):\".format(\n thismem, diff\n )\n )\n top_stats = snapshot2.compare_to(snapshot, \"lineno\", cumulative=True)\n for stat in top_stats[:4]:\n print(stat)\n print(\"End Trace ===========================================\")\n oldmem = thismem\n\n snapshot = snapshot2\n\n\ntrace_print()\n\nrun_lightcone(\n redshift=15,\n user_params={\n \"USE_INTERPOLATION_TABLES\": True,\n \"N_THREADS\": 1,\n \"DIM\": 100,\n \"HII_DIM\": 25,\n \"PERTURB_ON_HIGH_RES\": True,\n \"USE_FFTW_WISDOM\": False,\n },\n flag_options={\n \"USE_MASS_DEPENDENT_ZETA\": True,\n \"INHOMO_RECO\": True,\n \"USE_TS_FLUCT\": True,\n },\n direc=\"_cache_%s\" % (os.path.basename(__file__)[:-3]),\n random_seed=1993,\n)\n","repo_name":"21cmfast/21cmFAST","sub_path":"devel/simulate_mcmc_memory_leak_lightcone.py","file_name":"simulate_mcmc_memory_leak_lightcone.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"90"} +{"seq_id":"18430776929","text":"MOD = 1000000007\n\nN = int(input())\nS = input()\n\ncnt = {}\nfor s in S:\n if s not in cnt:\n cnt[s] = 1\n else:\n cnt[s] += 1\n\nans = 1\nfor v in cnt.values():\n ans *= v + 1\n ans %= MOD\n\nprint(ans - 1)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03095/s205451723.py","file_name":"s205451723.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"4008995522","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:\n \n if not list1 or not list2:\n return list1 if not list2 else list2\n curr, target = (list1, list2) if list1.val < list2.val else (list2, list1)\n head = curr\n while curr and target:\n while curr.next and curr.next.val < target.val:\n curr = curr.next\n curr.next, target = target, curr.next\n curr = curr.next\n return head\n \n ","repo_name":"tausif-fardin/LeetCode-Problems","sub_path":"0021-merge-two-sorted-lists/0021-merge-two-sorted-lists.py","file_name":"0021-merge-two-sorted-lists.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"17972928169","text":"def solve(n):\n res = 0\n while(n != 1):\n if n%2 == 0:\n res += 1\n n /= 2\n else:\n break\n return res\n\nn = int(input())\n\nans = 1\nans_max = 0\nfor i in range(1, n+1):\n cnt = solve(i)\n if cnt > ans_max:\n ans_max = cnt\n ans = i\nprint(ans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03644/s993968924.py","file_name":"s993968924.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"23187347911","text":"#!/usr/bin/env python\n\nimport os\nimport datetime\nimport subprocess\nimport argparse\n\nfrom util import speak\nfrom util import projections\nfrom util.ncfunc import get_nc_file\n\n\"\"\"\nBuild a CISM dataset\n\"\"\"\n#==== Data Locations ====\n# Link data here or edit \n#========================\nlc_bamber = 'data/BamberDEM/Greenland_bedrock_topography_V3.nc'\nlc_seaRise = 'data/SeaRise/Greenland1km.nc'\nlc_racmo2p0 = 'data/RACMO2.0/Racmo2MeanSMB_1961-1990.nc'\nlc_InSAR = 'data/InSAR/Joughin2015/greenland_vel_mosaic500.nc' #NOTE: will build this file from 
mosaicOffsets.* files\nlc_massCon = 'data/IceBridge/Greenland/MCdataset-2014-11-19.nc'\nlc_mask = 'data/Ice2Sea/ice2sea_Greenland_geometry_icesheet_mask_Zurich.nc'\n\n\n#==== SETUP =====\n# get args, time \n# load data sets \n#================\nstamp = datetime.date.today().strftime(\"%Y_%m_%d\")\nf_base = 'templates/greenland_1km.mcb.nc'\n\n# parse the command line arguments\nparser = argparse.ArgumentParser() # -h or --help automatically included!\n\nparser.add_argument('-e', '--extended', help='Produce the extended grid.', action='store_true')\n\nvolume = parser.add_mutually_exclusive_group()\nvolume.add_argument(\"-v\", \"--verbose\", help=\"Increase the output verbosity\", action=\"store_true\")\nvolume.add_argument(\"-q\", \"--quiet\", help=\"Run silently\", action=\"store_true\")\n\nargs = parser.parse_args()\n\nspeak.notquiet(args,\"\\nBuilding the Greenland datasets in the Bamber projection.\")\nspeak.notquiet(args, \"=========================================================\\n\")\n\n# load in datasets\nspeak.notquiet(args,\"Loading the datasets.\")\n\nfrom data import bamberdem\nnc_bamber = get_nc_file(lc_bamber,'r')\nspeak.verbose(args,\" Found Bamber DEM\")\n \nfrom data import searise\nnc_seaRise = get_nc_file(lc_seaRise,'r')\nspeak.verbose(args,\" Found Sea Rise data\")\n\nfrom data import racmo2p0\nnc_racmo2p0 = get_nc_file(lc_racmo2p0,'r')\nspeak.verbose(args,\" Found RACMO 2.0 data\")\n\nfrom data import insar\ntry:\n nc_insar = get_nc_file(lc_InSAR,'r')\nexcept Exception:\n speak.verbose(args,\"\\n Building InSAR velocity dataset...\\n\")\n subprocess.call(\"python util/convert_velocities.py \"+os.path.dirname(lc_InSAR), shell=True)\n nc_insar = get_nc_file(lc_InSAR,'r')\nspeak.verbose(args,\" Found InSAR data\")\n\nfrom data import icebridge\nnc_massCon = get_nc_file(lc_massCon,'r')\nspeak.verbose(args,\" Found Mass Conserving Bed data\")\n\nfrom data import ice2sea\nnc_mask = get_nc_file( lc_mask, 'r' )\nspeak.verbose(args,\" Found Zurich mask\")\n\nspeak.verbose(args,\"\\n All data files found!\")\n\n\n#===== Bamber DEM ======\n# this is a 1km dataset \n#=======================\nspeak.verbose(args,\"\\nBuilding the base dataset: \"+f_base)\n\nspeak.notquiet(args,\"\\nCreating the base grid.\"),\n\nnc_base, base = bamberdem.build_base(f_base, nc_bamber)\n\nspeak.notquiet(args,\" Done!\")\n\n#==== Projections ====\n# All the projections \n# needed for the data \n#=====================\nspeak.notquiet(args,\"\\nGetting the projections.\")\n\nproj_epsg3413, proj_eigen_gl04c = projections.greenland()\n\nspeak.notquiet(args,\" Done!\")\n\n# transform meshes. 
\nspeak.verbose(args,\" Creating the transform meshes: base Bamber grid to EPSG-3413.\")\n\ntrans = projections.transform(base, proj_eigen_gl04c, proj_epsg3413)\n\nspeak.notquiet(args,\" Done!\")\n\n#==== SeaRise Data =====\n# this is a 1km dataset \n#=======================\nspeak.notquiet(args,\"\\nGetting bheatflx and presartm from the SeaRise data.\")\n\nsearise.bheatflx_artm_bamber(args, nc_seaRise, nc_base, base)\n\nnc_seaRise.close()\n#==== RACMO2.0 Data =====\n# this is a 1km dataset \n#========================\nspeak.notquiet(args,\"\\nGetting acab from the RACMO 2.0 data.\")\n\nracmo2p0.acab_bamber(args, nc_racmo2p0, nc_base, base)\n\nnc_racmo2p0.close()\n#==== InSAR velocity Data ====\n# this is a 500m dataset in \n# the ESPG-3413 projection \n#=============================\nspeak.notquiet(args,\"\\nGetting vy, vx, ey, and ex from the InSAR data.\")\n\ninsar.velocity_bamber(args, nc_insar, nc_base, trans)\n\nnc_insar.close()\n#==== Mass Conserving Bed Data ===\n# This is the new (2015) bed data \n#=================================\nspeak.notquiet(args,\"\\nGetting thk, topg, and topgerr from the mass conserving bed data.\")\n\nicebridge.mcb_bamber(args, nc_massCon, nc_bamber, nc_base, base, trans, proj_eigen_gl04c, proj_epsg3413)\n\nnc_bamber.close()\nnc_massCon.close()\n#==== Zurich mask =====\n# apply mask, and get \n# new surface variable \n#======================\nspeak.notquiet(args,\"\\nGetting the Zurich Mask.\")\n\nbase = None\nnc_base.close() # need to read in some data from nc_base now\nnc_base = get_nc_file(f_base,'r+')\n\nice2sea.apply_mask(args, nc_mask, nc_base)\n\nnc_mask.close()\n#==== Done getting data ====\n#===========================\nnc_base.close()\n\n#==== add time dim and shrink ====\n# apply to all the variables and \n# shrink to size around ice sheet \n#=================================\nspeak.notquiet(args,\"\\nAdding the time dimension and creating the 1km dataset.\")\n\nf_1km = 'complete/greenland_1km_'+stamp+'.mcb.nc'\nf_template = 'greenland.mcb.config'\n\nbamberdem.add_time(args, f_base, f_1km, f_template)\n\n#==== Coarsen ==== \n# make 2, 4 and 8 \n# km datasets \n#==================\nspeak.notquiet(args,\"\\nCreating coarser datasets.\")\n\ncoarse_list = [2,4,5,8] # in km\n\nbamberdem.coarsen(args, f_1km, f_template, coarse_list)\n\n#==== and done! 
====\n#===================\nspeak.notquiet(args,\"\\nFinished building the datasets.\")\n","repo_name":"jhkennedy/cism-data","sub_path":"build_Bamber_greenland.py","file_name":"build_Bamber_greenland.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"}
+{"seq_id":"28999501631","text":"\"\"\"Pet Adoption app\"\"\"\n\nfrom flask import Flask, request, render_template, redirect, flash, session\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom models import db, connect_db, Pets\nfrom forms import AddPetForm\n\napp = Flask(__name__)\napp.app_context().push()\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///pet_adoption_db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\napp.config['SECRET_KEY'] = \"thesecretekey898912\"\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n\ndebug = DebugToolbarExtension(app)\nwith app.app_context():\n    connect_db(app)\n\n\n@app.route('/')\ndef home():\n    return redirect('/pets')\n\n@app.route('/pets')\ndef user_list():\n    \"\"\"Render the main pet list\"\"\"\n    pets = Pets.query.all()\n    return render_template('list_pets.html', pets=pets)\n\n\n@app.route('/add', methods=['GET', 'POST'])\ndef add_user_form():\n\n    form = AddPetForm()\n\n    if form.validate_on_submit():\n        name = form.name.data\n        species = form.species.data\n        img_url = form.img_url.data\n        age = form.age.data\n        comment = form.comment.data\n        available = form.available.data\n        pet = Pets(name=name, species=species, img_url=img_url, age=age, comment=comment, available=available)\n        db.session.add(pet)\n        db.session.commit()\n        return redirect(\"/\")\n\n    else:\n        return render_template(\n            \"add_pet.html\", form=form)\n\n\n@app.route('/pets/<int:user_id>')\ndef pet_detail(user_id):\n    pet = Pets.query.get_or_404(user_id)\n    return render_template('pet_detail.html', pet=pet)\n\n\n\n@app.route('/pets/<int:pet_id>/edit', methods=[\"GET\", \"POST\"])\ndef edit_pet(pet_id):\n    pet = Pets.query.get_or_404(pet_id)\n    form = AddPetForm(obj=pet)\n    if form.validate_on_submit():\n        pet.name = form.name.data\n        pet.species = form.species.data\n        pet.img_url = form.img_url.data\n        pet.age = form.age.data\n        pet.comment = form.comment.data\n        pet.available = form.available.data\n        db.session.commit()\n        return redirect('/')\n    else:\n        return render_template('edit_pet.html', form=form)\n\n","repo_name":"riomonet/pet_adoption","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"37432533344","text":"from corpus_process import Corpus\nfrom torch.utils.data import DataLoader, random_split\nfrom transformers import BertForTokenClassification, AdamW\nfrom pytorch_pretrained_bert import BertAdam\nimport torch\nfrom seqeval.metrics import accuracy_score, f1_score, classification_report\n\n\nfiles_train_paths = ['data/sensitive1.tsv', 'data/sensitive3.tsv']\nfiles_test_path = ['data/sensitive2.tsv']\npretrained_dataset = 'bert-base-uncased'\n\n# ========================================\n# DATA\n# ========================================\n\ndata_train = Corpus(files_train_paths).get_dataset_bert()\ndata_test = Corpus(files_test_path).get_dataset_bert()\n\nval_size = int(0.1 * len(data_train))\ntrain_size = len(data_train) - val_size\n\ntrain_dataset, val_dataset = random_split(data_train, [train_size, val_size])\n\nbatch_size = 
64\n\ntrain_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=False)\nval_loader = DataLoader(val_dataset, batch_size=batch_size)\n\nprint(\"Train size : {}\".format(train_size))\nprint(\"Validation size : {}\".format(val_size))\n\n# ========================================\n# MODEL\n# ========================================\n\nLABELS = Corpus(files_test_path).labels\nprint(\"Labels {}\".format(LABELS))\n\nmodel = BertForTokenClassification.from_pretrained(pretrained_dataset, num_labels=len(LABELS))\noptimizer = BertAdam(model.parameters(), lr=2e-5, eps=1e-8)\n\n# print helpers\ndef p_epoch_status(epoch, epochs):\n    print(\"\")\n    print('======== Epoch {:} / {:} ========'.format(epoch + 1, epochs))\n    print('Training...')\n\ndef p_batch_status(step, size, epoch):\n    print(\n        'Train Epoch: {} [{}/{} ({:.0f}%)]'.format(\n            epoch, step * size, len(train_loader.dataset),\n            100. * step / len(train_loader)))\n\n\ndef train(epochs=5):\n\n    for epoch in range(epochs):\n        p_epoch_status(epoch, epochs) # prints the epoch banner itself\n        # ========================================\n        # Training\n        # ========================================\n        total_train_loss = 0\n        model.train()\n        step = 1\n        for input_ids, labels, mask in train_loader:\n\n            p_batch_status(step, len(input_ids), epoch)\n            step += 1\n\n            model.zero_grad()\n\n            output = model(input_ids, token_type_ids=None,\n                           attention_mask=mask, labels=labels)\n\n            loss = output[0]\n            total_train_loss += loss.item()\n\n            loss.backward()\n            optimizer.step()\n\n        avg_train_loss = total_train_loss / len(train_loader)\n        print(\"Average train loss: {}\".format(avg_train_loss))\n\n\n        # ========================================\n        # Validation\n        # ========================================\n\n        print(\"\")\n        print(\"Running Validation...\")\n\n        model.eval()\n        total_val_loss = 0\n\n        pred_labels = []\n        true_labels = []\n\n        for input_ids, labels, mask in val_loader:\n\n            with torch.no_grad():\n                output = model(input_ids, token_type_ids=None,\n                               attention_mask=mask, labels=labels)\n\n            loss, logits = output[:2]\n            total_val_loss += loss.item()\n\n            logits = logits.argmax(dim=2).view(-1)\n            labels = labels.view(-1)\n            input_ids = input_ids.view(-1)\n\n            for i in range(len(input_ids)):\n                if input_ids[i] != 0:\n                    pred_labels.append(LABELS[logits[i]])\n                    true_labels.append(LABELS[labels[i]])\n\n\n        eval_loss = total_val_loss / len(val_loader)\n        print(\"Validation loss: {}\".format(eval_loss))\n        print(\"Validation Accuracy: {}\".format(accuracy_score(true_labels, pred_labels)))\n        print(\"Validation F1-Score: {}\".format(f1_score(true_labels, pred_labels)))\n        print(classification_report(true_labels, pred_labels))\n\ndef test():\n    pred_labels = []\n    true_labels = []\n\n    for input_ids, labels, mask in data_test:\n\n        with torch.no_grad():\n            logits = model(torch.tensor([input_ids.numpy()]))\n\n        logits = logits[0].argmax(dim=2).view(-1)\n        input_ids = input_ids.view(-1)\n\n        for i in range(len(input_ids)):\n            if input_ids[i] != 0:\n                pred_labels.append(LABELS[logits[i]])\n                true_labels.append(LABELS[labels[i]])\n\n    print(\"Test Accuracy: {}\".format(accuracy_score(true_labels, pred_labels)))\n    print(\"Test F1-Score: {}\".format(f1_score(true_labels, pred_labels)))\n    print(classification_report(true_labels, pred_labels))\n\n\nif __name__ == '__main__':\n    train(6)\n    test()\n\n\n# def eval(input_ids, mask):\n#     tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n#     toks = tokenizer.convert_ids_to_tokens(input_ids)\n# 
logits = model(torch.tensor([list(input_ids)]), token_type_ids=None ,attention_mask=torch.tensor([list(mask)]))\n# logits = logits.argmax(dim=2).view(-1)\n# labels = [corpus.labels[i] for i in logits]\n# return list(zip(toks, labels))\n\n\n\n","repo_name":"WrathXL/preferences_detection_from_text","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"17963642289","text":"from collections import Counter\n\nn = int(input())\nd = Counter(list(map(int, input().split())))\nans = -1\n\nfor k in sorted(d.keys())[::-1]:\n if ans == -1 and d[k] >= 4:\n print(k*k)\n exit()\n elif ans == -1 and d[k] >= 2:\n ans = k\n elif ans != -1 and d[k] >= 2:\n print(ans*k)\n exit()\n\nprint(0)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03625/s488970961.py","file_name":"s488970961.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"5045264142","text":"from itertools import accumulate\n\n\ndef CheckRules(N, K, S):\n \"\"\"引数が条件を満たしていればYes, 満たしていなければNoを返す\"\"\"\n\n one_cusum = list(accumulate([1 if s == \"1\" else 0 for s in S], initial=0))\n zero_cusum = list(accumulate([1 if s == \"0\" else 0 for s in S], initial=0))\n one_cnt = one_cusum[-1]\n exist = False\n for i in range(N - K + 1):\n not_exist_zero = not zero_cusum[i + K] - zero_cusum[i]\n if one_cusum[i + K] - one_cusum[i] == one_cnt and not_exist_zero:\n if exist:\n return \"No\"\n exist = True\n return \"Yes\" if exist else \"No\"\n\n\ndef main():\n T = int(input())\n for _ in range(T):\n n, k = map(int, input().split())\n S = input()\n print(CheckRules(n, k, S))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"valusun/Compe_Programming","sub_path":"AtCoder/ARC/ARC150/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"6092222978","text":"import os\nimport sys\nimport shutil\nfrom progress.bar import Bar\nimport exifread\n\nshot_time_record = dict()\n\nclass FancyBar(Bar):\n copy_speed = 0\n message = 'Syncing'\n fill = '*'\n suffix = '%(percent).1f%% - %(elapsed)ds [remaining %(remaining)d - total %(max)d - speed %(copy_speed)d MB/S]'\n\ndef save_shot_time(file_name, shot_time):\n global shot_time_record\n shot_time_record[os.path.splitext(file_name)[0]] = shot_time\n \ndef query_shot_time_from_history(file_name):\n time = \"\"\n global shot_time_record\n try:\n time = shot_time_record[os.path.splitext(file_name)[0]]\n except:\n time = \"\"\n return time\n\ndef query_shot_time(file_name):\n shot_time = \"\"\n imgexif = open(file_name, 'rb')\n if imgexif is None:\n shot_time = query_shot_time_from_history(file_name)\n return shot_time\n\n exif = exifread.process_file(imgexif)\n if \"EXIF DateTimeOriginal\" in exif.keys():\n shot_time = exif[\"EXIF DateTimeOriginal\"].printable\n elif \"Image DateTimeOriginal\" in exif.keys():\n shot_time = exif[\"Image DateTimeOriginal\"].printable\n else:\n shot_time = query_shot_time_from_history(file_name)\n\n if len(shot_time) > 0:\n shot_time = shot_time.replace(\":\", \"\").replace(\" \", \"\")\n save_shot_time(file_name, shot_time)\n imgexif.close()\n return shot_time\n\ndef search_files(root_name, filter, result):\n for dir_name in os.listdir(root_name):\n file_path = os.path.join(root_name, dir_name)\n if 
os.path.isdir(file_path):\n search_files(file_path, filter, result)\n else:\n file_name_with_path, ext = os.path.splitext(file_path)\n file_name = file_name_with_path [file_name_with_path.rfind(\"/\")+1: len(file_name_with_path)]\n if ext in filter and file_name[0] != \".\" and \"Trashes\" not in file_path:\n result.append(file_path)\n return result\n\ndef backup_pictures(source_folder, dst_folder, file_filter):\n print(\"begin backup SD CARD files...\")\n folder = source_folder\n if os.path.exists(folder) == False:\n print(\"source folder not exist. \", folder)\n return\n \n result = []\n result = search_files(folder, file_filter, result)\n result = sorted(result, key=lambda x: os.path.splitext(x)[1])\n\n if len(result) == 0:\n print(\"No file found.\")\n return\n if os.path.exists(dst_folder) == False:\n os.makedirs(dst_folder)\n\n bar = FancyBar()\n bar.max = len(result)\n total_file_count = 0\n overwrite_count = 0\n total_copied_size = 0\n for n in range(len(result)):\n each_picture = result[n]\n file_name = each_picture[each_picture.rfind(\"/\")+1: len(each_picture)]\n dst_file_path = dst_folder + \"/\" + file_name\n\n shot_time = query_shot_time(each_picture)\n if len(shot_time) > 0:\n original_path, original_file_name = os.path.split(dst_file_path)\n output_name, output_ext_name = os.path.splitext(original_file_name)\n dst_file_path = original_path + \"/\" + output_name + \"_\" + shot_time + output_ext_name\n\n if os.path.exists(dst_file_path) == True:\n overwrite_count += 1\n ret = shutil.copyfile(each_picture, dst_file_path)\n if ret:\n total_file_count = total_file_count + 1\n total_copied_size += os.path.getsize(each_picture)\n if bar.elapsed != 0:\n bar.copy_speed = total_copied_size / 1024 / 1024 / bar.elapsed\n #print(each_picture, \"copied to \", dst_folder)\n bar.index = n + 1\n bar.update()\n bar.finish()\n print(\"\\nCOPY %d files DONE, %d files overwrited, actually %d copied to %s\" % (total_file_count, overwrite_count, (total_file_count-overwrite_count), dst_folder))\n\nif __name__ == '__main__':\n backup_pictures(\"/Users/junlin/test/sdcard\", \"/Users/junlin/test/backup\", [\".jpg\", \".JPG\", \".jpeg\", \".JPEG\", \".raf\", \".RAF\", \".png\", \".PNG\", \".PSD\", \".psd\", \".mp4\", \".MP4\", \".mov\", \".MOV\", \".dng\", \".DNG\"])\n #backup_pictures(\"/Users/junlin/test/sync_src\", \"/Users/junlin/test/backup\", [\".jpg\", \".JPG\", \".jpeg\", \".JPEG\", \".raf\", \".RAF\", \".png\", \".PNG\", \".PSD\", \".psd\", \".mp4\", \".MP4\", \".mov\", \".MOV\", \".dng\", \".DNG\"])","repo_name":"cpluser09/syncexif","sub_path":"backup_picture.py","file_name":"backup_picture.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"30387014547","text":"class Solution:\n def fib(self, N: int) -> int:\n if N == 0:\n return 0\n if N == 1:\n return 1\n lst = [0, 1]\n for i in range(N-1):\n lst.append(lst[-1] + lst[-2])\n return lst[-1]","repo_name":"VRER1997/leetcode_python","sub_path":"easy/509 Fibonacci Number.py","file_name":"509 Fibonacci Number.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"13590409608","text":"import torch\nfrom torch_geometric.utils import one_hot\n\n\ndef test_one_hot():\n src = torch.LongTensor([1, 0, 3])\n expected_output = [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]]\n\n assert one_hot(src).tolist() == expected_output\n assert one_hot(src, 4).tolist() 
== expected_output\n\n src = torch.LongTensor([[1, 0], [0, 1], [2, 0]])\n expected_output = [[0, 1, 0, 1, 0], [1, 0, 0, 0, 1], [0, 0, 1, 1, 0]]\n assert one_hot(src).tolist() == expected_output\n assert one_hot(src, torch.tensor([3, 2])).tolist() == expected_output\n","repo_name":"Cyanogenoid/fspool","sub_path":"graphs/test/utils/test_one_hot.py","file_name":"test_one_hot.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"90"} +{"seq_id":"18405886359","text":"import sys\ninput = sys.stdin.readline\nfrom collections import deque\ndef main():\n N = int( input())\n E = [[] for _ in range(N)]\n for _ in range(N-1):\n u, v, w = map( int, input().split())\n u -= 1\n v -= 1\n E[u].append((v,w))\n E[v].append((u,w))\n V = [-1]*N\n V[0] = 0\n d = deque([(0,0)])\n while d:\n u, l = d.popleft()\n for v,w in E[u]:\n if V[v] == -1:\n V[v] = (l+w)%2\n d.append((v, (l+w)%2))\n print(\"\\n\".join( map( str, V)))\n \n \nif __name__ == '__main__':\n main()\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03044/s375697253.py","file_name":"s375697253.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"34476185617","text":"'''\nCreated on 11/feb/2013\n\n@author: sambarza@gmail.com\n'''\nfrom pandac.PandaModules import CollisionRay\nfrom pandac.PandaModules import CollisionNode\nfrom pandac.PandaModules import CollisionHandlerQueue\nfrom pandac.PandaModules import CollisionTraverser\nfrom pandac.PandaModules import BitMask32\n\nclass MouseOnInfo():\n \n def __init__(self, thing, thingId, node, nodeId, mouse_x, mouse_y):\n \n self.thing = thing\n self.thingId = thingId\n self.node = node\n self.nodeId = nodeId\n self.mouse_x = mouse_x\n self.mouse_y = mouse_y\n \nclass Picker(object):\n '''\n classdocs\n '''\n\n def __init__(self, camera, mouseWatcherNode, camNode, things):\n '''\n Constructor\n '''\n \n self.mouseWatcherNode = mouseWatcherNode\n self.camNode = camNode\n self.things = things\n \n self.pickerRay = CollisionRay()\n \n self.pickerNode = CollisionNode('mouseRay')\n self.pickerNode.setFromCollideMask(BitMask32.bit(1))\n self.pickerNode.addSolid(self.pickerRay)\n \n self.pickerNP = camera.attachNewNode(self.pickerNode)\n self.pq = CollisionHandlerQueue() \n \n self.picker = CollisionTraverser()\n self.picker.addCollider(self.pickerNP, self.pq)\n \n def getMouseOn(self, mouse_x, mouse_y):\n \n #Set the position of the ray based on the mouse position\n self.pickerRay.setFromLens(self.camNode, mouse_x, mouse_y)\n \n self.picker.traverse(self.things.node)\n \n if self.pq.getNumEntries() > 0:\n #if we have hit something, sort the hits so that the closest\n #is first, and highlight that node\n self.pq.sortEntries()\n \n selectedNode = self.pq.getEntry(0).getIntoNode()\n \n selectedNodeId = selectedNode.getTag('nodeId')\n thingId = selectedNode.getTag('ID')\n \n mouseOnInfo = MouseOnInfo(self.things.getById(thingId), thingId, selectedNode, selectedNodeId, mouse_x, mouse_y)\n \n return mouseOnInfo\n ","repo_name":"sambarza/bo","sub_path":"world/Picker.py","file_name":"Picker.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18442978769","text":"import numpy as np\n\nn, m = map(int, input().split())\nres = np.ones(m, dtype=np.int8)\nfor _ in range(n):\n a_like = np.zeros_like(res)\n for a in map(lambda 
x: int(x)-1, input()[2:].split()):\n        a_like[a] = 1\n    res *= a_like\nprint(np.count_nonzero(res))\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03126/s327067505.py","file_name":"s327067505.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"11805777523","text":"\"\"\"\nGiven an array, rotate the array to the right by k steps, where k is non-negative.\n\n\nExample 1:\n\nInput: nums = [1,2,3,4,5,6,7], k = 3\nOutput: [5,6,7,1,2,3,4]\nExplanation:\nrotate 1 steps to the right: [7,1,2,3,4,5,6]\nrotate 2 steps to the right: [6,7,1,2,3,4,5]\nrotate 3 steps to the right: [5,6,7,1,2,3,4]\nExample 2:\n\nInput: nums = [-1,-100,3,99], k = 2\nOutput: [3,99,-1,-100]\nExplanation:\nrotate 1 steps to the right: [99,-1,-100,3]\nrotate 2 steps to the right: [3,99,-1,-100]\n\n\nConstraints:\n\n1 <= nums.length <= 10**5\n-2**31 <= nums[i] <= 2**31 - 1\n0 <= k <= 10**5\n\"\"\"\n\nclass Solution(object):\n    def rotate(self, nums, k):\n        \"\"\"\n        :type nums: List[int]\n        :type k: int\n        :rtype: None Do not return anything, modify nums in-place instead.\n        \"\"\"\n        k = k % len(nums) # make sure k is smaller than the length of nums; we don't want to go out of bounds\n        # arr[1,2,3,4] --> we don't want to access arr[4] by accident\n\n        l, r = 0, len(nums) - 1 # left & right pointers pointing at the start and end of the array\n        while l < r: # reverse the whole array --> when l and r touch, the array has been reversed\n            nums[l], nums[r] = nums[r], nums[l]\n            l, r = l + 1, r - 1\n\n        l, r = 0, k - 1 # reverse the first k elements\n        while l < r:\n            nums[l], nums[r] = nums[r], nums[l]\n            l, r = l + 1, r - 1\n\n        l, r = k, len(nums) - 1 # reverse the elements from k to the end of the array\n        while l < r:\n            nums[l], nums[r] = nums[r], nums[l]\n            l, r = l + 1, r - 1\n","repo_name":"TheDiamondSkyv32/Codewars","sub_path":"LeetCodeDS/RotateArray.py","file_name":"RotateArray.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"42278993450","text":"# Python3 program to remove invalid parentheses\n\n# Method checks if a character is a parenthesis (open\n# or closed)\ndef isParenthesis(c):\n\treturn ((c == '(') or (c == ')'))\n\n# method returns true if the string contains valid\n# parentheses\ndef isValidString(str):\n\tcnt = 0\n\tfor i in range(len(str)):\n\t\tif (str[i] == '('):\n\t\t\tcnt += 1\n\t\telif (str[i] == ')'):\n\t\t\tcnt -= 1\n\t\tif (cnt < 0):\n\t\t\treturn False\n\treturn (cnt == 0)\n\n# method to remove invalid parentheses\ndef removeInvalidParenthesis(str):\n\tif (len(str) == 0):\n\t\treturn\n\n\t# visit set to ignore already visited strings\n\tvisit = set()\n\n\t# queue to maintain BFS\n\tq = []\n\ttemp = 0\n\tlevel = 0\n\n\t# pushing the given string as the starting node into the queue\n\tq.append(str)\n\tvisit.add(str)\n\twhile(len(q)):\n\t\tstr = q[0]\n\t\tq.pop(0) # pop from the front (FIFO) so the traversal stays breadth-first\n\t\tif (isValidString(str)):\n\t\t\tprint(str)\n\n\t\t\t# If an answer is found, make level true\n\t\t\t# so that only valid strings of that level\n\t\t\t# are processed. 
\n\t\t\tlevel = True\n\t\tif (level): \n\t\t\tcontinue\n\t\tfor i in range(len(str)): \n\t\t\tif (not isParenthesis(str[i])): \n\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t# Removing parenthesis from str and \n\t\t\t# pushing into queue,if not visited already \n\t\t\ttemp = str[0:i] + str[i + 1:] \n\t\t\tif temp not in visit: \n\t\t\t\tq.append(temp) \n\t\t\t\tvisit.add(temp) \n","repo_name":"manvi0308/100DaysOfAlgo","sub_path":"Day 72/RemoveInvalidParenthesis.py","file_name":"RemoveInvalidParenthesis.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"90"} +{"seq_id":"36316826253","text":"from selenium.webdriver import Chrome\nimport re\nimport time\nimport json\ndef recent_25_posts(username):\n \"\"\"With the input of an account page, scrape the 25 most recent posts urls\"\"\"\n url = \"https://www.instagram.com/\" + username + \"/\"\n browser = Chrome('C:/Users/qwerty/Downloads/chromedriver.exe')\n browser.get(url)\n post = 'https://www.instagram.com/p/'\n post_links = []\n while len(post_links) < 25:\n links = [a.get_attribute('href') for a in browser.find_elements_by_tag_name('a')]\n for link in links:\n if post in link and link not in post_links:\n post_links.append(link)\n scroll_down = \"window.scrollTo(0, document.body.scrollHeight);\"\n browser.execute_script(scroll_down)\n time.sleep(2)\n else:\n return post_links[:25]\n \ndef insta_details(urls):\n \"\"\"Take a post url and return post details\"\"\"\n browser = Chrome('C:/Users/qwerty/Downloads/chromedriver.exe')\n post_details = []\n for link in urls:\n browser.get(link)\n try:\n # This captures the standard like count. \n # likes = browser.find_element_by_partial_link_text(' likes').text\n view_id = '//*[@id=\"react-root\"]/section/main/div/div/article/div[2]/section[2]/div/div/a'\n likes = browser.find_element_by_xpath(view_id).text\n except:\n # This captures the like count for videos which is stored\n view_id = '//*[@id=\"react-root\"]/section/main/div/div/article/div[2]/section[2]/div/span'\n likes = browser.find_element_by_xpath(view_id).text\n try:\n location = browser.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/article/header/div[2]/div[2]/div[2]/a').text\n except:\n location=\"null\"\n age = browser.find_element_by_css_selector('a time').text\n xpath_c = '//*[@id=\"react-root\"]/section/main/div/div/article/div[2]/div[1]/ul'\n comment = browser.find_element_by_xpath(xpath_c).text\n post_details.append({'likes/views': likes,'age': age, 'comment':comment,\"location\":location})\n time.sleep(2)\n return post_details \n\ndef find_hashtags(comment):\n \"\"\"Find hastags used in comment and return them\"\"\"\n hashtags = re.findall('#[A-Za-z]+', comment)\n return hashtags\n #/ //*[@id=\"react-root\"]/section/main/div/div/article/div[2]/div[1]/ul/div[2]/li/div/div/div","repo_name":"infinity4747/dot-hack","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"832613924","text":"import cherrypy\nfrom cherrypy.lib import cptools\nfrom mako.template import Template\nfrom mako.lookup import TemplateLookup\nimport pymongo\nimport os\n\nimport pygeo.esgf.authentication\nimport traceback\nfrom urlparse import urlparse\n\nfrom myproxy.client import MyProxyClientError\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\n\n# Aashish: Is there a better way for finding the path?\ntemplate_dir 
= os.path.join(current_dir, \"../../../../../\")\nlookup = TemplateLookup(directories=[template_dir])\n\nclass ESGFSessionAuth(cptools.SessionAuth):\n\n def check_username_and_password(self, username, password):\n try:\n pygeo.esgf.authentication.authenticate(username, password)\n except MyProxyClientError as myex:\n return \"ESGF authentication error: %s\" % myex.message\n except Exception as ex:\n return \"Internal server error: %s\" % str(ex)\n\n def login_screen(self, from_page='..', username='', error_msg=''):\n template = lookup.get_template(\"login.html\")\n return template.render(from_page=from_page,\n username=username,\n error_message=error_msg);\n\n def runtool(cls, **kwargs):\n sa = cls()\n for k, v in kwargs.iteritems():\n setattr(sa, k, v)\n return sa.run()\n\n runtool = classmethod(runtool)\n\n def do_check(self):\n request = cherrypy.serving.request\n\n if request.path_info.startswith('/common/') or request.path_info == '/favicon.ico':\n return True\n else:\n return super(ESGFSessionAuth, self).do_check()\n\n def on_login(self, username):\n username = username.strip()\n host = cherrypy.config['mongo.host']\n db = cherrypy.config['mongo.db']\n c = pymongo.Connection(host)[db]['users']\n\n if c.find({'openIdUri': username}).count() == 0:\n c.insert({'openIdUri': username})\n\n # Save base url in session for easy access\n parse_result = urlparse(username)\n esgf_base_url = '%s://%s' % (parse_result.scheme, parse_result.netloc)\n cherrypy.session['ESGFBaseUrl'] = esgf_base_url\n\ncherrypy.tools.esgf_session_auth = cherrypy._cptools.HandlerTool(ESGFSessionAuth.runtool)\n","repo_name":"OpenGeoscience/geoweb","sub_path":"pygeo/src/pygeo/security/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"90"} +{"seq_id":"70194383017","text":"from django import template\nfrom django.db.models import Count\nfrom core.models import Article\nregister = template.Library()\n\n@register.simple_tag\ndef total_articles():\n\treturn Article.objects.count()\n\n@register.inclusion_tag('core/custom-tag/latest_articles.html')\ndef latest_articles(num=3):\n\tarticles = Article.objects.order_by('-created')[:num]\n\treturn {'articles':articles}\n\n@register.inclusion_tag('core/custom-tag/commented_articles.html')\ndef most_commented_articles(num=3):\n\tarticles = Article.objects.annotate(commented_articles = Count('comments')).order_by('-commented_articles')[:num]\n\treturn {'articles' :articles}\n\n","repo_name":"eslamward/my_blog","sub_path":"src/core/templatetags/custom_tags.py","file_name":"custom_tags.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"39017362918","text":"from __future__ import absolute_import\nimport json\nfrom pyes.query import MatchAllQuery\n\n\ndef dump_docs(fp, conn, index_name, doc_type, scroll='5m', encoding='utf8'):\n q = MatchAllQuery()\n for result in conn.search(q, indices=[index_name], doc_types=[doc_type],\n scan=True, scroll=scroll):\n fp.write(json.dumps(result, encoding=encoding))\n fp.write('\\n')\n\n\ndef restore_docs(fp, conn, index_name, doc_type, encoding='utf8'):\n for line in fp:\n doc = json.loads(line, encoding=encoding)\n conn.index(line.strip().decode(encoding), index_name, doc_type,\n bulk=True, id=doc['_id'])\n 
conn.force_bulk()\n","repo_name":"danfairs/esman","sub_path":"esman/dumprestore.py","file_name":"dumprestore.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"4119306754","text":"from typing import List, Tuple\nfrom simulation.main.Entity import Entity, EntityType\n# from simulation.base.Agent import Agent\nfrom simulation.multi_agent_sim.MultiAgent import MultiAgent\nfrom simulation.concrete.StateRepresentations import StateRepresentation\nimport numpy as np\nfrom simulation.base.State import State\n\nimport simulation.utils.utils as sg\nfrom simulation.multi_agent_sim.hardcode_agents.HardCodeAgents import HardCodePred, HardCodePrey, RandomMultiAgent\nfrom simulation.concrete.SimulDiscreteAction import SimulDiscreteAction\nfrom timeit import default_timer as tmr\nfrom agent.multiagent.ERLMulti import ERLMulti\nclass MultiAgentEntity(Entity):\n \"\"\"\n Don't use this. This is the old, deprecated multi agent sim. It's very slow for multiple agents. \n This is just here for completeness.\n Rather use the DiscreteMultiAgentSimulation.\n \"\"\"\n START_ENERGY = 700\n MAX_ENERGY = 1000 #might need to make this dynamic\n def __init__(self, pos: np.array, type: EntityType, radius: float):\n super().__init__(pos, type, radius=radius)\n self.lifetimer = 0\n self.energy = MultiAgentEntity.START_ENERGY\n\n def update(self, acc: np.array, dt: float):\n # update position and velocity\n super().update(acc, dt)\n \n # life things\n self.lifetimer += 1\n self.energy -= 1\n\n # die if you are too old or too hungry.\n if self.lifetimer > MultiAgentSimulation.entity_lifetimes[self.type]:\n self.make_dead()\n \n if self.energy <= 0:\n # print( self.id , \" id dying.\")\n # just for now.\n self.make_dead()\n\n \n def eat(self, other: \"MultiAgentEntity\"):\n # todo\n if other.type == EntityType.FOOD:\n self.energy += 100\n else:\n self.energy += 250\n\n pass\n\n @staticmethod\n def food(pos):\n return MultiAgentEntity(pos, EntityType.FOOD, 3)\n pass\n\n def copy(self) -> \"MultiAgentEntity\":\n return MultiAgentEntity(self.pos.copy(), self.type, self.radius)\n\n def __repr__(self) -> str:\n return f\"{self.type}, ({self.pos[0], self.pos[1]})\"\n\nclass MultiAgentSimulation:\n # todo\n entity_lifetimes = {\n EntityType.FOOD: float('inf'),\n EntityType.PREY: 4000 * 0.4, # float('inf'), # 4000*100,\n EntityType.PREDATOR: 4000 * 0.5 #float('inf'), #4000*100,\n }\n\n entity_energy_to_reproduce = {\n EntityType.PREY: 800,\n EntityType.PREDATOR: 2000 \n }\n masses = {\n EntityType.PREY: 1,\n EntityType.PREDATOR: 3,\n }\n \n OOB_REWARD = -100 # out of bounds\n SINGLE_STEP_REWARD = 0\n DIE_REWARD = -1000\n VIZ_DIST = 50\n SIZE = 1240\n #SIZE = 1920\n MAX_FOOD = 1000\n\n def __init__(self, state_rep: StateRepresentation, \n agent_preds: List[MultiAgent],\n agent_prey: List[MultiAgent],\n \n ent_preds:List[MultiAgentEntity],\n ent_prey:List[MultiAgentEntity],\n foods: List[MultiAgentEntity],\n ):\n self.size = MultiAgentSimulation.SIZE\n self.e_predators: List[MultiAgentEntity] = ent_preds\n self.e_prey: List[MultiAgentEntity] = ent_prey\n \n self.a_predators: List[MultiAgent] = agent_preds\n self.a_prey: List[MultiAgent] = agent_prey\n\n self.foods: List[MultiAgentEntity] = foods\n self.state_rep: StateRepresentation = state_rep\n\n # entity.id -> state\n self.previous_states = {}\n\n r2 = np.sqrt(2)/2\n self.velocities = {\n SimulDiscreteAction.UP: np.array([0, 1.0]),\n SimulDiscreteAction.DOWN: np.array([0, 
-1.0]),\n SimulDiscreteAction.LEFT: np.array([-1.0, 0]),\n SimulDiscreteAction.RIGHT: np.array([1.0, 0]),\n\n SimulDiscreteAction.UP_RIGHT: np.array([r2, r2]),\n SimulDiscreteAction.DOWN_RIGHT: np.array([r2, -r2]),\n SimulDiscreteAction.DOWN_LEFT: np.array([-r2, -r2]),\n SimulDiscreteAction.UP_LEFT: np.array([-r2, r2]),\n }\n\n self.rewards = {\n EntityType.FOOD: 1,\n EntityType.PREY: 10\n }\n\n self.dt = 0.1\n\n\n self.current_step_reward = []\n # todo\n extra_obstacles = []\n\n self.obstacles: List[sg.Segment2] = self.get_obstacles(extra_obstacles)\n self.extra_obstacles = extra_obstacles if extra_obstacles is not None else []\n self.prev_time = tmr()\n\n def get_obstacles(self, extra_obstacles) -> List[sg.Segment2]:\n tl = sg.Point2(0,0)\n bl = sg.Point2(0,self.size)\n tr = sg.Point2(self.size,0)\n br = sg.Point2(self.size,self.size)\n\n ans = [\n sg.Segment2(br, bl),\n sg.Segment2(br, tr),\n sg.Segment2(tl, tr),\n sg.Segment2(tl, bl),\n ]\n if extra_obstacles is not None:\n ans.extend(extra_obstacles)\n \n return ans\n @property\n def smart_entities(self) -> List[MultiAgentEntity]:\n return self.e_prey + self.e_predators\n pass\n \n @property\n def agents(self) -> List[MultiAgent]:\n return self.a_prey + self.a_predators\n\n @staticmethod\n def prune(li1: List[MultiAgentEntity], li2: List[MultiAgent]) -> None:\n is_li_2_good = li2 is not None\n for index in range(len(li1)-1, -1, -1):\n # if is_li_2_good:print(\"Is dead: \", li1[index].is_dead)\n if li1[index].is_dead:\n li1.pop(index)\n if is_li_2_good:\n li2.pop(index)\n \n \n\n def update(self):\n self.dt = tmr() - self.prev_time\n self.prev_time = tmr()\n # print(self.dt)\n \"\"\"\n This performs a single step. It gets actions from each agents and adds a sample.\n \"\"\"\n # todo mike.\n actions = []\n states = []\n inds = []\n rewards = [0 for _ in (self.smart_entities)]\n # print('ree here coen', len(self.smart_entities))\n # todo potentially later add functionality to remove agents when they die.\n self.current_step_reward = [0 for i in (self.smart_entities)]\n for i in range(len(self.smart_entities)):\n state, indices = self._get_state(i)\n agent_action = self.agents[i].get_action(state)\n actions.append(agent_action)\n states.append(state)\n inds.append(indices)\n\n for i, action in enumerate(actions):\n rewards[i] += self._perform_action(self.smart_entities[i], action, inds[i])\n \n for i, action in enumerate(actions):\n # print('index ree',i)\n e = self.smart_entities[i] \n # if it died by not being old, then negative reward.\n if e.is_dead and e.lifetimer < self.entity_lifetimes[e.type]:\n rewards[i] += MultiAgentSimulation.DIE_REWARD\n # todo optimise\n self.agents[i].add_sample(states[i], action, rewards[i], self._get_state(i)[0])\n # print('pruning')\n # now prune dead agents:\n MultiAgentSimulation.prune(self.e_predators, self.a_predators)\n MultiAgentSimulation.prune(self.e_prey, self.a_prey)\n MultiAgentSimulation.prune(self.foods, None)\n\n self.grow_food()\n self.reproduce()\n # print('done')\n # self.prev_time = tmr()\n # print(f\"Time for one update = {round((ee - ss)*1000)}ms\")\n def grow_food(self):\n # print(\"Energy: \", self.e_prey[0].energy)\n # some sort of carrying capacity growth rate thing.\n rando = np.random.rand(len(self.foods))\n r = 0.01\n index = rando < r * (1 - len(self.foods) / MultiAgentSimulation. 
MAX_FOOD)\n for i in np.arange(len(self.foods))[index]:\n # v = (np.random.rand(2) * 2 - 1) * 100\n # k = 5\n # v[(v < k) & (v >= 0)] = k\n # v[(v > -k) & (v <= 0)] = -k\n # pos = np.clip(v + self.foods[i].pos, 0, MultiAgentSimulation.SIZE)\n pos = np.floor(np.random.rand(2) * MultiAgentSimulation.SIZE)\n self.foods.append(MultiAgentEntity.food(pos))\n\n if np.random.rand() < 1 * (1 - len(self.foods) / MultiAgentSimulation. MAX_FOOD):\n # # print('adding')\n self.foods.append(MultiAgentEntity.food(np.floor(np.random.rand(2) * MultiAgentSimulation.SIZE)))\n pass\n\n def reproduce(self):\n \"\"\"\n This goes through all of the entities and reproduces if they have enough energy.\n \"\"\"\n \n v1s = [self.e_prey, self.e_predators]\n v2s = [self.a_prey, self.a_predators]\n for v1, v2 in zip(v1s, v2s):\n new_agents = []\n new_ents = []\n for i, e in enumerate(v1):\n if e.energy >= self.entity_energy_to_reproduce[e.type]:\n # print(\"entities \")\n new_agent = v2[i].reproduce()\n new_entity = v1[i].copy()\n dir = np.array([1, 0])\n dist = (new_entity.radius + e.radius) * 1.2\n if self._outside(new_entity.pos + dir * dist):\n dir *= -1\n new_entity.pos += dir * dist\n \n new_agents.append(new_agent)\n new_ents.append(new_entity)\n e.energy = MultiAgentEntity.START_ENERGY / 2 # self.entity_energy_to_reproduce[e.type] - 5\n # else:print('e=', e.energy)\n v1.extend(new_ents)\n v2.extend(new_agents)\n \n def _get_state(self, index: int) -> Tuple[State, np.array]:\n return self.state_rep.get_state(index, self)\n\n def get_all_entities(self):\n return self.foods + self.smart_entities\n pass\n \n def _outside(self, pos: np.array, gap=0) -> bool:\n return np.any(pos - gap < 0) or np.any(pos+gap >= self.size)\n\n def _perform_action(self, ent: MultiAgentEntity, action: SimulDiscreteAction, indices_of_close_points: np.array):\n if ent.is_dead: \n return self.DIE_REWARD\n \n indices_of_close_points = indices_of_close_points\n index_of_e = self.smart_entities.index(ent)\n acc = self.velocities[action] * 20 / MultiAgentSimulation.masses[ent.type]\n old_pos = ent.pos.copy()\n ent.update(acc, self.dt / 5 * 5)\n new_pos = ent.pos\n # todo remwove\n # ent.pos = old_pos;\n # Am I out of bounds\n if (self._outside(new_pos, ent.radius)):\n ent.pos = old_pos\n ent.velocity *= 0\n self.current_step_reward[index_of_e] += self.OOB_REWARD\n return self.current_step_reward[index_of_e]\n \n # Am I hitting an obstacle TODO\n for s in self.extra_obstacles:\n if sg.circle_line_segment_intersection(new_pos, ent.radius, s.start.as_tuple(), s.end.as_tuple(), False):\n # actual intersection\n ent.pos = old_pos\n ent.velocity *= 0\n # todo not sure if we should return here?\n self.current_step_reward[index_of_e] += self.OOB_REWARD\n return self.current_step_reward[index_of_e]\n # base level reward.\n self.current_step_reward[index_of_e] += self.SINGLE_STEP_REWARD \n\n # Now we go over the entities that this might touch.\n all_entites = self.get_all_entities()\n for index in np.arange(len(indices_of_close_points))[indices_of_close_points]:\n newe = all_entites[index]\n # don't interact with dead agent.\n if newe.is_dead:\n continue\n\n if np.linalg.norm(newe.pos - ent.pos) <= newe.radius + ent.radius:\n # Is touching food, so some interaction can take place.\n if ent.type == EntityType.PREY:\n if newe.type == EntityType.PREY:\n # no interaction with prey\n pass\n elif newe.type == EntityType.PREDATOR:\n index_of_pred = self.smart_entities.index(newe)\n # Do we need to do this, or can the predator eat in there step?\n # 
index_of_pred_in_\n # get eaten. How do we handle rewards????\n self.current_step_reward[index_of_e] += self.DIE_REWARD\n self.current_step_reward[index_of_pred] += self.rewards[ent.type]\n newe.eat(ent)\n # print(\"Making prey dead bc pred\")\n ent.make_dead()\n\n elif newe.type == EntityType.FOOD:\n # eat\n ent.eat(newe)\n # print(\"Making food dead bc prey\")\n newe.make_dead()\n self.current_step_reward[index_of_e] += self.rewards[newe.type]\n\n elif ent.type == EntityType.PREDATOR:\n #predator does not interact with preds or food (grass).\n\n if newe.type == EntityType.PREY:\n ent.eat(newe)\n self.current_step_reward[index_of_e] += self.rewards[newe.type]\n # eat prey\n pass\n else:\n assert 1 == 0 # shouldn't be here \n return self.current_step_reward[index_of_e]\n pass\n \n @staticmethod\n def get_proper_sim_for_learning(state_rep: StateRepresentation, agent_pred='hard', agent_prey='hard', n_preds=10, n_preys=30) -> \"MultiAgentSimulation\":\n # n_preds = 10\n # n_preys = 30\n pred_pos = [\n # (100, 100),\n # (200, 200),\n # (300, 300),\n # (400, 400),\n # (500, 500),\n ]\n prey_pos = [\n # (600, 600),\n # (700, 700),\n # (800, 800),\n # (900, 900),\n # (1000, 1000),\n ]\n for p in range(n_preds):\n pred_pos.append((np.random.rand(2) * np.array([0.5, 1])) * MultiAgentSimulation.SIZE)\n for p in range(n_preys):\n prey_pos.append((np.random.rand(2) * np.array([0.5, 1]) + np.array([0.5, 0])) * MultiAgentSimulation.SIZE)\n preds = [MultiAgentEntity(np.array(v), EntityType.PREDATOR, 7) for v in pred_pos]\n \n preys = [MultiAgentEntity(np.array(v), EntityType.PREY, 5) for v in prey_pos]\n foods = []\n for i in range(40):\n # break\n foods.append(MultiAgentEntity.food(np.array([i*30, i*30])))\n\n for i in range(300):\n # break\n foods.append(MultiAgentEntity.food(np.floor(np.random.rand(2) * MultiAgentSimulation.SIZE)))\n \n agent_func_pred = HardCodePred if agent_pred == 'hard' else (RandomMultiAgent if agent_pred == 'random' else lambda : ERLMulti(state_rep.get_num_features(), len(SimulDiscreteAction)))\n agent_func_prey = HardCodePrey if agent_prey == 'hard' else (RandomMultiAgent if agent_prey == 'random' else lambda : ERLMulti(state_rep.get_num_features(), len(SimulDiscreteAction)))\n\n agent_preds = [ agent_func_pred() for _ in preds ]\n\n agent_prey = [ agent_func_prey() for _ in preys ]\n\n return MultiAgentSimulation(\n state_rep, agent_preds, agent_prey, preds, preys, foods\n )\n \n\n @staticmethod\n def basic_test(state_rep: StateRepresentation) -> \"MultiAgentSimulation\":\n preds = [\n MultiAgentEntity(np.array([100, 100]), EntityType.PREDATOR, 10), \n MultiAgentEntity(np.array([600, 600]), EntityType.PREDATOR, 10)\n ]\n \n preys = [\n MultiAgentEntity(np.array([500, 500]), EntityType.PREY, 5), \n MultiAgentEntity(np.array([600, 500]), EntityType.PREY, 5), \n MultiAgentEntity(np.array([700, 500]), EntityType.PREY, 5), \n MultiAgentEntity(np.array([800, 500]), EntityType.PREY, 5), \n # MultiAgentEntity(np.array([550, 450]), EntityType.PREY, 5)\n ]\n foods = []\n for i in range(40):\n # break\n foods.append(MultiAgentEntity.food(np.array([i*30, i*30])))\n\n for i in range(100):\n # break\n foods.append(MultiAgentEntity.food(np.floor(np.random.rand(2) * MultiAgentSimulation.SIZE)))\n \n\n agent_preds = [\n HardCodePred(),\n HardCodePred(),\n ]\n\n agent_prey = [\n HardCodePrey(),\n HardCodePrey(),\n HardCodePrey(),\n HardCodePrey(),\n # HardCodePrey(),\n ]\n\n return MultiAgentSimulation(\n state_rep, agent_preds, agent_prey, preds, preys, foods\n )\n \n 
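# Hypothetical driver loop (sketch only; names come from this class):\n    #   sim = MultiAgentSimulation.get_proper_sim_for_learning(state_rep)\n    #   while sim.e_prey and sim.e_predators:\n    #       sim.update()\n    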
pass\n","repo_name":"Michael-Beukman/ERLEcosystemSimulation","sub_path":"simulation/multi_agent_sim/MultiAgentSimulation.py","file_name":"MultiAgentSimulation.py","file_ext":"py","file_size_in_byte":16869,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"8"} +{"seq_id":"9127860868","text":"import config as cf\nfrom App import model\nimport datetime\nimport csv\nfrom DISClib.DataStructures import listiterator as it\nimport time\nfrom math import radians\nimport sys\n\n\"\"\"\nThe controller is in charge of mediating between the view and the model.\nSome operations need to invoke the model several times, or to merge\nseveral of the model's answers into a single response. That\nresponsibility falls on the controller.\n\"\"\"\n\n# ___________________________________________________\n# Catalog initialization\n# ___________________________________________________\n\n\ndef init():\n    \"\"\"\n    Calls the model's initialization function.\n    \"\"\"\n    # catalog is used to interact with the model\n    analyzer = model.newAnalyzer()\n    return analyzer\n\n\n\n# ___________________________________________________\n# Functions for loading data and storing\n# it in the models\n# ___________________________________________________\n\ndef loadData(analyzer, accidentsfile):\n    \"\"\"\n    Loads the data from the CSV files into the model\n    \"\"\"\n    accidentsfile = cf.data_dir + accidentsfile\n    input_file = csv.DictReader(open(accidentsfile, encoding=\"utf-8\"),\n                                delimiter=\",\")\n    # print(sys.getsizeof(input_file))\n    i = 0\n    p = 0\n    for accident in input_file:\n\n        del accident['Description']\n        del accident['Source']\n        del accident['TMC']\n        del accident['Number']\n        del accident['Street']\n        del accident['Side']\n        del accident['City']\n        del accident['County']\n        del accident['Zipcode']\n        del accident['Timezone']\n        del accident['Airport_Code']\n        del accident['Weather_Timestamp']\n        del accident['Temperature(F)']\n        del accident['Wind_Chill(F)']\n        del accident['Humidity(%)']\n        del accident['Pressure(in)']\n        del accident['Visibility(mi)']\n        del accident['Wind_Direction']\n        del accident['Wind_Speed(mph)']\n        del accident['Precipitation(in)']\n        del accident['Weather_Condition']\n        del accident['Amenity']\n        del accident['Bump']\n        del accident['Crossing']\n        del accident['Give_Way']\n        del accident['Junction']\n        del accident['No_Exit']\n        del accident['Railway']\n        del accident['Roundabout']\n        del accident['Station']\n        del accident['Stop']\n        del accident['Traffic_Calming']\n        del accident['Traffic_Signal']\n        del accident['Turning_Loop']\n        del accident['Sunrise_Sunset']\n        del accident['Nautical_Twilight']\n        del accident['Civil_Twilight']\n        del accident['Astronomical_Twilight']\n        del accident['End_Lat']\n        del accident['End_Lng']\n        del accident['End_Time']\n        # print(sys.getsizeof(accident))\n        model.addaccident(analyzer, accident)\n        # if i%29743 == 0:\n        if i%8787 == 0:\n        #if i%30000 == 0:\n            print (\" \" + str(p) + \"%\" + \" completed\", end=\"\\r\")\n            p+=1\n        i+=1\n    # print(sys.getsizeof(analyzer),'Bytes') \n    return analyzer\n\n\n# ___________________________________________________\n# Functions for queries\n# ___________________________________________________\ndef crimesSize(analyzer):\n    \"\"\"\n    Number of crimes read\n    \"\"\"\n    return model.crimesSize(analyzer)\n\n\ndef indexHeight(analyzer):\n    \"\"\"\n    Height of the index (tree)\n    \"\"\"\n    return model.indexHeight(analyzer)\n\n\ndef indexSize(analyzer):\n    \"\"\"\n    Number of nodes in the tree\n    \"\"\"\n    return model.indexSize(analyzer)\n\n\ndef minKey(analyzer):\n    \"\"\"\n    The smallest key in the tree\n    \"\"\"\n    return model.minKey(analyzer)\n\n\ndef maxKey(analyzer):\n    \"\"\"\n    The largest key in the tree\n    \"\"\"\n    return model.maxKey(analyzer)\ndef keyset (map):\n    return model.keyset(map)\n\ndef getAccident(tree,key):\n    return model.getKey(tree,key)\n\ndef filterSeverityIndividual(tree,raw_date):\n    date = datetime.datetime.strptime(raw_date, '%Y-%m-%d').date()\n    lst_result = getAccident(tree,date)\n    result = it.newIterator(lst_result)\n    severity = {\"1\":0,\n                \"2\":0,\n                \"3\":0,\n                \"4\":0}\n    while it.hasNext(result):\n        accident = it.next(result)\n        severity[accident['Severity']] += 1 \n    return severity\n\ndef accidentBeforeDate(tree,raw_date):\n    date = datetime.datetime.strptime(raw_date, '%Y-%m-%d').date()\n    result = model.accidentsBeforeDate(tree,date)\n    if result is None:\n        return None\n    total = 0\n    dates = {}\n    iterator1 = it.newIterator(result)\n    while it.hasNext(iterator1):\n        day = it.next(iterator1)\n        accidents = model.getKey(tree,day)\n        dates[day] = model.listSize(accidents)\n        total += model.listSize(accidents) \n    return (total,dates)\n\ndef accidentsInDateRange(tree,low_raw_date,high_raw_date):\n    loDate = datetime.datetime.strptime(low_raw_date, '%Y-%m-%d').date()\n    hiDate = datetime.datetime.strptime(high_raw_date, '%Y-%m-%d').date()\n    num_accidents = 0\n    severity = {\"1\":0,\n                \"2\":0,\n                \"3\":0,\n                \"4\":0}\n    lst_accidents_iterator = it.newIterator(model.range_accidents(tree,loDate,hiDate))\n    while it.hasNext(lst_accidents_iterator):\n        bucket = it.next(lst_accidents_iterator)\n        bucket_iterator = it.newIterator(bucket)\n        while it.hasNext(bucket_iterator):\n            element = it.next(bucket_iterator)\n            num_accidents += 1\n            severity[element['Severity']] += 1\n    return (num_accidents,severity)\n\ndef accidents4state(tree,low_raw_date,high_raw_date):\n    loDate = datetime.datetime.strptime(low_raw_date, '%Y-%m-%d').date()\n    hiDate = datetime.datetime.strptime(high_raw_date, '%Y-%m-%d').date()\n    date_range = model.range_accidents(tree,loDate,hiDate)\n    max_accidents = [0,'']\n    date_iterator = it.newIterator(date_range)\n    states = {}\n    while it.hasNext(date_iterator):\n        accidents_lst = it.next(date_iterator)\n        num_accidents = model.listSize(accidents_lst)\n        if num_accidents > max_accidents[0]:\n            max_accidents[0] = num_accidents\n            max_accidents[1] = datetime.datetime.strptime(accidents_lst['first']['info']['Start_Time'], '%Y-%m-%d %H:%M:%S').date()\n        \n        accidents_lst_it = it.newIterator(accidents_lst)\n        while it.hasNext(accidents_lst_it):\n            accident = it.next(accidents_lst_it)\n            if accident['State'] not in states:\n                states[accident['State']] = 1\n            else:\n                states[accident['State']] += 1\n    return (max_accidents[1],max_accidents[0],states)\n\n    \ndef accidentsrangetime(tree,time1,time2):\n\n    time1=model.strtotimedate(time1,'time')\n    time2=model.strtotimedate(time2,'time')\n    time1=model.RedondearHoras(time1)\n    time2=model.RedondearHoras(time2)\n\n    result=model.values(tree,time1,time2)\n\n\n    if result is None:\n        return None\n    \n    total=0\n    severity = {\"1\":0,\n                \"2\":0,\n                \"3\":0,\n                \"4\":0}\n    iterator1=it.newIterator(result)\n    while it.hasNext(iterator1):\n        lst=it.newIterator(it.next(iterator1))\n        while it.hasNext(lst):\n            total+=1\n            severity[it.next(lst)['Severity']] += 1\n\n    return (total, severity)\n\ndef filter_distance(lst,lon,lat,radius):\n    all_data_iterator = it.newIterator(lst)\n    total = 0\n    days = [0,0,0,0,0,0,0]\n    while it.hasNext(all_data_iterator):\n        
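# compare every accident's coordinates against the query point\n        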
accident = it.next(all_data_iterator)\n        distance = model.distance_lat_lon(radians(lon),radians(lat),radians(float(accident['Start_Lng'])),radians(float(accident['Start_Lat'])))\n        if abs(distance) <= radius:\n            accidentdate = datetime.datetime.strptime(accident['Start_Time'], '%Y-%m-%d %H:%M:%S')\n            total += 1\n            days[accidentdate.weekday()] += 1\n\n    return (total,days)\n    \n","repo_name":"PEPE-EDA-dsanchezu-jarroyo/Reto3-202020-Template","sub_path":"App/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":7909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"8"} +{"seq_id":"39562978085","text":"#The time module provides a function, also named time, that returns the current Greenwich Mean Time in “the epoch”, which is an arbitrary time used as a reference point. On UNIX systems, the epoch is 1 January 1970.\nimport time\n\nstamp = time.time()\n\n\n#Write a script that reads the current time and converts it to a time of day in hours, minutes, and seconds, plus the number of days since the epoch.\n\ndef humanTime(stamp):\n\n    year =int( 1970 + stamp// 31556926)\n    month =int( 1 + stamp%31556926//2629743)\n    day = int(1 + stamp%2629743//86400)\n    hour = int((stamp % 86400) //3600)\n    minute = int(stamp%3600//60)\n    sec = int(stamp%60)\n    print(\"Today is \" + str(day) +\"/\"+ str(month) +\"/\"+ str(year) +\" \"+ str(hour) + \":\"+ str(minute) + \":\"+ str(sec) )\nhumanTime(stamp)","repo_name":"TetianaStrelnikova/Python_turtle","sub_path":"timestamp_convert.py","file_name":"timestamp_convert.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"8220343558","text":"import time\nimport tracemalloc\n\ndef performance(func):\n    if not hasattr(performance, \"counter\"):\n        performance.counter = 0\n        performance.total_time = 0\n        performance.total_mem = 0\n\n    def wrapped_function(*args, **kwargs):\n        start_time = time.perf_counter()\n        performance.counter += 1\n        tracemalloc.start()\n\n        result=func(*args, **kwargs)\n\n        end_time = time.perf_counter()\n        performance.total_mem += tracemalloc.get_traced_memory()[1]\n        tracemalloc.stop()\n        performance.total_time +=(end_time-start_time)\n        return result\n\n\n\n    return wrapped_function\n","repo_name":"canbula/ParallelProgramming","sub_path":"Week03/hw/decorators_cengizhan_cam.py","file_name":"decorators_cengizhan_cam.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"8"} +{"seq_id":"38246718123","text":"# Task 2: Find the sum of the digits of a three-digit number.\n# *Example:*\n# 123 -> 6 (1 + 2 + 3)\n# 100 -> 1 (1 + 0 + 0) \n\n\na = int(input(\"Enter a three-digit number: \"))\nwhile a > 999 or a < 100:\n    print(\"Enter a three-digit number:\")\n    a = int(input())\n# n1 = a % 10\n# n2 = a // 10 % 10\n# n3 = a // 100\n# total = n1+n2+n3\n# print(total)\n\nn1 = a // 10 ** 0 % 10\nn2 = a // 10 ** 1 % 10\nn3 = a // 10 ** 2 % 10\nprint(n1 + n2 + n3)","repo_name":"Dobrycode/Python_HomeWorks","sub_path":"Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"72791504903","text":"import os, sys\nimport logging\nimport logging.handlers\nimport traceback\n\nfrom Queue import Empty\nfrom multiprocessing import Process, Queue, current_process\n\n\n_fileFormat = '%(asctime)s %(levelname)-8s %(processName)-10s %(name)s %(message)s'\n_echoFormat = '%(asctime)s %(levelname)-8s %(processName)-10s %(name)s %(message)s'\n
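# both formats carry processName, so records funnelled through the queue\n# stay attributable to the worker process that emitted them\n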
_fileSize = 1000000\n_fileCount = 10\nlogQueue = Queue()\n\nclass QueueLogHandler(logging.Handler):\n    def __init__(self, queue):\n        logging.Handler.__init__(self)\n        self.queue = queue\n    \n    def emit(self, record):\n        try:\n            item = record.exc_info\n            if item:\n                _ = self.format(record)\n                record.exc_info = None\n            self.queue.put_nowait(record)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except:\n            self.handleError(record)\n\ndef echoLog(logger, format):\n    console = logging.StreamHandler()\n    formatter = logging.Formatter(format)\n    console.setFormatter(formatter)\n    logger.addHandler(console)\n\ndef fileLog(logger, filename, filesize, count, format):\n    handler = logging.handlers.RotatingFileHandler(filename, 'a', filesize, count)\n    formatter = logging.Formatter(format)\n    handler.setFormatter(formatter)\n    logger.addHandler(handler)\n\ndef handleLogEvents(inbound, config):\n    filename = config.get('filename', None)\n    filesize = config.get('logsize', _fileSize)\n    filecount = config.get('logcount', _fileCount)\n    fileformat = config.get('fileformat', _fileFormat)\n    echoformat = config.get('echoformat', _echoFormat)\n    logecho = config.get('echo', False)\n    log = logging.getLogger()\n\n    if logecho:\n        echoLog(log, echoformat)\n    if filename is not None:\n        fileLog(log, filename, filesize, filecount, fileformat)\n\n    while True:\n        try:\n            entry = inbound.get()\n            if entry is None:\n                break\n            logger = logging.getLogger(entry.name)\n            logger.handle(entry)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except:\n            print >> sys.stderr, 'exception raised inside of handleLogEvents()'\n            traceback.print_exc(file=sys.stderr)\n\ndef start(config):\n    \"\"\"Start the global log handler for hakaru\n\n    config is a Dict with the following entries and their default values:\n        { \"filename\": None,\n          \"logsize\": 1000000,\n          \"logcount\": 10,\n          \"echo\": False,\n          \"fileformat\": \"%(asctime)s %(levelname)-8s %(processName)-10s %(name)s %(message)s\",\n          \"echoformat\": \"%(asctime)s %(levelname)-8s %(processName)-10s %(name)s %(message)s\"\n        }\n\n    NOTE: this will start a listener process to handle all log events\n    \"\"\"\n    listener = Process(target=handleLogEvents, args=(logQueue, config.get('log', {})))\n    listener.start()\n","repo_name":"bear/hakaru","sub_path":"hakaru/loghandler.py","file_name":"loghandler.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"69842848263","text":"# -*- coding: utf-8 -*-\n\nimport threading\nimport sqlite3\n\nglobal db, sql, lock\ndb = sqlite3.connect('data.db', check_same_thread=False)\nsql = db.cursor()\nlock = threading.Lock()  # blocks the second thread, because sqlite3 does not work across threads\n\n\ndef new(user_id, islock=True):  # create a new user in the database\n    if islock:\n        lock.acquire(True)\n    sql.execute(\"SELECT id FROM users WHERE id = ?\", (user_id,))\n    fetch = sql.fetchone()\n    if not fetch:\n        sql.execute(\"INSERT INTO users VALUES (?, ?, ?, ?, ?)\", (user_id, 0, 0, 0, 1))\n        db.commit()\n    if islock:\n        lock.release()\n\ndef game(user_id, isGame, islock=True):  # messages will be treated as moves in the rock-paper-scissors game\n    if islock:\n        lock.acquire(True)\n    sql.execute(f\"UPDATE users SET game = {isGame} WHERE id = {user_id}\")\n    db.commit()\n    if islock:\n        lock.release()\n\n\ndef get_game(user_id, islock=True):\n    if islock:\n        lock.acquire(True)\n    sql.execute(f\"SELECT game FROM users WHERE id = {user_id}\")\n    res = sql.fetchone()\n    if islock:\n        lock.release()\n    return res[0]\n\n\n
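# Hypothetical call pattern (sketch only, not from the original bot):\n#   new(user_id)                    # takes the module lock itself\n#   game(user_id, 1, islock=False)  # caller already holds the lock\n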
def situation(user_id, isFeel,\n              islock=True):  # messages will be stored as a situation note. Usually after '/note'\n    if islock:\n        lock.acquire(True)\n    sql.execute(f\"UPDATE users SET situation = {isFeel} WHERE id = {user_id}\")\n    db.commit()\n    if islock:\n        lock.release()\n\n\ndef get_situation(user_id, islock=True):\n    if islock:\n        lock.acquire(True)\n    sql.execute(f\"SELECT situation FROM users WHERE id = {user_id}\")\n    res = sql.fetchone()\n    if islock:\n        lock.release()\n    return int(res[0])\n\n\ndef get_chats(islock=True):  # returns a list of the ids of all users\n    if islock:\n        lock.acquire(True)\n    sql.execute(f\"SELECT id FROM users\")\n    res = [i for i in sql.fetchall()]\n    if islock:\n        lock.release()\n    return res\n\n\ndef get_days(user_id, islock=True):\n    if islock:\n        lock.acquire(True)\n    sql.execute(f\"SELECT days FROM users WHERE id = {user_id}\")\n    res = sql.fetchone()\n    if islock:\n        lock.release()\n    return res[0]\n\n\ndef new_day(user_id, islock=True):\n    if islock:\n        lock.acquire(True)\n    sql.execute(\"UPDATE users SET days = days + 1 WHERE id = ?\", (user_id,))\n    db.commit()\n    if islock:\n        lock.release()\n\n\ndef utc(user_id, time, islock=True):  # set the user's time zone\n    if islock:\n        lock.acquire(True)\n    sql.execute(\"UPDATE users SET utc = ? WHERE id = ?\", (time, user_id))\n    db.commit()\n    if islock:\n        lock.release()\n\n\ndef get_utc(user_id, islock=True):\n    if islock:\n        lock.acquire(True)\n    sql.execute(f\"SELECT utc FROM users WHERE id = {user_id}\")\n    res = sql.fetchone()\n    if islock:\n        lock.release()\n    return res[0]\n\n\ndef del_table(islock=True):  # wipes the whole database (WARNING!)\n    if islock:\n        lock.acquire(True)\n    sql.execute(\"DELETE from users\")\n    db.commit()\n    if islock:\n        lock.release()","repo_name":"gigantina/meta","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"14965852198","text":"from torch import nn\nimport torch as th\n\nfrom practical_deep_stereo import network_blocks\n\n\ndef _pad_zero_columns_from_left(tensor, number_of_columns):\n    return nn.ZeroPad2d((number_of_columns, 0, 0, 0))(tensor)\n\n\nclass Matching(nn.Module):\n    def __init__(self, maximum_disparity, operation):\n        \"\"\"Returns initialized matching module.\n\n        Args:\n            maximum_disparity: Upper limit of disparity range\n                               [0, maximum_disparity].\n            operation: Operation that is applied to concatenated\n                       left-right descriptors for all disparities.\n                       This can be a network module or a function.\n        \"\"\"\n        super(Matching, self).__init__()\n        self._maximum_disparity = maximum_disparity\n        self._operation = operation\n\n    def set_maximum_disparity(self, maximum_disparity):\n        self._maximum_disparity = maximum_disparity\n\n    def forward(self, left_embedding, right_embedding):\n        \"\"\"Returns concatenated compact matching signatures for every disparity.\n\n        Args:\n            left_embedding, right_embedding: Tensors for left and right\n                            image embeddings with indices\n                            [batch_index, feature_index, y, x].\n\n        Returns:\n            matching_signature: 5D tensor that contains concatenated\n                                matching signatures (or matching score,\n                                depending on \"operation\") for every disparity.\n                                Tensor has indices\n                                [batch_index, feature_index, disparity_index,\n                                y, x].\n        \"\"\"\n        padded_right_embedding = _pad_zero_columns_from_left(\n            right_embedding, self._maximum_disparity)\n        matching_signatures = []\n        concatenated_embedding = th.cat([left_embedding, right_embedding],\n                                        dim=1)\n        
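# disparity 0: no shift, the two embeddings are already aligned\n        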
matching_signatures.append(self._operation(concatenated_embedding))\n for disparity in range(1, self._maximum_disparity + 1):\n shifted_right_embedding = \\\n padded_right_embedding[:, :, :,\n self._maximum_disparity - disparity:-disparity]\n concatenated_embedding = th.cat(\n [left_embedding, shifted_right_embedding], dim=1)\n matching_signatures.append(self._operation(concatenated_embedding))\n return th.stack(matching_signatures, dim=2)\n\n\nclass MatchingOperation(nn.Module):\n \"\"\"Operation applied to concatenated left / right descriptors.\"\"\"\n\n def __init__(self,\n number_of_concatenated_descriptor_features=128,\n number_of_features=64,\n number_of_compact_matching_signature_features=8,\n number_of_residual_blocks=2):\n \"\"\"Returns initialized match operation network.\n\n For every disparity, left image descriptor is concatenated\n along the feature dimension with shifted by the disparity value\n right image descriptor and passed throught the network.\n \"\"\"\n super(MatchingOperation, self).__init__()\n matching_operation_modules = [\n network_blocks.convolution_3x3(\n number_of_concatenated_descriptor_features, number_of_features)\n ]\n matching_operation_modules += [\n network_blocks.ResidualBlock(number_of_features)\n for _ in range(number_of_residual_blocks)\n ]\n matching_operation_modules += [\n network_blocks.convolution_3x3(\n number_of_features,\n number_of_compact_matching_signature_features)\n ]\n self._matching_operation_modules = nn.ModuleList(\n matching_operation_modules)\n\n def forward(self, concatenated_descriptors):\n \"\"\"Returns compact matching signature.\n\n Args:\n concatenated_descriptors: concatenated left / right image\n descriptors of size\n batch_size x 128 x (height / 4) x (width / 4).\n\n Returns:\n compact_matching_signature: tensor of size\n batch_size x 8 x (height / 4) x (width / 4).\n \"\"\"\n compact_matching_signature = concatenated_descriptors\n for _module in self._matching_operation_modules:\n compact_matching_signature = _module(compact_matching_signature)\n return compact_matching_signature\n","repo_name":"tlkvstepan/PracticalDeepStereo_NIPS2018","sub_path":"practical_deep_stereo/matching.py","file_name":"matching.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"8"} +{"seq_id":"8961408974","text":"# The base has to be <10.. 
Else, the nth power has atleast n+1 digits\n\nCou = 0\n\nfor base in range(1,10):\n Pow = 1\n while(len(str(base**Pow)) == Pow):\n Cou += 1\n Pow += 1\n\nprint(Cou)\n","repo_name":"sanathkuppa/ProjectEuler","sub_path":"Problem_63.py","file_name":"Problem_63.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"1658650841","text":"import random, string\n\ncompany = ['red car', 'blue wagon', 'orange', 'green engine']\ndomain = ['school.edu', 'business.com', 'organization.org', 'government.gov', 'network.net']\nletters = 'abcdefghijklmnopqrstuvwxyz'\ndigits = '0123456789'\n\ndef randomstr(i):\n return ''.join(random.choice(letters) for _ in range(i))\n \ndef randomnum(i):\n return ''.join(random.choice(digits) for _ in range(i))\n\nclass Person:\n def __init__(self):\n # first name\n self.f = randomstr(random.randrange(3,10)).capitalize()\n # last name\n self.l = randomstr(random.randrange(3,10)).capitalize()\n # email address\n if len(self.l) <= 6:\n trunclast = self.l\n else:\n trunclast = self.l[0:6]\n self.a = (self.f[0] + trunclast + '@' + random.choice(domain)).lower()\n # company name\n self.c = string.capwords(random.choice(company))\n # random number\n self.n = randomnum(random.randrange(4,8))\n\nPersonList = [] \nfor i in range(20):\n i = Person()\n PersonList.append(i)\n\n\nx = random.choice(PersonList)\n\n\n\"\"\"\nprint(x.f)\n\"\"\"\n\n\nfor i in PersonList:\n print('===================================')\n print('First Name is: ' + i.f)\n print('Last Name is: ' + i.l)\n print('Email Address is: ' + i.a)\n print('Company is: ' + i.c)\n print('Number is: ' + i.n)\n print('===================================')\n","repo_name":"MWasilczyk/eDiscovery","sub_path":"DataCreation/Person_Class.py","file_name":"Person_Class.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"25442866813","text":"class Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n solutionMap = {}\n for i in range(len(nums)):\n sol = solutionMap.get(nums[i], None)\n if sol is not None:\n return [sol, i]\n else:\n other = target - nums[i]\n solutionMap[other] = i\n return None","repo_name":"mayankpoddar/leetcodeProblems","sub_path":"1-two-sum/1-two-sum.py","file_name":"1-two-sum.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"41952421773","text":"\"\"\"Test HSLuv.\"\"\"\nfrom coloraide import NaN\nimport unittest\nfrom .. 
import util\nfrom coloraide.everything import ColorAll as Color\nimport pytest\n\n\nclass TestsOkhsl(util.ColorAssertsPyTest):\n \"\"\"Test Okhsl.\"\"\"\n\n COLORS = [\n ('red', 'color(--hsluv 12.177 100 53.237)'),\n ('orange', 'color(--hsluv 44.683 100 74.934)'),\n ('yellow', 'color(--hsluv 85.874 100 97.139)'),\n ('green', 'color(--hsluv 127.72 100 46.228)'),\n ('blue', 'color(--hsluv 265.87 100 32.301)'),\n ('indigo', 'color(--hsluv 279.33 100 20.47)'),\n ('violet', 'color(--hsluv 307.72 79.542 69.695)'),\n ('white', 'color(--hsluv 0 0 100)'),\n ('gray', 'color(--hsluv 0 0 53.585)'),\n ('black', 'color(--hsluv none 0 0)'),\n # Test color\n ('color(--hsluv 270 30 50)', 'color(--hsluv 270 30 50)'),\n ('color(--hsluv 270 30 50 / 0.5)', 'color(--hsluv 270 30 50 / 0.5)'),\n ('color(--hsluv 50% 50% 50% / 50%)', 'color(--hsluv 180 50 50 / 0.5)'),\n ('color(--hsluv none none none / none)', 'color(--hsluv none none none / none)'),\n # Test percent ranges\n ('color(--hsluv 0% 0% 0%)', 'color(--hsluv 0 0 none)'),\n ('color(--hsluv 100% 100% 100%)', 'color(--hsluv 360 100 100 / 1)'),\n ('color(--hsluv -100% -100% -100%)', 'color(--hsluv -360 -100 -100 / 1)')\n ]\n\n @pytest.mark.parametrize('color1,color2', COLORS)\n def test_colors(self, color1, color2):\n \"\"\"Test colors.\"\"\"\n\n self.assertColorEqual(Color(color1).convert('hsluv'), Color(color2))\n\n\nclass TestHSLuvSerialize(util.ColorAssertsPyTest):\n \"\"\"Test HSLuv serialization.\"\"\"\n\n COLORS = [\n # Test color\n ('color(--hsluv 50 30 50 / 0.5)', {}, 'color(--hsluv 50 30 50 / 0.5)'),\n # Test alpha\n ('color(--hsluv 50 30 50)', {'alpha': True}, 'color(--hsluv 50 30 50 / 1)'),\n ('color(--hsluv 50 30 50 / 0.5)', {'alpha': False}, 'color(--hsluv 50 30 50)'),\n # Test None\n ('color(--hsluv 50 30 none)', {}, 'color(--hsluv 50 30 0)'),\n ('color(--hsluv 50 30 none)', {'none': True}, 'color(--hsluv 50 30 none)'),\n # Test Fit (not bound)\n ('color(--hsluv 50 110 50)', {}, 'color(--hsluv 51.208 100 50)'),\n ('color(--hsluv 50 110 50)', {'fit': False}, 'color(--hsluv 50 110 50)')\n ]\n\n @pytest.mark.parametrize('color1,options,color2', COLORS)\n def test_colors(self, color1, options, color2):\n \"\"\"Test colors.\"\"\"\n\n self.assertEqual(Color(color1).to_string(**options), color2)\n\n\nclass TestHSluvProperties(util.ColorAsserts, unittest.TestCase):\n \"\"\"Test HSLuv.\"\"\"\n\n def test_names(self):\n \"\"\"Test HSL-ish names.\"\"\"\n\n self.assertEqual(Color('color(--hsluv none 0% 75% / 1)')._space.names(), ('h', 's', 'l'))\n\n def test_hue(self):\n \"\"\"Test `hue`.\"\"\"\n\n c = Color('color(--hsluv 120 50% 90% / 1)')\n self.assertEqual(c['hue'], 120)\n c['hue'] = 110\n self.assertEqual(c['hue'], 110)\n\n def test_saturation(self):\n \"\"\"Test `saturation`.\"\"\"\n\n c = Color('color(--hsluv 120 50% 90% / 1)')\n self.assertEqual(c['saturation'], 50)\n c['saturation'] = 60\n self.assertEqual(c['saturation'], 60)\n\n def test_lightness(self):\n \"\"\"Test `lightness`.\"\"\"\n\n c = Color('color(--hsluv 120 50% 90% / 1)')\n self.assertEqual(c['lightness'], 90)\n c['lightness'] = 80\n self.assertEqual(c['lightness'], 80)\n\n def test_alpha(self):\n \"\"\"Test `alpha`.\"\"\"\n\n c = Color('color(--hsluv 120 50% 90% / 1)')\n self.assertEqual(c['alpha'], 1)\n c['alpha'] = 0.5\n self.assertEqual(c['alpha'], 0.5)\n\n\nclass TestHSLuvNulls(util.ColorAsserts, unittest.TestCase):\n \"\"\"Test HSLuv Null cases.\"\"\"\n\n def test_null_input(self):\n \"\"\"Test null input.\"\"\"\n\n c = Color('hsluv', [NaN, 0.5, 1], 1)\n 
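# NaN in the hue channel is how ColorAide marks a missing ('none') hue\n        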
self.assertTrue(c.is_nan('hue'))\n\n def test_none_input(self):\n \"\"\"Test `none` null.\"\"\"\n\n c = Color('color(--hsluv none 0% 75% / 1)')\n self.assertTrue(c.is_nan('hue'))\n\n def test_null_normalization_min_sat(self):\n \"\"\"Test minimum saturation.\"\"\"\n\n c = Color('color(--hsluv 270 0% 0.75 / 1)').normalize()\n self.assertTrue(c.is_nan('hue'))\n\n def test_null_normalization_max_light(self):\n \"\"\"Test maximum lightness.\"\"\"\n\n c = Color('color(--hsluv 270 20% 100% / 1)').normalize()\n self.assertTrue(c.is_nan('hue'))\n\n def test_null_normalization_min_light(self):\n \"\"\"Test minimum lightness.\"\"\"\n\n c = Color('color(--hsluv 270 20% 0% / 1)').normalize()\n self.assertTrue(c.is_nan('hue'))\n\n\nclass TestsAchromatic(util.ColorAsserts, unittest.TestCase):\n \"\"\"Test achromatic.\"\"\"\n\n def test_achromatic(self):\n \"\"\"Test when color is achromatic.\"\"\"\n\n self.assertEqual(Color('hsluv', [270, 50, 0]).is_achromatic(), True)\n self.assertEqual(Color('hsluv', [270, 50, 100]).is_achromatic(), True)\n self.assertEqual(Color('hsluv', [270, 0, 50]).is_achromatic(), True)\n self.assertEqual(Color('hsluv', [270, 0.000001, 50]).is_achromatic(), True)\n self.assertEqual(Color('hsluv', [270, 50, 99.99999999]).is_achromatic(), True)\n self.assertEqual(Color('hsluv', [270, NaN, 0]).is_achromatic(), True)\n self.assertEqual(Color('hsluv', [270, NaN, 100]).is_achromatic(), True)\n self.assertEqual(Color('hsluv', [270, 0.0, NaN]).is_achromatic(), True)\n self.assertEqual(Color('hsluv', [270, 50, NaN]).is_achromatic(), True)\n self.assertEqual(Color('hsluv', [270, NaN, 50]).is_achromatic(), True)\n self.assertEqual(Color('hsluv', [270, NaN, NaN]).is_achromatic(), True)\n","repo_name":"facelessuser/coloraide","sub_path":"tests/test_hsluv/test_hsluv.py","file_name":"test_hsluv.py","file_ext":"py","file_size_in_byte":5704,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"8"} +{"seq_id":"15792634286","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nurl = 'https://qcpi.questcdn.com/cdn/posting/?group=1950787&provider=1950787'\r\n\r\nresponse = requests.get(url)\r\nsoup = BeautifulSoup(response.text, 'html.parser')\r\n\r\nposting_heading = soup.find('h3', text='Search Postings')\r\nposting_div = posting_heading.find_next('div', class_='posting')\r\n\r\npostings = posting_div.find_all('div', class_='posting-desc')\r\n\r\nfor i, posting in enumerate(postings[:5], 1):\r\n est_value = posting.find('div', class_='est-value').text.strip()\r\n notes = posting.find('div', class_='notes').text.strip()\r\n description = posting.find('div', class_='description').text.strip()\r\n closing_date = posting.find('div', class_='closing-date').text.strip()\r\n\r\n print(f'Posting {i}:')\r\n print('Est. Value:', est_value)\r\n print('Notes:', notes)\r\n print('Description:', description)\r\n print('Closing Date:', closing_date)\r\n print()\r\n","repo_name":"satwikaasridhar/SiteScraper","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"73086633542","text":"from .base import BaseOperator, DriverCheckMixin\nfrom .static import get_bet_limit\n\n\nclass BettingLimitCheckOperator(BaseOperator, DriverCheckMixin):\n\n def do(\n self,\n timeout: int = 15,\n ) -> int:\n \"\"\"To check the amount of money deposited.\n\n Args:\n timeout (int, optional): timeout parameter. 
Defaults to 15.\n\n        Raises:\n            UnableActionException:\n                Raised when the driver is not Chrome, Firefox or Edge.\n\n        Returns:\n            int: the amount of deposited money\n        \"\"\"\n        self._check_driver()\n        return self.__get_bet_limit(timeout=timeout,)\n\n    def __get_bet_limit(self, timeout: int = 15) -> int:\n        return get_bet_limit(\n            self._user,\n            self._driver,\n            timeout,\n        )\n","repo_name":"hmasdev/pyjpboatrace","sub_path":"pyjpboatrace/operator/betting_limit_checker.py","file_name":"betting_limit_checker.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"8"} +{"seq_id":"7227362416","text":"from flask_restful import Resource,reqparse\nimport plotly.graph_objects as go\nimport os,inspect\n\n\nclass Chart_PizzaModel():\n    \n    def __init__(self,values,names):\n        self.values = values\n        self.names = names\n    \n    \n    def gerar_chart(self):\n        fig = go.Figure(data=go.Pie(values=self.values,labels=self.names))\n        # import sys\n        # print(type(self.y), file=sys.stderr)\n        name_html = 'pizza_figure.html' \n        fig.write_html(name_html, auto_open=False)\n        path_html = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n        path_html = path_html.replace('models','')\n        return f\"{path_html}{name_html}\" \n    \n    \n\n\n    \n\n","repo_name":"Lucas0Braz/charts_generete","sub_path":"models/chart_pizza.py","file_name":"chart_pizza.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"3763729096","text":"import json\nimport os\nimport secrets\nimport glob\nfrom pathlib import Path\nimport utils\nfrom timeit import default_timer as timer\nfrom Bio.Seq import Seq\nfrom Bio import SeqIO\nfrom Bio.SeqRecord import SeqRecord\nfrom joblib import Parallel, delayed\nfrom typing import List\nfrom colorama import Fore, init\n\n\ndef sample_sequences(file: str, length: int, n: int, wd: str, n_nuc_threshold: float, virus: bool) -> List[SeqRecord]:\n    \"\"\"Function reads each given fasta file, randomly samples n subsequences of a defined length, creates new records with those subsequences and puts these new records into a list (which is finally returned).\n\n    Args:\n        file (str): Full path to a fasta file\n        length (int): Length of a sampled subsequence\n        n (int): Number of subsequences to be sampled\n        wd (str): Working directory path\n        n_nuc_threshold (float): Ambiguous nucleotide content threshold in sampled sequences (in percents)\n        virus (bool): Flag which enables or disables virus genomes sampling\n\n    Returns:\n        list[SeqRecord]: Returns list of newly created biopython `SeqRecord`, each contains sampled subsequences.\n    \"\"\"\n    new_records = []\n    record = SeqIO.read(file, \"fasta\")\n    for i in range(n):\n        new_seq = None\n        valid = False\n        pick = None\n        while not valid:\n            pick = secrets.choice(range(0, len(record.seq) - length))\n            new_seq = Seq(str(record.seq[pick:pick + length]))\n            n_content = new_seq.count(\"N\")\n            if n_content / length < n_nuc_threshold:\n                valid = True\n            else:\n                print(f\"Too high N content ({n_content / length}%). Sampling again\")\n\n        
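# keep the genome coordinates of the accepted window in the description\n        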
desc = f'{record.description} sample_{i} gen_pos:({pick}:{pick + length})'\n        new_record = SeqRecord(\n            id=record.id,\n            seq=new_seq,\n            description=desc\n        )\n        new_records.append(new_record)\n\n    if virus:\n        # if virus, this is possible\n        with open(f\"{wd}virus/samples/{new_records[0].id}_samples.fasta\", \"w\") as w_fh:\n            SeqIO.write(new_records, w_fh, \"fasta\")\n\n    return new_records\n\n\n# DONE differentiate number of samples from virus and hosts (more from hosts most likely)\n# DONE new sample sources - not only from all edwards host genomes but from each species representative\ndef main_procedure(wd: str, host: bool, virus: bool, full: bool, host_dir: str, virus_dir: str, length: int,\n                   n_vir_samples: int, n_host_samples: int, n_nuc_threshold: float):\n    \"\"\"Main function of the module that samples genomes randomly:\n\n    Args:\n        wd: Working directory path\n        host: Flag, which allows sampling only host genomes\n            !!! p-obsolete \"Obsolete\"\n                In current vision of the whole process, this parameter is obsolete. Using it might be undesirable and cause program to fail. Might be removed in later version.\n        virus: Flag, which allows sampling only virus genomes.\n        full: Flag, which allows sampling both virus and host genomes.\n            !!! p-obsolete \"Obsolete\"\n                In current vision of the whole process, this parameter is obsolete. Using it might be undesirable and cause program to fail. Might be removed in later version.\n        host_dir: Path to directory where sampled host fragments will be saved\n        virus_dir: Path to directory where sampled virus fragments will be saved\n        length: Length of sampled fragments\n        n_vir_samples: Number of samples to take from a virus genome\n        n_host_samples: Number of samples to take from a host genome\n        n_nuc_threshold: Maximum allowed ambiguous nucleotide content threshold in sampled sequences (in percents)\n    \"\"\"\n    # colorama\n    init()\n\n    # timer\n    start = timer()\n\n    # importing json data about hosts\n    utils.get_host_data()\n\n    final_records = []\n    # parallel sampling records of all files and dumping them into one file\n    if host:\n        new_records = Parallel(n_jobs=-1, verbose=True)(\n            delayed(sample_sequences)(file, length, n_host_samples, wd, n_nuc_threshold, virus) for file in\n            glob.glob(f\"{host_dir}*.fna\"))\n\n        # for host\n        for sublist in new_records:\n            final_records.extend(sublist)\n\n        # host_file = Path(f\"{wd}host/samples/host_samples.fasta\")\n        # if host_file.exists():\n        #     os.system(f\"{wd}host/samples/host_samples.fasta\")\n        with open(f\"{wd}host/samples/host_samples.fasta\", \"w\") as w_fh:\n            SeqIO.write(final_records, w_fh, \"fasta\")\n\n        # mapping samples to ncbi ids and dumping them into a file; edit 10.11.21 - much faster\n        p_records = list(\n            SeqIO.parse(f\"{wd}host/samples/host_samples.fasta\", \"fasta\"))\n        ids_records = [record.id.split(\".\")[0] for record in p_records]\n        d = {}\n        for index, value in enumerate(ids_records):\n            d[index] = value\n        # for id in set(ids_records):\n        #     keys = [index for index, value in enumerate(ids_records) if value == id]\n        #     for key in keys:\n        #         d[key] = id\n        with open(f\"{wd}host/maps/sample_map.json\", \"w\", encoding='utf-8') as fh:\n            json.dump(d, fh, indent=4)\n\n    if virus:\n        new_records = Parallel(n_jobs=-1, verbose=True)(\n            delayed(sample_sequences)(file, length, n_vir_samples, wd, n_nuc_threshold, virus) for file in\n            glob.glob(f\"{virus_dir}*.fna\"))\n\n    if full:\n        new_records = Parallel(n_jobs=-1, verbose=True)(\n            delayed(sample_sequences)(file, length, n_host_samples, wd, n_nuc_threshold, 
virus=False) for file in\n            glob.glob(f\"{host_dir}*.fna\"))\n\n        # for host\n        for sublist in new_records:\n            final_records.extend(sublist)\n        with open(f\"{wd}host/samples/host_samples.fasta\", \"w\") as w_fh:\n            SeqIO.write(final_records, w_fh, \"fasta\")\n\n        # mapping samples to ncbi ids and dumping them into a file; edit 10.11.21 - much faster\n        p_records = list(SeqIO.parse(f\"{wd}host/samples/host_samples.fasta\", \"fasta\"))\n        ids_records = [record.id.split(\".\")[0] for record in p_records]\n        d = {}\n        for index, value in enumerate(ids_records):\n            d[index] = value\n        # for id in set(ids_records):\n        #     keys = [index for index, value in enumerate(ids_records) if value == id]\n        #     for key in keys:\n        #         d[key] = id\n        with open(f\"{wd}host/maps/sample_map.json\", \"w\", encoding='utf-8') as fh:\n            json.dump(d, fh, indent=4)\n\n        new_records = Parallel(n_jobs=-1, verbose=True)(\n            delayed(sample_sequences)(file, length, n_vir_samples, wd, n_nuc_threshold, virus=True) for file in\n            glob.glob(f\"{virus_dir}*.fna\"))\n\n        # for virus, but second slower\n        # for sublist in new_records:\n        #     with open(f\"X:/edwards2016/virus/samples/{sublist[0].id}_samples.fasta\", \"a\") as w_fh:\n        #         SeqIO.write(sublist, w_fh, \"fasta\")\n\n    end = timer()\n    runtime = end - start\n    print(f\"{Fore.GREEN}[random-sampling] Done in {runtime:.6f} seconds\")\n\n# if __name__ == \"__main__\":\n#     main()\n","repo_name":"phenolophthaleinum/fastDNA-faiss","sub_path":"random_sampling.py","file_name":"random_sampling.py","file_ext":"py","file_size_in_byte":7179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"1870663192","text":"def parfait(n):\n    x = 1\n    for i in range (n//2, 1, -1):\n        if (n % i == 0):\n            x = x + i\n    if x == n:\n        return 1\n    return 0\n\nnum = int(input(\"Enter the number: \"))\nresult = 0\nfor i in range(1,num):\n    if (num%i)==0:\n        result=result+i\nif result==num:\n    print(num,\"is a perfect number\")\nelse:\n    print(num,\"is not a perfect number\")\n\n\ndef fct_tri(c, l):\n    for i in range(len(l)):\n        if l[i]==c:\n            return True\n    return False\n","repo_name":"SCasella19/Master-1_Introduction-to-Python-Courses","sub_path":"Python_Nombre Parfait/TP1.py","file_name":"TP1.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"38511147817","text":"#!/usr/bin/env python\n#coding:utf8\n# Author : tuxpy\n# Email : q8886888@qq.com\n# Last modified : 2015-02-16 20:50:58\n# Filename : admin/api/log.py\n# Description : \nfrom .base import ApiAdminHandler, api_admin_authenticated\nfrom public.log import get_start_stop_time\nfrom tornado.web import asynchronous\n\nclass ApiLogHandler(ApiAdminHandler):\n    @api_admin_authenticated\n    @asynchronous\n    def get(self, log_type = None):\n        assert log_type in ('file', 'access')\n        start = self.get_query_argument('start', None)\n        stop = self.get_query_argument('stop', None)\n        self._limit = int(self.get_query_argument('limit', 10))\n        self._skip = int(self.get_query_argument('offset', 0))\n        if start:  # pin down the log time window first; every log type has a time field, so handle it here in get and build the time range query condition\n            _start, _stop = get_start_stop_time(start, stop)\n            self._condition = [{'time': {'$gte': _start, \"$lte\": _stop}}]\n        else:\n            self._start, self._stop = None, None\n            self._condition = []\n\n        log_func = getattr(self, 'log_' + log_type, None)\n        log_func()\n\n    def log_file(self):\n        operation = self.get_query_argument('operation', None)\n        if operation:\n            self._condition.append({'operation': operation})\n\n        
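# newest entries first; skip/limit page through the result set\n        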
self.log_db['file'].find(self.find_condition, {'_id': 0}).sort([('time', -1)]).skip(self._skip).limit(self._limit).each(self._send_result)\n\n    def log_access(self):\n        status_code = int(self.get_query_argument('status_code', 0))\n        if status_code:\n            self._condition.append({'status_code': status_code})\n        self.log_db['access'].find(self.find_condition, {'_id': 0}).sort([('time', -1)]).skip(self._skip).limit(self._limit).each(self._send_result)\n    \n    @property\n    def find_condition(self):\n        if self._condition == []:\n            return {}\n        else:  # if there is already more than one condition, wrap them in $and\n            return {\"$and\": self._condition}\n\n    def log_acount(self):\n        pass\n\n","repo_name":"lujinda/zjypan","sub_path":"admin/api/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"11135612302","text":"import tkinter as tk\n\n# create the window\nroot=tk.Tk()\n\n# overall title\nroot.title('COVID-19 test kit predictive value program')\n\n# window size; the +x+y part is the offset from the top-left corner\nroot.geometry(\"600x500+100+100\")\n\n# functions\n\ndef make_lab14() :\n    lab14.configure(text=ent12.get())\n\n# add the row-1 labels\nlab11=tk.Label(root,\n               text=\"Sensitivity\",\n               width=8,\n               height=1,\n               font=('맑은 고딕',16,'bold'),\n               bg='#2F5597',\n               fg='white')\nlab11.grid(row=0,column=0,padx=5,pady=10)\n\nent12=tk.Entry(font=('맑은 고딕',16,'bold'),bg='white',width=8)\nent12.grid(row=0,column=1,padx=5,pady=10)\n\nbutton13 = tk.Button(root,text='\\u2192',font=('맑은 고딕',11,'bold'),bg=\"red\",fg='white',width=4,command=make_lab14)\nbutton13.grid(row=0,column=2,padx=5,pady=10)\n\nlab14=tk.Label(root,width=8,height=1,font=('맑은 고딕',16,'bold'),bg='#2F5597',fg='white')\nlab14.grid(row=0,column=3,padx=5,pady=10)\n\nroot.mainloop()","repo_name":"goobontak/python_test","sub_path":"0217.py","file_name":"0217.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"28000607581","text":"import pygame, pgzero, pgzrun\n\n\nBLACK = (0, 0, 0)\nBLUE = (0, 155, 255)\nYELLOW = (255, 255, 0)\nWHITE = (255, 255, 255)\nGREY = (200, 200, 200)\nGREEN = (0, 255, 0)\nRED = (128, 0, 0)\n\nDARK_BLUE = (0, 100, 200)\nBRIGHT_BLUE = (100, 255, 255)\nPALE_BLUE = (0, 35, 135)\n\nDARK_GREY = (50, 50, 50)\nDARKER_GREY = (10, 10, 10)\n\nWIDTH = 1920\nHEIGHT = 1050\n\n# top_left_x = WIDTH / 2\n# top_left_y = HEIGHT / 2\n\ntop_left_x = 90\ntop_left_y = 0\n\nTILE_SIZE = 90\n\n# CREATE MAP\n\nplatform = []\n\nplatform_width = int(HEIGHT / TILE_SIZE) * 2\nplatform_height = int(WIDTH / TILE_SIZE) - 8\n\nfor y in range(0, platform_height, 3):\n    platform.append([])\n    for x in range(0, platform_width):\n        platform[len(platform) - 1].append(0)\n\n    platform.append([])\n    for x in range(0, platform_width):\n        platform[len(platform) - 1].append(0)\n\n    platform.append([])\n    for x in range(0, platform_width):\n        platform[len(platform) - 1].append(1)\n\nplatform[1][7] = 3\nplatform[2][8: 11] = 0, 0, 0\nplatform[4][7] = 2\n\nplatform[4][9] = 3\nplatform[5][10: 13] = 0, 0, 0\nplatform[7][9] = 2\n\nplatform[7][7] = 3\nplatform[8][3: 6] = 0, 0, 0\nplatform[10][7] = 2\n\n\nplayer_y = 10\nplayer_x = 1\n\nplayer_images = [images.walk, images.left_climb, images.right_climb]\nplayer_image = 0\n\n\nfalling = False\n\n\ndef show_map():\n    for y in range(0, platform_height):\n        for x in range(0, platform_width):\n            print(platform[y][x], end=\" \")\n        print()\n\n\nshow_map()\n\n\ndef player_update():\n    global player_x, player_y\n    global player_image\n    global platform\n    global falling\n\n    
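# a sideways step is only allowed while standing on a solid tile (1)\n    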
if keyboard.right and platform[player_y + 1][player_x] == 1:\n        if player_x < platform_width - 2:\n            player_x += 1\n        else:\n            player_x = platform_width - 2\n\n        player_image = 0\n\n    if keyboard.left and platform[player_y + 1][player_x] == 1:\n        if player_x > 0:\n            player_x -= 1\n        else:\n            player_x = 0\n\n        player_image = 0\n\n    if (keyboard.up and platform[player_y][player_x] == 2)\\\n            or (keyboard.up and platform[player_y + 1][player_x] == 2)\\\n            or (keyboard.up and platform[player_y + 2][player_x] == 2):\n        player_y -= 1\n\n        if player_image == 2:\n            player_image = 1\n        else:\n            player_image = 2\n\n    elif keyboard.up and falling == False:\n        player_y -= 1\n\n        if player_x < platform_width - 3:\n            if keyboard.right:\n                player_x += 3\n\n        if player_x > 2:\n            if keyboard.left:\n                player_x -= 3\n\n    if (keyboard.down and platform[player_y][player_x] == 2) \\\n            or (keyboard.down and platform[player_y + 1][player_x] == 2)\\\n            or (keyboard.down and platform[player_y + 2][player_x] == 2)\\\n            or (keyboard.down and platform[player_y + 3][player_x] == 2):\n        player_y += 1\n\n        if player_image == 2:\n            player_image = 1\n        else:\n            player_image = 2\n\n    if (platform[player_y + 1][player_x] == 0) \\\n            and (platform[player_y + 2][player_x] != 2 \\\n            and platform[player_y + 3][player_x] != 2):\n        player_y += 1\n        falling = True\n    else:\n        falling = False\n\n    if keyboard.c and platform[player_y][player_x] == 3:\n        platform[player_y][player_x] = 0\n        sounds.hi.play()\n\n\ndef draw():\n    screen.fill(BLUE)\n\n\n    for y in range(0, platform_height):\n        for x in range(0, platform_width):\n            if platform[y][x] == 1:\n                draw_image(images.block2, x*TILE_SIZE, y*TILE_SIZE)\n            elif platform[y][x] == 2:\n                draw_image(images.ladder3, x*TILE_SIZE, y*TILE_SIZE)\n            elif platform[y][x] == 3:\n                draw_image(images.coin, x * TILE_SIZE, y * TILE_SIZE)\n\n            if player_y == y:\n                draw_player()\n\n\ndef draw_player():\n    draw_image(player_images[player_image], player_x * TILE_SIZE, player_y * TILE_SIZE)\n\n\ndef draw_image(image, x, y):\n    screen.blit(image,\n                (top_left_x + x - image.get_width(),\n                 top_left_y + y - image.get_height()))\n\n\ndef draw_rect(x, y,\n              width, height,\n              colour=BLACK,\n              outline=None):\n    if outline is not None:\n        BOX2 = Rect((top_left_x + x - int(width / 2) - 2, top_left_y + y - int(height / 2) - 2),\n                    (width + 4, height + 4)\n                    )\n        screen.draw.rect(BOX2, outline)\n\n    if colour is not None:\n        BOX = Rect((top_left_x + x - int(width / 2), top_left_y + y - int(height / 2)),\n                   (width, height)\n                   )\n        screen.draw.filled_rect(BOX, colour)\n\n\ndef show_text(text_to_show, x, y,\n              colour=WHITE,\n              size=75):\n    screen.draw.text(text_to_show,\n                     (top_left_x + x, top_left_y + y),\n                     fontsize=size, color=colour)\n\nclock.schedule_interval(player_update, 0.075)\npgzrun.go()","repo_name":"zestra/Zestras-Platformer-Game","sub_path":"Platformer/Draft 1/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":4917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"29800093750","text":"# Function used to check input is valid\r\ndef choice_checker (question):\r\n    error = \"please enter higher or lower (or xxx to quit)\"\r\n    valid = False\r\n    while not valid:\r\n        # Ask user for choice (and put choice in lowercase)\r\n        response = input(question).lower()\r\n        if response in (\"higher\", \"lower\", \"xxx\"):\r\n            return response\r\n        print(error)\r\n\r\n\r\ndef int_check(question, low=None, high=None, exit_code = None):\r\n\r\n    while True:\r\n\r\n        # sets up error messages\r\n        if low is not None and high is not None:\r\n            error = \"Please enter an integer between {} and {} (inclusive)\".format(low, high)\r\n
(inclusive)\".format(low, high)\r\n elif low is not None and high is None:\r\n error = \"Please enter an integer that is more than or equal to {}\".format(low)\r\n elif low is None and high is not None:\r\n error = \"Please enter an integer that is less than or equal to {}\".format(high)\r\n else:\r\n error = \"Please enter an integer\"\r\n\r\n try:\r\n response = input(question)\r\n # check to see if response is the exit code and return it\r\n if response == exit_code:\r\n return response\r\n\r\n # change the response into an integer\r\n else:\r\n response = int(response)\r\n\r\n # Checks response is not too low, not use of 'is not' keywords\r\n if low is not None and response < low:\r\n print(error)\r\n continue\r\n\r\n # Checks response is not too high\r\n if high is not None and response > high:\r\n print(error)\r\n continue\r\n\r\n return response\r\n\r\n # checks input is a integer\r\n except ValueError:\r\n print(error)\r\n continue\r\n\r\n\r\n# Main routine goes here\r\n\r\nrounds_played = 0\r\nmode = \"regular\"\r\n\r\n# Ask user for # of rounds, for infinite mode\r\nrounds = int_check(\"How many rounds for infinite: \", 1, exit_code = \"\")\r\n\r\nend_game = \"no\"\r\nwhile end_game == \"no\":\r\n\r\n # Rounds Heading\r\n print()\r\n if rounds == \"\":\r\n mode = \"infinite\"\r\n rounds = 5\r\n\r\n if mode == \"infinite\":\r\n\r\n headings = \"continue mode: round {}\".format(rounds_played + 1)\r\n rounds += 1\r\n else:\r\n headings = \"round {} of {}\".format(rounds_played + 1, rounds)\r\n\r\n print(headings)\r\n choose = input(\"Type \" )\r\n rounds_played += 1\r\n\r\n if choose == \"xxx\" or rounds_played >= rounds:\r\n break\r\n # rest of loop / game\r\n print(\"you choose {}\".format(choose))\r\n\r\n\r\nprint(\"thank you for playing\")\r\n","repo_name":"Curnowj0407/Higher-Lower","sub_path":"01_HL_round_mechanics.py","file_name":"01_HL_round_mechanics.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"597383808","text":"\"\"\"\nCreate a python class called ‘Bus’ which has conductor name, total seats, seats booked, bus unique registration number, driver name as their properties. 
Create member functions -\nConstructor which takes registration_name, conductor_name, driver_name, total_seats, seats_booked.\nprint_bus_details -> This function should print ALL details of the bus.\nis_seat_available -> This function should return True if a seat is available, if not return False.\nbook_seat(no_of_seats) -> This function should book seats if seats are available, else return message “Requested no of seats not available”.\n\n\"\"\"\n\nclass Bus:\n    def __init__(self,register_number,conductor_name,driver_name,total_seats,seats_booked):\n        self.register_number=register_number\n        self.conductor_name=conductor_name\n        self.driver_name=driver_name\n        self.total_seats=total_seats\n        self.seats_booked=seats_booked\n\n    def bus_details(self):\n        print(\"register number: \"+self.register_number)\n        print(\"conductor name: \"+self.conductor_name)\n        print(\"driver name: \"+self.driver_name)\n        print(\"total seats: \"+str(self.total_seats))\n        print(\"seats booked: \"+str(self.seats_booked))\n\n    def is_seat_available(self):\n        if self.seats_booked < self.total_seats:\n            return True\n        else:\n            return False\n\n    def book_seat(self,no_of_seats):\n        if self.total_seats!=self.seats_booked:\n            if no_of_seats hex -> get last 6 to ensure it's only 3 bytes -> back to dec.\n    hexStr = hex(int(item.productCode))\n    color = int(hexStr[-6:], 16)\n    embed = {\n        'title':\n        '【{}】'.format(item.productCode),\n        'description':\n        '{}\\n'.format(item.productName),\n        'url':\n        '{}'.format(item.productURL),\n        'fields': [\n            {\n                'name': 'Price:',\n                'value': \"{:,}円\".format(int(item.price)),\n                'inline': False\n            },\n        ],\n        'color':\n        color,\n        'image': {\n            'url': item.imageURL,\n        },\n    }\n    return embed\n\n\ndef check_item(item):\n    c.execute(\n        \"SELECT productCode, price FROM suruguya WHERE productCode=? AND price=?\",\n        (\n            item.productCode,\n            item.price,\n        ))\n    if c.fetchone():\n        return  # don't care if item has been seen before\n\n    c.execute(\"INSERT INTO suruguya VALUES (?, ?)\", (\n        item.productCode,\n        item.price,\n    ))\n    conn.commit()\n\n    resp = get_new_item_embed(item)\n    send_embeds(resp)\n\n\ndef send_embeds(embed):\n    global discord_url\n    if type(discord_url) is list:\n        for url in discord_url:\n            send_embed(embed, url)\n    else:\n        send_embed(embed, discord_url)\n\n\ndef send_embed(embed, url):\n    global discord_url\n    payload = {'embeds': [embed], 'username': 'Surugaya'}\n    payload_json = json.dumps(payload)\n    response = r.post(url,\n                      payload_json,\n                      headers={'Content-Type': 'application/json'})\n\n    if response.status_code != 200 and response.status_code != 204:\n        print(\"Error: \", response.text)\n        jsonError = json.loads(response.text)\n        sleepTime = jsonError['retry_after'] / 1000\n        print(\"Sleeping for {}s\".format(sleepTime))\n        time.sleep(sleepTime)\n        send_embed(embed, url)  # attempt sending again\n\n\ndef main():\n    print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n    query = '東方 ふもふも ぬいぐるみ'\n\n    for item in suruguya.search(query):\n        check_item(item)\n\n\ndb_file = os.path.join(os.path.dirname(__file__), \"fumo.db\")\nconn = sqlite3.connect(db_file)\nc = conn.cursor()\n\nschema = \"\"\"\nCREATE TABLE IF NOT EXISTS \"suruguya\" (\n    \"productCode\" TEXT NOT NULL,\n    \"price\" INTEGER,\n    PRIMARY KEY (\"productCode\", \"price\")\n);\n\"\"\"\nc.execute(schema)\nconn.commit()\n\njson_file = fn = os.path.join(os.path.dirname(__file__), \"suruguya.json\")\ndata = load_json_file()  # we make this global cause life easier\ndiscord_url = 
data['discord_webhook_url']\nmain()\nconn.close()\n","repo_name":"marvinody/fumodiscordbots","sub_path":"fumoSuruguya.py","file_name":"fumoSuruguya.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"8"} +{"seq_id":"42663778656","text":"from typing import Tuple\nfrom PIL import Image, ImageDraw, ImageFont\nfrom PIL.PngImagePlugin import PngImageFile\nfrom PIL.ImageFont import FreeTypeFont\nfrom .constants import Constants\nfrom .utils import Utils\n\nclass ImageGuide:\n def __init__(\n self, name: str, transporter: str, guide: str, date: str\n ) -> None:\n self.name: str = name\n self.transporter: str = transporter\n self.guide: str = guide\n self.date: str = date\n self.open_image()\n self.add_texts()\n Utils.save_image(self.name, self.image)\n\n def open_image(self) -> None:\n '''\n Open image to use\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n '''\n self.image: PngImageFile = Image.open(Constants.IMAGE_PATH)\n self.draw: ImageDraw.ImageDraw = ImageDraw.Draw(self.image)\n\n def add_texts(self) -> None:\n '''\n add all texts to image\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n '''\n # Name\n self.add_one_text(\n Constants.FONT_HI_PATH,\n Constants.FONT_NAME_PATH,\n Constants.SIZE_NAME,\n Constants.TITLE_NAME,\n self.name,\n Constants.Y_NAME,\n Constants.COLOR_NAME\n )\n\n #Transport\n self.add_one_text(\n Constants.FONT_TITLE_PATH,\n Constants.FONT_TRANSPORT_PATH,\n Constants.SIZE_TRANSPORT,\n Constants.TITLE_TRANSPORT,\n self.transporter,\n Constants.Y_TRANSPORT,\n Constants.COLOR_TRANSPORT\n )\n\n #Guide\n self.add_one_text(\n Constants.FONT_GUIDE_PATH,\n Constants.FONT_GUIDE_PATH,\n Constants.SIZE_GUIDE,\n Constants.TITLE_GUIDE,\n self.guide,\n Constants.Y_GUIDE,\n Constants.COLOR_GUIDE\n )\n\n #Date\n self.add_one_text(\n Constants.FONT_TITLE_PATH,\n Constants.FONT_DATE_PATH,\n Constants.SIZE_DATE,\n Constants.TITLE_DATE,\n self.date,\n Constants.Y_DATE,\n Constants.COLOR_DATE\n )\n\n def add_one_text(\n self, font_path_one: str, font_path_two: str,\n size: int, text_one: str, text_two: str,\n y_value: int, color: Tuple[int]\n ) -> None:\n '''\n font_path_one: str\n Path to text one's font\n font_path_two: str\n Path to text two's font\n size: int\n Size to text's font\n text_one: str\n Text one\n text_two: str\n Text two\n y_value: int\n Position to y in the image\n color: Tuple[int]\n Color to use in the text\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n '''\n\n font_one: FreeTypeFont = ImageFont.truetype(font_path_one, size)\n \n font_two: FreeTypeFont = ImageFont.truetype(font_path_two, size)\n \n sizes: Tuple[int] = Utils.generate_size(\n text_one, text_two,\n font_one, font_two,\n )\n\n x_name: int = Utils.generate_center_value(sizes[0])\n\n self.draw.text(\n (x_name, y_value),\n text_one,\n font=font_one,\n fill=color\n )\n\n self.draw.text(\n (x_name + sizes[2], y_value),\n text_two,\n font=font_two,\n fill=color\n )","repo_name":"jaortiz92/addTextToPosterShipments","sub_path":"app/imageGuide.py","file_name":"imageGuide.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"16948678092","text":"import torch\nimport torch.nn as nn\n\nfrom source.template_model import TemplateModel\nfrom source.model_utils import activations, _to_one_hot, CondFC\n\nclass FCGated(TemplateModel):\n \"\"\"\n Implement a fully connected encoder and 
decoder with a quantization layer and attention\n    gates to identify bits that are predictive of the sensitive attribute\n    \"\"\"\n\n    def __init__(self, input_dim, depth=2, width=64, zk=8, k=8, sdim=2, activation_out=None, sigma=1, ncode=2):\n\n        super().__init__()\n\n        encoder_list = []\n        in_dim = input_dim\n        out_dim = width\n        for _ in range(depth - 1):\n            encoder = nn.Sequential(nn.Linear(in_dim, out_dim),\n                                    nn.BatchNorm1d(out_dim),\n                                    nn.ELU())\n            encoder_list.append(encoder)\n\n            in_dim = out_dim\n\n        encoder_list.append(nn.Sequential(nn.Linear(out_dim, zk * k),\n                                          nn.BatchNorm1d( zk * k),\n                                          nn.Tanh()))\n        self.encoder = nn.Sequential(*encoder_list)\n\n        decoder_list = []\n        in_dim = zk * k\n        out_dim = width\n        for _ in range(depth - 1):\n            decoder = nn.Sequential(CondFC(in_dim, out_dim, sdim + 1))\n            decoder_list.append(decoder)\n            in_dim = out_dim\n\n        self.decoder = nn.Sequential(*decoder_list)\n\n        if activation_out is not None:\n            self.decoder_final = CondFC(in_dim, input_dim, sdim + 1, activation=activation_out)\n        else:\n            self.decoder_final = nn.Linear(in_dim, input_dim)\n\n        self.gate = nn.Sequential(CondFC(zk * k, 1, 1, activation='sigmoid'))\n        # self.gate_beta = nn.Sequential(nn.Linear(1, zk * k),\n        #                                 nn.Tanh())\n\n        # self.encode_beta = nn.Sequential(nn.Linear(1, zk * k),\n        #                                 nn.Tanh())\n        #\n        # self.decode_beta = nn.Sequential(nn.Linear(1, zk * k * 2),\n        #                                  nn.Tanh())\n\n        self.k = k\n        self.zk = zk\n        self.sigma = sigma\n        self.code = nn.Parameter(torch.arange(ncode, dtype=float, requires_grad=True).float() / (ncode - 1))\n        self.scale = nn.Parameter(torch.tensor([10.0], requires_grad=True)).float()\n        self.scale_decode = nn.Parameter(torch.tensor([100.0], requires_grad=True)).float()\n        self.param_init()\n\n    def param_init(self):\n        \"\"\"\n        Xavier's initialization\n        \"\"\"\n        for layer in self.modules():\n            if hasattr(layer, 'weight'):\n\n                if isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d, nn.PReLU, nn.Tanh)):\n                    nn.init.normal_(layer.weight, mean=1., std=0.02)\n                else:\n                    nn.init.xavier_normal_(layer.weight)\n            if hasattr(layer, 'bias'):\n                nn.init.constant_(layer.bias, 0.)\n\n    def encode(self, x):\n        \"\"\"\n        Encode into a zk * k vector\n        :param x:\n        :return:\n        \"\"\"\n        return self.encoder(x)\n\n    def decode(self, b, beta):\n        \"\"\"\n        Decode quantized representation\n        :param b:\n        :return:\n        \"\"\"\n        out, beta = self.decoder((b, beta))\n        out, _ = self.decoder_final((out, beta))\n\n        return out\n\n    def quantize(self, z):\n        \"\"\"\n        Quantization layer (right now a simple binarization using\n        nearest neighbor to 0 or 1)\n        :return:\n        \"\"\"\n        z = (z + 1) / 2\n        code = self.code.detach()\n        code_idx = torch.arange(self.code.shape[0])\n        code = code[None, None, :]\n        z_code = (z.unsqueeze(-1) - code) ** 2\n\n        z_soft = torch.sum(nn.Softmax(dim=-1)(- self.sigma * z_code) * code, dim=-1)\n\n        centers = z_code.argmin(-1)\n        center_code = _to_one_hot(centers, self.code.shape[0])\n        center_code = center_code * code\n        z_hard = center_code.sum(-1)\n\n        centers_soft = torch.sum(nn.Softmax(dim=-1)(- self.sigma * z_code) * code_idx, dim=-1)\n\n        q = (z_hard - z_soft).detach() + z_soft\n        c = (centers - centers_soft).detach() + centers_soft\n\n        return q, c, center_code.mean(dim=[0, 1])\n\n    def compute_gate(self, z, beta):\n        \"\"\"\n        Generate a gate that returns the maximum number of\n        bits to include in the bitstream and forget the\n        remaining bits.\n        :param z:\n        :return: mask_soft\n        \"\"\"\n        beta = beta.unsqueeze(1)\n        # z = z * (1 + self.gate_beta(beta))\n        #\n        # z_with_beta = torch.cat([z, beta], 1)\n        gate, beta = self.gate((z, 
beta))\n gate = self.k * self.zk * ( 1 - gate * beta)\n\n mask_range = torch.arange(0, self.zk * self.k, device=z.device)\n mask_range = mask_range.unsqueeze(0).expand_as(z)\n\n mask = gate - mask_range\n mask = torch.sigmoid(mask)\n\n #gate = 1 - beta * gate\n\n mask_hard = torch.round(mask)\n mask_soft = (mask_hard - mask).detach() + mask\n\n return mask_soft\n\n def forward(self, x, s, beta):\n\n z = self.encode(x)\n b = beta.unsqueeze(1)\n\n mask = self.compute_gate(z, beta)\n\n q, centers, code = self.quantize(z * mask)\n\n # adjustment = self.decode_beta(b)\n # scale, bias = torch.chunk(adjustment, 2, dim=1)\n # qb = scale * q + bias\n #qb = torch.cat([q, b], 1)\n #srand = s[torch.randperm(s.shape[0])]\n\n b_with_s = torch.cat([b, s], -1)\n out = self.decode(q, b_with_s)\n\n q = q.reshape(q.shape[0], self.zk, self.k)\n centers = centers.reshape(q.shape[0], self.zk, self.k)\n\n return out, q, mask, centers, code\n\n\n\n\n","repo_name":"Gitiauxx/adapt_fbc","sub_path":"source/autoencoders/fc_gated.py","file_name":"fc_gated.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"42877582715","text":"from django.urls import path, include\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth import views as auth_views\nfrom rest_framework import routers\nfrom . import views\n\nrouter = routers.SimpleRouter()\nrouter.register(\"places\", views.PlaceViewSet)\n\nurlpatterns = [\n path(\"api/\", TemplateView.as_view(template_name='world-atlas-api.html')),\n path(\"api/\", include(router.urls), name='api'),\n path(\"\", views.world_atlas, name='world-atlas'),\n path(\"play/\", views.world_atlas_play, name='world-atlas-play'),\n path(\"bot/\", views.world_atlas_bot, name='world-atlas-bot'),\n path(\"room/wa//\", views.world_atlas_room, name='world-atlas-room'),\n path(\"room/create/\", views.world_atlas_room_create, name='world-atlas-room-create'),\n path(\"room/join/\", views.world_atlas_room_join, name='world-atlas-room-join'),\n path(\"login/\", views.WA_LoginView.as_view(template_name=\"world-atlas/login.html\"), name='wa-login'),\n path(\"signup/\", views.register, name='wa-signup'),\n path(\"logout/\", auth_views.LogoutView.as_view(template_name='world-atlas/logout.html'), name='wa-logout'),\n]\n","repo_name":"Mr-Destructive/django-projects","sub_path":"worldatlas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"33427416340","text":"PAPER = \"AAA AAA AAA.[ some_paper_a, some_paper_b ] BBB BBB BBB.[ some_book_a, some_paper_a ] CCC CCC CCC.[ some_book_b ]\"\n\nuser_input = input()\n#user_input.replace(',', '')\npaper_line = user_input.split(']')\npaper_list = []\npaper_dict = {}\npaper_num = 1\nprint(paper_line)\nindex_list = []\nnum_list = []\n\nfor i in range(len(paper_line)-1):\n temp_line = []\n temp_line = paper_line[i].split('[')\n index_list.append(\"[\"+temp_line[1]+\"]\")\n temp_line = temp_line[1].split(',')\n for i in range(len(temp_line)):\n if temp_line[i].replace(',', '').strip() not in paper_list:\n paper_list.append(temp_line[i].replace(',', '').strip())\n paper_dict[temp_line[i].replace(',', '').strip()] = str(paper_num)\n paper_num += 1\n\nprint(paper_list)\nprint(paper_dict.items())\nprint(index_list)\n\nfor i in range(len(paper_line)-1):\n temp_line = []\n temp_line = paper_line[i].split('[')\n 
index_list.append(\"[\"+temp_line[1]+\"]\")\n temp_line_2 = temp_line[1].split(',')\n for j in range(len(temp_line_2)):\n if temp_line_2[j].replace(',', '').strip() in paper_list:\n temp_line_2[j] = paper_dict[temp_line_2[j].replace(',', '').strip()]\n temp_line_2.sort()\n num_list.append(temp_line_2)\n\nfor i in range(len(num_list)):\n temp_line = []\n temp_string = ''\n temp_line = paper_line[i].split('[')\n temp_string = temp_line[0] + \"[ \"\n\n for j in range(len(num_list[i])):\n temp_string = temp_string + num_list[i][j]\n if j != len(num_list[i])-1:\n temp_string = temp_string+', '\n\n temp_string = temp_string + \" ]\"\n paper_line[i] = temp_string\n\n\nprint_string = ''\nfor i in range(len(paper_line)-1):\n print_string = print_string + paper_line[i]\nprint(print_string)\n\nfor i in range(len(paper_list)):\n print(\"[\" + str(i + 1) + \"] \" + paper_list[i])","repo_name":"GDN2/DL","sub_path":"DeepLearning/groom/gromm1_2.py","file_name":"gromm1_2.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"6209450106","text":"# @Author: Eric Rosenthal\n# @Date: 2022-07-13T17:49:26-07:00\n# @Email: ericros@stanford.edu\n# @Project: nspyre-jv\n# @Last modified by: Eric Rosenthal\n# @Last modified time: 2022-07-22T10:51:49-07:00\n\n\n\nfrom xmlrpc.server import SimpleXMLRPCServer\nfrom wlm_nspyre import *\nimport ctypes\nfrom ctypes.util import find_library\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nwlm = WavelengthMeter() #initialize the wavemeter class\n\nserver = SimpleXMLRPCServer(('171.64.84.122',65432),allow_none=True) #initialize server\n#server = SimpleXMLRPCServer(('171.64.84.122',49944)) #initialize server\n\n#get wavelength\ndef get_wlen(channel):\n\treturn wlm.GetWavelength(channel=channel)\n\ndef get_pattern(channel):\n\t# get pattern\n\tpattern_short, pattern_long = wlm.get_pattern(channel=channel)\n\n\t# convert from c_type pointer to numpy array\n\tpattern_short_arr = np.ctypeslib.as_array(pattern_short)\n\tpattern_long_arr = np.ctypeslib.as_array(pattern_long)\n\n\treturn pattern_short_arr.tolist(), pattern_long_arr.tolist()\n\nserver.register_function(get_wlen, \"get_wlen\")\nserver.register_function(get_pattern, \"get_pattern\")\n\n#run the server loop\ntry:\n\tprint('Use Control-C to exit')\n\tserver.serve_forever()\nexcept KeyboardInterrupt:\n\tprint('Exiting')\n","repo_name":"nspyre-org/examples","sub_path":"experiments/nspyre-jv-main/Instruments/Wavemeter/my_wavemeter_server_nspyre.py","file_name":"my_wavemeter_server_nspyre.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"34151523825","text":"#!/usr/bin/env python\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom Letter.models import Letter\nfrom django.contrib.auth.models import User\n\nfrom django.core import mail\nfrom django.utils import timezone\n\nsched = BlockingScheduler()\n\n@sched.scheduled_job('interval', minutes=1)\ndef timed_job():\n # letter = [l for l in Letter.objects.all() if l.date_received.year == timezone.now().year and l.date_received.month == timezone.now().month\n # and l.date_received.day == timezone.now().day and l.date_received.hour == timezone.now().hour\n # and l.date_received.minute == timezone.now().minute + 1\n # ]\n\n letter = [l for l in Letter.objects.all() if l.date_received < timezone.now() and l.sent==False\n ]\n\n mails = 
[mail.EmailMessage(lett.subject,\n lett.text,\n lett.author.email,\n [lett.email],\n ) for lett in letter\n ]\n for item in letter:\n item.sent=True\n item.save()\n\n connection = mail.get_connection()\n\n connection.open()\n connection.send_messages(mails)\n\n connection.close()\n return None\n\n\n# @sched.scheduled_job('cron', day_of_week='mon-fri', hour=17)\n# def scheduled_job():\n# print('This job is run every weekday at 5pm.')\n\nsched.start()\n\n\n# timed_job()\n","repo_name":"mmetelytsia/Letter_2_Future","sub_path":"clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"74195942982","text":"#!/usr/bin/env python3\nimport os\nimport subprocess\nfrom datetime import date\n\nfrom setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\ndef get_version(app):\n version = date.today().strftime('%Y-%m')\n git_tag = \"0.0\"\n git_commits = \"0\"\n suffix = \"dev\"\n try:\n branch = subprocess.check_output(\n [\"git\", \"rev-parse\", \"--abbrev-ref\", \"HEAD\"]\n ).rstrip().decode('utf8')\n git_describe = subprocess.check_output(\n [\"git\", \"describe\", \"--long\"]\n ).rstrip().decode('utf8')\n git_tag = git_describe.split('-')[0]\n git_commits = git_describe.split('-')[1]\n if branch == 'master':\n suffix = ''\n else:\n suffix = 'dev'\n print(branch, git_tag, git_commits, suffix)\n version = '{}.{}{}'.format(git_tag, git_commits, suffix)\n except (subprocess.CalledProcessError, OSError) as e:\n print('git not installed', e)\n try:\n fp = open('{}/__init__.py'.format(app), 'w')\n fp.write(\n '__version__ = [{}, {}, \"{}\"]'.format(\n git_tag.replace('.', ','), git_commits, suffix)\n )\n fp.close()\n except Exception:\n print('ERROR opening {}/__init__.py'.format(app), os.curdir)\n return version\n\n\nmodule = 'geocurrency'\n\nsetup(\n name='geocurrency',\n description='Web based services to convert units and currencies.',\n python_requires='>3.7.0',\n version=get_version(module),\n author='Frédéric MEUROU',\n author_email='fm@peabytes.me',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url='https://api.geocurrency.me/swagger/',\n install_requires=[\n \"Django~=3.0\",\n \"django-cors-headers~=3.2\",\n \"django-cors-middleware~=1.5\",\n \"django-createsuperuser\",\n \"django-extensions~=3.0\",\n \"django-filter~=2.3\",\n \"django-redis~=4.12\",\n \"django-sendfile~=0.3\",\n \"djangorestframework~=3.11\",\n \"drf-yasg~=1.17\",\n \"markdown~=3.0\",\n \"lxml~=4.0\",\n \"django_redis~=4.0\",\n \"pysendfile~=2.0\",\n \"gunicorn\",\n \"requests\",\n # \"psycopg2\",\n # \"mysql\",\n \"pytz\",\n \"pycountry\",\n \"countryinfo~=0.1\",\n \"timezonefinder~=4.4\",\n \"iso4217\",\n \"forex-python~=1.0\",\n \"Babel~=2.8\",\n \"Pint~=0.17\",\n \"networkx~=2.5\",\n \"sympy~=1.7\",\n \"channels~=3.0\",\n \"uncertainties~=3.1\"\n ],\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"Environment :: Web Environment\",\n \"Development Status :: 4 - Beta\",\n \"Framework :: Django\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 
3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Physics\",\n \"Topic :: Office/Business\",\n \"License :: OSI Approved :: MIT License\",\n ],\n py_modules=[\n 'geocurrency.core',\n 'geocurrency.countries',\n 'geocurrency.currencies',\n 'geocurrency.rates',\n 'geocurrency.units',\n 'geocurrency.converters',\n 'geocurrency.calculations'\n ],\n)\n","repo_name":"fmeurou/geocurrency","sub_path":"src/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"8"} +{"seq_id":"27373614221","text":"from django.shortcuts import render\nfrom .models import Info\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\n# Create your views here.\n\ndef send_message(request):\n myinfo = Info.objects.first()\n if request.method == \"POST\":\n subject = request.POST['subject']\n # name = request.POST['name']\n email = request.POST['email']\n message = request.POST['message']\n '''\n send_mail(\n 'Subject here',\n 'Here is the message.',\n 'from@example.com',\n ['to@example.com'],\n fail_silently=False,\n )\n '''\n # test, I hope this email finds you\n send_mail(\n subject,\n message,\n settings.EMAIL_HOST_USER,\n [email],\n )\n\n return render(request, 'contact/contact.html', {'myinfo':myinfo})\n","repo_name":"abdula8/django-job-board","sub_path":"contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"8637298940","text":"from typing import MutableMapping\n\n\ndef format_port_bind(\n port_mapping: MutableMapping,\n config: MutableMapping,\n port_type: str\n) -> MutableMapping:\n\n c_port = f\"{port_mapping['container_port']}/{port_type.lower()}\"\n config['ExposedPorts'][c_port] = {}\n if isinstance(port_mapping['host_port'], list):\n h_ports = [{'HostPort': i} for i in port_mapping['host_port']]\n config['HostConfig']['PortBindings'][c_port] = h_ports\n else:\n config['HostConfig']['PortBindings'][c_port] = [{'HostPort': port_mapping['host_port']}]\n\n return config\n","repo_name":"paraleipsis/hivecore-agent","sub_path":"src/docker/utils/ports.py","file_name":"ports.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"33162330224","text":"import json\nfrom numba import njit\nfrom numba.typed import Dict\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass, field\nfrom .nn_model import create_nn_score_dt\nfrom multiply.util.definitions import ROOT_DIR\n\n\n# ================================================================================\n# Define an alignment between two primers\n#\n# ================================================================================\n\n\n@dataclass(order=True)\nclass PrimerAlignment:\n \"\"\"\n Represent the alignment of two primers\n \"\"\"\n\n primer1_name: str = field(compare=False)\n primer2_name: str = field(compare=False)\n primer1: str = field(compare=False)\n primer2: str = field(compare=False)\n score: float = field(compare=True)\n alignment: str = field(compare=False, repr=False)\n # 
index: int=field(compare=False, repr=False)\n\n\n# ================================================================================\n# Abstract base class for various primer alignment algorithms\n#\n# ================================================================================\n\n\nclass AlignmentAlgorithm(ABC):\n \"\"\"\n Alignment algorithm for a pair of primers\n\n \"\"\"\n\n rc_map = {\"A\": \"T\", \"T\": \"A\", \"C\": \"G\", \"G\": \"C\"}\n\n def __init__(self):\n pass\n\n def set_primers(self, primer1, primer2, primer1_name, primer2_name):\n \"\"\"\n Set a pair for primers to align\n\n \"\"\"\n self.primer1 = primer1\n self.primer2 = primer2\n self.primer1_name = primer1_name\n self.primer2_name = primer2_name\n\n self.score = None # reset\n\n @abstractmethod\n def load_parameters():\n pass\n\n @abstractmethod\n def align():\n \"\"\"\n Align the primers\n\n \"\"\"\n pass\n\n @abstractmethod\n def get_alignment_string():\n \"\"\"\n Create an ASCII string representing the aligned primers\n\n \"\"\"\n pass\n\n def print_alignment(self):\n \"\"\"\n Print an ASCII view of the aligned primers\n\n \"\"\"\n print(self.get_alignment_string())\n\n def get_primer_alignment(self):\n \"\"\"\n Return an alignment object\n\n \"\"\"\n return PrimerAlignment(\n primer1=self.primer1,\n primer2=self.primer2,\n primer1_name=self.primer1_name,\n primer2_name=self.primer2_name,\n score=self.score,\n alignment=self.get_alignment_string(),\n )\n\n\n# ================================================================================\n# Concrete primer alignment algorithms\n#\n# ================================================================================\n\n\nclass PrimerDimerLike(AlignmentAlgorithm):\n \"\"\"\n Align two primers using an algorithm like the one described\n by Johnston et al. (2019) Sci Reports\n \n Primary idea is to only allow:\n - Ungapped alignments\n - With 5' overhangs (i.e. extensible)\n \n And then to add a bonus if either 3' end is complementary in\n the highest scoring alignment\n \n \n \"\"\"\n \n param_path = f\"{ROOT_DIR}/settings/alignment/primer_dimer/parameters.json\"\n \n def load_parameters(self):\n \"\"\"\n Load parameters necessary for Primer Dimer algorithm,\n and set as attributes\n \n \"\"\"\n # Load parameter JSON\n params = json.load(open(self.param_path, \"r\"))\n\n # Load nearest neighbour model\n self.nn_scores = create_nn_score_dt(\n match_json=f\"{ROOT_DIR}/{params['match_scores']}\", # this is a path\n single_mismatch_json=f\"{ROOT_DIR}/{params['single_mismatch_scores']}\", # this is a path\n double_mismatch_score=params['double_mismatch_score'] # this is a float\n )\n\n # Load penalties\n self.end_length = params[\"end_length\"]\n #self.end_penalty = params[\"end_penalty\"]\n self.end_bonus = params[\"end_bonus\"]\n \n @staticmethod\n def _calc_linear_extension_bonus(matching, \n overhang_left, \n overhang_right, \n end_length, \n end_bonus):\n \"\"\"\n Calculate a bonus score for primer-dimer alignments that would allow for *extension*\n\n params\n matching : list[bool]\n List of booleans indicating whether or not bases matched\n for this alignment position\n overhang_left : bool\n Is there an overhang on the left side of the matches;\n i.e. 
would extension be possible?\n overhang_right : bool\n As above, but right side.\n end_length : int\n Number of bases to consider for end bonus.\n end_bonus : float\n Bonus to add per aligned, extendible base.\n\n returns\n _ : float\n Bonus score in [-2*end_length*end_bonus, 0].\n\n \"\"\"\n\n # Left end\n left_end = 0\n for match in matching[:end_length]:\n if not overhang_left:\n break\n if not match:\n break\n left_end += 1\n\n # Right end\n right_end = 0\n for match in matching[::-1][:end_length]:\n if not overhang_right:\n break\n if not match:\n break\n right_end += 1\n\n return (left_end + right_end) * end_bonus\n \n def align(self):\n \"\"\"\n Align primers;\n \n Finding highest score and its associated\n start position\n \n \"\"\"\n \n # Identify longer and shorter primer\n primers = [self.primer1, self.primer2]\n primers.sort(key=len, reverse=True)\n l, s = primers\n s = s[::-1]\n nL, nS = len(l), len(s)\n\n # Iterate over each start position\n best_start = 0\n best_score = 10\n best_matching = []\n for i in range(nL - 1):\n\n # Compute score for alignment\n matching = []\n current_score = 0\n penalties = 0\n for j in range(nS - 1):\n\n # Compute psuedo-Gibb's\n l_bases = l[(i+j):(i+j+2)]\n s_bases = s[j:(j+2)]\n nn = f\"{l_bases}/{s_bases}\"\n current_score += self.nn_scores[nn]\n\n # Compute if matching, for left-end\n matching.append(self.rc_map[s[j]] == l[i+j])\n\n # Stop if reached last dinucleotide of longer primer\n if i + j == nL - 2:\n break\n\n # Add matching state for last nucleotide\n matching.append(self.rc_map[s[j+1]] == l[i+j+1])\n\n # Compute end bonus\n current_score += self._calc_linear_extension_bonus(\n matching,\n overhang_left=i > 0,\n overhang_right=len(matching) < nS, \n end_length=self.end_length,\n end_bonus=self.end_bonus\n )\n\n # Update if this is the new best score\n if current_score <= best_score:\n best_score = current_score\n best_start = i\n best_matching = matching\n \n # Assign to instance variables\n self.score = best_score\n self.best_start = best_start\n \n # Helps with getting alignment string\n self.best_matching = best_matching\n self.s = s\n self.l = l\n \n def get_alignment_string(self):\n \"\"\"\n Return a string representing the best alignment\n between the two primers\n\n \"\"\"\n\n # Recover names, kinda ugly\n if self.l == self.primer1:\n lname = self.primer1_name\n sname = self.primer2_name\n else:\n lname = self.primer2_name\n sname = self.primer1_name\n\n # Create space, regardless of length of primer names\n name_max = max([len(self.primer1_name), len(self.primer2_name)])\n str_template = \"{:>%d} {}\\n\" % name_max\n\n # Create individual strings\n gap = \" \"*self.best_start\n lstr = f\"5-{self.l}-3\"\n mstr = f\"{gap} {''.join(['|' if m else ' ' for m in self.best_matching])}\"\n sstr = f\"{gap}3-{self.s}-5\"\n\n align_str = f\"Dimer Score: {self.score:.03f}\\n\"\n align_str += str_template.format(lname, lstr)\n align_str += str_template.format(\"\", mstr)\n align_str += str_template.format(sname, sstr)\n align_str += \"\\n\"\n\n return align_str","repo_name":"JasonAHendry/multiply","sub_path":"src/multiply/align/algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":8598,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"8"} +{"seq_id":"35386578315","text":"#!/usr/bin/env python\n#Fact1: We're looking for color 8, as shown below. 
This can be found using im.getcolors() in every frame\nimport Image,ImageDraw\ndef get_vectors():\n # Return a list of movement vectors extracted from wiggling pixels\n im = Image.open(\"white.gif\")\n vectors=[]\n try:\n while True:\n pix=list(im.getdata()).index(8)\n y,x=divmod(pix,200)\n v=(x-100,y-100)\n vectors.append(v)\n im.seek(im.tell()+1)\n except EOFError:\n pass # end of sequence\n return vectors\n\nmax_x, h, w = 0, 50, 250\nim = Image.new('RGB', (w,h))\ndraw = ImageDraw.Draw(im)\nsrc = (max_x,h//2) # (x,y)\nfor v in get_vectors():\n if v==(0,0):\n max_x+=30\n src = (max_x,h//2)\n continue\n dst=(src[0]+v[0],src[1]+v[1])\n max_x=max(max_x,dst[0])\n draw.line([src, dst], fill='white')\n src=dst\nim.save('22.jpg')\n","repo_name":"unixwars/python_challenge","sub_path":"22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"14380338725","text":"def bin_to_dex(binary):\n y = 0\n decimal = 0\n binary = str(binary)\n for x in binary:\n if int(x) in bin:\n print(end='')\n else:\n return \"This is not a binary number\"\n\n while int(binary) > 0:\n n = int(binary) % 10\n binary = int(binary) // 10\n decimal = decimal + (2 ** y) * n\n y += 1\n print(decimal, end='')\n\n return ''\n\nbin = [0, 1]\nprint(bin_to_dex(110011))\n","repo_name":"mouyleng2508/PycharmProjects","sub_path":"week03:ex/44_bin_to_dec.py","file_name":"44_bin_to_dec.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"38530410737","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 13 20:26:09 2015\n\n@author: Lukas Gartmair\n\"\"\"\nimport slidingwindows\nimport numpy as np\n\ndef calc_strainrate(slice_x, slice_y):\n sr = (float(slice_y[1]-slice_y[0])/(slice_x[1]-slice_x[0]))\n return sr\n\ndef make_slices(inp_array):\n slices = slidingwindows.sliding_window(inp_array,2,1)\n return slices\n\ndef calc_plast_strainrate_with_fit(time_corr, strain_plast):\n \n strainrate_plast = np.arange(0)\n time_slices = make_slices(time_corr)\n strain_slices = make_slices(strain_plast)\n for index,s in enumerate(strain_slices):\n sub_strainrate_plast = calc_strainrate(time_slices[index],s)\n strainrate_plast = np.hstack((strainrate_plast,sub_strainrate_plast))\n return strainrate_plast\n\n# + time means \n \ndef get_corresponding_means(strain_plast):\n\n strain_means = np.arange(0)\n strain_slices = make_slices(strain_plast) \n strain_means = np.mean(strain_slices, axis = 1)\n return strain_means\n \nimport unittest\n\n# for sclice options 2,1\nms_test_arr = np.arange(5)\nms_test_arr_res = np.array([[0,1],[1,2],[2,3],[3,4]])\n\ncs_test_arr1_x = np.arange(2)\ncs_test_arr1_y = np.array([2,2])\ncs_test_arr1_res = 0\n\ncs_test_arr2_x = np.arange(2)\ncs_test_arr2_y = np.arange(2)\ncs_test_arr2_res = 1\n\ncs_test_arr3_x = np.array([2,4])\ncs_test_arr3_y = np.array([10,30])\ncs_test_arr3_res = 10\n\ncm_test_arr = np.arange(5)\ncm_test_arr_res = np.array([0.5,1.5,2.5,3.5])\n\nclass CalcPlastStrainRateTest(unittest.TestCase):\n\n def test_make_slices(self):\n np.testing.assert_array_equal(make_slices(ms_test_arr),ms_test_arr_res)\n \n def test_calc_strainrate(self):\n np.testing.assert_array_equal(calc_strainrate(cs_test_arr1_x, cs_test_arr1_y),cs_test_arr1_res)\n np.testing.assert_array_equal(calc_strainrate(cs_test_arr2_x, cs_test_arr2_y),cs_test_arr2_res)\n 
np.testing.assert_array_equal(calc_strainrate(cs_test_arr3_x, cs_test_arr3_y),cs_test_arr3_res)\n \n def test_get_corresponding_means(self):\n np.testing.assert_array_equal(get_corresponding_means(cm_test_arr), cm_test_arr_res)\n \n\ndef main():\n unittest.main()\nif __name__ == '__main__':\n main()","repo_name":"lukasgartmair/strainrate-analysis","sub_path":"calcplaststrainrate.py","file_name":"calcplaststrainrate.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"15828075040","text":"from Levenshtein import distance\n\n\nclass Barcode:\n\n # Constructor\n def __init__(self, infile, barcode_file):\n self.infile = infile\n self.barcode_file = barcode_file\n self.barcode_frequency = dict()\n self.samples = dict()\n self.sample_barcodes = dict()\n\n # Parse sample barcodes from file\n with open(barcode_file) as barcode_file:\n for current_line in barcode_file:\n\n # Extract entry\n current_line = current_line.rstrip('\\n')\n split_line = current_line.split(\"\\t\")\n barcode_name = split_line[0]\n barcode_seq = split_line[1]\n\n self.sample_barcodes[barcode_seq] = barcode_name\n self.samples[barcode_seq] = dict()\n\n # Parse Fasta file with reads\n def parse_reads(self):\n\n # Read features\n # read_name = ''\n read_seq = ''\n\n with open(self.infile) as f1:\n for line in f1:\n line = line.rstrip('\\n')\n\n # Fasta entry: name\n if line.startswith('>'):\n # read_name = line[1:]\n if read_seq != '':\n self.identify_barcodes(read_seq)\n read_seq = ''\n\n # Fasta entry: sequence\n else:\n read_seq += line\n\n # Analyse last alignment\n self.identify_barcodes(read_seq)\n\n # Screen alignment - quality, barcodes etc.\n def identify_barcodes(self, read_seq):\n\n # Find UMI sequence ##############################################################################\n umi_match_1 = ''\n umi_match_2 = ''\n\n # Find UMI starting from downstream (right) reference-anchor sequence (UMI has length 12)\n reference_downstream = read_seq.find('ATGGCCCG')\n if reference_downstream > 0:\n start_umi = reference_downstream - 12 # subtract exactly the length of UMI\n end_umi = reference_downstream # keep as is: last base in range is excluded!\n umi_match_1 = read_seq[start_umi: end_umi]\n\n # Find UMI starting from upstream (left) reference-anchor sequence (UMI has length 12)\n reference_upstream = read_seq.find('GATATTGC')\n if reference_upstream > 0:\n start_umi = reference_upstream + 8 # add length of searched anchor\n end_umi = reference_upstream + 20 # add length of anchor + length of UMI\n umi_match_2 = read_seq[start_umi: end_umi]\n\n # Compare identified UMIs\n if umi_match_1 == umi_match_2:\n umi = umi_match_1\n elif umi_match_1 != '':\n umi = umi_match_1\n elif umi_match_2 != '':\n umi = umi_match_2\n else:\n umi = 'NNNNNNNNNNNN'\n\n # Find sample barcode ###########################################################################\n sample_barcode = ''\n\n # Direct match\n for x in self.samples:\n if read_seq.find(x) > 0:\n sample_barcode = x\n break\n\n # No direct match: find sample-barcode starting from downstream (right) reference-anchor sequence (sample-barcode has length 16)\n if sample_barcode == '':\n reference_anchor_pos = read_seq.find('GCGTAACA')\n best_match = 9999\n if reference_anchor_pos > 0:\n start_eid = reference_anchor_pos - 16 # add length of searched sample-barcode\n end_eid = reference_anchor_pos # keep as is: last base in range is excluded!\n potential_barcode = read_seq[start_eid: 
end_eid]\n for x in self.samples:\n dist = distance(x, potential_barcode)\n if dist < 4 and dist < best_match:\n sample_barcode = x\n best_match = dist\n\n # No downstream match: find sample-barcode starting from upstream (left) reference-anchor sequence (sample-barcode has length 16)\n if sample_barcode == '':\n reference_anchor_pos = read_seq.find('CACCATAC')\n best_match = 9999\n if reference_anchor_pos > 0:\n start_eid = reference_anchor_pos + 8 # add length of searched anchor\n end_eid = reference_anchor_pos + 24 # add length of anchor + length of sample-barcode\n potential_barcode = read_seq[start_eid: end_eid]\n for x in self.samples:\n dist = distance(x, potential_barcode)\n if dist < 4 and dist < best_match:\n sample_barcode = x\n best_match = dist\n\n # Barcode statistics\n if sample_barcode != '':\n\n # Add or update UMI counts\n if umi in self.samples[sample_barcode]:\n self.samples[sample_barcode][umi] += 1\n else:\n self.samples[sample_barcode].update({umi: 1})\n\n # Add or update sample barcode counts\n if sample_barcode in self.barcode_frequency:\n self.barcode_frequency[sample_barcode] += 1\n else:\n self.barcode_frequency[sample_barcode] = 1\n\n\n\"\"\"\nMIT License\n\nCopyright (c) 2021 stossowski\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"","repo_name":"stossowski/barcode_detection","sub_path":"read_parser.py","file_name":"read_parser.py","file_ext":"py","file_size_in_byte":6409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"19204275304","text":"#!/usr/bin/env python\n\"\"\"\nThis file contains code for RawNet2\n\nHemlata Tak, Jose Patino, Massimiliano Todisco, Andreas Nautsch, \nNicholas Evans, and Anthony Larcher. End-to-End Anti-Spoofing with RawNet2. \nIn Proc. ICASSP, 6369--6373. 
2020.\n\nImplementation based on RawNet in\nhttps://github.com/asvspoof-challenge/2021/\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport sys\nimport numpy as np\n\nimport torch\nimport torch.nn as torch_nn\nimport torchaudio\nimport torch.nn.functional as torch_nn_func\n\nimport sandbox.block_nn as nii_nn\nimport core_scripts.other_tools.debug as nii_debug\n\n__author__ = \"Xin Wang\"\n__email__ = \"wangxin@nii.ac.jp\"\n__copyright__ = \"Copyright 2021, Xin Wang\"\n\nclass SincConv2(torch_nn.Module):\n \"\"\"\n \"\"\"\n @staticmethod\n def to_mel(hz):\n return 2595 * np.log10(1 + hz / 700)\n\n @staticmethod\n def to_hz(mel):\n return 700 * (10 ** (mel / 2595) - 1)\n\n\n def __init__(self, num_filters, kernel_size, in_channels=1,\n sample_rate = 16000, num_freq_bin = 257,\n stride = 1, dilation = 1, \n flag_pad = True, flag_trainable=False):\n \"\"\"\n SincConv2(num_filters, kernel_size, in_channels=1,\n sample_rate = 16000, num_freq_bins = 257,\n stride = 1, dilation = 1, \n flag_pad = True, flag_trainable=False)\n Args\n ----\n num_filters: int, number of sinc-filters\n kernel_size: int, length of each sinc-filter\n in_channels: int, dimension of input signal, \n (batchsize, length, in_channels)\n sample_rate: int, sampling rate\n num_freq_bin: number of frequency bins, not really important\n here. Default 257\n stride: int, stride of convoluiton, default 1\n dilation: int, dilaion of conv, default 1\n flag_pad: bool, whether pad the sequence to make input and \n output have equal length, default True\n flag_trainable: bool, whether the filter is trainable\n default False\n \n \n Num_filters and in_channels decide the output tensor dimension\n If input is (batchsize, length, in_channels), output will be\n (batchsize, length, in_channels * num_filters)\n \n This is done through depwise convolution, \n https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html\n i.e., each input dimension will go through all the num_filters.\n \"\"\"\n super(SincConv2,self).__init__()\n \n self.m_out_channels = num_filters\n self.m_in_channels = in_channels\n self.m_sample_rate=sample_rate\n\n # Forcing the filters to be odd (i.e, perfectly symmetrics)\n self.m_kernel_size = kernel_size\n if kernel_size % 2 == 0:\n self.m_kernel_size = self.m_kernel_size + 1\n\n self.m_stride = stride\n self.m_dilation = dilation\n \n # Pad to original length\n if flag_pad:\n self.m_padding = dilation * (self.m_kernel_size - 1) + 1 - stride\n if stride % 2 == 0:\n print(\"Warning: padding in SincCov is not perfect because of\")\n print(\"stride {:d}\".format(stride))\n self.m_padding = self.m_padding // 2\n else:\n self.m_padding = 0\n \n \n \n # initialize filterbanks using Mel scale\n f = int(self.m_sample_rate / 2) * np.linspace(0, 1, num_freq_bin)\n # Hz to mel conversion\n fmel = self.to_mel(f) \n fmelmax = np.max(fmel)\n fmelmin = np.min(fmel)\n \n filbandwidthsmel = np.linspace(fmelmin, fmelmax, self.m_out_channels+1)\n # Mel to Hz conversion\n filbandwidthsf = self.to_hz(filbandwidthsmel) \n \n # mel band\n self.m_mel = filbandwidthsf\n # time index\n self.m_hsupp = torch.arange(-(self.m_kernel_size-1)/2, \n (self.m_kernel_size-1)/2+1)\n # filter coeffs\n self.m_filters = torch.zeros(self.m_out_channels, self.m_kernel_size)\n \n # create filter coefficient\n for i in range(self.m_out_channels):\n fmin = self.m_mel[i]\n fmax = self.m_mel[i+1]\n hHigh = np.sinc(2 * fmax * self.m_hsupp / self.m_sample_rate)\n hHigh = (2 * fmax / self.m_sample_rate) * hHigh\n hLow = 
np.sinc(2 * fmin * self.m_hsupp / self.m_sample_rate)\n hLow = (2 * fmin / self.m_sample_rate) * hLow\n # band pass filters\n hideal = hHigh - hLow\n \n # applying windowing\n self.m_filters[i,:] = torch.tensor(\n np.hamming(self.m_kernel_size) * hideal)\n \n # repeat to (output_channels * in_channels)\n self.m_filters = self.m_filters.repeat(self.m_in_channels, 1)\n \n # save as model parameter\n self.m_filters = self.m_filters.view(\n self.m_out_channels * self.m_in_channels, 1, self.m_kernel_size)\n self.m_filters = torch_nn.Parameter(\n self.m_filters, requires_grad=flag_trainable)\n\n return\n \n def forward(self,x):\n \"\"\"SincConv(x)\n \n input\n -----\n x: tensor, shape (batchsize, length, feat_dim)\n \n output\n ------\n y: tensor, shape (batchsize, length, output-channel)\n \"\"\"\n return torch_nn_func.conv1d(\n x.permute(0, 2, 1), self.m_filters, stride=self.m_stride,\n padding=self.m_padding, dilation=self.m_dilation,\n bias=None, groups=x.shape[-1]).permute(0, 2, 1)\n\n\n\nclass FMS(torch_nn.Module):\n \"\"\"filter-wise feature map scaling\n Hemlata Tak, Jose Patino, Massimiliano Todisco, Andreas Nautsch, \n Nicholas Evans, and Anthony Larcher. \n End-to-End Anti-Spoofing with RawNet2. \n In Proc. ICASSP, 6369--6373. 2020.\n \n Example:\n l_fms = FMS(5)\n with torch.no_grad():\n data = torch.randn(2, 1000, 5)\n out = l_fms(data)\n \"\"\"\n def __init__(self, feat_dim):\n \"\"\"FMS(feat_dim)\n \n Args\n ----\n feat_dim: int, dimension of input, in shape (batch, length, dim)\n \"\"\"\n super(FMS, self).__init__()\n self.m_dim = feat_dim\n self.m_pooling = torch_nn.AdaptiveAvgPool1d(1)\n self.m_dim_change = torch_nn.Linear(feat_dim, feat_dim)\n self.m_act = torch_nn.Sigmoid()\n return\n \n def forward(self, x):\n \"\"\"FMS(x)\n input\n -----\n x: tensor, (batch, length, dim)\n \n output\n -----\n y: tensor, (batch, length, dim)\n \"\"\"\n if x.shape[-1] != self.m_dim:\n print(\"FMS expects data of dim {:d}\".format(self.m_dim))\n sys.exit(1)\n \n # pooling expects (batch, dim, length)\n # y will be (batch, dim, 1)\n y = self.m_pooling(x.permute(0, 2, 1))\n \n # squeeze to (batch, dim), unsqueeze to (batch, 1, dim, )\n y = self.m_act(self.m_dim_change(y.squeeze(-1))).unsqueeze(1)\n \n # scaling and shifting\n return (x * y + y)\n \n\nclass Residual_block(torch_nn.Module):\n \"\"\"Residual block used in RawNet2 for Anti-spoofing\n \"\"\"\n def __init__(self, nb_filts, flag_bn_input = False):\n \"\"\"Residual_block(bn_filts, flga_bn_input)\n Args\n ----\n bn_filts: list of int, [input_channel, output_channel]\n flag_bn_input: bool, whether do BatchNorm and LReLU\n default False\n \"\"\"\n super(Residual_block, self).__init__()\n \n # whether batch normalize input\n if flag_bn_input:\n self.bn1 = torch_nn.Sequential(\n torch_nn.BatchNorm1d(num_features = nb_filts[0]),\n torch_nn.LeakyReLU(negative_slope=0.3))\n else:\n self.bn1 = None\n \n self.conv = torch_nn.Sequential(\n torch_nn.Conv1d(in_channels = nb_filts[0],\n out_channels = nb_filts[1],\n kernel_size = 3,\n padding = 1,\n stride = 1),\n torch_nn.BatchNorm1d(num_features = nb_filts[1]),\n torch_nn.Conv1d(in_channels = nb_filts[1],\n out_channels = nb_filts[1],\n padding = 1,\n kernel_size = 3,\n stride = 1)\n )\n \n # for dimension change\n if nb_filts[0] != nb_filts[1]:\n self.dim_change = torch_nn.Conv1d(\n in_channels = nb_filts[0],\n out_channels = nb_filts[1],\n padding = 0,\n kernel_size = 1,\n stride = 1)\n else:\n self.dim_change = None\n \n # maxpooling\n self.mp = torch_nn.MaxPool1d(3)\n \n return\n \n def 
forward(self, x):\n \"\"\" y= Residual_block(x)\n \n input\n -----\n x: tensor, (batchsize, length, dim)\n \n output\n ------\n y: tensor, (batchsize, length, dim2)\n \"\"\"\n identity = x.permute(0, 2, 1)\n \n if self.bn1 is None:\n out = x.permute(0, 2, 1) \n else:\n out = self.bn1(x.permute(0, 2, 1))\n\n out = self.conv(out)\n \n if self.dim_change is not None:\n identity = self.dim_change(identity)\n \n out += identity\n out = self.mp(out)\n return out.permute(0, 2, 1)\n \nclass RawNet(torch_nn.Module):\n \"\"\"RawNet based on \n https://github.com/asvspoof-challenge/2021/\n \"\"\"\n def __init__(self, num_sinc_filter, sinc_filter_len, in_dim, sampling_rate, \n res_ch_1, res_ch_2, gru_node, gru_layer, emb_dim, num_class):\n super(RawNet, self).__init__()\n\n # sinc filter layer\n self.m_sinc_conv = SincConv2(\n num_sinc_filter, \n kernel_size = sinc_filter_len,\n in_channels = in_dim, \n sample_rate = sampling_rate, \n flag_pad = False, \n flag_trainable=False)\n \n # res block group\n self.m_resgroup = torch_nn.Sequential(\n nii_nn.BatchNorm1DWrapper(num_sinc_filter),\n torch_nn.SELU(),\n Residual_block([num_sinc_filter, res_ch_1], flag_bn_input=False),\n FMS(res_ch_1),\n Residual_block([res_ch_1, res_ch_1], flag_bn_input=True),\n FMS(res_ch_1),\n Residual_block([res_ch_1, res_ch_2], flag_bn_input=True),\n FMS(res_ch_2),\n Residual_block([res_ch_2, res_ch_2], flag_bn_input=True),\n FMS(res_ch_2),\n Residual_block([res_ch_2, res_ch_2], flag_bn_input=True),\n FMS(res_ch_2),\n Residual_block([res_ch_2, res_ch_2], flag_bn_input=True),\n FMS(res_ch_2),\n )\n \n # GRU part\n self.m_before_gru = torch_nn.Sequential(\n nii_nn.BatchNorm1DWrapper(res_ch_2),\n torch_nn.SELU()\n )\n self.m_gru = torch_nn.GRU(input_size = res_ch_2,\n hidden_size = gru_node,\n num_layers = gru_layer,\n batch_first = True)\n \n self.m_emb = torch_nn.Linear(in_features = gru_node, \n out_features = emb_dim)\n\n \n # output score\n self.m_output = torch_nn.Linear(in_features = emb_dim,\n out_features = num_class, \n bias=True)\n # \n self.logsoftmax = torch_nn.LogSoftmax(dim=1)\n return\n \n def _compute_embedding(self, x):\n \"\"\"\n input\n -----\n x: tensor, (batch, length, dim)\n \n output\n ------\n y: tensor, (batch, emb_dim)\n \"\"\"\n batch, length, dim = x.shape\n # \n x = self.m_sinc_conv(x)\n x = self.m_resgroup(x)\n x, _ = self.m_gru(self.m_before_gru(x))\n return self.m_emb(x[:, -1, :])\n \n \n def _compute_score(self, emb, inference=True):\n \"\"\"\n input\n -----\n emb: tensor, (batch, emb_dim)\n \n output\n ------\n score: tensor, (batch, num_class)\n \n Score here refers to \n \"\"\"\n # we should not use logsoftmax if we will use CrossEntropyLoss\n flag_logsoftmax = False\n\n if inference:\n # no softmax\n return self.m_output(emb)\n elif flag_logsoftmax:\n # Logsoftmax for training loss\n # this is used when the training criterion is NLLoss\n return self.logsoftmax(self.m_output(emb))\n else:\n return self.m_output(emb)\n \n def forward(self, x):\n \"\"\"\n input\n -----\n x: tensor, (batch, length, dim)\n \n output\n ------\n y: tensor, (batch, num_class)\n \n y is the log-probablity after softmax\n \"\"\"\n emb = self._compute_embedding(x)\n return self._compute_score(emb, inference=False)\n \n def inference(self, x):\n \"\"\"\n input\n -----\n x: tensor, (batch, length, dim)\n \n output\n ------\n y: tensor, (batch, num_class)\n \n y is the input activation to softmax\n \"\"\"\n emb = self._compute_embedding(x)\n return self._compute_score(emb, inference=True)\n\nif __name__ == \"__main__\":\n 
print(\"Definition of RawNet2\")\n","repo_name":"nii-yamagishilab/project-NN-Pytorch-scripts","sub_path":"sandbox/block_rawnet.py","file_name":"block_rawnet.py","file_ext":"py","file_size_in_byte":13819,"program_lang":"python","lang":"en","doc_type":"code","stars":265,"dataset":"github-code","pt":"8"} +{"seq_id":"73797835142","text":"\ndef main():\n\n while 1:\n pin = input(\"Enter pincode: \")\n error = \"\"\n for i in pin:\n if i < '0' or i > '9':\n error = \"Please enter only 0-9 number\"\n break\n if len(error) > 0:\n print(error)\n continue\n \n if len(pin) < 6:\n print(\"Don't use pincode fewer than 6 numbers\")\n continue\n\n temp = pin\n pin = []\n for i in temp:\n pin.append(int(i))\n\n countSame = 0\n for i in range(len(pin)-1):\n if pin[i] == pin[i+1]:\n countSame += 1\n if countSame > 1:\n error = \"Don't use triple number.\"\n break\n else:\n countSame = 0\n\n if len(error) > 0:\n print(error)\n continue\n\n countGroup = 0\n for i in range(len(pin)-1):\n if pin[i] == pin[i+1]:\n countGroup += 1\n if countGroup > 2:\n error = \"Don't use double number more than 2 groups.\"\n break\n \n if len(error) > 0:\n print(error)\n continue\n \n for i in range(len(pin)-2):\n if abs(pin[i] - pin[i+2]) == pin[i+1] and abs(pin[i] - pin[i+1]) == 1:\n error = \"Don't use number in ascending or decending order more than 2 numbers.\"\n break\n \n if len(error) > 0:\n print(error)\n continue\n else:\n print(\"Success.\")\n break\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"GinglePie/logicTest","sub_path":"ValidatePincode.py","file_name":"ValidatePincode.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"32231372235","text":"import unittest,os\nfrom util import *\n \ndef gemm_fox_random():\n N = 8\n Np = 4\n epsilon = 1e-6\n filename_A = \"matrix_A.txt\"\n filename_B = \"matrix_B.txt\"\n filename_C = \"matrix_C.txt\"\n filename_C_seq = \" matrix_C_seq.txt\"\n os.system(\"python generate_matrix.py\" + \" \" + filename_A + \" \" + str(N))\n os.system(\"python generate_matrix.py\" + \" \" + filename_B + \" \" + str(N))\n os.system(\"mpirun --mca pml ob1 -n \" + str(Np) + \" ../gemm_fox\" + \" \" + filename_A + \" \" + filename_B + \" \" + filename_C)\n A = parse_matrix(filename_A)\n B = parse_matrix(filename_B)\n C_seq = A*B\n C = parse_matrix(filename_C)\n return equal(C,C_seq,N,epsilon)\n\n\ndef gemm_fox():\n N = 8\n Np = 4\n epsilon = 1e-6\n filename_A = \"matrix_A.txt\"\n filename_B = \"matrix_B.txt\"\n filename_C = \"matrix_C.txt\"\n filename_C_seq = \" matrix_C_seq.txt\"\n matrix = np.matrix([[1]*N]*N)\n save_matrix(matrix,\"matrix_A.txt\")\n save_matrix(matrix,\"matrix_B.txt\")\n os.system(\"mpirun --mca pml ob1 -n \" + str(Np) + \" ../gemm_fox\" + \" \" + filename_A + \" \" + filename_B + \" \" + filename_C)\n C_seq = parse_matrix(filename_A)*parse_matrix(filename_B)\n C = parse_matrix(filename_C)\n return equal(C,C_seq,N,epsilon)\n\n#####\nclass TestTDP3(unittest.TestCase):\n def test_gemm_fox_random(self):\n self.assertEqual(gemm_fox_random(),True)\n \n def test_gemm_fox(self):\n self.assertEqual(gemm_fox(),True)\n \n\nif __name__ == \"__main__\":\n os.system(\"touch matrix_C.txt\")\n unittest.main()\n\n","repo_name":"alecorguill/prcd_tdp","sub_path":"td3/test/unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"17563095615","text":"import 
sys\nimport gc\nfrom pprint import pprint\nimport executing\n\n\ndef f():\n return sys._getframe()\n\n\nclass A:\n def do(self):\n executing_obj = executing.Source.executing(sys._getframe())\n node = list(executing_obj.statements)[0]\n print(node)\n while hasattr(node, \"parent\") and node.parent:\n print(node.parent)\n node = node.parent\n\n\na = A()\na.do()\n","repo_name":"laike9m/TestPython","sub_path":"check_frame_belonging.py","file_name":"check_frame_belonging.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"21528004804","text":"from django.shortcuts import render\nfrom listing.models import Listings\nfrom realtor.models import Realtors\nfrom listing.choices import bedroom_choices, price_choices, state_choices\n\ndef index(request):\n title = 'Real Estate | Welcome'\n template = 'main/index.html'\n\n # Order & Filter the context for the listing page.\n listings = Listings.objects.all().order_by(\n '-list_date').filter(is_published=True)[:3]\n\n context = {\n 'title': title,\n 'listings': listings,\n 'state_choices': state_choices,\n 'bedroom_choices': bedroom_choices,\n 'price_choices': price_choices,\n }\n return render(request, template, context)\n\n\ndef about(request):\n title = 'Real Estate | About'\n template = 'main/about.html'\n mvp = Realtors.objects.all().filter(is_mvp=True)\n realtors = Realtors.objects.all().order_by('-hire_date')\n\n context = {\n 'title': title,\n 'mvp': mvp,\n 'realtors': realtors,\n }\n return render(request, template, context)\n","repo_name":"Bonekit/DjangoRealEstate","sub_path":"real_estate/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"8"} +{"seq_id":"26777792670","text":"from .bengali_funcs import MishFunction\nfrom fastai2.vision.all import *\nfrom fastai2.basics import *\n\n\nclass Head(Module):\n def __init__(self, nc, n, ps=0.5):\n self.fc = nn.Sequential(*[AdaptiveConcatPool2d(), Mish(), Flatten(),\n LinBnDrop(nc*2, 512, True, ps, Mish()),\n LinBnDrop(512, n, True, ps)])\n self._init_weight()\n \n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1.0)\n m.bias.data.zero_()\n \n def forward(self, x):\n return self.fc(x)\n \n \nclass CascadeModel(Module):\n def __init__(self, arch, n, sz, pre=True):\n m = arch(pre)\n m = nn.Sequential(*children_and_parameters(m)[:-4])\n conv = nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1, bias=False)\n w = (m[0][0].weight.sum(1)).unsqueeze(1)\n conv.weight = nn.Parameter(w)\n m[0][0] = conv\n nc = m(torch.zeros(2, 1, sz, sz)).detach().shape[1]\n self.body = m\n self.heads = nn.ModuleList([Head(nc, c) for c in n])\n \n def forward(self, x): \n x = self.body(x)\n return [f(x) for f in self.heads]","repo_name":"overriden-sfdd/kaggle-silver-bengaliai","sub_path":"src/train_utils/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"74457296901","text":"# Importing the libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport json\nfrom sklearn.preprocessing import StandardScaler\nimport pickle\nfrom . 
import tools\n\n\nclass Classify:\n\n # Classify Object Classify's the Data in the Data Object\n # Classifications of Building Consumption Behavior Include\n # -1 - Below Threshold -> Energy Consumption is Very Low\n # 0 - Cocentric -> Consumption is relatively uniform\n # 1 - People -> Building has higher consumption during work hours\n # 2 - Scheduler -> Building that Operates With a Scheduler\n # 3 - Reverse -> Energy Usage is Greater During Work Hours\n # 4 - Random -> Abnormal Energy Behavior, Can't be Classified\n # \n # self.data = Original Consumption\n # self.thresh = Threshold Level to Classify as Below Threshold (kwH)\n # self.classifications = [ bldgnames(arr), dates(arr), classifications(2d arr)]\n def __init__(self, data, thresh):\n self.data = data.data\n self.thresh = thresh\n self.classifications = self.gen_classifications()\n \n # Get Classifications for All Buildings for Every Day in the Data Object\n def gen_classifications(self):\n energy_data = self.reshape_data()\n original_len = len(energy_data)\n \n energy_y = []\n for i in range(len(energy_data)): energy_y.append(i)\n energy_y = pd.DataFrame(energy_y)\n\n energy_data, energy_y = self.remove_small(pd.DataFrame(energy_data), energy_y)\n energy_y = energy_y.iloc[:, 0]\n \n sc = StandardScaler()\n energy_data = sc.fit_transform(energy_data)\n\n # Get List of Buildings\n buildings = list(self.data)[1:]\n\n # Get List of dates\n dates = pd.DataFrame(self.data.iloc[:,0])\n dates = self.group_df(df=dates, method=\"mean\", interval='day')\n for i in range(dates.shape[0]): dates.iloc[i, 0] = dates.iloc[i, 0].split()[0]\n dates = list(dates.iloc[:, 0])\n\n classifications = self.classify(energy_data)\n\n # Create building_type Array\n building_type = []\n for i in range(original_len): building_type.append(-1)\n for i in range(len(energy_y)): building_type[energy_y[i]] = classifications[i]\n\n # Create Classification df Date, Building Type\n dates = dates * 134\n updated_bldg = [] \n for i in buildings: updated_bldg += [i] * 237\n\n # Create List of Day Types\n day_type = tools.Tools.classify_day_type(dates)\n\n #Create Building Classification Dataframe\n arrays = [day_type, dates, updated_bldg, building_type]\n labels = ['Day_Type', 'Date', 'Building', 'Type']\n bldg_classes = tools.Tools.arrays_to_df(arrays, labels)\n \n # Remove Weekends and Holidays from the Dataset\n wkday_bldg = bldg_classes[bldg_classes['Day_Type'] > 0].reset_index()\n\n return self.table_bldg_classes(wkday_bldg)\n\n\n # Create Dataframe with the Columns: 'Building', 'Data', 'Type'\n # param bldg_df (dataframe): dataframe with the building data\n # return bldg_classes [bldg label (arr), dates (arr), types(arr)]\n def table_bldg_classes(self, bldg_df):\n counter = 0\n # Arrays for Dataframe Creation\n bldgs_labels = []\n dates_arr = []\n types_arr = []\n # Arrays Collect Information for Each Building then Reset for next Building\n prev_bldg = bldg_df['Building'][0]\n dates = []\n bldg_type = []\n\n for i in range(len(bldg_df)): \n curr_bldg = bldg_df['Building'][i]\n # Graph Old Building if Current Bldg is New or Last Bldg\n if (curr_bldg != prev_bldg) or (i == len(bldg_df) - 1):\n counter += 1\n bldgs_labels.append(prev_bldg)\n dates_arr.append(dates)\n types_arr.append(bldg_type)\n dates = []\n bldg_type = []\n prev_bldg = curr_bldg\n else:\n dates.append(bldg_df['Date'][i])\n bldg_type.append(bldg_df['Type'][i])\n\n return [bldgs_labels, dates_arr, types_arr]\n\n # Remove Instances With Energy Usage Below thresh kwH\n def remove_small(self, X, y):\n 
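# Editorial note (added): a row survives only when its mean exceeds self.thresh*4; the 4x factor presumably\n        # corresponds to four 15-minute readings per hour (an assumption -- the original does not state it).\n        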
final_X, final_y = pd.DataFrame(), pd.DataFrame()\n y_list = list(y.iloc[:, 0])\n new_df = []\n new_y = []\n for i in range(0, X.shape[0]):\n if X.iloc[i, :].mean() > self.thresh*4:\n new_df.append(X.iloc[i, :].values)\n new_y.append(y_list[i])\n final_X = pd.concat([final_X, pd.DataFrame(new_df)])\n final_y = pd.concat([final_y, pd.DataFrame(new_y)])\n return final_X, final_y\n\n\n # Classify incoming data\n # param (data) - Data to classify\n def classify(self, data):\n filename = 'building_classification_model.p'\n loaded_model = pickle.load(open(filename, 'rb'))\n return loaded_model.predict(data)\n \n # Group By Interval with some Method\n # Methods: Sum, Mean, Min, or Max\n # Returns a df of grouped data\n def group_df(self, df, method=\"mean\", interval='day', has_time_col=True):\n interval = tools.Tools.time_to_row(interval)\n grouped_df = pd.DataFrame()\n for i in range(0,len(df)//interval):\n if has_time_col: start_date = df['time'][i*interval]\n block = df.iloc[ i*interval:(i+1)*interval, : ]\n #####if(i == 223): print(block)\n # Perform Computation on Row\n if method == \"sum\": block = block.sum(axis=0)\n elif method == \"mean\": block = block.mean(axis=0)\n elif method == \"min\": block = block.min(axis=0)\n elif method == \"max\": block = block.max(axis=0)\n else:\n print(\"Invalid Method Entry\")\n return\n # Add the Start Date Label\n if has_time_col:\n if method == \"mean\": block = pd.Series([start_date]).append(block) \n else: block[0] = start_date\n block = block.to_frame().transpose()\n grouped_df = grouped_df.append(block)\n if method == \"mean\" and has_time_col: grouped_df = grouped_df.rename(columns={ grouped_df.columns[0]: \"time\" })\n return grouped_df\n \n # Reshape Training Data\n # Formats it for Classification Model\n def reshape_data(self, has_time_col=True, agg_interval=\"0:15\", time_interval=\"day\"):\n X = self.data\n if has_time_col: X = X.drop(columns=['time'])\n # Determine Shape of New Dataframe and Reshape\n new_col_ct = int(tools.Tools.time_to_row(time_interval)/tools.Tools.time_to_row(agg_interval))\n rows_per_instance = int(X.shape[0]/new_col_ct)\n X = X.T.values.reshape(X.shape[1] * rows_per_instance, new_col_ct)\n return X\n\n # Create a Bar Graph Summarizing Classification Type Frequency\n def graph_bar_summary(self):\n type_labels = [\"Below Threshold\", \"Cocentric\", \"People\", \"Scheduler\", \"Reverse\", \"Random\"]\n counts = [0, 0, 0, 0, 0, 0]\n for classifications in self.classifications[2]:\n for classified in classifications:\n counts[classified + 1] += 1\n\n print(\"Classification Type Frequency\")\n for i in range(len(counts)): print(type_labels[i] + \": \" + str(counts[i]))\n\n plt.title(\"Classification Type Frequency\")\n plt.bar(x=[0,1,2,3,4,5], height=counts, tick_label=type_labels)\n plt.xticks(rotation='vertical')\n plt.show()","repo_name":"KKsharma99/energy_watch","sub_path":"Energy Watch - Spring 2019/code/energy_watch/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":7550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"35498444583","text":"import yaml\nimport typer\n\napp = typer.Typer()\n\ndef load_agents_from_plugin(plugin_list: list):\n agnts_constructors = {}\n for plugin in plugin_list:\n agnts_constructors.update(__import__(plugin).agents_constructors)\n\n return(agnts_constructors)\n\ndef parse_agents(agents_def, agents_constructors):\n agents = {\n key: agents_constructors[value['kind']](key, **value)\n for key, value in 
agents_def.items()\n    }\n    return agents\n\n@app.command()\ndef parse(process_yaml):\n    with open(process_yaml, 'r') as file:\n        process = yaml.safe_load(file)\n    \n    agents_constructors = load_agents_from_plugin(process['agents_plugins'])\n    agents = parse_agents(process[\"agents\"], agents_constructors)\n    max_process_length = process[\"process\"][\"max_process_length\"]\n    entrypoint = process[\"process\"][\"entrypoint\"]\n    variables = process.get(\"variables\", {})\n\n    print(\"Parsing successful!\")\n\n    return agents, entrypoint, process[\"steps\"], variables, max_process_length\n\n
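# Editorial note (added): a minimal process YAML of the shape parse() above expects. The field names\n# come straight from the code; the concrete values are invented for illustration only:\n#\n#   agents_plugins: [my_agents]        # modules exposing an `agents_constructors` dict\n#   agents:\n#     writer: {kind: llm}              # `kind` selects the constructor\n#   process:\n#     entrypoint: draft\n#     max_process_length: 10\n#   variables: {topic: \"penguins\"}\n#   steps:\n#     draft:\n#       agent: writer\n#       prompt: \"Write about {topic}\"\n#       validation: {agent: writer, prompt: \"Is this good? {draft_answer}\"}\n#       on_success: draft              # draw() renders a transition back to the entrypoint as end_of_process\n#       on_failure: draft\n\n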
@app.command()\ndef draw(process_yaml:str, file_path:str):\n    agents, entrypoint, steps, variables, max_process_length = parse(\n        process_yaml\n    )\n    md = f\"flowchart TD\\n\\tsubgraph Agents\\n\\t\\tdirection LR\"\n    for agent, color in zip(agents, ['lightblue', 'lightgreen', 'orange', 'pink', 'lightgrey', 'white', 'yellow', 'red', 'green',]):\n        md += f\"\\n\\t\\tclassDef {agent} fill:{color}\"\n        agent_type = str(type(agents[agent])).replace(\n            \"\", \"\"\n        )\n        md += f\"\\n\\t\\t{agent}({agent} - {agent_type}):::{agent}\"\n    md += \"\\n\\tend\\n\\n\\tsubgraph Process\"\n\n    for step in steps:\n        on_success = {entrypoint: 'end_of_process(( ))'}.get(\n            steps[step]['on_success'], steps[step]['on_success']\n        )\n        on_failure = {entrypoint: 'end_of_process(( ))'}.get(\n            steps[step]['on_failure'], steps[step]['on_failure']\n        )\n        md += f\"\\n\\t\\t{step}({step}):::{steps[step]['agent']}\"\n        if on_success == on_failure:\n            md += f\"\\n\\t\\t{step} --> {on_success}\"\n        else:\n            md += f\"\\n\\t\\t{step} --> {step}_validation{{{{validation}}}}:::{steps[step]['validation']['agent']}\"\n            md += f\"\\n\\t\\t{step}_validation --Success--> {on_success}\"\n            md += f\"\\n\\t\\t{step}_validation --Failure--> {on_failure}\"\n    \n    md += f\"\\n\\t\\tstart_of_process(( )) --> {entrypoint}\"\n    md += \"\\n\\tend\\n\"\n    md +=\"\\n\\t style Process fill:white,stroke:white\"\n    md +=\"\\n\\t style Agents fill:white,stroke:grey\"\n\n    with open(file_path, 'w') as file:\n        file.write(md)\n\n\n@app.command()\ndef run(process_yaml, verbose: bool=False):\n    agents, entrypoint, steps, variables, max_process_length = parse(\n        process_yaml\n    )\n\n    current_step = entrypoint\n    context = {}\n\n    n_step = 0\n    while True:\n        n_step += 1\n        step = steps[current_step]\n        agent = agents[step['agent']]\n        prompt = step[\"prompt\"].format(**context, **variables)\n        response = agent(prompt, verbose)\n        context[f'{current_step}_answer'] = response\n        \n        validation = step['validation']\n        agent = agents[validation['agent']]\n        validation_prompt = validation[\"prompt\"].format(**context, **variables)\n        validation_response = agent(validation_prompt, verbose)\n\n        if n_step >= max_process_length:\n            current_step = \"fail\"\n        elif validation_response.strip().lower()[:3] == \"yes\":\n            current_step = step.get(\"on_success\", \"fail\")\n        else:\n            current_step = step.get(\"on_failure\", \"fail\")\n\nif __name__ == \"__main__\":\n    app()\n","repo_name":"Ozennefr/GPPPT","sub_path":"gpppt/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"9"}
{"seq_id":"9615060218","text":"# -*- coding: utf-8 -*-\r\n# \r\nfrom __future__ import division\r\n\r\nimport random\r\n\r\nimport otree.models\r\nfrom otree.db import models\r\nfrom otree import widgets\r\nfrom otree.common import Currency as c, currency_range, safe_json\r\nfrom otree.constants import BaseConstants\r\nfrom otree.models import BaseSubsession, BaseGroup, BasePlayer\r\n\r\n# \r\n\r\nauthor = 'Benson'\r\n\r\ndoc = \"\"\"\r\nIn this game, there are seven different coins, each with different amounts for heads and tails.\r\nSubjects can choose which coin they want to flip and then get the money that's associated with either heads or tails.\r\n\"\"\"\r\n\r\n\r\nclass Constants(BaseConstants):\r\n    name_in_url = 'risk_game'\r\n    players_per_group = None\r\n    num_rounds = 1\r\n\r\n\r\nclass Subsession(BaseSubsession):\r\n    pass\r\n\r\n\r\nclass Group(BaseGroup):\r\n    def make_random_toss_one(self):\r\n        for p in self.get_players():\r\n            p.rand_toss_1 = random.choice([\"Heads\", \"Tails\"])\r\n\r\n    def make_random_toss_two(self):\r\n        for p in self.get_players():\r\n            p.rand_toss_2 = random.choice([\"Heads\", \"Tails\"])\r\n\r\n    def menu_a_points(self):\r\n        for p in self.get_players():\r\n            if p.decision_1 == \"Coin 1\":\r\n                if p.rand_toss_1 == \"Heads\":\r\n                    p.coin_1 = 0\r\n                else:\r\n                    p.coin_1 = 2880\r\n\r\n            elif p.decision_1 == \"Coin 2\":\r\n                if p.rand_toss_1 == \"Heads\":\r\n                    p.coin_1 = 240\r\n                else:\r\n                    p.coin_1 = 2400\r\n\r\n            elif p.decision_1 == \"Coin 3\":\r\n                if p.rand_toss_1 == \"Heads\":\r\n                    p.coin_1 = 480\r\n                else:\r\n                    p.coin_1 = 1920\r\n\r\n            elif p.decision_1 == \"Coin 4\":\r\n                if p.rand_toss_1 == \"Heads\":\r\n                    p.coin_1 = 720\r\n                else:\r\n                    p.coin_1 = 1440\r\n\r\n            elif p.decision_1 == \"Coin 5\":\r\n                if p.rand_toss_1 == \"Heads\":\r\n                    p.coin_1 = 840\r\n                else:\r\n                    p.coin_1 = 1200\r\n\r\n            elif p.decision_1 == \"Coin 6\":\r\n                p.coin_1 = 960\r\n\r\n            elif p.decision_1 == \"Coin 7\":\r\n                if p.rand_toss_1 == \"Heads\":\r\n                    p.coin_1 = 1080\r\n\r\n                else:\r\n                    p.coin_1 = 720\r\n\r\n    def menu_b_points(self):\r\n        for p in self.get_players():\r\n            if p.decision_2 == \"Coin 1\":\r\n                if p.rand_toss_2 == \"Heads\":\r\n                    p.coin_2 = 0\r\n                else:\r\n                    p.coin_2 = 2160\r\n\r\n            elif p.decision_2 == \"Coin 2\":\r\n                if p.rand_toss_2 == \"Heads\":\r\n                    p.coin_2 = 240\r\n                else:\r\n                    p.coin_2 = 1920\r\n\r\n            elif p.decision_2 == \"Coin 3\":\r\n                if p.rand_toss_2 == \"Heads\":\r\n                    p.coin_2 = 480\r\n                else:\r\n                    p.coin_2 = 1680\r\n\r\n            elif p.decision_2 == \"Coin 4\":\r\n                if p.rand_toss_2 == \"Heads\":\r\n                    p.coin_2 = 720\r\n                else:\r\n                    p.coin_2 = 1440\r\n\r\n            elif p.decision_2 == \"Coin 5\":\r\n                if p.rand_toss_2 == \"Heads\":\r\n                    p.coin_2 = 960\r\n                else:\r\n                    p.coin_2 = 1200\r\n\r\n            elif p.decision_2 == \"Coin 6\":\r\n                p.coin_2 = 1080\r\n\r\n            elif p.decision_2 == \"Coin 7\":\r\n                if p.rand_toss_2 == \"Heads\":\r\n                    p.coin_2 = 1200\r\n                else:\r\n                    p.coin_2 = 960\r\n\r\n    def set_payoff(self):\r\n        self.menu_a_points()\r\n        self.menu_b_points()\r\n\r\n        for p in self.get_players():\r\n            p.participant.vars[\"game_payoff\"][\"risk_game\"] = p.coin_1 + p.coin_2\r\n            p.participant.vars[\"carrying_payoff\"] += p.coin_1 + p.coin_2\r\n            p.risk_points = p.coin_1 + p.coin_2\r\n            p.payoff = p.coin_1 + p.coin_2\r\n\r\n\r\nclass Player(BasePlayer):\r\n    CHOICE_ONE = (\r\n        (\"Coin 1\", \"Coin 1: 0 tokens if heads and 2880 tokens if tails\"),\r\n        (\"Coin 2\", \"Coin 2: 240 tokens if heads and 2400 tokens if tails\"),\r\n        (\"Coin 3\", \"Coin 3: 480 tokens if heads and 1920 tokens if tails\"),\r\n        (\"Coin 4\", \"Coin 4: 720 tokens if heads and 1440 tokens if tails\"),\r\n        (\"Coin 5\", \"Coin 5: 840 tokens if heads and 1200 tokens if tails\"),\r\n        (\"Coin 6\", \"Coin 6: 960 tokens if heads and 960 tokens if tails\"),\r\n        (\"Coin 7\", \"Coin 7: 1080 tokens if heads and 720 tokens if tails\"),\r\n    )\r\n\r\n    CHOICE_TWO = (\r\n        (\"Coin 1\", \"Coin 1: 0 tokens if heads and 2160 tokens if tails\"),\r\n        (\"Coin 2\", 
\"Coin 2: 240 tokens if heads and 1920 tokens if tails\"),\r\n (\"Coin 3\", \"Coin 3: 480 tokens if heads and 1680 tokens if tails\"),\r\n (\"Coin 4\", \"Coin 4: 720 tokens if heads and 1440 tokens if tails\"),\r\n (\"Coin 5\", \"Coin 5: 960 tokens if heads and 1200 tokens if tails\"),\r\n (\"Coin 6\", \"Coin 6: 1080 tokens if heads and 1080 tokens if tails\"),\r\n (\"Coin 7\", \"Coin 7: 1200 tokens if heads and 960 tokens if tails\"),\r\n )\r\n\r\n decision_1 = models.CharField(choices=CHOICE_ONE, widget=widgets.RadioSelect())\r\n rand_toss_1 = models.CharField()\r\n coin_1 = models.IntegerField(default=0)\r\n coin_2 = models.IntegerField(default=0)\r\n decision_2 = models.CharField(choices=CHOICE_TWO, widget=widgets.RadioSelect())\r\n rand_toss_2 = models.CharField()\r\n risk_points = models.IntegerField()\r\n","repo_name":"busara-devs/almas_otree_v2","sub_path":"risk_game/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"25279772261","text":"class Solution:\n def bubble_sort(self, q):\n n = len(q)\n for i in range(n):\n for j in range(i, n-1):\n if q[j] > q[j + 1]:\n q[j], q[j + 1] = q[j + 1], q[j]\n return q\n\ntest = Solution()\nans = test.bubble_sort([2, 1, 5, 3, 4])\nprint(ans)\n","repo_name":"shreyashg027/Leetcode-Problem","sub_path":"Algorithms/BubbleSort.py","file_name":"BubbleSort.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"38360263036","text":"from selenium.webdriver import Chrome\nfrom selenium.webdriver.common.by import By\nfrom time import sleep\n\ndriver=Chrome()\ndriver.get(\"https://demowebshop.tricentis.com/\")\ndriver.maximize_window()\nsleep(2)\n\n# scrolly the page by pixcels\ndriver.execute_script(\"window.scrollBy(0,100)\",\"\")\nsleep(2)\n\n# to reach the down of page\ndriver.execute_script(\"window.scrollBy(0,document.body.scrollHeight)\")\nsleep(5)\n\n# to reach the top of page\ndriver.execute_script(\"window.scrollBy(0,-document.body.scrollHeight)\")\nsleep(5)\n\n# Scroll down to element is visible\nflag=driver.find_element(By.XPATH,\"/html/body/div[4]/div[1]/div[4]/div[3]/div/div/div[3]/div[2]/div/div[2]/h2/a\")\ndriver.execute_script(\"arguments[0].scrollIntoView();\",flag)\nsleep(5)","repo_name":"Amitroshan1/selenium","sub_path":"Scroll.py","file_name":"Scroll.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"131849963","text":"import sys\nsys.setrecursionlimit(500005)\n#sys.setrecursionlimit(10**9)\n#import pypyjit # this is for solving slow issue for pypy when using recursion but python will not need this (test will fail but submit works)\n#pypyjit.set_param('max_unroll_recursion=-1')\n\nfrom collections import Counter\n#mylist = [\"apple\",\"banana\",\"apple\",\"apple\",\"orange\"]\n#mycounter = Counter(mylist)\nfrom collections import defaultdict\n#d = defaultdict(int)\n\n\nN, D = list(map(int, input().split()))\n\nall = []\n\nfor i in range(N):\n x, y = list(map(int, input().split()))\n all.append((x, y))\n\nimport math\n\nmp = [[] for _ in range(N)]\n\nfor i in range(N):\n for j in range(i,N):\n a1 = all[i][0]\n a2 = all[i][1]\n b1 = all[j][0]\n b2 = all[j][1]\n #d = abs(a1-b1)**2 + abs(a2-b2)**2\n d = math.sqrt(abs(a1-b1)**2 + abs(a2-b2)**2)\n\n if d <= D:\n mp[i].append(j)\n 
mp[j].append(i)\n\n\n#print(mp)\nans = [False for f in range(N)]\n\nq = [0]\n\nwhile q:\n    v = q.pop()\n    for g in range(len(mp[v])):\n\n        nx = mp[v][g]\n        if ans[nx] == True:\n            continue\n        else:\n            #ans[nx] = True # this is AC\n            q.append(nx)\n    ans[v] = True # this was the cause of TLE -> the same value can be in the queue many times (maximum O(N^3)?) => see below\n\n\n#print(ans)\n\nfor a in ans:\n    if a == True:\n        print('Yes')\n    else:\n        print('No')\n\n'''\nLINE openchat\nSorry to bother you while you are busy, but let me ask one more question related to abc304C. It is about how to estimate time complexity.\nThe source code below gets TLE because of where ans[v] = True sits inside the while loop. Moving this True assignment into the for loop inside the while loop makes it AC. (That is because it becomes much harder for the same vertex to be pushed into the queue.)\n\nSo I am trying to estimate the time complexity of this TLE version, but I cannot tell whether it is O(N^3) or even larger.\nEven when I try to construct a worst case, I cannot estimate it well. Could you explain it, including how to do the estimation?\n\nThis implementation only sets the original vertex to True after pushing all of its connected vertices (the people who get infected) into the queue, which is inefficient.\nAlso, since it does not check whether a vertex is already True when it is popped from the queue, whenever the same vertex sits in the queue several times, its connected vertices get inserted into the queue that many extra times.\nThe only situation where the number of insertions shrinks is when, while inserting connected vertices, we run into one that is already True. (I think it is actually fairly rare for this to get slow in practice; when I submitted it, only 2 test cases got TLE and the rest were AC.)\nIt is a very inefficient implementation, but I would like to know how to estimate its time complexity.\n\nThank you in advance.\n\n\n\n\nreply\nThink about how many times the while loop repeats (= how many times vertices get added to the queue). What happens when there is an edge between every pair of vertices?\n\n\n\n\nThank you. Below is my own attempt at an analysis.\n\n(1) If all vertices are connected, then as soon as the first vertex is processed, all N vertices enter the queue and are popped and processed in order. Each of those in turn inserts all N connected vertices into the queue, so I think this is about O(N^2).\n\n(2) However, I thought the worst case is when N/2 vertices are connected to (share an edge with) the first vertex, and those are in turn connected to the remaining N/2 vertices.\nIn this case, processing the first vertex inserts N/2 vertices into the queue, and each of those inserts the remaining N/2, so I think this is also about O(N^2).\n\n(3) But following the same pattern, if N/3 vertices are inserted first and then the processing of (2) happens, I wondered whether it becomes O(N^3).\n\nThinking about it this (rather sloppy) way, the order keeps growing. (The denominators grow as well, so at some point the order should come back down.)\nBut thinking about it normally, as the processing proceeds, the number of already-processed vertices grows and the number of newly inserted vertices should gradually shrink, so I cannot pin the complexity down.\nHonestly, I am not even sure the above is the worst case... could I get your opinion?\n\n\n\n\n\n\n-> No specific Reply\n\n'''\n\n
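'''\nEditor's note (added): for contrast, a minimal sketch of the standard fix -- mark a vertex\nvisited at the moment it is enqueued, so each vertex enters the queue at most once and the\nwhole traversal is O(N + E):\n\n    ans = [False] * N\n    ans[0] = True\n    q = [0]\n    while q:\n        v = q.pop()\n        for nx in mp[v]:\n            if not ans[nx]:\n                ans[nx] = True  # mark on enqueue, not after the neighbor loop\n                q.append(nx)\n\nThis matches the \"#ans[nx] = True # this is AC\" variant commented in the code above.\n'''\n\n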
'''\nIn the case of (3) above, the time complexity would be O(N^3/3^3). Since N = 2000, that is 8*10**9/27, which is already a bit too much. I hope the above idea is mostly correct and that some of the cases really do have quite a large time complexity. Therefore, this approach makes some test cases TLE.\n\n'''\n\n","repo_name":"tinaba96/coding","sub_path":"acode/abc304/c/try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
{"seq_id":"72473534684","text":"\"\"\"\nDeep Learning on Graphs - ALTEGRAD - Nov 2022\n\"\"\"\n\nimport numpy as np\nimport networkx as nx\nfrom random import randint\nimport random\nfrom gensim.models import Word2Vec\nfrom tqdm import tqdm\n\n\n\n############## Task 1\n# Simulates a random walk of length \"walk_length\" starting from node \"node\"\ndef random_walk(G, node, walk_length):\n\n    ##################\n    walk=[node]\n    for _ in range(walk_length-1):\n        neighbors= list(G.neighbors(walk[-1]))\n        walk.append(neighbors[randint(0,len(neighbors)-1)])\n    \n    ##################\n    #walk = [str(node) for node in walk]\n    return list(map(str, walk))\n\n############## Task 2\n# Runs \"num_walks\" random walks from each node\ndef generate_walks(G, num_walks, walk_length):\n    walks = []\n    # permuted_walks=np.array([])\n    ##################\n    for node in tqdm(np.random.permutation(G.nodes())):\n        # walks=[]\n        for _ in range(num_walks):\n            walks.append(random_walk(G, node, walk_length))\n        permuted_walks=walks\n        # permuted_walks=np.concatenate((permuted_walks,walks ))\n\n    ##################\n\n    return permuted_walks\n\n# Simulates walks and uses the Skipgram model to learn node representations\ndef deepwalk(G, num_walks, walk_length, n_dim):\n    print(\"Generating walks\")\n    walks = generate_walks(G, num_walks, walk_length)\n    print(\"Training word2vec\")\n    model = Word2Vec(vector_size=n_dim, window=8, min_count=0, sg=1, workers=8, hs=1)\n    model.build_vocab(walks)\n    model.train(walks, total_examples=model.corpus_count, epochs=5)\n\n    return model\n
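\n# Example usage (added by the editor; the graph and hyper-parameter values are illustrative only):\n# G = nx.karate_club_graph()\n# model = deepwalk(G, num_walks=10, walk_length=80, n_dim=128)\n# emb = model.wv[str(0)]  # node ids were cast to str in random_walk\n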
\n\n\n\n","repo_name":"zechchair/M2-data-science","sub_path":"advanced text and graph learning/deep learning for graphs/lab_5_echchair_zakaria/code/part1/deepwalk.py","file_name":"deepwalk.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"}
{"seq_id":"29578779675","text":"#This is the code to calculate the scalar value of the surface confined to E mechanism then print it\n# The value is calculated from the ratio of the max first harmonic\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport DNNtimecluster_mod as TC_mod\nimport NN_train_mod\nimport TrainDNNtimeclass_mod as TDNN\nimport DC_neuralnetwork_mod as NNt\nimport ImagebasedDNN as IB_NNt\nimport time\nimport sys\nimport numpy as np\nimport shutil\nimport DNN_run_mods as runner_mod\nfrom joblib import dump\nimport tensorflow as tf\n\ngpus = tf.config.experimental.list_physical_devices(device_type='GPU')\ntf.config.experimental.set_memory_growth(gpus[0], True)\ntf.config.set_logical_device_configuration(gpus[0], [tf.config.LogicalDeviceConfiguration(memory_limit=1424)])\n\n\n\"\"\"Import DNN settings\"\"\" #Need to get which experimental parameters were loading\ntime1 = time.time()\n\n#This is the output of input loader but is constant in this calculation\nserverdata = [\"postgres\", \"password\", \"ipadress\", \"post\", \"table\"]\nreactionmech = [\"E\",\"ESurf\"]\nmodelparamterfile = \"testmodelfile.txt\" # don't know what this does but leave it in and hope it works\ndeci = 2**8\nDNNmodel = \"inceptiontime\"\n\n# Something to classify the DNN label and data to a specific model\nprint(serverdata)\n# THIS GETS THE LABELS\n\n\n# \"\"\"GET EXPsetrow THAT RELATE TO THE ABOVE AC SINE PROPERTIES AND MODEL\"\"\" #exp = [[expsetrow,reactionmech],...]\n\nexp_class = TDNN.EXPsetrow_collector(serverdata, reactionmech)\nexp_data = TDNN.EXPsetrow_dcdata(serverdata, exp_class)\n\n# create list of form react_class = [[[ReactionID,Reactionmech]]] for each reac mech\nreact_class = TDNN.ReactID_collector(serverdata, exp_class)\n#print(react_class)\n\n# count number for each reaction model and check if same\nNarray = TDNN.Narray_count(react_class)\n\n\nmodeldic = TDNN.modeldataloader(modelparamterfile)\n\"\"\"May need something here to import model specific parameters\"\"\"\n\"\"\"Load the reactmech or classes from the sql library where exp setting is X\"\"\"\n\n# sets up model numbers\nmodel_numcode = {}\nfor i in range(len(reactionmech)):\n    model_numcode.update({reactionmech[i]:i})\nNmodels = len(reactionmech)\n\n# tell everything just to get the fundamental harmonic\nharmdata = [1]\n#print(\"Train data\")\n#print(traindata)\n\n# extract the E stuff\nreact_classE = [react_class[0]]\ntestdataE1, traindata, Ntest, Ntrain = TC_mod.suffle_splittrainratio(0.1, react_classE)\nharmE1, harmE1_mech, harmE1_ID = NNt.DC_NN_setter(traindata, harmdata) # training data\ncurrentdataE1, mechacceptE1 = TDNN.ACsqlcurrentcollector(serverdata, harmdata, harmE1, deci,\"blah\")\nNE1 = len(currentdataE1)\n\n\n# extract the Esurf stuff\nreact_classEsurf = [react_class[1]]\ntestdataEsurf , traindataEs, Ntest, Ntrain = TC_mod.suffle_splittrainratio(0.99, react_classEsurf)\nharmEsurf, harmEsurf_mech, harmEsurf_ID = NNt.DC_NN_setter(traindataEs, harmdata) # training data\ncurrentdataEsurf, mechacceptEsurf = TDNN.ACsqlcurrentcollector(serverdata, harmdata, harmEsurf, deci,\"blah\")\nNEsurf = len(currentdataEsurf)\n\nimport matplotlib.pyplot as plt\nplt.figure()\nplt.plot(currentdataEsurf[0]*7638918.383354617)\nplt.plot(currentdataE1[0])\nplt.savefig(\"savefig.png\")\n\n# Something to randomly compare the peak first-harmonic currents of the two mechanisms\nxdiff = []\nt2 = time.time()\nNr = 300000\nfor i in range(Nr):\n    x = np.random.randint(0,NEsurf-1,2)\n    xdiff.append(max(currentdataE1[x[0]])/max(currentdataEsurf[x[1]]))\n\n#print(xdiff)\nprint(\"finished\")\nprint((time.time()-t2))\nprint(len(xdiff))\nprint(np.average(xdiff))\n","repo_name":"lukegun/MonashEChem_AIPlatform","sub_path":"machine_learning_algorithms/SurfaceE_scalercal.py","file_name":"SurfaceE_scalercal.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
{"seq_id":"16864900864","text":"from django.shortcuts import render\nfrom django.core.mail import EmailMessage, EmailMultiAlternatives\nfrom dj_mail import settings\n\ndef Sending_Mail(request):\n    if request.method == 'POST':\n        data = request.POST\n        \n        toEmail = data.get('toEmail')\n        subject = data.get('subject')\n        message = data.get('message')\n        ccEmail = data.get('ccEmail')\n        # converting the comma-separated string to a list, skipping empty entries\n        cc_list = [email.strip() for email in ccEmail.split(',') if email.strip()] if ccEmail else []\n        \n        attachment = request.FILES.get('attachment')\n        \n        fromEmail = settings.EMAIL_HOST_USER\n        to = [toEmail] \n        \n        if cc_list:\n            to += cc_list \n        \n        email = EmailMultiAlternatives(\n            from_email=fromEmail,\n            to=to,\n            subject=subject,\n            body=message\n        )\n        \n        if attachment:\n            email.attach(attachment.name, attachment.read(), attachment.content_type)\n        \n        email.content_subtype = 'html'\n        email.send()\n        \n    return render(request, 
'index.html')\n","repo_name":"romanhumagain/django-email-sending","sub_path":"dj_mail/MyApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"22432374923","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras.models import load_model\n#from tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nmenu = ['Home','Upload Your Photo','Capture From Webcam','Ask Me ~~~']\n\nchoice = st.sidebar.selectbox('Check your money with options below:', menu)\n\n\n#Load your model and check create the class_names list\nModel_Path = 'my_model_checkpoint_DenseNet_Nov23.h5'\n\nclass_names = ['1,000', '10,000', '100,000', '2,000', '20,000', '200,000', '5,000', '50,000', '500,000']\nmodel = tf.keras.models.load_model(Model_Path)\nmodel.compile(optimizer=tf.keras.optimizers.Adam(),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nst.header(\"Let My First Web App Help You!\")\nif choice=='Home':\n st.title(\"Wana scan your money??? \")\n\n st.write(\"Enjoy some music first\")\n st.write(\"\")\n st.video('https://www.youtube.com/watch?v=ETxmCCsMoD0')\n\n st.write(\"Money, money, money\")\n st.write(\"Must be funny!!!\")\n st.balloons()\n \n\nif choice == 'Upload Your Photo':\n st.title('Upload Your Photo')\n photo_uploaded = st.file_uploader('Please take a look at the requirements of the file', ['png', 'jpeg', 'jpg'])\n if photo_uploaded!=None:\n image_np = np.asarray(bytearray(photo_uploaded.read()), dtype=np.uint8)\n img = cv2.imdecode(image_np, 1)\n st.image(img, channels='BGR')\n\n #st.write(photo_uploaded.size)\n #st.write(photo_uploaded.type)\n\n #Resize the Image according with your model\n img = cv2.resize(img,(224,224),interpolation = cv2.INTER_AREA)\n #Expand dim to make sure your img_array is (1, Height, Width , Channel ) before plugging into the model\n img_array = np.expand_dims(img, axis=0)\n \n #JENNY code\n prediction = model.predict(img_array)\n a = np.argmax(prediction,axis=1)\n #st.write(a[0])\n result = class_names[int(a)]\n st.write('The denomination is:',result)\n \n\nelif choice == 'Capture From Webcam':\n st.title('Capture From Webcam')\n cap = cv2.VideoCapture(0) # device 0\n run = st.checkbox('Show Your Webcam')\n capture_button = st.checkbox('Capture now!')\n\n captured_image = np.array(None)\n\n\n # Check if the webcam is opened correctly\n if not cap.isOpened():\n raise IOError(\"Cannot open webcam\")\n\n FRAME_WINDOW = st.image([])\n while run:\n ret, frame = cap.read() \n # Display Webcam\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB ) #Convert color\n FRAME_WINDOW.image(frame)\n\n if capture_button: \n captured_image = frame\n break\n\n cap.release()\n\n if captured_image.all() != None:\n st.write('Image is captured:')\n st.image(captured_image)\n\n #Resize the Image according with your model\n captured_image = cv2.resize(captured_image,(224,224),interpolation = cv2.INTER_AREA)\n #Expand dim to make sure your img_array is (1, Height, Width , Channel ) before plugging into the model\n img_array = np.expand_dims(captured_image, axis=0)\n \n #JENNY code\n prediction = model.predict(img_array)\n a = np.argmax(prediction,axis=1)\n #st.write(a[0])\n result = class_names[int(a)]\n st.write('The denomination is:',result)\n \n\nelif choice=='Ask Me ~~~':\n st.title('Ask me for more help ~~!')\n 
st.success('Contact me via Discord Jenny Peace#2703')\n    \n    # st.image('https://c.tenor.com/LwS8qVjM3rQAAAAC/dino-wonder.gif',\n    #          caption=\"I am a wonder girl who wonders about EVERYTHING\",\n    #          use_column_width='auto')\n    \n    st.balloons()\n\n","repo_name":"Jenny-Peace/Banknote_recognition","sub_path":"CamPred.py","file_name":"CamPred.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"}
{"seq_id":"19018323761","text":"\"\"\"ApiRunner URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n    https://docs.djangoproject.com/en/2.0/topics/http/urls/\r\nExamples:\r\nFunction views\r\n    1. Add an import:  from my_app import views\r\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\r\nClass-based views\r\n    1. Add an import:  from other_app.views import Home\r\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n    1. Import the include() function: from django.urls import include, path\r\n    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path,include,re_path\r\nfrom ApiManager import views as ApiManager_views\r\nfrom django.conf import settings\r\nfrom ApiManager import TaskEngine\r\nfrom ApiManager.TaskEngine import RunProjectTask\r\nimport sqlite3\r\nfrom apscheduler.schedulers.background import BackgroundScheduler\r\nfrom django_apscheduler.jobstores import DjangoJobStore, register_events, register_job\r\nimport time\r\nimport datetime\r\n\r\n\r\nurlpatterns = [\r\n    re_path(r'^$',ApiManager_views.login),\r\n    re_path(r'^accounts/login/$',ApiManager_views.login),\r\n    path('register_action/',ApiManager_views.register_action),\r\n    path('login/',ApiManager_views.login),\r\n    path('login_action/',ApiManager_views.login_action),\r\n    path('admin/', admin.site.urls),\r\n    path('index/',ApiManager_views.index),\r\n    path('api_get/',ApiManager_views.api_get),\r\n    path(r'api/',include('ApiManager.urls')),\r\n# Project management routes\r\n    path('project_list/',ApiManager_views.project_list),\r\n    path('add_project/',ApiManager_views.add_project_page),\r\n    path('add_project_action/',ApiManager_views.add_project),\r\n    path('del_project/',ApiManager_views.del_project),\r\n    path('edit_project//',ApiManager_views.add_project_page),\r\n    path('edit_project/',ApiManager_views.edit_project),\r\n# Module management routes\r\n    path('module_list/',ApiManager_views.module_list),\r\n    path('add_module/',ApiManager_views.add_module_page),\r\n    path('add_module_action/',ApiManager_views.add_module),\r\n    path('del_module/',ApiManager_views.del_module),\r\n    path('edit_module//',ApiManager_views.add_module_page),\r\n    path('edit_module/',ApiManager_views.edit_module),\r\n# Test case management routes\r\n    path('add_testcase/',ApiManager_views.add_testcase_page),\r\n    path('testcase_list/',ApiManager_views.testcase_list),\r\n    path('del_testcase/',ApiManager_views.del_testcase),\r\n    path('edit_testcase//',ApiManager_views.add_testcase_page),\r\n# Report management routes\r\n    path('report_list/',ApiManager_views.report_list),\r\n    path('del_report/',ApiManager_views.del_report),\r\n# Task management routes\r\n    path('add_task/',ApiManager_views.add_task_page),\r\n    path('add_task_action/',ApiManager_views.add_task),\r\n    path('task_list/',ApiManager_views.task_list),\r\n    path('edit_task//',ApiManager_views.add_task_page),\r\n    path('edit_task/',ApiManager_views.edit_task),\r\n    
path('del_task/',ApiManager_views.del_task)\r\n]\r\n\r\ndbpath=settings.DB_DIRS+['db.sqlite3']\r\nconn=sqlite3.connect(''.join(dbpath))\r\nc=conn.cursor()\r\nc.execute(\"DELETE from django_apscheduler_djangojob;\")\r\nconn.commit()\r\nconn.close()\r\n \r\nscheduler = BackgroundScheduler()\r\nscheduler.add_jobstore(DjangoJobStore(), \"default\")\r\n \r\nresult=TaskEngine.getTaskInfo() \r\nprint(result)\r\nfor k,v in result.items():\r\n if(v[0]=='once'):\r\n scheduler.add_job(RunProjectTask,'cron',year=v[2]['year'],month = v[2]['month'],day = v[2]['day'],hour = v[2]['hour'],minute = v[2]['minute'],second = v[2]['second'],args=[v[1]])\r\n if(v[0]=='everyday'):\r\n scheduler.add_job(RunProjectTask,'cron',hour=v[2]['hour'],minute=v[2]['minute'],second=v[2]['second'],args=[v[1]])\r\n if(v[0]=='Mon-fri'):\r\n scheduler.add_job(RunProjectTask,'cron',day_of_week='mon-fri',hour=v[2]['hour'],minute=v[2]['minute'],second=v[2]['second'],args=[v[1]])\r\nregister_events(scheduler)\r\nscheduler.start()\r\n\r\n\r\n","repo_name":"tonydian/ApiRunner","sub_path":"ApiRunner/ApiRunner/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"70161855644","text":"# https://leetcode.com/problems/course-schedule-ii/\n# 210. Course Schedule II\n# There are a total of n courses you have to take, labeled from 0 to n-1.\n# Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]\n# Given the total number of courses and a list of prerequisite pairs, return the ordering of courses you should take to finish all courses.\n# There may be multiple correct orders, you just need to return one of them. If it is impossible to finish all courses, return an empty array.\n# Example 1:\n# Input: 2, [[1,0]]\n# Output: [0,1]\n# Explanation: There are a total of 2 courses to take. To take course 1 you should have finished\n# course 0. So the correct course order is [0,1] .\n# Example 2:\n# Input: 4, [[1,0],[2,0],[3,1],[3,2]]\n# Output: [0,1,2,3] or [0,2,1,3]\n# Explanation: There are a total of 4 courses to take. To take course 3 you should have finished both\n# courses 1 and 2. Both courses 1 and 2 should be taken after you finished course 0.\n# So one correct course order is [0,1,2,3]. Another correct ordering is [0,2,1,3] .\n# Note:\n# The input prerequisites is a graph represented by a list of edges, not adjacency matrices. 
Read more about how a graph is represented.\n# You may assume that there are no duplicate edges in the input prerequisites.\n\ndef findOrder(numCourses, prerequisites):\n    graph = [[] for i in range(numCourses)]\n    indegree = [0 for i in range(numCourses)]\n    for pre in prerequisites:\n        indegree[pre[0]] += 1\n        graph[pre[1]].append(pre[0])\n    queue = []\n    for i in range(len(indegree)):\n        if not indegree[i]:\n            queue.append(i)\n    count = 0\n    result = []\n    while queue:\n        curr = queue.pop(0)\n        result.append(curr)\n        count += 1\n        for i in range(len(graph[curr])):\n            indegree[graph[curr][i]] -= 1\n            if indegree[graph[curr][i]] == 0:\n                queue.append(graph[curr][i])\n    if len(result) != numCourses:\n        return []\n    return result\n","repo_name":"yang-official/LC","sub_path":"Python/4_Trees_and_Graphs/2_Graph_Traversal/210_course_schedule_ii.py","file_name":"210_course_schedule_ii.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
{"seq_id":"27111269522","text":"from flask import Flask, request, redirect\n\nfrom Util.subpages import ConfigManager, ErrManager, ManPage, TimeOutManager, to_err\n\nfrom functools import wraps\n\nfrom datetime import datetime\n\napp = Flask(__name__)\n\n\nform: dict\nconn_alive: dict\n\n\ndef if_filled_form(f):\n    @wraps(f)\n    def decorated(*args, **kwargs):\n        global form\n        try:\n            a = form[\"id\"]\n        except Exception:\n            return redirect('/')\n        return f(*args, **kwargs)\n    return decorated\n\n\n@app.route('/', methods=[\"POST\", \"GET\"])\ndef board():\n\n    global form\n    global conn_alive\n    manager = ConfigManager()\n\n    if request.method == \"GET\":\n        ports = manager.get_all_ports()\n        return manager.show_page(ports)\n\n    form = manager.get_form()\n    manager.connect(form['port'])\n    while not manager.check_conn_alive():\n        manager.connect(form['port'])\n    tic = datetime.now()\n    lwr_bounds = request.form.get('scan-lwr-bounds')\n    upr_bounds = request.form.get('scan-upr-bounds')\n    conn_alive = manager.test_alive(lwr_bounds, upr_bounds)\n    toc = datetime.now()\n    print(\"Scanning {} IDs took {} seconds in total.\".format(int(upr_bounds)-int(lwr_bounds)+1, (toc-tic).total_seconds()))\n    try:\n        try:\n            form['id'] = conn_alive['sgn'][0]['id']\n            form['mode'] = conn_alive['sgn'][0]['devGM']\n            form['version'] = conn_alive['sgn'][0]['ver']\n            form['type'] = conn_alive['sgn'][0]['cate']\n        except IndexError:\n            try:\n                form['id'] = conn_alive['pwr'][0]['id']\n                form['mode'] = conn_alive['pwr'][0]['devGM']\n                form['version'] = conn_alive['pwr'][0]['ver']\n                form['type'] = conn_alive['pwr'][0]['cate']\n            except IndexError:\n                return to_err()\n        except Exception as e:\n            print(e)\n            return to_err()\n        try:\n            return manager.redirect()\n        except Exception as e:\n            print(e)\n            return to_err()\n    except ValueError:\n        return to_err()\n    except Exception as e:\n        print(e)\n        return to_err()\n\n\n@app.route('/man/', methods=[\"POST\", \"GET\"])\n@if_filled_form\ndef man():\n    global form\n    global conn_alive\n    lcl_form = form\n    try:\n        manager = ManPage(lcl_form[\"mode\"], lcl_form[\"id\"], lcl_form[\"type\"], lcl_form[\"version\"])\n    except NameError:\n        return to_err()\n    except Exception as e:\n        print(e)\n        return to_err()\n\n    manager.connect(lcl_form[\"port\"])\n\n    manager.conn_alive = conn_alive\n\n    if request.method == \"GET\":\n        try:\n            return manager.show_page()\n        except ValueError:\n            return to_err()\n        except Exception as e:\n            print(e)\n            return to_err()\n\n    manager.alter_tunnl_status()\n\n    if manager.alter_group_mode():\n        lcl_form['mode'] = manager.mode\n\n    if 
manager.alter_board_id():\n lcl_form['id'] = manager.id\n\n if not manager.change_board_focus():\n if manager.change_board_focus():\n form = {'mode': manager.mode,\n 'id': manager.id,\n 'type': manager.cate,\n 'version': manager.version,\n 'port': lcl_form['port']}\n else:\n form = {'mode': manager.mode,\n 'id': manager.id,\n 'type': manager.cate,\n 'version': manager.version,\n 'port': lcl_form['port']}\n\n manager.port_man.close(manager.port_man.ser)\n return manager.redirect()\n\n\n@app.route('/conn_err/', methods=[\"GET\"])\ndef conn_err():\n manager = ErrManager()\n return manager.show_page()\n\n\n@app.route('/time_out/', methods=[\"GET\"])\ndef time_out():\n manager = TimeOutManager()\n return manager.show_page()\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"wangltsss/aVisualizedRS485Console","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"4647816635","text":"import argparse\nimport random\n\nimport torch\nfrom torch import optim\n\nfrom bioner.model.annotator import Annotator, TrainingParameters\nfrom bioner.model.bioner_model import BioNER\nfrom bioner.model.encoder.fasttext_encoder import FasttextEncoder, FastTextEmbedding\n\nif __name__ == '__main__':\n torch.multiprocessing.set_start_method('spawn')\n parser = argparse.ArgumentParser(description='Train Annotator')\n required_named = parser.add_argument_group('required named arguments')\n required_named.add_argument('--embeddings',\n type=str,\n help='Path to the embeddings file',\n required=False)\n required_named.add_argument('--embeddingsRoot',\n type=str,\n help='Path where the embeddings can be downloaded to',\n required=False)\n required_named.add_argument('--training',\n type=str,\n help='Path to the training dataset file',\n required=True)\n required_named.add_argument('--validation',\n type=str,\n help='Path to the validation dataset file',\n required=True)\n required_named.add_argument('--test',\n type=str,\n help='Path to the test dataset file',\n required=False)\n required_named.add_argument('--batchSize',\n type=int,\n help='Batch size',\n required=True)\n required_named.add_argument('--learningRate',\n type=float,\n help='Learning rate',\n required=True)\n required_named.add_argument('--modelOutputFolder',\n type=str,\n help='The folder where the best model should be saved',\n required=True)\n required_named.add_argument('--maxEpochs',\n type=int,\n help='Maximum training epochs',\n required=True)\n required_named.add_argument('--numWorkers',\n type=int,\n default=0,\n help='Number of workers (defaults to 0)')\n required_named.add_argument('--tensorboardLogDirectory',\n type=str,\n help='The directory where to log the tensorboard data',\n required=False)\n required_named.add_argument('--trainingsLogFile',\n type=str,\n help='The file path where to log the PyTorch Ignite training and validation',\n required=False)\n required_named.add_argument('--enableFasterTraining',\n action='store_true',\n help='Enable faster training by compute metrics only every 10th epoch')\n\n args = parser.parse_args()\n\n # Reproducibility\n torch.use_deterministic_algorithms(True)\n torch.manual_seed(1632737901)\n random.seed(1632737901)\n\n if args.embeddings is None and args.embeddingsRoot is None:\n parser.error(\"You need to set either --embeddings or --embeddingsRoot\")\n\n embeddings_file_path = None\n if args.embeddings is None:\n fasttext_embedding = 
FastTextEmbedding(embeddings_root=args.embeddingsRoot, ngram_range=\"3-4\")\n embeddings_file_path = fasttext_embedding.filepath\n else:\n embeddings_file_path = args.embeddings\n\n encoder = FasttextEncoder(embeddings_file_path=embeddings_file_path)\n model = BioNER(input_vector_size=encoder.get_embeddings_vector_size())\n parameters = TrainingParameters(encoder=encoder,\n batch_size=args.batchSize,\n training_dataset_path=args.training,\n validation_dataset_path=args.validation,\n model_save_path=args.modelOutputFolder,\n max_epochs=args.maxEpochs,\n num_workers=args.numWorkers,\n tensorboard_log_directory_path=args.tensorboardLogDirectory,\n training_log_file_path=args.trainingsLogFile,\n optimizer=optim.Adam(model.parameters(), lr=args.learningRate),\n model=model,\n faster_training_evaluation=args.enableFasterTraining)\n Annotator.train(parameters)\n","repo_name":"phil1995/BioNER","sub_path":"train_bioner.py","file_name":"train_bioner.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"70763743324","text":"import pandas\nimport glob\n\nMEASUREMENT_DATA_FILES = glob.glob(\"resources/measurement_data/*.xlsx\")\nLOCATION_DATA_FILES = glob.glob(\"resources/location_data/*.xlsx\")\nUSE_COLS = [4, 5, 6, 7]\n\n\nclass Reader:\n\n def load_learning_data_series(self, normalize=True):\n # read\n series = [pandas.read_excel(data_file, usecols=USE_COLS)\n for data_file in MEASUREMENT_DATA_FILES]\n # normalize and save to normal arrays\n data = []\n for df in series:\n if normalize:\n min_x = min([min(x['measurement x'] for i, x in df.iterrows()),\n min(x['reference x'] for i, x in df.iterrows())])\n max_x = max([max(x['measurement x'] for i, x in df.iterrows()),\n max(x['reference x'] for i, x in df.iterrows())])\n min_y = min([min(x['measurement y'] for i, x in df.iterrows()),\n min(x['reference y'] for i, x in df.iterrows())])\n max_y = max([max(x['measurement y'] for i, x in df.iterrows()),\n max(x['reference y'] for i, x in df.iterrows())])\n tmp = []\n for i, x in df.iterrows():\n if normalize:\n tmp.append([(x['measurement x'] - min_x) / (max_x - min_x),\n (x['measurement y'] - min_y) / (max_y - min_y),\n (x['reference x'] - min_x) / (max_x - min_x),\n (x['reference y'] - min_y) / (max_y - min_y)])\n else:\n tmp.append([x['measurement x'], x['measurement y'], x['reference x'], x['reference y']])\n data.append(tmp)\n return data\n\n def load_testing_data(self, normalize=True):\n # it remembers maxs/mins from last normalization to 'denormalize' data later\n df = pandas.read_excel(LOCATION_DATA_FILES[0], usecols=USE_COLS)\n if normalize:\n self.min_x = min([min(x['measurement x'] for i, x in df.iterrows()),\n min(x['reference x'] for i, x in df.iterrows())])\n self.max_x = max([max(x['measurement x'] for i, x in df.iterrows()),\n max(x['reference x'] for i, x in df.iterrows())])\n self.min_y = min([min(x['measurement y'] for i, x in df.iterrows()),\n min(x['reference y'] for i, x in df.iterrows())])\n self.max_y = max([max(x['measurement y'] for i, x in df.iterrows()),\n max(x['reference y'] for i, x in df.iterrows())])\n tmp = []\n for i, x in df.iterrows():\n if i <= 1539:\n if normalize:\n tmp.append([(x['measurement x'] - self.min_x) / (self.max_x - self.min_x),\n (x['measurement y'] - self.min_y) / (self.max_y - self.min_y),\n (x['reference x'] - self.min_x) / (self.max_x - self.min_x),\n (x['reference y'] - self.min_y) / (self.max_y - self.min_y)])\n else:\n tmp.append([x['measurement 
x'], x['measurement y'], x['reference x'], x['reference y']])\n return tmp\n\n def denormalize_testing_data(self, data):\n tmp = []\n for x in data:\n tmp.append([x[0] * (self.max_x - self.min_x) + self.min_x,\n x[1] * (self.max_y - self.min_y) + self.min_y,\n x[2] * (self.max_x - self.min_x) + self.min_x,\n x[3] * (self.max_y - self.min_y) + self.min_y])\n return tmp;\n\n def denormalize_mlp_output(self, data):\n tmp = []\n for x in data:\n tmp.append([x[0] * (self.max_x - self.min_x) + self.min_x,\n x[1] * (self.max_y - self.min_y) + self.min_y])\n return tmp;\n","repo_name":"KKowalewski24/Reports","sub_path":"SISE/Task2/source_code/Reader.py","file_name":"Reader.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"75057382363","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\n\r\nN = int(input())\r\nM = int(input())\r\nbroken = list(input().split())\r\ninit_cnt = abs(N - 100)\r\nfor check in range(1000000):\r\n num = list(str(check))\r\n for digit in num:\r\n if digit in broken: \r\n break\r\n else: \r\n init_cnt = min(init_cnt, len(str(check)) + abs(check - N))\r\nprint(init_cnt)","repo_name":"algo-itzy/algo-itzy","sub_path":"BOJ/bruteforcing/01107-리모컨/01107-리모컨-seungjoo.py","file_name":"01107-리모컨-seungjoo.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"86"} +{"seq_id":"23567393524","text":"from dataclasses import dataclass\nfrom typing import Callable, Dict, List, Optional, Union\n\nfrom ray.actor import ActorHandle\nfrom ray.data import Dataset\n\n\n@dataclass\nclass RayDatasetSpec:\n \"\"\"Configuration for Datasets to pass to the training workers.\n\n dataset_or_dict: An optional Dataset or a dictionary of\n datasets to be sharded across all the training workers, which can be accessed\n from the training function via ``ray.train.get_dataset_shard()``. Multiple\n Datasets can be passed in as a dictionary that maps each name key to a\n Dataset value, and each Dataset can be accessed from the training function\n by passing in a `dataset_name` argument to ``ray.train.get_dataset_shard()``.\n dataset_split_fn: An optional callable to specify how the provided ``dataset``\n should be split across the training workers. It is expected to take in two\n arguments. The first one is the ``dataset``, just as is passed in to the\n ``_RayDatasetSpec``. The second argument is a list of the ActorHandles of the\n training workers (to use as locality hints). 
The Callable is expected to\n return a list of Datasets or a list of dictionaries of Datasets,\n with the length of the list equal to the length of the list of actor handles.\n If None is provided, the provided Dataset(s) will be equally split.\n\n \"\"\"\n\n dataset_or_dict: Optional[Union[Dataset, Dict[str, Dataset]]]\n dataset_split_fn: Optional[\n Callable[\n [Union[Dataset, Dict[str, Dataset]], List[ActorHandle]],\n List[Union[Dataset, Dict[str, Dataset]]],\n ]\n ] = None\n\n def _default_split_fn(\n self, training_worker_handles: List[ActorHandle]\n ) -> List[Optional[Union[Dataset, Dict[str, Dataset]]]]:\n def split_dataset(dataset_or_pipeline):\n return dataset_or_pipeline.split(\n len(training_worker_handles),\n equal=True,\n locality_hints=training_worker_handles,\n )\n\n if isinstance(self.dataset_or_dict, dict):\n # Return a smaller dict for each shard.\n dataset_shards = [{} for _ in range(len(training_worker_handles))]\n for key, dataset in self.dataset_or_dict.items():\n split_datasets = split_dataset(dataset)\n assert len(split_datasets) == len(training_worker_handles)\n for i in range(len(split_datasets)):\n dataset_shards[i][key] = split_datasets[i]\n return dataset_shards\n else:\n # return a smaller Dataset for each shard.\n return split_dataset(self.dataset_or_dict)\n\n def get_dataset_shards(\n self, training_worker_handles: List[ActorHandle]\n ) -> List[Optional[Union[Dataset, Dict[str, Dataset]]]]:\n \"\"\"Returns Dataset splits based off the spec and the given training workers\n\n Args:\n training_worker_handles: A list of the training worker actor handles.\n\n Returns:\n A list of Dataset shards or list of dictionaries of Dataset shards,\n one for each training worker.\n\n \"\"\"\n if not self.dataset_or_dict:\n return [None] * len(training_worker_handles)\n\n if self.dataset_split_fn is None:\n return self._default_split_fn(training_worker_handles)\n else:\n splits = self.dataset_split_fn(\n self.dataset_or_dict, training_worker_handles\n )\n if not len(splits) == len(training_worker_handles):\n raise RuntimeError(\n \"The list of Datasets returned by the \"\n f\"`dataset_split_fn`: {len(splits)} does not match \"\n f\"the number of training workers: {len(training_worker_handles)}\"\n )\n return splits\n","repo_name":"ray-project/ray","sub_path":"python/ray/train/_internal/dataset_spec.py","file_name":"dataset_spec.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","stars":28715,"dataset":"github-code","pt":"86"} +{"seq_id":"36872574742","text":"# create a random number\nimport random\n\ngo_on = \"y\"\nwhile go_on == \"y\":\n\n\n max= int(input(\"give a maximum number: \"))\n number = random.randint(1,max)\n guess = None\n \n cnt_mx = int(max/2)\n for count in range(1,cnt_mx+1):\n if guess != number:\n guess =int(input(\"%s / %s Guess: \" % (count, cnt_mx)))\n if guess == number:\n print (\"Nice Guess\" )\n if guess < number:\n print (\"high\" )\n if guess > number:\n print (\"low\" )\n\n if count == cnt_mx and guess != number:\n go_on = input(\"no more chances \\ another guess? y / n ?\" )\n \n if count == cnt_mx and guess == number:\n go_on =input (\"congradulations! another guess? y / n ? \")\n \n#if guess is less then RN, say ..... \n#if guess is more then RN, say ..... 
","repo_name":"ray-project/ray","sub_path":"python/ray/train/_internal/dataset_spec.py","file_name":"dataset_spec.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","stars":28715,"dataset":"github-code","pt":"86"}
{"seq_id":"36872574742","text":"# create a random number\nimport random\n\ngo_on = \"y\"\nwhile go_on == \"y\":\n\n\n    max = int(input(\"give a maximum number: \"))\n    number = random.randint(1,max)\n    guess = None\n    \n    cnt_mx = int(max/2)\n    for count in range(1,cnt_mx+1):\n        if guess != number:\n            guess = int(input(\"%s / %s Guess: \" % (count, cnt_mx)))\n            if guess == number:\n                print(\"Nice Guess\")\n            if guess < number:\n                print(\"too low - go higher\")\n            if guess > number:\n                print(\"too high - go lower\")\n\n        if count == cnt_mx and guess != number:\n            go_on = input(\"no more chances. another guess? y / n ?\")\n        \n        if count == cnt_mx and guess == number:\n            go_on = input(\"congratulations! another guess? y / n ? \")\n    \n#if guess is less than RN, say ..... \n#if guess is more than RN, say ..... \n#if guess is equal to RN, break\n#add +1 to guess count\n#if guess count is 5, end","repo_name":"AlptekinAktas/LearningProcess","sub_path":"alptekin_rasgele_sayı.py","file_name":"alptekin_rasgele_sayı.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
{"seq_id":"31206848258","text":"from csv import reader\r\nfrom math import sqrt\r\nimport random\r\nfrom collections import Counter \r\nfrom statistics import mean\r\n\r\n\"\"\" If the total value (x1,x2,x3) is below 1.5 the output is 0, otherwise it is 1 \"\"\"\r\n\r\n# Function that loads the CSV file and converts the values inside to float\r\ndef txt_yukle(dosya):\r\n    veri = list()\r\n    with open(dosya, 'r') as dosya:\r\n        csv_okuyucu = reader(dosya)\r\n        for satir in csv_okuyucu:\r\n            if not satir:\r\n                continue\r\n            veri.append(satir)\r\n    for satir in veri:\r\n        for i in range(len(veri[0])):\r\n            satir[i] = float(satir[i])\r\n    return veri\r\n\r\n# Function that makes a prediction for each data point\r\ndef tahmin_et(satir, agirliklar):\r\n    aktivasyon = 0\r\n    if(len(satir) == 5):\r\n        for i in range(len(satir)-1):\r\n            aktivasyon += agirliklar[i] * satir[i] # weight * column of the data row\r\n    else:\r\n        for i in range(len(satir)):\r\n            aktivasyon += agirliklar[i] * satir[i] # weight * column of the data row\r\n    if aktivasyon > 0.0:\r\n        return 1.0\r\n    else:\r\n        return 0\r\n\r\n
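# Editorial note (added): tahmin_et is a hard-threshold perceptron -- it outputs 1.0 when the weighted sum w.x is > 0, else 0.\r\n# Rows carry a leading 1 as the bias input; the len(satir) == 5 branch skips the trailing label column of training rows.\r\n\r\n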
test_dataset(test_edilecek_data, agirliklar)\n dogru_orani.append(accuracy)\n genelOrtalama = mean(dogru_orani)\n return genelOrtalama\n\n# Kullanıcıdan bias değerini ve başlangıç ağırlıklarını alacak olan fonksiyon\ndef kullanıcıdan_agirlik_al():\n agirliklar_input = input(\"Lütfen virgülle ayrılmış şekilde bias değerini ve başlangıç ağırlıklarını girin (örneğin: 1.0,1.0,1.0,1.0): \")\n agirliklar = [1.0, 1.0, 1.0, 1.0] # Varsayılan ağırlık değerleri\n if agirliklar_input:\n input_degerleri = agirliklar_input.split(\",\")\n if len(input_degerleri) == 4:\n try:\n agirliklar = [float(w) for w in input_degerleri]\n except ValueError:\n pass # Geçersiz sayısal değerler olduğunda varsayılan değerleri kullanmaya devam et\n print(\"Geçersiz değerler girildi. Varsayılan ağırlık değerleri atandı\")\n return agirliklar\n\n# def random_data_olustur(veri_sayisi):\n# data = []\n# for _ in range(veri_sayisi):\n# x1 = random.uniform(0.1, 1.0)\n# x2 = random.uniform(0.1, 1.0)\n# x3 = random.uniform(0.1, 1.0)\n# total = x1 + x2 + x3\n# if total < 1.5:\n# label = 0\n# else:\n# label = 1\n# data.append([1, f\"{x1:.1f}\", f\"{x2:.1f}\", f\"{x3:.1f}\", label])\n# return data\n \n\n# Main fonksiyon, yukarıdaki tüm fonksiyonları kullanarak perceptron modelini eğitir ve test eder\ndef perceptron_veri_dogrulama(ogrenme_hizi, epoch_sayisi, agirliklar):\n\n\n # random_data = random_data_olustur(100)\n # for row in random_data:\n # print(','.join(str(value) for value in row))\n \n egitim_verisi = list()\n egitim_verisi = txt_yukle(\"egitim_verisi.txt\")\n test_verisi = list()\n test_verisi = txt_yukle(\"test_verisi.txt\")\n for j in test_verisi:\n del j[4]\n print(test_verisi)\n agirliklar = agirliklari_egit(egitim_verisi, ogrenme_hizi, epoch_sayisi, agirliklar)\n test_verisini_test_et(test_verisi, agirliklar)\n\nogrenme_hizi = 0.01\nepoch_sayisi = 200\nagirliklar = kullanıcıdan_agirlik_al() # agirliklar[0] bias olarak kullanılır\n\n\n\ndataset = txt_yukle(\"egitim_verisi.txt\")\n# ortalama_dogru_orani = k_fold_cross_validation(dataset, 5, ogrenme_hizi, epoch_sayisi,agirliklar)\n# print(\"Ortalama Doğruluk:\", f\"{ortalama_dogru_orani:.2f}\")\n\nperceptron_veri_dogrulama(ogrenme_hizi, epoch_sayisi, agirliklar)\n\n","repo_name":"kksal55/Classification-with-Three-Input-Perceptron-Algorithm","sub_path":"Perceptron_algorithm.py","file_name":"Perceptron_algorithm.py","file_ext":"py","file_size_in_byte":6038,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"26622461549","text":"import sys\r\nimport os\r\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox, QLabel, QPlainTextEdit, QComboBox, QFileDialog, QTabWidget, QVBoxLayout\r\nfrom PyQt5.QtGui import QIcon, QFont\r\nfrom PyQt5.QtCore import pyqtSlot\r\nfrom MainWindow import *\r\nimport MainWindow\r\nfrom ReportGenerator import Print_document\r\n\r\nclass App(QWidget):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.title = 'VAPT Report Generator'\r\n self.left = 10\r\n self.top = 10\r\n self.width = 1280\r\n self.height = 720\r\n self.__Img = None\r\n self.__doc = None\r\n self.doc = Print_document()\r\n if self.doc.start_doc():\r\n self.doc.reinitialize_doc()\r\n else:\r\n self.doc.start_doc()\r\n self.doc.initialize_doc()\r\n\r\n def TechUI(self, MainWindow):\r\n self.setWindowTitle(self.title)\r\n self.setWindowIcon(QIcon(\"Pristine.png\"))\r\n self.setGeometry(self.left, self.top, self.width, self.height)\r\n\r\n self.font = QFont()\r\n 
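# descriptive note: a single shared QFont instance; the family/size set below apply to every widget that reuses self.font\r\n        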
self.font.setFamily('Helvetica')\r\n self.font.setPointSize(16)\r\n\r\n self.Vname = QLabel('Vulnerabilty Name:',self)\r\n self.Vname.move(20, 5)\r\n self.Vname.resize(250,50)\r\n self.Vname.setFont(self.font)\r\n\r\n self.Vnamebox = QLineEdit(self)\r\n self.Vnamebox.move(260, 10)\r\n self.Vnamebox.resize(600,30)\r\n self.Vnamebox.setFont(self.font)\r\n\r\n self.VDesc = QLabel('Vulnerabilty Description:',self)\r\n self.VDesc.move(20, 80)\r\n self.VDesc.resize(250,50)\r\n self.VDesc.setFont(self.font)\r\n\r\n self.VDescbox = QPlainTextEdit(self)\r\n self.VDescbox.move(260, 50)\r\n self.VDescbox.resize(600,100)\r\n\r\n self.Vurl = QLabel('Vulnerable URL:',self)\r\n self.Vurl.move(20, 155)\r\n self.Vurl.resize(250,50)\r\n self.Vurl.setFont(self.font)\r\n\r\n self.Vurlbox = QLineEdit(self)\r\n self.Vurlbox.move(260, 160)\r\n self.Vurlbox.resize(600,30)\r\n self.Vurlbox.setText('')\r\n\r\n self.VPort = QLabel('Vulnerable Port:',self)\r\n self.VPort.move(20, 195)\r\n self.VPort.resize(250,50)\r\n self.VPort.setFont(self.font)\r\n\r\n self.VPortBox = QLineEdit(self)\r\n self.VPortBox.move(260, 200)\r\n self.VPortBox.resize(600,30)\r\n\r\n self.VSeverity = QLabel('Severity:',self)\r\n self.VSeverity.move(20, 245)\r\n self.VSeverity.resize(250,50)\r\n self.VSeverity.setFont(self.font)\r\n\r\n self.VSeveritybox = QComboBox(self)\r\n self.VSeveritybox.addItem('Critical',1)\r\n self.VSeveritybox.addItem('High',2)\r\n self.VSeveritybox.addItem('Medium',3)\r\n self.VSeveritybox.addItem('Low',4)\r\n self.VSeveritybox.addItem('Informational',5)\r\n self.VSeveritybox.move(260, 250)\r\n self.VSeveritybox.resize(600,30)\r\n\r\n self.VImpact = QLabel('Impact:',self)\r\n self.VImpact.move(20, 320)\r\n self.VImpact.resize(250,80)\r\n self.VImpact.setFont(self.font)\r\n\r\n self.VImpactBox = QPlainTextEdit(self)\r\n self.VImpactBox.move(260, 290)\r\n self.VImpactBox.resize(600,130)\r\n\r\n self.VRemediation = QLabel('Remediation:',self)\r\n self.VRemediation.move(20, 465)\r\n self.VRemediation.resize(250,50)\r\n self.VRemediation.setFont(self.font)\r\n\r\n self.VRemediationBox = QPlainTextEdit(self)\r\n self.VRemediationBox.move(260, 440)\r\n self.VRemediationBox.resize(600,100)\r\n\r\n self.SaveButton = QPushButton('Save', self)\r\n self.SaveButton.move(480,580)\r\n self.SaveButton.clicked.connect(self.back)\r\n self.SaveButton.setFont(self.font)\r\n\r\n self.saveReport = QPushButton('Add Vulnerabilty',self)\r\n self.saveReport.move(560,580)\r\n self.saveReport.clicked.connect(self.on_report)\r\n self.saveReport.setFont(self.font)\r\n\r\n self.ImgButton = QPushButton('Browse', self)\r\n self.ImgButton.move(400,580)\r\n self.ImgButton.clicked.connect(self.on_browse)\r\n self.ImgButton.setFont(self.font)\r\n\r\n self.show()\r\n\r\n @pyqtSlot()\r\n def on_report(self):\r\n\r\n vname = self.Vnamebox.text()\r\n self.doc.setVname(vname)\r\n self.Vnamebox.setText(\" \")\r\n\r\n severity = self.VSeveritybox.currentText()\r\n self.doc.setVSeverity(severity)\r\n\r\n VDesc = self.VDescbox.toPlainText()\r\n self.doc.SetVdesc(VDesc)\r\n self.VDescbox.setPlainText(\" \")\r\n\r\n Vurl = self.Vurlbox.text()\r\n self.doc.setVurl(Vurl)\r\n self.Vurlbox.setText(\" \")\r\n\r\n Vport = self.VPortBox.text()\r\n self.doc.setVport(Vport)\r\n self.VPortBox.setText(\" \")\r\n\r\n self.doc.setImg(self.__Img)\r\n\r\n VImpact = self.VImpactBox.toPlainText()\r\n self.doc.setImpact(VImpact)\r\n self.VImpactBox.setPlainText(\" \")\r\n\r\n Vrem = self.VRemediationBox.toPlainText()\r\n self.doc.setVremed(Vrem)\r\n 
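# clear the remediation input once its text has been handed to the report document\r\n        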
self.VRemediationBox.setPlainText(\" \")\r\n\r\n self.doc.pageBreak()\r\n\r\n @pyqtSlot()\r\n def on_browse(self):\r\n self.__Img = QFileDialog.getOpenFileNames(self,'Open Files','/',(\"Images(*.png)\"))\r\n\r\n @pyqtSlot()\r\n def on_click(self):\r\n self.doc.Savereport()\r\n\r\n @pyqtSlot()\r\n def back(self):\r\n self.doc.Savereport()\r\n self.ui = MainWindow.App1()\r\n self.ui.initUI()\r\n App.hide(self)\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n app = QApplication(sys.argv)\r\n MainWindow1 = QtWidgets.QMainWindow()\r\n ui = App()\r\n ui.setup(MainWindow)\r\n MainWindow1.show()\r\n sys.exit(app.exec_())\r\n","repo_name":"TruBurbank/VAPT-Report-Generator","sub_path":"Tech.py","file_name":"Tech.py","file_ext":"py","file_size_in_byte":5663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"70471883163","text":"# Task 11: Seating System\n\nfrom utilities import read_input\nfrom copy import deepcopy\n\ndef init_grid_to_int(entries):\n \"\"\" Grid from char to int \"\"\"\n mapping = {\".\":0, \"L\":-1, \"#\":1}\n grid = [\n [mapping[c] for c in row]\n for row in entries\n ]\n return grid\n \ndef get_adjacent_pairs(grid, r, c, rng=1):\n \"\"\" Find positions considered adjacent to the current position \"\"\"\n grid_rows = len(grid)\n grid_columns = len(grid[0])\n \n pairs = []\n \n direcs = zip(\n (-1,-1,-1, 0,0, 1,1,1),\n (-1, 0, 1,-1,1,-1,0,1)\n )\n for direc in direcs:\n i = 1\n while i <= rng:\n chk_pos = (r+direc[0]*i, c+direc[1]*i)\n if (chk_pos[0] < 0) or (chk_pos[0] >= grid_rows) \\\n or (chk_pos[1] < 0) or (chk_pos[1] >= grid_columns):\n break\n elif grid[chk_pos[0]][chk_pos[1]] != 0:\n pairs.append(chk_pos)\n break\n else:\n i+=1\n continue\n \n return pairs\n\ndef gen_adjacency_array(grid, adj_rng):\n \"\"\" Collect adjacent positions for each grid position \"\"\"\n adj_array = deepcopy(grid)\n for r in range(len(grid)):\n for c in range(len(grid[r])):\n pairs = get_adjacent_pairs(grid, r, c, rng=adj_rng)\n adj_array[r][c] = pairs\n return adj_array\n \ndef get_next_state(grid, adj_array, item_r, item_c, occupancy_limit=4):\n \"\"\" Get next state for a single position \"\"\"\n cur_state = grid[item_r][item_c]\n if cur_state == 0:\n return cur_state\n \n pairs = adj_array[item_r][item_c]\n all_empty = all([grid[r][c]<=0 for r,c in pairs])\n num_occupied = sum([grid[r][c]==1 for r,c in pairs])\n \n if cur_state == -1 and all_empty:\n return 1\n elif cur_state == 1 and num_occupied>=occupancy_limit:\n return -1\n else:\n return cur_state\n\ndef apply_step(grid, adj_array, occupancy_limit):\n \"\"\" Apply timestep to grid \"\"\"\n next_grid = deepcopy(grid)\n for r in range(len(grid)):\n for c in range(len(grid[r])):\n next_grid[r][c] = get_next_state(grid, adj_array, r, c, occupancy_limit)\n return next_grid\n\ndef grids_equal(g1, g2):\n \"\"\" Compare grid elementwise \"\"\"\n for r1, r2 in zip(g1, g2):\n for e1, e2 in zip(r1, r2):\n if e1!=e2:\n return False\n return True\n\ndef find_stable_grid(grid, adj_array, occupancy_limit):\n \"\"\" Apply timesteps until grid is stable \"\"\"\n g_prev = grid\n grid = apply_step(grid, adj_array, occupancy_limit)\n while not grids_equal(grid, g_prev):\n g_prev = grid\n grid = apply_step(grid, adj_array, occupancy_limit)\n return grid\n\ndef count_occupied_seats(grid):\n \"\"\" Count occupied seats in grid \"\"\"\n return sum([sum([e>0 for e in row]) for row in grid])\n\nif __name__==\"__main__\":\n \n # Test data or real data\n # entries = read_input(\"11_test_input.txt\")\n 
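# swap the commented line above for the one below to run against the sample puzzle input\n    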
entries = read_input(\"11_input.txt\")\n grid = init_grid_to_int(entries)\n \n adj_array = gen_adjacency_array(grid, adj_rng=1)\n stable_grid = find_stable_grid(grid, adj_array, occupancy_limit=4)\n print(f\"Directly adjacent rule, Occupied seats when stable: {count_occupied_seats(stable_grid)}\")\n \n adj_array = gen_adjacency_array(grid, adj_rng=1000)\n stable_grid = find_stable_grid(grid, adj_array, occupancy_limit=5)\n print(f\"Line of sight rule, Occupied seats when stable: {count_occupied_seats(stable_grid)}\")\n ","repo_name":"atrahan/aoc2020","sub_path":"src/11_seatingsystem.py","file_name":"11_seatingsystem.py","file_ext":"py","file_size_in_byte":3493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"23969467819","text":"\"\"\"\nDjango settings for AlarmSystem project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nSENSOR_SERVER_IP = '25.111.126.166'\nSENSOR_SERVER_PORT = 8798\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '%5jw-k17glg=9!*ed1clg@n0mn2trl=^*tmmrdn7k^v3qfpaeh'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\nSITE_ID = 1\n\nLOGIN_URL = '/admin/login'\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'djangocms_admin_style',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'bootstrap3',\n 'dashboard',\n 'sensorApp',\n 'scenarioApp',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = [\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n\n]\n\nTEMPLATE_DIRS = (\n os.path.join(BASE_DIR, 'dashboard', 'templates'),\n os.path.join(BASE_DIR, 'AlarmSystem', 'templates'),\n os.path.join(BASE_DIR, 'scenarioApp', 'templates'),\n)\n\nROOT_URLCONF = 'AlarmSystem.urls'\n\nWSGI_APPLICATION = 'AlarmSystem.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'CET'\n\nUSE_I18N = False\n\nUSE_L10N = False\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\nSTATIC_URL = '/static/'\n# STATIC_ROOT = os.path.join(BASE_DIR, \"static\")\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, \"static\"),\n os.path.join(BASE_DIR, 
\"AlarmSystem/static\")\n)\n","repo_name":"merdigon/Sensors_installation_project","sub_path":"Python/AlarmSystem/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"24521106179","text":"#Author guo\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def swapPairs(self, head):\n if not head or not head.next:\n return head\n\n cur = dummy = ListNode(0)\n dummy.next = head\n cur = dummy\n\n while cur and cur.next:\n a, b = cur, cur.next\n\n a.next, b.next, cur = b.next, a, b # 交换过程,画图\n\n cur = cur.next.next # 往下进行两个结点\n\n return dummy\n","repo_name":"guojia60180/algorithm","sub_path":"算法题目/算法题目/链表/LeetCode24交换链表中相邻节点.py","file_name":"LeetCode24交换链表中相邻节点.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"29750891145","text":"#組み込みパッケージ\nimport os\nimport sys\nimport fnmatch\nimport pathlib\n\npath_ = os.path.abspath(\n os.path.join(\n __file__,\n '../..',))\n\nsys.path.insert(0, path_)\n\n#自前パッケージ\nfrom sanakin import Corpus\nfrom sanakin import SentenceDelimiter\nfrom sanakin import CorpusFile\nfrom sanakin import SNKSession\n\nfrom sanakin.cli_util import SNKCLIEngine\nfrom sanakin.cli_util.db_api import simple_insert\nfrom sanakin.cli_util.db_api import bulk_insert\n\nfrom env import RAKUTEN_TRAVEL_DIR\n\n# ロガー設定\nimport logging\nlogging.basicConfig()\nlogging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)\nlogging.getLogger().setLevel(logging.INFO)\n\nclass SeedEngine(SNKCLIEngine):\n _work = 'seed'\n\n def __init__(self):\n super(__class__, self).__init__(\n description='''\\\n DBに初期データを投入するためのCLI。\n 引数なしで実行した場合、開発モードとしてRTURのサブセットをinsert。\\\n '''\n )\n\n @SNKCLIEngine.confirm(msg=f'{_work}:消去しますか?')\n def _delete_mode(self):\n with SNKSession() as session:\n with session.commit_manager() as s:\n s.query(Corpus).filter(\n Corpus.corpus_id == 'CPRTUR'\n ).delete()\n\n s.query(SentenceDelimiter).filter(\n SentenceDelimiter.sentence_delimiter_id == 'SD0001'\n ).delete()\n\n q = 'ALTER TABLE {} AUTO_INCREMENT = 1;'\n for t in session.get_bind().table_names():\n session.execute(q.format(t))\n\n def _sandbox_mode(self):\n pass\n\n def _non_wrapped_insert_mode(self, *, is_develop_mode=True):\n corpus = Corpus(\n corpus_id='CPRTUR',\n name='楽天データセット::楽天トラベル::ユーザレビュー',\n )\n simple_insert(corpus)\n\n delimiter = SentenceDelimiter(\n sentence_delimiter_id='SD0001',\n regex=r'[。.\\.!!?\\?\\n]+',\n )\n simple_insert(delimiter)\n\n dir_ = pathlib.Path(RAKUTEN_TRAVEL_DIR)\n\n corpus_files = []\n with SNKSession() as session:\n session.add(corpus)\n for idx, file_path in enumerate(sorted(dir_.iterdir())):\n if is_develop_mode and idx == 1:\n break\n\n if fnmatch.fnmatch(file_path.name, 'travel02_userReview[0-9]*'):\n cf = CorpusFile.create(file_path)\n cf.corpus_id = corpus.corpus_id\n corpus_files.append(cf)\n\n bulk_insert(corpus_files, CorpusFile)\n\n @SNKCLIEngine.confirm(msg=f'{_work}:時間がかかりますがいいですか?')\n def _long_time_insert_mode(self, *, is_develop_mode=True):\n self._non_wrapped_insert_mode(is_develop_mode=is_develop_mode)\n\nif __name__ == '__main__':\n cli = SeedEngine()\n 
cli.run()\n","repo_name":"wbydo/sanascan","sub_path":"deplicated/script/seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"23951810774","text":"# ========================================\n# Title: luna-user-service.py\n# Author: Adam Luna\n# Date: 16 May 2021\n# Description: Query and Create Documents in MongoDB using Python and pymongo.\n# ========================================\n\n# Import MongoClient, pprint, and datetime\nfrom pymongo import MongoClient\nimport pprint\nimport datetime\n\n# Connect to local MongoDB instance\nclient = MongoClient('localhost', 27017)\ndb = client.web335\n\n# Create new user document\nuser = {\n \"first_name\": \"Adam\",\n \"last_name\": \"Luna\",\n \"email\": \"adam@adam.com\",\n \"employee_id\": \"1010101\",\n \"date_created\": datetime.datetime.utcnow()\n}\n\n# Insert new user focument\nuser_id = db.users.insert_one(user).inserted_id\n\n# Output user id\nprint(user_id)\n\n# Print the document returned from the FindOne Query\npprint.pprint(db.users.find_one({\"employee_id\": \"1010101\"}))\n","repo_name":"adamluna/web-335","sub_path":"week-9/luna-user-service.py","file_name":"luna-user-service.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"42230925671","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/1/3 14:51\n# @Author : wmy1995\n\nimport torch\nfrom torch import nn\nimport sys\nfrom .tcn import TemporalConvNet\n\nclass TrafficTCN(nn.Module):\n def __init__(self,emb_size,n_categs,channels_size,\n kernel_size=2,dropout=0.3,emb_dropout=0.1,tied_weights=False):\n super(TrafficTCN,self).__init__()\n\n self.encoder = nn.Embedding(n_categs,emb_size)\n self.traffic_tcn = TemporalConvNet(emb_size,channels_size,kernel_size,dropout=dropout)\n self.decoder = nn.Linear(channels_size[-1],n_categs) #卷积的输出channel 接着 n_categs\n self.drop = nn.Dropout(emb_dropout)\n self.init_weights() # init weights\n\n def init_weights(self):\n self.encoder.weight.data.normal_(0,0.01)\n self.decoder.weight.data.normal_(0,0.01)\n self.decoder.bias.data.fill_(0)\n\n def forward(self, input):\n \"\"\"\n input: n * sequence_len (sequence_len:你准备用多长的序列预测下一个值,预先可以配置)\n emb: n * sequence_len * emb_size\n \"\"\"\n emb = self.drop(self.encoder(input.to(torch.int64)))\n \"\"\"\n emb.transpose(1,2): n * emb_size * sequence_len\n y : n * sequence_len * channes[-1]\n \"\"\"\n y = self.traffic_tcn(emb.transpose(1,2)).transpose(1,2)\n \"\"\"\n y: n * sequence_len * n_cates\n \"\"\"\n y = self.decoder(y) # 如果我把这些整数值 全看成类别 不就简单了\n\n return y.contiguous()\n\n\n\n\n","repo_name":"ChiYaoLa/TimeSeriesPredict","sub_path":"model/traffic_tcn.py","file_name":"traffic_tcn.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"86"} +{"seq_id":"2391296631","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 28 17:10:03 2018\n\n@author: chemla\n\"\"\"\n\nimport os, os.path\nfrom .audio import DatasetAudio\n\n\ndef merge_dicts(dict1, dict2, verbose=False):\n new_dict = dict(dict1)\n for k, v in dict2.items():\n if verbose:\n if k in dict1.keys():\n print('[Warning] key %s present in both dictionaries'%k)\n new_dict[k] = v\n return new_dict\n\ndef load_dataset(folder_path, analysis_path=None, transformType='stft', flattening_function=None, *args, **kwargs):\n 
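# normalize the dataset path, default the analysis cache to /tmp/<dataset name>,\n    # and default the flattening function to \"take the first transform\" (lambda x: x[0])\n    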
folder_path = os.path.abspath(folder_path)\n _, dataset_name = os.path.split(folder_path)\n analysis_path = analysis_path or '/tmp/'+dataset_name\n flattening_function = flattening_function or (lambda x: x[0])\n importOptions = {\n 'dataPrefix': folder_path,\n 'dataDirectory':folder_path,\n 'analysisDirectory':analysis_path, # Root to place (and find) the transformed data\n 'transformName':transformType,\n 'importType':[], # Type of import (direct or asynchronous)\n 'importCallback':None, # Function to perform import of data\n 'types':['mp3', 'wav', 'wave', 'aif', 'aiff', 'au'], # Accepted types of files\n 'transformCallback':None, # Function to transform data (can be a list)\n 'verbose':True, # Be verbose or not\n 'checkIntegrity':True, # Check that files exist (while loading)\n 'forceUpdate':True, # Force the update\n 'matlabCommand':'/usr/local/MATLAB/MATLAB_Production_Server/R2015a/bin/matlab',\n };\n dataset = DatasetAudio(importOptions)\n dataset.listDirectory()\n dataset.importMetadataTasks()\n transformList, transformParameters = dataset.getTransforms();\n \n # Compute Transforms\n transformList, transformParameters = dataset.getTransforms();\n transformParameters = merge_dicts(transformParameters, kwargs)\n transformOptions = dict(importOptions)\n transformOptions['transformTypes'] = [transformType]\n transformOptions['transformNames'] = [transformType] \n transformOptions['transformParameters'] = [transformParameters]\n dataset.computeTransforms(None, transformOptions, padding=False)\n \n # Import transforms\n dataset.importData(None, importOptions);\n dataset.flattenData(flattening_function)\n dataset.constructPartition([], ['train','test'], [0.8, 0.2], False);\n return dataset","repo_name":"acids-ircam/variational-timbre","sub_path":"data/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"86"} +{"seq_id":"25842369624","text":"import gym\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\nfrom actor import Actor\nfrom critic import Critic\nfrom noise import OrnsteinUhlenbeckActionNoise\n\nimport os\nimport numpy as np\nimport random\nimport gym\nfrom environment import RandomizedEnvironment\nfrom replay_buffer import Episode, ReplayBuffer\nimport hopper_2\n\nMAX_STEPS = 50\nTAU = 5e-3\nLEARNING_RATE = 1e-3\n\ndef update_net(model, target_model, tau=1.):\n '''更新目标网络'''\n for tar_param, param in zip(target_model.parameters(), model.parameters()):\n tar_param.data.copy_(param.data * tau + tar_param.data * (1.0 - tau))\n\nclass Agent:\n '''就是写一个pytorch的DDPG的结构即可'''\n def __init__(self, experiment, batch_size):\n self._dummy_env = gym.make(experiment)\n\n # High parms for this code \n # 判断是什么环境,是字典形还是普通环境\n obs_judge = self._dummy_env.reset()\n if type(obs_judge) == dict:\n self._dim_state = self._dummy_env.observation_space['observation'].shape[0]\n self._dim_goal = self._dummy_env.observation_space['desired_goal'].shape[0]\n else:\n self._dim_state = self._dummy_env.observation_space.shape[0]\n\n self._dim_action = self._dummy_env.action_space.shape[0]\n self._dim_env = 1 # 随机化环境参数的维数,如果是只随机化摩擦力,那就是1维,随机化多个参数,就是多维。\n self._batch_size = batch_size\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n # agent noise\n self._action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(self._dim_action))\n\n # 初始化网络\n self._actor = Actor(self._dim_state, self._dim_goal, self._dim_action, self._dummy_env, TAU, LEARNING_RATE, 
self._batch_size).to(self.device)\r\n        self._critic = Critic(self._dim_state, self._dim_goal, self._dim_action, self._dim_env, self._dummy_env, TAU, LEARNING_RATE, self._batch_size).to(self.device)\r\n        self._actor_target = Actor(self._dim_state, self._dim_goal, self._dim_action, self._dummy_env, TAU, LEARNING_RATE, self._batch_size).to(self.device)\r\n        self._critic_target = Critic(self._dim_state, self._dim_goal, self._dim_action, self._dim_env, self._dummy_env, TAU, LEARNING_RATE, self._batch_size).to(self.device)\r\n\r\n        # initialize the target networks' weights from the online networks\r\n        update_net(self._actor, self._actor_target, tau=1.)\r\n        update_net(self._critic, self._critic_target, tau=1.)\r\n\r\n        # optimizers\r\n        self.actor_opt = torch.optim.Adam(self._actor.parameters(), lr=self._actor._learning_rate)\r\n        self.critic_opt = torch.optim.Adam(self._critic.parameters(), lr=self._critic._learning_rate)\r\n\r\n        # loss function\r\n        self.loss_function = torch.nn.MSELoss()\r\n\r\n        # dict for recording scalars\r\n        self.summaries = {}\r\n    \r\n    def get_action(self, obs, goal, history):\r\n        '''Get an action from the actor.'''\r\n        obs = torch.FloatTensor(obs).to(self.device)\r\n        goal = torch.FloatTensor(goal).to(self.device)\r\n        history = torch.FloatTensor(history).to(self.device)\r\n        action = self._actor(obs, goal, history)\r\n        action = action.cpu().detach().numpy()\r\n        return action\r\n    \r\n    def action_noise(self):\r\n        return self._action_noise()\r\n    \r\n    def update_target_actor(self):\r\n        update_net(self._actor, self._actor_target, TAU)\r\n    \r\n    def update_target_critic(self):\r\n        update_net(self._critic, self._critic_target, TAU)\r\n\r\n    def save_model(self, filename):\r\n        torch.save(self._actor, filename)\r\n\r\n    def load_model(self, filename):\r\n        self._actor = torch.load(filename)\r\n\r\n    def get_dim_state(self):\r\n        return self._dim_state\r\n\r\n    def get_dim_action(self):\r\n        return self._dim_action\r\n    \r\n    def get_dim_env(self):\r\n        return self._dim_env\r\n\r\n    def get_dim_goal(self):\r\n        return self._dim_goal\r\n\r\n\r\n# if __name__ == '__main__':\r\n#     dummy_env = gym.make('FetchSlide2-v1')\r\n    \r\n#     env2 = gym.make('HopperRandom-v1')\r\n\r\n#     print(dummy_env.observation_space['observation'].shape[0])\r\n#     print(dummy_env.observation_space['desired_goal'].shape[0])\r\n#     print(env2.observation_space.shape[0])\r\n    \r\n    ","repo_name":"PeiZhangNEU/Dynamic_randomization_torch","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"}
{"seq_id":"8441278795","text":"# read an integer, say whether it is prime (trial division up to sqrt(n))\nn = int(input('Digite um número inteiro: '))\nis_prime = n >= 2\nd = 2\nwhile d * d <= n:\n    if n % d == 0:\n        is_prime = False\n        break\n    d += 1\nif is_prime:\n    print('{} é um número primo.'.format(n))\nelse:\n    print('{} não é um número primo.'.format(n))\n    if n % 10 in (0, 5):\n        print('Números com final \"0\" ou \"5\" nunca são primos.')","repo_name":"petroniocangussu/Python","sub_path":"CursoPython3-M2/desafio52.py","file_name":"desafio52.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
{"seq_id":"42983369728","text":"def addReverse(s):\n\ts2 = s.split(\" \")\n\ta = s2[0][::-1]\n\tb = s2[1][::-1]\n\tprint(int(str((int(a) + int(b)))[::-1]))\n\ncasesCount = int(raw_input())\ncases = []\nfor i in range(casesCount):\n\tcases.append(raw_input())\nfor c in cases:\n\taddReverse(c)\n\t","repo_name":"stanislaw-zakrzewski/PythonSPOJ","sub_path":"Ready 
solutions/ADDREV.py","file_name":"ADDREV.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"13279407824","text":"import time\n\ndef show():\n print(\"\\nCurrent Board:\\n---------------------\")\n for slot in range(1, len(board)):\n print(board[slot], end = ' ')\n if slot and slot % 3 == 0:\n print()\n\ndef place(marker):\n \"\"\"\n choice = len(board)\n while True:\n if choice < len(board) and board[choice] not in markers:\n board[choice] = marker\n break;\n else:\n choice = int(input(marker + \" select a spot: \"))\n \"\"\"\n choice = -1\n while True:\n try:\n if type(choice) == int and choice > 0 and choice < len(board) and board[choice] not in markers:\n board[choice] = marker\n break\n else:\n choice = int(input(marker + \" select a spot: \"))\n except:\n pass\n\n\ndef move(marker):\n try:\n userFrom = int(input(\"\\n\" + marker + \" move your marker from: \"))\n userTo = int(input(\"Move your marker To: \"))\n except:\n userFrom, userTo = -1, -1\n\n if userFrom > 0 and userFrom < len(board):\n if board[userFrom] == marker and board[userTo] not in markers:\n board[userTo] = marker\n board[userFrom] = userFrom\n else:\n print(\"Please make a valid move next turn.\")\n else:\n print(\"PLease enter a number between 0 and 9 nexr turn.\")\n\ndef movesLeft(marker):\n return board.count(marker) < 3\n\ndef row(marker):\n f = board[1:4].count(marker)\n s = board[4:7].count(marker)\n t = board[7:10].count(marker)\n return True if ((f == 3) or (s == 3) or (t == 3)) else False\n\ndef col(marker):\n f = board[1::3].count(marker)\n s = board[2::3].count(marker)\n t = board[3::3].count(marker)\n return True if ((f == 3) or (s == 3) or (t == 3)) else False\n\ndef diag(marker):\n right = board[1::4].count(marker)\n left = board[3:-2:2].count(marker)\n return (right == 3) or (left == 3) or False\n\ndef gameover():\n for mark in markers:\n r, c, d = row(mark), col(mark), diag(mark)\n if any([r, c, d]):\n return mark\n return False\n\nboard = ['', 1, 2, 3, 4, 5, 6, 7, 8, 9]\nmarkers, counter = ['X', 'O'], 0\n\n\"\"\"\nwhile not gameover():\n counter += 1\n marker = markers[counter % 2] \n show()\n if movesLeft():\n place(marker)\n else:\n move(marker)\nprint(\"WINNER: \", gameover())\n\"\"\"","repo_name":"wingerlang/Python","sub_path":"socket/tictactoe2.py","file_name":"tictactoe2.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"30170805638","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 13 21:08:18 2019\r\n\r\n@author: 54164\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt \r\nimport time\r\nimport LRA_GL_utils as utils\r\nimport data_pre_process as dprocess\r\n\r\ntime_start=time.time()\r\n\r\n#train_vis,train_nir,test_vis,test_nir =utils.readPicture(folder_path=\"./dataset_CASIA\")\r\n\r\n#train_vis = np.load('train_vis.npy')\r\n#train_nir = np.load('train_nir.npy')\r\n#test_vis = np.load('test_vis.npy')\r\n#test_nir = np.load('test_nir.npy')\r\n\r\n#pre process the pictures\r\n#dprocess.cut_face_into_folder(train_vis,\"train_vis_face\")\r\n#dprocess.cut_face_into_folder(train_nir,\"train_nir_face\")\r\n#dprocess.cut_face_into_folder(test_vis,\"test_vis_face\")\r\n#dprocess.cut_face_into_folder(test_nir,\"test_nir_face\")\r\n\r\ntrain_vis_face,train_nir_face,test_vis_face,test_nir_face = dprocess.reload_new_faces()\r\n\r\ndprocess.rename_pictures()\r\n\r\nk = 
test_nir_face.shape[0]\r\nm = train_nir_face.shape[0]\r\nY_gl = np.hstack((np.eye(k),np.zeros([k,m])))\r\n#Y_gl = np.hstack((np.eye(k),np.zeros([k,1000])))\r\n\r\nprobe = utils.lbp_encode(test_nir_face)\r\nprint(\"encoded test_nir !!!\")\r\nX = utils.lbp_encode(test_vis_face)\r\nprint(\"encoded test_vis !!!\")\r\nvariants = utils.intro_class_variant(train_vis_face,train_nir_face)\r\nprint(\"extracted the varients !!!\")\r\nvariants_lbp = utils.lbp_encode(variants)\r\nprint(\"encoded varients !!!\")\r\n\r\nX_gl = np.hstack((X,variants_lbp))\r\n#X_gl = np.hstack((X,variants_lbp[:,0:1000]))\r\n\r\nX_gl_mat = np.mat(X_gl)\r\nX_gl_inv = X_gl_mat.I\r\nX_gl_inv = np.array(X_gl_inv)\r\n\r\nW_gl = np.dot(Y_gl,X_gl_inv)\r\nY_gl_hat = np.dot(W_gl,probe)\r\n\r\naccuracy = utils.calculate_accuracy(Y_gl_hat)\r\naccuracy_top5 = utils.calculate_accuracy_top5(Y_gl_hat)\r\n\r\ntime_end=time.time()\r\n\r\nprint('totally cost : ',time_end-time_start)\r\n\r\n\r\n\r\n","repo_name":"Metralix/U2FsdGVkX19T6xHCLUpd1jUBlEqNemjG6KwAIzcljWT4QywS0dJdp2O0RG-npZED","sub_path":"LRA_GL/LRA_GL.py","file_name":"LRA_GL.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"74223074844","text":"from Products.Five.browser import BrowserView\nfrom plone import api\nfrom Products.CMFCore.utils import getToolByName\n\n\nclass connection(BrowserView):\n \"\"\" A list of Database Connections\n \"\"\"\n def connList(self):\n results = []\n portal_catalog = getToolByName(self.context, 'portal_catalog')\n brains = portal_catalog(portal_type=\"connection\")\n for brain in brains:\n conn = brain.getObject()\n r = dict(\n id = conn.id,\n title = conn.title,\n url = conn.absolute_url(),\n conn_string = conn.conn_string,\n schema = conn.db_schema,\n table = conn.db_table\n )\n results.append(r)\n return results","repo_name":"mamogmx/plomino.replication","sub_path":"plomino/replication/browser/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"34269800684","text":"from fastapi import APIRouter, Depends\nfrom sqlalchemy.orm import Session\n\nfrom . 
import models, schemas, service\nfrom .dependencies import get_db, validate_store_id\n\nrouter = APIRouter()\n\n\n@router.get(\"/{store_id}\", response_model=schemas.Store)\ndef read_store(store: models.Store = Depends(validate_store_id)):\n return store\n\n\n@router.patch(\"/{store_id}\", response_model=schemas.Store)\ndef update_store(\n payload: schemas.StoreUpdate,\n store: models.Store = Depends(validate_store_id),\n database: Session = Depends(get_db)\n):\n updated_store = service.update_store(database, store.id, payload)\n\n return updated_store\n\n\n@router.get(\"/{store_id}/items\", response_model=list[schemas.SearchItem])\ndef search_for_items_in_store(\n keyword: str = \"%\",\n store: models.Store = Depends(validate_store_id),\n database: Session = Depends(get_db)\n):\n if keyword == \"\":\n return []\n\n items = service.search_items_by_keyword(database, store.id, keyword)\n\n return items\n","repo_name":"gacky1601/niupi-backend","sub_path":"app/api/stores/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"72078728605","text":"\nimport tkinter as tk\nfrom functools import partial\n\ndef add(label_result, n1, n2):\n num1 = (n1.get())\n num2 = (n2.get())\n result = int(num1)+int(num2)\n label_result.config(text=\"Result is %d\" % result)\n return\n\ndef subtract(label_result, n1, n2):\n num1 = (n1.get())\n num2 = (n2.get())\n result = int(num1)-int(num2)\n label_result.config(text=\"Result is %d\" % result)\n return\n\n \ndef multiply(label_result, n1, n2):\n num1 = (n1.get())\n num2 = (n2.get())\n result = int(num1)*int(num2)\n label_result.config(text=\"Result is %d\" % result)\n return\n\ndef divide(label_result, n1, n2):\n num1 = (n1.get())\n num2 = (n2.get())\n result = int(num1)/int(num2)\n label_result.config(text=\"Result is %f\" % result)\n return\n\ndef powers(label_result, n1, n2):\n num1 = (n1.get())\n num2 = (n2.get())\n result=1\n for i in range(int(num2)):\n result = result*int(num1)\n label_result.config(text=\"result is %d\" % result)\n return\n\ndef percentage(label_result, n1, n2):\n num1 = (n1.get())\n num2 = (n2.get())\n result = ((int(num1)/int(num2))*100)\n label_result.config(text=\"Result is %f\" % result)\n return\n \n \nroot = tk.Tk()\nroot.geometry('800x400+200+400')\nroot.title('Simple Calculator')\n\nnumber1 = tk.StringVar()\nnumber2 = tk.StringVar()\n \nlabelTitle = tk.Label(root, text=\"Simple Calculator\").grid(row=0, column=2)\nlabelNum1 = tk.Label(root, text=\"Enter a number\").grid(row=1, column=0)\nlabelNum2 = tk.Label(root, text=\"Enter another number\").grid(row=2, column=0)\nlabelResult = tk.Label(root)\nlabelResult.grid(row=8, column=2)\n \nentryNum1 = tk.Entry(root, textvariable=number1).grid(row=1, column=2)\nentryNum2 = tk.Entry(root, textvariable=number2).grid(row=2, column=2)\nadd = partial(add, labelResult, number1, number2)\nsubtract = partial(subtract, labelResult, number1, number2)\nmultiply = partial(multiply, labelResult, number1, number2)\ndivide = partial(divide, labelResult, number1, number2)\npowers = partial(powers, labelResult, number1, number2)\npercentage = partial(percentage, labelResult, number1, number2)\nbuttonCal = tk.Button(root, text=\"Add\", command=add).grid(row=3, column=0)\nC2 = tk.Button(root,text=\"Subtract\", command=subtract).grid(row=3, column=6)\nC3 = tk.Button(root,text=\"Multiply\", command=multiply).grid(row=5, column=0)\nC4 = tk.Button(root,text=\"Divide\", 
command=divide).grid(row=5, column=6)\nC5 = tk.Button(root,text=\"Power\", command=powers).grid(row=6, column=0)\nC6 = tk.Button(root,text=\"Percentage\", command=percentage).grid(row=6, column=6)\nroot.mainloop()\n\n\n","repo_name":"AaronSeth/Python","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"10619872130","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.common.exceptions import SessionNotCreatedException\nimport os\nfrom pywget import wget\nimport time\n\n\nclass Scrap:\n def __init__(self):\n self.url = \"http://www.dermnet.com/images/Eczema-Hand/photos/\"\n self.base_image_url = 'http://www.dermnet.com/dn2/allJPG3/eczema-hand-'\n\n @staticmethod\n def get_driver():\n options = webdriver.ChromeOptions()\n options.add_argument('headless')\n\n chrome_p = os.listdir('../chrome_driver/')[0]\n chrome_path = f'chrome_driver/{chrome_p}'\n driver = webdriver.Chrome(chrome_path, options=options)\n return driver\n\n def get_data(self, no):\n driver = self.get_driver()\n req = self.url + str(no)\n driver.get(req)\n soup = BeautifulSoup(driver.page_source, 'html')\n print(soup)\n\n def get_url(self, no):\n return f'{self.base_image_url}{no}.jpg'\n\n def get_images(self, download_folder):\n err = []\n for i in range(9,100):\n try:\n wget.download(self.get_url(i), fr'{download_folder}/')\n print('> ', i)\n time.sleep(1)\n except Exception as e:\n time.sleep(1)\n err.append(i)\n print('x ', i)\n\n print('data downloaded')\n print(f'error in {err}')\n\n\n\nScrap().get_images('../data/ec')\n","repo_name":"emylincon/GridProject","sub_path":"Scrap_data/scrap_eczema_hands.py","file_name":"scrap_eczema_hands.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"9290675880","text":"def get_first_right(domino, start, changes_right):\n for i in range(start, len(domino)):\n if domino[i] == '/':\n if i == len(domino)-1:\n changes_right.append(i)\n else:\n pass\n elif domino[i] == '\\\\':\n changes_right.append(i)\n break\n elif domino[i] == '|':\n changes_right.append(i - 1)\n break\n\n\ndef get_first_left(domino, start, changes_left):\n bckw_domino = domino[::-1]\n for i in range(start, len(domino)):\n if bckw_domino[i] == '\\\\':\n if i == len(domino)-1:\n changes_left.append(i)\n else:\n pass\n elif bckw_domino[i] == '/':\n changes_left.append(i)\n break\n elif bckw_domino[i] == '|':\n changes_left.append(i - 1)\n break\n\n\ndef iteration(domino):\n len_domino = len(domino) - 1\n changes_right = list()\n changes_left = list()\n bckw_domino = domino[::-1]\n\n for i in range(len(domino)):\n if domino[i] == '/':\n get_first_right(domino, i, changes_right)\n\n for i in range(len(domino)):\n if bckw_domino[i] == '\\\\':\n get_first_left(domino, i, changes_left)\n changes_left = [len_domino - x for x in changes_left]\n\n # to remove duplicates\n changes = list(set(changes_left + changes_right))\n list_domino = list(domino)\n for i in changes:\n list_domino[i] = \"|\"\n\n return \"\".join(list_domino)\n","repo_name":"sknera/recruitment-task-PAS","sub_path":"reverse_domino.py","file_name":"reverse_domino.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"15383606583","text":"__author__ = 'raphaelfettaya'\nimport 
datefinder\nimport datetime\nfrom math import radians, cos, sin, asin, sqrt\nimport pandas as pd\nimport googlemaps\nimport urllib\nimport requests\n# from urllib import quote #PY2\nfrom urllib.parse import quote\nimport os\nfrom xml.etree import ElementTree as ET\n\nACCESS_TOKEN = os.environ[\"GOOGLE_ACCES_TOKEN\"]\nGEOCODER = googlemaps.Client(key=ACCESS_TOKEN)\nMAX_SHOP_RES = 5\nMAX_DIST_SHOP = 50.0\nLENGOW_TAB_ENTRY = '{http://www.w3.org/2005/Atom}entry'\n\ndef extract_day(sentence):\n date = datefinder.find_dates(sentence)\n if len(date) >= 1:\n return date[0].strftime('%A')\n return datetime.datetime.now().strftime('%A')\n\ndef compute_distance(loc_a, loc_b):\n if loc_a is None or loc_b is None:\n return 1000000\n lon1, lat1 = loc_a\n lon2, lat2 = loc_b\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km\n\n\ndef get_closest_shop_from_sentence(sentence, df_shops):\n try:\n loc = text_search(sentence)\n if loc is None:\n return []\n return get_closest_shop(loc, df_shops)\n except IndexError:\n return []\n\n\ndef get_closest_shop(loc, df_shops):\n shops = df_shops.apply(lambda x: compute_distance(loc, load_location(x['location'])), axis=1)\n sorted_res = shops.sort_values()\n res_shops = sorted_res[sorted_res <= MAX_DIST_SHOP].index[:MAX_SHOP_RES]\n return [df_shops.index.get_loc(res) for res in res_shops]\n\n\ndef text_search(sentence):\n base_url = 'https://maps.googleapis.com/maps/api/place/textsearch/json'\n key_string = '?key=' + ACCESS_TOKEN\n query_string = '&query='+quote(sentence)\n sensor_string = '&sensor=false'\n type_string = ''\n url = base_url+key_string+query_string+sensor_string+type_string\n res_data = requests.get(url).json()\n if len(res_data['results']) == 0:\n return\n loc = res_data['results'][0]['geometry']['location']\n return loc['lat'], loc['lng']\n\n\ndef load_location(loc_data):\n if loc_data == '':\n return\n return tuple([float(lt) for lt in loc_data.split('/')])\n\ndef plural_invert(mot):\n\n def pluriel_ail(mot) :\n ail= 'bail corail émail soupirail travail ventail vitrail'.split()\n if mot in ail :\n return mot[0 : -2] + 'ux'\n\n def pluriel_ou(mot) :\n ou = 'hibou chou genou caillou pou bijou'.split()\n if mot in ou :\n return mot + 'x'\n\n def pluriel_eu(mot) :\n eu = 'pneu bleu'.split()\n if mot in eu : return mot + 's'\n elif mot[-2:] == 'eu' : return (mot+'x')\n\n def pluriel_al(mot) :\n al = 'banal fatal naval natal bancal bal festival chacal carnaval cal serval'.split()\n if mot in al : return mot + 's'\n\n def pluriel_au(mot):\n au = 'landau sarrau'.split()\n if mot in au : return mot + 's'\n elif mot[-2:] == 'au' : return (mot+'x')\n\n def pluriel_except(mot):\n if mot== 'oeil' : return ('yeux')\n elif mot == 'ail' : return ('aulx')\n # elif mot[-1] == 'z' or mot[-1] == 'x' : return (mot)\n\n def pluriel_regular(mot):\n if mot[-1] in ['s', 'x']:\n return \"\".join(mot[:-1])\n else:\n return mot + 's'\n\n functions = [pluriel_ail, pluriel_ou, pluriel_eu, pluriel_al, pluriel_au, pluriel_except, pluriel_regular]\n mot = mot.lower()\n if mot == \"\":\n return mot\n if 'sweat' in mot:\n pass\n for f in functions:\n m = f(mot)\n if m is not None:\n return m\n\n\ndef lengow_parser(xml_file_path):\n \"\"\"\n :param xml_file_path: path to lengow file\n :return: dataframe of parsed data\n \"\"\"\n tree = ET.parse(xml_file_path)\n root = tree.getroot()\n entries = 
[child for child in root if child.tag == LENGOW_TAB_ENTRY]\n all_tags = set([tag.tag for att in entries for tag in att])\n all_data = {k: [] for k in all_tags}\n for entry in entries:\n for tag in all_tags:\n elem = entry.find(tag)\n if elem is None:\n elem = ''\n elif elem.text is not None:\n elem = elem.text\n elif 'href' in elem.attrib:\n elem = elem.attrib['href']\n else:\n elem = ''\n all_data[tag].append(elem)\n return pd.DataFrame(all_data)\n\nif __name__ == '__main__':\n # df_data = pd.read_csv('Data/Shops2.csv').fillna('')\n # print(get_clothest_shop('la boutique de Neuilly Sur Seine svp', df_data))\n print(plural_invert('manteaux'))\n # fp = '../Downloads/lengowFR.xml'\n # df = lengow_parser(fp)\n # df.columns = [col.replace('{http://www.w3.org/2005/Atom}', '') for col in df.columns]\n # df.to_csv('Data/Lengow.csv', encoding='utf-8')\n","repo_name":"fettay/myBot","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"5490577440","text":"# Created by shamilsakib at 04/10/20\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.postgres.fields import JSONField\nfrom django.db import models\nfrom django.utils import timezone\n\n\n# BASE_MODEL = models.Model\n# if hasattr(settings, 'BASE_MODEL') and getattr(settings, 'BASE_MODEL'):\n# BASE_MODEL = getattr(settings, 'BASE_MODEL')\n\n\nclass ReportConfigurationManager(models.Manager):\n def __init__(self, user_field=None, user_fields=None):\n super(ReportConfigurationManager, self).__init__()\n\n def get_queryset(self):\n queryset = super(ReportConfigurationManager, self).get_queryset()\n queryset = queryset.filter(is_deleted=0, is_active=True)\n return queryset\n\n\nclass ReportConfiguration(models.Model):\n model = models.ForeignKey(ContentType, default=None, null=True, on_delete=models.SET_NULL, related_name='reports')\n information = JSONField(max_length=8192, default=None, null=True)\n dimensions = JSONField(max_length=8192, default=None, null=True)\n measures = JSONField(max_length=8192, default=None, null=True)\n filters = JSONField(max_length=8192, default=None, null=True)\n searches = JSONField(max_length=8192, default=None, null=True)\n orders = JSONField(max_length=8192, default=None, null=True)\n report_config = JSONField(max_length=8192, default=None, null=True)\n\n is_draft = models.BooleanField(default=False)\n is_active = models.BooleanField(default=True)\n is_deleted = models.SmallIntegerField(default=0)\n\n created_at = models.DateTimeField(editable=False, default=timezone.now)\n updated_at = models.DateTimeField(editable=False, default=timezone.now)\n\n objects = ReportConfigurationManager()\n\n def __init__(self, *args, **kwargs):\n super(ReportConfiguration, self).__init__(*args, **kwargs)\n\n def __str__(self):\n return f'Model: {self.model}'\n\n def save(self, force_insert=False, force_update=False, using=None, update_fields=None, **kwargs):\n super(ReportConfiguration, self).save(force_insert, force_update, using, update_fields)\n\n class Meta:\n app_label = 'django_reporter_pro'\n","repo_name":"shamilison/django-reporter-pro","sub_path":"django_reporter_pro/models/report_configuration.py","file_name":"report_configuration.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"39795778849","text":"import json\r\nimport 
requests\r\n\r\n\r\nusername = 'SergeyPishchagin'\r\nlink = requests.get('https://api.github.com/users/'+username+'/repos')\r\nresult = link.json()\r\n\r\nwith open('git_repos.json', 'w') as f:\r\n f.write(json.dumps(result))\r\nfor repos in link.json():\r\n print(repos['html_url'])\r\n \r\n","repo_name":"SergeyPishchagin/GeekBrains-repo3","sub_path":"1-1.py","file_name":"1-1.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"41732277569","text":"# File to define a planet(server) and connect it to its respective container unit.\nimport uuid\nimport docker\nimport requests\nimport time\n\nDC = docker.from_env()\n\n\nclass Planet:\n\n def __init__(self, port, vol_id):\n self.PlanetID = uuid.uuid4()\n self.ContainerPort = port\n self.VolID = vol_id\n self.Key = uuid.uuid4()\n self.AccessEnvironment = ['ACCESS_KEY='+str(self.Key)]\n self.ContainerURL = 'http://0.0.0.0:' + str(self.ContainerPort)\n # Note: For IP address use self.Container.attrs['NetworkSettings']['IPAddress']\n self.ContainerID = self.PlaContRun()\n self.Container = DC.containers.get(self.ContainerID)\n\n # Container launch\n def PlaContRun(self):\n vols = {'modelvol' + str(self.VolID): {'bind': '/mnt/VolumeLocal/model_zoo_local/', 'mode': 'rw'},\n 'localvol' + str(self.VolID): {'bind': '/mnt/Data/', 'mode': 'rw'}}\n DC.volumes.create(name='modelvol' + str(self.VolID), driver='local')\n device_requests = [docker.types.DeviceRequest(device_ids=['0'], capabilities=[['gpu']])]\n cont_id = DC.containers.run(\"orbitallearning/central_node:latest\", detach=True,\n ports={'80': self.ContainerPort}, volumes=vols, environment=self.AccessEnvironment,\n device_requests=device_requests)\n time.sleep(40)\n return str(cont_id)[12:-1]\n\n def inference(self, data):\n results = requests.post(self.ContainerURL, json=data)\n return results\n","repo_name":"nkc-research/Orbital_Learning","sub_path":"Planet.py","file_name":"Planet.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"11219274068","text":"from __future__ import print_function\nimport time\nimport rospy\nfrom geometry_msgs.msg import Point, PointStamped\nfrom std_msgs.msg import String\n\npoints = [Point(x = 5.0, y = 0.0, z = 0.0),\n\t\t Point(x = 8.0, y = 7.0, z = 2.0),\n\t\t Point(x = 7.0, y = -4.0, z = -0.5)]\n\ndef send_rotating_points(nPoints, delay):\n\trospy.init_node('quad_pos_test1')\n\tpub = rospy.Publisher('/quad_position', PointStamped, queue_size=10)\n\t\n\tn = 0\n\twhile n < nPoints:\n\t\tn += 1\n\t\ttime.sleep(delay)\n\t\tp = points[n % 3]\n\t\tprint(\"Sending point command:\", p)\n\t\tpub.publish(PointStamped(point = p))\n\trospy.spin()\n\ndef full_pointing_test():\n\t'''\n\tSends Baxter points, tells it to point to them. 
Also tests sending\n\tnull points and the origin, which should both cancel previous loc\n\tmessages, and maximum time before a location is 'forgotten'\"\n\t'''\n\trospy.init_node('quad_pos_test2')\n\tpub = rospy.Publisher('/quad_position', PointStamped, queue_size=10)\n\tcmdPub = rospy.Publisher('cmds_received', String, queue_size=10)\n\n\ttime.sleep(2)\n\tp = points[0]\n\tprint(\"Sending location\", p)\n\tpub.publish(PointStamped(point = p))\n\ttime.sleep(0.5)\n\tprint(\"sending point command\")\n\tcmdPub.publish(String(\"point to the quad\"))\n\ttime.sleep(1.0)\n\tp = points[1]\n\tprint(\"Sending location\", p)\n\tpub.publish(PointStamped(point = p))\n\ttime.sleep(1.0)\n\tprint(\"sending point command\")\n\tcmdPub.publish(String(\"point to the quad\"))\n\ttime.sleep(0.5)\n\tp = points[2]\n\tprint(\"Sending location\", p)\n\tpub.publish(PointStamped(point = p))\n\ttime.sleep(3.0)\n\tprint(\"sending point command - failure expected due to time elapsed\")\n\tcmdPub.publish(String(\"point to the quad\"))\n\ttime.sleep(1.0)\n\tp = points[2]\n\tprint(\"Sending location\", p)\n\tpub.publish(PointStamped(point = p))\n\ttime.sleep(0.5)\n\tp = Point(x = 0.0, y = 0.0, z = 0.0)\n\tprint(\"Sending origin to cancel\", p)\n\tpub.publish(PointStamped(point = p))\n\ttime.sleep(0.5)\n\tprint(\"sending point command - failure expected due to cancel\")\n\tcmdPub.publish(String(\"point to the quad\"))\n\ttime.sleep(1.0)\n\trospy.spin()\n\t\n\t\n\nif __name__ == \"__main__\":\n\t#send_rotating_points(20, 2.0)\n\tfull_pointing_test()\n\t\n\t\n\t\n","repo_name":"mclumd/baxter_cog","sub_path":"speech_recog/quad_loc_test.py","file_name":"quad_loc_test.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"9144571275","text":"import unittest\nfrom decorating import color\n\n\nclass TestColorize(unittest.TestCase):\n\n def test_colorize(self):\n string = 'lain'\n color.COLORED = True\n colored_string = color.colorize(string, 'cyan')\n\n self.assertNotEqual(string, colored_string, \"Must be different\")\n\n def test_colorize_disabled(self):\n string = 'test'\n color.COLORED = False\n colored_string = color.colorize(string, 'cyan')\n self.assertEqual(string, colored_string, \"Disabled; must be the same\")\n\n def test_failing(self):\n string = 'test'\n color.COLORED = True\n fails = False\n try:\n color.colorize(string, 'sua mae', 'eu e vc')\n except RuntimeError:\n fails = True\n\n self.assertTrue(fails, \"Must fails with RunTime if fails\")\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"ryukinix/decorating","sub_path":"tests/test_color.py","file_name":"test_color.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":250,"dataset":"github-code","pt":"86"} +{"seq_id":"30226937811","text":"from torch.utils.data.dataset import Dataset\nimport torch\nfrom deep_03_10 import NKModel\nfrom my_dataset import NkDataSet\n\n#Data_Load\ncsv_path = './test.csv'\n\ncustom_dataset = NkDataSet(csv_path)\n\nmy_dataset_loader = torch.utils.data.DataLoader(dataset=custom_dataset,\n batch_size=1,\n shuffle=False)\n#Model_Load\n#imput , hidden, output size\n\nD_in = 30000 #(100 * 100 * 3)\nH = 1000\nD_out = 2\n\nmodel = NKModel(D_in, H, D_out)\n\n#CrossEntropyLoss 를 사용\ncriterion = torch.nn.CrossEntropyLoss(reduction='sum')\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-4)#1/10000\n\nfor epoch in range(500):\n\n for i,data in enumerate(my_dataset_loader,0):\n # 
Forward pass: Compute predicted y by passing x to the model\n\n #fc 구조이기 때문에 잉렬로 쫙피는 작업이 필요하다.\n\n images,label = data\n\n\n\n #그냥 images를 하면 에러가 난다. 데이터 shape이 일치하지 않아서\n # 100, 100 , 3\n images = images.view(1,30000)\n print(images.size())\n print(\"label is label\",label)\n y_pred = model(images)\n\n print(y_pred.size())\n print(label.size())\n # Compute and print loss\n loss = criterion(y_pred,label)\n\n print(epoch, loss.item())\n\n # Zero gradients, perform a backward pass, and update the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n","repo_name":"hkj9057/03_10","sub_path":"deep2_03_10.py","file_name":"deep2_03_10.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"73751622045","text":"import reflex as rx\n\n\nconfig = rx.Config(\n app_name=\"reflex_showcase\",\n db_url=\"sqlite:///pynecone.db\",\n env=rx.Env.DEV,\n frontend_port=3002,\n backend_port=8002,\n api_url=\"http://localhost:8002\"\n)\n","repo_name":"Lendemor/reflex-showcase","sub_path":"rxconfig.py","file_name":"rxconfig.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"86"} +{"seq_id":"23603286308","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport json\nimport pdb\nimport argparse\nimport random\nfrom collections import defaultdict\n\ndefault_net_Bandwidth = {'LAN':10,'WAN':100,'T3': 1000, 'T2': 10000, 'T1': 1000000}\ndefault_net_latency = {'LAN':10,'WAN':50,'T3': 100, 'T2': 1000, 'T1': 1000}\ndefault_intrfc_Bandwidth = {'LAN':10,'WAN':100,'T3': 1000, 'T2': 10000, 'T1': 1000000}\n\nNetworks = {}\nRouters = {}\nHosts = {}\n\nobject_id = 0\nT1 = 1\nT2 = 4\nxT2 = 4.0\nT3d = 4\nWANd = 10\nLANd = 10\nhosts_file = None \nrtrs_file = None\nstore_nets_file = None \nstore_topo_file = None\ninit_seed = '123455'\n\n\ndef getArgs(cmdline):\n global init_seed\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-T1', metavar='does a T1 backbone exist', dest='T1', action='store_const', const=True, required=False)\n parser.add_argument('-T2', metavar='number of T2 networks', dest='T2', required=True)\n parser.add_argument('-xT2', metavar='average number of cross T2 connections per T2', dest='xT2', required=True)\n parser.add_argument('-T3d', metavar='density of T3 networks per T2', dest='T3d', required=True)\n parser.add_argument('-WANd', metavar='density of WAN networks per T3', dest='WANd', required=True)\n parser.add_argument('-LANd', metavar='density of LAN networks per WAN', dest='LANd', required=True)\n parser.add_argument('-hosts', metavar='file with description of hosts', dest='hosts_file', required=True)\n parser.add_argument('-rtrs', metavar='file with description of routers', dest='rtrs_file', required=False)\n parser.add_argument('-nets', metavar='output file with description of networks', dest='store_nets_file', required=False)\n parser.add_argument('-topo', metavar='output file with description of topology', dest='store_topo_file', required=False)\n parser.add_argument('-seed', metavar='RNG seed', dest='seed', required=False)\n\n args = parser.parse_args(cmdline)\n\n seed = args.seed if args.seed else init_seed\n hosts_file = None if not args.hosts_file else args.hosts_file\n rtrs_file = None if not args.rtrs_file else args.rtrs_file\n store_nets_file = None if not args.store_nets_file else args.store_nets_file\n store_topo_file = None if not args.store_topo_file else 
args.store_topo_file\n\n return args.T1, int(args.T2), float(args.xT2), float(args.T3d), float(args.WANd), float(args.LANd),\\\n hosts_file, rtrs_file, store_nets_file, store_topo_file, seed\n\ndef createHostHomes(hosts_list, nets_by_name):\n # separate out hosts that have IP addresses\n withIP = []\n withoutIP = []\n \n for hd in hosts_list:\n if 'IP_Addr' in hd:\n withIP.append(hd)\n else:\n withoutIP.append(hd)\n \n # cluster the hosts that have IP addresses in the same /24\n cluster = defaultdict(list)\n\n for host_dict in withIP:\n IP_Addr = host_dict['IP_Addr']\n\n # strip off the /32 if present \n IP_Addr = IP_Addr.replace('/32','')\n\n octets = IP_Addr.split('.')\n cluster_name = '.'.join(octets[:3]) \n cluster[cluster_name].append(host_dict)\n\n # if any one of the hosts in a cluster declares a home then they all get the same\n # home and that home gets an IP_Addr. \n # N.B. this assumes that at most one of these has declared a home\n #\n for cn, clstr_list in cluster.items():\n home = None\n for host_dict in clstr_list: \n if 'Home' in host_dict: \n home = host_dict['Home']\n break\n\n if home is None:\n # create a home LAN for the host based on the host's name\n home = host_dict['Name']+'-LAN'\n home_LAN = {'Name': home, 'Level':'LAN'}\n nets_by_name[home] = home_LAN \n \n for host_dict in clstr_list:\n host_dict['Home'] = home\n\n# every LAN has a router to at most one WAN, named as a connection.\n# If that router was not declared in the input file we create one.\n# \n# If a WAN connects to a T3 it has a router. If that router was\n# not declared in the input file we create one\n#\n# If a T3 connects to a T2 and there is no declared router, we create one\n#\n# If two T2's connect and no router is declared for that connection, we make one\n#\n# If a T2 connects to a T1 we either find a router in the input file, or create one.\n#\ndef augmentRoutersList(nets_list, nets_by_name, rtrs_list):\n \n all_rtr_names = set() \n # separate nets by levels\n Level = {'LAN':[],'WAN':[],'T3':[],'T2':[],'T1':[]}\n for net_dict in nets_list:\n Level[ net_dict['Level']].append(net_dict)\n\n # for every connection with every LAN, see if there is already a router.\n # If not, create one\n #\n for lan in Level['LAN']:\n for wan_name in lan['Ext_conn']:\n rtr_name = 'rtr-'+lan['Name']+'-'+wan_name\n if rtr_name not in all_rtr_names:\n rtr = {'Name':rtr_name,'Intrfc':[]}\n rtr['Intrfc'].append({'Faces':lan['Name'], \n 'Bandwidth':default_intrfc_Bandwidth['LAN']})\n \n rtr['Intrfc'].append({'Faces':wan_name,\n 'Bandwidth':default_intrfc_Bandwidth['LAN']})\n \n all_rtr_names.add(rtr_name) \n rtrs_list.append(rtr)\n\n # for every connection with every WAN, see if there is already a router.\n # If not, create one\n #\n for wan in Level['WAN']:\n for t3_name in wan['Ext_conn']:\n rtr_name = 'rtr-'+wan['Name']+'-'+t3_name\n if rtr_name not in all_rtr_names:\n rtr = {'Name':rtr_name,'Intrfc':[]}\n rtr['Intrfc'].append({'Faces':wan['Name'], \n 'Bandwidth':default_intrfc_Bandwidth['WAN']})\n rtr['Intrfc'].append({'Faces':t3_name,\n 'Bandwidth':default_intrfc_Bandwidth['WAN']})\n\n all_rtr_names.add(rtr_name)\n rtrs_list.append( rtr )\n\n # for every connection with every T3, see if there is already a router.\n # If not, create one\n #\n for t3 in Level['T3']:\n for t2_name in t3['Ext_conn']:\n rtr_name = 'rtr-'+t3['Name']+'-'+t2_name\n if rtr_name not in all_rtr_names:\n rtr = {'Name':rtr_name,'Intrfc':[]}\n rtr['Intrfc'].append({'Faces':t3['Name'], \n 'Bandwidth':default_intrfc_Bandwidth['T3']})\n 
rtr['Intrfc'].append({'Faces':t2_name,\n 'Bandwidth':default_intrfc_Bandwidth['T3']})\n\n all_rtr_names.add(rtr_name)\n rtrs_list.append(rtr)\n\n # for every connection with every T3, see if there is already a router.\n # If not, create one\n #\n for t2 in Level['T2']:\n for conn_name in t3['Ext_conn']:\n if nets_by_name[conn_name]['Level'] == 'T2':\n rtr_name_1 = 'rtr-'+t2['Name']+'-'+conn_name \n rtr_name_2 = 'rtr-'+conn_name+'-'+t2['Name']\n rtr_name = rtr_name_1 if rtr_name_1 < rtr_name_2 else rtr_name_2\n if rtr_name not in all_rtr_names:\n rtr = {'Name':rtr_name,'Intrfc':[]}\n rtr['Intrfc'].append({'Faces':t2['Name'], \n 'Bandwidth':default_intrfc_Bandwidth['T2']})\n rtr['Intrfc'].append({'Faces':conn_name,\n 'Bandwidth':default_intrfc_Bandwidth['T2']})\n\n all_rtr_names.add(rtr_name)\n rtrs_list.append(rtr)\n\n elif nets_by_name[conn_name]['Level'] == 'T1':\n rtr_name = 'rtr-'+t2['Name']+'-'+conn_name\n if rtr_name not in all_rtr_names:\n rtr = {'Name':rtr_name,'Intrfc':[]}\n rtr['Intrfc'].append({'Faces':t2['Name'], \n 'Bandwidth':default_intrfc_Bandwidth['T2']})\n rtr['Intrfc'].append({'Faces':conn_name,\n 'Bandwidth':default_intrfc_Bandwidth['T2']})\n\n all_rtr_names.add(rtr_name)\n rtrs_list.append( rtr )\n\n\n# net_dict_list is a list of dictionaries, each of which\n# describes a network, with attributes\n# name (string)\n# Ext_conn (list of strings)\n# level (string)\n# Bandwidth (float)\n# latency (float)\n# number (int)\n#\n# from this create and return a dictionary with highest level attributes\n# 'Networks', 'Routers', 'Hosts'\n# Each returns a list of object dictionaries of the given type\n#\n# The hosts are carried in from expression in the hosts_file\n#\n# The list of routers is initialized by whatever is in rtrs_file (if not empty),\n# and then computed from interactions between networks and hosts\n#\n#\ndef buildTopology(net_dict_list, rtrs_file, hosts_file):\n rtn_dict = {'Networks':net_dict_list, 'Routers':[], 'Hosts':[]}\n\n if rtrs_file:\n with open(rtrs_file,'r') as rf:\n rtn_dict['Routers'] = json.load(rf)\n\n if hosts_file:\n with open(hosts_file,'r') as rf:\n rtn_dict['Hosts'] = json.load(rf)\n\n nets_by_name = {}\n for net_dict in net_dict_list:\n nets_by_name[net_dict['Name']] = net_dict\n\n createHostHomes(rtn_dict['Networks'], nets_by_name)\n\n # build up routers from network topology\n #\n augmentRoutersList(net_dict_list, nets_by_name, rtn_dict['Routers'])\n\n # number all the objects\n object_id = 0\n for net_dict in rtn_dict['Networks']:\n net_dict['Number'] = object_id\n object_id += 1\n\n for rtr_dict in rtn_dict['Routers']:\n rtr_dict['Number'] = object_id\n object_id += 1\n\n for host_dict in rtn_dict['Hosts']:\n host_dict['Number'] = object_id\n object_id += 1\n\n return rtn_dict\n \n\ndef main():\n global T1, T2, xT2, T3, WAN, LAN, seed, hosts_file, store_nets_file\n if len(sys.argv) < 2:\n print('arguments required')\n exit()\n\n cmdline = []\n if sys.argv[1] == '-is':\n with open(os.path.abspath(sys.argv[2]),'r', encoding='latin-1') as cf:\n for line in cf.readlines():\n line = line.strip()\n vect = line.split()\n for idx in range(0, len(vect)):\n cmdline.append( vect[idx].strip() )\n else:\n cmdline = sys.argv[1:]\n\n T1_exists, T2n, xT2, T3d, WANd, LANd, hosts_file, rtrs_file, store_nets_file, store_topo_file, seed = getArgs(cmdline)\n\n random.seed(seed)\n\n if T1:\n T1 = {'Name':'Backbone', 'Level':'T1'}\n\n T2_list = []\n for idx in range(0,int(T2n)):\n T2_list.append({'Name':'T2-'+str(idx),'Ext_conn':['Backbone'],'Level':'T2',\n 
'Bandwidth':default_net_Bandwidth['T2'],\\\n 'Latency':default_net_latency['T2']})\n\n # from the average number of xT2 connections per T2 compute\n # what the probability of a given edge being present is. \n #\n # number of T2 networks\n L2 = len(T2_list)\n\n # average connections per T2 is probability a different T2\n # is a connection, times the number of different T2's\n #\n # xT2 = p*(L2-1)\n # so\n # p = xT2/(L2-1)\n #\n prxT2 = float(xT2)/(L2-1)\n\n for idx in range(0, L2):\n for jdx in range(idx+1, L2): \n p = random.random()\n if p < prxT2:\n T2_list[idx]['Ext_conn'].append( T2_list[jdx]['Name'] )\n T2_list[jdx]['Ext_conn'].append( T2_list[idx]['Name'] )\n \n\n # T3d is the ratio of number of T3 networks to T2 networks\n T3_list = []\n for idx in range(0, int(L2*T3d + 1)):\n T3_list.append({'Name':'T3-'+str(idx), 'Ext_conn':[], 'Level':'T3',\\\n 'Bandwidth':default_net_Bandwidth['T3'],\n 'Latency':default_net_latency['T3']})\n \n L3 = len(T3_list)\n for idx in range(0, L3):\n # choose a random T2 and its reflection as the T2 points of connection\n if L2 > 1:\n T2_base = random.randint(0,L2-1)\n offset = L2\n while (T2_base+offset)%L2 == T2_base:\n offset += 1\n\n T2_reflect = (T2_base+offset)%L2\n T3_list[idx]['Ext_conn'].append(T2_list[T2_base]['Name'])\n T3_list[idx]['Ext_conn'].append(T2_list[T2_reflect]['Name'])\n else:\n T3_list[idx]['Ext_conn'].append(T2_list[0]['Name'])\n\n # WANd is the ratio of number of WAN networks to T3 networks\n WAN_list = []\n for idx in range(0, int(L3*WANd + 1)):\n WAN_list.append({'Name':'WAN-'+str(idx), 'Ext_conn':[], 'Level':'WAN',\\\n 'Bandwidth':default_net_Bandwidth['WAN'],\n 'Latency':default_net_latency['WAN']})\n \n # A WAN typically connects to one T3, choose one modulo index identity\n LWAN = len(WAN_list)\n for idx in range(0, LWAN):\n T3id = idx%L3 \n WAN_list[idx]['Ext_conn'].append(T3_list[T3id]['Name'])\n\n # LANd is the ratio of number of LAN networks to WAN networks\n LAN_list = []\n for idx in range(0, int(LWAN*LANd + 1)):\n LAN_list.append({'Name':'LAN-'+str(idx), 'Ext_conn':[], 'Level':'LAN',\\\n 'Bandwidth':default_net_Bandwidth['LAN'],\n 'Latency':default_net_latency['LAN']})\n \n # A WAN typically connects to one T3, choose one modulo index identity\n LLAN = len(LAN_list)\n for idx in range(0, LLAN):\n WANid = idx%LWAN \n LAN_list[idx]['Ext_conn'].append(WAN_list[WANid]['Name'])\n\n nets_json = {}\n nets_json['Networks'] = []\n if T1:\n nets_json['Networks'].append(T1)\n nets_json['Networks'].extend(T2_list)\n nets_json['Networks'].extend(T3_list)\n nets_json['Networks'].extend(WAN_list)\n nets_json['Networks'].extend(LAN_list)\n\n if store_nets_file is not None:\n with open(store_nets_file,'w') as wf:\n json.dump(nets_json, wf, sort_keys=True, indent=4)\n\n topo_dict = buildTopology(nets_json['Networks'], rtrs_file, hosts_file)\n\n if store_topo_file is not None:\n with open(store_topo_file,'w') as wf:\n json.dump(topo_dict, wf, sort_keys=True, indent=4)\n\nif __name__ == '__main__':\n main()\n \n\n\n","repo_name":"isqrsqr/simulator","sub_path":"apps/read_topo/build_net.py","file_name":"build_net.py","file_ext":"py","file_size_in_byte":14046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"499511149","text":"# 234. 
回文链表\n#\n# 请判断一个链表是否为回文链表。\n#\n# 示例 1:\n#\n# 输入: 1->2\n# 输出: false\n#\n# 示例 2:\n#\n# 输入: 1->2->2->1\n# 输出: true\n\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\nclass Solution:\n\n    def isPalindrome(self, head: ListNode) -> bool:\n\n        if not head:\n            return True\n        mid = self.find_mid(head)\n        if mid==head:\n            if head.next==None:\n                return True\n            else:\n                return head.val==head.next.val\n\n        sec = mid.next\n        mid.next=None\n\n        sec = self.revese(sec)\n\n        i,j=head,sec\n        while i and j:\n            if i.val!=j.val:\n                return False\n            i=i.next\n            j=j.next\n\n        return True\n\n\n\n    def find_mid(self,head):\n        f,s = head,head\n\n        while f.next and f.next.next:\n            s=s.next\n            f=f.next.next\n\n        return s\n\n    def revese(self,head):\n\n        pre,cur = None,head\n\n        while cur:\n            tmp=cur.next\n            cur.next=pre\n            pre = cur\n            cur=tmp\n        return pre\n\n\n\n","repo_name":"goodluck4s/leetcode","sub_path":"src/hot100_234.py","file_name":"hot100_234.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"73363401245","text":"import tensorflow as tf\nimport tensorflow.keras.layers as layers\nfrom tensorflow.keras import Model\n\nclass Encoder(Model):\n    def __init__(self, voc_size, embeding_dim, enc_units, batch_size):\n        super(Encoder, self).__init__()\n        self.batch_size = batch_size\n        self.enc_units = enc_units\n        self.embedding = layers.Embedding(voc_size,embeding_dim)\n        self.lstm = layers.LSTM(self.enc_units,\n            return_sequences=True,\n            return_state=True)\n    def call(self, x, hidden):\n        x = self.embedding(x)\n        output, state,_ = self.lstm(x, hidden)\n        return output, state\n    def initialize_hidden_state(self):\n        tf.ones((self.batch_size, self.enc_units))\n\nclass Attention(tf.keras.layers.Layer):\n    def __init__(self, unit):\n        super(Attention).__init__()\n        self.w1 = layers.Dense(unit)\n        self.w2 = layers.Dense(unit)\n        self.v = layers.Dense(1)\n    def call(self, query, value):\n        # change the hidden layer shape from (batch_size,unit_size)\n        # as (batch_size,1,unit_size)\n        hidden_with_axis = tf.expand_dims(value, 1)\n        # before self.V the shape of tensor is (batch_size,length_sentence, self.unit)\n        # after this thee shape change as (b,length_sentence,1)\n        scores = self.v(tf.nn.tanh(self.w1(query)+self.w2(hidden_with_axis)))\n        # use the softmax transfer the scores as probability\n        # according we have the (batch_size, length_sentence,unit_size) input which\n        # need to be assigned a weight within each input\n        scores_weight = tf.nn.softmax(scores, axis=1)\n        context_vector = scores_weight*value\n        context_vector = tf.reduce_sum(context_vector, axis=1)\n\n        return context_vector, scores_weight\nclass Decoder(Model):\n    def __init__(self, voc_size, embed_dim, de_unit, batch_size):\n        super(Decoder, self).__init__()\n        self.de_unit = de_unit\n        self.batch_size = batch_size\n        self.embedding = layers.Embedding(voc_size, embed_dim)\n        self.lstm = layers.LSTM(de_unit,\n            return_sequences=True,\n            return_state=True)\n        self.att = Attention(self.de_unit)\n        self.fc = layers.Dense(voc_size)\n    def call(self, x,hidden, enc_output):\n        context_output, weights_map = self.att(hidden,enc_output)\n\n        # change the input as embedding dim\n        x = self.embedding(x)\n        # after above operation the shape of x becomes (batch_size,1,embedding_dim)\n        # concat the output of attention layer and the tensor shape becomes the (batch_size, 1, embedding_dim + uint_of_encoder)\n        x = tf.concat([tf.expand_dims(context_output,1),x], axis=-1)\n        output,state,_ = self.lstm(x)\n        # reshape the output as (batch_size*1, embedding_dim)\n        output = tf.reshape(output,(-1,output.shape[2]))\n        x = self.fc(output)\n        return x, state\n\n\n\n\n","repo_name":"hanguangmicrice/NLP","sub_path":"seq2seq.py","file_name":"seq2seq.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"43034197216","text":"\"\"\"\nCreate a dataset of plausible queries using autosuggest.\n\"\"\"\nfrom random import Random\n\nfrom pandas import DataFrame\nfrom requests import HTTPError\n\nfrom rankeval.paths import QUERIES_DATASET_PATH\nfrom rankeval.dataset.search_api import retrieve_suggestions\n\nSEED_TERMS = {'ebay'}\n\nNUM_QUERIES = 20000\n\nrandom = Random(2)\n\n\ndef full_term(term: str) -> str:\n    return term\n\n\ndef first_two_characters(term: str) -> str:\n    return term[:2]\n\n\ndef create_dataset() -> list[dict[str, str]]:\n    dataset = []\n    done_queries = set()\n    terms = set(SEED_TERMS)\n\n    while len(done_queries) < NUM_QUERIES:\n        term = random.choice(list(terms))\n        query_type = random.choice([full_term, first_two_characters])\n        query = query_type(term)\n\n        if query in done_queries:\n            continue\n\n        try:\n            suggestions = retrieve_suggestions(query)\n        except HTTPError as e:\n            print(\"Exception getting results\", e)\n            break\n        dataset += [{'query': query, 'suggestion': suggestion} for suggestion in suggestions]\n        done_queries.add(query)\n        for suggestion in suggestions:\n            terms |= set(suggestion.split())\n        print(f'Query: {query}, suggestions: {suggestions}')\n\n    return dataset\n\n\ndef save_dataset(dataset: list[dict[str, str]]):\n    DataFrame(dataset).to_csv(QUERIES_DATASET_PATH)\n\n\ndef run():\n    dataset = create_dataset()\n    save_dataset(dataset)\n\n\nif __name__ == '__main__':\n    run()\n","repo_name":"mwmbl/rankeval","sub_path":"rankeval/dataset/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"9898231210","text":"import logging.config\nimport os\n\nimport structlog\n\nfrom node_launcher.constants import NODE_LAUNCHER_DATA_PATH, OPERATING_SYSTEM\n\ntimestamper = structlog.processors.TimeStamper(fmt='%Y-%m-%d %H:%M:%S')\npre_chain = [\n    # Add the log level and a timestamp to the event_dict if the log entry\n    # is not from structlog.\n    structlog.stdlib.add_log_level,\n    timestamper,\n]\n\nlogging.config.dictConfig({\n    'version': 1,\n    'disable_existing_loggers': False,\n    'formatters': {\n        'plain': {\n            '()': structlog.stdlib.ProcessorFormatter,\n            'processor': structlog.dev.ConsoleRenderer(colors=False),\n            'foreign_pre_chain': pre_chain,\n        },\n        'colored': {\n            '()': structlog.stdlib.ProcessorFormatter,\n            'processor': structlog.dev.ConsoleRenderer(colors=True),\n            'foreign_pre_chain': pre_chain,\n        },\n    },\n    'handlers': {\n        'default': {\n            'level': 'DEBUG',\n            'class': 'logging.StreamHandler',\n            'formatter': 'colored',\n        },\n        'file': {\n            'level': 'DEBUG',\n            'class': 'logging.handlers.WatchedFileHandler',\n            'filename': os.path.join(NODE_LAUNCHER_DATA_PATH[OPERATING_SYSTEM],\n                'debug.log'),\n            'formatter': 'plain',\n        },\n    },\n    'loggers': {\n        '': {\n            'handlers': ['default', 'file'],\n            'level': 'DEBUG',\n            'propagate': True,\n        },\n    }\n})\n\n\ndef dropper(logger, method_name, event_dict):\n    for key in event_dict[0][0].keys():\n        if 'rpcpass' in key:\n            event_dict[0][0][key] = 'masked_password'\n    return event_dict\n\n\nstructlog.configure(\n    
processors=[\n structlog.stdlib.add_log_level,\n structlog.stdlib.PositionalArgumentsFormatter(),\n timestamper,\n structlog.processors.StackInfoRenderer(),\n structlog.processors.format_exc_info,\n structlog.stdlib.ProcessorFormatter.wrap_for_formatter,\n dropper\n ],\n context_class=dict,\n logger_factory=structlog.stdlib.LoggerFactory(),\n wrapper_class=structlog.stdlib.BoundLogger,\n cache_logger_on_first_use=True,\n)\n\nlog = structlog.get_logger()\n","repo_name":"lightning-power-users/node-launcher","sub_path":"node_launcher/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":356,"dataset":"github-code","pt":"86"} +{"seq_id":"7397074593","text":"import csv\nimport random\n\nclass Tickers():\n def __init__(self):\n f1 = open(\"nasdaq_screener.csv\", newline='')\n\n csvreader = csv.reader(f1, delimiter=',') \n\n mylist = []\n counter = 0\n for row in csvreader:\n if counter == 0: # skip over the informative 1st row\n counter += 1\n continue\n mylist.append(row[0])\n counter += 1\n self.ticks = mylist\n\n def getRandomTick(self):\n return random.choice(self.ticks)\n\n def getXRandomTicks(self, num):\n tmplst = []\n for i in range(num):\n tmplst.append(random.choice(self.ticks))\n return tmplst\n\n def getNumTickers(self):\n return len(self.ticks)\n\n","repo_name":"christianniebling/stocks","sub_path":"tickers.py","file_name":"tickers.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"24520439059","text":"#Author guo\nclass Solution:\n def singleNumber(self, nums):\n #两个不相等的元素必定至少有一位不同\n #将数组元素异或得到结果是不存在重复的两个元素异或的结果\n #diff&=-diff 得到diff右侧不为0的位\n diff=0\n for num in nums:\n diff^=num\n diff&=-diff#得到最右边一位\n ret=[0]*2\n for num in nums:\n if num&diff==0:\n ret[0]^=num\n else:\n ret[1]^=num\n\n return ret","repo_name":"guojia60180/algorithm","sub_path":"算法题目/算法题目/位运算/LeetCode260数组中不重复的两个元素.py","file_name":"LeetCode260数组中不重复的两个元素.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"28440624424","text":"from setuptools import setup, find_packages\nfrom pathlib import Path\n\nNAME = 'oxbuster'\nDESCRIPTION = 'A Handy Script for Finding Website Directories using Wordlists'\nTAG = ['dir', 'buster', 'wordlist']\nREQUIREMENT = ['oxansi', 'oxflags']\n\nVERSION = '0.0.3'\nLONG_DESCRIPTION = (Path(__file__).parent / \"README.md\").read_text()\n\nsetup(\n name=NAME,\n version=VERSION,\n author=\"0x68616469\",\n description=DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n\t\tlong_description=LONG_DESCRIPTION, \n\t\tpackages=find_packages(),\n install_requires=REQUIREMENT,\n keywords=TAG,\n entry_points = {'console_scripts': ['oxbuster = oxbuster:main']},\n classifiers=[\n\t\t\"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Operating System :: Unix\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n ]\n)","repo_name":"0x68616469/oxbuster","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"41959649935","text":"from __future__ import annotations\nfrom typing import Any\nimport numpy as np\n\ndef square(params, func, r, z):\n \"\"\"\n 
calculate ||z - func(x, y, *params)||^2\n where x and y are determine by z.shape\n \"\"\"\n z_guess = func(r, *params)\n return np.mean((z - z_guess)**2)\n\ndef masked_square(params, func, r, z, mask):\n z_guess = func(r, *params)\n return np.mean((z[mask] - z_guess[mask])**2)\n\ndef diagonal_gaussian(r, *params):\n a, b = params[-2:]\n ndim = len(params[:-2])//2\n mu = params[:ndim]\n sg = params[ndim:-2]\n z_value = np.array([(x0 - mu0)/sg0 for x0, mu0, sg0 in zip(r, mu, sg)])\n return a * np.exp(-np.sum(z_value**2, axis=0) / 2) + b\n \nclass Gaussian:\n def mu_inrange(self, low, high):\n return np.logical_and(low<=self.mu, self.mu<=high).all()\n \n def sg_inrange(self, low, high):\n sg_ = np.abs(self.sg)\n return np.logical_and(low<=sg_, sg_<=high).all()\n\nclass DiagonalGaussian(Gaussian):\n def __init__(self, params=None):\n if params is None:\n self.mu = self.sg = self.a = self.b = None\n else:\n self.mu, self.sg, self.a, self.b = params\n \n @property\n def mu(self):\n return self._mu\n \n @mu.setter\n def mu(self, value):\n if value is None:\n self._mu = None\n else:\n self._mu = np.asarray(value)\n \n @property\n def sg(self):\n return self._sg\n \n @sg.setter\n def sg(self, value):\n if value is None:\n self._sg = None\n else:\n self._sg = np.asarray(value)\n \n @property \n def params(self):\n \"\"\"Get flattened parameters.\"\"\"\n return tuple(self.mu) + tuple(self.sg) + (self.a, self.b)\n \n @params.setter\n def params(self, params:tuple):\n self.a, self.b = params[-2:]\n self.mu = params[:self.ndim]\n self.sg = params[self.ndim:-2]\n \n def asdict(self) -> dict[str, Any]:\n return {\n \"mu\": self.mu.tolist(),\n \"sigma\": self.sg.tolist(),\n \"A\": self.a,\n \"B\": self.b,\n }\n \n @property\n def ndim(self):\n return self.mu.size\n \n def fit(self, data: np.ndarray, method: str = \"Powell\", mask: np.ndarray | None = None):\n from scipy.optimize import minimize\n if self.mu is None or self.sg is None or self.a is None or self.b is None:\n if mask is not None:\n data = data.copy()\n data[mask] = -np.inf\n self._estimate_params(data)\n else:\n self._estimate_params(data)\n r = np.indices(data.shape)\n \n if mask is None:\n result = minimize(\n square,\n self.params,\n args=(diagonal_gaussian, r, data),\n method=method\n )\n else:\n if mask.shape != data.shape:\n raise ValueError(\n f\"Shape mismatch between data {data.shape!r} and mask {mask.shape!r}.\"\n )\n result = minimize(\n masked_square,\n self.params,\n args=(diagonal_gaussian, r, data, ~mask),\n method=method\n )\n self.params = result.x\n \n return result\n \n def rescale(self, scale: float):\n self.mu *= scale\n self.sg *= scale\n return None\n \n def shift(self, dxdy):\n self.mu += np.array(dxdy)\n return None\n \n def generate(self, shape:tuple) -> np.ndarray:\n r = np.indices(shape, dtype=np.float32)\n return diagonal_gaussian(r, *self.params)\n \n def _estimate_params(self, data:np.ndarray):\n pass\n\nclass GaussianParticle(DiagonalGaussian):\n def __init__(self, params=None, initial_sg=1):\n super().__init__(params)\n self.initial_sg = initial_sg\n \n def _estimate_params(self, data:np.ndarray):\n # n-dim argmax\n self.mu = np.array(np.unravel_index(np.argmax(data), data.shape), dtype=\"float32\")\n self.sg = np.full(data.ndim, self.initial_sg, dtype=\"float32\")\n self.b, p95 = np.percentile(data, [5, 95])\n self.a = p95 - self.b\n return None\n\nclass GaussianBackground(DiagonalGaussian):\n def _estimate_params(self, data: np.ndarray):\n # n-dim argmax\n self.mu = np.array(np.unravel_index(np.argmax(data), 
data.shape), dtype=\"float32\")\n self.sg = np.array(data.shape, dtype=\"float32\")\n self.b, p95 = np.percentile(data, [5, 95])\n self.a = p95 - self.b\n return None","repo_name":"hanjinliu/impy","sub_path":"impy/utils/gauss.py","file_name":"gauss.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"86"} +{"seq_id":"70024726686","text":"import subprocess\nfrom pathlib import Path\n\nimport pytest\n\nREPO_ROOT = Path(__file__).parent.parent.resolve()\n\n\ndef run_docker_test(image, script):\n cmdline = [\"docker\", \"run\", \"--rm\", \"-i\", \"--mount\", f\"type=bind,source={REPO_ROOT},target=/source\", image, \"bash\"]\n print(cmdline)\n with subprocess.Popen(cmdline, stdin=subprocess.PIPE, encoding=\"utf-8\") as cmd:\n cmd.communicate(\"cd /source; set -xeuo pipefail; \" + script)\n r = cmd.wait()\n print(r)\n assert r == 0\n\n\n# @pytest.mark.skip(\"Slow and tested as a side effect of other tests.\")\n@pytest.mark.parametrize(\"image\", [\"ubuntu:20.04\", \"ubuntu:18.04\"])\ndef test_setup_ubuntu(image):\n run_docker_test(\n image,\n \"\"\"\n apt-get update\n scripts/setup_ubuntu.sh\n \"\"\",\n )\n\n\n@pytest.mark.parametrize(\"image\", [\"condaforge/mambaforge\"])\ndef test_list(image):\n run_docker_test(\n image,\n \"\"\"\n mamba install --quiet --yes pyyaml packaging\n scripts/requirements list -l conda/dev -p conda\n \"\"\",\n )\n\n\n@pytest.mark.parametrize(\"image\", [\"condaforge/mambaforge\", \"condaforge/miniforge3\"])\ndef test_requirements_install_conda(image):\n run_docker_test(\n image,\n \"\"\"\n conda install --quiet --yes pyyaml packaging\n scripts/requirements install --arg=--yes --arg=--quiet -l conda/dev -p conda\n \"\"\",\n )\n","repo_name":"KatanaGraph/katana","sub_path":"test/test_requirements.py","file_name":"test_requirements.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"86"} +{"seq_id":"7948215238","text":"score = int(0) #setting up the score\nc1 = False #check whether user has answered question properly loop\na1 = int(0) #users answer to question\nq1 = str(\"\"\"What is the biggest channel on YouTube (based on subscribers)?\n1) T-Series\n2) Youtube Gaming\n3) PewDiePie\n4) JustDestiny\"\"\") #first question\n\nprint(q1)\n#while loop for the first question\nwhile c1 == False: #while the first question isn't answered...\n try: #try...\n a1 = int(input(\"Your answer >>> \"))\n if q1 == 3: #if the user says the answer is 3 then...\n score = score + 1 #add 1 to the score\n print(\"Thanks for your answer\") #thank the user\n c1 = True #set the boolean to true to get out of loop\n elif 0\", views.edit_page, name = \"edit\"),\n path(\"wiki/\", views.entry_check, name = \"entry_check\")\n]\n","repo_name":"Bilbo312/CS50W_Project1","sub_path":"encyclopedia/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"23122064079","text":"import json\nimport requests\nfrom telegram import Update\nfrom telegram.ext import CallbackContext\nfrom redis_connection import redis_connection as redconn\nfrom bot.inline_keyboards import products_keyboard\nfrom bot.keyboards import main_markup, categories_markup\nfrom bot.make_image import get_gr_photo\nfrom api import bot_login, get_my_cart\n\n\ndef start(update: Update, context: CallbackContext):\n try:\n user_data = 
json.loads(redconn.get(f'{update.message.from_user.id}'))\n print('Mana data:\\n',user_data)\n except:\n user_data=None\n if user_data and len(user_data)>2:\n token = user_data.get('token', None)\n if token:\n context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=\"Salom, bu bot ishga tushdi!\",\n reply_markup=categories_markup\n )\n else:\n redconn.mset({f'{update.message.from_user.id}': json.dumps({})})\n context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=\"Salom, bu bot ishga tu shdi!\\n\\nOnline do'konimizning imkoniyatlaridan to'liq foydalanishingiz uchun tizimga kirishingiz zarur\\n\\n❗Telefon kiriting:\",\n # reply_markup={remove_keyboard:true }\n )\n\n\ndef message(update: Update, context: CallbackContext):\n message = update.message.text\n print('user id:', update.message.from_user.id)\n try:\n user_json = redconn.get(f'{update.message.from_user.id}')\n if user_json:\n data = {\n \"phone_number\": message,\n }\n user_data=json.loads(user_json)\n token = user_data.get('token', None)\n phone_number = user_data.get('phone_number', None)\n if token:\n if message == 'Kategoriyalar':\n context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=f\"Marhamat kerakli kategoriyani tanlang\",\n reply_markup=main_markup\n )\n elif message == 'Savatcha':\n res=get_my_cart()\n print(res)\n context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=f\"1.{res[0]}\\n 2.{res[1]}\\n 3.{res[2]}\\n 4.{res[3]} \",\n reply_markup=categories_markup\n )\n elif message == 'Orqaga':\n context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=f\"Bosh sahifa\",\n reply_markup=categories_markup\n )\n else:\n pkeyboard,keys = products_keyboard(message,page=1)\n context.bot.send_photo(\n chat_id=update.effective_chat.id,\n photo=open(get_gr_photo(keys),'rb'),\n caption='Kerakli raqamni tanlang !',\n reply_markup=pkeyboard\n )\n elif phone_number:\n data['password']=message\n # Api request token\n response=bot_login(data)\n res_token = response.json()\n # End api\n if response.status_code==200:\n redconn.mset({f'{update.message.from_user.id}': json.dumps({\n 'phone_number':phone_number,\n 'password':message,\n 'token': res_token['token'],\n })})\n context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=f\"✅🥳 Tabriklayman tizimga kirdiz!\\n/start qayta bosing!!!\",\n )\n else:\n redconn.mset({f'{update.message.from_user.id}': json.dumps({\n 'phone_number': phone_number,})})\n context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=f\"🚫 Telefon raqam yoki parol xato! 🚫\\n Qayta urinib kuring! 
/start\",\n )\n redconn.delete(f'{update.message.from_user.id}')\n else:\n redconn.mset({f'{update.message.from_user.id}': json.dumps({\n 'phone_number': message,\n })})\n context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=f\"Password\"\n )\n else:\n redconn.mset({f'{update.message.from_user.id}': json.dumps({})})\n context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=f\"Online do'konimizning imkoniyatlaridan to'liq foydalanishingiz uchun tizimga kirishingiz zarur /start\"\n )\n except Exception as e:\n print(\"error:\", e)\n","repo_name":"xaydarovmaqsud/eshop","sub_path":"bot/callback_methods.py","file_name":"callback_methods.py","file_ext":"py","file_size_in_byte":4960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"34252116619","text":"import requests, json\n\n# Response 상태 코드\n\ns = requests.Session()\n\n# https://jsonplaceholder.typicode.com\n\nr = s.get('http://httpbin.org/stream/20', stream=True)\n# print(r.text)\n# print(r.encoding)\n\nif r.encoding is None:\n r.encoding = 'utf-8'\n\nfor line in r.iter_lines(decode_unicode=True):\n # print(line)\n b = json.loads(line) # dict\n # print(b)\n\n for e in b.keys():\n # print(b[e])\n print('key:', e, 'value:', b[e])\n\n\n","repo_name":"weon-seongjae/crawling","sub_path":"section3/requests_03.py","file_name":"requests_03.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"36469445964","text":"import os\nimport re\nimport numpy as np\nfrom pymatgen.core.periodic_table import Element\n\n# file format\nf_format = [\"mdl\", \"lmp\", \"POSCAR\"]\n\n# float type or not\ndef is_float(s):\n try:\n float(s)\n except ValueError:\n return False\n else:\n return True\n\n# sort atom type\ndef sort_atom_type(atom_type):\n atom_types = []\n atom = sorted(set(atom_type), key = atom_type.index)\n for i in atom:\n for j in atom_type:\n if i == j:\n atom_types.append(j)\n return atom_types\n\n# sort coordinate by atom type\ndef sort_coordinate(coordinate, atom_type):\n atom = sorted(set(atom_type), key = atom_type.index)\n coor = []\n for i in atom:\n for j in range(len(atom_type)):\n if i == atom_type[j]:\n coor.append(coordinate[j]) \n return coor\n\n# sort ion (\"ion\" represents the value in the third column of lmp file)\ndef sort_ion(ion, atom_type):\n atom = sorted(set(atom_type), key = atom_type.index)\n atom_ion = []\n for i in atom:\n for j in range(len(atom_type)):\n if i == atom_type[j]:\n atom_ion.append(ion[j]) \n return atom_ion\n\n# determine lattice paramter\n# adopt the samller lattice paramter\ndef choice_lattice(lattice1, lattice2, direction, stack_gap):\n lattice = [[\"0\", \"0\", \"0\"], \n [\"0\", \"0\", \"0\"], \n [\"0\", \"0\", \"0\"]]\n if direction == \"x\":\n lattice[0][0] = str(float(lattice1[0][0]) + float(lattice2[0][0]) + stack_gap)\n lattice[1][1] = str(max(float(lattice1[1][1]), float(lattice2[1][1])))\n lattice[2][2] = str(max(float(lattice1[2][2]), float(lattice2[2][2])))\n elif direction == \"y\":\n lattice[1][1] = str(float(lattice1[1][1]) + float(lattice2[1][1]) + stack_gap) \n lattice[0][0] = str(max(float(lattice1[0][0]), float(lattice2[0][0])))\n lattice[2][2] = str(max(float(lattice1[2][2]), float(lattice2[2][2])))\n elif direction == \"z\":\n lattice[2][2] = str(float(lattice1[2][2]) + float(lattice2[2][2]) + stack_gap) \n lattice[0][0] = str(max(float(lattice1[0][0]), float(lattice2[0][0])))\n lattice[1][1] = 
str(max(float(lattice1[1][1]), float(lattice2[1][1])))\n return lattice\n\n# change coordinate (add lattice paramter to coordinate of second structure)\ndef coordinate_change(coordinate2, lattice1, direction, stack_gap):\n for i in range(len(coordinate2)):\n if direction == \"x\":\n coordinate2[i][0] = str(float(coordinate2[i][0]) + float(lattice1[0][0]) + stack_gap)\n elif direction == \"y\":\n coordinate2[i][1] = str(float(coordinate2[i][1]) + float(lattice1[1][1]) + stack_gap)\n elif direction == \"z\":\n coordinate2[i][2] = str(float(coordinate2[i][2]) + float(lattice1[2][2]) + stack_gap)\n return coordinate2\n\n# remove atoms outside the lattice\ndef remove_atom(lattice, coordinate, direction):\n re_atom = []\n for i in range(len(coordinate)):\n if direction == \"x\":\n if float(coordinate[i][1]) > float(lattice[1][1]) or float(coordinate[i][2]) > float(lattice[2][2]):\n re_atom.append(i)\n if direction == \"y\":\n if float(coordinate[i][0]) > float(lattice[0][0]) or float(coordinate[i][2]) > float(lattice[2][2]):\n re_atom.append(i)\n if direction == \"z\":\n if float(coordinate[i][0]) > float(lattice[0][0]) or float(coordinate[i][1]) > float(lattice[1][1]):\n re_atom.append(i) \n return re_atom\n\n# remove atoms outside from the lattice\ndef remove_coordinate(coordinate, re_atom):\n coor = []\n for i in range(len(coordinate)):\n if i not in re_atom:\n coor.append(coordinate[i])\n return coor\n\n# remove atoms outside from the lattice\ndef remove_atom_type(atom_type, re_atom):\n atom = []\n for i in range(len(atom_type)):\n if i not in re_atom:\n atom.append(atom_type[i])\n return atom\n\n# remove atoms outside from the lattice (use when you select lmp file)\ndef remove_ion(ion, re_atom):\n atom_ion = []\n for i in range(len(ion)):\n if i not in re_atom:\n atom_ion.append(ion[i])\n return atom_ion \n\n# count the number of atom\ndef count_atom(atom_type):\n atom_num = []\n atom_el = sorted(set(atom_type), key = atom_type.index)\n for i in atom_el:\n atom_num.append(atom_type.count(i))\n return atom_num\n\n# determine coordinate type\n# The range is set from -1.1 to 1.1 because there were cases where 1 or -1 was exceeded despite relative coordinates\ndef coor_type(coordinate):\n count = 0\n for i in range(len(coordinate)):\n for j in range(3):\n if -1.1 <= float(coordinate[i][j]) <= 1.1:\n count += 1\n if count == len(coordinate) * 3:\n return \"Direct\"\n else:\n return \"Cartesian\"\n\n# determine atom type from atom mass (when you select lmp file)\ndef atom_type_check(weight, atom_list):\n atom_mass = []\n for i in weight:\n for j in atom_list:\n atom_data = Element(j)\n m = str(atom_data.atomic_mass)\n m, _ = m.split()\n if m == i:\n atom_mass.append(j)\n break\n return atom_mass\n\n# convert coordinate (flag = 0 : from Direct to Cartesian, flag = 1 : from Cartesian to Direct)\ndef convert_coor(lattice, coordinate, flag):\n coor_coonverted = []\n if flag == 0:\n temp_coor = np.zeros(shape=(3, 1))\n temp_lattice = np.array([[float(lattice[0][0]), float(lattice[0][1]), float(lattice[0][2])], \n [float(lattice[1][0]), float(lattice[1][1]), float(lattice[1][2])], \n [float(lattice[2][0]), float(lattice[2][1]), float(lattice[2][2])]])\n for i in range(len(coordinate)):\n temp_coor[0, 0] = float(coordinate[i][0])\n temp_coor[1, 0] = float(coordinate[i][1])\n temp_coor[2, 0] = float(coordinate[i][2])\n cart = np.dot(temp_lattice.T, temp_coor)\n coor = [str(cart[0, 0]), str(cart[1, 0]), str(cart[2, 0])]\n coor_coonverted.append(coor)\n return coor_coonverted\n elif flag == 1:\n temp_coor 
= np.zeros(shape=(3, 1))\n temp_lattice = np.array([[float(lattice[0][0]), float(lattice[0][1]), float(lattice[0][2])], \n [float(lattice[1][0]), float(lattice[1][1]), float(lattice[1][2])], \n [float(lattice[2][0]), float(lattice[2][1]), float(lattice[2][2])]])\n for i in range(len(coordinate)):\n temp_coor[0, 0] = float(coordinate[i][0])\n temp_coor[1, 0] = float(coordinate[i][1])\n temp_coor[2, 0] = float(coordinate[i][2])\n cart = np.dot(np.linalg.inv(temp_lattice.T), temp_coor)\n coor = [str(cart[0, 0]), str(cart[1, 0]), str(cart[2, 0])]\n coor_coonverted.append(coor) \n return coor_coonverted\n\n# write mdl file\ndef convert_mdl(f_name, format_after, comment, lattice, atom_type, atom_num, coordinate):\n flag = 0\n f = open(f_name + format_after, \"w\")\n \n # fixed output\n f.write(\"Gear(5) RAND=10 ESWGE T=300.0 STEP=(0,1000100)\\n\")\n f.write(\"BLOCK FIXANGLE W=INF Q=INF LOGSTEP=10 PRINTVELOCITY dt=0.1fs scratch(100)\\n\")\n f.write(\"\\n\")\n for i in comment:\n if re.search(\"structure\", i, re.IGNORECASE):\n f.write(\"{}\".format(i))\n flag = 1\n if flag == 0:\n for i in atom_type:\n f.write(i + \" \")\n f.write(\"structure\\n\")\n f.write(\"\\n\")\n \n # write lattice parameter\n for i in range(3):\n for j in range(3):\n f.write(lattice[i][j] + \" \")\n f.write(\"\\n\")\n f.write(\"\\n\")\n \n # If Cartesian relative coordinates, convert to relative coordinates\n if coor_type(coordinate) == \"Cartesian\":\n coordinate = convert_coor(lattice, coordinate, 1) \n \n # write atom type and coordinate\n tmp = 0\n for i in range(len(atom_type)):\n num = tmp\n for j in range(num, num + int(atom_num[i])):\n f.write(atom_type[i] + \" \")\n for k in range(3):\n f.write(coordinate[j][k] + \" \")\n f.write(\"\\n\")\n tmp += 1\n \n f.close()\n\n# write lmp file \ndef convert_lmp(f_name, format_after, comment, lattice, atom_type, atom_num, ion, coordinate):\n f = open(f_name + format_after, \"w\")\n \n # fixed output\n flag = 0\n for i in comment:\n if re.search(\"structure\", i, re.IGNORECASE):\n f.write(\"{}\".format(i))\n flag = 1\n if flag == 0:\n for i in atom_type:\n f.write(i + \" \")\n f.write(\"structure\\n\")\n f.write(\"\\n\") \n \n # write number of atom\n num = 0\n for i in atom_num:\n num = num + int(i)\n f.write(\" {} atoms\\n\".format(num))\n \n # write number of atom type\n f.write(\" {} atom types\\n\".format(len(atom_type)))\n \n # write lattice parameter\n f.write(\" 0.00 {} xlo xhi\\n\".format(lattice[0][0]))\n f.write(\" 0.00 {} ylo yhi\\n\".format(lattice[1][1]))\n f.write(\" 0.00 {} zlo zhi\\n\".format(lattice[2][2]))\n f.write(\"\\n\")\n \n # write atom mass\n f.write(\" Masses\\n\")\n f.write(\"\\n\")\n for i in range(len(atom_type)):\n atom_data = Element(atom_type[i])\n atom_mass = str(atom_data.atomic_mass)\n atom, _ = atom_mass.split()\n f.write(\"{} {}\\n\".format(i + 1, atom))\n f.write(\"\\n\")\n \n # If relative coordinates, convert to Cartesian coordinates\n if coor_type(coordinate) == \"Direct\":\n coordinate = convert_coor(lattice, coordinate, 0)\n \n # write coordinate\n f.write(\" Atoms\\n\")\n f.write(\"\\n\")\n tmp = 0\n for i in range(len(atom_type)):\n num = tmp\n for j in range(num, num + int(atom_num[i])):\n tmp += 1 \n f.write(\"{} {} {} \".format(tmp, i + 1, ion[j]))\n for k in range(3):\n f.write(coordinate[j][k] + \" \")\n f.write(\"\\n\") \n \n f.close()\n\n# write POSCAR file \ndef convert_POSCAR(f_name, format_after, comment, lattice, atom_type, atom_num, coordinate):\n f = open(format_after + \"_\" + f_name, \"w\")\n \n # fixed output\n 
flag = 0\n for i in comment:\n if re.search(\"structure\", i, re.IGNORECASE):\n f.write(\"{}\".format(i))\n flag = 1\n if flag == 0:\n for i in atom_type:\n f.write(i + \" \")\n f.write(\"structure\\n\")\n \n # write lattice parameter\n f.write(\"1.0\\n\")\n for i in range(3):\n for j in range(3):\n f.write(lattice[i][j] + \" \")\n f.write(\"\\n\")\n \n # write atom type and number of atom\n for i in atom_type:\n f.write(i + \" \")\n f.write(\"\\n\")\n for i in atom_num:\n f.write(str(i) + \" \")\n f.write(\"\\n\")\n \n # write coordinate type\n f.write(coor_type(coordinate) + \"\\n\")\n \n # write coordinate\n for i in range(len(coordinate)):\n for j in range(3):\n f.write(coordinate[i][j] + \" \")\n f.write(\"\\n\")\n \n f.close()\n\n# list of chemical symbol\natom_list = [\"H\", \"He\", \"Li\", \"Be\", \"B\", \"C\", \"N\", \"O\", \"F\", \"Ne\", \"Na\", \"Mg\", \"Al\", \"Si\", \"P\", \"S\", \"Cl\", \"Ar\", \n \"K\", \"Ca\", \"Sc\", \"Ti\", \"V\", \"Cr\", \"Mn\", \"Fe\", \"Co\", \"Ni\", \"Cu\", \"Zn\", \"Ga\", \"Ge\", \"As\", \"Se\", \"Br\", \n \"Kr\", \"Rb\", \"Sr\", \"Y\", \"Zr\", \"Nb\", \"Mo\", \"Tc\", \"Ru\", \"Rh\", \"Pd\", \"Ag\", \"Cd\", \"In\", \"Sn\", \"Sb\", \"Te\", \n \"I\", \"Xe\", \"Cs\", \"Ba\", \"La\", \"Ce\", \"Pr\", \"Nd\", \"Pm\", \"Sm\", \"Eu\", \"Gd\", \"Tb\", \"Dy\", \"Ho\", \"Er\", \"Tm\", \n \"Yb\", \"Lu\", \"Hf\", \"Ta\", \"W\", \"Re\", \"Os\", \"Ir\", \"Pt\", \"Au\", \"Hg\", \"Tl\", \"Pb\", \"Bi\", \"Po\", \"At\", \"Rn\", \n \"Fr\", \"Ra\", \"Ac\", \"Th\", \"Pa\", \"U\", \"Np\", \"Pu\", \"Am\", \"Cm\", \"Bk\", \"Cf\", \"Es\", \"Fm\", \"Md\", \"No\", \"Lr\", \n \"Rf\", \"Db\", \"Sg\", \"Bh\", \"Hs\", \"Mt\", \"Ds\", \"Rg\", \"Cn\", \"Nh\", \"Fl\", \"Mc\", \"Lv\", \"Ts\", \"Og\"]\n\n# search file\npath = os.getcwd()\ndirlist = os.listdir(path)\nfile_list = []\nfor i in dirlist:\n flag = 0\n if os.path.isfile(i):\n for j in f_format:\n if re.search(j, i):\n flag = 1\n if flag == 1:\n file_list.append(i)\nif len(file_list) == 0:\n print(\"There is no file\")\n exit()\n\n# select 1st file\nfor i in range(len(file_list)):\n print(str(i) + \" : \" + file_list[i])\nwhile True:\n num = input(\"Please select 1st file : \")\n if num.isdecimal() and 0 <= int(num) < len(file_list):\n f_1st = file_list[int(num)]\n if \"POSCAR\" not in f_1st:\n f1_format = os.path.splitext(f_1st)[1]\n break\n else:\n f1_format = \"POSCAR\"\n break\n\n# select 2nd file\nwhile True:\n num = input(\"Please select 2nd file : \")\n if num.isdecimal() and 0 <= int(num) < len(file_list):\n f_2nd = file_list[int(num)]\n if \"POSCAR\" not in f_2nd:\n f2_format = os.path.splitext(f_2nd)[1]\n else:\n f2_format = \"POSCAR\"\n if f_2nd == f_1st:\n print(\"Can't select same file\")\n elif f1_format != f2_format:\n print(\"Can't select different format of file\")\n else:\n break\n\n# input direction to stack\nwhile True:\n direction = input(\"Please input direction to stack : \")\n if direction == \"x\" or direction == \"y\" or direction == \"z\":\n break\n\n# input the distance between 1st structure and 2nd structure\nwhile True:\n stack_gap = input(\"Please select distance among two structure : \")\n if is_float(stack_gap) and 0 <= float(stack_gap):\n stack_gap = float(stack_gap)\n break\n\n# input file name\nwhile True:\n err = 0\n f_name = input(\"Please input the name of the converted file: \")\n if f_name != \"\":\n for i in file_list:\n if f_name + \".\" + f1_format == i:\n print(\"{}{} already exists\".format(f_name, f1_format))\n err += 1\n if err == 0:\n break\n\n# read 1st and 2nd file\nf1 = 
open(f_1st, \"r\")\nline1 = f1.readlines()\nf1.close()\n\nf2 = open(f_2nd, \"r\")\nline2 = f2.readlines()\nf2.close()\n\n# data\ncomment1 = []\ncomment2 = []\nlattice1 = []\nlattice2 = []\natom_type1 = []\natom_type2 = []\ncoordinate1 = []\ncoordinate2 = []\n\ncomment = []\nlattice = []\natom_type = []\ncoordinate = []\n\n# get data\nif re.search(\"mdl\", f1_format):\n # get comment\n for i in range(5):\n comment1.append(line1[i])\n \n # get lattice parameter\n for i in range(5, 8):\n lattice1.append(line1[i].split())\n \n # get atom type and coordinate\n for i in range(9, len(line1)):\n atom, *coor = line1[i].split()\n atom_type1.append(atom)\n coordinate1.append(coor)\n \n # convert to cartesian coordinate\n if coor_type(coordinate1) == \"Direct\":\n coordinate1 = convert_coor(lattice1, coordinate1, 0)\n \n # get comment\n for i in range(5):\n comment2.append(line2[i])\n \n # get lattice parameer\n for i in range(5, 8):\n lattice2.append(line2[i].split())\n \n # get atom type and coordinate\n for i in range(9, len(line2)):\n atom, *coor = line2[i].split()\n atom_type2.append(atom)\n coordinate2.append(coor)\n \n # convert to cartesian coordinate\n if coor_type(coordinate2) == \"Direct\":\n coordinate2 = convert_coor(lattice2, coordinate2, 0)\n \n # determine lattice\n lattice = choice_lattice(lattice1, lattice2, direction, stack_gap)\n \n # change coordinate\n coordinate2 = coordinate_change(coordinate2, lattice1, direction, stack_gap)\n\n # combine data of two structure into one array\n atom_type = atom_type1 + atom_type2 \n coordinate = coordinate1 + coordinate2\n \n # sort coordinate because coordinate of different atoms is mixed\n coordinate = sort_coordinate(coordinate, atom_type)\n \n # sort atom type to suit sorted coordinate\n atom_type = sort_atom_type(atom_type) \n \n # remove atoms outside the lattice\n re_atom = remove_atom(lattice, coordinate, direction)\n coordinate = remove_coordinate(coordinate, re_atom)\n atom_type = remove_atom_type(atom_type, re_atom)\n \n # count the number of atom\n atom_num = count_atom(atom_type)\n \n # get atom type\n atom_type = sorted(set(atom_type), key = atom_type.index)\n \n convert_mdl(f_name, f1_format, comment, lattice, atom_type, atom_num, coordinate)\n \nelif re.search(\"lmp\", f1_format):\n ion1 = []\n ion2 = []\n \n # get comment\n comment1.append(line1[0])\n \n # get lattice parameter\n x = line1[4].split()\n y = line1[5].split()\n z = line1[6].split()\n lattice1 = [[str(float(x[1]) - float(x[0])), \"0\", \"0\"], [\"0\", str(float(y[1]) - float(y[0])), \"0\"], [\"0\", \"0\", str(float(z[1]) - float(z[0]))]]\n \n # get atom type, ion and coordinate\n type_num, *_= line1[3].split()\n for i in range(10 + 3 + int(type_num), len(line1)):\n _, atom, ion, *coor = line1[i].split()\n atom_type1.append(atom)\n coordinate1.append(coor)\n ion1.append(ion)\n \n # get comment\n comment2.append(line1[0])\n \n # get atom type, ion and coordinate\n x = line2[4].split()\n y = line2[5].split()\n z = line2[6].split()\n lattice2 = [[str(float(x[1]) - float(x[0])), \"0\", \"0\"], [\"0\", str(float(y[1]) - float(y[0])), \"0\"], [\"0\", \"0\", str(float(z[1]) - float(z[0]))]]\n type_num, *_= line2[3].split()\n for i in range(10 + 3 + int(type_num), len(line2)):\n _, atom, ion, *coor = line2[i].split()\n atom_type2.append(atom)\n coordinate2.append(coor)\n ion2.append(ion) \n \n # determine lattice\n lattice = choice_lattice(lattice1, lattice2, direction, stack_gap)\n \n # change coordinate\n coordinate2 = coordinate_change(coordinate2, lattice1, direction, 
stack_gap)\n\n # combine data of two structure into one array\n coordinate = coordinate1 + coordinate2\n atom_type = atom_type1 + atom_type2\n ion = ion1 + ion2\n \n # sort coordinate because coordinate of different atoms is mixed\n coordinate = sort_coordinate(coordinate, atom_type)\n \n # sort ion because ion of different atoms is mixed\n ion = sort_ion(ion, atom_type)\n \n # sort atom type to suit sorted coordinate\n atom_type = sort_atom_type(atom_type)\n \n # remove atoms outside the lattice\n re_atom = remove_atom(lattice, coordinate, direction)\n coordinate = remove_coordinate(coordinate, re_atom)\n atom_type = remove_atom_type(atom_type, re_atom)\n ion = remove_ion(ion, re_atom)\n \n # count the number of atom\n atom_num = count_atom(atom_type)\n \n # get atom type\n atom_type = sorted(set(atom_type), key = atom_type.index)\n\n convert_lmp(f_name, f1_format, comment, lattice, atom_type, atom_num, ion, coordinate)\n\nelif re.search(\"POSCAR\", f1_format):\n # get comment\n comment1.append(line1[0])\n \n # get scale of lattice\n scale = float(line1[1])\n \n # get lattice parameter\n for i in range(2, 5):\n lattice1.append(line1[i].split())\n for i in range(3):\n for j in range(3):\n lattice1[i][j] = str(float(lattice1[i][j]) * scale)\n \n # get atom type\n atom_type1 = line1[5].split()\n \n # get the number of atom\n atom_num1 = line1[6].split()\n \n # get coordinate\n line_num = 0\n for i in atom_num1:\n line_num = line_num + int(i)\n for i in range(8, 8 + line_num):\n coordinate1.append(line1[i].split()) \n \n # convert to cartesian coordinate\n if coor_type(coordinate1) == \"Direct\":\n coordinate1 = convert_coor(lattice1, coordinate1, 0)\n \n # get comment\n comment2.append(line2[0])\n \n # get scale of lattice\n scale = float(line2[1])\n \n # get lattice parameter\n for i in range(2, 5):\n lattice2.append(line2[i].split())\n for i in range(3):\n for j in range(3):\n lattice2[i][j] = str(float(lattice2[i][j]) * scale)\n \n # get atom type\n atom_type2 = line2[5].split()\n \n # get the number of atom\n atom_num2 = line2[6].split()\n \n # get coordinate\n line_num = 0\n for i in atom_num2:\n line_num = line_num + int(i)\n for i in range(8, 8 + line_num):\n coordinate2.append(line2[i].split()) \n \n # convert to cartesian coordinate\n if coor_type(coordinate2) == \"Direct\":\n coordinate2 = convert_coor(lattice2, coordinate2, 0) \n \n # determine lattice \n lattice = choice_lattice(lattice1, lattice2, direction, stack_gap)\n \n # change coordinate\n coordinate2 = coordinate_change(coordinate2, lattice1, direction, stack_gap)\n \n # combine data of two structure into one array\n coordinate = coordinate1 + coordinate2\n for i in range(len(atom_type1)):\n for j in range(int(atom_num1[i])):\n atom_type.append(atom_type1[i])\n for i in range(len(atom_type2)):\n for j in range(int(atom_num2[i])):\n atom_type.append(atom_type2[i])\n \n # sort coordinate because coordinate of different atoms is mixed \n coordinate = sort_coordinate(coordinate, atom_type)\n \n # sort atom type to suit sorted coordinate\n atom_type = sort_atom_type(atom_type) \n \n # remove atoms outside the lattice\n re_atom = remove_atom(lattice, coordinate, direction)\n coordinate = remove_coordinate(coordinate, re_atom)\n atom_type = remove_atom_type(atom_type, re_atom)\n \n # count the number of atom\n atom_num = count_atom(atom_type)\n \n # get atom type\n atom_type = sorted(set(atom_type), key = atom_type.index)\n \n convert_POSCAR(f_name, f1_format, comment, lattice, atom_type, atom_num, coordinate) \n 
","repo_name":"MDGroup-WatanabeLab/mdpython","sub_path":"Structure/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":21637,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"38449571037","text":"import sqlite3 as sql\n\nclass Conexao():\n base_de_dados = \"d:/clientes.db\"\n conn = None\n indexador = None\n connected = False\n\n def connect(self):\n Conexao.conn = sql.connect(Conexao.base_de_dados)\n Conexao.indexador = Conexao.conn.cursor()\n Conexao.connected = True\n\n def disconnect(self):\n Conexao.conn.close()\n Conexao.connected = False\n\n def execute(self, sql, parms = None):\n if Conexao.connected:\n if parms == None:\n Conexao.indexador.execute(sql)\n else:\n Conexao.indexador.execute(sql, parms)\n return True\n else:\n return False\n\n def fetchall(self):\n return Conexao.indexador.fetchall()\n\n def persist(self):\n if Conexao.connected:\n Conexao.conn.commit()\n return True\n else:\n return False\n\ndef initDB():\n transacao = Conexao()\n transacao.connect()\n transacao.execute(\"CREATE TABLE IF NOT EXISTS clientes (id INTEGER PRIMARY KEY , nome TEXT, sobrenome TEXT, email TEXT, cpf TEXT)\")\n transacao.persist()\n transacao.disconnect()\n\ndef insert(nome, sobrenome, email, cpf):\n transacao = Conexao()\n transacao.connect()\n transacao.execute(\"INSERT INTO clientes VALUES(NULL, ?,?,?,?)\", (nome, sobrenome, email, cpf))\n transacao.persist()\n transacao.disconnect()\n\ndef view():\n transacao = Conexao()\n transacao.connect()\n transacao.execute(\"SELECT * FROM clientes\")\n rows = transacao.fetchall()\n transacao.disconnect()\n return rows\n\ndef search(nome=\"\", sobrenome=\"\", email=\"\", cpf=\"\"\n ):\n transacao = Conexao()\n transacao.connect()\n transacao.execute(\"SELECT * FROM clientes WHERE nome=? or sobrenome=? or email=? or cpf=?\", (nome,sobrenome,email, cpf))\n rows = transacao.fetchall()\n transacao.disconnect()\n return rows\n\ndef delete(id):\n transacao = Conexao()\n transacao.connect()\n transacao.execute(\"DELETE FROM clientes WHERE id = ?\", (id,))\n transacao.persist()\n transacao.disconnect()\n\ndef update(id, nome, sobrenome, email, cpf):\n transacao = Conexao()\n transacao.connect()\n transacao.execute(\"UPDATE clientes SET nome =?, sobrenome=?, email=?, cpf=? 
WHERE id = ?\",(nome, sobrenome,email, cpf, id))\n transacao.persist()\n transacao.disconnect()\n\ninitDB()\n\n\n","repo_name":"guilhon/python-crud-tkinter","sub_path":"sistema_backend.py","file_name":"sistema_backend.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"24962333429","text":"import base64\nimport pytest\n\nfrom tests.support.asserts import assert_pdf\nfrom tests.support.image import pt_to_cm\n\npytestmark = pytest.mark.asyncio\n\nINLINE_BACKGROUND_RENDERING_TEST_CONTENT = \"\"\"\n\n\"\"\"\n\nBLACK_DOT_PNG = \"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVQIW2NgYGD4DwABBAEAwS2OUAAAAABJRU5ErkJggg==\"\nWHITE_DOT_PNG = \"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAC0lEQVQIW2P4DwQACfsD/Z8fLAAAAAAASUVORK5CYII=\"\n\n\n@pytest.mark.parametrize(\"print_with_background, expected_image\", [\n (None, WHITE_DOT_PNG),\n (True, BLACK_DOT_PNG),\n (False, WHITE_DOT_PNG),\n], ids=[\"default\", \"true\", \"false\"])\nasync def test_background(\n bidi_session,\n top_context,\n inline,\n compare_png_bidi,\n render_pdf_to_png_bidi,\n print_with_background,\n expected_image,\n):\n page = inline(INLINE_BACKGROUND_RENDERING_TEST_CONTENT)\n await bidi_session.browsing_context.navigate(\n context=top_context[\"context\"], url=page, wait=\"complete\")\n\n print_value = await bidi_session.browsing_context.print(\n context=top_context[\"context\"],\n background=print_with_background,\n margin={\n \"top\": 0,\n \"bottom\": 0,\n \"right\": 0,\n \"left\": 0\n },\n page={\n \"width\": pt_to_cm(1),\n \"height\": pt_to_cm(1),\n },\n )\n\n assert_pdf(print_value)\n\n png = await render_pdf_to_png_bidi(print_value)\n comparison = await compare_png_bidi(png, base64.b64decode(expected_image))\n assert comparison.equal()\n","repo_name":"servo/servo","sub_path":"tests/wpt/tests/webdriver/tests/bidi/browsing_context/print/background.py","file_name":"background.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":24247,"dataset":"github-code","pt":"86"} +{"seq_id":"24692297724","text":"import argparse\n\nimport numpy as np\n\nimport os\n\nimport shutil\n\nimport torch\nimport torch.optim as optim\n\nfrom torch.utils.data import DataLoader\n\nfrom tqdm import tqdm\n\nimport warnings\n\nfrom lib.dataset import MegaDepthDataset\nfrom lib.exceptions import NoGradientError\nfrom lib.loss import loss_function\nfrom lib.model import D2Net\n\n\n# CUDA\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n\n# Seed\ntorch.manual_seed(1)\nif use_cuda:\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\n\n# Argument parsing\nparser = argparse.ArgumentParser(description='Training script')\n\nparser.add_argument(\n '--dataset_path', type=str, required=True,\n help='path to the dataset'\n)\nparser.add_argument(\n '--scene_info_path', type=str, required=True,\n help='path to the processed scenes'\n)\n\nparser.add_argument(\n '--preprocessing', type=str, default='caffe',\n help='image preprocessing (caffe or torch)'\n)\nparser.add_argument(\n '--model_file', type=str, default='models/d2_ots.pth',\n help='path to the full model'\n)\n\nparser.add_argument(\n '--num_epochs', type=int, default=10,\n help='number of training epochs'\n)\nparser.add_argument(\n '--lr', type=float, default=1e-3,\n help='initial learning rate'\n)\nparser.add_argument(\n '--batch_size', type=int, default=1,\n help='batch size'\n)\nparser.add_argument(\n 
'--num_workers', type=int, default=4,\n help='number of workers for data loading'\n)\n\nparser.add_argument(\n '--use_validation', dest='use_validation', action='store_true',\n help='use the validation split'\n)\nparser.set_defaults(use_validation=False)\n\nparser.add_argument(\n '--log_interval', type=int, default=250,\n help='loss logging interval'\n)\n\nparser.add_argument(\n '--log_file', type=str, default='log.txt',\n help='loss logging file'\n)\n\nparser.add_argument(\n '--plot', dest='plot', action='store_true',\n help='plot training pairs'\n)\nparser.set_defaults(plot=False)\n\nparser.add_argument(\n '--checkpoint_directory', type=str, default='checkpoints',\n help='directory for training checkpoints'\n)\nparser.add_argument(\n '--checkpoint_prefix', type=str, default='d2',\n help='prefix for training checkpoints'\n)\n\nargs = parser.parse_args()\n\nprint(args)\n\n# Create the folders for plotting if need be\nif args.plot:\n plot_path = 'train_vis'\n if os.path.isdir(plot_path):\n print('[Warning] Plotting directory already exists.')\n else:\n os.mkdir(plot_path)\n\n# Creating CNN model\nmodel = D2Net(\n model_file=args.model_file,\n use_cuda=use_cuda\n)\n\n# Optimizer\noptimizer = optim.Adam(\n filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr\n)\n\n# Dataset\nif args.use_validation:\n validation_dataset = MegaDepthDataset(\n scene_list_path='megadepth_utils/valid_scenes.txt',\n scene_info_path=args.scene_info_path,\n base_path=args.dataset_path,\n train=False,\n preprocessing=args.preprocessing,\n pairs_per_scene=25\n )\n validation_dataloader = DataLoader(\n validation_dataset,\n batch_size=args.batch_size,\n num_workers=args.num_workers\n )\n\ntraining_dataset = MegaDepthDataset(\n scene_list_path='megadepth_utils/train_scenes.txt',\n scene_info_path=args.scene_info_path,\n base_path=args.dataset_path,\n preprocessing=args.preprocessing\n)\ntraining_dataloader = DataLoader(\n training_dataset,\n batch_size=args.batch_size,\n num_workers=args.num_workers\n)\n\n\n# Define epoch function\ndef process_epoch(\n epoch_idx,\n model, loss_function, optimizer, dataloader, device,\n log_file, args, train=True\n):\n epoch_losses = []\n\n torch.set_grad_enabled(train)\n\n progress_bar = tqdm(enumerate(dataloader), total=len(dataloader))\n for batch_idx, batch in progress_bar:\n if train:\n optimizer.zero_grad()\n\n batch['train'] = train\n batch['epoch_idx'] = epoch_idx\n batch['batch_idx'] = batch_idx\n batch['batch_size'] = args.batch_size\n batch['preprocessing'] = args.preprocessing\n batch['log_interval'] = args.log_interval\n\n try:\n loss = loss_function(model, batch, device, plot=args.plot)\n except NoGradientError:\n continue\n\n current_loss = loss.data.cpu().numpy()[0]\n epoch_losses.append(current_loss)\n\n progress_bar.set_postfix(loss=('%.4f' % np.mean(epoch_losses)))\n\n if batch_idx % args.log_interval == 0:\n log_file.write('[%s] epoch %d - batch %d / %d - avg_loss: %f\\n' % (\n 'train' if train else 'valid',\n epoch_idx, batch_idx, len(dataloader), np.mean(epoch_losses)\n ))\n\n if train:\n loss.backward()\n optimizer.step()\n\n log_file.write('[%s] epoch %d - avg_loss: %f\\n' % (\n 'train' if train else 'valid',\n epoch_idx,\n np.mean(epoch_losses)\n ))\n log_file.flush()\n\n return np.mean(epoch_losses)\n\n\n# Create the checkpoint directory\nif os.path.isdir(args.checkpoint_directory):\n print('[Warning] Checkpoint directory already exists.')\nelse:\n os.mkdir(args.checkpoint_directory)\n \n\n# Open the log file for writing\nif 
os.path.exists(args.log_file):\n print('[Warning] Log file already exists.')\nlog_file = open(args.log_file, 'a+')\n\n# Initialize the history\ntrain_loss_history = []\nvalidation_loss_history = []\nif args.use_validation:\n validation_dataset.build_dataset()\n min_validation_loss = process_epoch(\n 0,\n model, loss_function, optimizer, validation_dataloader, device,\n log_file, args,\n train=False\n )\n\n# Start the training\nfor epoch_idx in range(1, args.num_epochs + 1):\n # Process epoch\n training_dataset.build_dataset()\n train_loss_history.append(\n process_epoch(\n epoch_idx,\n model, loss_function, optimizer, training_dataloader, device,\n log_file, args\n )\n )\n\n if args.use_validation:\n validation_loss_history.append(\n process_epoch(\n epoch_idx,\n model, loss_function, optimizer, validation_dataloader, device,\n log_file, args,\n train=False\n )\n )\n\n # Save the current checkpoint\n checkpoint_path = os.path.join(\n args.checkpoint_directory,\n '%s.%02d.pth' % (args.checkpoint_prefix, epoch_idx)\n )\n checkpoint = {\n 'args': args,\n 'epoch_idx': epoch_idx,\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'train_loss_history': train_loss_history,\n 'validation_loss_history': validation_loss_history\n }\n torch.save(checkpoint, checkpoint_path)\n if (\n args.use_validation and\n validation_loss_history[-1] < min_validation_loss\n ):\n min_validation_loss = validation_loss_history[-1]\n best_checkpoint_path = os.path.join(\n args.checkpoint_directory,\n '%s.best.pth' % args.checkpoint_prefix\n )\n shutil.copy(checkpoint_path, best_checkpoint_path)\n\n# Close the log file\nlog_file.close()\n","repo_name":"mihaidusmanu/d2-net","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7129,"program_lang":"python","lang":"en","doc_type":"code","stars":716,"dataset":"github-code","pt":"86"} +{"seq_id":"4017770216","text":"'''\nWrite a function rotate(ar[], d, n) that rotates arr[] of size n by d elements. 
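For example, rotate([1, 2, 3, 4, 5, 6, 7], 2, 7) gives [3, 4, 5, 6, 7, 1, 2] (a left rotation by d).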
\n'''\n\ndef rotate(ar, d, n):\n # store first d elements in temporary\n temp = ar[:d]\n \n # modify the given array\n ar = ar[d:]\n \n # append the temporary array to the end\n ar.extend(temp)\n return ar\n\ndef main():\n n = int(input(\"Enter number of elements: \"))\n ar = input(\"Enter {} elements separated by space: \".format(n))\n d = int(input(\"Enter number of elements by which to rotate: \"))\n \n ar = ar.split(' ')\n ar = rotate(ar, d, n)\n \n for ele in ar:\n print(ele, end=\" \")\n\nif __name__=='__main__':\n main()\n","repo_name":"banerjeesoumya15/DSA","sub_path":"array_rotation.py","file_name":"array_rotation.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"16767350591","text":"# -*- coding: utf-8 -*-\n\nimport wfdb\nimport matplotlib.pyplot as plt\n\n# record = wfdb.rdrecord('Person_01/rec_1') \n# wfdb.plot_wfdb(record=record)\n# print(record.__dict__)\n# Temps = [x[0] for x in list(record.__dict__[\"p_signal\"])]\n# Signal = [x[1] for x in list(record.__dict__[\"p_signal\"])]\n# print(len(Temps), len(Signal))\n\n# plt.plot(Temps, Signal)\n# plt.show()\n# plt.plot(Temps, SignalFiltre)\n# plt.show()\n\nimport os.path\nfrom os import path\nimport json\nimport names\n\ndef dop():\n \n PrenomsH = ['Lucas', 'Jules', 'Paul', 'Sacha', 'Tom', 'Martin', 'Enzo', 'Axel', 'Antoine', 'Valentin', 'Samuel', 'Maxence', 'Malo', 'Thomas', 'Oscar', 'William', 'Mike', 'Noah', 'Robert', 'Michel', 'Patrick', 'Alain', 'Didier', 'Hugo', 'Valentin', 'Alexandre', 'Nicolas', 'Benjamin', 'Matthieu', 'Samy', 'Marc', 'Hamza', 'Luc', 'Jacques', 'Philippe', 'Thibaut', 'Steven', 'Albert', 'Harry', 'Juan', 'Serge', 'Benoit', 'Zack', 'Edouard', 'Will', 'Tony', 'Damien', 'Henri', 'Stanislas', 'Yann']\n PrenomsF = ['Emma', 'Camille', 'Sarah', 'Alice', 'Eva', 'Clara', 'Manon', 'Jade', 'Lisa', 'Julie', 'Rose', 'Margot', 'Ambre', 'Claire', 'Lucile', 'Audrey', 'Coline', 'Candice', 'Laura', 'Anne', 'Jacqueline', 'Amy', 'Deborah', 'Enora', 'Flavie', 'Kate', 'Suzie', 'Marie', 'Agathe', 'Suzanne', 'Carla', 'Tess', 'Sophie', 'Fanny', 'Maud', 'Louane', 'Helena', 'Jeanne', 'Charlotte', 'Anais', 'Yasmine', 'Roxane', 'Nina', 'Billie', 'Margaux', 'Albane', 'Romane', 'Brigitte', 'Axelle', 'Ali']\n\n \n nbH = 0\n nbF = 0\n \n n=1\n for i in range(1,91):\n \n if i<10:\n i = \"0\"+str(i)\n else:\n i = str(i)\n j = 1\n \n infos = wfdb.rdsamp(\"ecg-id-database-1.0.0/Person_\"+str(i)+\"/rec_\"+str(j))[1]\n \n \n if infos[\"comments\"][1][5:]==\"male\":\n sexe = 'male'\n nom = PrenomsH.pop(0)\n nbH +=1\n else:\n sexe = 'female'\n nom = PrenomsF.pop(0)\n nbF +=1\n \n \n \n while path.exists(\"ecg-id-database-1.0.0/Person_\"+str(i)+\"/rec_\"+str(j)+\".dat\"):\n signaux, infos = wfdb.rdsamp(\"ecg-id-database-1.0.0/Person_\"+str(i)+\"/rec_\"+str(j))\n \n personID = str(i)\n age = infos[\"comments\"][0][5:]\n if infos[\"comments\"][1][5:]==\"male\":\n sexe = 'H'\n else:\n sexe = 'F'\n TempsEchantillonnage = 1/infos[\"fs\"]\n date = infos[\"comments\"][2][10:]\n \n Signal = [x[0] for x in signaux]\n SignalFiltre = [x[1] for x in signaux]\n Temps = [i*TempsEchantillonnage for i in range(len(Signal))]\n \n\n with open('enregistrements/ecg-id-database/'+str(n)+\".txt\", 'w') as fichier:\n fichier.write(\"# TIPE ECG 2021 - ECG-ID Database v1.0.0 par Physionet\\n\")\n fichier.write(\"# [N° de la personne, N° de l'enregistrement, Période d'échantillonnage, Prénom, Age, Sexe, Date de l'enregistrement]\\n\")\n 
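# line 3: the metadata values themselves, in the same order as the header line above\n                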
fichier.write(\"[\"+str(int(i))+\", \"+str(int(j))+\", \"+str(TempsEchantillonnage)+\", \\\"\"+nom+\"\\\", \"+age+\", \\\"\"+sexe+\"\\\", \\\"\"+date+\"\\\"]\\n\")\n                for y in Signal:\n                    fichier.write(str(y)+\"\\n\")\n            with open('enregistrements/ecg-id-database/signaux-deja-filtres-par-physionet/'+str(n)+\".txt\", 'w') as fichier:\n                fichier.write(\"# TIPE ECG 2021 - ECG-ID Database v1.0.0 par Physionet\\n\")\n                fichier.write(\"# [N° de la personne, N° de l'enregistrement, Période d'échantillonnage, Prénom, Age, Sexe, Date de l'enregistrement]\\n\")\n                fichier.write(\"[\"+str(int(i))+\", \"+str(int(j))+\", \"+str(TempsEchantillonnage)+\", \\\"\"+nom+\"\\\", \"+age+\", \\\"\"+sexe+\"\\\", \\\"\"+date+\"\\\"]\\n\")\n                for y in SignalFiltre:\n                    fichier.write(str(y)+\"\\n\")\n            \n            j = j+1\n            n = n+1\n    print(nbH)\n    print(nbF)\n    \nimport prenoms\n\ndef selection():\n    \n    PrenomsH = []\n    PrenomsF = []\n    \n    while len(PrenomsH)<50:\n        nom = input()\n        PrenomsH.append(str(nom))\n        print(\"Reste\",50-len(PrenomsH))\n    \n    while len(PrenomsF)<50:\n        nom = input()\n        PrenomsF.append(str(nom))\n        print(\"Reste\",50-len(PrenomsF))\n    \n    print(PrenomsH)\n    print(PrenomsF)\n","repo_name":"sheita/tipecg","sub_path":"ecgid-vers-txt.py","file_name":"ecgid-vers-txt.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"42314035857","text":"# -*- coding:utf-8 -*-\n\n\"\"\"\n有 n 位用户参加活动，他们的 ID 从 0 到 n - 1，每位用户都 恰好 属于某一用户组。给你一个长度为 n 的数组 groupSizes，其中包含每位用户所处的用户组的大小，请你返回用户分组情况（存在的用户组以及每个组中用户的 ID）。\n\n你可以任何顺序返回解决方案，ID 的顺序也不受限制。此外，题目给出的数据保证至少存在一种解决方案。\n\n \n\n示例 1：\n\n输入：groupSizes = [3,3,3,3,3,1,3]\n输出：[[5],[0,1,2],[3,4,6]]\n解释：\n其他可能的解决方案有 [[2,1,6],[5],[0,4,3]] 和 [[5],[0,6,2],[4,3,1]]。\n示例 2：\n\n输入：groupSizes = [2,1,3,3,3,2]\n输出：[[1],[0,5],[2,3,4]]\n \n\n提示：\n\ngroupSizes.length == n\n1 <= n <= 500\n1 <= groupSizes[i] <= n\n\nCPP:\nclass Solution {\npublic:\n    vector<vector<int>> groupThePeople(vector<int>& groupSizes) {\n        unordered_map<int, vector<int> > dict; // 键: 某个组的人数, 值: 用户索引数组\n        vector<vector<int>> res;\n\n        for(int i = 0; i < groupSizes.size(); i ++)\n        {\n            int k = groupSizes[i];\n            dict[k].push_back(i);\n            // 只要某个组人数满了就推到res中, 并清空这个数组重新装用户\n            if(dict[k].size() >= k)\n            {\n                res.push_back(dict[k]);\n                dict.erase(k);\n            }\n        }\n        return res;\n    }\n};\n\"\"\"\n\nfrom typing import List\n\nclass Solution:\n    def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:\n        n = len(groupSizes)\n        # 带上索引按小组人数排序\n        group = sorted([(i, groupSizes[i]) for i in range(n)], key = lambda x:x[1])\n        res = []\n        i = 0\n        while (i < n):\n            t = group[i][1]\n            res.append([group[i][0] for i in range(i, i + t)])\n            i += t\n        return res\n\ns = Solution()\nprint(s.groupThePeople([3,3,3,3,3,1,3]))","repo_name":"lovehhf/LeetCode","sub_path":"1282. 用户分组.py","file_name":"1282. 
用户分组.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"25637259808","text":"# use json\nfrom flask import Flask, escape, request\n\napp = Flask(__name__)\n\nDB = {\"students\": [],\n      \"classes\": []\n      }\n\n\n@app.route('/')\ndef hello():\n    name = request.args.get(\"name\", \"World\")\n    return f'Hello, {escape(name)}!'\n\n\nstudentId = 1234456\n# Student POST\n@app.route('/students', methods=['POST'])\ndef create_student():\n    req = request.json\n    studentName = req[\"name\"]\n    global studentId\n    studentInfo = {\n        \"id\": studentId,\n        \"name\": studentName\n    }\n    DB[\"students\"].append(studentInfo)\n    studentId = studentId + 1\n    return studentInfo, 201\n\n# Student GET\n@app.route('/students/<id>', methods=['GET'])\ndef get_student(id):\n    for s in DB[\"students\"]:\n        if s['id'] == int(id):\n            return s, 200\n    return \"Student not found\", 404\n\n\n# class POST\nclassId = 1122334\n@app.route('/classes', methods=['POST'])\ndef create_class():\n    global classId\n    req = request.json\n    className = req[\"name\"]\n    classInfo = {\n        \"id\": classId,\n        \"name\": className,\n        \"students\": []\n    }\n    DB[\"classes\"].append(classInfo)\n    classId = classId + 1\n    return classInfo, 201\n\n# class GET\n@app.route('/classes/<id>', methods=['GET'])\ndef get_class(id):\n    for c in DB[\"classes\"]:\n        if c[\"id\"] == int(id):\n            return c, 200\n    return \"Class not found\", 404\n\n# Patch: Add student to a class\n@app.route('/classes/<classId>', methods=['PATCH'])\ndef add_student(classId):\n    req = request.json\n    studentId = req[\"student_id\"]\n    studentInfo = None\n    # get student info\n    for s in DB[\"students\"]:\n        if s['id'] == int(studentId):\n            studentInfo = s\n            break\n    if studentInfo is None:\n        return \"Student not found\", 404\n    # locate class in DB and patch\n    for i in range(len(DB[\"classes\"])):\n        if DB[\"classes\"][i][\"id\"] == int(classId):\n            DB[\"classes\"][i][\"students\"].append(studentInfo)\n            return DB[\"classes\"][i]\n    return \"Class not found\", 404\n\n\n@app.route('/printall')\ndef printall():\n    return DB\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","repo_name":"Roger-Yao8126/cmpe273_20spring","sub_path":"cmpe273-lab2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"39468820107","text":"#원근변환\n\n#원근 변환 행렬 구하기\n#cv2.getPerspectiveTransform(변환 전 좌표4개,변환후좌표4개)\n#원근변환적용\n#cv2.warpPerspective(입력이미지,변환행렬,출력이미지크기)\n\n\nimport cv2\nimport numpy as np\n\norg_image=cv2.imread(\"namecard.jpg\")\ncv2.imshow(\"org\",org_image)\n\nnum_pt=0\npt_before=[]\ntemp_image=org_image.copy()\n\ndef getPoints(event,x,y,flags,param):\n    global num_pt,pt_before,temp_image\n    if event==cv2.EVENT_LBUTTONDOWN:\n        if num_pt!=4:\n            pt_before.append((x,y)) #pt_before은 배열형태 그러므로 append((x,y)) -> [(x,y),(x,y),(x,y)...] 
\n num_pt=num_pt+1\n cv2.circle(temp_image,(x,y),3,(255,0,255),2)\n cv2.imshow(\"org\",temp_image)\n else:\n num_pt=0\n temp_image=org_image.copy()\n cv2.imshow(\"org\",temp_image)\n namecarWarp(pt_before)\n pt_before=[]\n\n#두점사이의 거리공식 이용하기 ((x1-x2)**2+(y1-y2)**2)**0.5\ndef namecarWarp(ptd1): \n width=((ptd1[0][0]-ptd1[1][0])**2+(ptd1[0][1]-ptd1[1][1])**2)**0.5\n width=int(width) #integer형으로 변환\n height=int(width*2/3.5)\n\n ptd2=[(0,0),(width,0),(width,height),(0,height)]\n ptd1_m=np.array(ptd1,np.float32)\n ptd2_m=np.array(ptd2,np.float32)\n\n per_mat=cv2.getPerspectiveTransform(ptd1_m,ptd2_m)\n new_image=cv2.warpPerspective(org_image,per_mat,(width,height))\n cv2.imshow(\"new\",new_image)\n\ncv2.setMouseCallback(\"org\",getPoints)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"Sim-918/learning","sub_path":"digital/PerspectiveTrans.py","file_name":"PerspectiveTrans.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"34234693545","text":"import multiprocessing\nimport time\nfrom pandas import read_csv, concat, DataFrame\nfrom time import sleep\nimport requests\nimport xml.etree.ElementTree as ET\nfrom queue import Queue\n\nprint('Creating database...')\n\ntp = read_csv('merged2010-2016.txt', header=0, sep='\\t', iterator=True, chunksize=10000, encoding='cp1252', error_bad_lines=False)\ndb = concat(tp, ignore_index=True)\ndb = DataFrame([db['PI_NAMEs'], db['ORG_NAME'], db['ACTIVITY'], db['FY']])\ndb = db.T\ndb = db.dropna(axis=0,how='all')\n\n#print('Creating namelists...')\nnames = read_csv('800_names.txt', header = None, sep = '\\t', encoding='cp1252')\nnames = names.unstack()\n\n# Splitting names into eight parts\nfirst = round(len(names)/8)\nsecond = 2 * first\nthird = 3 * first\nfourth = 4 * first\nfifth = 5 * first\nsixth = 6 * first\nseventh = 7 * first\nnames1 = names[:first]\nnames2 = names[first:second]\nnames3 = names[second:third]\nnames4 = names[third:fourth]\nnames5 = names[fourth:fifth]\nnames6 = names[fifth:sixth]\nnames7 = names[sixth:seventh]\nnames8 = names[seventh:]\nnames = None\n\nout = multiprocessing.Queue()\n\ndef dump_queue(queue):\n print('Starting dump...')\n \"\"\"\n Empties all pending items in a queue and returns them in a list.\n \"\"\"\n result = dict()\n\n for i in iter(queue.get):\n result.update(i)\n sleep(.1)\n print('Writing output file...')\n filename = 'predictions.txt'\n print(len(result))\n output = DataFrame(data = result)\n output.to_csv(filename, sep = '\\t')\n\ndef fetch_count(name, type, journal = ''):\n try:\n if journal == '':\n journalVar = ''\n else:\n journalVar = str(journal) + '[journal]'\n name = name.split(',')\n lastName = name[0]\n lastName = lastName.strip()\n firstName = name[1]\n firstName = firstName.strip()\n firstName = firstName[:1]\n name = str(lastName) + ' ' + str(firstName)\n URL = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=' + str(name) + str(type) + '+AND+' + str(journalVar)\n request = requests.get(URL)\n result = request.content\n root = ET.fromstring(result)\n tag = 'Count'\n count = root.find(tag).text\n return count\n except:\n return 0\n\ndef writeFile(output, threadNO):\n print('Writing output file...')\n filename = 'predictions_' + str(threadNO) + '.txt'\n output = DataFrame.from_dict(data = output, orient = 'index')\n output.to_csv(filename, sep = '\\t')\n\ndef filter(names, threadNO, db = db):\n print('Started thread no. 
' + str(threadNO))\n print('Thread ' + str(threadNO) + ' prefiltering names.')\n turn = len(names) + 1\n filtered = set()\n for name in names:\n turn -= 1\n print('Thread ' + str(threadNO) + ' ' + str(turn) + ' name(s) left to prefilter.') \n year = 2016\n for n,i in enumerate(db['PI_NAMEs']):\n if name in str(i):\n if int(db['FY'].iloc[n]) < year:\n year = int(db['FY'].iloc[n])\n if 2009 < year < 2013:\n filtered.add(name)\n filtered = list(filtered)\n predictList(filtered, threadNO)\n\ndef stats(name, db = db): \n #result = dict()\n values = []\n total = fetch_count(name, '[Author]')\n #sleep(1)\n total_last = fetch_count(name, '[Author - Last]')\n #sleep(1)\n nature_first = fetch_count(name, '[Author - First]', \"nature\")\n #sleep(1)\n nature_last = fetch_count(name, '[Author - Last]', \"nature\")\n #sleep(1)\n biotech_first = fetch_count(name, '[Author - First]', \"nature biotechnology\")\n #sleep(1)\n biotech_last = fetch_count(name, '[Author - Last]', \"nature biotechnology\")\n #sleep(1)\n cell_first = fetch_count(name, '[Author - First]', \"cell\")\n #sleep(1)\n cell_last = fetch_count(name, '[Author - Last]', \"cell\")\n #sleep(1)\n nejm_first = fetch_count(name, '[Author - First]', \"The New England journal of medicine\")\n #sleep(1)\n nejm_last = fetch_count(name, '[Author - Last]', \"The New England journal of medicine\")\n #sleep(1)\n science_first = fetch_count(name, '[Author - First]', \"science\")\n #sleep(1)\n science_last = fetch_count(name, '[Author - Last]', \"science\")\n #sleep(1)\n jama_first = fetch_count(name, '[Author - First]', \"Journal of the American Medical Association\")\n #sleep(1)\n jama_last = fetch_count(name, '[Author - Last]', \"Journal of the American Medical Association\")\n #sleep(1)\n noProjects = 0\n RO1titles = 0\n orgs = set()\n for n,i in enumerate(db['PI_NAMEs']): \n if name in str(i):\n noProjects += 1\n orgs.add(str(db['ORG_NAME'].iloc[n]))\n if str(db['ACTIVITY'].iloc[n]) == 'R01':\n RO1titles += 1\n orgs = list(orgs)\n highImpact = sum([int(nature_first), int(nature_last), int(biotech_first), int(biotech_last), int(cell_first), int(cell_last), int(nejm_first), int(nejm_last), int(science_first), int(science_last), int(jama_first), int(jama_last)])\n values.append(orgs)\n #values.append(year)\n values.append(noProjects)\n values.append(RO1titles)\n values.append(highImpact)\n values.append(total_last)\n values.append(total)\n return values\n\n\ndef predictList(names, threadNO, out = out):\n print('Started processing names in thread no. ' + str(threadNO))\n print(str(len(names)) + ' names in total in thread no. 
' + str(threadNO))\n outDict = dict()\n turn = len(names) + 1\n for name in names:\n turn -= 1\n print('Thread ' + str(threadNO) + ' ' + str(turn) + ' name(s) left to process.')\n data = stats(name)\n total = int(data[5])\n if total != 0 :\n ratio1 = int(data[4])/total \n ratio2 = int(data[3])/total\n ratio3 = int(data[2])/int(data[1])\n if ratio1 > 0.25 and ratio2 > 0.15 and ratio3 > 0.25 :\n outDict.update({name: data[0]})\n return outDict\n #writeFile(outDict, threadNO)\n print('Thread ' + str(threadNO) + ' finished.')\n #results = DataFrame.from_dict(data = dataDict, orient = 'index')\n #writeFile(results, threadNo)\n\nif __name__ == '__main__':\n q = Queue()\n worker_1 = multiprocessing.Process(target=q.put, args=(filter(names1, 1),))\n worker_2 = multiprocessing.Process(target=q.put, args=(filter(names2, 2),))\n worker_3 = multiprocessing.Process(target=q.put, args=(filter(names3, 3),))\n worker_4 = multiprocessing.Process(target=q.put, args=(filter(names4, 4),))\n worker_5 = multiprocessing.Process(target=q.put, args=(filter(names5, 5),))\n worker_6 = multiprocessing.Process(target=q.put, args=(filter(names6, 6),))\n worker_7 = multiprocessing.Process(target=q.put, args=(filter(names7, 7),))\n worker_8 = multiprocessing.Process(target=q.put, args=(filter(names8, 8),))\n print('Starting threads')\n worker_1.start()\n worker_2.start()\n worker_3.start()\n worker_4.start()\n worker_5.start()\n worker_6.start()\n worker_7.start()\n worker_8.start()\n db = None\n worker_1.join()\n worker_2.join()\n worker_3.join()\n worker_4.join()\n worker_5.join()\n worker_6.join()\n worker_7.join()\n worker_8.join()\n #out.put('STOP')\n dump_queue(q)\n\n\n\n\n\n","repo_name":"Karl-Marka/data-mining","sub_path":"business-intelligence/predict_PIs_8_thread.py","file_name":"predict_PIs_8_thread.py","file_ext":"py","file_size_in_byte":7211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"39468820107","text":"#!/usr/bin/env python3\n# encoding: utf-8\n\n\"\"\"\n@Filename: findLongestWord.py\n@Function: 通过删除字母匹配到字典里最长单词 Ⅱ 双指针策略\n@Link: https://leetcode-cn.com/problems/longest-word-in-dictionary-through-deleting/\n@Python Version: 3.8\n@Author: Wei Li\n@Date:2021-07-08\n\"\"\"\n\nclass Solution:\n def findLongestWord(self, string, dictionary):\n ## 用好 python 内置函数 sort()、find(),比双指针效率更高\n ## 可以用元组表示多关键字排序,第一关键字是长度降序,第二关键字是字符串本身字典序\n dictionary.sort(key = lambda x: (-len(x), x))\n\n for word in dictionary:\n index = 0\n for ch in word:\n index = string.find(ch, index) + 1 # find 输出 -1:False\n if not index:\n break\n else: # 这里用 else 语句保证 break 之后不会执行,正常循环结束会执行\n return word\n return \"\"\n\n\n# -------------------------\nif __name__ == \"__main__\":\n s = \"abpcplea\"\n # dictionary = [\"ale\", \"apple\", \"monkey\", \"plea\"]\n dictionary = [\"a\", \"b\", \"c\"]\n\n solution = Solution()\n longest_word = solution.findLongestWord(string=s, dictionary=dictionary)\n print(f\"The solution of this problem is : {longest_word}\")","repo_name":"2694048168/LeetCodeAlgorithm","sub_path":"Python/findLongestWord.py","file_name":"findLongestWord.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"86"} +{"seq_id":"27889091031","text":"import os\nimport functools\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QCheckBox\n\nfrom src.loupe_control import _control_main\n\n\nclass roiClass():\n def __init__(self):\n self.humanReadableName = ''\n self.dictName = ''\n 
self.checkboxWidget = None\n self.checkboxWidgetRoi = None\n self.checkboxWidgetCosmic = None\n self.color = '#ffffff' #white, '#00f2ff' #cyan\n self.specIndexList = []\n\n def defineFull(self, roiName, workspace):\n self.dictName = workspace.dictName+'_'+roiName\n self.humanReadableName = roiName\n self.checkboxWidget = QCheckBox(roiName)\n # checkboxWidgetRoi is for the map tab\n self.checkboxWidgetRoi = QCheckBox(roiName)\n self.checkboxWidgetCosmic = QCheckBox(roiName)\n #self.checkboxWidget.clicked.connect(functools.partial(_control_main._roiSelectMainUpdate, self))\n self.color = '#ffffff'\n self.specIndexList = list(range(workspace.nSpectra))\n\n def defineSelectedRoi(self, roiName, workspace, indexList, color = '#ffffff'):\n self.dictName = workspace.dictName+'_'+roiName\n self.humanReadableName = roiName\n self.checkboxWidget = QCheckBox(roiName)\n self.checkboxWidgetRoi = QCheckBox(roiName)\n self.checkboxWidgetCosmic = QCheckBox(roiName)\n #self.checkboxWidget.clicked.connect(functools.partial(_control_main._roiSelectMainUpdate, self))\n self.color = color\n self.specIndexList = indexList\n\n","repo_name":"nasa/Loupe","sub_path":"src/main/python/src/roi_class.py","file_name":"roi_class.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"27905214948","text":"import argparse\nimport time\nimport json\n\nfrom lib.inventory import Inventory\nimport lib.logger as logger\nimport lib.bmc as _bmc\n\n\ndef set_bootdev_clients(bootdev, persist=False, config_path=None, clients=None,\n max_attempts=5):\n \"\"\"Set boot device for multiple clients. If a list of ip addresses\n are given they are assumed to be PXE addresses. Corresponding BMC addresses\n are looked up in inventory file corresponding to the config file given in\n the config_path. Similarly if no client list is given, all BMCs enumerated in\n the inventory file corresponding to the config file specified in config_path\n will be acted on. If clients is a dictionary, then the credentials are\n taken from the dictionary values.\n\n Args:\n state (str) : 'on' or 'off'\n config_path (str): path to a config file\n clients (dict or list of str): list of IP addresses or\n dict of ip addresses with values of credentials as tuple\n ie {'192.168.1.2': ('user', 'password', 'bmc_type')}\n \"\"\"\n log = logger.getlogger()\n if config_path:\n inv = Inventory(cfg_file=config_path)\n\n if type(persist) is not bool:\n persist = (persist == 'True')\n\n def _get_cred_list(client_list=None):\n \"\"\"Returns dict with values of tuples or list. Each tuple/list\n has the credentials for a node (userid, password, bmc_type).\n If no client list is passed, all nodes are returned\n Args:\n client_list (list of str): each list item is an ipv4 address\n \"\"\"\n cred_list = {}\n for index, hostname in enumerate(inv.yield_nodes_hostname()):\n ipv4 = inv.get_nodes_ipmi_ipaddr(0, index)\n if client_list and ipv4 not in client_list:\n continue\n userid = inv.get_nodes_ipmi_userid(index)\n password = inv.get_nodes_ipmi_password(index)\n bmc_type = inv.get_nodes_bmc_type(index)\n cred_list[ipv4] = (userid, password, bmc_type)\n return cred_list\n\n # if client list passed, it is assumed to be pxe addresses which\n # are used to look up the associated bmc addresses for the node.\n # otherwise use the entire ipmi inventory list. 
This allows a\n # subset of nodes to have their bootdev updated during install\n if isinstance(clients, list):\n # Get corresponing ipmi addresses\n _clients = []\n for index, hostname in enumerate(inv.yield_nodes_hostname()):\n ipv4_ipmi = inv.get_nodes_ipmi_ipaddr(0, index)\n ipv4_pxe = inv.get_nodes_pxe_ipaddr(0, index)\n if ipv4_pxe is not None and ipv4_pxe in clients:\n _clients.append(ipv4_ipmi)\n if not clients:\n log.debug('Retrieving IPMI address list from inventory')\n clients = inv.get_nodes_ipmi_ipaddr(0)\n _clients = clients[:]\n\n if isinstance(clients, list):\n log.debug('Retrieving client credentials from inventory')\n cred_list = _get_cred_list(_clients)\n else:\n # insure cred info in tuple\n cred_list = {}\n for client in clients:\n cred_list[client] = tuple(clients[client])\n\n clients_left = list(cred_list.keys())\n attempt = 0\n clients_left.sort()\n while clients_left and attempt < max_attempts:\n attempt += 1\n if attempt > 1:\n log.info('Retrying set bootdev. Attempt {} of {}'.format(attempt, max_attempts))\n log.info('Clients remaining: {}'.format(clients_left))\n clients_set = []\n bmc_dict = {}\n for client in clients_left:\n for i in range(3):\n tmp = _bmc.Bmc(client, *cred_list[client])\n if tmp.is_connected():\n bmc_dict[client] = tmp\n break\n else:\n log.debug(f'Failed BMC login attempt {i + 1} BMC: {client}')\n if i > 0:\n log.info(f'BMC login attempt {i + 1} BMC: {client}')\n if attempt == max_attempts and i == 2:\n log.error(f'Failed BMC login. BMC: {client}')\n time.sleep(1)\n del tmp\n\n for client in clients_left:\n if client in bmc_dict:\n log.debug(f'Setting boot device to {bootdev}. '\n f'Device: {client}')\n if bootdev in ('setup'):\n status = bmc_dict[client].host_boot_mode(bootdev)\n else:\n status = bmc_dict[client].host_boot_source(bootdev)\n log.debug(f'status1 from set bootdev: {status}')\n\n if status:\n if attempt in [2, 4, 8]:\n log.info(f'{client} - Boot source: {status} Required source: '\n f'{bootdev}')\n elif attempt == max_attempts:\n log.error(f'Failed attempt {attempt} set boot source {bootdev} '\n f'for node {client}')\n\n time.sleep(1 + attempt)\n\n for client in clients_left:\n if client in bmc_dict:\n if bootdev in ('setup'):\n status = bmc_dict[client].host_boot_mode()\n else:\n status = bmc_dict[client].host_boot_source()\n log.debug(f'status2 from set bootdev: {status}')\n\n if status:\n if attempt in [2, 4, 8]:\n log.info(f'{client} - Boot source: {bootdev}')\n if status == bootdev:\n log.debug(f'Successfully set boot source to {bootdev} for '\n f'node {client}')\n clients_set += [client]\n elif attempt == max_attempts:\n log.error(f'Failed attempt {attempt} set host boot source to'\n f'{bootdev} for node {client}')\n\n bmc_dict[client].logout()\n\n for client in clients_set:\n clients_left.remove(client)\n\n if attempt == max_attempts and clients_left:\n log.error('Failed to set boot device for some clients')\n log.debug(clients_left)\n\n del bmc_dict\n log.info('Set boot device to {} on {} of {} client devices.'\n .format(bootdev, len(cred_list) - len(clients_left),\n len(cred_list)))\n\n\nif __name__ == '__main__':\n \"\"\"\n \"\"\"\n logger.create()\n\n parser = argparse.ArgumentParser()\n parser.add_argument('bootdev', choices=['default', 'network', 'disk', 'setup'],\n help='Boot device. 
ie network or none...')\n\n parser.add_argument('config_path', default='',\n help='Path to a Power-Up config file')\n\n parser.add_argument('clients', default='',\n help='dict of ip addresses with credentials in list.\\n'\n 'in json format: {\"192.168.30.21\": [\"root\", \"0penBmc\", \"openbmc\"]}')\n\n parser.add_argument('max_attempts', default=2, nargs='*',\n help='Max number of login / power attempts')\n\n parser.add_argument('--persist', action='store_true', default=False,\n help='Persist this boot device setting.')\n\n parser.add_argument('--print', '-p', dest='log_lvl_print',\n help='print log level', default='info')\n\n parser.add_argument('--file', '-f', dest='log_lvl_file',\n help='file log level', default='info')\n\n args = parser.parse_args()\n\n if args.log_lvl_print == 'debug':\n print(args)\n\n if args.clients:\n _clients = json.loads(args.clients)\n else:\n _clients = ''\n\n set_bootdev_clients(args.bootdev, args.persist, args.config_path,\n _clients, max_attempts=args.max_attempts)\n","repo_name":"IBM/power-up","sub_path":"scripts/python/set_bootdev_clients.py","file_name":"set_bootdev_clients.py","file_ext":"py","file_size_in_byte":7870,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"86"} +{"seq_id":"27465254629","text":"word = input()\nwordArr = []\ncorrect = 0\nwhile True:\n guess = input()\n if guess == \"0\": break\n if guess not in wordArr:\n for i in word:\n if i == guess:\n correct += 1\n wordArr.append(guess)\nprint(f\"{correct}/{len(word)}\")\n","repo_name":"prokittikun/python-lab","sub_path":"lab11/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"6756921836","text":"import math\r\nimport pygame\r\nfrom random import choice\r\nfrom player import FirstShipPlayer, SecondShipPlayer, ThirdShipPlayer\r\nfrom enemies import DemonZombieEnemy, CacoDemonEnemy, UberDemonEnemy, SatanEnemy\r\nfrom particles import Bubble, Fire, Flesh\r\nfrom data_controller import DataController\r\nimport music_controller\r\nimport settings\r\n\r\n\r\npygame.mixer.pre_init(44100, -16, 4, 2**12)\r\n\r\n\r\nclass Menu:\r\n def __init__(self, size):\r\n global dataController\r\n\r\n self.width, self.height = size\r\n self.bg = pygame.image.load('assets/sprites/bg/menu.jpg')\r\n self.shop_page = False\r\n self.profile_page = False\r\n self.game_start = False\r\n self.game_start_delay_start = 60\r\n self.game_start_delay = 60\r\n self.shopShipsById = {}\r\n for elem in dataController.ships:\r\n image = pygame.transform.scale(pygame.image.load(elem['sprite']), (100, 100))\r\n self.shopShipsById[str(elem['id'])] = image\r\n self.sounds = {\r\n 'open': pygame.mixer.Sound('assets/sounds/menu/open.wav'),\r\n 'close': pygame.mixer.Sound('assets/sounds/menu/close.wav'),\r\n 'tick': pygame.mixer.Sound('assets/sounds/menu/tick.wav'),\r\n 'start': pygame.mixer.Sound('assets/sounds/menu/start.wav')\r\n }\r\n\r\n def handleEvents(self, events):\r\n global running\r\n\r\n for event in events:\r\n if event.type == pygame.QUIT:\r\n running = False\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n if self.game_start:\r\n pass\r\n elif self.shop_page:\r\n self._handleShopPageClickEvent(event)\r\n elif self.profile_page:\r\n self._handleProfilePageClickEvent(event)\r\n else:\r\n self._handleTitlePageClickEvent(event)\r\n\r\n def _handleTitlePageClickEvent(self, event):\r\n global game\r\n\r\n if event.pos[1] < 
self.height - 100:\r\n game.spawnPlayer()\r\n self.game_start = True\r\n music_controller.stop_music()\r\n self.sounds['start'].play()\r\n elif event.pos[0] in range(self.width - 260, self.width - 60) and event.pos[1] in range(self.height - 70, self.height - 20):\r\n self.profile_page = True\r\n self.shop_page = False\r\n self.sounds['open'].play()\r\n elif event.pos[0] in range(60, 260) and event.pos[1] in range(self.height - 70, self.height - 20):\r\n self.profile_page = False\r\n self.shop_page = True\r\n self.sounds['open'].play()\r\n\r\n def _handleShopPageClickEvent(self, event):\r\n global dataController\r\n\r\n vault = dataController.personal['powerPointsTotalCollected'] - dataController.personal['powerPointsSold']\r\n if event.pos[0] in range(100, 100 + 400) and event.pos[1] in range(self.height - 70, self.height - 70 + 50):\r\n self.profile_page = False\r\n self.shop_page = False\r\n self.sounds['close'].play()\r\n else:\r\n ships = dataController.ships\r\n for i in range(len(ships)):\r\n if event.pos[0] in range(240, 500) and event.pos[1] in range(160 * (i + 1) + 20, 160 * (i + 1) + 80):\r\n if ships[i]['isBought'] is False:\r\n if vault >= ships[i]['cost']:\r\n dataController.personal['powerPointsSold'] += ships[i]['cost']\r\n dataController.ships[i]['isBought'] = True\r\n self.sounds['tick'].play()\r\n elif not ships[i]['isUsing']:\r\n for j in range(len(ships)):\r\n dataController.ships[j]['isUsing'] = False\r\n dataController.ships[i]['isUsing'] = True\r\n self.sounds['tick'].play()\r\n break\r\n\r\n def _handleProfilePageClickEvent(self, event):\r\n if event.pos[0] in range(100, 100 + 400) and event.pos[1] in range(self.height - 70, self.height - 70 + 50):\r\n self.profile_page = False\r\n self.shop_page = False\r\n self.sounds['close'].play()\r\n\r\n def render(self):\r\n global gameState\r\n\r\n if self.game_start:\r\n if self.game_start_delay < 0:\r\n music_controller.play_game()\r\n gameState = {\r\n 'menu': False,\r\n 'isPlaying': True,\r\n 'over': False\r\n }\r\n self.game_start_delay = self.game_start_delay_start\r\n self.game_start = False\r\n else:\r\n self.game_start_delay -= 1\r\n self._renderTitlePage()\r\n blackout = pygame.Surface((self.width, self.height))\r\n blackout.set_alpha(math.floor(255 - 255 * self.game_start_delay / self.game_start_delay_start))\r\n screen.blit(blackout, (0, 0))\r\n elif self.shop_page:\r\n self._renderShopPage()\r\n elif self.profile_page:\r\n self._renderProfilePage()\r\n else:\r\n self._renderTitlePage()\r\n\r\n def _renderTitlePage(self):\r\n screen.blit(self.bg, (0, 0))\r\n titleFont = pygame.font.Font(None, 64)\r\n title = titleFont.render('Underwater Division', True, (0, 180, 0))\r\n descFont = pygame.font.Font(None, 36)\r\n desc = descFont.render('Нажмите, что-бы начать', True, (255, 255, 255))\r\n screen.blit(title, (self.width // 2 - title.get_width() // 2, 190))\r\n screen.blit(desc, (self.width // 2 - desc.get_width() // 2, 200 + title.get_height()))\r\n\r\n shopFont = pygame.font.Font(None, 40)\r\n shop = shopFont.render('Магазин', True, (255, 255, 255))\r\n pygame.draw.rect(screen, pygame.Color(0, 180, 0), (60, self.height - 70, 200, 50))\r\n screen.blit(shop, (80, self.height - 60))\r\n\r\n profileFont = pygame.font.Font(None, 40)\r\n profile = profileFont.render('Статистика', True, (255, 255, 255))\r\n pygame.draw.rect(screen, pygame.Color(0, 180, 0), (self.width - 260, self.height - 70, 200, 50))\r\n screen.blit(profile, (self.width - profile.get_width() - 80, self.height - 60))\r\n\r\n def _renderShopPage(self):\r\n 
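# shop page: current power-point balance plus one buy/use row per ship and a back button\r\n        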
global dataController\r\n\r\n screen.blit(self.bg, (0, 0))\r\n\r\n vault = dataController.personal['powerPointsTotalCollected'] - dataController.personal['powerPointsSold']\r\n\r\n vaultFont = pygame.font.Font(None, 32)\r\n vaultText = vaultFont.render(f'Очков силы: {vault}', True, (180, 180, 0))\r\n\r\n titleFont = pygame.font.Font(None, 72)\r\n title = titleFont.render('Магазин', True, (255, 255, 255))\r\n screen.blit(title, (self.width // 2 - title.get_width() // 2, 40))\r\n\r\n screen.blit(vaultText, (self.width // 2 - vaultText.get_width() // 2, 60 + title.get_height()))\r\n\r\n ships = dataController.ships\r\n for i in range(len(ships)):\r\n screen.blit(self.shopShipsById[str(ships[i]['id'])], (100, 160 * (i + 1)))\r\n if ships[i]['isBought'] is False:\r\n color = pygame.Color(180, 180, 0)\r\n text = f'Купить за {ships[i][\"cost\"]} очк.'\r\n elif ships[i]['isUsing']:\r\n color = pygame.Color(180, 0, 180)\r\n text = 'Используется'\r\n else:\r\n color = pygame.Color(0, 180, 0)\r\n text = 'Использовать'\r\n pygame.draw.rect(screen, color, (240, 160 * (i + 1) + 20, 260, 60))\r\n pygame.draw.rect(screen, (0, 0, 0), (240, 160 * (i + 1) + 20, 260, 60), 2)\r\n textFont = pygame.font.Font(None, 30)\r\n textText = textFont.render(text, True, (255, 255, 255))\r\n screen.blit(textText, (260, 160 * (i + 1) + 40))\r\n menuFont = pygame.font.Font(None, 40)\r\n menu = menuFont.render('Обратно', True, (255, 255, 255))\r\n pygame.draw.rect(screen, pygame.Color(0, 180, 0), (100, self.height - 70, 400, 50))\r\n screen.blit(menu, (self.width // 2 - menu.get_width() // 2, self.height - 60))\r\n\r\n def _renderProfilePage(self):\r\n global dataController\r\n\r\n screen.blit(self.bg, (0, 0))\r\n titleFont = pygame.font.Font(None, 72)\r\n title = titleFont.render('Личная статистика', True, (255, 255, 255))\r\n screen.blit(title, (self.width // 2 - title.get_width() // 2, 200))\r\n\r\n totalPowerPointsFont = pygame.font.Font(None, 30)\r\n totalPowerPoints = totalPowerPointsFont.render(f'Очков силы собрано за всё время: {dataController.personal[\"powerPointsTotalCollected\"]}', True, (255, 255, 255))\r\n screen.blit(totalPowerPoints, (50, 280))\r\n\r\n maxPowerPointsFont = pygame.font.Font(None, 30)\r\n maxPowerPoints = maxPowerPointsFont.render(f'Максимум очков силы собрано за раунд: {dataController.personal[\"maxPowerPointsCollected\"]}', True, (255, 255, 255))\r\n screen.blit(maxPowerPoints, (50, 330))\r\n\r\n powerPointsSoldFont = pygame.font.Font(None, 30)\r\n powerPointsSold = powerPointsSoldFont.render(f'Потрачено очков силы: {dataController.personal[\"powerPointsSold\"]}', True, (255, 255, 255))\r\n screen.blit(powerPointsSold, (50, 380))\r\n\r\n menuFont = pygame.font.Font(None, 40)\r\n menu = menuFont.render('Обратно', True, (255, 255, 255))\r\n pygame.draw.rect(screen, pygame.Color(0, 180, 0), (100, self.height - 70, 400, 50))\r\n screen.blit(menu, (self.width // 2 - menu.get_width() // 2, self.height - 60))\r\n\r\n\r\nclass Game:\r\n def __init__(self, size):\r\n self.enemy_delay_down_delay = settings.ENEMY_DELAY_LOWER_DELAY\r\n self.default_enemy_delay = settings.ENEMY_DELAY\r\n self.enemy_delay = settings.ENEMY_DELAY\r\n self.score = 100000\r\n self.state = {\r\n 'mouseCords': (0, 0),\r\n 'mousePressed': False\r\n }\r\n self.bg = pygame.image.load('assets/sprites/bg/game.jpg')\r\n self.width, self.height = size\r\n self.spawnPlayer()\r\n self.bullet_delay = 0\r\n self.enemies = []\r\n self.bullets = []\r\n self.particles = {\r\n 'bubbles': [],\r\n 'fire': [],\r\n 'flesh': []\r\n }\r\n 
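# sound effects for game events, loaded once when the Game object is created\r\n        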
self.sounds = {\r\n 'bullet_collide_with_enemies': [\r\n pygame.mixer.Sound('assets/sounds/bullet_collide_with_flesh/1.wav'),\r\n pygame.mixer.Sound('assets/sounds/bullet_collide_with_flesh/2.wav')\r\n ],\r\n 'demon_killed': [\r\n pygame.mixer.Sound('assets/sounds/demon_killed/1.wav'),\r\n pygame.mixer.Sound('assets/sounds/demon_killed/2.wav')\r\n ],\r\n 'boss_awaken': [\r\n pygame.mixer.Sound('assets/sounds/boss_awaken/uber.wav'),\r\n pygame.mixer.Sound('assets/sounds/boss_awaken/satan.wav')\r\n ],\r\n 'open': pygame.mixer.Sound('assets/sounds/menu/open.wav'),\r\n 'start': pygame.mixer.Sound('assets/sounds/menu/start.wav')\r\n }\r\n self.game_over_delay_start = 60\r\n self.game_over_delay = self.game_over_delay_start\r\n self.sounds['boss_awaken'][1].set_volume(1.5)\r\n self.player_dead = False\r\n\r\n def spawnEnemy(self):\r\n spawnByLvlChance = {\r\n '1': ['zombie'],\r\n '2': ['zombie', 'zombie', 'zombie', 'zombie', 'caco'],\r\n '3': ['zombie', 'zombie', 'caco', 'caco', 'caco'],\r\n '4': ['zombie', 'zombie', 'caco', 'caco', 'caco', 'caco', 'uber'],\r\n '5': ['zombie', 'caco', 'caco', 'caco', 'caco', 'uber', 'uber'],\r\n '6': ['zombie', 'caco', 'caco', 'uber', 'uber', 'uber', 'satan']\r\n }\r\n lvl = self.getCurrentLvl()\r\n enemyType = choice(spawnByLvlChance[str(lvl)])\r\n enemy_ = None\r\n if enemyType == 'zombie':\r\n enemy_ = DemonZombieEnemy(choice(range(50, self.width - 50)), -50)\r\n elif enemyType == 'caco':\r\n enemy_ = CacoDemonEnemy(choice(range(50, self.width - 50)), -50)\r\n elif enemyType == 'uber':\r\n enemy_ = UberDemonEnemy(choice(range(50, self.width - 50)), -80)\r\n pygame.mixer.Channel(0).play(self.sounds['boss_awaken'][0])\r\n self.enemy_delay = round(600 / lvl)\r\n elif enemyType == 'satan':\r\n enemy_ = SatanEnemy(choice(range(50, self.width - 50)), -150)\r\n pygame.mixer.Channel(0).play(self.sounds['boss_awaken'][1])\r\n self.enemy_delay = round(1200 / lvl)\r\n self.enemies = self.enemies + [enemy_]\r\n\r\n def spawnPlayer(self):\r\n global dataController\r\n\r\n try:\r\n ship_id = list(filter(lambda x: x['isUsing'] is True, dataController.ships))[0]['id']\r\n if ship_id == 1:\r\n self.player = FirstShipPlayer(self.width // 2, self.height - 250)\r\n elif ship_id == 2:\r\n self.player = SecondShipPlayer(self.width // 2, self.height - 250)\r\n elif ship_id == 3:\r\n self.player = ThirdShipPlayer(self.width // 2, self.height - 250)\r\n else:\r\n raise Exception\r\n except Exception:\r\n self.player = FirstShipPlayer(self.width // 2, self.height - 250)\r\n\r\n def handleEvents(self, events):\r\n global running\r\n\r\n for event in events:\r\n if event.type == pygame.QUIT:\r\n running = False\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n self.state['mousePressed'] = True\r\n if event.type == pygame.MOUSEBUTTONUP:\r\n if event.button == 1:\r\n self.state['mousePressed'] = False\r\n if event.type == pygame.MOUSEMOTION:\r\n x, y = event.pos\r\n if self.state['mousePressed']:\r\n bubbles = []\r\n bubblesNum = choice(range(10, 20))\r\n for j in range(bubblesNum):\r\n bubble = Bubble(choice(range(2, 4)),\r\n choice(range(self.player.cords['x'] - round(self.player.radius * 0.75),\r\n self.player.cords['x'] + round(self.player.radius * 0.75))),\r\n choice(range(self.player.cords['y'] + self.player.radius // 3,\r\n self.player.cords['y'] + self.player.radius)))\r\n bubble.beforeDestroy = choice(range(60, 150))\r\n bubbles.append(bubble)\r\n self.particles['bubbles'] += bubbles\r\n if ((self.player.cords['x'] > self.player.radius or x - 
self.state['mouseCords'][0] > 0) and\r\n (self.player.cords['x'] < self.width - self.player.radius or x - self.state['mouseCords'][0] < 0)):\r\n if x - self.state['mouseCords'][0] > 0:\r\n self.player.rotation_angle -= 1\r\n self.player.rotation_angle = max(self.player.rotation_angle, -60)\r\n else:\r\n self.player.rotation_angle += 1\r\n self.player.rotation_angle = min(self.player.rotation_angle, 60)\r\n self.player.cords['x'] += x - self.state['mouseCords'][0]\r\n if (self.player.cords['y'] > self.player.radius or y - self.state['mouseCords'][1] > 0) and (self.player.cords['y'] < self.height - self.player.radius or y - self.state['mouseCords'][1] < 0):\r\n self.player.cords['y'] += y - self.state['mouseCords'][1]\r\n\r\n self.state['mouseCords'] = x, y\r\n\r\n def handleForGameOver(self):\r\n global gameState\r\n\r\n for enemy in self.enemies:\r\n if enemy.cords['y'] + enemy.radius > self.height:\r\n gameState = {\r\n 'menu': False,\r\n 'isPlaying': False,\r\n 'over': True\r\n }\r\n self.gameOver()\r\n\r\n def handleEnemiesConnectWithPlayer(self):\r\n global gameState\r\n\r\n for enemy in self.enemies:\r\n len1 = abs(enemy.cords['y'] - self.player.cords['y'])\r\n len2 = abs(enemy.cords['x'] - self.player.cords['x'])\r\n d = (len1 ** 2 + len2 ** 2) ** 0.5\r\n flag = False\r\n if d < enemy.radius + self.player.radius:\r\n flag = True\r\n elif d == self.player.radius + enemy.radius:\r\n flag = True\r\n elif d + self.player.radius == enemy.radius:\r\n flag = True\r\n elif d < enemy.radius - self.player.radius or d + self.player.radius < enemy.radius:\r\n flag = True\r\n if flag:\r\n particlesNum = choice(range(20, 40))\r\n for i in range(particlesNum):\r\n fire = Fire(\r\n choice(range(4, 10)),\r\n choice(\r\n range(round(self.player.cords['x'] - self.player.radius // 1.2),\r\n round(self.player.cords['x'] + self.player.radius // 1.2))),\r\n choice(range(round(self.player.cords['y'] - self.player.radius // 1.2),\r\n round(self.player.cords['y'] + self.player.radius // 1.2)))\r\n )\r\n fire.beforeDestroy = choice(range(60, 150))\r\n self.particles['fire'].append(fire)\r\n self.player.death()\r\n gameState = {\r\n 'menu': False,\r\n 'isPlaying': False,\r\n 'over': True\r\n }\r\n self.gameOver()\r\n\r\n def handleBulletsConnectWithEnemies(self):\r\n bullet_indexes = []\r\n for i, bullet in enumerate(self.bullets):\r\n for enemy in self.enemies:\r\n len1 = abs(enemy.cords['y'] - bullet.cords['y'])\r\n len2 = abs(enemy.cords['x'] - bullet.cords['x'])\r\n d = (len1 ** 2 + len2 ** 2) ** 0.5\r\n if d < enemy.radius + bullet.radius:\r\n bullet_indexes.append(i)\r\n enemy.hp -= bullet.damage\r\n elif d == bullet.radius + enemy.radius:\r\n bullet_indexes.append(i)\r\n enemy.hp -= bullet.damage\r\n elif d + bullet.radius == enemy.radius:\r\n bullet_indexes.append(i)\r\n enemy.hp -= bullet.damage\r\n elif d < enemy.radius - bullet.radius or d + bullet.radius < enemy.radius:\r\n bullet_indexes.append(i)\r\n enemy.hp -= bullet.damage\r\n for i in bullet_indexes:\r\n try:\r\n del self.bullets[i]\r\n pygame.mixer.Channel(1).play(choice(self.sounds['bullet_collide_with_enemies']))\r\n except Exception:\r\n pass\r\n\r\n def getCurrentLvl(self):\r\n if self.score < 500:\r\n return 1\r\n elif self.score < 1500:\r\n return 2\r\n elif self.score < 4500:\r\n return 3\r\n elif self.score < 13500:\r\n return 4\r\n elif self.score < 50500:\r\n return 5\r\n else:\r\n return 6\r\n\r\n def tryShoot(self):\r\n if self.state['mousePressed']:\r\n if self.bullet_delay <= 0:\r\n attackByLvl = {\r\n '1': lambda _: 
self.player.attack.lvl1(),\r\n '2': lambda _: self.player.attack.lvl2(),\r\n '3': lambda _: self.player.attack.lvl3(),\r\n '4': lambda _: self.player.attack.lvl4(),\r\n '5': lambda _: self.player.attack.lvl5(),\r\n '6': lambda _: self.player.attack.lvl6(),\r\n }\r\n lvl = self.getCurrentLvl()\r\n result = attackByLvl[str(lvl)]('_')\r\n self.bullets += result[0]\r\n self.bullet_delay = result[1]\r\n\r\n def updateBullets(self):\r\n indexes = []\r\n for i in range(len(self.bullets)):\r\n if self.bullets[i].cords['y'] + self.bullets[i].radius <= 0:\r\n indexes.append(i)\r\n else:\r\n bubbles = []\r\n bubblesNum = 1\r\n for j in range(bubblesNum):\r\n bubbles.append(Bubble(choice(range(1, 3)),\r\n choice(range(self.bullets[i].cords['x'] - self.bullets[i].radius,\r\n self.bullets[i].cords['x'] + self.bullets[i].radius)),\r\n choice(range(self.bullets[i].cords['y'] - self.bullets[i].radius,\r\n self.bullets[i].cords['y'] + self.bullets[i].radius))))\r\n bubbles[-1].beforeDestroy = 10\r\n self.particles['bubbles'] += bubbles\r\n for i in indexes:\r\n try:\r\n del self.bullets[i]\r\n except Exception:\r\n pass\r\n for bullet in self.bullets:\r\n bullet.cords['x'] += int(bullet.speed * bullet.direction['x_cof'])\r\n bullet.cords['y'] += int(bullet.speed * bullet.direction['y_cof'])\r\n self.bullet_delay -= 1\r\n\r\n def updateEnemies(self):\r\n if self.enemy_delay_down_delay <= 0:\r\n self.default_enemy_delay -= 1\r\n if self.default_enemy_delay < 10:\r\n self.default_enemy_delay = 10\r\n self.enemy_delay_down_delay = settings.ENEMY_DELAY_LOWER_DELAY\r\n if self.enemy_delay <= 0:\r\n self.enemy_delay = self.default_enemy_delay\r\n self.spawnEnemy()\r\n self.enemy_delay -= 1\r\n self.enemy_delay_down_delay -= 1\r\n for enemy_ in self.enemies:\r\n particlesNum = 2\r\n for i in range(particlesNum):\r\n fire = Fire(\r\n choice(range(2, 4)),\r\n choice(range(round(enemy_.cords['x'] - enemy_.radius), round(enemy_.cords['x'] + enemy_.radius))),\r\n choice(range(round(enemy_.cords['y'] + enemy_.radius // 3), round(enemy_.cords['y'] + enemy_.radius // 2)))\r\n )\r\n fire.beforeDestroy = choice(range(60, 150))\r\n self.particles['fire'].append(fire)\r\n enemy_.cords['x'] += enemy_.speed * enemy_.direction['x_cof']\r\n enemy_.cords['y'] += enemy_.speed * enemy_.direction['y_cof']\r\n indexes = []\r\n for i in range(len(self.enemies)):\r\n if self.enemies[i].hp <= 0:\r\n indexes.append(i)\r\n for i in indexes:\r\n try:\r\n self.score += self.enemies[i].reward\r\n x, y = self.enemies[i].cords['x'], self.enemies[i].cords['y']\r\n radius = self.enemies[i].radius\r\n particlesNum = choice(range(20, 30))\r\n for j in range(particlesNum):\r\n flesh = Flesh(choice(range(int(radius // 6), int(radius // 4))),\r\n choice(range(int(x - radius // 1.2), int(x + radius // 1.2))),\r\n choice(range(int(y - radius // 1.2), int(y + radius // 1.2))))\r\n self.particles['flesh'].append(flesh)\r\n del self.enemies[i]\r\n pygame.mixer.Channel(2).play(choice(self.sounds['demon_killed']))\r\n except Exception as err:\r\n print(err)\r\n\r\n def updateParticles(self):\r\n indexes = []\r\n for i in range(len(self.particles['bubbles'])):\r\n self.particles['bubbles'][i].beforeDestroy -= 1\r\n if self.particles['bubbles'][i].beforeDestroy <= 0:\r\n indexes.append(i)\r\n for i in indexes:\r\n try:\r\n del self.particles['bubbles'][i]\r\n except Exception:\r\n pass\r\n indexes = []\r\n for i in range(len(self.particles['fire'])):\r\n self.particles['fire'][i].beforeDestroy -= 1\r\n if self.particles['fire'][i].beforeDestroy <= 0:\r\n 
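# expired fire particle: remember its index so it is dropped below\r\n                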
indexes.append(i)\r\n for i in indexes:\r\n try:\r\n del self.particles['fire'][i]\r\n except Exception:\r\n pass\r\n indexes = []\r\n for i in range(len(self.particles['flesh'])):\r\n self.particles['flesh'][i].beforeDestroy -= 1\r\n self.particles['flesh'][i].cords['y'] -= 0.5\r\n if self.particles['flesh'][i].beforeDestroy <= 0:\r\n indexes.append(i)\r\n for i in indexes:\r\n try:\r\n del self.particles['flesh'][i]\r\n except Exception:\r\n pass\r\n\r\n def render(self):\r\n screen.blit(self.bg, (0, 0))\r\n for bubble in self.particles['bubbles']:\r\n screen.blit(bubble.sprite, (bubble.cords['x'] - bubble.radius + choice(range(-1, 2)),\r\n bubble.cords['y'] - bubble.radius + choice(range(-1, 2)),\r\n bubble.radius * 2 + choice(range(-1, 2)),\r\n bubble.radius * 2 + choice(range(-1, 2))))\r\n for fire in self.particles['fire']:\r\n screen.blit(fire.sprite, (fire.cords['x'] - fire.radius + choice(range(-1, 2)),\r\n fire.cords['y'] - fire.radius + choice(range(-1, 2)),\r\n fire.radius * 2 + choice(range(-1, 2)),\r\n fire.radius * 2 + choice(range(-1, 2))))\r\n for flesh in self.particles['flesh']:\r\n flesh.sprite.set_alpha(math.floor(255 * flesh.beforeDestroy / flesh.maxBeforeDestroy))\r\n screen.blit(flesh.sprite, (flesh.cords['x'] - flesh.radius, flesh.cords['y'] - flesh.radius, flesh.radius * 2, flesh.radius * 2))\r\n for bullet in self.bullets:\r\n screen.blit(bullet.sprite, (bullet.cords['x'] - bullet.radius, bullet.cords['y'] - bullet.radius, bullet.radius * 2, bullet.radius * 2))\r\n for enemy_ in list(sorted(self.enemies, key=lambda x: x.radius)):\r\n screen.blit(enemy_.sprite, (enemy_.cords['x'] - enemy_.sprite.get_width() // 2, enemy_.cords['y'] - enemy_.sprite.get_height() // 2))\r\n pygame.draw.rect(screen, pygame.Color('#999999'), (\r\n enemy_.cords['x'] - enemy_.radius,\r\n enemy_.cords['y'] - enemy_.radius - 15,\r\n enemy_.radius * 2,\r\n 10\r\n ))\r\n pygame.draw.rect(screen, pygame.Color('#0fff83'), (\r\n enemy_.cords['x'] - enemy_.radius,\r\n enemy_.cords['y'] - enemy_.radius - 15,\r\n round(enemy_.radius * 2 * (enemy_.hp / enemy_.max_hp)),\r\n 10\r\n ))\r\n if not self.player.dead:\r\n screen.blit(self.player.rotated_sprite, (self.player.cords['x'] - self.player.rotated_sprite.get_width() // 2,\r\n self.player.cords['y'] - self.player.radius,\r\n self.player.radius * 2, self.player.radius * 2))\r\n scoreFont = pygame.font.Font(None, 36)\r\n score = scoreFont.render(f'Очки силы: {game.score}', True, (255, 255, 255))\r\n scoreSurface = pygame.Surface((self.width - 100, 20 + score.get_height()))\r\n scoreSurface.set_alpha(200)\r\n pygame.draw.rect(scoreSurface, pygame.Color(0, 180, 0), (0, 0, scoreSurface.get_width(), scoreSurface.get_height()))\r\n scoreSurface.blit(score, (scoreSurface.get_width() // 2 - score.get_width() // 2, 10))\r\n screen.blit(scoreSurface, (50, 80))\r\n\r\n def restart(self):\r\n self.enemy_delay_down_delay = settings.ENEMY_DELAY_LOWER_DELAY\r\n self.default_enemy_delay = settings.ENEMY_DELAY\r\n self.enemy_delay = settings.ENEMY_DELAY\r\n self.score = 0\r\n self.state = {\r\n 'mouseCords': (0, 0),\r\n 'mousePressed': False\r\n }\r\n self.width, self.height = size\r\n self.spawnPlayer()\r\n self.bullet_delay = 0\r\n self.enemies = []\r\n self.bullets = []\r\n self.game_over_delay_start = 60\r\n self.game_over_delay = self.game_over_delay_start\r\n self.particles = {\r\n 'bubbles': [],\r\n 'fire': [],\r\n 'flesh': []\r\n }\r\n self.player.respawn()\r\n\r\n def gameOver(self):\r\n global dataController\r\n\r\n music_controller.stop_music()\r\n 
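# fold this run's score into the saved profile statistics before showing the game-over screen\r\n        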
dataController.personal[\"powerPointsTotalCollected\"] += self.score\r\n if self.score > dataController.personal[\"maxPowerPointsCollected\"]:\r\n dataController.personal[\"maxPowerPointsCollected\"] = self.score\r\n\r\n def handleGameOverEvents(self, events):\r\n global running, gameState\r\n\r\n for event in events:\r\n if event.type == pygame.QUIT:\r\n running = False\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n if (event.pos[0] in range(100, 100 + 400) and\r\n event.pos[1] in range(self.height - 70, self.height - 70 + 50)):\r\n self.restart()\r\n gameState = {\r\n 'menu': True,\r\n 'isPlaying': False,\r\n 'over': False\r\n }\r\n music_controller.play_menu()\r\n self.sounds['open'].play()\r\n else:\r\n self.restart()\r\n gameState = {\r\n 'menu': False,\r\n 'isPlaying': True,\r\n 'over': False\r\n }\r\n music_controller.play_game()\r\n self.sounds['start'].play()\r\n\r\n def gameOverRender(self):\r\n if self.game_over_delay < 0:\r\n titleFont = pygame.font.Font(None, 64)\r\n title = titleFont.render('Вы проиграли', True, (0, 180, 0))\r\n\r\n scoreFont = pygame.font.Font(None, 36)\r\n score = scoreFont.render(f'Вы набрали {self.score} очков силы', True, (180, 0, 0))\r\n\r\n descFont = pygame.font.Font(None, 36)\r\n desc = descFont.render('Нажмите, что-бы начать заново', True, (255, 255, 255))\r\n\r\n screen.blit(title, (self.width // 2 - title.get_width() // 2, 190))\r\n screen.blit(score, (self.width // 2 - score.get_width() // 2, 250))\r\n screen.blit(desc, (self.width // 2 - desc.get_width() // 2, 280))\r\n\r\n menuFont = pygame.font.Font(None, 40)\r\n menu = menuFont.render('Меню', True, (255, 255, 255))\r\n pygame.draw.rect(screen, pygame.Color(0, 180, 0), (100, self.height - 70, 400, 50))\r\n screen.blit(menu, (self.width // 2 - menu.get_width() // 2, self.height - 60))\r\n else:\r\n self.render()\r\n self.game_over_delay -= 1\r\n blackout = pygame.Surface((self.width, self.height))\r\n blackout.set_alpha(math.floor(255 - 255 * self.game_over_delay / self.game_over_delay_start))\r\n screen.blit(blackout, (0, 0))\r\n\r\n\r\ndataController = DataController()\r\ndataController.load_personal_data()\r\ndataController.load_ships_data()\r\npygame.init()\r\npygame.display.set_caption('')\r\nclock = pygame.time.Clock()\r\nsize = width, height = 600, 800\r\nscreen = pygame.display.set_mode(size)\r\nrunning = True\r\ngameState = {\r\n 'menu': True,\r\n 'over': False,\r\n 'isPlaying': False\r\n}\r\nmusic_controller.play_menu()\r\nmenu = Menu(size)\r\ngame = Game(size)\r\nwhile running:\r\n if gameState['menu']:\r\n menu.handleEvents(pygame.event.get())\r\n menu.render()\r\n if gameState['isPlaying']:\r\n game.handleEvents(pygame.event.get())\r\n game.updateEnemies()\r\n game.updateBullets()\r\n game.updateParticles()\r\n game.handleEnemiesConnectWithPlayer()\r\n game.handleBulletsConnectWithEnemies()\r\n game.render()\r\n game.player.update()\r\n game.tryShoot()\r\n game.handleForGameOver()\r\n if gameState['over']:\r\n game.updateParticles()\r\n game.handleGameOverEvents(pygame.event.get())\r\n game.gameOverRender()\r\n pygame.display.flip()\r\n clock.tick(settings.FPS)\r\n screen.fill('#000000')\r\ndataController.dump_personal_data()\r\ndataController.dump_ships_data()\r\n","repo_name":"MarlakDevelop/underwaterdivision","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":32903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"39354661949","text":"# importing 
libraries\r\nimport matplotlib.pyplot as plt\r\nimport seaborn\r\n# text color change\r\nplt.rcParams['text.color'] = 'white'\r\n\r\n# Background_color_change\r\nfig, ax = plt.subplots(figsize = (20,12))\r\nax.grid(False)\r\n# plt.style.use('ggplot')\r\nfig.set_facecolor('maroon') \r\n\r\n# declaring data\r\ndata = [44, 45, 40]\r\nkeys = ['Total Tweets', 'Positive Tweets', 'Negative Tweets']\r\n\r\n# define Seaborn color palette to use\r\npalette_color = seaborn.color_palette('bright')\r\n\r\n# plotting data on chart\r\nplt.pie(data, labels=keys, colors=palette_color, autopct='%.0f%%' )\r\n# fig.set_facecolor('lightgrey')\r\n\r\n# displaying chart\r\nplt.show()\r\n\r\n","repo_name":"Mumer5490/Python_Logics_Files","sub_path":"Pie_Graph_Back_color_change/Backgrnd_change_pie_.py","file_name":"Backgrnd_change_pie_.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"3474887811","text":"# -*- coding: utf-8 -*-\n# Websocket consumer\nfrom asgiref.sync import async_to_sync\nfrom data import models\nfrom channels.generic.websocket import JsonWebsocketConsumer\nimport json\n\nfrom project.schema import schema\nfrom data.safe.tokener import tokener as token\nfrom channels.http import AsgiRequest\nfrom data.safe_gql_view import BetterGraphQLView\n\n\ndef get_query_dict(query_string):\n query_string = query_string.decode('utf-8')\n queries = query_string.split('&')\n query_dict = {}\n for query in queries:\n key, val = query.split('=')\n query_dict[key] = val\n return query_dict\n\n\nclass ChatConsumer(JsonWebsocketConsumer):\n\n\n def connect(self):\n self.group_name = 'default'\n try:\n query_dict = get_query_dict(self.scope['query_string'])\n except Exception as e:\n # Bad Request\n self.close()\n return\n # Sort the two user ids and join them with an underscore to form the group name\n self.sender_token = query_dict['sender']\n try:\n self.sender_id = str(token.confirm_validate_token(self.sender_token))\n except:\n # Not logged in\n self.close()\n return\n self.receiver_id = query_dict['receiver']\n if self.sender_id == self.receiver_id:\n # Cannot send a message to yourself\n self.close()\n return\n self.joined_user = [self.sender_id, self.receiver_id]\n self.joined_user.sort()\n self.group_name = '_'.join(self.joined_user)\n self.joined_user = [int(i) for i in self.joined_user]\n async_to_sync(self.channel_layer.group_add)(\n self.group_name,\n self.channel_name\n )\n self.accept()\n\n\n def disconnect(self, code):\n async_to_sync(self.channel_layer.group_discard)(\n self.group_name,\n self.channel_name\n )\n\n \n def receive(self, text_data=None, bytes_data=None):\n text_data_json = json.loads(text_data)\n \"\"\"\n message: {\n text: String,\n picture: Int,\n addfile: Int,\n audio: Int\n }\n \"\"\"\n message = text_data_json.get('message', '')\n new_msg_content_obj = models.MessageContent.objects.create(**message)\n new_msg_obj = models.Message.objects.create(content=new_msg_content_obj, sender_id=self.sender_id, receiver_id=self.receiver_id)\n async_to_sync(self.channel_layer.group_send)(\n self.group_name,\n {\n 'type': 'chat_message',\n 'message': message,\n 'sender': {\n 'id': self.sender_id,\n 'username': models.User.objects.get(pk=self.sender_id).username\n },\n 'receiver': {\n 'id': self.receiver_id,\n 'username': models.User.objects.get(pk=self.receiver_id).username\n },\n 'send_time': str(new_msg_obj.send_time)\n }\n )\n \n\n def chat_message(self, event):\n message = event['message']\n sender = event['sender']\n receiver = event['receiver']\n send_time = 
event['send_time']\n self.send(text_data=json.dumps({\n 'message': message,\n 'sender': sender,\n 'receiver': receiver,\n 'send_time': send_time\n }))\n \n\n def not_login(self, event):\n error = event['error']\n self.send(text_data=json.dumps({\n 'error': error\n }))\n\n\nclass LongGraphQLCosumer(JsonWebsocketConsumer):\n\n\n def connect(self):\n self.group_name = 'default'\n async_to_sync(self.channel_layer.group_add)(\n self.group_name,\n self.channel_name\n )\n self.accept()\n \n\n def disconnect(self, code):\n async_to_sync(self.channel_layer.group_discard)(\n self.group_name,\n self.channel_name\n )\n\n\n def receive(self, text_data=None, bytes_data=None):\n gql = text_data\n user_agent = ''\n for tup in self.scope['headers']:\n if tup[0].decode('utf-8') == 'user-agent':\n user_agent = tup[1]\n asgi_scope = {\n 'client': self.scope['client'],\n 'path': '/graphql/', \n 'type': 'http', \n 'headers': [(b'origin', b'null'), (b'connection', b'keep-alive'), (b'accept-encoding', b'gzip, deflate, br'), (b'user-agent', user_agent), (b'content-type', b'application/json'), (b'host', b'localhost:8000'), (b'content-length', b'51'), (b'accept-language', b'zh-CN,zh;q=0.9'), (b'accept', b'application/json')], \n 'scheme': 'http',\n 'method': 'POST', \n 'query_string': b'', \n 'root_path': '', \n 'http_version': '1.1', \n 'server': ['127.0.0.1', 8000]\n }\n asgi_body = json.dumps({\"query\": gql}).encode('utf-8')\n asgi_request = AsgiRequest(asgi_scope, asgi_body)\n resp = BetterGraphQLView.as_view(schema=schema)(asgi_request)\n async_to_sync(self.channel_layer.group_send)(\n self.group_name,\n {\n 'type': 'query_result',\n 'data': json.loads(resp.content.decode('utf-8'))\n }\n )\n\n \n def query_result(self, event):\n self.send(text_data=json.dumps(event['data']))\n ","repo_name":"Liadrinz/homework-plus","sub_path":"Backend/data/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"39046253447","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom google.protobuf import text_format\nfrom protos import pipeline_pb2\nfrom modeling import trainer\n\nfrom bertviz import head_view\n\n\nflags.DEFINE_string('model_dir',\n 'logs.final.eval/b2t2_entropy0.00001_res101_f00001/ckpts',\n 'Path to the directory which holds model checkpoints.')\n\nflags.DEFINE_string('pipeline_proto',\n 'logs.final.eval/b2t2_entropy0.00001_res101_f00001/pipeline.pbtxt',\n 'Path to the pipeline proto file.')\n\nflags.DEFINE_integer('num_bert_layers', 12, 'Number of BERT layers.')\n\nFLAGS = flags.FLAGS\n\n\ndef _load_pipeline_proto(filename):\n \"\"\"Loads pipeline proto from file.\n\n Args:\n filename: Path to the pipeline config file.\n\n Returns:\n An instance of pipeline_pb2.Pipeline.\n \"\"\"\n with tf.io.gfile.GFile(filename, 'r') as fp:\n return text_format.Merge(fp.read(), pipeline_pb2.Pipeline())\n\n\ndef _load_vocabulary(filename):\n \"\"\"Loads vocabulary file.\n \n Args:\n filename: Path to the vocabulary file.\n \n Returns:\n A list of python strings.\n \"\"\"\n with tf.io.gfile.GFile(filename, 'r') as fp:\n return [x.strip('\\n') for x in fp]\n\n\ndef main(_):\n logging.set_verbosity(logging.DEBUG)\n\n for gpu in tf.config.experimental.list_physical_devices('GPU'):\n 
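# Enable memory growth so TensorFlow allocates GPU memory on demand rather than reserving it all up front\n 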
tf.config.experimental.set_memory_growth(gpu, True)\n pipeline_proto = _load_pipeline_proto(FLAGS.pipeline_proto)\n\n pipeline_proto.eval_reader.vcr_reader.batch_size = 1\n\n num_layers = FLAGS.num_bert_layers\n\n def attn_processor_fn(tf_graph):\n \"\"\"Gets attention tensors by name.\n Args:\n tf_graph: tf.Graph instance.\n num_layers: Number of attention layers in BERT.\n \"\"\"\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n # `attention_probs` = [B, N, F, T]\n predictions = {}\n for i in range(num_layers):\n predictions['bert_attn_%i' % i] = tf_graph.get_tensor_by_name(\n 'bert/encoder/layer_%s/attention/self/Softmax:0' % i)\n return predictions\n\n vocab = _load_vocabulary(pipeline_proto.eval_reader.vcr_reader.vocab_file)\n\n count = 0\n params = {'create_additional_predictions': attn_processor_fn}\n\n entropy_list = []\n for example_id, example in enumerate(\n trainer.predict(pipeline_proto, FLAGS.model_dir, params=params)):\n\n detection_classes = []\n for i, x in enumerate(example['detection_classes'][0]):\n name = vocab[x]\n if name == '[unused400]':\n name = '[IMAGE]'\n detection_classes.append(name)\n\n answer_choices = [vocab[x] for x in example['mixed_answer_choices'][0][0]]\n tokens = ['[CLS]'] + detection_classes + ['[SEP]'] + answer_choices + ['[SEP]']\n\n # N = Number of layers.\n # H = Number of heads.\n # F = From tensor.\n # T = To tensor.\n\n # `attns` = [N, H, F, T]\n attns = np.concatenate([example['bert_attn_%i' % i]\n for i in range(num_layers)], 0)\n log_attns = np.log(attns + 1e-8)\n\n # `entropy` = [N, H]\n entropy = - (attns * log_attns).sum(-1).mean(-1)\n entropy_list.append(entropy)\n\n # Counting.\n count += len(example['annot_id'])\n if count % 100 == 0:\n print(count)\n\n entropy = np.stack(entropy_list, -1).mean(-1)\n for i in range(len(entropy)):\n line = ','.join(['%.3lf' % x for x in entropy[i]])\n print(line)\n\n logging.info('Done')\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('model_dir')\n flags.mark_flag_as_required('pipeline_proto')\n app.run(main)\n","repo_name":"yekeren/VCR-shortcut-effects-study","sub_path":"modeling/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"86"} +{"seq_id":"8753265044","text":"import sys\nimport os\nimport argparse\nimport timeit\nimport gc\n\nstart_time = timeit.default_timer()\n\nparser = argparse.ArgumentParser(description='Replication of the CoVe Biattentive Classification Network (BCN)')\n\nparser.add_argument(\"--glovepath\", type=str, default=\"../../Word2Vec_models/GloVe/glove.840B.300d.txt\", help=\"Path to GloVe word embeddings. 
Download glove.840B.300d embeddings from https://nlp.stanford.edu/projects/glove/\")\nparser.add_argument(\"--ignoregloveheader\", type=str, default=\"False\", help=\"Set this to \\\"True\\\" if the first line of the GloVe file is a header and not a (word, embedding) pair\")\nparser.add_argument(\"--covepath\", type=str, default='../Cove-ported/Keras_CoVe.h5', help=\"Path to the CoVe model\")\nparser.add_argument(\"--covedim\", type=int, default=600, help=\"Number of dimensions in CoVe embeddings (default: 600)\")\nparser.add_argument(\"--datadir\", type=str, default='../datasets', help=\"Path to the directory that contains the datasets\")\nparser.add_argument(\"--outputdir\", type=str, default='model', help=\"Path to the directory where the BCN model will be saved\")\n\nparser.add_argument(\"--mode\", type=int, default=0, help=\"0: Normal (train + test); 1: BCN model dry-run (just try creating the model and do nothing else); 2: Train + test dry-run (Load a smaller dataset and train + test on it)\")\n\nparser.add_argument(\"--type\", type=str, default=\"CoVe\", help=\"What sentence embeddings to use (GloVe, CoVe_without_GloVe or CoVe). For CoVe, [GloVe(w)CoVe(w)] embeddings will be used. For CoVe_without_GloVe, GloVe(w) will not be included.\")\nparser.add_argument(\"--transfer_task\", type=str, default=\"SSTBinary\", help=\"Transfer task used for training BCN and evaluating predictions (e.g. SSTBinary, SSTFine, SSTBinary_lower, SSTFine_lower, TREC6, TREC50, TREC6_lower, TREC50_lower)\")\n\nparser.add_argument(\"--n_epochs\", type=int, default=20, help=\"Number of epochs (int). After 5 epochs of worse dev accuracy, training will early stopped and the best epoch will be saved (based on dev accuracy).\")\nparser.add_argument(\"--batch_size\", type=int, default=64, help=\"Batch size (int)\")\nparser.add_argument(\"--same_bilstm_for_encoder\", type=str, default=\"False\", help=\"Whether or not to use the same BiLSTM (when flag is set) or separate BiLSTMs (flag unset) for the encoder (str: True or False)\")\nparser.add_argument(\"--bilstm_encoder_n_hidden\", type=int, default=300, help=\"Number of hidden states in encoder's BiLSTM(s) (int)\")\nparser.add_argument(\"--bilstm_encoder_forget_bias\", type=float, default=1.0, help=\"Forget bias for encoder's BiLSTM(s) (float)\")\nparser.add_argument(\"--bilstm_integrate_n_hidden\", type=int, default=300, help=\"Number of hidden states in integrate's BiLSTMs (int)\")\nparser.add_argument(\"--bilstm_integrate_forget_bias\", type=float, default=1.0, help=\"Forget bias for integrate's BiLSTMs (float)\")\nparser.add_argument(\"--dropout_ratio\", type=float, default=0.1, help=\"Ratio for dropout applied before Feedforward Network and before each Batch Norm (float)\")\nparser.add_argument(\"--maxout_reduction\", type=int, default=2, help=\"On the first and second maxout layers, the dimensionality is divided by this number (int)\")\nparser.add_argument(\"--bn_decay\", type=float, default=0.999, help=\"Decay for each batch normalisation layer (float)\")\nparser.add_argument(\"--bn_epsilon\", type=float, default=1e-3, help=\"Epsilon for each batch normalisation layer (float)\")\nparser.add_argument(\"--optimizer\", type=str, default=\"adam\", help=\"Optimizer (adam or gradientdescent)\")\nparser.add_argument(\"--learning_rate\", type=float, default=0.001, help=\"Leaning rate (float)\")\nparser.add_argument(\"--adam_beta1\", type=float, default=0.9, help=\"Beta1 for adam optimiser if adam optimiser is used (float)\")\nparser.add_argument(\"--adam_beta2\", 
type=float, default=0.999, help=\"Beta2 for adam optimiser if adam optimiser is used (float)\")\nparser.add_argument(\"--adam_epsilon\", type=float, default=1e-8, help=\"Epsilon for adam optimiser if adam optimiser is used (float)\")\n\nargs, _ = parser.parse_known_args()\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\nfrom sentence_encoders import GloVeEncoder, CoVeEncoder, GloVeCoVeEncoder, InferSentEncoder, GloVeInferSentEncoder\nfrom datasets import SSTFineDataset, SSTFineLowerDataset\nfrom model import BCN\n\n\"\"\"\nHYPERPARAMETERS\n\"\"\"\n\nhyperparameters = {\n 'n_epochs': args.n_epochs, # int\n 'batch_size': args.batch_size, # int\n\n 'same_bilstm_for_encoder': str2bool(args.same_bilstm_for_encoder), # boolean\n 'bilstm_encoder_n_hidden': args.bilstm_encoder_n_hidden, # int. Used by McCann et al.: 300\n 'bilstm_encoder_forget_bias': args.bilstm_encoder_forget_bias, # float\n\n 'bilstm_integrate_n_hidden': args.bilstm_integrate_n_hidden, # int. Used by McCann et al.: 300\n 'bilstm_integrate_forget_bias': args.bilstm_integrate_forget_bias, # float\n\n 'dropout_ratio': args.dropout_ratio, # float. Used by McCann et al.: 0.1, 0.2 or 0.3\n 'maxout_reduction': args.maxout_reduction, # int. Used by McCann et al.: 2, 4 or 8\n\n 'bn_decay': args.bn_decay, # float\n 'bn_epsilon': args.bn_epsilon, # float\n\n 'optimizer': args.optimizer, # \"adam\" or \"gradientdescent\". Used by McCann et al.: \"adam\"\n 'learning_rate': args.learning_rate, # float. Used by McCann et al.: 0.001\n 'adam_beta1': args.adam_beta1, # float (used only if optimizer == \"adam\")\n 'adam_beta2': args.adam_beta2, # float (used only if optimizer == \"adam\")\n 'adam_epsilon': args.adam_epsilon # float (used only if optimizer == \"adam\")\n}\n\nif args.mode == 1:\n BCN(hyperparameters, 3, 128, 900, args.outputdir).dry_run()\n sys.exit()\n\nif not os.path.exists(args.outputdir):\n os.makedirs(args.outputdir)\n\nif not os.path.exists(os.path.join(args.outputdir, \"info.txt\")):\n with open(os.path.join(args.outputdir, \"info.txt\"), \"w\") as outputfile:\n outputfile.write(str(args.type) + \"\\n\")\n outputfile.write(str(args.transfer_task) + \"\\n\")\n outputfile.write(str(hyperparameters))\n\n\"\"\"\nDATASET\n\"\"\"\n\nif args.type == \"GloVe\":\n encoder = GloVeEncoder(args.glovepath, ignore_glove_header=str2bool(args.ignoregloveheader))\nelif args.type == \"CoVe_without_GloVe\":\n encoder = CoVeEncoder(args.glovepath, args.covepath, ignore_glove_header=str2bool(args.ignoregloveheader), cove_dim=args.covedim)\nelif args.type == \"CoVe\":\n encoder = GloVeCoVeEncoder(args.glovepath, args.covepath, ignore_glove_header=str2bool(args.ignoregloveheader), cove_dim=args.covedim)\nelse:\n print(\"ERROR: Unknown embeddings type. Should be GloVe, CoVe_without_GloVe or CoVe. Set it correctly using the --type argument.\")\n sys.exit(1)\n\nif args.transfer_task == \"SSTFine\":\n dataset = SSTFineDataset(args.datadir, encoder, dry_run=(args.mode == 2))\nelif args.transfer_task == \"SSTFine_lower\":\n dataset = SSTFineLowerDataset(args.datadir, encoder, dry_run=(args.mode == 2))\nelse:\n print(\"ERROR: Unknown transfer task. 
Set it correctly using the --transfer_task argument.\")\n sys.exit(1)\nencoder = None\ngc.collect()\n\n\"\"\"\nBCN MODEL\n\"\"\"\n\nbcn = BCN(hyperparameters, dataset.get_n_classes(), dataset.get_max_sent_len(), dataset.get_embed_dim(), args.outputdir)\ndev_accuracy = bcn.train(dataset)\ntest_accuracy = bcn.test(dataset)\n\naccuracy = {'dev': dev_accuracy, 'test': test_accuracy}\nwith open(os.path.join(args.outputdir, \"accuracy.txt\"), \"w\") as outputfile:\n outputfile.write(str(accuracy))\n\nprint(\"\\nReal time taken to train + test: %s seconds\" % (timeit.default_timer() - start_time))\n","repo_name":"menajosep/AleatoricSent","sub_path":"Cove-BCN/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":7479,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"42314608827","text":"# -*- coding:utf-8 -*-\n\n__author__ = 'huanghf'\n\n\"\"\"\nGiven a ransom note string (ransom) and a magazine string (magazine), determine whether the first string, ransom, can be constructed from the characters contained in the second string, magazine. Return true if it can be constructed; otherwise return false.\n\n(Problem note: to avoid exposing the handwriting in the ransom note, the required letters are searched for in the magazine and assembled into words to express the meaning.)\n\nNote:\n\nYou may assume that both strings contain only lowercase letters.\n\ncanConstruct(\"a\", \"b\") -> false\ncanConstruct(\"aa\", \"ab\") -> false\ncanConstruct(\"aa\", \"aab\") -> true\n\"\"\"\n\n\nclass Solution:\n def canConstruct(self, ransomNote: str, magazine: str) -> bool:\n \"\"\"\n Implemented with a dict of character counts\n :param ransomNote:\n :param magazine:\n :return:\n \"\"\"\n d = {}\n for i in magazine:\n d[i] = d.get(i, 0) + 1\n for i in ransomNote:\n d[i] = d.get(i, 0) - 1\n if d[i] < 0:\n return False\n return True\n\n def canConstruct2(self, ransomNote: str, magazine: str) -> bool:\n \"\"\"\n Implemented with the replace function\n replace(self, old, new, count=None)\n :param ransomNote:\n :param magazine:\n :return:\n \"\"\"\n for i in ransomNote:\n\n if i in magazine:\n magazine = magazine.replace(i,'',1)\n ransomNote = ransomNote.replace(i,'',1)\n return ransomNote==''\n\n\nransomNote, magazine = \"aa\", \"aab\"\nsol = Solution()\nprint(sol.canConstruct2(ransomNote, magazine))\n","repo_name":"lovehhf/LeetCode","sub_path":"383_赎金信.py","file_name":"383_赎金信.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"21997958749","text":"import inkex, simpletransform\nfrom copy import deepcopy\n_ = str\n\nclass CroCro_Mirror(inkex.Effect):\n def __init__(self):\n inkex.Effect.__init__(self)\n\n def effect(self):\n # Exit if there is no selected element\n if len(self.selected) <= 0: return\n\n # Parent node\n parentnode = self.current_layer\n\n # Get the selected elements\n sel = self.selected\n # inkex.debug('>> ' + ' '.join(_(v) for v in sel))\n\n # Create the transformation matrix\n transformation = 'scale(-1, 1)'\n transform = simpletransform.parseTransform(transformation)\n\n # Duplicate and apply the transformation matrix\n for id, node in sel.iteritems():\n childNode = deepcopy(node)\n parentnode.append(childNode)\n simpletransform.applyTransformToNode(transform, childNode)\n\n# Initialize the instance and run\nif __name__ == '__main__':\n e = CroCro_Mirror()\n e.affect()\n\n","repo_name":"masakazu-yanai/com.crocro.inkscape.mirror","sub_path":"_crocro_mirror.py","file_name":"_crocro_mirror.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"70976343964","text":"from torch import nn\r\nfrom torchvision import models\r\nimport cirtorch\r\nimport torch\r\nfrom metric_learning import ArcMarginProduct\r\n\r\n\r\nclass ArcFaceNet(nn.Module):\r\n DIVIDABLE_BY = 32\r\n\r\n def __init__(self,\r\n model_name='resnet50',\r\n pooling=['GAP'],\r\n args_pooling: dict 
= {},\n fc_dim=512,\n # dropout=0.0,\n pretrained=False,\n class_num=None):\n \"\"\"\n :param n_classes:\n :param model_name: name of model from pretrainedmodels\n e.g. resnet50, resnext101_32x4d, pnasnet5large\n :param pooling: One of ('SPoC', 'MAC', 'RMAC', 'GeM', 'Rpool', 'Flatten', 'CompactBilinearPooling')\n :param loss_module: One of ('arcface', 'cosface', 'softmax')\n \"\"\"\n super(ArcFaceNet, self).__init__()\n\n if model_name.startswith(\"efficientnet\"):\n from efficientnet_pytorch import EfficientNet\n backbone = EfficientNet.from_pretrained(model_name, num_classes=1000)\n last_linear_idx = -2\n pool_idx = -4\n backbone_layers = list(backbone.children())\n final_in_features = backbone_layers[last_linear_idx].in_features\n self.backbone = backbone\n elif model_name.startswith('bagnet'):\n import bagnets.pytorchnet\n backbone = getattr(bagnets.pytorchnet, model_name)(pretrained=pretrained)\n last_linear_idx = -1\n pool_idx = -2\n\n backbone_layers = list(backbone.children())\n final_in_features = backbone_layers[last_linear_idx].in_features\n self.backbone = nn.Sequential(*backbone_layers[:pool_idx])\n else:\n backbone = getattr(models, model_name)(num_classes=1000)\n last_linear_idx = -1\n pool_idx = -2\n\n backbone_layers = list(backbone.children())\n final_in_features = backbone_layers[last_linear_idx].in_features\n self.backbone = nn.Sequential(*backbone_layers[:pool_idx])\n\n self.pooling_param = pooling\n if len(pooling) == 1:\n if pooling[0] == 'GAP':\n self.pooling = nn.AdaptiveAvgPool2d(1)\n else:\n self.pooling = getattr(cirtorch.pooling, pooling[0])(**args_pooling)\n else:\n pooling_list = []\n for p in pooling:\n if p == 'GAP':\n pooling_list.append(nn.AdaptiveAvgPool2d(1))\n else:\n pooling_list.append(getattr(cirtorch.pooling, p)(**args_pooling))\n final_in_features *= len(pooling)\n self.pooling = nn.ModuleList(pooling_list)\n\n # self.pooling = getattr(cirtorch.pooling, pooling)(**args_pooling)\n\n # self.dropout = nn.Dropout(p=dropout)\n self.bn1 = nn.BatchNorm1d(final_in_features)\n self.fc1 = nn.Linear(final_in_features, fc_dim)\n self.relu = nn.ReLU()\n self.bn2 = nn.BatchNorm1d(fc_dim)\n self.fc2 = nn.Linear(fc_dim, fc_dim)\n self.bn3 = nn.BatchNorm1d(fc_dim)\n self.last_fc = nn.Linear(fc_dim, class_num)\n\n self.arc = ArcMarginProduct(fc_dim, class_num,\n s=30.0, m=0.3, easy_margin=False, ls_eps=0.0)\n\n self.model_name = model_name\n\n def forward(self, x, label=None):\n return self.extract_feat(x, label)\n\n def extract_feat(self, x, label=None):\n batch_size = x.shape[0]\n if self.model_name.startswith(\"efficientnet\"):\n x = self.backbone.extract_features(x)\n else:\n x = self.backbone(x)\n\n if len(self.pooling_param) == 1:\n x = self.pooling(x).view(batch_size, -1)\n else:\n pool_out_list = []\n for p in self.pooling:\n pool_out_list.append(p(x).view(batch_size, -1))\n x = torch.cat(pool_out_list, dim=1)\n\n # x = self.dropout(x)\n x = self.bn1(x)\n x = self.fc1(x)\n x = self.relu(x)\n x = self.bn2(x)\n x = self.fc2(x)\n x = self.bn3(x)\n\n if label is None:\n return self.last_fc(x)\n\n arc_output = self.arc(x, label)\n logits = self.last_fc(x)\n\n return arc_output, logits\n","repo_name":"jireh-father/dacon_landmark_classification","sub_path":"arc_face_net.py","file_name":"arc_face_net.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"5519820429","text":"\"\"\"\nThe polar expedition graph!\n===========================\n\nContains the graph 
connecting the vertices (or base stations) on the map.\n\nThis is going to be the main file that you are modifying. :)\n\nUsage:\n Contains the graph, requires the connection to vertices and edges.\n\"\"\"\nimport math\nfrom vertex import Vertex\nfrom edge import Edge\n\n\n# Define a \"edge already exists\" exception\n# Don't need to modify me.\nclass EdgeAlreadyExists(Exception):\n \"\"\"Raised when edge already exists in the graph\"\"\"\n def __init__(self, message):\n super().__init__(message)\n\n\nclass Graph:\n \"\"\"\n Graph Class\n -----------\n\n Represents the graph of vertices, which is equivalent to the map of base\n stations for our polar expedition.\n\n Attributes:\n * vertices (list): The list of vertices\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialises an empty graph\n \"\"\"\n self._vertices = []\n\n def insert_vertex(self, x_pos, y_pos):\n \"\"\"\n Insert the vertex storing the y_pos and x_pos\n\n :param x_pos: The x position of the new vertex.\n :param y_pos: The y position of the new vertex.\n\n :type x_pos: float\n :type y_pos: float\n\n :return: The new vertex, also stored in the graph.\n \"\"\"\n\n v = Vertex(x_pos, y_pos)\n self._vertices.append(v)\n return v\n\n def insert_edge(self, u, v):\n \"\"\"\n Inserts the edge between vertex u and v.\n\n We're going to assume in this assignment that all vertices given to\n this will already exist in the graph.\n\n :param u: Vertex U\n :param v: Vertex V\n\n :type u: Vertex\n :type v: Vertex\n\n :return: The new edge between U and V.\n \"\"\"\n\n e = Edge(u, v)\n\n # Check that the edge doesn't already exist\n for i in u.edges:\n if i == e:\n # Edge already exists.\n raise EdgeAlreadyExists(\"Edge already exists between vertex!\")\n\n # Add the edge to both nodes.\n u.add_edge(e)\n v.add_edge(e)\n\n def remove_vertex(self, v):\n \"\"\"\n Removes the vertex V from the graph.\n :param v: The pointer to the vertex to remove\n :type v: Vertex\n \"\"\"\n\n # Remove it from the list\n del self._vertices[self._vertices.index(v)]\n\n # Go through and remove all edges from that node.\n while len(v.edges) != 0:\n e = v.edges.pop()\n u = self.opposite(e, v)\n u.remove_edge(e)\n\n @staticmethod\n def distance(u, v):\n \"\"\"\n Get the distance between vertex u and v.\n\n :param u: A vertex to get the distance between.\n :param v: A vertex to get the distance between.\n\n :type u: Vertex\n :type v: Vertex\n :return: The Euclidean distance between two vertices.\n \"\"\"\n\n # Euclidean Distance\n # sqrt( (x2-x1)^2 + (y2-y1)^2 )\n\n return math.sqrt(((v.x_pos - u.x_pos)**2) + ((v.y_pos - u.y_pos)**2))\n\n @staticmethod\n def opposite(e, v):\n \"\"\"\n Returns the vertex at the other end of v.\n :param e: The edge to get the other node.\n :param v: Vertex on the edge.\n :return: Vertex at the end of the edge, or None if error.\n \"\"\"\n\n # It must be a vertex on the edge.\n if v not in (e.u, e.v):\n return None\n\n if v == e.u:\n return e.v\n\n return e.u\n\n ##############################################\n # Implement the functions below\n ##############################################\n\n def find_emergency_range(self, v):\n \"\"\"\n Returns the distance to the vertex W that is furthest from V.\n :param v: The vertex to start at.\n :return: The distance of the vertex W furthest away from V.\n \"\"\"\n #Solution 1 - Assumption: There might be vertices that are not connected. 
ie Split graph\r\n #Edges could have been removed, therefore need to test if still connected.\r\n #Doesn't pass a hidden test case.\r\n # furthestDist = 0\r\n # layers = self.bfs(v, 1000000)\r\n # for level in layers:\r\n # for l in level:\r\n # if furthestDist < self.distance(v, l):\r\n # furthestDist = self.distance(v, l)\r\n\r\n #Solution 2 - Assumption: All vertices are connected.\r\n emergency_range = 0\r\n for vertex in self._vertices:\r\n distance = self.distance(v, vertex)\r\n if distance > emergency_range:\r\n emergency_range = distance\r\n \r\n return emergency_range\r\n\r\n def find_path(self, start_vertex, end_vertex, radius):\r\n \"\"\"\r\n Find a path from vertex B to vertex S, such that the distance from B to\r\n every vertex in the path is within R. If there is no path between B\r\n and S within R, then return None.\r\n\r\n :param start_vertex: Vertex B to start from.\r\n :param end_vertex: Vertex S to finish at.\r\n :param radius: The maximum range of the radio.\r\n :return: The LIST of the VERTICES in the path.\r\n \"\"\"\r\n #CHECK IF THERE IS A PATH\r\n end_vertex_present = False\r\n all_layers = self.BFS(start_vertex, radius)\r\n for layers in all_layers:\r\n for vertex in layers:\r\n if vertex == end_vertex:\r\n end_vertex_present = True\r\n\r\n if end_vertex_present == False:\r\n return None\r\n\r\n #RETURNS A PATH (LIST)\r\n path = []\r\n temp = end_vertex\r\n\r\n while start_vertex not in path:\r\n path.insert(0, temp)\r\n temp = temp.parent\r\n\r\n return path\r\n\r\n def minimum_range(self, start, end):\r\n \"\"\"\r\n Returns the minimum range required to go from Vertex B to Vertex S.\r\n :param start: Vertex B to start from.\r\n :param end: Vertex S to finish at.\r\n :return: The minimum range in the path to go from B to S.\r\n \"\"\"\r\n #THE BARE MINIMUM THAT THE MIN_RANGE CAN BE IS THE DIRECT EUCLIDEAN RANGE\r\n #Assumption: Graph is connected. There is at least one path from start to end.\r\n all_ranges = []\r\n all_path = []\r\n mini = self.distance(start, end)\r\n all_ranges.append(mini)\r\n #------------------------------------------------------------------------\r\n\r\n #FINDING ONE PATH USING PREVIOUS EXAMPLE AND LARGE RADIUS\r\n radius = 1000000\r\n\r\n counter = 0\r\n #KEEP LOOPING TO FIND THE SHORTEST ROUTE\r\n while counter < 10:\r\n path = self.find_path(start, end, radius)\r\n if path is None:\r\n break\r\n #CHECKING BETWEEN EACH VERTEX OF THAT PATH\r\n m = self.distance(start, end)\r\n i = 0\r\n while i + 1 < len(path):\r\n dist = self.distance(path[0], path[i+1])\r\n if dist > m:\r\n m = dist\r\n i += 1\r\n\r\n radius = m - 0.01\r\n counter += 1\r\n #APPEND ALL PATHS INTO LIST\r\n all_path.append(path)\r\n\r\n #return m\r\n \r\n #------------------------------------------------------------------------\r\n #GET SHORTEST PATH\r\n s_path = all_path[-1]\r\n #APPEND ALL RANGES INTO A LIST AND GET MAXIMUM\r\n index = 0\r\n while index + 1 < len(s_path):\r\n distance = self.distance(s_path[0], s_path[index + 1])\r\n all_ranges.append(distance)\r\n index += 1\r\n\r\n return max(all_ranges)\r\n\r\n def move_vertex(self, v, new_x, new_y):\r\n \"\"\"\r\n Move the defined vertex.\r\n\r\n If there is already a vertex there, do nothing.\r\n\r\n :param v: The vertex to move\r\n :param new_x: The new X position\r\n :param new_y: The new Y position\r\n \"\"\"\r\n pos_taken = False\r\n for vertex in self._vertices:\r\n if vertex.x_pos == new_x and vertex.y_pos == new_y:\r\n pos_taken = True\r\n \r\n if pos_taken == False:\r\n v.move_vertex(new_x, new_y)\r\n\r\n #BREADTH FIRST SEARCH\r\n def BFS(self, start_vertex, radius):\r\n \"\"\"\r\n Conducts a Breadth First Search to find all connecting vertices within the given radius.\r\n \"\"\"\r\n #SETTING THINGS UP\r\n for vertex in self._vertices:\r\n vertex.seen = False\r\n vertex.parent = None\r\n\r\n 
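# Expand outward from the start vertex one layer at a time, linking each newly seen in-range vertex to its parent\r\n 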
start_vertex.seen = True\n all_layer = []\n current_layer = [start_vertex]\n next_layer = []\n empty = []\n\n #START OF BREADTH FIRST SEARCH\n while current_layer != empty:\n all_layer.append(current_layer)\n for element in current_layer:\n connected_vertices = []\n for edges in element.edges:\n opp_vertex = self.opposite(edges, element)\n if self.distance(start_vertex, opp_vertex) <= radius:\n connected_vertices.append(opp_vertex)\n #GETTING NEXT LAYER & FORMING PATHS\n for vertex in connected_vertices:\n if vertex.seen == False:\n next_layer.append(vertex)\n vertex.seen = True\n if self.distance(start_vertex, vertex) <= radius:\n vertex.parent = element \n #UPDATE LAYERS\n current_layer = next_layer\n next_layer = []\n\n return all_layer\n\n #BREADTH FIRST SEARCH (SHORTEST PATH)\n def BFSS(self, start_vertex, radius):\n \"\"\"\n Conducts a Breadth First Search to find the shortest path.\n \"\"\"\n #SETTING THINGS UP\n for vertex in self._vertices:\n vertex.seen = False\n vertex.parentlist = []\n vertex.childlist = []\n\n start_vertex.seen = True\n all_layer = []\n current_layer = [start_vertex]\n next_layer = []\n empty = []\n\n #START OF BREADTH FIRST SEARCH\n while current_layer != empty:\n all_layer.append(current_layer)\n for element in current_layer:\n connected_vertices = []\n for edges in element.edges:\n opp_vertex = self.opposite(edges, element)\n if element not in opp_vertex.childlist:\n connected_vertices.append(opp_vertex)\n #GETTING NEXT LAYER & FORMING PATHS\n for vertex in connected_vertices:\n if vertex.seen == False:\n next_layer.append(vertex)\n vertex.seen = True\n vertex.parentlist.append(element)\n element.childlist.append(vertex)\n if vertex.seen == True:\n vertex.parentlist.append(element)\n #UPDATE LAYERS\n current_layer = next_layer\n next_layer = []\n\n return all_layer\n\n","repo_name":"jonathanthen/COMP2123-Assignments","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":10838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"28144797169","text":"from Room import *\n\nclass RoomType:\n id = 0\n def __init__(self, hotel, price, sleeps, room_size, bed_type, room_type):\n self.__hotel = hotel\n self.__price = price\n self.__sleeps = sleeps\n self.__room_size = room_size\n self.__bed_type = bed_type\n self.__room_type = room_type\n self.__room_list = []\n RoomType.id += 1\n\n def add_room(self, count):\n for i in range(count):\n self.__room_list.append(Room(self.__hotel, self))\n\n @property\n def room_list(self):\n return self.__room_list\n \n @property\n def price(self):\n return self.__price\n","repo_name":"diaboliccz/OOP_proj","sub_path":"RoomType.py","file_name":"RoomType.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"23377554843","text":"from typing import List\n\n\nclass Solution:\n def numFactoredBinaryTrees(self, arr: List[int]) -> int:\n arr.sort()\n n = len(arr)\n res = [1] * n\n mod = 10**9 + 7\n\n for i in range(1, n):\n k = i - 1\n\n for j in range(i):\n while k > 0 and arr[j] * arr[k] > arr[i]:\n k -= 1\n\n if j > k:\n break\n\n if arr[j] * arr[k] == arr[i]:\n if j != k:\n res[i] = (res[i] + 2 * res[j] * res[k]) % mod\n else:\n res[i] = (res[i] + res[j] * res[k]) % mod\n\n return sum(res) % mod\n\n\narr = [2, 4]\narr = [2, 4, 5, 
10]\nprint(Solution().numFactoredBinaryTrees(arr))\n","repo_name":"zzz136454872/leetcode","sub_path":"numFactoredBinaryTrees.py","file_name":"numFactoredBinaryTrees.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"7831175970","text":"import numpy as np\nimport cv2\nimport argparse\nimport glob\nimport os\nfrom scipy import signal\nimport sys\nimport open3d as o3d\n\ndef blur3d(voxels, radius=4, sigma=1.0, threshold=0.25):\n\n\tr = np.arange(-radius+1, radius, 1)\n\txx, yy, zz = np.meshgrid(r, r, r)\n\tkernel = np.exp(-(xx**2 + yy**2 + zz**2)/(2*sigma**2))\n\tkernel /= kernel.sum()\n\n\tfiltered = signal.convolve(voxels, kernel, mode=\"same\")\n\n\treturn filtered > threshold\n\ndef images_to_voxels(folder_path):\n\t# Find all images, assuming all items in path are images\n\n\tfile_paths = glob.glob(os.path.join(folder_path, '*'))\n\tfile_paths = sorted(file_paths)\n\n\tnum_images = len(file_paths)\n\n\t# Allocate a numpy array assuming each image has the same dimensions as the first\n\t# Grab first image dimensions\n\timg_shape = cv2.imread(file_paths[0]).shape[:2]\n\tvoxels = np.zeros((num_images, img_shape[0], img_shape[1]), dtype=bool)\n\n\tprint()\n\tfor i, file_path in enumerate(file_paths):\n\t\t# Load each file\n\t\timg = cv2.imread(file_path)\n\n\t\tvoxels[i, :, :] = img.sum(axis=2) > 0\n\n\t\tif i % 25 == 0:\n\t\t\tsys.stdout.write(f'Image {i} of {len(file_paths)} \\r')\n\n\treturn voxels\n\ndef generate_possible_vertex_indices(shape):\n\t# Assume three vertices per voxel, excess vertices along -y, -z, +x far planes will be assigned but never used\n\tvertices_shape = (shape[0], shape[1], shape[2], 3)\n\tvertices_size = shape[0] * shape[1] * shape[2] * 3\n\tvertex_indices = np.arange(vertices_size, dtype=np.int32).reshape(vertices_shape)\n\n\treturn vertex_indices\n\ndef generate_marching_cubes_offsets(shape):\n\t# Here we will create all possible triangle combinations for the marching cubes algorithm.\n\t# +z\n\t# ^ +y\n\t# | /\n\t# | /\n\t#\n\t# a-----0------b --> +x\n\t# /| /|\n\t# 2 | 6 |\n\t# / 1 / 4\n\t# d-----3------c |\n\t# | | | |\n\t# | e-----8--|---f\n\t# 7 / 9 /\n\t#- | 5 | 10\n\t# |/ |/\n\t# h-----11-----g\n\n\t# Table generated from here:\n\t# https://cg.informatik.uni-freiburg.de/intern/seminar/surfaceReconstruction_survey%20of%20marching%20cubes.pdf\n\n\tunique_case_table = np.array(\n\t\t[ # Character sequence bitfield equivalent\n\t\t\t[ [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], ], # 0\n\t\t\t[ [11, 5, 7], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], ], # h 128\n\t\t\t[ [10, 5, 7], [ 9, 10, 7], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], ], # hg 192\n\t\t\t[ [11, 5, 7], [ 3, 6, 9], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], ], # hc 132\n\t\t\t[ [11, 5, 7], [ 6, 0, 4], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], ], # hb 130\n\t\t\t[ [11, 9, 5], [ 9, 1, 5], [ 9, 4, 1], [-1, -1, -1], [-1, -1, -1], ], # gfe 112\n\t\t\t[ [10, 5, 7], [ 9, 10, 7], [ 6, 0, 4], [-1, -1, -1], [-1, -1, -1], ], # hgb 194\n\t\t\t[ [ 2, 3, 7], [ 6, 0, 4], [ 9, 10, 11], [-1, -1, -1], [-1, -1, -1], ], # dgb 74\n\t\t\t[ [ 4, 7, 9], [ 4, 1, 7], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], ], # hgfe 240\n\t\t\t[ [ 2, 7, 11], [ 2, 11, 0], [10, 0, 11], [ 4, 0, 10], [-1, -1, -1], ], # hafe 177\n\t\t\t[ [ 0, 2, 5], [ 0, 5, 8], [ 3, 6, 11], [11, 6, 10], [-1, -1, -1], ], # aceg 85\n\t\t\t[ [ 1, 7, 11], [ 6, 1, 11], [ 6, 11, 10], [ 0, 1, 6], [-1, -1, -1], ], # hefb 
178\n\t\t\t[ [ 2, 3, 7], [11, 9, 5], [ 9, 1, 5], [ 9, 4, 1], [-1, -1, -1], ], # gfed 120\n\t\t\t[ [ 2, 3, 7], [ 8, 1, 5], [11, 9, 10], [ 6, 0, 4], [-1, -1, -1], ], # bdeg 90\n\t\t\t[ [ 2, 5, 11], [ 2, 11, 4], [ 0, 2, 4], [ 9, 4, 11], [-1, -1, -1], ], # eagf 113\n\t\t\t[ [ 2, 7, 11], [ 0, 2, 11], [ 0, 11, 10], [ 9, 3, 6], [ 0, 10, 4], ], # acefh 181\n\t\t\t[ [ 7, 6, 9], [ 0, 6, 7], [ 4, 0, 5], [10, 4, 5], [ 5, 0, 7], ], # acdef 61\n\t\t\t[ [ 9, 11, 5], [ 1, 9, 5], [ 4, 9, 1], [-1, -1, -1], [-1, -1, -1], ], # abcdh 143\n\t\t\t[ [ 5, 11, 7], [ 0, 6, 4], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], ], # acdefg 125\n\t\t\t[ [ 3, 7, 5], [ 3, 5, 6], [ 9, 6, 5], [11, 9, 5], [-1, -1, -1], ], # abdefg 123\n\t\t\t[ [ 9, 7, 5], [10, 9, 5], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], ], # abcdef 63\n\t\t\t[ [ 5, 11, 7], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], ], # abcdefg 127\n\t\t\t[ [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], ], # abcdefgh 255\n\t\t],\n\t\tdtype=np.int32\n\t)\n\n\tunique_case_bitfield = np.array( # Same as bitfield table above\n\t\t[ 0, 128, 192, 132, 130, 112, 194, 74, 240, 177, 85, 178, 120, 90, 113, 181, 61, 143, 125, 123, 63, 127, 255 ],\n\t\tdtype=np.uint8\n\t)\n\n\n\t# Quick function to apply transformations defined in the maps below\n\tdef perform_bitfield_transformation(old_bitfield_array, transformation):\n\t\tnew_bitfield_array = np.zeros_like(old_bitfield_array)\n\t\tfor c in 'abcdefgh':\n\t\t\tfind = 1 << (ord(c) - ord('a'))\n\t\t\treplace = 1 << (ord(transformation[c]) - ord('a'))\n\n\t\t\tmask = np.bitwise_and(old_bitfield_array, find).astype(bool)\n\t\t\tnew_bitfield_array[mask] += replace\n\t\treturn new_bitfield_array\n\n\tdef perform_index_transformation(indices, transformation, reverse_normal=True):\n\t\torig_shape = indices.shape\n\n\t\tif reverse_normal:\n\t\t\tindices = indices[:, :, ::-1] # If we are reflecting, things that were once clockwise are now counter-clockwise, and that screws our normals, so reversing the order here can fix that\n\n\t\tflat_indices = indices.reshape(-1)\n\t\treplaced_flat_indices = transformation[flat_indices]\n\t\treplaced_indices = replaced_flat_indices.reshape(orig_shape)\n\n\t\treturn replaced_indices\n\n\t# A map that represent how to roll the bitfield around ray ag\n\troll_bitfield_transformation = {\n\t\t'a': 'a',\n\t\t'b': 'd',\n\t\t'd': 'e',\n\t\t'e': 'b',\n\t\t'c': 'h',\n\t\t'h': 'f',\n\t\t'f': 'c',\n\t\t'g': 'g',\n\t}\n\troll_indices_transformation = np.array([ # Another map to roll the indices in the unique_case_table\n\t\t2, # 0 -> 2\n\t\t0, # 1 -> 0\n\t\t1, # 2 -> 1\n\t\t5, # 3 -> 5\n\t\t3, # 4 -> 3\n\t\t4, # 5 -> 4\n\t\t7, # 6 -> 7\n\t\t8, # 7 -> 8\n\t\t6, # 8 -> 6\n\t\t11, # 9 -> 11\n\t\t9, # 10 -> 9\n\t\t10, # 11 -> 10\n\t\t-1, # The last element (the -1 element, needs to map to -1)\n\t])\n\n\t# Map to reflect across x axis\n\tx_reflect_bitfield_transformation = {\n\t\t'a': 'b',\n\t\t'c': 'd',\n\t\t'e': 'f',\n\t\t'g': 'h',\n\n\t\t'b': 'a',\n\t\t'd': 'c',\n\t\t'f': 'e',\n\t\t'h': 'g',\n\t}\n\tx_reflect_indices_transformation = np.array([\n\t\t0, # 0 -> 0\n\t\t4, # 1 -> 4\n\t\t6, # 2 -> 6\n\t\t3, # 3 -> 3\n\t\t1, # 4 -> 1\n\t\t10, # 5 -> 10\n\t\t2, # 6 -> 2\n\t\t9, # 7 -> 9\n\t\t8, # 8 -> 8\n\t\t7, # 9 -> 7\n\t\t5, # 10 -> 5\n\t\t11, # 11 -> 11\n\t\t-1, # The last element (the -1 element, needs to map to -1)\n\t])\n\n\t# Reflect across y axis\n\ty_reflect_bitfield_transformation = {\n\t\t'a': 'd',\n\t\t'b': 'c',\n\t\t'e': 'h',\n\t\t'f': 'g',\n\n\t\t'd': 'a',\n\t\t'c': 'b',\n\t\t'h': 
'e',\n\t\t'g': 'f',\n\t}\n\ty_reflect_indices_transformation = np.array([\n\t\t3, # 0 -> 3\n\t\t7, # 1 -> 7\n\t\t2, # 2 -> 2\n\t\t0, # 3 -> 0\n\t\t9, # 4 -> 9\n\t\t5, # 5 -> 5\n\t\t6, # 6 -> 6\n\t\t1, # 7 -> 1\n\t\t11, # 8 -> 11\n\t\t4, # 9 -> 4\n\t\t10, # 10 -> 10\n\t\t8, # 11 -> 8\n\t\t-1, # The last element (the -1 element, needs to map to -1)\n\t])\n\n\n\t# Reflect across z axis\n\tz_reflect_bitfield_transformation = {\n\t\t'a': 'e',\n\t\t'b': 'f',\n\t\t'c': 'g',\n\t\t'd': 'h',\n\n\t\t'e': 'a',\n\t\t'f': 'b',\n\t\t'g': 'c',\n\t\t'h': 'd',\n\t}\n\tz_reflect_indices_transformation = np.array([\n\t\t8, # 0 -> 8\n\t\t1, # 1 -> 1\n\t\t5, # 2 -> 5\n\t\t11, # 3 -> 11\n\t\t4, # 4 -> 4\n\t\t2, # 5 -> 2\n\t\t10, # 6 -> 10\n\t\t7, # 7 -> 7\n\t\t0, # 8 -> 0\n\t\t9, # 9 -> 9\n\t\t6, # 10 -> 6\n\t\t3, # 11 -> 3\n\t\t-1, # The last element (the -1 element, needs to map to -1)\n\t])\n\n\n\t# After performing a translation, some bitfields will remain unchanged or revert to ones that already exist,\n\t# that is because that bitfield is symmetric upon that combination of translations.\n\t# No worries, it just means we will override an existing with an equivalent form.\n\n\t# Create the resulting full marching cubes table and add the unique cases to it\n\tresult = np.zeros((256, 5, 3), dtype=np.int32)\n\tresult[unique_case_bitfield] = unique_case_table\n\n\t# Roll the unique cases once and add to the result\n\troll1_bitfield = perform_bitfield_transformation(unique_case_bitfield, roll_bitfield_transformation)\n\troll1_indices = perform_index_transformation(unique_case_table, roll_indices_transformation, reverse_normal=False) # Roll is the only transformation that does not effect the normals\n\tresult[roll1_bitfield] = roll1_indices\n\n\t# Roll the previous result again and add to the result\n\troll2_bitfield = perform_bitfield_transformation(roll1_bitfield, roll_bitfield_transformation)\n\troll2_indices = perform_index_transformation(roll1_indices, roll_indices_transformation, reverse_normal=False)\n\tresult[roll2_bitfield] = roll2_indices\n\n\t# Reflect the unique cases across the x axis\n\tx_reflect_bitfield = perform_bitfield_transformation(unique_case_bitfield, x_reflect_bitfield_transformation)\n\tx_reflect_indices = perform_index_transformation(unique_case_table, x_reflect_indices_transformation)\n\tresult[x_reflect_bitfield] = x_reflect_indices\n\n\t# Reflect the unique cases across the y axis\n\ty_reflect_bitfield = perform_bitfield_transformation(unique_case_bitfield, y_reflect_bitfield_transformation)\n\ty_reflect_indices = perform_index_transformation(unique_case_table, y_reflect_indices_transformation)\n\tresult[y_reflect_bitfield] = y_reflect_indices\n\n\t# Reflect the unique cases across the y axis\n\tz_reflect_bitfield = perform_bitfield_transformation(unique_case_bitfield, z_reflect_bitfield_transformation)\n\tz_reflect_indices = perform_index_transformation(unique_case_table, z_reflect_indices_transformation)\n\tresult[z_reflect_bitfield] = z_reflect_indices\n\n\n\t# Reflect the rolled cases across the x axis\n\troll_x_reflect_bitfield = perform_bitfield_transformation(roll1_bitfield, x_reflect_bitfield_transformation)\n\troll_x_reflect_indices = perform_index_transformation(roll1_indices, x_reflect_indices_transformation)\n\tresult[roll_x_reflect_bitfield] = roll_x_reflect_indices\n\n\t# Reflect the rolled cases across the y axis\n\troll_y_reflect_bitfield = perform_bitfield_transformation(roll1_bitfield, y_reflect_bitfield_transformation)\n\troll_y_reflect_indices = 
perform_index_transformation(roll1_indices, y_reflect_indices_transformation)\n\tresult[roll_y_reflect_bitfield] = roll_y_reflect_indices\n\n\t# Reflect the rolled cases across the y axis\n\troll_z_reflect_bitfield = perform_bitfield_transformation(roll1_bitfield, z_reflect_bitfield_transformation)\n\troll_z_reflect_indices = perform_index_transformation(roll1_indices, z_reflect_indices_transformation)\n\tresult[roll_z_reflect_bitfield] = roll_z_reflect_indices\n\n\n\t# Reflect the twice rolled cases across the x axis\n\troll2_x_reflect_bitfield = perform_bitfield_transformation(roll2_bitfield, x_reflect_bitfield_transformation)\n\troll2_x_reflect_indices = perform_index_transformation(roll2_indices, x_reflect_indices_transformation)\n\tresult[roll2_x_reflect_bitfield] = roll2_x_reflect_indices\n\n\t# Reflect the twice rolled cases across the y axis\n\troll2_y_reflect_bitfield = perform_bitfield_transformation(roll2_bitfield, y_reflect_bitfield_transformation)\n\troll2_y_reflect_indices = perform_index_transformation(roll2_indices, y_reflect_indices_transformation)\n\tresult[roll2_y_reflect_bitfield] = roll2_y_reflect_indices\n\n\t# Reflect the twice rolled cases across the y axis\n\troll2_z_reflect_bitfield = perform_bitfield_transformation(roll2_bitfield, z_reflect_bitfield_transformation)\n\troll2_z_reflect_indices = perform_index_transformation(roll2_indices, z_reflect_indices_transformation)\n\tresult[roll2_z_reflect_bitfield] = roll2_z_reflect_indices\n\n\n\n\t# Reflect the x reflected results across the y axis\n\txy_reflect_bitfield = perform_bitfield_transformation(x_reflect_bitfield, x_reflect_bitfield_transformation)\n\txy_reflect_indices = perform_index_transformation(x_reflect_indices, x_reflect_indices_transformation)\n\tresult[xy_reflect_bitfield] = xy_reflect_indices\n\n\troll_xy_reflect_bitfield = perform_bitfield_transformation(roll_x_reflect_bitfield, x_reflect_bitfield_transformation)\n\troll_xy_reflect_indices = perform_index_transformation(roll_x_reflect_indices, x_reflect_indices_transformation)\n\tresult[roll_xy_reflect_bitfield] = roll_xy_reflect_indices\n\n\troll2_xy_reflect_bitfield = perform_bitfield_transformation(roll2_x_reflect_bitfield, x_reflect_bitfield_transformation)\n\troll2_xy_reflect_indices = perform_index_transformation(roll2_x_reflect_indices, x_reflect_indices_transformation)\n\tresult[roll2_xy_reflect_bitfield] = roll2_xy_reflect_indices\n\n\n\t# Reflect the x reflected results across the y axis\n\txy_reflect_bitfield = perform_bitfield_transformation(x_reflect_bitfield, y_reflect_bitfield_transformation)\n\txy_reflect_indices = perform_index_transformation(x_reflect_indices, y_reflect_indices_transformation)\n\tresult[xy_reflect_bitfield] = xy_reflect_indices\n\n\troll_xy_reflect_bitfield = perform_bitfield_transformation(roll_x_reflect_bitfield, y_reflect_bitfield_transformation)\n\troll_xy_reflect_indices = perform_index_transformation(roll_x_reflect_indices, y_reflect_indices_transformation)\n\tresult[roll_xy_reflect_bitfield] = roll_xy_reflect_indices\n\n\troll2_xy_reflect_bitfield = perform_bitfield_transformation(roll2_x_reflect_bitfield, y_reflect_bitfield_transformation)\n\troll2_xy_reflect_indices = perform_index_transformation(roll2_x_reflect_indices, y_reflect_indices_transformation)\n\tresult[roll2_xy_reflect_bitfield] = roll2_xy_reflect_indices\n\n\n\t# Reflect the z reflected results across the x axis\n\tzx_reflect_bitfield = perform_bitfield_transformation(z_reflect_bitfield, x_reflect_bitfield_transformation)\n\tzx_reflect_indices 
= perform_index_transformation(z_reflect_indices, x_reflect_indices_transformation)\n\tresult[zx_reflect_bitfield] = zx_reflect_indices\n\n\troll_zx_reflect_bitfield = perform_bitfield_transformation(roll_z_reflect_bitfield, x_reflect_bitfield_transformation)\n\troll_zx_reflect_indices = perform_index_transformation(roll_z_reflect_indices, x_reflect_indices_transformation)\n\tresult[roll_zx_reflect_bitfield] = roll_zx_reflect_indices\n\n\troll2_zx_reflect_bitfield = perform_bitfield_transformation(roll2_z_reflect_bitfield, x_reflect_bitfield_transformation)\n\troll2_zx_reflect_indices = perform_index_transformation(roll2_z_reflect_indices, x_reflect_indices_transformation)\n\tresult[roll2_zx_reflect_bitfield] = roll2_zx_reflect_indices\n\n\n\t# Reflect the z reflected results across the y axis\n\tzy_reflect_bitfield = perform_bitfield_transformation(z_reflect_bitfield, y_reflect_bitfield_transformation)\n\tzy_reflect_indices = perform_index_transformation(z_reflect_indices, y_reflect_indices_transformation)\n\tresult[zy_reflect_bitfield] = zy_reflect_indices\n\n\troll_zy_reflect_bitfield = perform_bitfield_transformation(roll_z_reflect_bitfield, y_reflect_bitfield_transformation)\n\troll_zy_reflect_indices = perform_index_transformation(roll_z_reflect_indices, y_reflect_indices_transformation)\n\tresult[roll_zy_reflect_bitfield] = roll_zy_reflect_indices\n\n\troll2_zy_reflect_bitfield = perform_bitfield_transformation(roll2_z_reflect_bitfield, y_reflect_bitfield_transformation)\n\troll2_zy_reflect_indices = perform_index_transformation(roll2_z_reflect_indices, y_reflect_indices_transformation)\n\tresult[roll2_zy_reflect_bitfield] = roll2_zy_reflect_indices\n\n\n\t# Reflect the zy reflected results across the x axis\n\tzyx_reflect_bitfield = perform_bitfield_transformation(zy_reflect_bitfield, x_reflect_bitfield_transformation)\n\tzyx_reflect_indices = perform_index_transformation(zy_reflect_indices, x_reflect_indices_transformation)\n\tresult[zyx_reflect_bitfield] = zyx_reflect_indices\n\n\troll_zyx_reflect_bitfield = perform_bitfield_transformation(roll_zy_reflect_bitfield, x_reflect_bitfield_transformation)\n\troll_zyx_reflect_indices = perform_index_transformation(roll_zy_reflect_indices, x_reflect_indices_transformation)\n\tresult[roll_zyx_reflect_bitfield] = roll_zyx_reflect_indices\n\n\troll2_zyx_reflect_bitfield = perform_bitfield_transformation(roll2_zy_reflect_bitfield, x_reflect_bitfield_transformation)\n\troll2_zyx_reflect_indices = perform_index_transformation(roll2_zy_reflect_indices, x_reflect_indices_transformation)\n\tresult[roll2_zyx_reflect_bitfield] = roll2_zyx_reflect_indices\n\n\n\t# At this point, we now have all 256 marching cubes cases with indices 0 through 7 as described in the figure at the top of this function.\n\t# However, this does not give us a simple way to properly index vertex elements in the entire voxel grid.\n\t# The following array is a map between the 0 through 7 indices used in this function, and the appropriate strides needed to access that\n\t# vertex in a (a, b, c, 3) vertex matrix, where (a, b, c) is the shape of the voxel grid.\n\n\t# +z +z\n\t# ^ +y ^ +y\n\t# | / | /\n\t# | / | /\n\t#\n\t# a-----0------b --> +x v000---0000---v001 --> +x\n\t# /| /| /| /|\n\t# 2 | 6 | 0002| 0012 |\n\t# / 1 / 4 / 0001 / 0011\n\t# d-----3------c | >>> v100---1000---v101 |\n\t# | | | | | | | |\n\t# | e-----8--|---f | v010--0100-|-v011\n\t# 7 / 9 / 1001 / 1011 /\n\t# | 5 | 10 | 0102 | 0112\n\t# |/ |/ |/ |/\n\t# h-----11-----g v110---1100---v111\n\t# *v000 is a voxel 
at point (0, 0, 0)\n\t# *0000 is a vertex at coordinates (0, 0, 0, 0)\n\n\t# Maps indices 0 through 7 to unravelled index offsets shown in the right diagram\n\tunravelled_offset_map = np.array([\n\t\t[0, 0, 0, 0],\n\t\t[0, 0, 0, 1],\n\t\t[0, 0, 0, 2],\n\t\t[1, 0, 0, 0],\n\t\t[0, 0, 1, 1],\n\t\t[0, 1, 0, 2],\n\t\t[0, 0, 1, 2],\n\t\t[1, 0, 0, 1],\n\t\t[0, 1, 0, 0],\n\t\t[1, 0, 1, 1],\n\t\t[0, 1, 1, 2],\n\t\t[1, 1, 0, 0],\n\t\t[0, 0, 0, 0], # This last guy is just a hack so that the ravelled offset map has a -1 as the last element\n\t])\n\n\t# Ravel the offsets so we can index a flattened vertex array\n\t# ravelled_offset_map = unravelled_offset_map[:, 0] * stride0 + unravelled_offset_map[:, 1] * stride1 + unravelled_offset_map[:, 2] * stride2 + unravelled_offset_map[:, 3] * stride3\n\travelled_offset_map = np.ravel_multi_index(unravelled_offset_map.T, (shape[0] + 2, shape[1] + 2, shape[2] + 2, shape[3])) # Use shape + 2 so we can pad the near and far planes of the voxel grid with 0\n\travelled_offset_map[-1] = -1\n\n\t# Now replace indices 0 through 7 with our vertex index offsets\n\tresult_offsets = ravelled_offset_map[result] # Last element (-1st element) of ravelled_offset is also -1\n\n\treturn result, result_offsets\n\ndef generate_voxel_bitfields(voxels):\n\t# +z\n\t# ^ +y\n\t# | /\n\t# | /\n\t#\n\t# a------------b --> +x\n\t# /| /|\n\t# / | / |\n\t# / | / |\n\t# d------------c |\n\t# | | | |\n\t# | e--------|---f\n\t# | / | /\n\t# | / | /\n\t# |/ |/\n\t# h------------g\n\n\t# Convert booleans a b c d e f h to a bitfield in this order 0b hfedcba so that a is the least significant bit\n\t# The bitfield takes padding into consideration by not assigning values at the far planes and by physically padding\n\t# the near planes\n\n\tpadded_voxels = np.zeros((voxels.shape[0] + 1, voxels.shape[1] + 1, voxels.shape[2] + 1), dtype=np.uint8) # Only need padded_voxels.shape + 1 because the far plane is excluded in the indexing, we only need physical padding for the near plane\n\tpadded_voxels[1:, 1:, 1:] = voxels\n\n\tvoxel_bitfields = np.zeros_like(padded_voxels)\n\n\tvoxel_bitfields = padded_voxels.copy() # 0th bit (Is a True)\n\tvoxel_bitfields[ :, :, :-1] += np.uint8(padded_voxels[ :, :, 1:] * 2) # 1st bit (Is b True)\n\tvoxel_bitfields[:-1, :, :-1] += np.uint8(padded_voxels[1:, :, 1:] * 4) # 2nd bit (Is c True)\n\tvoxel_bitfields[:-1, :, :] += np.uint8(padded_voxels[1:, :, :] * 8) # 3rd bit (Is d True)\n\tvoxel_bitfields[ :, :-1, :] += np.uint8(padded_voxels[ :, 1:, :] * 16) # 4th bit (Is e True)\n\tvoxel_bitfields[ :, :-1, :-1] += np.uint8(padded_voxels[ :, 1:, 1:] * 32) # 5th bit (Is f True)\n\tvoxel_bitfields[:-1, :-1, :-1] += np.uint8(padded_voxels[1:, 1:, 1:] * 64) # 6th bit (Is g True)\n\tvoxel_bitfields[:-1, :-1, :] += np.uint8(padded_voxels[1:, 1:, :] * 128) # 7th bit (Is h True)\n\n\treturn voxel_bitfields\n\n\ndef write_obj(vertex_indices, shape):\n\tprint(\"Ignoring unused vertices\")\n\tunique_indices = np.unique(vertex_indices)\n\tunravelled_unique_indices = np.unravel_index(unique_indices, (shape[0] + 2, shape[1] + 2, shape[2] + 2, shape[3])) # Unravel using shape + 2 to account for near and far plane padding\n\n\tprint(\"Remapping used vertex indices\")\n\tcompacted_indices = np.arange(unique_indices.size)\n\tindices_map = {}\n\tindices_map.update(zip(unique_indices, compacted_indices))\n\n\tprint(\"Creating obj\")\n\ttris = vertex_indices.reshape(-1, 3) # Group by tris\n\n\tdirection = unravelled_unique_indices[3]\n\n\ty_arr = -unravelled_unique_indices[0]*2 - (direction == 
2)\n\tz_arr = -unravelled_unique_indices[1]*2 - (direction == 1)\n\tx_arr = unravelled_unique_indices[2]*2 + (direction == 0)\n\n\n\tprint(f' Vertices: {len(x_arr)}')\n\tprint(f'Triangles: {len(tris)}')\n\n\t# vertices = np.stack([x_arr, y_arr, z_arr]).T\n\t# mesh = o3d.geometry.TriangleMesh(\n\t# \to3d.utility.Vector3dVector(vertices),\n\t# \to3d.utility.Vector3iVector(tris)\n\t# )\n\n\t# o3d.io.write_triangle_mesh(\"output.ply\", mesh, compressed=True, write_vertex_colors=False, write_triangle_uvs=False, print_progress=True, write_ascii=False,)\n\n\tprint()\n\twith open('output.obj', 'w') as f:\n\t\tf.write('# OBJ file\\n')\n\t\tfor i in range(len(unique_indices)):\n\n\t\t\tx = x_arr[i]\n\t\t\ty = y_arr[i]\n\t\t\tz = z_arr[i]\n\n\t\t\t# print(x, y, z)\n\t\t\t# print(unravelled_unique_indices[0][i], unravelled_unique_indices[1][i], unravelled_unique_indices[2][i], unravelled_unique_indices[3][i])\n\t\t\t# print(unique_indices[i])\n\t\t\t# print()\n\n\t\t\tf.write('v {0} {1} {2}\\n'.format(x, y, z))\n\n\t\t\tif i % 50000 == 0:\n\t\t\t\tsys.stdout.write(f'Vertices: {i / len(unique_indices) * 100:0.2f}% \\r')\n\n\t\tprint()\n\t\tfor i, tri in enumerate(tris):\n\t\t\tf.write('f')\n\t\t\tfor vi in tri:\n\t\t\t\tf.write(' {0}'.format(indices_map[vi] + 1))\n\t\t\tf.write('\\n')\n\n\t\t\tif i % 50000 == 0:\n\t\t\t\tsys.stdout.write(f'Triangles: {i / len(tris) * 100:0.2f}% \\r')\n\ndef images_to_pcd(voxels):\n\n\tprint()\n\tprint('Generating point cloud')\n\tcoords = np.where(voxels)\n\txyz = np.zeros((coords[0].size, 3), dtype=np.int32)\n\txyz[:, 1] = -coords[0]\n\txyz[:, 2] = -coords[1]\n\txyz[:, 0] = coords[2]\n\n\tpcd = o3d.geometry.PointCloud()\n\tpcd.points = o3d.utility.Vector3dVector(xyz)\n\to3d.io.write_point_cloud(\"output.ply\", pcd, print_progress=True)\n\ndef pcd_to_voxels(file_path):\n\tpcd = o3d.io.read_point_cloud(file_path)\n\tpoints = np.asarray(pcd.points)\n\n\t# We want the coordinates as integers, but they might be screwed up after being edited.\n\t# So let's approximate a greatest common multiple of the distances between voxels by\n\t# just finding the minimum distance between any two voxels\n\n\t# First lets place the bounding box origin to the world origin\n\tmin_x = points[:, 0].min()\n\tmin_y = points[:, 1].max()\n\tmin_z = points[:, 2].max()\n\n\tpoints[:, 0] -= min_x\n\tpoints[:, 1] -= min_y\n\tpoints[:, 2] -= min_z\n\n\tcoords = np.zeros_like(points)\n\tcoords[:, 0] = -points[:, 1]\n\tcoords[:, 1] = -points[:, 2]\n\tcoords[:, 2] = points[:, 0] # We assume all coords are positive at this point\n\n\t# Now lets find the minimum spacing between points\n\n\t# First find spacing between all points\n\tdiff_x = np.diff(coords[:, 0])\n\tdiff_y = np.diff(coords[:, 1])\n\tdiff_z = np.diff(coords[:, 2])\n\n\t# Now remove nonzero spacing\n\tdiff_x = diff_x[diff_x > 0]\n\tdiff_y = diff_y[diff_y > 0]\n\tdiff_z = diff_z[diff_z > 0]\n\n\t# Now find minimum spacing in each axis\n\tmin_diff_x = diff_x.min()\n\tmin_diff_y = diff_y.min()\n\tmin_diff_z = diff_z.min()\n\n\tmin_diff = min(min_diff_x, min_diff_y, min_diff_z)\n\n\t# Now that we have the minimum distance approximation of a GCM,\n\t# let's divide all of our coordinate spacing by it\n\tcoords //= min_diff\n\n\tcoords = coords.astype(np.int32)\n\ti = coords[:, 0]\n\tj = coords[:, 1]\n\tk = coords[:, 2]\n\n\tshape = (i.max()+1, j.max()+1, k.max()+1)\n\tvoxels = np.zeros(shape, dtype=bool)\n\n\tvoxels[i, j, k] = True\n\n\treturn voxels\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description='Converts a 
sequence of images into a voxelized obj')\n\tparser.add_argument('path', help='Path to folder of images or to a ply file')\n\tparser.add_argument('-e', '--export_for_edit', action='store_true', help=\"Don't process the images, just merge them into a ply file to be edited in another program\")\n\tparser.add_argument('-r', '--radius', type=int, help='Radius of smoothing kernel', default=4)\n\tparser.add_argument('-s', '--sigma', type=float, help='sigma value of smoothing kernel', default=1.0)\n\tparser.add_argument('-t', '--threshold', type=float, help='threshold value for smoothing kerne', default=0.25)\n\targs = parser.parse_args()\n\n\tif os.path.isfile(args.path):\n\t\tvoxels = pcd_to_voxels(args.path)\n\telse:\n\t\tvoxels = images_to_voxels(args.path)\n\n\tif args.export_for_edit:\n\n\t\timages_to_pcd(voxels)\n\n\telse:\n\t\tprint(\"\\nSmoothing...\")\n\t\tvoxels = blur3d(voxels, radius=args.radius, sigma=args.sigma, threshold=args.threshold)\n\n\t\tprint(\"Generating mesh\")\n\t\tpossible_vertices_shape = (voxels.shape[0], voxels.shape[1], voxels.shape[2], 3)\n\n\t\tdebug_indices, marching_cubes_offsets = generate_marching_cubes_offsets(possible_vertices_shape)\n\t\tvoxel_bitfields = generate_voxel_bitfields(voxels)\n\n\t\toffsets = marching_cubes_offsets[voxel_bitfields] # This is a (voxels.shape[0], voxels.shape[1], voxels.shape[2], 4, 3) matrix\n\t\t# debug = debug_indices[voxel_bitfields]\n\n\t\tpadded_possible_vertices_shape = (voxels.shape[0] + 2, voxels.shape[1] + 2, voxels.shape[2] + 2) # Add 2 for near and far plane padding\n\t\tpadded_possible_vertices_size = np.prod(padded_possible_vertices_shape)\n\n\t\tvertex_zero_index = np.arange(padded_possible_vertices_size).reshape(padded_possible_vertices_shape) * 3 # Index of 0th vertex for each voxel\n\t\tvertex_zero_index = vertex_zero_index[:-1, :-1, :-1] # Remove padding\n\n\t\tvertex_indices = vertex_zero_index[:, :, :, np.newaxis, np.newaxis] + offsets\n\n\t\tused_vertex_indices = vertex_indices[offsets != -1]\n\t\twrite_obj(used_vertex_indices, possible_vertices_shape)\n\n","repo_name":"WilliamRodriguez42/ImageSequenceTo3D","sub_path":"ImageSequenceTo3D.py","file_name":"ImageSequenceTo3D.py","file_ext":"py","file_size_in_byte":27676,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"14786240967","text":"from __future__ import print_function\r\nfrom keras.layers import LSTM\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nfrom keras.layers import Input, TimeDistributed, Dense, Lambda, concatenate, Dropout, BatchNormalization\r\nfrom keras.models import Sequential,Model\r\nfrom keras.layers.core import Dense, Activation, Dropout\r\nfrom keras.layers.embeddings import Embedding\r\nfrom keras.layers.recurrent import LSTM\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.utils import np_utils\r\nimport datetime, time\r\nfrom keras.layers import Convolution1D, GlobalMaxPooling1D\r\nfrom keras.utils.vis_utils import plot_model\r\nfrom keras import backend as K\r\nfrom keras.preprocessing import sequence, text\r\nfrom keras.layers import merge\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras.regularizers import l2\r\nfrom keras.layers import PReLU\r\nfrom keras.callbacks import Callback, ModelCheckpoint\r\nimport os\r\nMODEL_WEIGHTS_FILE = 'C:/ALDA/Project/data/question_pairs_weights.h5'\r\ndata = pd.read_csv(\"C:/ALDA/Project/data/train.csv\")\r\ny = 
data.is_duplicate.values\r\nprint(data.head(1))\r\nDROPOUT=0.2\r\n#\r\n#using WORD2VEC EMBEDDINGS\r\ntk = text.Tokenizer(num_words=200000)\r\n\r\nmax_len = 40\r\n\r\ntk.fit_on_texts(list(data.question1.values.astype(str)) + list(data.question2.values.astype(str)))\r\nx1 = tk.texts_to_sequences(data.question1.values.astype(str))\r\nx1 = sequence.pad_sequences(x1, maxlen=max_len)\r\n\r\nx2 = tk.texts_to_sequences(data.question2.values.astype(str))\r\nx2 = sequence.pad_sequences(x2, maxlen=max_len)\r\n\r\nword_index = tk.word_index\r\nytrain_enc = np_utils.to_categorical(y)\r\n\r\n\r\nRNG_SEED = 13371447\r\ntype(y)\r\n\r\n# Partition the dataset into train and test sets\r\nX = np.stack((x1, x2), axis=1)\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=RNG_SEED)\r\nQ1_train = X_train[:,0]\r\nQ2_train = X_train[:,1]\r\nQ1_test = X_test[:,0]\r\nQ2_test = X_test[:,1]\r\n\r\n# Define the model\r\nMAX_SEQUENCE_LENGTH = 40\r\nquestion1 = Input(shape=(MAX_SEQUENCE_LENGTH,))\r\nquestion2 = Input(shape=(MAX_SEQUENCE_LENGTH,))\r\nprint(question2.shape)\r\n#######\r\n\r\nNB_EPOCHS = 18\r\nMODEL_WEIGHTS_FILE = \"question_pairs_weights/cp.ckpt\"\r\nVALIDATION_SPLIT=0.1\r\nBATCH_SIZE = 32\r\n#######\r\n\r\nimport gensim\r\nfrom gensim.utils import simple_preprocess\r\n\r\ndef read_questions(row,column_name):\r\n return simple_preprocess(str(row[column_name]).encode('utf-8'))\r\n\r\ndocuments = []\r\n\r\nfor index, row in data.iterrows():\r\n question1_1 = read_questions(row,\"question1\")\r\n question2_2 = read_questions(row,\"question2\")\r\n documents.append(question1_1)\r\n documents.append(question2_2)\r\n\r\n\r\nmodel = gensim.models.Word2Vec(size=150, window=10, min_count=2, sg=1, workers=10)\r\nmodel.build_vocab(documents)\r\n\r\nmodel.train(sentences=documents, total_examples=len(documents), epochs=model.iter)\r\nword_vectors = model.wv\r\n\r\nembedding_matrix = np.zeros((len(word_index) + 1, 150))\r\ni = 0\r\nfor word in word_index:\r\n if word in word_vectors:\r\n embedding_vector = word_vectors[word]\r\n embedding_matrix[i] = embedding_vector\r\n i += 1\r\n\r\nEMBEDDING_DIM=150\r\nMAX_NB_WORDS=200000\r\nnb_words = min(MAX_NB_WORDS, len(word_index))\r\n\r\nq1 = Embedding(nb_words + 1, \r\n EMBEDDING_DIM, \r\n weights=[embedding_matrix], \r\n input_length=MAX_SEQUENCE_LENGTH, \r\n trainable=False)(question1)\r\n\r\nq1 = TimeDistributed(Dense(EMBEDDING_DIM, activation='relu'))(q1)\r\nq1 = Lambda(lambda x: K.max(x, axis=1), output_shape=(EMBEDDING_DIM, ))(q1)\r\nprint(q1.shape)\r\nq2 = Embedding(nb_words + 1, \r\n EMBEDDING_DIM, \r\n weights=[embedding_matrix], \r\n input_length=MAX_SEQUENCE_LENGTH, \r\n trainable=False)(question2)\r\n\r\nq2 = TimeDistributed(Dense(EMBEDDING_DIM, activation='relu'))(q2)\r\nq2 = Lambda(lambda x: K.max(x, axis=1), output_shape=(EMBEDDING_DIM, ))(q2)\r\nprint(q2.shape)\r\n\r\nm1 = Embedding(nb_words + 1, \r\n EMBEDDING_DIM, \r\n weights=[embedding_matrix], \r\n input_length=MAX_SEQUENCE_LENGTH, \r\n trainable=True)(question1)\r\nm1 = LSTM(EMBEDDING_DIM,activation='relu',dropout=0.2,recurrent_dropout=0.2)(m1)#dropout=0.2\r\n\r\nprint(m1.shape)\r\n\r\nm2 = Embedding(nb_words + 1, \r\n EMBEDDING_DIM, \r\n weights=[embedding_matrix], \r\n input_length=MAX_SEQUENCE_LENGTH, \r\n trainable=True)(question2)\r\n\r\nm2 = LSTM(EMBEDDING_DIM,activation='relu',dropout=0.2,recurrent_dropout=0.2)(m2)#dropout=0.2\r\nprint(m2.shape)\r\n\r\n\r\nmod1 = Embedding(nb_words + 1, \r\n EMBEDDING_DIM, \r\n weights=[embedding_matrix], \r\n 
                 input_length=MAX_SEQUENCE_LENGTH, \r\n                 trainable=False)(question1)\r\nmod2 = Embedding(nb_words + 1, \r\n                 EMBEDDING_DIM, \r\n                 weights=[embedding_matrix], \r\n                 input_length=MAX_SEQUENCE_LENGTH, \r\n                 trainable=False)(question2)\r\n\r\nfilter_length = 5\r\nnb_filter = 64\r\n\r\nmod1=Convolution1D(filters=nb_filter,\r\n                   kernel_size=filter_length,\r\n                   padding='valid',\r\n                   activation='relu',\r\n                   dilation_rate=1)(mod1)\r\n\r\nmod1= Dropout(DROPOUT)(mod1)\r\nmod1=Convolution1D(filters=nb_filter,\r\n                   kernel_size=filter_length,\r\n                   padding='valid',\r\n                   activation='relu',\r\n                   dilation_rate=1)(mod1)\r\n\r\n\r\nmod1=GlobalMaxPooling1D()(mod1)\r\nmod1= Dropout(DROPOUT)(mod1)\r\nmod1 = Dense(300, activation='relu')(mod1)\r\nmod1= Dropout(DROPOUT)(mod1)\r\nmod1 = BatchNormalization()(mod1)\r\n\r\nmod2=Convolution1D(filters=nb_filter,\r\n                   kernel_size=filter_length,\r\n                   padding='valid',\r\n                   activation='relu',\r\n                   dilation_rate=1)(mod2)\r\n\r\nmod2= Dropout(DROPOUT)(mod2)\r\nmod2=Convolution1D(filters=nb_filter,\r\n                   kernel_size=filter_length,\r\n                   padding='valid',\r\n                   activation='relu',\r\n                   dilation_rate=1)(mod2)\r\n\r\nmod2=GlobalMaxPooling1D()(mod2)\r\nmod2= Dropout(DROPOUT)(mod2)\r\nmod2 = Dense(300, activation='relu')(mod2)\r\nmod2= Dropout(DROPOUT)(mod2)\r\nmod2 = BatchNormalization()(mod2)\r\n\r\n#del merged\r\nDROPOUT=0.2\r\n# add the vector [mod1,mod2,q1,q2,m1,m2] as concatenate input to use LSTM+TDCNN+1DCNN\r\n# add the vector [mod1,mod2,q1,q2] as concatenate input to use TDCNN+1DCNN\r\n# add the vector [ ] as concatenate input to use combination of models\r\n\r\nmerged = concatenate([mod1,mod2,q1,q2,m1,m2])#,m1,m2\r\n\r\nmerged = BatchNormalization()(merged)\r\nmerged = Dense(300)(merged)\r\nmerged=PReLU()(merged)\r\nmerged = Dropout(DROPOUT)(merged)\r\n\r\nmerged = BatchNormalization()(merged)\r\nmerged = Dense(300)(merged)\r\nmerged=PReLU()(merged)\r\nmerged = Dropout(DROPOUT)(merged)\r\nmerged = BatchNormalization()(merged)\r\nmerged = Dense(300)(merged)\r\nmerged=PReLU()(merged)\r\nmerged = Dropout(DROPOUT)(merged)\r\nmerged = BatchNormalization()(merged)\r\nmerged = Dense(300)(merged)\r\nmerged=PReLU()(merged)\r\nmerged = Dropout(DROPOUT)(merged)\r\nmerged = BatchNormalization()(merged)\r\nmerged = Dense(300)(merged)\r\nmerged=PReLU()(merged)\r\nmerged = Dropout(DROPOUT)(merged)\r\nmerged = BatchNormalization()(merged)\r\n\r\n############\r\nis_duplicate_super = Dense(1, activation='sigmoid')(merged)\r\n\r\nOPTIMIZER = 'adam'\r\nmodel_super= Model(inputs=[question1,question2], outputs=is_duplicate_super)\r\nmodel_super.compile(loss='binary_crossentropy', optimizer=OPTIMIZER, metrics=['accuracy'])\r\n\r\n##\r\nmodel_super.summary()\r\nplot_model(model_super, to_file='model_plot.png', show_shapes=True, show_layer_names=True)\r\n\r\n##\r\nMODEL_WEIGHTS_FILE = 'C:/ALDA/Project/data/quepair_weights_lstm_word2vec_tt.h5'\r\nprint(\"Starting training at\", datetime.datetime.now())\r\nt0 = time.time()\r\ncallbacks = [ModelCheckpoint(MODEL_WEIGHTS_FILE, monitor='val_accuracy', save_best_only=True)]\r\nhistory = model_super.fit([Q1_train, Q2_train],#,Q1_train, Q2_train,Q1_train, Q2_train],\r\n                          y_train,\r\n                          epochs=150,\r\n                          validation_split=VALIDATION_SPLIT,\r\n                          verbose=1,\r\n                          batch_size=370,\r\n                          callbacks=callbacks)\r\n##\r\n\r\nt1 = time.time()\r\nprint(\"Training ended at\", datetime.datetime.now())\r\nprint(\"Minutes elapsed: %f\" % ((t1 - t0) / 60.))\r\n\r\nmax_val_acc, idx = max((val, idx) for (idx, val) in enumerate(history.history['val_accuracy']))\r\nprint('Maximum accuracy at epoch', 
'{:d}'.format(idx+1), '=', '{:.4f}'.format(max_val_acc))\r\n\r\nmodel_super.load_weights(MODEL_WEIGHTS_FILE)\r\nloss, accuracy = model_super.evaluate([Q1_test, Q2_test], y_test, verbose=0)\r\nprint('loss = {0:.4f}, accuracy = {1:.4f}'.format(loss, accuracy))\r\n\r\n","repo_name":"adarshpuri20/Question-Pair-Classification","sub_path":"word2vec_deep_learning.py","file_name":"word2vec_deep_learning.py","file_ext":"py","file_size_in_byte":8795,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"34062799804","text":"import random\nimport torch \nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets import CIFAR10\nfrom musketeer_optimizer import Musketeer\nimport numpy as np \nimport math\nfrom time import time\n\nif torch.cuda.is_available(): \n    dev = \"cuda\" \nelse: \n    dev = \"cpu\" \nprint(dev)\ndevice = torch.device(dev)\n\ndef set_seed(seed):\n    random.seed(seed)\n    np.random.seed(seed)\n    torch.manual_seed(seed)\n    if torch.cuda.is_available():\n        torch.cuda.manual_seed_all(seed)\n    return None\n    \ntransform = transforms.Compose(\n    [transforms.ToTensor(),\n     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n                                        download=True, transform=transform)\ntrain_dataloader = torch.utils.data.DataLoader(trainset, batch_size=32,\n                                          shuffle=True, num_workers=2)\n\ntestset = torchvision.datasets.CIFAR10(root='./data', train=False,\n                                       download=True, transform=transform)\ntest_dataloader = torch.utils.data.DataLoader(testset, batch_size=32,\n                                         shuffle=False, num_workers=2)\n    \nclass Net(nn.Module):\n    def __init__(self):\n        super(Net, self).__init__()\n        self.conv1 = nn.Conv2d(3, 12, 5)\n        self.pool = nn.MaxPool2d(2, 2)\n        self.conv2 = nn.Conv2d(12, 16, 5)\n        self.fc1 = nn.Linear(16 * 5 * 5, 120)\n        self.fc2 = nn.Linear(120, 84)\n        self.fc3 = nn.Linear(84, 10)\n\n    def forward_pass(self, x):\n        x = self.pool(F.relu(self.conv1(x)))\n        x = self.pool(F.relu(self.conv2(x)))\n        x = x.view(-1, 16 * 5 * 5)\n        x = F.relu(self.fc1(x))\n        x = F.relu(self.fc2(x))\n        x = self.fc3(x)\n        return x\n    \ndef train_sgd(seed,epochs,lr):\n    # set seed for reproducibility\n    set_seed(seed)\n    # Build Neural Network\n    model = Net()\n    # Put on GPU\n    model.to(device)\n    # Loss function\n    criterion = nn.CrossEntropyLoss()\n    # Optimizer SGD\n    optimizer = torch.optim.SGD(model.parameters(),lr=lr)\n    # Initialize loss evolution\n    loss_sgd= []\n    accuracy_list = []\n    # Main training loop\n    for e in range(epochs):\n        running_loss = 0\n        t_start = time()\n        for data in train_dataloader:\n            images, labels = data[0].to(device), data[1].to(device)\n            # Set gradients to 0\n            optimizer.zero_grad()\n            # Forward pass of the model\n            output = model.forward_pass(images)\n            # Loss function\n            loss = criterion(output, labels)\n            # Compute gradients\n            loss.backward()\n            # Perform one step of SGD optimizer\n            optimizer.step()\n            # Store evolution of loss\n            running_loss += loss.item()\n            loss_sgd.append(loss.item())\n        #print(\"Time this epoch:\",time()-t_start)\n        print(\"Epoch {} - Training loss: {}\".format(e, running_loss/len(train_dataloader)))\n        correct = 0\n        total = 0\n        with torch.no_grad():\n            for data in test_dataloader:\n                images, labels = data[0].to(device), data[1].to(device)\n                outputs = model.forward_pass(images)\n                _, predicted = torch.max(outputs.data, 1)\n                total += labels.size(0)\n                correct += (predicted == labels).sum().item()\n
        #print('Accuracy:',correct/total)\n        accuracy_list.append(correct / total)\n    params_save = 'seed'+str(seed)+'_lr'+str(lr)+'_epochs'+str(epochs)\n    np.save('sgd/loss_cifar10_sgd_'+params_save+'.npy',loss_sgd)\n    np.save('sgd/accuracy_cifar10_sgd_'+params_save+'.npy',accuracy_list)\n    return None\n    \ndef train_musketeer(seed,epochs,lr,ratio_changes,eta):\n    # set seed for reproducibility\n    set_seed(seed)\n    # Build Neural Network\n    model = Net()\n    # Put on GPU\n    model.to(device)\n    # Loss function\n    criterion = nn.CrossEntropyLoss()\n    # Optimizer MUSKETEER (T is the exploration size)\n    d = sum(p.numel() for p in model.parameters())\n    T = int(math.sqrt(d))\n    optimizer = Musketeer(params=list(model.parameters()),T=T,lr=lr,\n                          ratio_changes=ratio_changes,eta=eta)\n    # Initialize loss evolution\n    loss_musketeer = []\n    accuracy_list = []\n    # Main training loop\n    for e in range(int(epochs/ratio_changes)):\n        running_loss = 0\n        t_start = time()\n        for data in train_dataloader:\n            # Get images,labels of current batch\n            images, labels = data[0].to(device), data[1].to(device)\n            # Set gradients to 0\n            optimizer.zero_grad()\n            # Forward pass of the model\n            output = model.forward_pass(images)\n            # Loss function\n            loss = criterion(output, labels)\n            # Compute gradients\n            loss.backward()\n            # Perform one step of the Musketeer optimizer\n            optimizer.step()\n            # Store evolution of loss\n            running_loss += loss.item()\n            loss_musketeer.append(loss.item())\n        #print(\"Time this epoch:\",time()-t_start)\n        if e%10==0:\n            print(\"Epoch {} - Training loss: {}\".format(e, running_loss/len(train_dataloader)))\n        correct = 0\n        total = 0\n        with torch.no_grad():\n            for data in test_dataloader:\n                images, labels = data[0].to(device), data[1].to(device)\n                outputs = model.forward_pass(images)\n                _, predicted = torch.max(outputs.data, 1)\n                total += labels.size(0)\n                correct += (predicted == labels).sum().item()\n        #print('Accuracy:',correct/total)\n        accuracy_list.append(correct / total)\n    params_save = 'seed'+str(seed)+'_lr'+str(lr)+'_epochs'+str(int(epochs/ratio_changes))+'_eta'+str(eta)\n    np.save('musketeer'+str(eta)+'/loss_cifar10_musketeer_'+params_save+'.npy',loss_musketeer)\n    np.save('musketeer'+str(eta)+'/accuracy_cifar10_musketeer_'+params_save+'.npy',accuracy_list)\n    np.save('musketeer'+str(eta)+'/g_info_'+params_save+'.npy',np.array(optimizer.param_groups[0]['g_info'].cpu()))\n    return None\n\n########## Parameter Configuration ##########\nepochs= 5\nlr = 0.01\nratio_changes=0.1\n#############################################\n\n\n### SGD training part\nfor seed in range(1,11):\n    print('seed=',seed)\n    train_sgd(seed=seed,epochs=epochs,lr=lr)\n    \n### MUSKETEER training part with eta=1\nfor seed in range(1,11):\n    print('seed:',seed)\n    train_musketeer(seed=seed,epochs=epochs,\n                    lr=lr,ratio_changes=ratio_changes,eta=1)\n    \n### MUSKETEER training part with eta=2\nfor seed in range(1,11):\n    print('seed:',seed)\n    train_musketeer(seed=seed,epochs=epochs,\n                    lr=lr,ratio_changes=ratio_changes,eta=2)\n    \n ### MUSKETEER training part with eta=10\nfor seed in range(1,11):\n    print('seed:',seed)\n    train_musketeer(seed=seed,epochs=epochs,\n                    lr=lr,ratio_changes=ratio_changes,eta=10)","repo_name":"RemiLELUC/SCGD-Musketeer","sub_path":"code/StoFirstOrder_optimization/neural_networks/scripts/train_cifar10.py","file_name":"train_cifar10.py","file_ext":"py","file_size_in_byte":7091,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"86"} +{"seq_id":"17626767926","text":"from datetime import datetime, timedelta\nimport os\nfrom airflow import 
DAG\n# from airflow.operators.postgres_operator import PostgresOperator\nfrom airflow.contrib.operators.emr_create_job_flow_operator import EmrCreateJobFlowOperator\nfrom airflow.contrib.operators.emr_add_steps_operator import EmrAddStepsOperator\nfrom airflow.contrib.sensors.emr_step_sensor import EmrStepSensor\nfrom airflow.contrib.operators.emr_terminate_job_flow_operator import EmrTerminateJobFlowOperator\nfrom airflow.operators.bash import BashOperator\nfrom operators.data_quality import DataQualityOperator\n\ndefault_args = {\n 'owner': 'admin',\n 'start_date': datetime(2019, 1, 12),\n 'retries': 3,\n 'retry_delay': timedelta(minutes=5),\n 'depends_on_past': False,\n 'catchup': False,\n}\n\ndag = DAG('bikeshare_pipeline',\n default_args=default_args,\n description='Load and transform data in Redshift with Airflow',\n catchup=False,\n template_searchpath=\"/scripts\"\n )\n\nJOB_FLOW_OVERRIDES = {\n 'Name': 'test-cluster-airflow',\n 'ReleaseLabel': 'emr-6.7.0',\n 'LogUri': 's3n://airflow-bikeshare/',\n 'Applications': [\n {\n 'Name': 'Spark'\n },\n {\n 'Name': 'Hadoop'\n },\n ],\n 'Instances': {\n 'InstanceGroups': [\n {\n 'Name': 'MASTER',\n 'Market': 'ON_DEMAND',\n 'InstanceRole': 'MASTER',\n 'InstanceType': 'm5.xlarge',\n 'InstanceCount': 1,\n },\n {\n 'Name': 'CORE',\n 'Market': 'ON_DEMAND',\n 'InstanceRole': 'CORE',\n 'InstanceType': 'm5.xlarge',\n 'InstanceCount': 2,\n },\n ],\n 'KeepJobFlowAliveWhenNoSteps': True,\n 'TerminationProtected': False,\n # 'Ec2KeyName': '{{ var.value.emr_ec2_key_pair }}',\n },\n 'BootstrapActions': [\n {\n 'Name': 'string',\n 'ScriptBootstrapAction': {\n 'Path': 's3://bootstrap6/bootstrap.sh',\n }\n },\n ],\n # 'Configurations': [\n # {\n # 'Classification': 'spark-hive-site',\n # 'Properties': {\n # 'hive.metastore.client.factory.class': 'com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory'\n # }\n # }\n\n # ],\n 'VisibleToAllUsers': True,\n 'JobFlowRole': 'EMR_EC2_DefaultRole',\n 'ServiceRole': 'EMR_DefaultRole',\n # 'EbsRootVolumeSize': 32,\n # 'StepConcurrencyLevel': 1,\n # 'Tags': [\n # {\n # 'Key': 'Environment',\n # 'Value': 'Development'\n # },\n # {\n # 'Key': 'Name',\n # 'Value': 'Airflow EMR test Project'\n # },\n # {\n # 'Key': 'Owner',\n # 'Value': 'admin'\n # }\n # ]\n}\n\n# create_emr_cluster = EmrCreateJobFlowOperator(\n# task_id=\"create_emr_cluster\",\n# job_flow_overrides=JOB_FLOW_OVERRIDES,\n# aws_conn_id=\"aws_default\",\n# emr_conn_id=\"emr_default\",\n# dag=dag,\n# )\n\n# SPARK_STEPS = [ # Note the params values are supplied to the operator\n# {\n# \"Name\": \"Run pyspark script\",\n# \"ActionOnFailure\": \"CANCEL_AND_WAIT\",\n# \"HadoopJarStep\": {\n# \"Jar\": \"command-runner.jar\",\n# \"Args\": [\n# \"spark-submit\",\n# \"--deploy-mode\",\n# \"client\",\n# \"s3://bikeshare-script/emr_script.py\",\n# ],\n# },\n# },\n# ]\n \n# step_adder = EmrAddStepsOperator(\n# task_id='add_steps',\n# job_flow_id=\"{{ task_instance.xcom_pull(task_ids='create_emr_cluster', key='return_value') }}\",\n# aws_conn_id='aws_default',\n# steps=SPARK_STEPS,\n# dag=dag\n# )\n\n# last_step = len(SPARK_STEPS) - 1\n\n# step_checker = EmrStepSensor(\n# task_id='watch_step',\n# job_flow_id=\"{{ task_instance.xcom_pull('create_emr_cluster', key='return_value') }}\",\n# step_id=\"{{ task_instance.xcom_pull(task_ids='add_steps', key='return_value')[\"\n# + str(last_step)\n# + \"] }}\",\n# aws_conn_id='aws_default',\n# dag=dag\n# )\n\n# cluster_remover = EmrTerminateJobFlowOperator(\n# task_id='remove_cluster',\n# job_flow_id=\"{{ 
task_instance.xcom_pull(task_ids='create_emr_cluster', key='return_value') }}\",\n# aws_conn_id='aws_default',\n# dag=dag\n# )\n\nbash_command = \"python3 etl.py\"\n\nredshift_process_files = BashOperator(\n task_id='redshift_process',\n bash_command=bash_command,\n dag=dag,\n cwd=dag.folder)\n\n#Check that there is data in the table\nover_zero_check = {'check_sql': \"SELECT COUNT(*) from {}\", \n'expected_result': 1}\n\n# Make sure that correct number of records (neighborhood/ vehicle)\n# exist in the table\nneighborhood_check = {'check_sql': \"SELECT count(distinct concat(end_neighborhood, \\\nvehicle)) from {}\", 'expected_result':90}\n\nrun_quality_checks = DataQualityOperator(\n task_id='Run_data_quality_checks',\n dag=dag,\n redshift_conn_id=\"redshift\",\n tables=[\"neighborhood_metrics\"],\n dq_checks=[over_zero_check, neighborhood_check]\n)\n\n# create_emr_cluster >> step_adder\n# step_adder >> step_checker\n# step_checker >> cluster_remover\n# step_checker >> redshift_process_files\nredshift_process_files >> run_quality_checks\n\n","repo_name":"jessemoderwell/bikeshare_pipeline_capstone","sub_path":"airflow/dags/bikeshare_dag.py","file_name":"bikeshare_dag.py","file_ext":"py","file_size_in_byte":5281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"40152750037","text":"# (develop a dictionary by using python) made your own dictionary by python 0\n \n# dic ={\"python\":\"the most advance programming language\",\"java\":\"the most used pro.. lang in india\",\"ruby\":\" the only ios language\",\"C\":\" the most basic and complecated old language\"}\n# i =input(\"enter the programming language you want to know about:--\\n\")\n# o =i.capitalize()\n# print(o,\"=\",dic[i])\n\n# Develop a faulty calculator with this following faults: 45*3 = 555 , 56+9 = 77 ,56/6 = 4\noper = input ('''please enter the operation you want you to do:\n enter '+'for add \n enter '-'for substraction \n enter '*'for multiplication\n enter '/'for divide \n enter '**'for power \n enter '%'for modulus\n please enter your choice :- ''' )\n\nprint(\"enter the first number N1:\")\nN1 = int(input())\nprint(\"enter the 2nd number N2:-\")\nN2 = int(input())\nif oper == '+' :\n resu = N1 + N2 \n if N1 == 56 and N2 == 9 :\n resu = 77\nelif oper == '*' :\n resu = N1 * N2\n if N1 == 45 and N2 == 3 :\n resu = 555\nelif oper == '/':\n resu = N1/N2\n if N1 == 56 and N2 == 6 :\n resu = 4 \nelse :\n resu = N1 - N2\n\nprint(\"result =\",resu)\nprint(\"address of N1 in integer form is: \",int(id(N1)));\n\n","repo_name":"DARONIK03/Python_files","sub_path":"python#4dict.py","file_name":"python#4dict.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"26767546755","text":"import pygame\n\nfrom .settings import player_dict\nfrom .manager import player_select_def, explosion_type_def\nfrom .tools import Sprite_sheet, Timer, Particles\nfrom .weapons import Bullet, Missile\n\n\nclass Player(Sprite_sheet):\n\n def __init__(self, screen, style, model, lives, *args, **kwargs):\n player_img, player_action_dict = player_select_def(style, model)\n super().__init__(player_img)\n self.screen = screen\n self.select = style\n self.lives = lives\n\n self.weapon = args[0][0][0]\n self.level = args[0][0][1]\n self.score = args[0][0][2]\n self.dead_enemy = args[0][0][3]\n self.dead_meteor = args[0][0][4]\n self.SCREEN_W = args[0][0][5]\n self.SCREEN_H = args[0][0][6]\n\n self.bullet_group 
= args[0][1][0]\n self.missile_group = args[0][1][1]\n self.enemy_group = args[0][1][2]\n self.meteor_group = args[0][1][3]\n self.explosion_group = args[0][1][4]\n\n # Load player image and rect\n self.create_animation(200, 200, player_action_dict, scale=0.5)\n explosion_img, explosion_dict = explosion_type_def(2)\n self.sheet = pygame.image.load(explosion_img).convert_alpha()\n self.create_animation(100, 100, explosion_dict)\n self.image = self.animation_dict[self.action][self.frame_index]\n self.rect = self.image.get_rect(center=(self.SCREEN_W//2, self.SCREEN_H+self.SCREEN_H//10))\n\n self.ammo = (self.level*100 + player_dict['ammo'][self.select]) // 2\n self.start_ammo = self.ammo\n self.shoot_cooldown = 0\n self.load = self.level\n self.start_load = self.load\n self.throw_cooldown = 0\n\n self.vector = pygame.math.Vector2\n self.delta = self.vector(0, 0)\n self.speed = self.vector(0, 0)\n self.max_speed = player_dict['speed'][self.select]\n self.init_speed = self.max_speed\n\n self.direction_x = 1\n self.direction_y = -1\n self.auto_init = False\n self.auto_land = False\n\n self.alive = True\n self.health = player_dict['health'][self.select]\n self.max_health = self.health\n self.shield = False\n self.win = False\n\n # Define player action variables\n self.spawn = True\n self.turbo = False\n self.collide = False\n\n self.moving_left = False\n self.moving_right = False\n self.moving_up = False\n self.moving_down = False\n\n # Define item variables\n self.less_time = False\n self.freeze = False\n self.atomic = False\n self.turbo_up = 0\n self.vision = pygame.Rect(self.SCREEN_W//20, self.SCREEN_H//7, self.SCREEN_W-self.SCREEN_W//10, self.SCREEN_H-self.SCREEN_H//5.5)\n self.particles = Particles('fire', self.screen, self.image, self.select)\n self.timer_list = []\n for _ in range(2):\n self.timer_list.append(Timer())\n\n def update(self):\n # Update player events\n if self.alive: self.move()\n self.check_collision()\n self.update_animation(self.animation_cooldown)\n if self.alive and not self.spawn and not self.auto_init:\n self.particles.add_circle(self.rect.centerx-self.rect.width//4, self.rect.bottom-self.rect.height//10, self.direction_x, self.direction_y)\n self.particles.add_circle(self.rect.centerx+self.rect.width//4, self.rect.bottom-self.rect.height//10, self.direction_x, self.direction_y)\n # Update cooldown\n if self.shoot_cooldown > 0:\n self.shoot_cooldown -= 1\n if self.throw_cooldown > 0:\n self.throw_cooldown -= 1\n\n def move(self):\n self.speed = self.vector(0, 0)\n\n if self.moving_left and self.moving_right or not self.moving_left and not self.moving_right:\n # Recover the axis of delta x and rotate to 0\n if self.delta.x > 0.1: self.delta.x -= 0.1\n elif self.delta.x < 0.0: self.delta.x += 0.1\n else: self.delta.x = 0\n\n if self.rotate > 0.0: self.rotate -= 0.1\n elif self.rotate < 0.0: self.rotate += 0.1\n else:\n if self.moving_left:\n self.speed.x = -0.1\n if self.rotate < 5: self.rotate += 0.5\n self.direction_x = -1\n self.update_action('left')\n\n if self.moving_right:\n self.speed.x = 0.1\n if self.rotate > -5: self.rotate -= 0.5\n self.direction_x = 1\n self.update_action('right')\n\n if self.moving_up and self.moving_down or not self.moving_up and not self.moving_down:\n # Recover the axis of delta y to 0\n if self.delta.y > 0.1: self.delta.y -= 0.1\n elif self.delta.y < 0.0: self.delta.y += 0.1\n else: self.delta.y = 0\n\n else:\n if self.moving_up:\n self.speed.y = -0.1\n\n if self.moving_down:\n self.speed.y = 0.1\n\n # Check if going off the edges of the 
screen\n if self.limit_left(self.rect.width//10):\n if self.limit_left(): self.speed.x = 0.1\n else: self.moving_left = False\n\n if self.limit_right(self.rect.width//10):\n if self.limit_right(): self.speed.x = -0.1\n else: self.moving_right = False\n\n if self.limit_up(self.rect.height//10):\n if self.limit_up(): self.speed.y = 0.1\n else: self.moving_up = False\n\n if self.limit_down(self.rect.height//10):\n if self.limit_down(): self.speed.y = -0.1\n else: self.moving_down = False\n\n # Limits the maximum speed\n if not self.spawn and not self.collide:\n if self.moving_left and self.delta.x > -self.max_speed:\n self.delta.x += self.speed.x\n if self.moving_right and self.delta.x < self.max_speed:\n self.delta.x += self.speed.x\n if self.moving_up and self.delta.y > -self.max_speed:\n self.delta.y += self.speed.y\n if self.moving_down and self.delta.y < self.max_speed:\n self.delta.y += self.speed.y\n\n # Update the movement of the rectangle\n if self.alive:\n self.rect.x += self.delta.x + self.max_speed * self.speed.x\n self.rect.y += self.delta.y + self.max_speed * self.speed.y\n\n # print(self.rect.center, self.delta, self.speed, self.max_speed)\n\n def auto_movement(self):\n if self.win:\n if not self.auto_init:\n self.turbo = False\n\n if self.rect.centerx < self.SCREEN_W//2:\n self.moving_right = True\n else: self.moving_right = False\n\n if self.rect.centerx > self.SCREEN_W//2:\n self.moving_left = True\n else: self.moving_left = False\n\n if self.rect.centery < self.SCREEN_H//2:\n self.moving_down = True\n else: self.moving_down = False\n\n if self.rect.centery > self.SCREEN_H//2:\n self.moving_up = True\n else: self.moving_up = False\n\n if not self.moving_left and not self.moving_right and not self.moving_up and not self.moving_down:\n self.auto_init = True\n else:\n if not self.auto_land:\n if not self.limit_up(): self.rect.y -= 0.1\n\n if self.limit_up(self.rect.height):\n if self.rotate < 90: self.rotate += 1\n else: self.auto_land = True\n else:\n if self.rect.width > 1 and self.rect.height > 1:\n self.rect.width -= 1\n self.rect.height -= 1\n\n if self.rotate < 180: self.rotate += 1\n else:\n self.auto_init = False\n self.auto_land = False\n\n return True\n\n def shoot(self, *args):\n if self.shoot_cooldown == 0 and self.ammo > 0:\n self.shoot_cooldown = 10\n # create bullet ammo\n bullet = Bullet('player', self.screen, self.weapon, self.select, self.rect.centerx, self.rect.centery-self.rect.height//2, self.rect.width, self.direction_y,\\\n self.flip_x, self.flip_y, self, self.bullet_group, self.enemy_group, self.meteor_group, self.SCREEN_W, self.SCREEN_H)\n self.bullet_group.add(bullet)\n # Reduce ammo\n self.ammo -= 1\n args[1].play()\n # Sound if weapon is unloaded\n elif self.ammo == 0: args[0].play()\n\n def throw(self, *args):\n if self.throw_cooldown == 0 and self.load > 0:\n self.throw_cooldown = 400\n # Create missile load\n missile = Missile('player', self.screen, self.select, self.rect.centerx, self.rect.top, self.direction_y, self.flip_x, self.flip_y,\\\n self, self.enemy_group, self.meteor_group, self.explosion_group, args[1], args[2], args[3])\n self.missile_group.add(missile)\n # Reduce load\n self.load -= 1\n args[1].play()\n # Sound if weapon is unloaded\n elif self.load == 0: args[0].play()\n\n # Check if the collision with the enemy or obstacles\n def check_collision(self):\n if self.freeze:\n if self.timer_list[0].time(5, True):\n self.freeze = False\n\n if self.atomic:\n if self.timer_list[1].time(5, True):\n self.atomic = False\n # Check if there is 
any threat inside the screen\n for enemy in self.enemy_group:\n if self.vision.colliderect(enemy.rect):\n enemy.health = 0\n self.atomic = False\n\n for meteor in self.meteor_group:\n if self.vision.colliderect(meteor.rect):\n meteor.health = 0\n self.atomic = False\n\n # pygame.draw.rect(self.screen, (255, 0, 0), self.vision)\n\n def check_alive(self, *args):\n if self.health <= 0:\n if self.alive:\n self.alive = False\n self.health = 0\n # self.score = 0\n self.speed = self.vector(0, 0)\n # args[0].play()\n\n self.animation_cooldown = self.animation_cooldown // 2\n self.update_action('death')\n else:\n self.update_action('idle')\n\n def limit_left(self, value=0):\n return self.rect.left + self.delta.x < self.rect.width//10 + value\n\n def limit_right(self, value=0):\n return self.rect.right + self.delta.x > self.SCREEN_W - (self.rect.height//10 + value)\n\n def limit_up(self, value=0):\n return self.rect.top + self.delta.y < self.rect.height + value\n\n def limit_down(self, value=0):\n return self.rect.bottom + self.delta.y > self.SCREEN_H - (self.rect.height//10 + value)\n","repo_name":"Seven-z01/The-Quest","sub_path":"Assets/Scripts/players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":11135,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"74456352603","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 -*-\n\"\"\"\nfiberassign.scripts.qa_plot\n==============================\n\nHigh-level functions for plotting QA output.\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport argparse\nimport json\n\nfrom ..vis import plot_qa\n\n\ndef parse_plot_qa(optlist=None):\n \"\"\"Parse QA plotting options.\n\n This parses either sys.argv or a list of strings passed in. If passing\n an option list, you can create that more easily using the\n :func:`option_list` function.\n\n Args:\n optlist (list, optional): Optional list of arguments to parse instead\n of using sys.argv.\n\n Returns:\n (namespace): an ArgumentParser namespace.\n\n \"\"\"\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--qafile\", type=str, required=True, default=None,\n help=\"Input QA file.\")\n\n parser.add_argument(\"--outroot\", type=str, required=False, default=None,\n help=\"Output root file name. 
Default uses input.\")\n\n    parser.add_argument(\"--labels\", required=False, default=False,\n                        action=\"store_true\",\n                        help=\"Plot tile IDs at center of circles.\")\n\n    args = None\n    if optlist is None:\n        args = parser.parse_args()\n    else:\n        args = parser.parse_args(optlist)\n\n    # Check directory\n    if not os.path.isfile(args.qafile):\n        raise RuntimeError(\"Input file {} does not exist\".format(args.qafile))\n\n    if args.outroot is None:\n        args.outroot = os.path.splitext(args.qafile)[0]\n\n    return args\n\n\ndef run_plot_qa(args):\n    \"\"\"Run QA plotting.\n\n    This uses the previously parsed options to read input data and make a\n    plot of the QA results.\n\n    Args:\n        args (namespace): The parsed arguments.\n\n    Returns:\n        None\n\n    \"\"\"\n    qadata = None\n    with open(args.qafile, \"r\") as f:\n        qadata = json.load(f)\n    plot_qa(qadata, args.outroot, labels=args.labels)\n    return\n","repo_name":"desihub/fiberassign","sub_path":"py/fiberassign/scripts/qa_plot.py","file_name":"qa_plot.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"86"} +{"seq_id":"72480768604","text":"# 2. Given a natural number, write a program that determines whether the given number consists of identical digits.\n\n# read the input number\nnum = input()\n# loop over every digit of the number\nfor i in num:\n    # if the first digit is not equal to the i-th digit, print \"no\" and break out of the loop\n    if num[0] != i:\n        print('no')\n        break\n# if the loop was not interrupted, every digit of the number equals the first digit,\n# which means they are all equal, so \"yes\" is printed\nelse:\n    print('yes')\n","repo_name":"YuriyDerkach/Courses","sub_path":"homework/homework4/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"23563826584","text":"# encoding: utf-8\n\"\"\"\nThis module provides an object oriented interface for pattern matching\nof files.\n\"\"\"\n\nfrom . 
import util\nfrom .compat import Collection, iterkeys, izip_longest, string_types, unicode\n\n\nclass PathSpec(object):\n\t\"\"\"\n\tThe :class:`PathSpec` class is a wrapper around a list of compiled\n\t:class:`.Pattern` instances.\n\t\"\"\"\n\n\tdef __init__(self, patterns):\n\t\t\"\"\"\n\t\tInitializes the :class:`PathSpec` instance.\n\n\t\t*patterns* (:class:`~collections.abc.Collection` or :class:`~collections.abc.Iterable`)\n\t\tyields each compiled pattern (:class:`.Pattern`).\n\t\t\"\"\"\n\n\t\tself.patterns = patterns if isinstance(patterns, Collection) else list(patterns)\n\t\t\"\"\"\n\t\t*patterns* (:class:`~collections.abc.Collection` of :class:`.Pattern`)\n\t\tcontains the compiled patterns.\n\t\t\"\"\"\n\n\tdef __eq__(self, other):\n\t\t\"\"\"\n\t\tTests the equality of this path-spec with *other* (:class:`PathSpec`)\n\t\tby comparing their :attr:`~PathSpec.patterns` attributes.\n\t\t\"\"\"\n\t\tif isinstance(other, PathSpec):\n\t\t\tpaired_patterns = izip_longest(self.patterns, other.patterns)\n\t\t\treturn all(a == b for a, b in paired_patterns)\n\t\telse:\n\t\t\treturn NotImplemented\n\n\tdef __len__(self):\n\t\t\"\"\"\n\t\tReturns the number of compiled patterns this path-spec contains\n\t\t(:class:`int`).\n\t\t\"\"\"\n\t\treturn len(self.patterns)\n\n\tdef __add__(self, other):\n\t\t\"\"\"\n\t\tCombines the :attr:`Pathspec.patterns` patterns from two\n\t\t:class:`PathSpec` instances.\n\t\t\"\"\"\n\t\tif isinstance(other, PathSpec):\n\t\t\treturn PathSpec(self.patterns + other.patterns)\n\t\telse:\n\t\t\treturn NotImplemented\n\n\tdef __iadd__(self, other):\n\t\t\"\"\"\n\t\tAdds the :attr:`Pathspec.patterns` patterns from one :class:`PathSpec`\n\t\tinstance to this instance.\n\t\t\"\"\"\n\t\tif isinstance(other, PathSpec):\n\t\t\tself.patterns += other.patterns\n\t\t\treturn self\n\t\telse:\n\t\t\treturn NotImplemented\n\n\t@classmethod\n\tdef from_lines(cls, pattern_factory, lines):\n\t\t\"\"\"\n\t\tCompiles the pattern lines.\n\n\t\t*pattern_factory* can be either the name of a registered pattern\n\t\tfactory (:class:`str`), or a :class:`~collections.abc.Callable` used\n\t\tto compile patterns. It must accept an uncompiled pattern (:class:`str`)\n\t\tand return the compiled pattern (:class:`.Pattern`).\n\n\t\t*lines* (:class:`~collections.abc.Iterable`) yields each uncompiled\n\t\tpattern (:class:`str`). This simply has to yield each line so it can\n\t\tbe a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`)\n\t\tor the result from :meth:`str.splitlines`.\n\n\t\tReturns the :class:`PathSpec` instance.\n\t\t\"\"\"\n\t\tif isinstance(pattern_factory, string_types):\n\t\t\tpattern_factory = util.lookup_pattern(pattern_factory)\n\t\tif not callable(pattern_factory):\n\t\t\traise TypeError(\"pattern_factory:{!r} is not callable.\".format(pattern_factory))\n\n\t\tif not util._is_iterable(lines):\n\t\t\traise TypeError(\"lines:{!r} is not an iterable.\".format(lines))\n\n\t\tlines = [pattern_factory(line) for line in lines if line]\n\t\treturn cls(lines)\n\n\tdef match_file(self, file, separators=None):\n\t\t\"\"\"\n\t\tMatches the file to this path-spec.\n\n\t\t*file* (:class:`str` or :class:`~pathlib.PurePath`) is the file path\n\t\tto be matched against :attr:`self.patterns `.\n\n\t\t*separators* (:class:`~collections.abc.Collection` of :class:`str`)\n\t\toptionally contains the path separators to normalize. 
See\n\t\t:func:`~pathspec.util.normalize_file` for more information.\n\n\t\tReturns :data:`True` if *file* matched; otherwise, :data:`False`.\n\t\t\"\"\"\n\t\tnorm_file = util.normalize_file(file, separators=separators)\n\t\treturn util.match_file(self.patterns, norm_file)\n\n\tdef match_entries(self, entries, separators=None):\n\t\t\"\"\"\n\t\tMatches the entries to this path-spec.\n\n\t\t*entries* (:class:`~collections.abc.Iterable` of :class:`~util.TreeEntry`)\n\t\tcontains the entries to be matched against :attr:`self.patterns `.\n\n\t\t*separators* (:class:`~collections.abc.Collection` of :class:`str`;\n\t\tor :data:`None`) optionally contains the path separators to\n\t\tnormalize. See :func:`~pathspec.util.normalize_file` for more\n\t\tinformation.\n\n\t\tReturns the matched entries (:class:`~collections.abc.Iterable` of\n\t\t:class:`~util.TreeEntry`).\n\t\t\"\"\"\n\t\tif not util._is_iterable(entries):\n\t\t\traise TypeError(\"entries:{!r} is not an iterable.\".format(entries))\n\n\t\tentry_map = util._normalize_entries(entries, separators=separators)\n\t\tmatch_paths = util.match_files(self.patterns, iterkeys(entry_map))\n\t\tfor path in match_paths:\n\t\t\tyield entry_map[path]\n\n\tdef match_files(self, files, separators=None):\n\t\t\"\"\"\n\t\tMatches the files to this path-spec.\n\n\t\t*files* (:class:`~collections.abc.Iterable` of :class:`str; or\n\t\t:class:`pathlib.PurePath`) contains the file paths to be matched\n\t\tagainst :attr:`self.patterns `.\n\n\t\t*separators* (:class:`~collections.abc.Collection` of :class:`str`;\n\t\tor :data:`None`) optionally contains the path separators to\n\t\tnormalize. See :func:`~pathspec.util.normalize_file` for more\n\t\tinformation.\n\n\t\tReturns the matched files (:class:`~collections.abc.Iterable` of\n\t\t:class:`str`).\n\t\t\"\"\"\n\t\tif not util._is_iterable(files):\n\t\t\traise TypeError(\"files:{!r} is not an iterable.\".format(files))\n\n\t\tfile_map = util.normalize_files(files, separators=separators)\n\t\tmatched_files = util.match_files(self.patterns, iterkeys(file_map))\n\t\tfor path in matched_files:\n\t\t\tyield file_map[path]\n\n\tdef match_tree_entries(self, root, on_error=None, follow_links=None):\n\t\t\"\"\"\n\t\tWalks the specified root path for all files and matches them to this\n\t\tpath-spec.\n\n\t\t*root* (:class:`str`; or :class:`pathlib.PurePath`) is the root\n\t\tdirectory to search.\n\n\t\t*on_error* (:class:`~collections.abc.Callable` or :data:`None`)\n\t\toptionally is the error handler for file-system exceptions. See\n\t\t:func:`~pathspec.util.iter_tree_entries` for more information.\n\n\t\t*follow_links* (:class:`bool` or :data:`None`) optionally is whether\n\t\tto walk symbolic links that resolve to directories. See\n\t\t:func:`~pathspec.util.iter_tree_files` for more information.\n\n\t\tReturns the matched files (:class:`~collections.abc.Iterable` of\n\t\t:class:`str`).\n\t\t\"\"\"\n\t\tentries = util.iter_tree_entries(root, on_error=on_error, follow_links=follow_links)\n\t\treturn self.match_entries(entries)\n\n\tdef match_tree_files(self, root, on_error=None, follow_links=None):\n\t\t\"\"\"\n\t\tWalks the specified root path for all files and matches them to this\n\t\tpath-spec.\n\n\t\t*root* (:class:`str`; or :class:`pathlib.PurePath`) is the root\n\t\tdirectory to search for files.\n\n\t\t*on_error* (:class:`~collections.abc.Callable` or :data:`None`)\n\t\toptionally is the error handler for file-system exceptions. 
See\n\t\t:func:`~pathspec.util.iter_tree_files` for more information.\n\n\t\t*follow_links* (:class:`bool` or :data:`None`) optionally is whether\n\t\tto walk symbolic links that resolve to directories. See\n\t\t:func:`~pathspec.util.iter_tree_files` for more information.\n\n\t\tReturns the matched files (:class:`~collections.abc.Iterable` of\n\t\t:class:`str`).\n\t\t\"\"\"\n\t\tfiles = util.iter_tree_files(root, on_error=on_error, follow_links=follow_links)\n\t\treturn self.match_files(files)\n\n\t# Alias `match_tree_files()` as `match_tree()`.\n\tmatch_tree = match_tree_files\n","repo_name":"ray-project/ray","sub_path":"python/ray/_private/thirdparty/pathspec/pathspec.py","file_name":"pathspec.py","file_ext":"py","file_size_in_byte":7027,"program_lang":"python","lang":"en","doc_type":"code","stars":28715,"dataset":"github-code","pt":"86"} +{"seq_id":"39244533404","text":"from collections import deque\nimport sys\n\nN = int(input())\ngra = [[1]*(N+2)]\nfor _ in range(N):\n a = [1]+[0]*(N)+[1]\n gra.append(a)\ngra.append([1]*(N+2))\n\nK = int(input())\nfor _ in range(K):\n a, b = map(int, input().split())\n gra[a][b] = 2\n\n\nturn = list(map(lambda x : [int(x[0]), x[1]], [input().split() for _ in range(int(input()))] ))\nturn.sort(reverse = True)\n\n\nd = [0, 1, 2, 3]\ndic = {0:[0,1], 1:[1,0], 2:[0,-1], 3:[-1, 0]}\nnow_d = 0\n\nQ = deque([(1, 1)])\ngra[1][1] = 1\ntime = 0\n\nwhile True:\n\n x, y = Q[0][0], Q[0][1]\n xx, yy = dic[d[now_d]][0]+x, dic[d[now_d]][1]+y\n if gra[xx][yy] == 1:\n print(time+1)\n break\n elif gra[xx][yy] == 2:\n Q.appendleft((xx, yy))\n gra[xx][yy] = 1\n time += 1\n elif gra[xx][yy] == 0:\n Q.appendleft((xx, yy))\n gra[xx][yy] = 1\n gra[Q[-1][0]][Q[-1][1]] = 0\n Q.pop()\n time += 1\n if turn and time == turn[-1][0]:\n if turn[-1][1] == 'L':\n now_d -= 1\n else:\n now_d += 1\n if now_d == 4 or now_d == -4:\n now_d = 0\n turn.pop()\n\n \n \n","repo_name":"syoung7388/CodingTest","sub_path":"뱀.py","file_name":"뱀.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"44489115998","text":"from motobot import match, Action\nfrom random import uniform\n\n\n@match(r'\\*(.+? 
)(stabs) desubot')\ndef stab_match(bot, context, message, match):\n    if uniform(0, 100) >= 50:\n        response = [\"parries {}\".format(context.nick)]\n        if uniform(0, 100) >= 50:\n            response.append(\"ripostes {}\".format(context.nick))\n    else:\n        response = \"dies\"\n    return response, Action\n","repo_name":"Motoko11/desubot","sub_path":"plugins/fun/parry.py","file_name":"parry.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"11637359194","text":"from tile import Tile\nimport numpy as np\nimport random\n\nclass Grid():\n    def __init__(self, size = [9, 9]):\n        self.fill_grid(size)\n        self.place_bombs()\n        self.place_nums()\n        self.clicked = []\n\n    def size(self):\n        return np.prod(self.grid.shape)\n\n    def fill_grid(self, size):\n        self.grid = np.empty(size, dtype = type(Tile))\n        for i in range(len(self.grid)):\n            for j in range(len(self.grid[0])):\n                self.grid[i,j] = Tile() #revealed = True)\n\n    def place_bombs(self):\n        for i in range(len(self.grid)):\n            for j in range(len(self.grid[0])):\n                if random.random() < 0.10:\n                    self.grid[i, j].value = -1\n\n    def place_nums(self):\n        for i in range(len(self.grid)):\n            for j in range(len(self.grid[0])):\n                if self.grid[i, j].value == -1:\n                    self.add_to_adjacent_tiles([i, j])\n    \n    def add_to_adjacent_tiles(self, pos):\n        x, y = pos\n        adjacents = [[x+1,y], [x+1,y+1], [x,y+1], [x-1,y+1], [x-1,y], [x-1,y-1], [x,y-1], [x+1,y-1]]\n        for pos in adjacents:\n            i, j = pos\n            if 0 <= i < 9 and 0 <= j < 9 and self.grid[i, j].value != -1:\n                self.grid[i, j].value += 1\n\n    def render(self):\n        print(\"   \", end = \"\")\n        for j in range(len(self.grid[0])):\n            print(j, end = \" \")\n        \n        print(\"\\n------------------------\")\n        for i in range(len(self.grid)):\n            print(\"{}| \".format(i), end = \"\")\n            for j in range(len(self.grid[0])):\n                if self.grid[i, j].revealed:\n                    print(\"{} \".format(self.grid[i, j].value), end = \"\")\n                elif self.grid[i, j].flagged == True:\n                    print(\"F \", end = \"\")\n                else:\n                    print(\"  \", end = \"\")\n            print(\"\")\n    \n    def execute(self, pos, val):\n        if val == 'F' or val == 'f':\n            return self.flag(pos)\n        elif val == 'C' or val == 'c':\n            return self.click(pos)\n\n\n    def flag(self, pos):\n        x, y = pos\n        return self.grid[x, y].flag()\n\n    def click(self, pos):\n        x, y = pos\n        successful = self.grid[x, y].reveal()\n        if successful == 0: #indicates a tile with no bombs surrounding it.\n            self.click_adjacents(pos)\n            return True\n        elif successful == -1: #indicates a bomb\n            return False\n        else:\n            self.clicked.append([x, y])\n            return True\n\n    def click_adjacents(self, pos):\n        x, y = pos\n        self.clicked.append([x, y])\n        adjacents = [[x+1,y], [x+1,y+1], [x,y+1], [x-1,y+1], [x-1,y], [x-1,y-1], [x,y-1], [x+1,y-1]]\n        for adj_pos in adjacents:\n            i, j = adj_pos\n            if 0 <= i < 9 and 0 <= j < 9 and [i, j] not in self.clicked:\n                self.click([i, j])\n\n\n    def solved(self):\n        for i in range(len(self.grid)):\n            for j in range(len(self.grid[0])):\n                if self.grid[i, j].value == -1 and self.grid[i, j].flagged == False:\n                    return False\n        return True\n\nif __name__ == \"__main__\":\n    g = Grid()\n    g.render()\n","repo_name":"Rossbarr/aao_minesweeper","sub_path":"grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"26026317144","text":"import json\nimport requests\n\n\nclass InferTest(object):\n    def __init__(self):\n        self.token = 
\"ZDQ3MzVlM2EyNjVlMTZlZWUwM2Y1OTcxOGI5YjVkMDMwMTljMDdkOGI2YzUxZjkwZGEzYTY2NmVlYzEzYWIzNTI2ODg1NTEyN2QyYjM3ZWUwZDk3ZDBmNjQwMjIyNmJjNjBkNmIwMTRkZjcxNTQ2NzMwMjYwYjc2MDUzNzc1MWItMg==\"\n\n def inference_create_from_train(self):\n \"\"\"\n token: str 用户验证信息\n infer_name: str 推理名称\n train_id: int trainID\n model_name: str model_name\n prefix_cmd: str run command\n\n :return: bool 成功标志\n \"\"\"\n\n header = {\n \"token\": self.token\n }\n data = {\n \"infer_name\": \"infer-demo-20220107\",\n \"train_id\": 20,\n \"model_name\": \"FasterRCNN_10_0.125.pkl\",\n \"description\": \"hahaha\",\n \"params\": {\n # \"class_id\": 1,\n # \"data_limit\": 'tiff, tif',\n # \"is_formal\": True,\n # 'resource_info' : {\n # \"cpu_count\": 1,\n # \"mem_size\": 4 * 1024 * 1024 * 1024,\n # \"gpu_dict\": {\"GeForce RTX 2080 Ti\": 1},\n # }\n }\n }\n url = 'http://0.0.0.0:5000/airserver-2.0/inference_create_from_train/'\n\n r = requests.post(url, data=json.dumps(data), headers=header)\n print(r)\n\n def inference_publish_to_intelligent_platform(self):\n header = {\n \"token\": self.token\n }\n data = {\n \"infer_id\": 22,\n \"class_id\": 1,\n \"data_limit\": 'tiff, tif',\n \"is_formal\": True,\n 'resource_info' : {\n \"cpu_count\": 1,\n \"mem_size\": 4 * 1024 * 1024 * 1024,\n \"gpu_dict\": {\"GeForce RTX 2080 Ti\": 1},\n }\n }\n url = 'http://0.0.0.0:5000/airserver-2.0/inference_publish_to_intelligent_platform/'\n\n r = requests.post(url, data=json.dumps(data), headers=header)\n print(r)\n\n def inference_query(self):\n \"\"\"\n 根据 user_id 查询 inference 信息\n token: str 用户验证信息\n\n :return: 查询到的inference信息\n \"\"\"\n header = {\n \"token\": self.token,\n }\n data = {\n \"page_size\": 10,\n \"page_num\": 1,\n\n \"grep_condition\": {\n \"get_template_info_detail\": True,\n \"infer_name\": \"Tensorflow\",\n \"task_type\": \"模板\",\n \"framework\": \"分类\"\n\n }\n }\n\n url = 'http://192.168.9.64:33135/airserver-2.0/inference_query/'\n\n r = requests.post(url, headers=header, data=json.dumps(data),)\n print(r.json())\n\n def inference_delete(self):\n # 删除inference\n header = {\n \"token\": self.token\n }\n data = {\n \"infer_id\": 4,\n }\n url = 'http://0.0.0.0:5000/airserver-2.0/inference_delete/'\n\n r = requests.delete(url, data=json.dumps(data), headers=header)\n print(r.json())\n\n\nif __name__ == \"__main__\":\n t = InferTest()\n\n # t.inference_create_from_train()\n t.inference_publish_to_intelligent_platform()\n # t.inference_query()\n # t.inference_delete()\n","repo_name":"xiaoyuan1996/AirPipeline","sub_path":"code/test/infer_test.py","file_name":"infer_test.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"26059759336","text":"from flask import Flask, request, jsonify\r\nimport numpy as np\r\nimport pickle\r\nmodel = pickle.load(open('model.pkl', 'rb'))\r\napp = Flask(__name__)\r\n@app.route('/')\r\ndef index():\r\n return \"Hello world\"\r\n@app.route('/predict', methods=['POST'])\r\ndef predict():\r\n clump_thick = request.form.get('clump_thickness')\r\n size_uniformity = request.form.get('size_uniformity')\r\n shape_uniformity = request.form.get('shape_uniformity')\r\n marginal_adhesion = request.form.get('marginal_adhesion')\r\n epithelial_size = request.form.get('epithelial_size')\r\n bare_nucleoli = request.form.get('bare_nucleoli')\r\n bland_chromatin = request.form.get('bland_chromatin')\r\n normal_nucleoli = request.form.get('normal_nucleoli')\r\n mitoses = request.form.get('mitoses')\r\n input_query = 
np.array([[clump_thick, size_uniformity, shape_uniformity, marginal_adhesion,\r\n epithelial_size, bare_nucleoli, bland_chromatin, normal_nucleoli, mitoses]])\r\n result = model.predict(input_query)[0]\r\n return jsonify({'class': str(result)})\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)","repo_name":"AnjaliRana18/Breast_Cancer_Prediction_app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"74174009563","text":"import os\nimport yaml\nimport pandas as pd\nimport math\n\nfrom utility_lib import required_input_file_dict\nfrom func_lib import (get_file_from_folder_by_type,\n check_required_files_exist,\n check_required_column_names_exist,\n func_running_time,\n path2linux,\n validate_filename,\n generate_absolute_path)\n\nfrom DTA import (Assignment,\n DemandPeriod,\n Node,\n Link,\n LinkType,\n GDPoint,\n e_traffic_flow_model,\n e_VDF_type)\n\nfrom DTA import (MIN_PER_TIME_SLOT,\n MAX_TIME_INTERVAL_PER_DAY,\n DTA_Direction,\n TMC_Corridor_Info)\nfrom cbi_reading import g_measurement_tstamp_parser, TMCLink\n\n\nclass CBI_TOOL:\n def __init__(self, path_input_folder: str) -> None:\n # initialize variables for the CBI TOOL\n self.__initial_values()\n\n self.path_input_folder = path_input_folder\n\n # Check if all required input files are provided in the input folder\n self.isRequired = self.__check_required_files_exist_in_folder()\n\n if self.isRequired:\n # prepare demand period for assignment\n self.__prepare_demand_period_for_assignment()\n else:\n raise Exception(\"Required input files are not provided in the input folder!\")\n\n def __initial_values(self):\n\n self.g_node_vector = [] # Node\n self.g_link_vector = [] # Link\n self.g_vdf_type_map = {} # VDF_Type\n self.g_corridor_info_base0_map = {}\n self.g_corridor_info_SA_map = {} # Corridor_Info\n\n self.g_tmc_corridor_vector = {} # TMC_Corridor_Info\n self.g_TMC_vector = [] # TMC_Link\n\n self.g_related_zone_vector_size = 0\n\n # initialize assignment object\n self.assignment = Assignment()\n\n self.node_col_name = [\"node_id\", \"node_no\", \"layer_no\", \"agent_id\", \"sequence_no\",\n \"distance_from_origin\", \"MRM_gate_flag\", \"node_type\", \"is_boundary\",\n \"#_of_outgoing_nodes\", \"activity_node_flag\", \"agent_type\",\n \"zone_id\", \"cell_code\", \"info_zone_flag\", \"x_coord\", \"y_coord\"]\n self.link_col_name = [\"link_id\", \"link_no\", \"layer_no\", \"from_node_id\", \"to_node_id\",\n \"from_gate_flag\", \"to_gate_flag\", \"link_type\", \"link_type_name\",\n \"lanes\", \"link_distance_VDF\", \"free_speed\", \"cutoff_speed\", \"fftt\",\n \"capacity\", \"allow_uses\", \"BPR_plf\", \"BPR_alpha\", \"BPR_beta\",\n \"QVDF_qdf\", \"QVDF_alpha\", \"QVDF_beta\", \"QVDF_cd\", \"QVDF_n\", \"geometry\"]\n\n _cbi_col_1 = [\"link_id\", \"tmc\", \"tmc_corridor_name\", \"tmc_corridor_id\", \"tmc_road_order\",\n \"tmc_road_sequence\", \"tmc_road\", \"tmc_direction\", \"tmc_intersection\",\n \"tmc_highest_speed\", \"link_no\", \"from_node_id\", \"to_node_id\",\n \"link_type\", \"link_type_code\", \"FT\", \"AT\", \"vdf_code\",\n \"nlanes\", \"link_distance_VDF\", \"free_speed\", \"capacity\",\n \"k_critical\", \"v_congestion_cutoff\"]\n _cbi_col_2 = [\"highest_speed\", \"vcutoff_updated\", \"vcutoff_ratio\", \"v_critical_s3\"]\n\n _cbi_col_3 = [f\"{period}_{name}\" for period in [\"AM\",\"MD\",\"PM\"] for name in 
[\"t0\",\"t3\",\"V\",\"peak_hour_volume\",\"D\",\"VC_ratio\",\"DC_ratio\",\"P\",\"vc/vt2-1\",\"vf_delay_index\",\"vc_delay_index\",\"speed_ph\",\"queue_speed\",\"vt2\",\"plf\",\"Q_n\",\"Q_cp\",\"Q_alpha\",\"Q_beta\",\"mV\",\"mD\",\"mDC_ratio\",\"mP\",\"mv_QVDF\",\"mvt2_QVDF\",\"m_mu_QVDF\",\"m_gamma_QVDF\",\"m_peak_hour_volume\",\"mVC_ratio\",\"mv_BPR\"]]\n\n _cbi_col_3 += [\"geometry\", \"tmc_geometry\"]\n\n _cbi_col_4 = [f\"vh{divmod(hour, 60)[0]}\" for hour in range(6 * 60, 20 * 60, 60)]\n _cbi_col_5 = [f\"m_vh{divmod(hour, 60)[0]}\" for hour in range(\n 6 * 60, 20 * 60, 60)]\n\n _cbi_col_6 = [\"evhMAE\", \"evhMAPE\", \"evhRMSE\"]\n _cbi_col_7 = [f\"qh{divmod(hour, 60)[0]}\" for hour in range(6 * 60, 20 * 60, 60)]\n _cbi_col_8 = [f\"vhr{divmod(hour, 60)[0]}\" for hour in range(6 * 60, 20 * 60, 60)]\n\n _cbi_col_9 = [f\"v{divmod(hour, 60)[0]}: {divmod(hour, 60)[1]}\" for hour in range(6 * 60, 20 * 60, 5)]\n _cbi_col_10 = [f\"mv{divmod(hour, 60)[0]}: {divmod(hour, 60)[1]}\" for hour in range(6 * 60, 20 * 60, 5)]\n _cbi_col_11 = [f\"v{divmod(hour, 60)[0]}: {divmod(hour, 60)[0]}\" for hour in range(6 * 60, 20 * 60, 15)]\n _cbi_col_12 = [f\"mv{divmod(hour, 60)[0]}: {divmod(hour, 60)[0]}\" for hour in range(6 * 60, 20 * 60, 15)]\n\n _cbi_col_13 = [f\"q{divmod(hour, 60)[0]}: {divmod(hour, 60)[0]}\" for hour in range(6 * 60, 20 * 60, 5)]\n\n self.cbi_summary_col_name = _cbi_col_1 + _cbi_col_2 + _cbi_col_3 + _cbi_col_4 + _cbi_col_5 + _cbi_col_6 + _cbi_col_7 + _cbi_col_8 + _cbi_col_9 + _cbi_col_10 + _cbi_col_11 + _cbi_col_12 + _cbi_col_13\n\n self.link_qvdf_col_name = [\"data_type\",\"link_id\",\"tmc_corridor_name\",\"from_node_id\",\"to_node_id\",\"vdf_code\"]\n\n for i in range(3):\n self.link_qvdf_col_name += [f\"QVDF_plf{i+1}\",f\"QVDF_n{i+1}\",f\"QVDF_s{i+1}\",f\"QVDF_cp{i+1}\",f\"QVDF_cd{i+1}\",f\"QVDF_alpha{i+1}\",f\"QVDF_beta{i+1}\"]\n\n def __check_required_files_exist_in_folder(self) -> bool:\n # get all csv files in the input folder\n csv_files = get_file_from_folder_by_type(self.path_input_folder, file_type=\"csv\")\n yaml_files = get_file_from_folder_by_type(self.path_input_folder, file_type=\"yml\")\n return check_required_files_exist(list(required_input_file_dict.keys()), csv_files + yaml_files)\n\n def __prepare_demand_period_for_assignment(self) -> Assignment:\n self.assignment.g_LoadingStartTimeInMin = 99999\n self.assignment.g_LoadingEndTimeInMin = 0\n\n # AM, MD, Afternoon\n demand_period = DemandPeriod()\n am_md_afternoon = [(7 * 60, 9 * 60), (10 * 60, 14 * 60), (15 * 60, 19 * 60)]\n am_md_afternoon_demand_period = [\"AM\", \"MD\", \"PM\"]\n am_md_afternoon_demand_period_id = [1, 2, 2]\n\n for i in range(len(am_md_afternoon)):\n # create demand period with id and name\n global_minute_vector = am_md_afternoon[i]\n demand_period.demand_period_id = am_md_afternoon_demand_period_id[i]\n demand_period.demand_period = am_md_afternoon_demand_period[i]\n\n # update values accordingly\n demand_period.starting_time_slot_no = global_minute_vector[0] / MIN_PER_TIME_SLOT\n demand_period.ending_time_slot_no = global_minute_vector[1] / MIN_PER_TIME_SLOT\n demand_period.time_period_in_hour = (global_minute_vector[1] - global_minute_vector[0]) / 60\n demand_period.t2_peak_in_hour = (global_minute_vector[0] + global_minute_vector[1]) / 2 / 60\n\n # add demand period to assignment\n self.assignment.g_DemandPeriodVector.append(demand_period)\n\n # initialize counter to 0\n self.assignment.g_number_of_nodes = 0\n self.assignment.g_number_of_links = 0\n\n return self.assignment\n\n 
@func_running_time\n def read_settings_yaml_file(self) -> None:\n print(\"Start reading settings.yml...\\n\")\n path_yaml = path2linux(os.path.join(self.path_input_folder, \"settings.yml\"))\n\n # load yaml data\n with open(path_yaml, \"r\", encoding=\"utf-8\") as f:\n data_yaml = yaml.safe_load(f)\n df_yaml = pd.DataFrame(data_yaml)\n\n # check required column name\n isColumnNameRequired = check_required_column_names_exist(\n list(required_input_file_dict[\"settings.yml\"]),\n list(df_yaml.columns)\n )\n\n if not isColumnNameRequired:\n raise Exception(\"Column name not required: please check settings.yml file\")\n\n # create a special link typ for virtual connector\n element_vc = LinkType()\n element_vc.link_type = -1\n element_vc.type_code = \"c\"\n element_vc.traffic_flow_code = e_traffic_flow_model.SPATIAL_QUEUE\n self.assignment.g_LinkTypeMap[element_vc.link_type] = element_vc\n # end of create special link type for virtual connectors\n\n for i in range(len(df_yaml)):\n element = LinkType()\n\n element.link_type = df_yaml.loc[i, \"link_type\"]\n element.type_code = df_yaml.loc[i, \"type_code\"]\n element.vdf_type = e_traffic_flow_model\n\n # a new column called: vdf_type\n if \"vdf_type\" in df_yaml.columns:\n vdf_type_str = df_yaml.loc[i, \"vdf_type\"]\n else:\n vdf_type_str = \"\"\n\n if vdf_type_str == \"bpr\":\n element.vdf_type = e_VDF_type.BPR_VDF\n\n if vdf_type_str == \"qvdf\":\n element.vdf_type = e_VDF_type.Q_VDF\n\n element.traffic_flow_code = e_traffic_flow_model.SPATIAL_QUEUE\n\n if \"k_jam\" in df_yaml.columns:\n element.k_jam = df_yaml.loc[i, \"k_jam\"]\n\n traffic_flow_code_str = df_yaml.loc[i, \"traffic_flow_code\"]\n if traffic_flow_code_str == \"point_queue\":\n element.traffic_flow_code = e_traffic_flow_model.POINT_QUEUE\n\n if traffic_flow_code_str == \"spatial_queue\":\n element.traffic_flow_code = e_traffic_flow_model.SPATIAL_QUEUE\n\n if traffic_flow_code_str == \"kw\":\n element.traffic_flow_code = e_traffic_flow_model.KINEMATIVE_WAVE\n\n self.assignment.g_LinkTypeMap[element.link_type] = element\n\n print(\"Settings.yml loaded...\\n\")\n\n @func_running_time\n def read_tmc_identification_csv_file(self) -> None:\n print(\"Start reading tmc_identification.csv file...\\n\")\n path_tmc_identification = path2linux(os.path.join(self.path_input_folder, \"tmc_identification.csv\"))\n df_tmc_identification = pd.read_csv(path_tmc_identification)\n tmc_identification_col = list(df_tmc_identification.columns)\n\n # dictionary store key: value pair; string: int\n long_lat_string_to_node_id_mapping = {}\n\n # dictionary store key: value pair; int: int\n zone_id_to_analysis_district_id_mapping = {}\n\n internal_node_seq_no = 0\n\n # read Node data\n # Prepare and Save Node data to g_NodeVector and assignment\n for i in range(len(df_tmc_identification)):\n node = Node()\n\n x_coord_from = start_longitude = df_tmc_identification.loc[i, \"start_longitude\"]\n y_coord_from = start_latitude = df_tmc_identification.loc[i, \"start_latitude\"]\n x_coord_to = end_longitude = df_tmc_identification.loc[i, \"end_longitude\"]\n y_coord_to = end_latitude = df_tmc_identification.loc[i, \"end_latitude\"]\n direction = df_tmc_identification.loc[i, \"direction\"]\n\n if direction == \"EASTBOUND\":\n DTA_DIR = DTA_Direction(DTA_Direction.DTA_EAST)\n elif direction == \"NORTHBOUND\":\n DTA_DIR = DTA_Direction(DTA_Direction.DTA_NORTH)\n elif direction == \"SOUTHBOUND\":\n DTA_DIR = DTA_Direction(DTA_Direction.DTA_SOUTH)\n elif direction == \"WESTBOUND\":\n DTA_DIR = 
DTA_Direction(DTA_Direction.DTA_WEST)\n else:\n DTA_DIR = DTA_Direction(DTA_Direction.DTA_NULL)\n\n if \"tmc_corridor_name\" in tmc_identification_col:\n tmc_corridor_name = df_tmc_identification.loc[i, \"tmc_corridor_name\"]\n elif {\"road\", \"direction\"}.issubset(set(tmc_identification_col)):\n tmc_corridor_name = df_tmc_identification.loc[i, \"road\"] + \"_\" + df_tmc_identification.loc[i, \"direction\"]\n else:\n tmc_corridor_name = \"\"\n\n long_lat_string_from = f\"{str(start_longitude)}_{str(start_latitude)}\"\n long_lat_string_to = f\"{str(end_longitude)}_{str(end_latitude)}\"\n\n corridor = TMC_Corridor_Info()\n\n # if tmc_corridor_name not in dictionary of g_tmc_corridor_vector\n if tmc_corridor_name not in self.g_tmc_corridor_vector:\n\n corridor.tmc_corridor_id = len(self.g_tmc_corridor_vector) + 1\n corridor.m_dir = DTA_DIR\n self.g_tmc_corridor_vector[tmc_corridor_name] = corridor\n\n # if long_lat_string_from not in dictionary of long_lat_string_to_node_id_mapping\n if long_lat_string_from not in long_lat_string_to_node_id_mapping:\n # micro network filter\n pt = GDPoint()\n pt.x = x_coord_from\n pt.y = y_coord_from\n\n node_id = len(self.assignment.g_node_id_to_seq_no_map) + 1\n long_lat_string_to_node_id_mapping[long_lat_string_from] = node_id\n\n self.assignment.g_node_id_to_seq_no_map[node_id] = internal_node_seq_no\n node.node_id = node_id\n node.node_seq_no = internal_node_seq_no\n node.x = x_coord_from\n node.y = y_coord_from\n node.agent_id = tmc_corridor_name\n\n self.g_node_vector.append(node)\n\n self.g_tmc_corridor_vector[tmc_corridor_name].node_no_vector.append(internal_node_seq_no)\n pt.node_no = node.node_seq_no\n self.g_tmc_corridor_vector[tmc_corridor_name].point_vector.append(pt)\n\n internal_node_seq_no += 1\n\n self.assignment.g_number_of_nodes += 1\n\n # if long_lat_string_to not in dictionary of long_lat_string_to_node_id_mapping\n if long_lat_string_to not in long_lat_string_to_node_id_mapping:\n pt = GDPoint()\n pt.x = x_coord_to\n pt.y = y_coord_to\n\n node_id = len(self.assignment.g_node_id_to_seq_no_map) + 1\n long_lat_string_to_node_id_mapping[long_lat_string_to] = node_id\n self.assignment.g_node_id_to_seq_no_map[node_id] = internal_node_seq_no\n node.node_id = node_id\n node.node_seq_no = internal_node_seq_no\n node.x = x_coord_to\n node.y = y_coord_to\n node.agent_id = tmc_corridor_name\n\n self.g_node_vector.append(node)\n\n self.g_tmc_corridor_vector[tmc_corridor_name].node_no_vector.append(internal_node_seq_no)\n pt.node_no = node.node_seq_no\n self.g_tmc_corridor_vector[tmc_corridor_name].point_vector.append(pt)\n self.assignment.g_number_of_nodes += 1\n internal_node_seq_no += 1\n\n if self.assignment.g_number_of_nodes % 1000 == 0:\n print(f\"reading {self.assignment.g_number_of_nodes} node...\")\n\n # read link data\n link_type_warning_count = 0\n length_in_km_warning = False\n\n for i in range(len(df_tmc_identification)):\n link = Link()\n\n # Prepare and Save Link data to g_LinkVector and assignment\n if \"link_type_name\" in tmc_identification_col:\n link_type_name = df_tmc_identification.loc[i, \"link_type_name\"]\n else:\n link_type_name = \"\"\n\n start_longitude = df_tmc_identification.loc[i, \"start_longitude\"]\n start_latitude = df_tmc_identification.loc[i, \"start_latitude\"]\n end_longitude = df_tmc_identification.loc[i, \"end_longitude\"]\n end_latitude = df_tmc_identification.loc[i, \"end_latitude\"]\n\n long_lat_string_from = f\"{str(start_longitude)}_{str(start_latitude)}\"\n long_lat_string_to = 
f\"{str(end_longitude)}_{str(end_latitude)}\"\n\n            from_node_id = long_lat_string_to_node_id_mapping.get(long_lat_string_from, -1)\n            to_node_id = long_lat_string_to_node_id_mapping.get(long_lat_string_to, -1)\n\n            if from_node_id == -1 or to_node_id == -1:\n                continue\n\n            link_id = df_tmc_identification.loc[i, \"tmc\"]\n\n            # add the to node id into the outbound (adjacent) node list\n            if from_node_id not in self.assignment.g_node_id_to_seq_no_map:\n                print(f\"Error: from_node_id {from_node_id} in file TMC_Identification.csv is not defined in node.csv\")\n\n            if to_node_id not in self.assignment.g_node_id_to_seq_no_map:\n                print(f\"Error: to_node_id {to_node_id} in file TMC_Identification.csv is not defined in node.csv\")\n\n            internal_from_node_seq_no = self.assignment.g_node_id_to_seq_no_map[from_node_id]\n            internal_to_node_seq_no = self.assignment.g_node_id_to_seq_no_map[to_node_id]\n\n            link.from_node_seq_no = internal_from_node_seq_no\n            link.to_node_seq_no = internal_to_node_seq_no\n            link.link_seq_no = self.assignment.g_number_of_links\n            link.link_id = link_id\n\n            self.assignment.g_link_id_map[link.link_id] = 1\n\n            link.tmc_code = link_id\n            link.tmc_road_sequence = 1\n\n            if tmc_corridor_name:\n                link.tmc_corridor_name = tmc_corridor_name\n\n            if \"road_order\" in tmc_identification_col:\n                # \"corridor_link_sequence\"\n                corridor_link_sequence = df_tmc_identification.loc[i, \"road_order\"]\n            else:\n                corridor_link_sequence = \"\"\n\n            if corridor_link_sequence:\n                link.tmc_road_sequence = corridor_link_sequence\n\n            if link.tmc_corridor_name in self.g_tmc_corridor_vector:\n                link.tmc_corridor_id = self.g_tmc_corridor_vector[link.tmc_corridor_name].tmc_corridor_id\n\n            link_type = 2\n            length = 1 # km or mile\n            free_speed = 60\n            cutoff_speed = 1.0\n\n            # print(\"link_type\", link.link_type)\n            if link.link_type in self.assignment.g_LinkTypeMap:\n                k_jam = self.assignment.g_LinkTypeMap[link.link_type].k_jam\n            else:\n                k_jam = 300\n\n            bwtt_speed = 12\n\n            lane_capacity = 1800\n            length = df_tmc_identification.loc[i, \"miles\"]\n\n            link.free_flow_travel_time_in_min = length / free_speed * 60\n            fftt_in_sec = link.free_flow_travel_time_in_min * 60\n\n            link.length_in_meter = length * 1609.34\n            link.link_distance_VDF = length\n            link.link_distance_mile = length\n\n            link.number_of_lanes = 1\n            link.lane_capacity = lane_capacity\n\n            if link.link_type in self.assignment.g_LinkTypeMap:\n                link.vdf_type = self.assignment.g_LinkTypeMap[link.link_type].vdf_type\n            link.k_jam = k_jam\n\n            VDF_field_name = []\n\n            for i in range(MAX_TIME_INTERVAL_PER_DAY):\n                link.model_speed[i] = free_speed\n                link.est_volume_per_hour_per_lane[i] = 0\n                link.est_avg_waiting_time_in_min[i] = 0\n                link.est_queue_length_per_lane[i] = 0\n\n            for tau in range(self.assignment.g_number_of_demand_periods):\n                if link.link_type in self.assignment.g_LinkTypeMap:\n                    link.VDF_period[tau].vdf_type = self.assignment.g_LinkTypeMap[link.link_type].vdf_type\n                link.VDF_period[tau].lane_based_ultimate_hourly_capacity = lane_capacity\n                link.VDF_period[tau].num_lanes = 1\n\n                link.VDF_period[tau].FFTT = link.link_distance_VDF / max(0.0001, link.free_speed) * 60\n                link.v_congestion_cutoff = 0.7 * link.free_speed\n                link.VDF_period[tau].vf = link.free_speed\n                link.VDF_period[tau].v_congestion_cutoff = link.v_congestion_cutoff\n                link.VDF_period[tau].alpha = 0.15\n                link.VDF_period[tau].beta = 4\n                link.VDF_period[tau].preload = 0\n\n                link.VDF_period[tau].starting_time_in_hour = self.assignment.g_DemandPeriodVector[tau].starting_time_slot_no * MIN_PER_TIME_SLOT / 60.0\n 
link.VDF_period[tau].ending_time_in_hour = self.assignment.g_DemandPeriodVector[tau].ending_time_slot_no * MIN_PER_TIME_SLOT / 60.0\n link.VDF_period[tau].L = self.assignment.g_DemandPeriodVector[tau].time_period_in_hour\n link.VDF_period[tau].t2 = self.assignment.g_DemandPeriodVector[tau].t2_peak_in_hour\n link.VDF_period[tau].peak_load_factor = 1\n\n link.update_kc(free_speed)\n link.link_spatial_capacity = k_jam * 1 * link.link_distance_VDF\n link.link_distance_VDF = max(0.000001, link.link_distance_VDF)\n\n for tau in range(self.assignment.g_number_of_demand_periods):\n link.travel_time_per_period[tau] = link.link_distance_VDF / free_speed * 60\n\n self.g_node_vector[internal_from_node_seq_no].m_outgoing_link_seq_no_vector.append(\n link.link_seq_no)\n self.g_node_vector[internal_to_node_seq_no].m_incoming_link_seq_no_vector.append(\n link.link_seq_no)\n self.g_node_vector[internal_from_node_seq_no].m_to_node_seq_no_vector.append(link.to_node_seq_no)\n self.g_node_vector[internal_from_node_seq_no].m_to_node_2_link_seq_no_map[link.to_node_seq_no] = link.link_seq_no\n\n self.g_link_vector.append(link)\n self.assignment.g_number_of_links += 1\n\n if self.assignment.g_number_of_links % 10000 == 0:\n print(\"reading link %d\" % self.assignment.g_number_of_links)\n\n print(\"tmc_identification file loaded...\")\n\n @func_running_time\n def read_Readings_csv_file(self) -> None:\n print(\"Start reading Reading.csv file...\\n\")\n path_reading = path2linux(os.path.join(self.path_input_folder, \"Reading.csv\"))\n df_reading = pd.read_csv(path_reading)\n\n isColumnNameRequired = check_required_column_names_exist(\n list(required_input_file_dict[\"Reading.csv\"]),\n list(df_reading.columns)\n )\n\n if not isColumnNameRequired:\n raise Exception(\"Column name not required: please check Reading.csv\")\n\n # convert measurement_tstamp to datetime\n df_reading[\"measurement_tstamp\"] = pd.to_datetime(df_reading[\"measurement_tstamp\"])\n\n for i in range(len(df_reading)):\n tmc = df_reading.loc[i, \"tmc_code\"]\n\n # convert time stamp to string\n measurement_tstamp = str(df_reading.loc[i, \"measurement_tstamp\"])\n\n length_of_measurement_tstamp = len(measurement_tstamp)\n if length_of_measurement_tstamp == 0:\n continue\n\n if length_of_measurement_tstamp < 18:\n print(f\"reading data for measurement_tstamp = {measurement_tstamp}, len of the timestamp is {length_of_measurement_tstamp}. \\nPlease use stand ISO 8601 data and time to format. 
eg: 2022-05-23T22:00:23\")\n\n tmc_reference_speed = 0\n speed = -1\n bMatched_flag = False\n\n day_of_week_flag = 0\n day_of_year = 0\n global_time = g_measurement_tstamp_parser(measurement_tstamp, day_of_week_flag, day_of_year)\n\n speed = df_reading.loc[i, \"speed\"]\n reference_speed = df_reading.loc[i, \"reference_speed\"]\n\n volume_pl = df_reading.loc[i, \"volume_pl\"] if \"volume_pl\" in df_reading.columns else -1\n road_name = df_reading.loc[i, \"ROADNAME\"] if \"ROADNAME\" in df_reading.columns else \"\"\n\n if tmc not in self.assignment.m_TMClink_map:\n tmc_link = TMCLink()\n tmc_link.tmc_code = tmc\n self.assignment.m_TMClink_map[tmc] = len(self.g_TMC_vector)\n self.g_TMC_vector.append(tmc_link)\n\n index = self.assignment.m_TMClink_map[tmc]\n self.g_TMC_vector[index].add_speed_sensor_data(day_of_year, global_time, speed, volume_pl)\n print(\"Reading.csv loaded...\\n\")\n\n @func_running_time\n def generate_node_csv(self, isSave2csv=True) -> pd.DataFrame:\n\n # generate Node.csv\n node_result_list = []\n\n for corridor_name in self.g_tmc_corridor_vector:\n tmc_corridor_info = self.g_tmc_corridor_vector[corridor_name]\n tmc_corridor_info.find_center_and_origin()\n\n if len(tmc_corridor_info.point_vector) <= 5:\n continue\n\n for k in range(len(tmc_corridor_info.point_vector)):\n i = tmc_corridor_info.point_vector[k].node_no\n if self.g_node_vector[i].node_id > 0:\n node_result_list.append([\n self.g_node_vector[i].node_id,\n self.g_node_vector[i].node_seq_no,\n self.g_node_vector[i].layer_no,\n self.g_node_vector[i].agent_id,\n k,\n tmc_corridor_info.point_vector[k].distance_from_origin,\n self.g_node_vector[i].MRM_gate_flag,\n self.g_node_vector[i].node_type,\n self.g_node_vector[i].is_boundary,\n len(self.g_node_vector[i].m_outgoing_link_seq_no_vector),\n self.g_node_vector[i].is_activity_node,\n self.g_node_vector[i].agent_type_str,\n\n self.g_node_vector[i].zone_org_id,\n self.g_node_vector[i].cell_str,\n 0,\n self.g_node_vector[i].x,\n self.g_node_vector[i].y\n ])\n\n df_node = pd.DataFrame(node_result_list, columns=self.node_col_name)\n if isSave2csv:\n df_node.to_csv(validate_filename(generate_absolute_path(file_name=\"Node.csv\")), index=False)\n print(\"Successfully saved Node.csv to cbi_results/Node.csv \\n\")\n\n return df_node\n\n @func_running_time\n def generate_link_csv(self, isSave2csv=True) -> pd.DataFrame:\n\n # Generate Link.csv\n link_result_list = []\n\n for i in range(len(self.g_link_vector)):\n if self.g_link_vector[i].link_type <= -100:\n continue\n\n if len(self.g_link_vector[i].geometry) > 0:\n geometry_str = self.g_link_vector[i].geometry\n else:\n geometry_str = f\"LINESTRING({self.g_node_vector[self.g_link_vector[i].from_node_seq_no].x} {self.g_node_vector[self.g_link_vector[i].from_node_seq_no].y}, {self.g_node_vector[self.g_link_vector[i].to_node_seq_no].x} {self.g_node_vector[self.g_link_vector[i].to_node_seq_no].y})\"\n\n link_result_list.append([\n self.g_link_vector[i].link_id,\n self.g_link_vector[i].link_seq_no,\n self.g_link_vector[i].layer_no,\n self.g_node_vector[self.g_link_vector[i].from_node_seq_no].node_id,\n self.g_node_vector[self.g_link_vector[i].to_node_seq_no].node_id,\n self.g_node_vector[self.g_link_vector[i].from_node_seq_no].MRM_gate_flag,\n self.g_node_vector[self.g_link_vector[i].to_node_seq_no].MRM_gate_flag,\n self.g_link_vector[i].link_type,\n self.g_link_vector[i].link_type_name,\n self.g_link_vector[i].number_of_lanes,\n self.g_link_vector[i].link_distance_VDF,\n self.g_link_vector[i].free_speed,\n 
self.g_link_vector[i].v_congestion_cutoff,\n self.g_link_vector[i].free_flow_travel_time_in_min,\n self.g_link_vector[i].lane_capacity,\n # g_link_vector[i].VDF_period[0].allowed_uses,\n \"all\",\n self.g_link_vector[i].VDF_period[0].peak_load_factor,\n self.g_link_vector[i].VDF_period[0].alpha,\n self.g_link_vector[i].VDF_period[0].beta,\n\n self.g_link_vector[i].VDF_period[0].peak_load_factor,\n self.g_link_vector[i].VDF_period[0].Q_alpha,\n self.g_link_vector[i].VDF_period[0].Q_beta,\n self.g_link_vector[i].VDF_period[0].Q_cd,\n self.g_link_vector[i].VDF_period[0].Q_n,\n geometry_str\n ])\n\n df_link = pd.DataFrame(link_result_list, columns=self.link_col_name)\n\n if isSave2csv:\n df_link.to_csv(validate_filename(generate_absolute_path(file_name=\"Link.csv\")), index=False)\n print(\"Successfully saved Link.csv to cbi_results/Link.csv \\n\")\n\n return df_link\n\n @func_running_time\n def generate_cbi_summary_csv(self, isSave2csv=True) -> pd.DataFrame:\n # initialize the empty list to store cbi summary result\n link_cbi_summary_result_list = []\n\n # initialize the empty dictionary\n TMC_long_id_mapping = {}\n\n # ignored the column name from source code, generate data directly\n\n # sort data records\n for i in range(len(self.g_link_vector)):\n if len(self.g_link_vector[i].tmc_code) > 0:\n TMC_long_key = (self.g_link_vector[i].tmc_corridor_id * 10000 +\n self.g_link_vector[i].tmc_road_sequence) * 10 + self.g_link_vector[i].link_seq_no\n\n TMC_long_id_mapping[TMC_long_key] = self.g_link_vector[i].link_seq_no\n\n for tmc_long_key in TMC_long_id_mapping:\n i = TMC_long_id_mapping[tmc_long_key]\n\n highest_speed = self.g_link_vector[i].free_speed\n\n if self.g_link_vector[i].tmc_code in self.assignment.m_TMClink_map:\n tmc_index = self.assignment.m_TMClink_map[self.g_link_vector[i].tmc_code]\n\n if not self.g_TMC_vector[tmc_index].b_with_sensor_speed_data:\n continue\n\n highest_speed = self.g_TMC_vector[tmc_index].get_highest_speed()\n else:\n continue\n\n free_speed = self.g_link_vector[i].free_speed\n\n if self.g_link_vector[i].lane_capacity > 5000:\n continue\n\n self.g_link_vector[i].update_kc(self.g_link_vector[i].free_speed)\n\n cbi_result_1 = [\n self.g_link_vector[i].link_id,\n self.g_link_vector[i].tmc_code,\n self.g_link_vector[i].tmc_corridor_name,\n self.g_link_vector[i].tmc_corridor_id,\n self.g_link_vector[i].tmc_road_order,\n self.g_link_vector[i].tmc_road_sequence,\n self.g_link_vector[i].tmc_road,\n self.g_link_vector[i].tmc_direction,\n self.g_link_vector[i].tmc_intersection,\n highest_speed,\n self.g_link_vector[i].link_seq_no,\n self.g_node_vector[self.g_link_vector[i].from_node_seq_no].node_id,\n self.g_node_vector[self.g_link_vector[i].to_node_seq_no].node_id,\n self.g_link_vector[i].link_type,\n self.g_link_vector[i].link_type_code,\n self.g_link_vector[i].FT,\n self.g_link_vector[i].AT,\n self.g_link_vector[i].vdf_code,\n self.g_link_vector[i].number_of_lanes,\n self.g_link_vector[i].link_distance_VDF,\n self.g_link_vector[i].free_speed,\n self.g_link_vector[i].lane_capacity,\n self.g_link_vector[i].k_critical,\n self.g_link_vector[i].v_congestion_cutoff\n ]\n\n if self.g_link_vector[i].tmc_code in self.assignment.m_TMClink_map:\n tmc_index = self.assignment.m_TMClink_map[self.g_link_vector[i].tmc_code]\n highest_speed = 0\n\n p_link = self.g_link_vector[i]\n\n updated_vc = self.g_TMC_vector[tmc_index].scan_highest_speed_and_vc(\n p_link.free_speed, highest_speed)\n\n for time_index in range(MAX_TIME_INTERVAL_PER_DAY):\n p_link.model_speed[time_index] = 
p_link.free_speed\n\n p_link.v_congestion_cutoff = updated_vc\n p_link.update_kc(free_speed)\n\n cbi_result_2 = [\n highest_speed,\n p_link.v_congestion_cutoff,\n p_link.v_congestion_cutoff / max(1, highest_speed),\n p_link.v_critical\n ]\n\n # generate 0-24 hours list\n analysis_hour_flag = [0] * 25\n\n cbi_result_3 = []\n for tau in range(min(3, len(self.assignment.g_DemandPeriodVector))):\n assign_period_start_time_in_hour = self.assignment.g_DemandPeriodVector[\n tau].starting_time_slot_no * MIN_PER_TIME_SLOT / 60.0\n assign_period_end_time_in_hour = self.assignment.g_DemandPeriodVector[\n tau].ending_time_slot_no * MIN_PER_TIME_SLOT / 60.0\n assign_period_t2_peak_in_hour = self.assignment.g_DemandPeriodVector[\n tau].t2_peak_in_hour\n\n for hour in range(int(assign_period_start_time_in_hour), int(assign_period_end_time_in_hour)):\n analysis_hour_flag[hour] = 1\n\n obs_t0_in_hour = 0\n obs_t3_in_hour = 0\n obs_P = 0\n\n V = D = VOC_ratio = DOC_ratio = 0\n\n peak_hour_volume = 0\n mean_speed_BPR = 0\n mean_speed_QVDF = 0\n t2_speed = 0\n\n plf = 1\n Q_n = Q_s = Q_cd = Q_cp = 1\n outside_time_margin_in_hour = 1\n\n obs_P = self.g_TMC_vector[tmc_index].scan_congestion_duration(\n tau,\n assign_period_start_time_in_hour,\n assign_period_end_time_in_hour,\n outside_time_margin_in_hour,\n assign_period_t2_peak_in_hour,\n p_link.v_congestion_cutoff,\n p_link,\n obs_t0_in_hour,\n obs_t3_in_hour,\n obs_P,\n V,\n peak_hour_volume,\n D,\n VOC_ratio,\n DOC_ratio,\n mean_speed_BPR,\n mean_speed_QVDF,\n highest_speed,\n t2_speed,\n plf,\n #Q_n, Q_s, Q_cd, Q_cp\n )\n\n Q_alpha = 8 / 15 * Q_cp * math.pow(Q_cd, Q_s)\n Q_beta = Q_n * Q_s\n\n Q_alpha = self.g_TMC_vector[tmc_index].check_feasible_range(\n Q_alpha, 0.27, 0.01, 1)\n Q_beta = self.g_TMC_vector[tmc_index].check_feasible_range(\n Q_beta, 1.14, 0.5, 5)\n\n if p_link.link_id == \"201065AB\":\n idebug = 1\n\n p_link.VDF_period[tau].sa_volume = V * p_link.number_of_lanes\n p_link.VDF_period[tau].peak_load_factor = plf\n p_link.VDF_period[tau].v_congestion_cutoff = p_link.v_congestion_cutoff\n p_link.VDF_period[tau].Q_alpha = Q_alpha\n p_link.VDF_period[tau].Q_beta = Q_beta\n p_link.VDF_period[tau].Q_cd = Q_cd\n p_link.VDF_period[tau].Q_n = Q_n\n p_link.VDF_period[tau].Q_cp = Q_cp\n p_link.VDF_period[tau].Q_s = Q_s\n\n p_link.free_speed = highest_speed\n p_link.VDF_period[tau].vf = highest_speed\n\n p_link.calculate_dynamic_VDFunction(self.assignment, 0, False, p_link.vdf_type)\n\n speed_reduction_factor = 0\n if (obs_P > 0.25) and (t2_speed < p_link.v_congestion_cutoff):\n speed_reduction_factor = p_link.v_congestion_cutoff / \\\n max(1, t2_speed) - 1\n\n queue_vc_delay_index = p_link.v_congestion_cutoff / \\\n max(1, mean_speed_QVDF) - 1\n if queue_vc_delay_index < 0:\n queue_vc_delay_index = 0\n\n BPR_vf_delay_index = max(highest_speed, float(\n p_link.free_speed)) / max(1, mean_speed_BPR) - 1\n if BPR_vf_delay_index < 0:\n BPR_vf_delay_index = 0\n\n log_VOC = log_DOC = log_P = log_sf = log_vfdi = log_vcdi = 0\n if DOC_ratio > 0.00001:\n log_DOC = math.log(DOC_ratio)\n log_VOC = math.log(VOC_ratio)\n\n if obs_P > 0.00001:\n log_P = math.log(obs_P)\n\n if speed_reduction_factor > 0.00001:\n log_sf = math.log(speed_reduction_factor)\n\n if BPR_vf_delay_index > 0.00001:\n log_vfdi = math.log(BPR_vf_delay_index)\n\n if queue_vc_delay_index > 0.00001:\n log_vcdi = math.log(queue_vc_delay_index)\n\n cbi_result_3.extend([\n obs_t0_in_hour,\n obs_t3_in_hour,\n V,\n peak_hour_volume,\n D,\n VOC_ratio,\n DOC_ratio,\n obs_P,\n speed_reduction_factor,\n 
BPR_vf_delay_index,\n queue_vc_delay_index,\n mean_speed_BPR,\n mean_speed_QVDF,\n t2_speed,\n plf,\n Q_n,\n Q_cp,\n Q_alpha,\n Q_beta,\n self.g_link_vector[i].VDF_period[tau].link_volume,\n self.g_link_vector[i].VDF_period[tau].lane_based_D,\n self.g_link_vector[i].VDF_period[tau].DOC,\n self.g_link_vector[i].VDF_period[tau].P,\n self.g_link_vector[i].VDF_period[tau].avg_queue_speed,\n self.g_link_vector[i].VDF_period[tau].vt2,\n self.g_link_vector[i].VDF_period[tau].Q_mu,\n self.g_link_vector[i].VDF_period[tau].Q_gamma,\n self.g_link_vector[i].VDF_period[tau].lane_based_Vph,\n self.g_link_vector[i].VDF_period[tau].VOC,\n self.g_link_vector[i].VDF_period[tau].avg_speed_BPR,\n ])\n cbi_result_3.extend(\n [str(self.g_link_vector[i].geometry),\n f\"LINESTRING({self.g_link_vector[i].TMC_from.x} {self.g_link_vector[i].TMC_from.y},{self.g_link_vector[i].TMC_to.x} {self.g_link_vector[i].TMC_to.x})\"]\n )\n\n if self.g_link_vector[i].tmc_code in self.assignment.m_TMClink_map:\n tmc_index = self.assignment.m_TMClink_map[self.g_link_vector[i].tmc_code]\n\n count_total = 0\n MAE_total = 0\n MAPE_total = 0\n RMSE_total = 0\n\n ObsSpeed = [0] * 25\n EstSpeed = [0] * 25\n EstSpeedDiff = [0] * 25\n\n for t in range(6 * 60, 20 * 60, 60):\n hour = int(t / 60)\n\n # return data from index 6 to 20\n ObsSpeed[hour] = self.g_TMC_vector[tmc_index].get_avg_hourly_speed(t)\n\n # return data from index 6 to 20\n model_speed = self.g_link_vector[i].get_model_hourly_speed(t)\n EstSpeed[hour] = model_speed\n\n if EstSpeed[hour] > 1 and ObsSpeed[hour] > 1 and analysis_hour_flag[hour] == 1:\n EstSpeedDiff.append(\n math.fabs(EstSpeed[hour] - ObsSpeed[hour]))\n MAE_total += math.fabs(EstSpeedDiff[hour])\n MAPE_total += math.fabs(\n EstSpeedDiff[hour] / max(1, ObsSpeed[hour]))\n RMSE_total += EstSpeedDiff[hour] * \\\n EstSpeedDiff[hour]\n count_total += 1\n else:\n EstSpeedDiff.append(0)\n\n MSE_total = RMSE_total / max(1, count_total)\n\n cbi_result_4 = ObsSpeed[6:20] + EstSpeed[6:20] + [\n MAE_total / max(1, count_total),\n MAPE_total / max(1, count_total) * 100,\n math.pow(MSE_total, 0.5)\n ]\n\n cbi_result_5_volume = [self.g_TMC_vector[tmc_index].get_avg_hourly_volume(\n t) for t in range(6 * 60, 20 * 60, 60)]\n\n cbi_result_6_speed_ratio = []\n for t in range(6 * 60, 20 * 60, 60):\n hour = int(t / 60)\n speed_ratio = ObsSpeed[hour] / max(1, self.g_link_vector[i].TMC_highest_speed)\n if speed_ratio > 1:\n speed_ratio = 1\n cbi_result_6_speed_ratio.append(speed_ratio)\n\n cbi_result_7 = [self.g_TMC_vector[tmc_index].get_avg_speed(\n t) for t in range(6 * 60, 20 * 60, 5)]\n\n cbi_result_8 = [self.g_link_vector[i].get_model_5_min_speed(\n t) for t in range(6 * 60, 20 * 60, 5)]\n cbi_result_9 = [self.g_TMC_vector[tmc_index].get_avg_speed(\n t) for t in range(6 * 60, 20 * 60, 15)]\n cbi_result_10 = [self.g_link_vector[i].get_model_15_min_speed(\n t) for t in range(6 * 60, 20 * 60, 15)]\n\n cbi_result_11 = []\n for t in range(6 * 60, 20 * 60, 5):\n speed = self.g_TMC_vector[tmc_index].get_avg_speed(t)\n volume = self.g_TMC_vector[tmc_index].get_avg_volume(\n t, self.g_link_vector[i], speed, self.g_link_vector[i].TMC_highest_speed)\n cbi_result_11.append(volume * 12)\n\n link_cbi_summary_result_list.append(\n cbi_result_1 + cbi_result_2 + cbi_result_3 + cbi_result_4 + cbi_result_5_volume + cbi_result_6_speed_ratio + cbi_result_7 + cbi_result_8 + cbi_result_9 + cbi_result_10 + cbi_result_11)\n else:\n link_cbi_summary_result_list.append(cbi_result_1 + cbi_result_2 + cbi_result_3)\n else:\n 
link_cbi_summary_result_list.append(cbi_result_1)\n\n df_cbi_summary = pd.DataFrame(link_cbi_summary_result_list, columns=self.cbi_summary_col_name)\n\n if isSave2csv:\n df_cbi_summary.to_csv(validate_filename(generate_absolute_path(file_name=\"cbi_summary.csv\")), index=False)\n print(\"Successfully saved Link.csv to cbi_results/cbi_summary.csv \\n\")\n\n\n return df_cbi_summary\n\n @func_running_time\n def generate_link_qvdf_csv(self, isSave2csv=True) -> pd.DataFrame:\n # initialize the empty list to store cbi summary result\n qvdf_result_list = []\n\n # initialize the empty dictionary\n TMC_long_id_mapping = {}\n\n # ignored the column name from source code, generate data directly\n # sort data records\n for i in range(len(self.g_link_vector)):\n if len(self.g_link_vector[i].tmc_code) > 0:\n TMC_long_key = (self.g_link_vector[i].tmc_corridor_id * 10000 +\n self.g_link_vector[i].tmc_road_sequence) * 10 + self.g_link_vector[i].link_seq_no\n\n TMC_long_id_mapping[TMC_long_key] = self.g_link_vector[i].link_seq_no\n\n for tmc_long_key in TMC_long_id_mapping:\n i = TMC_long_id_mapping[tmc_long_key]\n\n highest_speed = self.g_link_vector[i].free_speed\n\n if self.g_link_vector[i].tmc_code in self.assignment.m_TMClink_map:\n tmc_index = self.assignment.m_TMClink_map[self.g_link_vector[i].tmc_code]\n\n if not self.g_TMC_vector[tmc_index].b_with_sensor_speed_data:\n continue\n\n highest_speed = self.g_TMC_vector[tmc_index].get_highest_speed(\n )\n else:\n continue\n\n free_speed = self.g_link_vector[i].free_speed\n\n if self.g_link_vector[i].lane_capacity > 5000:\n continue\n\n self.g_link_vector[i].update_kc(self.g_link_vector[i].free_speed)\n\n qvdf_result_1 = [\n \"link\",\n self.g_link_vector[i].link_id,\n self.g_link_vector[i].tmc_corridor_name,\n self.g_node_vector[self.g_link_vector[i].from_node_seq_no].node_id,\n self.g_node_vector[self.g_link_vector[i].to_node_seq_no].node_id,\n self.g_link_vector[i].vdf_code\n ]\n\n if self.g_link_vector[i].tmc_code in self.assignment.m_TMClink_map:\n tmc_index = self.assignment.m_TMClink_map[self.g_link_vector[i].tmc_code]\n highest_speed = 0\n\n p_link = self.g_link_vector[i]\n\n updated_vc = self.g_TMC_vector[tmc_index].scan_highest_speed_and_vc(\n p_link.free_speed, highest_speed)\n\n for time_index in range(MAX_TIME_INTERVAL_PER_DAY):\n p_link.model_speed[time_index] = p_link.free_speed\n\n p_link.v_congestion_cutoff = updated_vc\n p_link.update_kc(free_speed)\n\n\n qvdf_result_2 = []\n for tau in range(min(3, len(self.assignment.g_DemandPeriodVector))):\n assign_period_start_time_in_hour = self.assignment.g_DemandPeriodVector[\n tau].starting_time_slot_no * MIN_PER_TIME_SLOT / 60.0\n assign_period_end_time_in_hour = self.assignment.g_DemandPeriodVector[\n tau].ending_time_slot_no * MIN_PER_TIME_SLOT / 60.0\n assign_period_t2_peak_in_hour = self.assignment.g_DemandPeriodVector[\n tau].t2_peak_in_hour\n\n obs_t0_in_hour = 0\n obs_t3_in_hour = 0\n obs_P = 0\n\n V = D = VOC_ratio = DOC_ratio = 0\n\n peak_hour_volume = 0\n mean_speed_BPR = 0\n mean_speed_QVDF = 0\n t2_speed = 0\n\n plf = 1\n Q_n = Q_s = Q_cd = Q_cp = 1\n outside_time_margin_in_hour = 1\n\n obs_P = self.g_TMC_vector[tmc_index].scan_congestion_duration(\n tau,\n assign_period_start_time_in_hour,\n assign_period_end_time_in_hour,\n outside_time_margin_in_hour,\n assign_period_t2_peak_in_hour,\n p_link.v_congestion_cutoff,\n p_link,\n obs_t0_in_hour,\n obs_t3_in_hour,\n obs_P,\n V,\n peak_hour_volume,\n D,\n VOC_ratio,\n DOC_ratio,\n mean_speed_BPR,\n mean_speed_QVDF,\n highest_speed,\n 
t2_speed,\n plf,\n #Q_n, Q_s, Q_cd, Q_cp\n )\n\n Q_alpha = 8 / 15 * Q_cp * math.pow(Q_cd, Q_s)\n Q_beta = Q_n * Q_s\n\n Q_alpha = self.g_TMC_vector[tmc_index].check_feasible_range(\n Q_alpha, 0.27, 0.01, 1)\n Q_beta = self.g_TMC_vector[tmc_index].check_feasible_range(\n Q_beta, 1.14, 0.5, 5)\n\n p_link.VDF_period[tau].Q_alpha = Q_alpha\n p_link.VDF_period[tau].Q_beta = Q_beta\n p_link.VDF_period[tau].Q_cd = Q_cd\n p_link.VDF_period[tau].Q_n = Q_n\n p_link.VDF_period[tau].Q_cp = Q_cp\n p_link.VDF_period[tau].Q_s = Q_s\n\n if len(p_link.vdf_code) > 1:\n self.g_vdf_type_map[p_link.vdf_code].record_qvdf_data(p_link.VDF_period[tau], tau)\n\n # self.g_vdf_type_map[\"all\"].record_qvdf_data(p_link.VDF_period[tau], tau)\n\n qvdf_result_2.extend([\n plf, Q_n, Q_s, Q_cp, Q_cd, Q_alpha, Q_beta\n ])\n\n qvdf_result_list.append(qvdf_result_1 + qvdf_result_2)\n\n else:\n qvdf_result_list.append(qvdf_result_1)\n\n for key in self.g_vdf_type_map:\n qvdf_result_3 = []\n for tau in range(min(3, len(self.assignment.g_DemandPeriodVector))):\n self.g_vdf_type_map[key].computer_avg_parameter(tau)\n qvdf_result_3.extend([\n self.g_vdf_type_map[key].VDF_period_sum[tau].peak_load_factor,\n self.g_vdf_type_map[key].VDF_period_sum[tau].Q_n,\n self.g_vdf_type_map[key].VDF_period_sum[tau].Q_s,\n self.g_vdf_type_map[key].VDF_period_sum[tau].Q_cp,\n self.g_vdf_type_map[key].VDF_period_sum[tau].Q_cd,\n self.g_vdf_type_map[key].VDF_period_sum[tau].Q_alpha,\n self.g_vdf_type_map[key].VDF_period_sum[tau].Q_beta\n ])\n\n qvdf_result_list.append(\n [\"vdf_code\",\"\",\"\",\"\",\"\", key] + qvdf_result_3\n )\n\n df_qvdf = pd.DataFrame(qvdf_result_list, columns = self.link_qvdf_col_name)\n\n if isSave2csv:\n df_qvdf.to_csv(validate_filename(generate_absolute_path(file_name=\"link_qvdf.csv\")), index=False)\n print(\"Successfully saved Link.csv to cbi_results/link_qvdf.csv \\n\")\n\n return df_qvdf\n\n def cbi_execution(self):\n\n print(\"Step 1: read settings.yaml data\\n\")\n self.read_settings_yaml_file()\n\n print(\"Step 2: read tmc_identification.csv data\\n\")\n self.read_tmc_identification_csv_file()\n\n print(\"Step 3: read Reading.csv data\\n\")\n self.read_Readings_csv_file()\n\n print(\"Step 4: generate node.csv data\\n\")\n self.generate_node_csv(isSave2csv=True)\n\n print(\"Step 5: generate link.csv data\\n\")\n self.generate_link_csv(isSave2csv=True)\n\n print(\"Step 6: generate cbi_summary.csv data\\n\")\n self.generate_cbi_summary_csv(isSave2csv=True)\n\n print(\"Step 7: generate link_qvdf.csv data\\n\")\n self.generate_link_qvdf_csv(isSave2csv=True)\n\n\nif __name__ == \"__main__\":\n # define the path of input folder\n path_input_folder = \"./data_input\"\n\n # create an instance of CBI_TOOL\n cbi = CBI_TOOL(path_input_folder)\n\n # execute the CBI_TOOL\n cbi.cbi_execution()\n","repo_name":"asu-trans-ai-lab/TMC2GMNS","sub_path":"src/python/cbi_main.py","file_name":"cbi_main.py","file_ext":"py","file_size_in_byte":51207,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"37941954653","text":"import urllib.parse\nimport csv\nimport datetime\nfrom sqlalchemy import create_engine, Column, Integer, String, func\nfrom sqlalchemy.orm import sessionmaker, Session\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom ecnuopenapi.oauth_init import OAuth2Config\nfrom ecnuopenapi.model import HttpGet\nfrom ecnuopenapi.api import getRows, getAllRows\nfrom ecnuopenapi.parse import ParseRowsToCSV, UnmarshalRows,map_dict_to_dataclass\nfrom dateutil.parser 
import parse\n\nMAX_PAGE_SIZE = 2000\n\nclass APIConfig:\n    def __init__(self, APIPath, PageSize=0, BatchSize=0, UpdatedAtField=None):\n        self.APIPath = APIPath\n        self.PageSize = PageSize\n        self.BatchSize = BatchSize\n        self.UpdatedAtField = UpdatedAtField\n        self.params = {}\n\n    def SetDefault(self):\n        if self.PageSize == 0:\n            self.PageSize = 2000\n        if self.PageSize > MAX_PAGE_SIZE:\n            self.PageSize = MAX_PAGE_SIZE\n        if self.BatchSize == 0:\n            self.BatchSize = 100\n        if not self.UpdatedAtField:\n            self.UpdatedAtField = \"updated_at\"\n\n    def AddParam(self, key, value):\n        self.params[key] = value\n\n    def SetParam(self, key, value):\n        self.params[key] = value\n\n    def DelParam(self, key):\n        if key in self.params:\n            del self.params[key]\n    \n    def SetParamsToApiPath(self):\n        if '?' in self.APIPath:\n            apiPath = self.APIPath\n            for (key, value) in self.params.items():\n                apiPath = apiPath + \"&\" + key + \"=\" + str(value)\n            return apiPath\n        else:\n            apiPath = self.APIPath + \"?\"\n            for (key, value) in self.params.items():\n                apiPath = apiPath + key + \"=\" + str(value) + \"&\"\n            return apiPath[0:-1] # drop the trailing & separator\n\ndef SyncToCsv(csvFileName, api):\n    api.SetDefault()\n    apiPath = api.SetParamsToApiPath()\n    rows, err = getAllRows(apiPath, api.PageSize)\n    if err:\n        return 0, err\n    err = ParseRowsToCSV(rows, csvFileName)\n    if err != None:\n        return 0, err\n    return len(rows), None\n\ndef SyncToModel(dataModel, api):\n    api.SetDefault()\n    apiPath = api.SetParamsToApiPath()\n    # getAllRows\n    rows, err = getAllRows(apiPath, api.PageSize)\n    if err:\n        return None, err\n    data, err = UnmarshalRows(rows, dataModel)\n    if err != None:\n        return None, err\n    return data, None\n\ndef SyncToDB(db, api, dataModel):\n    api.SetDefault()\n    apiPath = api.SetParamsToApiPath()\n\n    try:\n        # create the database tables\n        dataModel.metadata.create_all(db)\n    except Exception as e:\n        return 0, 0, Exception(str(e))\n\n    try:\n        # create the Session factory\n        Session = sessionmaker(bind=db)\n        # create a Session instance\n        session = Session()\n    except Exception as e:\n        return 0, 0, Exception(str(e))\n\n    inserted_data_count = 0 # tracks how many rows were successfully inserted\n\n    pageNum = 1\n    totalRecordsNum = 0\n    while True:\n        data, err = getRows(apiPath, pageNum, api.PageSize)\n        if err != None:\n            return 0, totalRecordsNum, err\n        if data.Rows is None or len(data.Rows) == 0:\n            break\n        # sync this page of rows into the database\n        try:\n            for row in data.Rows:\n                data_object = map_dict_to_dataclass(row, dataModel)\n                session.add(dataModel(**data_object.__dict__))\n                # flush in batches\n                if len(session.new) % api.BatchSize == 0:\n                    session.flush()\n            # commit the remaining rows\n            session.commit()\n            # no exception raised, so the whole page was inserted\n            inserted_data_count += len(data.Rows)\n            pageNum += 1\n            totalRecordsNum += len(data.Rows)\n        except Exception as e:\n            # on duplicate data you may choose to ignore or update existing rows\n            session.rollback() # roll back on failure\n            return 0, totalRecordsNum, Exception(str(e))\n    \n    session.close()\n    return inserted_data_count, totalRecordsNum, None\n\ndef GetLastUpdatedTS(db, api, dataModel):\n    api.SetDefault()\n    session = Session(db, future=True)\n    updatedAt = api.UpdatedAtField\n    max_updated_at = session.query(func.max(getattr(dataModel, updatedAt))).scalar()\n\n    if not max_updated_at:\n        return 0\n    print(type(max_updated_at))\n    print(max_updated_at)\n    if type(max_updated_at) == str:\n        return int(parse(max_updated_at).timestamp())\n    return int(max_updated_at.timestamp())\n","repo_name":"ECNU/ecnu-openapi-sdk-python","sub_path":"ecnuopenapi/sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} 
+{"seq_id":"9063045391","text":"import logging\n\nfrom iptcinfo3 import IPTCInfo\n\n\ndef get_metadata(file_path):\n \"\"\"\n Get EXIF metadata for a file.\n :param file_path: full path to the file\n :return: iptcinfo3.IPTCInfo object\n \"\"\"\n\n logger = logging.getLogger(__name__)\n\n info = IPTCInfo(file_path)\n logger.debug(\"File {} tags: {}\".format(file_path, info))\n\n return info\n\n\ndef get_keywords(file_path):\n \"\"\"\n Get EXIF keywords for a file.\n :param file_path: full path to the file\n :return: list of keywords\n \"\"\"\n\n logger = logging.getLogger(__name__)\n\n metadata = get_metadata(file_path)\n\n try:\n file_keywords = metadata['keywords']\n logger.debug(\"File {} has keywords: {}\".\n format(file_path, file_keywords))\n except KeyError:\n logger.debug(\"File {} does not contain keyword metadata\".\n format(file_path))\n return []\n\n if not isinstance(file_keywords, list):\n file_keywords = [file_keywords]\n\n file_keywords = list(map(lambda x: x.decode(), file_keywords))\n\n return file_keywords\n\n\ndef check_keywords(file_path, keywords):\n \"\"\"\n Check if file has contains specified EXIF keywords.\n :param file_path: full path to the file\n :param keywords: list of keywords\n :return: true if the file contains a keyword from the list\n \"\"\"\n\n logger = logging.getLogger(__name__)\n\n for keyword in keywords:\n if keyword in get_keywords(file_path):\n logger.debug(\"File {} contains the '{}' keyword\".\n format(file_path, keyword))\n return True\n\n return False\n","repo_name":"vladak/photo-backup","sub_path":"photo_backup/photoutil.py","file_name":"photoutil.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"4074523834","text":"\"\"\"\nA convenience script to playback random demonstrations from\na set of demonstrations stored in a hdf5 file.\n\nExample:\n $ python playback_demonstrations_from_hdf5.py --folder ../models/assets/demonstrations/SawyerPickPlace/\n\"\"\"\nimport os\nimport h5py\nimport argparse\nimport random\nimport numpy as np\nimport time\nimport pickle \n\nimport pathlib \nimport torch \n\n# from get_config import *\n# from args_parser import arg_parser\n \nfrom colorama import init\nfrom termcolor import cprint, colored\ninit(autoreset=True)\nprint_color = \"yellow\"\n\ndef sort():\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--env_id', type=int, default=21, help='Id of of the environment to run')\n parser.add_argument('--robo_task', action=\"store\", default=\"reach\", choices=[\"reach\", \"grasp\", \"full\"], help='task') \n args = parser.parse_args()\n \n env_dict = {\n ## Robosuite\n 21 : \"SawyerNutAssemblyRound\",\n 22 : \"SawyerNutAssemblySquare\",\n 23 : \"SawyerNutAssembly\",\n 24 : \"SawyerPickPlaceBread\",\n 25 : \"SawyerPickPlaceCan\",\n 26 : \"SawyerPickPlaceCereal\",\n 27 : \"SawyerPickPlaceMilk\",\n 28 : \"SawyerPickPlace\",\n }\n args.env_name = env_dict[args.env_id]\n \n traj_name = \"traj_roboturk\"\n demo_path = \"%s/Documents/Git/imitation_data/TRAJ_robo/%s\" % (pathlib.Path.home(), args.env_name)\n if args.robo_task != \"full\":\n demo_path += \"_%s\" % args.robo_task \n \n real_traj_tensor_list = [] # legnth equal to num_worker\n real_mask_tensor_list = [] # legnth equal to num_worker\n real_time_tensor_list = [] # legnth equal to num_worker\n real_worker_tensor_list = [] ## legnth equal to num_worker indices to worker ID, i.e., i_noise\n\n return_list = []\n traj_len_list = []\n\n total_size = 
0 ## number of (s,a) pairs \n\n total_traj = len(os.listdir(demo_path)) - 1\n for demo_i in range(1, total_traj + 1):\n if args.robo_task != \"full\":\n traj_filename = demo_path + (\"/%s_%s_demo%d.p\" % (args.env_name, args.robo_task, demo_i))\n else:\n traj_filename = demo_path + (\"/%s_demo%d.p\" % (args.env_name, demo_i))\n\n real_traj_list, real_mask_list, real_reward_list = pickle.load(open(traj_filename, \"rb\"))\n ## The loaded list is actually a 2 dimensional lsit (list of list) which contains trajecctories of the same demonstrator.\n ## For roboturk dataset, (we assume) 1 demonstrator collect 1 trajectory, so the first dim is size 1.\n ## So we have [0] indexing below \n\n traj_len = len(real_mask_list[0]) \n total_size += traj_len \n\n real_traj_tensor_list += [ torch.FloatTensor(real_traj_list[0]) ] # real_traj_list is a list of state-action pairs\n real_mask_tensor_list += [ torch.FloatTensor(real_mask_list[0]) ]\n real_time_tensor_list += [ torch.FloatTensor( np.arange(1, traj_len+1) ) ]\n real_worker_tensor_list += [ torch.LongTensor(traj_len).fill_(demo_i) ]\n\n return_list += [np.sum(np.asarray( real_reward_list[0] ))] ## these were computed with shaped reward\n\n traj_len_list += [traj_len] \n\n\n traj_len_list = np.asarray(traj_len_list)\n\n rwd_ratio = return_list / traj_len_list \n # sort_index = np.argsort(rwd_ratio)[::-1]\n\n sort_index = np.argsort(traj_len_list) \n # filename = demo_path + ('../../%s/%s_sort.txt' % (args.env_name, args.env_name))\n \n if args.robo_task != \"full\":\n filename = demo_path + ('/%s_%s_sort.txt' % (args.env_name, args.robo_task))\n else:\n filename = demo_path + ('/%s_sort.txt' % (args.env_name))\n \n open(filename, 'w').close()\n with open(filename, 'a') as f:\n for i in range(0, total_traj):\n result_text = \"demo %4d, step %4d, return %f, return_ratio %f\" % (sort_index[i] + 1, traj_len_list[sort_index[i]], return_list[sort_index[i]], rwd_ratio[sort_index[i]]) \n print(result_text, file=f) \n\nif __name__ == \"__main__\":\n sort()\n\n","repo_name":"voot-t/vild_code","sub_path":"code/my_utils/robosuite_processor/sort_robo_demo.py","file_name":"sort_robo_demo.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"86"} +{"seq_id":"27483522422","text":"import telepot\nfrom time import sleep\nimport random\nfrom AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient\nfrom s3dynamodb import *\nimport paho.mqtt.client as paho\nimport os\nimport socket\nimport ssl\n\nconnflag = False\n\ndef on_connect(client, userdata, flags, rc):\n global connflag\n connflag = True\n print(\"Connection returned result: \" + str(rc) )\n\ndef on_message(client, userdata, msg):\n print(msg.topic+\" \"+str(msg.payload))\n\nmqttc = paho.Client()\nmqttc.on_connect = on_connect\nmqttc.on_message = on_message\n#mqttc.on_log = on_log\n\nawshost = \"\"\nawsport = 8883\nclientId = \"\"\nthingName = \"\"\ncaPath = \"\"\ncertPath = \"\"\nkeyPath = \"\"\n\nmqttc.tls_set(caPath, certfile=certPath, keyfile=keyPath, cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None)\n\nmqttc.connect(awshost, awsport, keepalive=60)\n\nmqttc.loop_start()\n\n# Connect and subscribe to AWS IoT\n#my_rpi.connect()\n\n#telegram bot\nmy_bot_token = ''\n\n#s3\nBUCKET = ''\n\ndef allCommandsMsg():\n return \"thank me for the halp u peasant\\nlist of available commands:\\n\\nTake Photo in T2031: take photo\\nGet number of people in T2031: t2031\\nGet number of people in T2032: t2032\"\ndef 
noCommandMsg():\n num = random.randint(0, 15)\n if num < 5:\n return \"no such command, try again\"\n elif num < 10:\n return \"**crying meep** no such command, dont bully me T_T\"\n else:\n return \"did u typo? dont be kailing no.2\"\n\ndef respondToMsg(msg):\n chat_id = msg['chat']['id']\n command = msg['text']\n print('Got command: {}'.format(command))\n\n command = command.lower() #lower case it\n # filter commands\n if command == 'help':\n bot.sendMessage(chat_id, allCommandsMsg())\n elif command == 'thankyou' or command =='thx' or command =='thank you':\n bot.sendMessage(chat_id, \"awww im touched blush blush\")\n elif command == \"t2031\" or command == \"t2032\":\n noOfPpl = readDynamoDBItem(command)\n bot.sendMessage(chat_id, \"No of people in room \" + command + \": \" + noOfPpl)\n elif command == \"take photo\":\n file_name = str(msg['date']) + \".jpg\"\n s3filepath = str(chat_id) + \"/\" + file_name\n mValues = \"\\\"file_name\\\":\\\"{0}\\\",\\\"chat_id\\\":\\\"{1}\\\",\\\"s3filepath\\\":\\\"{2}\\\"\".format(str(file_name),str(chat_id),str(s3filepath))\n mqttc.publish(\"rooms/t2031/takephoto\", \"{\"+mValues+\"}\", qos=1)\n bot.sendMessage(chat_id, \"Please wait...\")\n else:\n bot.sendMessage(chat_id, noCommandMsg())\n\nbot = telepot.Bot(my_bot_token)\nbot.message_loop(respondToMsg)\nprint('Listening for RPi commands...')\nwhile True:\n sleep(1)\n","repo_name":"shernaliu/soms-public-2","sub_path":"other_rpis/telepotserver/onlytelebot.py","file_name":"onlytelebot.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"23536617348","text":"from iconservice import *\nfrom .ICONProofManager.proof_manager import ProofManager\nfrom .ICONProofManager.pack import btp_poe_unpack, rep_list_poe_unpack, reps_list_unpack\n\n\nclass IconVerifier(IconScoreBase):\n\n def __init__(self, db: IconScoreDatabase) -> None:\n super().__init__(db)\n self.reps_list = ArrayDB(\"reps_list\", db, str)\n\n def on_install(self, packed_rep_list: str) -> None:\n super().on_install()\n\n reps_list = reps_list_unpack(packed_rep_list)\n print(f\"[dc_log] reps_list: {reps_list}\")\n for rep in reps_list:\n self.reps_list.put(rep)\n\n def on_update(self) -> None:\n super().on_update()\n\n # TODO method for validateor list update\n @external\n def set_validators(self, packed_poe: str):\n \n next_reps, block_msg, votes = rep_list_poe_unpack(json_loads, packed_poe)\n fomatted_next_reps= []\n for rep in next_reps:\n fomatted_next_reps.append(b'\\x00'+bytes.fromhex(rep[2:]))\n\n print(f\"[dc_log] next_reps: {fomatted_next_reps}\")\n\n reps_pm = ProofManager(sha3_256, fomatted_next_reps, \"\")\n next_reps_hash = reps_pm.get_proof_root()\n\n if block_msg[5] != next_reps_hash:\n revert(f\"[Verifier ERROR] Invalid reps_list\")\n\n block_pm = ProofManager(sha3_256, block_msg, \"\")\n block_hash = block_pm.get_proof_root()\n\n votes_msg = list()\n for idx, vote in enumerate(votes):\n vote_msg = {\n \"rep\": self.reps_list[idx],\n \"timestamp\": vote[0],\n \"blockHeight\": hex(block_msg[9]),\n \"round_\": vote[1],\n \"blockHash\": \"0x\" + block_hash.hex()\n }\n votes_msg.append(vote_msg)\n\n vpm = ProofManager(sha3_256, votes_msg, \"Vote\")\n\n for idx, hash_ in enumerate(vpm.hashes):\n key = recover_key(hash_, votes[idx][2])\n addr = create_address_with_key(key)\n\n print(\"rep in score: \", self.reps_list[idx])\n print(\"calculated : \", str(addr))\n\n if self.reps_list[idx] != str(addr):\n revert(f\"[Verifier] comparison 
fail!!\")\n\n print(f\"[verifier] comparison succccccc!!\")\n\n while self.reps_list.pop():\n pass\n\n ret = \"\"\n for e in self.reps_list:\n ret += e\n print(f\"[dc_log] after pop replist:{ret}\")\n\n for item in next_reps:\n self.reps_list.put(item)\n\n ret = \"\"\n for e in self.reps_list:\n ret += \".\"+e\n print(f\"[dc_log] after set replist:{ret[1:]}\")\n\n\n @external\n # def verify_btp_msg(self, receipt: bytes, rp_proof: bytes, block_msg: bytes, sigs: bytes):\n def verify_btp_msg(self, packed_poe: str):\n \"\"\" verifies received btp-msg\"\"\"\n # print(f\"[verifier] packed_poe: {packed_poe}\")\n # poe = unpackb(packed_poe, allow_invalid_utf8=True)\n poe = btp_poe_unpack(json_loads, packed_poe)\n\n receipt = poe[0]\n rp_proof = poe[1]\n block_msg = poe[2]\n sigs = poe[3]\n\n btp_msg = \"\"\n for event in receipt[\"eventLogs\"]:\n # TODO 상대 BMC 주소 확인하는 로직 구현 필요\n if event[\"indexed\"][0] == \"Message(str)\":\n btp_msg = event[\"data\"][0]\n\n if not btp_msg:\n revert(f\"No btp_msg\")\n\n rpm = ProofManager(sha3_256, [receipt], \"Receipt\")\n if not rpm.validate_proof(sha3_256, rpm.hashes[0], block_msg[2], rp_proof):\n revert(f\"Fail to validate receipt merkle path\")\n\n bpm = ProofManager(sha3_256, block_msg, \"Block\")\n block_hash = bpm.get_proof_root()\n\n votes_msg = list()\n for idx, sig in enumerate(sigs):\n vote_msg = {\n \"rep\": self.reps_list[idx],\n \"timestamp\": sig[0],\n \"blockHeight\": hex(block_msg[9]),\n \"round_\": sig[1],\n \"blockHash\": \"0x\" + block_hash.hex()\n }\n votes_msg.append(vote_msg)\n\n vpm = ProofManager(sha3_256, votes_msg, \"Vote\")\n\n for idx, hash_ in enumerate(vpm.hashes):\n key = recover_key(hash_, sigs[idx][2])\n addr = create_address_with_key(key)\n\n print(\"rep in score: \", self.reps_list[idx])\n print(\"calculated : \", str(addr))\n\n if self.reps_list[idx] != str(addr):\n revert(f\"[Verifier] comparison fail!!\")\n\n print(f\"[Verifier] comparison succccccc!!\")\n return btp_msg\n\n\n\n\n\n","repo_name":"icon-project/icon-btp-0.5-PoC","sub_path":"contract-poc/contracts/icon_verifier/icon_verifier.py","file_name":"icon_verifier.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"86"} +{"seq_id":"13055172170","text":"'''\nCreated on 8 Dec 2013\n\n@author: Alexander Kampmann, David Poetzsch-Heffter\n'''\n\nimport sys, os, difflib, shutil, imp, tempfile, multiprocessing, signal\nfrom timed_process import TimedProcess\nfrom valgrindxml import ValgrindXML\n\nclass TestResult(object):\n def __init__(self, success, msg=\"\"):\n self.success = success\n self.msg = msg\n\n @staticmethod\n def success(msg=\"\"):\n return TestResult(True, msg)\n\n @staticmethod\n def fail(msg=\"\"):\n return TestResult(False, msg)\n \n\nclass Test(object):\n \"\"\"Superclass for all the tests.\"\"\"\n optional = False\n \n def __init__(self, exe, base, src, options, optional=False):\n self.exe = exe\n self.basedir = base\n self.srcfile = src\n self.options = options\n self.optional = optional\n self.timeoutFactor = 1\n \n def opt(self):\n self.optional = True\n return self\n \n def isOptional(self):\n return self.optional\n \n def getName(self):\n return os.path.join(self.basedir, self.srcfile)\n\n def execCmd(self):\n return [os.path.abspath(self.exe)] + self.options + [self.srcfile]\n \n def invoke(self):\n timeout = TimedProcess.timeout * self.timeoutFactor\n \n p = TimedProcess(self.execCmd(), self.basedir, timeout)\n p.execute()\n return self.check(p)\n \n def 
check(self, proc):\n        cmd = os.path.basename(proc.cmd[0])\n        \n        if proc.killed:\n            return TestResult.fail(\" Process '%s' timed out.\\n\\n\" % cmd)\n        \n        if proc.crash():\n            return TestResult.fail(\" '%s' crashed. Return code was: %d\\n\\n\" % (cmd, proc.returncode))\n        \n        return TestResult.success()\n\nclass ValgrindTest(Test):\n    \"\"\"Auto-check for memory leaks with valgrind\"\"\"\n    \n    VALGRIND_XML_FILE = os.path.join(tempfile.gettempdir(), \"impala_valgrind.xml\")\n    \n    def __init__(self, test):\n        super(ValgrindTest, self).__init__(test.exe, test.basedir, test.srcfile, test.options, test.isOptional())\n        self.timeoutFactor = 5\n\n    def execCmd(self):\n        return [\"valgrind\", \"--xml=yes\", \"--xml-file=\"+ValgrindTest.VALGRIND_XML_FILE] + \\\n               [os.path.abspath(self.exe)] + self.options + [self.srcfile]\n    \n    def check(self, p):\n        super_result = super(ValgrindTest, self).check(p)\n        if not super_result.success:\n            return super_result\n        \n        try:\n            vgout = ValgrindXML(ValgrindTest.VALGRIND_XML_FILE)\n\n            success = len(vgout.leaks) == 0\n\n            if not success:\n                return TestResult.fail(\"\\n\" + str(vgout))\n            else:\n                return TestResult.success()\n        except Exception as e:\n            return TestResult.fail(\"Parsing valgrind output FAILED: %s\" % e)\n    \ndef diff_output(output, expected):\n    olines = output.splitlines(1)\n    elines = expected.splitlines(1)\n    \n    diff = difflib.Differ()\n    fails = 0\n    msg = []\n    for cp in diff.compare(elines, olines):\n        if cp.startswith('-') or cp.startswith('+'):\n            msg.append(cp.rstrip())\n            fails=fails+1\n\n    return TestResult(fails == 0, \"\\n\".join(msg))\n\nclass CompilerOutputTest(Test):\n    \"\"\"Superclass tests which work on a single file and compare the output.\"\"\"\n    positive = True\n    basedir = \".\"\n    srcfile = \"\"\n    options = []\n    result = None\n    \n    def __init__(self, positive, exe, base, src, res, options=[]):\n        super(CompilerOutputTest, self).__init__(exe, base, src, list(options))\n        self.positive = positive\n        self.result = res\n    \n    def check(self, p):\n        super_result = super(CompilerOutputTest, self).check(p)\n        if not super_result.success:\n            return super_result\n        \n        if p.success() != self.positive:\n            return TestResult.fail(\"Output: %s\\n\\n\" % p.output)\n        \n        if self.result is None:\n            return TestResult.success()\n        \n        with open(os.path.join(self.basedir, self.result), 'r') as f:\n            return diff_output(p.output.strip(), f.read().strip())\n\ndef get_tests(directory, suffixes=[\".O0\", \".O1\", \".O3\", \".ll\"]):\n    \"\"\"A generator for test files based on the file extensions\n    \n    Output files are expected to have the same name but with .output extension.\n    If no output file is found for a test no output is assumed.\n    \n    This yields (test_file, output_file) for each file in the directory that has an\n    extension included in the suffixes list\"\"\"\n    tests = []\n\n    for testfile in os.listdir(directory):\n        if os.path.splitext(testfile)[1] in suffixes:\n            of = testfile + \".output\"\n            if not os.path.exists(os.path.join(directory, of)):\n                of = os.path.splitext(testfile)[0] + \".output\"\n            res = of if os.path.exists(os.path.join(directory, of)) else None\n            yield (testfile, res)\n\ndef make_compiler_output_tests(directory, exe, positive=True, options=[]):\n    \"\"\"Creates a list of CompilerOutputTests using get_tests(directory)\"\"\"\n    tests = []\n    for testfile, res in get_tests(directory):\n        tests.append(CompilerOutputTest(positive, exe, directory, testfile, res, options))\n    return sorted(tests, key=lambda test: test.getName())\n\nmake_tests = make_compiler_output_tests\n\ndef 
get_tests_from_dir(directory):\n testfile = os.path.join(directory, \"tests.py\")\n \n if os.path.exists(testfile):\n tests = imp.load_source(\"tests\", testfile).allTests()\n else:\n tests = make_tests(directory)\n return tests\n\ndef invoke_test(test):\n return test.invoke()\n\ndef init_worker():\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n \ndef executeTests(tests, processes):\n \"\"\"Invoke this function with a list of test objects to run the tests. \"\"\"\n pool = multiprocessing.Pool(processes, init_worker)\n it = pool.imap(invoke_test, tests)\n\n res = {}\n for i in range(len(tests)):\n print (\"[\"+str(i+1)+\"/\"+str(len(tests))+\"] \" + tests[i].getName())\n r = res[tests[i]] = it.next()\n\n if not r.success:\n print(\"[FAIL] \" + \" \".join(tests[i].execCmd()))\n print(r.msg)\n \n print(\"\\n* Test summary\\n\")\n failOpt = 0\n failReq = 0\n \n opt_tests = []\n req_tests = []\n for t in tests:\n opt_tests.append(t) if t.isOptional() else req_tests.append(t)\n \n for t in req_tests:\n if not res[t].success:\n print(\"- REQUIRED test failed: \"+t.getName())\n failReq += 1\n \n for t in opt_tests:\n if not res[t].success:\n print(\"- OPTIONAL test failed: \"+t.getName())\n failOpt += 1\n \n if failOpt == 0 and failReq == 0:\n print(\"\\n* All \" + str(len(tests)) + \" tests were successful.\")\n else:\n if failReq == 0:\n print(\"\\n* All %i required tests were successful.\" % len(req_tests))\n else:\n print(\"\\n!\" + str(failReq) + \" of \" + str(len(req_tests)) + \" REQUIRED tests failed.\")\n if failOpt == 0:\n print(\"\\n* All %i optional tests were successful.\" % len(opt_tests))\n else:\n print(\"\\n!\" + str(failOpt) + \" of \" + str(len(opt_tests)) + \" OPTIONAL tests failed.\")\n","repo_name":"croustibaie/Taint_checking_llvm","sub_path":"test/infrastructure/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7297,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"86"} +{"seq_id":"22603875868","text":"import networkx as nx\nimport os\n\n\nclass BaseGraph:\n def __init__(self):\n self.graph = None\n self.dataset_path = os.path.join(os.path.dirname(__file__), \"../datasets\")\n\n def retrieve_graph(self, graphname):\n\n if graphname == \"blogcatalog\":\n self.graph = nx.read_gml(os.path.join(self.dataset_path, \"blogcatalog.gml\"))\n\n\nb = BaseGraph()\nb.retrieve_graph(\"blogcatalog\")\n\n","repo_name":"abdcelikkanat/free-emb","sub_path":"base/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"74677287959","text":"\nimport numpy as np\nfrom gym import utils\nfrom gym.envs.mujoco import mujoco_env\n\nclass BoxEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n\n def __init__(self, include_fov=True, set_param_fn=None, rand_reset=False): \n self.include_fov = include_fov\n self.t = -1\n self.set_param_fn = set_param_fn\n self.rand_reset = rand_reset\n\n mujoco_env.MujocoEnv.__init__(self, '/home/benevans/projects/varyingsim/varyingsim/assets/box.xml', 2)\n utils.EzPickle.__init__(self)\n\n def step(self, a):\n if self.set_param_fn:\n self.set_param_fn(self, self.t)\n\n self.t += 1\n self.do_simulation(a, self.frame_skip)\n ob = self._get_obs()\n reward = 0.0\n \n return ob, reward, False, dict(t=self.t)\n\n def set_mass(self, mass):\n self.model.body_mass[1] = mass\n \n def set_box_friction(self, friction):\n self.model.geom_friction[1, 0] = friction \n \n def set_floor_friction(self, 
friction):\n        self.model.geom_friction[0, 0] = friction\n\n    def set_actuator_gear(self, gear):\n        self.model.actuator_gear[:, 0] = gear\n\n    def get_mass(self):\n        return self.model.body_mass[1]\n\n    def get_box_friction(self):\n        return self.model.geom_friction[1, 0] \n    \n    def get_floor_friction(self):\n        return self.model.geom_friction[0, 0]\n\n    def get_actuator_gear(self):\n        return self.model.actuator_gear[:, 0]\n\n    def _get_obs(self):\n        qpos = self.sim.data.qpos\n        qvel = self.sim.data.qvel\n        ret = np.concatenate([qpos.flat, qvel.flat])\n        if self.include_fov:\n            bf = self.get_box_friction()\n            ff = self.get_floor_friction()\n            mass = self.get_mass()\n            gear = self.get_actuator_gear()\n            ret = np.concatenate([ret, [bf], [ff], [mass], gear])\n        return ret\n\n    def reset_model(self):\n        if self.rand_reset:\n            qpos_start = self.init_qpos.copy()\n            qvel_start = self.init_qvel.copy()\n            qpos_start[:2] += np.random.randn(2) / 2.0\n            qvel_start[:2] += np.random.randn(2) / 10.0\n            self.set_state(\n                qpos_start, \n                qvel_start\n            )\n        else:\n            self.set_state(\n                self.init_qpos, \n                self.init_qvel\n            )\n        self.t = -1\n        return self._get_obs()","repo_name":"bennevans/iida","sub_path":"varyingsim/envs/box.py","file_name":"box.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"87"} +{"seq_id":"33742512281","text":"\n# python Type Annotation, type hinting\n## Type annotation\n# Python is a dynamically typed language\n# Dynamic typing allows simple code like this, but it cannot verify that the value passed to a variable is actually the one you intended\n\n\n# List, Dict, Tuple, Set\nfrom typing import List, Dict, Tuple, Set\n\nnums:List[int] = [1,2,3]\ncountries: Dict[str,str]={'kr':'south-korea','us':'united-states'}\nuser : Tuple[int, str, bool] = (3, 'smkim',True)\nchars: Set[str] = {\"A\", \"B\", \"A\",\"C\"}\nprint('Typing')\nprint(nums, countries, user, chars)\n\n# Final, Union\nfrom typing import Final, Union\nprint('Final, Union')\nTIME_OUT: Final[int] = 10\nprint(TIME_OUT)\n\n# Union covers situations where several types can be allowed\ndef toString(num: Union[int, float]) -> str:\n    return str(num)\nprint(toString(1))\n\nfrom typing import Optional\n# Optional from the typing module is useful for annotating function parameters that allow None\n# Optional[int] is identical to Union[int, None]\ndef repeat(message: str, times: Optional[int] = None) -> list:\n    if times:\n        return [message] * times\n    else:\n        return [message]\n\nprint('Optional')\nprint(repeat('hi',3))\n\n## References\n# https://www.daleseo.com/python-typing/","repo_name":"SeonminKim1/Python","sub_path":"Skills_Advanced-Python/python_typing.py","file_name":"python_typing.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"87"} +{"seq_id":"2948309502","text":"import os\nimport sqlite3\nfrom app.create import createDatabase\nfrom app.tables import createTables, fillBarris, fillUsers, fillGreenPoints\n\nif __name__ == '__main__':\n    database = 'data.db'\n\n    db = createDatabase(os.path.abspath(database))\n\n    if db is not None:\n        createTables(db)\n        fillBarris(db)\n        fillUsers(db)\n        fillGreenPoints(db)\n\n    else:\n        print('No such database :(')\n","repo_name":"raulhigueras/ReciclaBCN","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"3146611060","text":"import numpy as np\nimport sys\nfrom sklearn.preprocessing import normalize\n\n\nclass PageRank:\n    # matrix -> link matrix\n    # beta -> teleport activation probability\n    # 
sumeq -> to identify a unique ranks solution we need\n    #          to know the exact sum of these ranks. By default it's equal to 1.\n    # starting_value -> init for power iterations\n    def __init__(self, matrix, beta, sumeq = 1, starting_value = \"default\"):\n        self.A = normalize(matrix, axis=0, norm='l1')\n        self.N = self.A.shape[0]\n        self.A = self.dead_end_fix(self.A)\n        self.beta = beta\n        self.sumeq = sumeq\n        self.A_cor = beta * self.A + (1 - beta) * np.ones([self.N]*2) / self.N\n        self.epsilon = 0.001\n        self.starval = starting_value\n        print(self.A)\n        print(\"--------------\")\n        print(self.A_cor)\n    \n    def fit_power_iter(self):\n        r0 = np.array([0] * self.N)\n        if self.starval == \"default\":\n            r1 = np.array([1./self.N] * self.N)\n        else:\n            r1 = np.array([self.starval] * self.N)\n        dist = np.linalg.norm(r0-r1, ord = 1)\n        print(\"--------Init ---------\")\n        print(dist)\n        print(r1)\n        i = 1\n        while dist > self.epsilon:\n            print(\"----------- Iteration #{0} ---------\".format(i))\n            i += 1\n            temp = self.A_cor.dot(r1)\n            r0 = r1\n            r1 = temp.reshape((self.N, 1))  # was hardcoded to 3, which only worked for the 3x3 demo matrix\n            dist = np.linalg.norm(r0-r1, ord = 1)\n            print(dist)\n            print(r1)\n        self.solution = r1\n\n    def print_final_sol(self):\n        print(\"----- Final Solution ------\")\n        print(self.sumeq/sum(self.solution) * self.solution)\n\n    def dead_end_fix(self, matrix):\n        x = matrix.sum(axis = 0)\n        for i, ss in enumerate(x):\n            if ss == 0:\n                matrix[:,i] = np.ones((self.N,)) / self.N\n        return(matrix)\n\n    def print_equations(self):\n        print(\"------ Equations ------\")\n        print(self.A_cor - np.eye(self.N))\n\nif __name__ == \"__main__\":\n    m = np.matrix([[0.,1.,1.], [0.,0.,1.], [1.,0.,0.]])\n    t = PageRank(m, 1, 1, 1)\n    t.fit_power_iter()\n    t.print_final_sol()\n    t.print_equations()\n\n","repo_name":"orenov/mmds","sub_path":"week1/pagerank.py","file_name":"pagerank.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"3387513942","text":"# Kivy\nimport kivy\nfrom kivy.app import App\n##from kivy.uix.button import Button\n##from kivy.uix.scatter import Scatter\n##from kivy.uix.label import Label\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.button import Label\nfrom kivy.uix.popup import Popup\n\n#PiCamera\nfrom picamera import PiCamera\n\n#Misc\nfrom time import sleep\nfrom datetime import datetime\n#################################################################\n\nclass CustomPopup(Popup):\n    \n    global imagename\n    def imagename():\n        imagepath = 'pics/'\n        imagedatetime = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n        imagesuffix = '_Test'\n        imageext = '.jpg'\n        imagenamecomplete = imagepath + imagedatetime + imagesuffix + imageext\n        return imagenamecomplete\n    \n    # Camera\n    def video(self):\n        camera = PiCamera()\n\n        camera.rotation = 180\n        camera.resolution = (3280, 2464)\n        #camera.resolution = (2000, 2000)\n        #camera.framerate = 15\n\n        camera.start_preview(resolution=(1640, 1232))\n        sleep(5)\n        camera.annotate_text = 'Hello world!'\n        camera.capture(imagename())\n        camera.stop_preview()\n\nclass HomeGridLayout(GridLayout):\n    \n    # Camera\n    def video(self):\n        camera = PiCamera()\n\n        camera.rotation = 180\n        camera.resolution = (3280, 2464)\n        #camera.resolution = (2000, 2000)\n        #camera.framerate = 15\n\n        camera.start_preview(resolution=(1640, 1232))\n        sleep(5)\n        camera.annotate_text = 'Hello world!'\n        camera.capture('pics/image6.jpg')\n        camera.stop_preview()\n    \n    # Opens Popup when called\n    def 
open_popup(self):\n        the_popup = CustomPopup()\n        the_popup.open()\n\nclass PhotoboxApp(App):\n    \n    def build(self):\n        return HomeGridLayout()\n\nPhotobox = PhotoboxApp()\n\nPhotobox.run()","repo_name":"matdoess/photobox","sub_path":"backup/photobox_2018_05_31/testing/2017-10-07_photobox.py","file_name":"2017-10-07_photobox.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"87"} +{"seq_id":"43514445687","text":"#!/usr/bin/env python\n\nimport argparse\nimport cloudpickle\n\nfrom io import open\n\n\ndef run(pickled_runner):\n    f = cloudpickle.load(open(pickled_runner, \"rb\"))\n    res = f[\"func\"](*f[\"args\"], **f[\"kwargs\"])\n    cloudpickle.dump(res, open(\"./result.pickle\", \"wb\"))\n    return\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"pickled_runner\")\n    args = parser.parse_args()\n    run(args.pickled_runner)\n","repo_name":"ohsu-comp-bio/tesseract","sub_path":"tesseract/resources/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"87"} +{"seq_id":"16296197103","text":"# -*- coding: utf8 -*-\n\n# smallest number evenly divisible by all numbers up to 20\n# lcm (least common multiple)\n\nfrom time import time \n\nstart_time = time()\n\nproduct=1\n\nnumbers=range(2,21)\ni=2\n\nwhile True:\n\tcheck=False\n\tfor j,item in enumerate(numbers):\n\t\tif item %i ==0:\n\t\t\tcheck=True\n\t\t\tnumbers[j]/=i\n\t\t\tif numbers[j]==1:\t\t\t# removed from the list once the number has no divisor left\n\t\t\t\tdel numbers[j]\n\n\tif not check:\n\t\tif i<20:\n\t\t\ti+=1\n\t\telse:\n\t\t\tbreak\n\telse:\n\t\tproduct*=i\n\nprint(product)\n\nprint(time() - start_time)\n\t\n","repo_name":"karapanter/euler","sub_path":"q5.py","file_name":"q5.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"17151638762","text":"from unittest import result\nimport cv2\nimport numpy as np\nfrom keras.models import load_model\nfrom pytesseract import pytesseract\n\n# list of the cameras available on the system\n#all_camera_idx_avaiable = []\n\n# address of the phone camera via the 'IP Webcam' app\nurlCamCel = 'http://192.168.15.2:8080/video'\n# path of the USB camera\nurlCamUsb = \"/dev/video4\"\n\n# loading pre trained model\nmodel = load_model('model/digits.h5')\n\n# function to predict the digits\ndef prediction(frame):\n    caminho_tesseract = r\"/Tesseract-OCR/teressact.exe\"\n    pytesseract.tesseract_cmd = caminho_tesseract\n\n    imgH ,imgW,_ = frame.shape\n    x1,y1,w1,h1 = 0,0,imgH ,imgW\n    imgchar = pytesseract.image_to_string(frame)\n    imgboxes = pytesseract.image_to_boxes(frame)\n    for boxes in imgboxes.splitlines():\n        boxes = boxes.split(' ')\n        x,y,w,h = int(boxes[1]),int(boxes[2]),int(boxes[3]),int(boxes[4])\n        cv2.rectangle(frame,(x,imgH-y),(w,imgH-h),(0,0,255),3)\n        cv2.putText(frame,imgchar,(x1 +int(w1/50),y1+int(h1/50)),cv2.FONT_HERSHEY_COMPLEX,0.7,(0,0,255),2) \n    \n    cv2.imshow('text',frame) \n    \n    # return the recognized text (the original returned two undefined names, which raised a NameError)\n    return imgchar\n\n# OpenCV version we are using, to know\n# which forms of the function calls to use\nprint(\"VERSAO: \", cv2.__version__)\n\n# grab the video stream (here the USB camera)\ncap = cv2.VideoCapture(urlCamUsb)\nWIDTH = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\nHEIGHT = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n\n# if the video could not be opened\nif not cap.isOpened():\n    print(\"Could not open the camera!\")\n\n# set circuloAnterior 
to None\ncirculoAnterior = None\n\n# define the lambda we will use later on\ndist = lambda x1,y1,x2,y2: (x1-x2)**2+(y1-y2)**2\n\n# until the user presses q (for quit) \nwhile(True):\n    # read the frames from the video\n    ret, frame = cap.read()\n    if not ret: break\n    # to avoid noise and too many points in the final image\n    frame_com_blur = cv2.GaussianBlur(frame, (5,5), 0)\n\n    hsv = cv2.cvtColor(frame_com_blur, cv2.COLOR_BGR2HSV)\n\n    # define the lower and upper blue ranges\n    # to be detected/amplified in the mask\n    lower_blue = np.array([38,86,0])\n    upper_blue = np.array([121,255,255])\n\n    # build the mask\n    mascara = cv2.inRange(hsv, lower_blue, upper_blue)\n\n    # findContours returns the contours as the first value; we do not\n    # need the hierarchy, so we call it _\n    # grab the contours of the white areas shown in the mask\n    contornos, _ = cv2.findContours(mascara, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n    #print(contornos)\n\n    # check the area of each contour\n    for contorno in contornos:\n        area = cv2.contourArea(contorno)\n\n        # if the contour area is very small,\n        # it is most likely noise\n        if area > 100:\n            # draw each contour\n            cv2.drawContours(frame, contorno, -1, (0, 255, 0), 3)\n\n            prediction(frame)\n\n    # show the original and the modified frame in a small window\n    cv2.imshow('frame', frame)\n    cv2.imshow('modificado', mascara)\n\n    # if the user presses q, stop execution\n    if cv2.waitKey(1) & 0XFF == ord('q'): \n        break\n\ncap.release()\ncv2.destroyAllWindows()\n\n","repo_name":"ayrous/reconhecimentoBotoesEDigitosElevadorComInputVoz","sub_path":"detectComTesseract.py","file_name":"detectComTesseract.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"87"} +{"seq_id":"6648582112","text":"from machine import Pin, Timer\nfrom micropython import schedule\n# set up status LED\npin_status = Pin(8, Pin.OUT)\nstatus_timer = Timer(0)\nstatus_led = 0\nstatus_next = 0\nDISABLED = False\n\ndef init_activity():\n    print(\"Initializing status LED\")\n    global status_led\n    global status_next\n    status_led = 0\n    status_next = 0 \n    status_timer.init(period=500, mode=Timer.ONE_SHOT, callback=lambda t: schedule(status_callback,1))\n\ndef status_callback(first):\n    global DISABLED\n    global status_led\n    global status_next\n    if not DISABLED:\n        if status_led == 1:\n            pin_status.off()\n            status_led = 0\n        else:\n            if status_next == 1:\n                status_led = 1\n                pin_status.on()\n                status_next = 0\n            else:\n                status_led = 0\n                status_next = 0\n    status_timer.init(period=500, mode=Timer.ONE_SHOT, callback=lambda t: schedule(status_callback,0))\n    \ndef activity():\n    global status_next\n    status_next = 1\n","repo_name":"stonstoff/lighthouse","sub_path":"firmware/activity.py","file_name":"activity.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"87"} +{"seq_id":"1721935882","text":"from argparse import ArgumentParser\n\nimport cherrypy\n\nfrom sale import settings, wsgi\n\n\nclass DjangoApplication:\n    def __init__(self, port: int, certfile: str, keyfile: str):\n        self.port = port\n        self.certfile = certfile\n        self.keyfile = keyfile\n\n    @staticmethod\n    def make_config():\n        config = {\n            'tools.staticdir.on': False,\n            'tools.expires.on': True,\n            'tools.expires.secs': 86400,\n            'tools.staticdir.dir': settings.STATIC_ROOT,\n            'engine.autoreload.on': False,\n            'log.screen': True,\n        }\n\n        cherrypy.tree.mount(None, 
script_name=settings.STATIC_URL, config={'/': config})\n\n    def get_server_str(self):\n        if not self.certfile or not self.keyfile:\n            return 'tcp:%d' % self.port\n        return 'ssl:%d:certKey=%s:privateKey=%s' % (self.port, self.certfile, self.keyfile)\n\n    def run_server(self):\n        # import libs\n        from twisted.internet import endpoints\n        from twisted.internet import reactor\n        from twisted.internet import task\n        from twisted.web import server\n        from twisted.web.wsgi import WSGIResource\n\n        # create default configuration\n        self.make_config()\n\n        # We will be using Twisted HTTP server so let's\n        # disable the CherryPy's HTTP server entirely\n        cherrypy.server.unsubscribe()\n\n        # Publish periodically onto the 'main' channel as the bus mainloop would do\n        task.LoopingCall(lambda: cherrypy.engine.publish('main')).start(0.1)\n\n        # create SSL server from string\n        https_server = endpoints.serverFromString(reactor, self.get_server_str())\n\n        # Tie our app to Twisted\n        reactor.addSystemEventTrigger('after', 'startup', cherrypy.engine.start)\n        reactor.addSystemEventTrigger('before', 'shutdown', cherrypy.engine.exit)\n        resource = WSGIResource(reactor, reactor.getThreadPool(), wsgi.application)\n        site = server.Site(resource)\n        https_server.listen(site)\n        reactor.run()\n\n\ndef parse_args():\n    parser = ArgumentParser()\n    parser.add_argument('--port', type=int, action='store', dest='port', help='Port to attach server')\n    parser.add_argument('--certfile', type=str, action='store', dest='certfile',\n                        help='SSL certificate to attach server')\n    parser.add_argument('--keyfile', type=str, action='store', dest='keyfile',\n                        help='SSL private key to attach server')\n\n    return parser.parse_args()\n\n\nif __name__ == '__main__':\n    args = parse_args()\n\n    print(\"Your app is running at 0.0.0.0:%s\" % args.port)\n    app = DjangoApplication(port=args.port, certfile=args.certfile, keyfile=args.keyfile)\n    app.run_server()\n","repo_name":"oseniasjunior/sale","sub_path":"production-server.py","file_name":"production-server.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"87"} +{"seq_id":"74947231641","text":"#!/usr/bin/env python3\n\nimport sys\nimport copy\nimport rospy\nfrom rospkg import RosPack\nfrom aruco_msgs.msg import MarkerArray\nimport numpy as np\n\nRosPkg = 'erc_remote_maintenance'\nsys.path.insert(0, RosPack().get_path(RosPkg))\n\nfrom orionlib.RobotController import *\n\nVIEW_HIDDEN_JOINTS = np.array([-50.0, -100.0, 90.0, -125.0, -91.0, 74.0])*np.pi/180\n\nglobal controller \n\ndef main():\n    global controller\n    # Start class\n    controller = RobotController(markers_detect = True, load_markers_base = True)\n    rospy.loginfo(\"# ========== STARTING OBJECTIVE 2 ========== #\")\n    rospy.loginfo(\"Loading controller class.\")\n    # Start test\n    rospy.loginfo(\"Loading scene objects.\")\n    controller.add_all_scene()\n    controller.go_home()\n    controller.control_gripper(gripper_state = GRIPPER_OPEN)\n    try:\n        controller.control_joint_state(VIEW_HIDDEN_JOINTS)\n        mrkr_id, _ = controller.detect_aruco_hiden(OTHER_MRKR_SIZE)\n        rospy.loginfo(\"Hidden aruco is \"+str(mrkr_id))\n        controller.go_home()\n\n    except rospy.ROSInterruptException:\n        return\n    except KeyboardInterrupt:\n        controller.go_home()\n        return\n    except Exception as e:\n        controller.go_home()\n        rospy.loginfo(e)\n        return\n    rospy.loginfo(\"Removing scene objects.\")\n    controller.remove_all_scene()\n    rospy.loginfo(\"# ========== FINISHING OBJECTIVE 2 ========== #\")\n\n\n\nif __name__ == 
\"__main__\":\n main()","repo_name":"Renzo-Mendoza/ERC2023_Remote_Edition","sub_path":"erc_remote_maintenance/scripts/obj7.py","file_name":"obj7.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"16507935136","text":"from queue import Queue\n\ndef main():\n # Initialize a queue\n q = Queue()\n # hold name of already searched personnel\n searched = []\n\n # The dataset to search on \n graph = {}\n graph[\"you\"] = [\"bob\", \"alice\" , \"claire\"]\n graph[\"bob\"] = [\"anuj\", \"peggy\"]\n graph[\"alice\"] = [\"peggy\"]\n graph[\"claire\"] = [\"thom\", \"jonny\"]\n graph[\"anuj\"] = []\n graph[\"peggy\"] = []\n graph[\"thom\"] = []\n graph[\"jonny\"] = []\n\n # The person to find\n name = \"thom\"\n\n q.insertToQueue(graph[\"you\"])\n while q.getAllElements():\n person = q.popLeft()\n if person not in searched:\n if person == name:\n print(f\"{person} found.\")\n return\n else:\n q.insertToQueue(graph[person])\n searched.append(person)\n \n print(f\"Person {name} not found\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"shahzaib49/software-architectures","sub_path":"algorithms/breadth-first/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"87"} +{"seq_id":"41534791301","text":"import ast\nimport inspect\nimport util\n\n\n\ndef load_python_to_ast(filepath):\n text_file = open(filepath, \"r\")\n data = text_file.read()\n py_ast = ast.parse(data)\n return py_ast\n\npy_ast = load_python_to_ast(\"../pyg-examples/gcn2-cora.py\")\n\nprint(ast.dump(py_ast, indent=4))","repo_name":"ftxj/ftxj","sub_path":"tools/jxin_tools/nvtx/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"87"} +{"seq_id":"16296197103","text":"# Project name : HackerRank: Lily's Homework\n# Link : https://www.hackerrank.com/challenges/lilys-homework/forum\n# Try it on : \n# Author : Wojciech Raszka\n# E-mail : contact@gitpistachio.com\n# Date created : 2021-04-06\n# Description :\n# Status : Accepted (207621923)\n# Tags : python, sorting, minimum no of swaps to sort array\n# Comment : \n\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the lilysHomework function below.\ndef lilysHomework(arr):\n return min(minNoOfSwapsAsc(arr), minNoOfSwapsDesc(arr))\n\ndef minNoOfSwapsDesc(arr):\n positions = [x[0] for x in sorted(enumerate(arr), key=lambda x: x[1], reverse=True)]\n \n n = len(arr)\n visited = [False for _ in range(n)]\n ans = 0\n for i in range(n):\n if visited[i] or positions[i] == i:\n continue\n \n cycle_size = 0\n j = i\n while not visited[j]:\n visited[j] = True\n j = positions[j]\n cycle_size += 1\n \n if cycle_size > 0:\n ans += (cycle_size - 1)\n \n return ans\n\ndef minNoOfSwapsAsc(arr):\n positions = [x[0] for x in sorted(enumerate(arr), key=lambda x: x[1], reverse=False)]\n \n n = len(arr)\n visited = [False for _ in range(n)]\n ans = 0\n for i in range(n):\n if visited[i] or positions[i] == i:\n continue\n \n cycle_size = 0\n j = i\n while not visited[j]:\n visited[j] = True\n j = positions[j]\n cycle_size += 1\n \n if cycle_size > 0:\n ans += (cycle_size - 1)\n \n return ans\n \nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n result = 
lilysHomework(arr)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n\n","repo_name":"GitPistachio/Competitive-programming","sub_path":"HackerRank/Lily's Homework/Lily's Homework.py","file_name":"Lily's Homework.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"87"} +{"seq_id":"16629574322","text":"import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, GObject\nimport sys\nfrom .uiutils import ChildFinder, get_handler_id_for_signal\nfrom .dialogs import DialogUser\nfrom ..monster import Monster\nimport logging\nimport re\nimport random\n\nlogger = logging.getLogger()\n\n@Gtk.Template(filename=sys.path[0] + \"/dungeon/ui/monsterwidget.glade\")\nclass MonsterWidget(Gtk.Box, ChildFinder, DialogUser):\n __gtype_name__ = \"MonsterWidget\"\n __gsignals__ = {\n 'refresh_data': (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, ()),\n 'update_time': (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, (int,)),\n 'refresh_monsters': (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, ()),\n 'refresh_features': (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, ()),\n 'refresh_contents': (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, ())\n }\n\n def __init__(self, dungeon, monster):\n Gtk.Box.__init__(self)\n self.monster = monster\n self.dungeon = dungeon\n \n self.monsterName = self.find_child('monsterName')\n self.monsterAlignment = self.find_child('monsterAlignment')\n self.monsterType = self.find_child('monsterType')\n self.monsterXP = self.find_child('monsterXP')\n self.monsterSource = self.find_child('monsterSource')\n\n a = self.monster.get_attributes()\n self.monsterName.set_label(f\"{a['name']} ({self.monster.id})\")\n self.monsterSource.set_label(f\"{a['source']}/{a['page']}\")\n self.monsterType.set_label(a['type'])\n self.monsterXP.set_label(str(a['xp']))\n self.monsterAlignment.set_label({\n 'LG': 'Lawful Good',\n 'LN': 'Lawful Neutral',\n 'LE': 'Lawful Evil',\n 'N': 'Neutral',\n 'NE': 'Neutral Evil',\n 'NG': 'Neutral Good',\n 'CE': 'Chaotic Evil',\n 'CN': 'Chaotic Neutral',\n 'CG': 'Chaotic Good'}.get(a['alignment'], a['alignment']))\n\n\n @Gtk.Template.Callback()\n def onFled(self, caller):\n # find a place to flee (if possible)\n node = self.dungeon.map.get_node(self.monster.get_location())\n choices = []\n for ex in node.exits:\n edge = self.dungeon.map.get_edge(ex)\n if edge.is_open():\n if edge.left == self.monster.get_location():\n choices.append([edge.id, edge.right])\n else:\n choices.append([edge.id, edge.left])\n # if we have a choice...run to one of the open doors\n if choices:\n edge_id, new_room = random.choice(choices)\n logger.info(f\"Monster {self.monster.attributes['name']} ({self.monster.id}) in {self.monster.get_location()} fleeing to room {new_room} via {edge_id}\")\n self.monster.set_location(new_room)\n self.message_box(Gtk.MessageType.INFO, \"Fleeing Monster\", f\"{self.monster.attributes['name']} ({self.monster.id}) has fled into room {new_room} via exit {edge_id}\")\n\n self.emit('refresh_monsters')\n\n @Gtk.Template.Callback()\n def onKilled(self, caller):\n self.monster.kill()\n attrs = self.monster.get_attributes()\n node = self.dungeon.map.get_node(self.monster.state['location'])\n if attrs['size'][0].lower() in ['t', 's', 'm']:\n # it's small, we'll make it so the players can pick it up\n node.add_content(f\"The smelly corpse of {attrs['name']} ({self.monster.id})\")\n else:\n # otherwise, it becomes a feature (one does not simply pick up an ancient dragon corpse)\n 
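# (assuming D&D-style size codes: T/S/M = Tiny/Small/Medium, so Large and bigger corpses land in this branch)\n            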
node.add_feature(f\"The smelly, unmovable corpse of {attrs['name']} ({self.monster.id})\")\n\n for p in self.monster.get_property():\n node.add_content(p)\n\n self.emit('refresh_contents')\n self.emit('refresh_monsters')\n self.emit('refresh_features')\n","repo_name":"bdwheele/dungeon","sub_path":"dungeon/ui/monsterwidget.py","file_name":"monsterwidget.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"70696085720","text":"\"\"\"\nCustomized FilterSpecs.\n\"\"\"\nimport logging\n\nfrom django.utils.translation import ugettext as _\nfrom django.contrib.admin import filterspecs\nfrom django.contrib.sites.models import Site\nimport django.db\n\nfrom ella.core.models import Category\nfrom ella_newman.permission import permission_filtered_model_qs\n\nlog = logging.getLogger('newman')\n\nclass CommonFilter(filterspecs.FilterSpec):\n def __init__(self, f, request, params, model, model_admin, field_path=None):\n self.state = 0\n self.params = params\n self.model = model\n self.links = []\n self.model_admin = model_admin\n self.user = request.user\n #self.lookup_val = request.GET.get(self.lookup_kwarg, None) #selected filter value (not label)\n self.lookup_kwarg = 'NOT SET'\n self.f = f\n self.request_get = request.GET\n if self.f:\n # funny conditional parent constructor call\n super(CommonFilter, self).__init__(f, request, params, model, model_admin)\n self.request_path_info = request.path_info\n self.title_text = ''\n if self.f:\n self.title_text = self.f.verbose_name\n self.active_filter_lookup = None\n self.selected_item = None\n self.remove_from_querystring = [] # may be used as list of keys to be removed from querystring when outputting links\n\n def get_lookup_kwarg(self):\n \"\"\"\n this method can be specified as second argument in @filter_spec decorator. (see below)\n\n If more than one GET parameter is used to filter queryset,\n get_lookup_kwarg() should return list containing these parameters\n (suitable esp. for calendar/date filters etc.).\n \"\"\"\n return self.lookup_kwarg\n\n def get_active(self, request_params):\n if self.active_filter_lookup is not None: # cached result\n return self.active_filter_lookup\n self.active_filter_lookup = []\n lookup_multi = 0\n lookup = self.get_lookup_kwarg()\n if type(lookup) == list:\n lookup_multi = len(lookup)\n found = 0\n for p in request_params:\n if not lookup_multi and p == lookup:\n self.active_filter_lookup = [lookup]\n break\n elif lookup_multi:\n if p in lookup:\n found += 1\n if found == lookup_multi:\n self.active_filter_lookup = lookup\n break\n return self.active_filter_lookup\n\n def is_selected_item(self):\n \"\"\"\n Returns empty dict if no filter item is selected.\n Otherwise returns dict containing GET params as keys and corresponding\n values.\n \"\"\"\n active = self.get_active(self.request_get)\n out = dict()\n for par in active:\n if par in self.request_get:\n out[par] = self.request_get[par]\n return out\n\n def generate_choice(self, **lookup_kwargs):\n \"\"\" \n Returns representation of one choice. \n Suitable when rendering of all choices isn't necessary. 
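Resolves the related model automatically when the filtered field is a ForeignKey. 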
Faster than get_selected() method.\n        \"\"\"\n        model = self.model\n        lookup = dict()\n        if 'field' in self.__dict__:\n            if self.field and type(self.field) == django.db.models.fields.related.ForeignKey:\n                model = self.field.rel.to\n        for key in lookup_kwargs:\n            index = key.find('__')\n            if index != key.rfind('__'): #multiple __ present\n                modified_key = key[index + 2:]\n                lookup[modified_key] = lookup_kwargs[key]\n            else:\n                lookup[key] = lookup_kwargs[key]\n\n        if len(lookup.keys()) == 0:\n            return None\n        try:\n            thing = model.objects.get(**lookup)\n        # catch the resolved model's exceptions - each Django model has its own DoesNotExist/MultipleObjectsReturned\n        except (model.MultipleObjectsReturned, model.DoesNotExist):\n            return None\n        return thing.__unicode__()\n\n    def get_selected(self):\n        \" Should be used within a template to get selected item in filter. \"\n        if hasattr(self, 'selected_item'):\n            return self.selected_item\n        if not hasattr(self, 'all_choices'):\n            # return the same structure with error key set\n            return {\n                'selected': False,\n                'query_string':'',\n                'display': '',\n                'error': 'TOO EARLY'\n            }\n        for item in self.all_choices:\n            if item['selected']:\n                self.selected_item = item\n                return item\n\n\n\nclass CustomFilterSpec(CommonFilter):\n    \"\"\" custom defined FilterSpec \"\"\"\n    def __init__(self, f, request, params, model, model_admin, field_path=None):\n        self.state = 0\n        self.params = params\n        self.model = model\n        self.links = []\n        self.model_admin = model_admin\n        self.user = request.user\n        #self.lookup_val = request.GET.get(self.lookup_kwarg, None) #selected filter value (not label)\n        self.lookup_kwarg = 'NOT SET'\n        self.f = f\n        self.request_get = request.GET\n        if self.f:\n            # funny conditional parent constructor call\n            super(CustomFilterSpec, self).__init__(f, request, params, model, model_admin)\n        self.request_path_info = request.path_info\n        self.title_text = ''\n        if self.f:\n            self.title_text = self.f.verbose_name\n        self.active_filter_lookup = None\n        self.all_choices = []\n        self.selected_item = None\n        self.remove_from_querystring = [] # may be used as list of keys to be removed from querystring when outputting links\n\n    def filter_func(self):\n        raise NotImplementedError('filter_func() method should be overloaded (substituted at run-time).')\n\n    def title(self):\n        return self.title_text\n\n    def filter_active(self):\n        \" Can be used from template. \"\n        return self.is_active(self.request_get)\n\n    def is_active(self, request_params):\n        \"\"\"\n        Returns True if filter is applied, otherwise returns False.\n        Tries to find its argument(s) in request querystring.\n        \"\"\"\n        return len(self.get_active(request_params)) > 0\n\n    def get_disabled_params(self):\n        \" Returns parameter dict for constructing HREF to disable this filter. \"\n        out = dict()\n        for key in self.request_get:\n            if key in self.get_active(self.request_get):\n                continue\n            out[key] = self.request_get[key]\n        return out\n\n    def generate_choices(self, cl):\n        def make_unicode_params(pdict):\n            \" param values converted to unicode are needed to make dict to dict parameter comparison. 
\"\n out = dict()\n for key in pdict:\n out[key] = unicode(pdict[key])\n return out\n\n if self.filter_func():\n self.state = 1\n if self.state <= 0:\n yield dict()\n lookup = self.get_lookup_kwarg()\n selected = self.is_selected_item()\n # Reset filter button/a href\n all_query_string = cl.get_query_string(None, self.get_active(self.request_get))\n if all_query_string.endswith('?') and len(all_query_string) == 1:\n all_query_string = '?q='\n yield {'selected': len(selected.keys()) == 0,\n 'query_string': all_query_string,\n 'display': _('All')}\n for title, param_dict in self.links:\n params = make_unicode_params(param_dict)\n yield {'selected': selected == params,\n 'query_string': cl.get_query_string(param_dict, self.remove_from_querystring),\n 'display': title}\n\n def get_selected(self):\n \" Should be used within a template to get selected item in filter. \"\n if self.selected_item:\n return self.selected_item\n if not self.all_choices:\n # return the same structure with error key set\n return {\n 'selected': False,\n 'query_string':'',\n 'display': '',\n 'error': 'TOO EARLY'\n }\n for item in self.all_choices:\n if item['selected']:\n self.selected_item = item\n return item\n\n def choices(self, cl):\n if not self.all_choices:\n self.all_choices = map(None, self.generate_choices(cl))\n return self.all_choices\n\n\ndef filterspec_preregister(cls, test, factory):\n \"\"\" method inserts FilterSpec and test to the beginning of FilterSpec registration table \"\"\"\n cls.filter_specs.insert(0, (test, factory))\n\ndef filterspec_clean_all(cls):\n while cls.filter_specs:\n cls.filter_specs.pop()\n\n# Adding class method register_insert() to FilterSpec.\n# important is to run following code before admin.py\nfilterspecs.FilterSpec.register_insert = classmethod(filterspec_preregister)\nfilterspecs.FilterSpec.clean_registrations = classmethod(filterspec_clean_all)\n\n\ndef filter_spec(field_test_func, lookup_kwarg_func=None, title=None):\n \"\"\"\n Decorator ``filter_spec`` creates custom filter.\n\n Example:\n @filter_spec(lambda field_to_test: isinstance(field_to_test, models.DateField))\n @filter_spec(lambda field_to_test: isinstance(field_to_test, models.DateField), lambda p: 'category__exact')\n \"\"\"\n def decorate(filter_func):\n name = '%s_%s' % (filter_func.__name__, CustomFilterSpec.__name__)\n cls = type(name, (CustomFilterSpec,), {})\n cls.filter_func = filter_func\n if lookup_kwarg_func:\n cls.get_lookup_kwarg = lookup_kwarg_func\n if title:\n cls.title = lambda fspec: title\n filterspecs.FilterSpec.register_insert(field_test_func, cls)\n return filter_func\n return decorate\n\nclass NewmanSiteFilter(CustomFilterSpec):\n \" Site customized filter. Filtering is done via Site.category_set()... 
\"\n site_field_path = 'category__site__id__exact'\n\n def title(self):\n return _('Site')\n\n def get_lookup_kwarg(self):\n for param in self.request_get:\n if param.startswith(self.site_field_path):\n self.selected_lookup = param\n return param\n return ''\n\n def filter_func(fspec):\n root_cats = Category.objects.filter(tree_parent__isnull=True)\n qs = permission_filtered_model_qs(root_cats, fspec.user)\n for cat in qs:\n lookup_dict = dict()\n lookup_dict[fspec.site_field_path] = cat.site.pk\n link_text = '%s (%s)' % (cat.site.name, cat.site.domain)\n link = (link_text, lookup_dict)\n fspec.links.append(link)\n return True\n\n def generate_choice(self, **lookup_kwargs):\n category_id = lookup_kwargs.get(self.site_field_path, '')\n if not category_id or not category_id.isdigit():\n return None\n try:\n thing = Site.objects.get( pk=int(category_id) )\n except (Site.MultipleObjectsReturned, Site.DoesNotExist):\n return None\n return thing.__unicode__()\n\n# -------------------------------------\n# Standard django.admin filters\n# -------------------------------------\n\nclass FilterSpecEnhancement(CommonFilter):\n def filter_active(self):\n \" Can be used from template. \"\n return self.is_active(self.params)\n\nclass RelatedFilterSpec(filterspecs.RelatedFilterSpec, FilterSpecEnhancement):\n def is_active(self, request_params):\n \"\"\"\n Returns True if filter is applied, otherwise returns False.\n Tries to find its argument(s) in request querystring.\n \"\"\"\n return self.lookup_kwarg in request_params\n\n def choices(self, cl):\n if not hasattr(self, 'all_choices'):\n c = super(RelatedFilterSpec, self).choices(cl)\n self.all_choices = map(None, c)\n return self.all_choices\n\nfilterspecs.FilterSpec.register_insert(lambda f: bool(f.rel), RelatedFilterSpec)\n\nclass ChoicesFilterSpec(filterspecs.ChoicesFilterSpec, FilterSpecEnhancement):\n def is_active(self, request_params):\n return self.lookup_kwarg in request_params\n\nfilterspecs.FilterSpec.register_insert(lambda f: bool(f.choices), ChoicesFilterSpec)\n\nclass DateFieldFilterSpec(filterspecs.DateFieldFilterSpec, FilterSpecEnhancement):\n def __init__(self, *args, **kwargs):\n super(DateFieldFilterSpec, self).__init__(*args, **kwargs)\n self.active_choice = None\n\n def get_lookup_kwarg(self):\n out = []\n for title, param_dict in self.links:\n for key in param_dict:\n out.append(key)\n return out\n\n def is_active(self, request_params):\n if self.__get_active_choice() is None:\n return False\n return True\n\n def get_active(self, request_params):\n if self.active_filter_lookup is not None: # cached result\n return self.active_filter_lookup\n self.active_filter_lookup = []\n lookup_multi = 0\n lookup = self.get_lookup_kwarg()\n if type(lookup) == list:\n lookup_multi = len(lookup)\n found = 0\n for p in request_params:\n if not lookup_multi and p == lookup:\n self.active_filter_lookup = [lookup]\n break\n elif lookup_multi:\n if p in lookup:\n found += 1\n self.active_filter_lookup.append(p)\n return self.active_filter_lookup\n\n def __get_active_choice(self):\n if self.active_choice is not None:\n return self.active_choice\n for title, param_dict in self.links:\n if param_dict and self.date_params == param_dict:\n self.active_choice = (title, param_dict)\n return self.active_choice\n\n def generate_choice(self, **lookup_kwargs):\n active_title, active_param_dict = self.active_choice\n if lookup_kwargs == active_param_dict:\n return active_title\n for title, param_dict in self.links:\n if lookup_kwargs == param_dict:\n return 
self.active_choice\n        return None\n\nfrom django.db import models\n\nfilterspecs.FilterSpec.register_insert(lambda f: isinstance(f, models.DateField), DateFieldFilterSpec)\n\n\nclass BooleanFieldFilterSpec(filterspecs.BooleanFieldFilterSpec, FilterSpecEnhancement):\n    def is_active(self, request_params):\n        return False\n\nfilterspecs.FilterSpec.register_insert(lambda f: isinstance(f, models.BooleanField) or isinstance(f, models.NullBooleanField), BooleanFieldFilterSpec)\n","repo_name":"ella/ella-newman","sub_path":"ella_newman/filterspecs.py","file_name":"filterspecs.py","file_ext":"py","file_size_in_byte":14686,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"87"} +{"seq_id":"29343547208","text":"import numpy as np\nfrom metric import Metric\nfrom scipy.optimize import linear_sum_assignment\nfrom track import Track, TrackStatus, KalmanCentroidTrack, KalmanBBoxTrack, ParticleTrack\nimport cv2\nfrom ReID_Model import get_siamese_model, INPUT_SHAPE, BEST_MODEL_PATH, process_image\nfrom sklearn.preprocessing import MinMaxScaler\nimport matplotlib.pyplot as plt\nfrom collections import OrderedDict\nimport imgaug as ia\nfrom skimage.transform import resize\n\n\nclass Tracker(object):\n\n    reid_model = get_siamese_model(INPUT_SHAPE)\n    reid_model.load_weights(BEST_MODEL_PATH)\n\n    def __init__(self, metric_str, metric_threshold=None, max_age=30, n_init=3,\n                 track_type=KalmanCentroidTrack, project=False, project_one=False,\n                 consider_features=False, use_reid_model=False):\n        \"\"\"\n        :param metric_str(str): the metric string used as one of the metrics in Metric class. Must match to the track_type.\n        :param metric_threshold(float): the threshold for the given metric - its sign must match the metric's convention.\n        Defaults to None, in which case the default threshold from the Metric class is used.\n        :param max_age(int): the maximum number of consecutive predict-only frames before an unmatched track is dropped.\n        :param n_init(int): the minimum number of matches for a track to exist.\n        :param track_type(Track): the track type used. MUST match to the metric_str.\n        :param project(bool): whether to project tracks on the image every update.\n        :param project_one(bool): whether to project just one track at a time. 
project must be true for this to work.\n        \"\"\"\n        self.use_reid_model = use_reid_model\n        self.consider_features = consider_features\n        self.one_track_projected = None\n        self.project_one = project_one\n        self.project = project\n        self.num_created_tracks = 0\n        self.frame_count = 0\n        self.tracks = []\n        self.framed_matched_track_dets = []\n        self.framed_unmatched_dets = []\n        self.max_age = max_age\n        self.n_init = n_init\n        self.track_type = track_type\n        self.metric = Metric(metric_str)\n        self.fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False, varThreshold=8)\n        if metric_threshold is None:\n            self.metric_threshold = self.metric.threshold\n        else:\n            self.metric_threshold = metric_threshold\n\n    @staticmethod\n    def _get_cropped_by_det(det, image):\n        \"\"\"\n        :param det: a BBox detection as x1, y1, x2, y2.\n        :param image: the entire frame.\n        :return: the image patch cropped to the bbox.\n        \"\"\"\n        int_det = det.astype(int)\n        return image[int_det[1]:int_det[3], int_det[0]:int_det[2]]\n\n    @staticmethod\n    def _get_reduced_crop_by_det(det, image):\n        int_det = det.astype(int)\n        width = int_det[2] - int_det[0]\n        height = int_det[3] - int_det[1]\n        return image[int_det[1] + int(height / 8):int_det[3] - int(height / 8),\n               int_det[0] + int(width / 8):int_det[2] - int(width / 8)]\n\n    @staticmethod\n    def _get_up_down_crops_by_det(det, image):\n        \"\"\"\n\n        :param det: a BBox detection as x1, y1, x2, y2.\n        :param image: the entire frame.\n        :return: two half-height crops of the bbox, padded by 5 pixels - an upper crop and a lower crop.\n        \"\"\"\n        int_det = det.astype(int)\n        h = det[3] - det[1]\n        y1_up = int(det[1] + h / 2)\n        crop_up = image[y1_up:int_det[3] + 5, int_det[0]:int_det[2]]\n        crop_down = image[int_det[1] - 5:y1_up, int_det[0]:int_det[2]]\n        return crop_up, crop_down\n\n    @staticmethod\n    def _compare_hists(hist1, hist2):\n        \"\"\"\n        :param hist1: 3D histograms returned from Track.calc_hist function.\n        :param hist2: 3D histograms returned from Track.calc_hist function.\n        :return: the Hellinger distance between them.\n        \"\"\"\n        return cv2.compareHist(hist1, hist2, cv2.HISTCMP_BHATTACHARYYA)\n\n    def _associate_detections_to_tracks(self, detections, image):\n        \"\"\"\n        Assigns detections to tracked objects - assuming both are represented the same way the track type\n        is expecting at update.\n\n        Updating self.framed_matched_track_dets list with the matched detections and tracks(as class) - list of tuples.\n        Updating self.framed_unmatched_dets with the unmatched detections.\n        \"\"\"\n        dets = detections.copy()\n        compiled_dets = self.track_type.precompile_detections(detections)\n        self.framed_matched_track_dets = []\n        self.framed_unmatched_dets = []\n        tracks_predicts = self.predict()\n\n        if len(tracks_predicts) == 0:\n            self.framed_unmatched_dets = compiled_dets\n            return\n        dist_matrix = self.metric.distance(compiled_dets, tracks_predicts)\n        if self.metric_threshold < 0:\n            dist_matrix = -dist_matrix\n\n        row, col = linear_sum_assignment(dist_matrix)\n        # If we use features, then the next section will change the distance matrix using the tracks features.\n        if self.consider_features:\n            if image is None:\n                raise ValueError(\"In order to consider the features, you have to pass the image frame each update.\")\n            fgmask = self.fgbg.apply(image)\n            no_bg_image = cv2.bitwise_and(image, image, mask=fgmask)\n\n            for det, trk in zip(row, col):\n                if dist_matrix[det, trk] < self.metric_threshold:\n                    if self.tracks[trk].time_since_features_ext > self.tracks[trk].features_ext_interval:\n                        h = 
int(dets[det][3] - dets[det][1]) + 10\n w = int(dets[det][2] - dets[det][0])\n self.tracks[trk].extract_hist_features(no_bg_image, h, w)\n if len(self.tracks[trk].features.keys()) == 0:\n continue\n col_dists = dist_matrix[:, trk]\n if self.metric_threshold < 0:\n r_add = - self.metric_threshold\n else:\n r_add = self.metric_threshold\n det_indices = np.where(col_dists < dist_matrix[det, trk] + r_add)[0]\n det_indices = np.unique(det_indices)\n if len(det_indices) > 1:\n trk_hist_up = self.tracks[trk].features[\"hist_up\"]\n trk_hist_down = self.tracks[trk].features[\"hist_down\"]\n crops = [Tracker._get_up_down_crops_by_det(dets[d_i], no_bg_image) for d_i in det_indices]\n hists = [(Track.calc_hist(up_down_crops[0]), Track.calc_hist(up_down_crops[1]))\n for up_down_crops in crops]\n diffs_hists = [max(Tracker._compare_hists(trk_hist_up, up_down_hist[0]),\n Tracker._compare_hists(trk_hist_down, up_down_hist[1]))\n for up_down_hist in hists]\n for i, d_i in enumerate(det_indices):\n if self.metric_threshold > 0:\n dist_matrix[d_i, trk] *= diffs_hists[i]\n else:\n dist_matrix[d_i, trk] *= (1-diffs_hists[i])\n row, col = linear_sum_assignment(dist_matrix)\n if len(compiled_dets) > len(tracks_predicts):\n self.framed_unmatched_dets = [det for d, det in enumerate(compiled_dets) if d not in row]\n\n for det, trk in zip(row, col):\n if dist_matrix[det, trk] > self.metric_threshold:\n self.framed_unmatched_dets.append(compiled_dets[det])\n else:\n self.framed_matched_track_dets.append((compiled_dets[det], self.tracks[trk]))\n elif self.use_reid_model:\n if image is None:\n raise ValueError(\"In order to use reid model, you have to pass the image frame each update.\")\n collected_pairs = OrderedDict()\n collected_matchings = {}\n for det, trk in zip(row, col):\n if dist_matrix[det, trk] < self.metric_threshold:\n col_dists = dist_matrix[:, trk]\n if len(col_dists) == 1:\n continue\n min_dist = np.min(col_dists)\n next_min = np.min(col_dists[col_dists > min_dist])\n have_feature = len(self.tracks[trk].features.keys()) != 0\n # update the track feature \"crop\" only when its matching track is far from other detections:\n if (self.tracks[trk].is_confirmed()\n and self.tracks[trk].age % self.tracks[trk].features_ext_interval == 0) and (min_dist / next_min < 0.2\n and min_dist == dist_matrix[det, trk]):\n cropped = Tracker._get_cropped_by_det(dets[det], image.copy())\n self.tracks[trk].features[\"crop\"] = cropped\n if not have_feature:\n continue\n col_dists = dist_matrix[:, trk]\n det_indices = np.where(col_dists < self.metric_threshold)[0]\n det_indices = np.unique(det_indices)\n if len(det_indices) > 1:\n # initializing a track for later with score and detection 0, for later use.\n collected_matchings[trk] = (0, 0)\n for d in det_indices:\n d_crop = Tracker._get_reduced_crop_by_det(dets[d], image.copy())\n collected_pairs[(d, trk)] = [d_crop, self.tracks[trk].features[\"crop\"]]\n\n if len(collected_pairs) > 0:\n pairs = [np.zeros((len(collected_pairs), INPUT_SHAPE[0], INPUT_SHAPE[1], 3)) for _ in range(2)]\n for i, pair in enumerate(collected_pairs.values()):\n pairs[0][i, :, :, :] = process_image(cv2.cvtColor(pair[0], cv2.COLOR_BGR2RGB))\n pairs[1][i, :, :, :] = process_image(cv2.cvtColor(pair[1], cv2.COLOR_BGR2RGB))\n reid_model_similarities = Tracker.reid_model.predict(pairs).ravel()\n for i, pair_ind in enumerate(collected_pairs.keys()):\n # for plotting the similarities: #\n # pair = collected_pairs[pair_ind]\n # title = \"match score: {:.2f}\".format(reid_model_similarities[i])\n # fig = 
plt.figure(figsize=(20, 10))\n                    # fig.subplots_adjust(hspace=0.1, wspace=0.1)\n                    # fig.suptitle(title)\n                    # fig.add_subplot(1, 2, 1)\n                    # plt.imshow(pair[0])\n                    # fig.add_subplot(1, 2, 2)\n                    # plt.imshow(pair[1])\n                    # plt.show()\n\n                    # if the current pair of track and detection has a better similarity score then\n                    # replace the collected matching with the current detection and its score:\n                    if reid_model_similarities[i] > collected_matchings[pair_ind[1]][1]:\n                        collected_matchings[pair_ind[1]] = (pair_ind[0], reid_model_similarities[i])\n\n                # in order for the linear assignment to almost definitely pick the track and detection that matched best,\n                # we put distance of 0 in the distance matrix\n                for trk, value in collected_matchings.items():\n                    dist_matrix[value[0], trk] = 0\n\n                row, col = linear_sum_assignment(dist_matrix)\n            if len(compiled_dets) > len(tracks_predicts):\n                self.framed_unmatched_dets = [det for d, det in enumerate(compiled_dets) if d not in row]\n\n            for det, trk in zip(row, col):\n                if dist_matrix[det, trk] > self.metric_threshold:\n                    self.framed_unmatched_dets.append(compiled_dets[det])\n                else:\n                    self.framed_matched_track_dets.append((compiled_dets[det], self.tracks[trk]))\n        else:\n            if len(compiled_dets) > len(tracks_predicts):\n                self.framed_unmatched_dets = [det for d, det in enumerate(compiled_dets) if d not in row]\n\n            for det, trk in zip(row, col):\n                if dist_matrix[det, trk] > self.metric_threshold:\n                    self.framed_unmatched_dets.append(compiled_dets[det])\n                else:\n                    self.framed_matched_track_dets.append((compiled_dets[det], self.tracks[trk]))\n\n    def predict(self):\n        self.frame_count += 1\n        # get predicted locations from existing trackers.\n        trks = np.zeros((len(self.tracks), self.track_type.track_dim))\n        to_del = []\n        for t, trk in enumerate(trks):\n            pos = self.tracks[t].predict()\n            trk[:] = [pos[i] for i in range(len(pos))]\n            if np.any(np.isnan(pos)):\n                to_del.append(t)\n        trks = np.ma.compress_rows(np.ma.masked_invalid(trks))\n        for t in reversed(to_del):\n            self.tracks.pop(t)\n        return trks\n\n    def update(self, dets, image=None):\n        \"\"\"\n        Params:\n        return an array of tracks with type self.track_type that are still alive after updating.\n        this function will project the relevant tracks if the relevant booleans are True.\n\n        NOTE: The number of objects returned may differ from the number of detections provided.\n        \"\"\"\n        ret = []\n        self._associate_detections_to_tracks(dets, image)\n        # update matched tracks with assigned detections\n\n        for (det, trk) in self.framed_matched_track_dets:\n            trk.update(det)\n            if trk.is_tentative() and trk.hit_streak >= self.n_init:\n                trk.status = TrackStatus.Confirmed\n\n        # create and initialise new trackers for unmatched detections\n\n        for det in self.framed_unmatched_dets:\n            self.num_created_tracks += 1\n            trk = self.track_type(self.num_created_tracks, det)\n            self.tracks.append(trk)\n\n        i = len(self.tracks)\n        for trk in reversed(self.tracks):\n            if trk.is_confirmed() or self.frame_count <= self.n_init:\n                # ret.append(np.concatenate((d, [trk.id + 1])).reshape(1, -1)) # +1 as MOT benchmark requires positive\n                ret.append(trk)\n            i -= 1\n            # remove dead tracklet\n            if trk.time_since_update > self.max_age:\n                self.tracks.pop(i)\n\n        if self.project:\n            if self.project_one:\n                if self.one_track_projected is not None:\n                    if self.one_track_projected not in ret:\n                        if len(ret) == 0:\n                            self.one_track_projected = None\n                        else:\n                            self.one_track_projected = ret[0]\n\n                if self.one_track_projected is None and len(ret) != 0:\n                    self.one_track_projected = ret[0]\n\n                if 
self.one_track_projected is not None:\n                    self.one_track_projected.project_on_image(image)\n            else:\n                for track in ret:\n                    track.project_on_image(image)\n        return ret","repo_name":"tamirdennis/Tracking_Project","sub_path":"tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":15313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"23168138351","text":"#! /usr/bin/env python\n#\ndef r8_pak ( y, n ):\n\n#*****************************************************************************80\n#\n## R8_PAK packs a base 2 exponent into an R8.\n#\n#  Discussion:\n#\n#    This routine is almost the inverse of R8_UPAK.  It is not exactly\n#    the inverse, because abs ( x ) need not be between 0.5 and 1.0.\n#    If both R8_PAK and 2.0^n were known to be in range, we could compute\n#    R8_PAK = x * 2.0^n.\n#\n#  Licensing:\n#\n#    This code is distributed under the GNU LGPL license.\n#\n#  Modified:\n#\n#    24 April 2016\n#\n#  Author:\n#\n#    Python version by John Burkardt.\n#\n#  Parameters:\n#\n#    Input, real Y, the mantissa.\n#\n#    Input, integer N, the exponent.\n#\n#    Output, real VALUE, the packed value.\n#\n  from machine import i4_mach\n  from machine import r8_mach\n  from sys import exit\n\n  aln210 = 3.321928094887362347870319429489\n\n  aln2b = 1.0\n  if ( i4_mach ( 10 ) != 2 ):\n    aln2b = r8_mach ( 5 ) * aln210\n  nmin = aln2b * i4_mach ( 15 )\n  nmax = aln2b * i4_mach ( 16 )\n\n  value, ny = r8_upak ( y )\n\n  nsum = n + ny\n\n  if ( nsum < nmin ):\n    print ( '' )\n    print ( 'R8_PAK - Warning!' )\n    print ( '  Packed number underflows.' )\n    value = 0.0\n    return value\n\n  if ( nmax < nsum ):\n    print ( '' )\n    print ( 'R8_PAK - Fatal error!' )\n    print ( '  Packed number overflows.' )\n    exit ( 'R8_PAK - Fatal error!' )\n\n  while ( nsum < 0 ):\n    value = 0.5 * value\n    nsum = nsum + 1\n\n  while ( 0 < nsum ):\n    value = 2.0 * value\n    nsum = nsum - 1\n\n  return value\n\ndef r8_pak_test ( ):\n\n#*****************************************************************************80\n#\n## R8_PAK_TEST tests R8_PAK.\n#\n#  Licensing:\n#\n#    This code is distributed under the GNU LGPL license.\n#\n#  Modified:\n#\n#    24 April 2016\n#\n#  Author:\n#\n#    John Burkardt\n#\n  import numpy as np\n  import platform\n\n  n_test = np.array ( [ 7, 8, 7, 7, 4, 0, -1, 0, 7, 2, 0 ] )\n\n  y_test = np.array ( [ \\\n    0.5, \\\n    0.5, \\\n    -0.5, \\\n    0.75, \\\n    0.9375, \\\n    0.5, \\\n    0.5, \\\n    0.625, \\\n    0.5048828125, \\\n    0.7853981633974483, \\\n    0.0 ] )\n\n  print ( '' )\n  print ( 'R8_PAK_TEST:' )\n  print ( '  Python version: %s' % ( platform.python_version ( ) ) )\n  print ( '  R8_PAK converts a mantissa and base 2 exponent to an R8.' )\n  print ( '' )\n  print ( '     Mantissa     Exponent         R8' )\n  print ( '' )\n\n  for i in range ( 0, 11 ):\n\n    y = y_test[i];\n    n = n_test[i]\n\n    x = r8_pak ( y, n )\n\n    print ( '  %24.16g  %8d  %14.16g' % ( y, n, x ) )\n#\n#  Terminate.\n#\n  print ( '' )\n  print ( 'R8_PAK_TEST:' )\n  print ( '  Normal end of execution.' 
)\n return\n\ndef r8_upak ( x ):\n\n#*****************************************************************************80\n#\n## R8_UPAK unpacks an R8 into a mantissa and exponent.\n#\n# Discussion:\n#\n# This function unpacks a floating point number x so that\n#\n# x = y * 2.0^n\n#\n# where\n#\n# 0.5 <= abs ( y ) < 1.0.\n#\n# Licensing:\n#\n# This code is distributed under the GNU LGPL license.\n#\n# Modified:\n#\n# 24 April 2016\n#\n# Author:\n#\n# Python version by John Burkardt.\n#\n# Parameters:\n#\n# Input, real X, the number to be unpacked.\n#\n# Output, real Y, the mantissa.\n#\n# Output, integer N, the exponent.\n#\n absx = abs ( x )\n n = 0\n y = 0.0\n\n if ( x == 0.0 ):\n return y, n\n\n while ( absx < 0.5 ):\n n = n - 1\n absx = absx * 2.0\n\n while ( 1.0 <= absx ):\n n = n + 1\n absx = absx * 0.5\n\n if ( x < 0.0 ):\n y = - absx\n else:\n y = + absx\n\n return y, n\n\ndef r8_upak_test ( ):\n\n#*****************************************************************************80\n#\n## R8_UPAK_TEST tests R8_UPAK.\n#\n# Licensing:\n#\n# This code is distributed under the GNU LGPL license.\n#\n# Modified:\n#\n# 24 April 2016\n#\n# Author:\n#\n# John Burkardt\n#\n import numpy as np\n import platform\n\n x_test = np.array ( [ \\\n 64.0, \\\n 128.0, \\\n -64.0, \\\n 96.0, \\\n 15.0, \\\n 0.5, \\\n 0.25, \\\n 0.625, \\\n 64.625, \\\n 3.141592653589793, \\\n 0.0 ] )\n\n print ( '' )\n print ( 'R8_UPAK_TEST:' )\n print ( ' Python version: %s' % ( platform.python_version ( ) ) )\n print ( ' R8_UPAK converts an R8 to a mantissa and base 2 exponent.' )\n print ( '' )\n print ( ' X Mantissa Exponent' )\n print ( '' )\n\n for i in range ( 0, 11 ):\n\n x = x_test[i];\n\n y, n = r8_upak ( x )\n\n print ( ' %24.16g %24.16g %8d' % ( x, y, n ) )\n#\n# Terminate.\n#\n print ( '' )\n print ( 'R8_UPAK_TEST:' )\n print ( ' Normal end of execution.' 
)\n  return\n\nif ( __name__ == '__main__' ):\n  from timestamp import timestamp\n  timestamp ( )\n  r8_pak_test ( )\n  r8_upak_test ( )\n  timestamp ( )\n","repo_name":"tnakaicode/jburkardt-python","sub_path":"r8lib/r8_pak.py","file_name":"r8_pak.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"87"} +{"seq_id":"32129077114","text":"import os\nfrom pyspark.sql import DataFrame\n\nPLATFORM_SYNAPSE_INTERNAL = \"synapse_internal\"\nPLATFORM_SYNAPSE = \"synapse\"\nPLATFORM_BINDER = \"binder\"\nPLATFORM_DATABRICKS = \"databricks\"\nPLATFORM_UNKNOWN = \"unknown\"\nSECRET_STORE = \"mmlspark-build-keys\"\nSYNAPSE_PROJECT_NAME = \"Microsoft.ProjectArcadia\"\n\n\ndef current_platform():\n    if os.environ.get(\"AZURE_SERVICE\", None) == SYNAPSE_PROJECT_NAME:\n        from pyspark.sql import SparkSession\n\n        sc = SparkSession.builder.getOrCreate().sparkContext\n        cluster_type = sc.getConf().get(\"spark.cluster.type\")\n        if cluster_type == \"synapse\":\n            return PLATFORM_SYNAPSE\n        else:\n            return PLATFORM_SYNAPSE_INTERNAL\n    elif \"dbfs\" in os.listdir(\"/\"):\n        return PLATFORM_DATABRICKS\n    elif os.environ.get(\"BINDER_LAUNCH_HOST\", None) is not None:\n        return PLATFORM_BINDER\n    else:\n        return PLATFORM_UNKNOWN\n\n\ndef running_on_synapse_internal():\n    return current_platform() == PLATFORM_SYNAPSE_INTERNAL\n\n\ndef running_on_synapse():\n    return current_platform() == PLATFORM_SYNAPSE\n\n\ndef running_on_binder():\n    return current_platform() == PLATFORM_BINDER\n\n\ndef running_on_databricks():\n    return current_platform() == PLATFORM_DATABRICKS\n\n\ndef find_secret(secret_name, keyvault=SECRET_STORE, override=None):\n    if override is not None:\n        return override\n\n    if running_on_synapse() or running_on_synapse_internal():\n        from notebookutils.mssparkutils.credentials import getSecret\n\n        return getSecret(keyvault, secret_name)\n    elif running_on_databricks():\n        from pyspark.sql import SparkSession\n        from pyspark.dbutils import DBUtils\n\n        spark = SparkSession.builder.getOrCreate()\n        dbutils = DBUtils(spark)\n        return dbutils.secrets.get(scope=keyvault, key=secret_name)\n    else:\n        raise RuntimeError(\n            f\"Could not find {secret_name} in keyvault or overrides. 
If you are running this demo \"\n            f\"and would like to manually specify your key for Azure KeyVault or Databricks Secrets,\"\n            f'please add the override=\"YOUR_KEY_HERE\" to the arguments of the find_secret() method'\n        )\n\n\ndef materializing_display(data):\n    if running_on_synapse() or running_on_synapse_internal():\n        from notebookutils.visualization import display\n\n        if isinstance(data, DataFrame):\n            data.collect()\n        display(data)\n    else:\n        print(data)\n","repo_name":"microsoft/SynapseML","sub_path":"core/src/main/python/synapse/ml/core/platform/Platform.py","file_name":"Platform.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":4858,"dataset":"github-code","pt":"87"} +{"seq_id":"39522203168","text":"from basededatos.utils.conexiones import get_mysql_conection\nfrom datetime import datetime\ntry:\n    conexion = get_mysql_conection()\n\n    cursor = conexion.cursor()\n    #conexion.autocommit = True only works if the database has autocommit enabled\n    '''\n    SHOW VARIABLES WHERE Variable_name='autocommit';\n\n    '''\n    sentencia = 'INSERT INTO productos(nombre, precio, fecha_registro) VALUES(%s, %s, %s)'\n    valores = ('Producto', 12313, datetime.now())\n    cursor.execute(sentencia, valores)\n    \n    sentencia = 'UPDATE productos SET nombre=%s, precio=%s, fecha_registro=%s WHERE id=%s'\n    valores = ('Producto1234', 21313,datetime.now(), 2)\n    cursor.execute(sentencia, valores)\n\n    conexion.commit() #the commit is not executed until the transaction has completed successfully\n    print('Transaction finished')\nexcept Exception as e:\n    conexion.rollback()\n    print(f'An error occurred, rolled back: {e}')\n\nfinally:\n    conexion.cursor().close()\n    conexion.close()\n\n","repo_name":"kevinp97/Curso-python-pycharm","sub_path":"basededatos/transacciones/1.manejo_transacciones.py","file_name":"1.manejo_transacciones.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"27684804512","text":"\"\"\"\n    Lob\n\n    The Lob API is organized around REST. Our API is designed to have predictable, resource-oriented URLs and uses HTTP response codes to indicate any API errors.
Looking for our [previous documentation](https://lob.github.io/legacy-docs/)? # noqa: E501\n\n The version of the OpenAPI document: 1.3.0\n Contact: lob-openapi@lob.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\nimport string\nimport unittest\nimport warnings\n\nimport lob_python\nfrom lob_python.api.letters_api import LettersApi # noqa: E501\nfrom lob_python.model.letter_editable import LetterEditable\nfrom lob_python.model.address_editable import AddressEditable\nfrom lob_python.model.ltr_use_type import LtrUseType\nfrom lob_python.model.mail_type import MailType\nfrom lob_python.model.sort_by3 import SortBy3\nfrom lob_python.model.metadata_model import MetadataModel\nfrom lob_python.model.include_model import IncludeModel\nfrom lob_python.exceptions import UnauthorizedException, NotFoundException, ApiException\nfrom unittest.mock import Mock, MagicMock\n\nclass TestLettersApi(unittest.TestCase):\n \"\"\"LettersApi unit test stubs\"\"\"\n\n def setUp(self):\n warnings.simplefilter(\"ignore\", ResourceWarning)\n self.config_for_unit = lob_python.Configuration(\n username = \"Totally Fake Key\"\n )\n with lob_python.ApiClient(self.config_for_unit) as self.api_client:\n self.mock_api = LettersApi(self.api_client)\n\n self.mock_list_of_letters = MagicMock(return_value={\n \"data\": [{ \"id\": \"fake 1\" }, { \"id\": \"fake 2\" }]\n })\n\n self.letter_editable = LetterEditable(\n to = \"adr_fakeId1\",\n _from = \"adr_fakeId2\",\n file = \"https://s3-us-west-2.amazonaws.com/public.lob.com/assets/us_letter_1pg.pdf\",\n color = True,\n use_type = LtrUseType(\"marketing\")\n )\n\n def test_letter_retrieve_error_handle(self):\n \"\"\"Test case for handling retrieve error\"\"\"\n self.mock_api.letter_retrieve = Mock(side_effect=UnauthorizedException(status=401, reason=\"Unauthorized\"))\n\n with self.assertRaises(Exception) as context:\n self.mock_api.letter_retrieve(\"ltr_fakeId\")\n self.assertTrue(\"Unauthorized\" in context.exception.__str__())\n\n def test_letter_retrieve(self):\n \"\"\"Test case for retrieving letter\"\"\"\n self.mock_api.letter_retrieve = MagicMock(return_value={\n \"id\": \"ltr_fakeId\"\n })\n retrieved_letter = self.mock_api.letter_retrieve(\"ltr_fakeId\")\n self.assertIsNotNone(retrieved_letter)\n self.assertEqual(retrieved_letter[\"id\"], \"ltr_fakeId\")\n\n def test_letter_retrieve_with_custom_headers(self):\n \"\"\"Test case for retrieving new letter with custom headers\"\"\"\n self.mock_api.letter_retrieve = MagicMock(return_value={\n \"id\": \"ltr_fakeId\"\n })\n retrieved_letter = self.mock_api.letter_retrieve(\"ltr_fakeId\", _content_type=\"application/json\")\n self.assertIsNotNone(retrieved_letter)\n self.assertEqual(retrieved_letter[\"id\"], \"ltr_fakeId\")\n\n def test_letters_list(self):\n \"\"\"Test case for listing letters\"\"\"\n self.mock_api.letters_list = self.mock_list_of_letters\n letters = self.mock_api.letters_list()\n self.assertIsNotNone(letters)\n self.assertEqual(len(letters[\"data\"]), 2)\n\n def test_letters_list_with_custom_headers(self):\n \"\"\"Test case for listing letters with custom headers\"\"\"\n self.mock_api.letters_list = self.mock_list_of_letters\n letters = self.mock_api.letters_list(_content_type=\"application/json\")\n self.assertIsNotNone(letters)\n self.assertEqual(len(letters[\"data\"]), 2)\n\n def test_letters_list_error_handle(self):\n \"\"\"Test case for handling list error\"\"\"\n msg = \"\"\"Cannot prepare a request message for provided\n arguments. 
Please check that your arguments match\n declared content type.\"\"\"\n self.mock_api.letters_list = Mock(side_effect=ApiException(status=0, reason=msg))\n\n with self.assertRaises(Exception) as context:\n self.mock_api.letters_list()\n self.assertTrue(\"Cannot prepare a request message\" in context.exception.__str__())\n\n def test_letters_list_with_limit_param(self):\n \"\"\"Test case for listing letter with limit parameter\"\"\"\n self.mock_api.letters_list = self.mock_list_of_letters\n letters = self.mock_api.letters_list(limit=10)\n self.assertIsNotNone(letters)\n self.assertEqual(len(letters[\"data\"]), 2)\n\n def test_letters_list_with_before_param(self):\n \"\"\"Test case for listing letter with before parameter\"\"\"\n self.mock_api.letters_list = self.mock_list_of_letters\n letters = self.mock_api.letters_list(before=\"before\")\n self.assertIsNotNone(letters)\n self.assertEqual(len(letters[\"data\"]), 2)\n\n def test_letters_list_with_after_param(self):\n \"\"\"Test case for listing letter with after parameter\"\"\"\n self.mock_api.letters_list = self.mock_list_of_letters\n letters = self.mock_api.letters_list(after=\"after\")\n self.assertIsNotNone(letters)\n self.assertEqual(len(letters[\"data\"]), 2)\n\n def test_letters_list_with_include_param(self):\n \"\"\"Test case for listing letter with include parameter\"\"\"\n self.mock_api.letters_list = self.mock_list_of_letters\n letters = self.mock_api.letters_list(include=IncludeModel([\"total_count\"]))\n self.assertIsNotNone(letters)\n self.assertEqual(len(letters[\"data\"]), 2)\n\n def test_letters_list_with_date_created_param(self):\n \"\"\"Test case for listing letter with date_created parameter\"\"\"\n self.mock_api.letters_list = self.mock_list_of_letters\n letters = self.mock_api.letters_list(date_created={ \"gt\": \"2020-01-01\", \"lt\": \"2020-01-31T12\" })\n self.assertIsNotNone(letters)\n self.assertEqual(len(letters[\"data\"]), 2)\n\n def test_letters_list_with_metadata_param(self):\n \"\"\"Test case for listing letter with metadata parameter\"\"\"\n self.mock_api.letters_list = self.mock_list_of_letters\n letters = self.mock_api.letters_list(metadata=MetadataModel(fakeMetadata = \"fakeMetadata\"))\n self.assertIsNotNone(letters)\n self.assertEqual(len(letters[\"data\"]), 2)\n\n def test_letters_list_with_color_param(self):\n \"\"\"Test case for listing letter with color parameter\"\"\"\n self.mock_api.letters_list = self.mock_list_of_letters\n letters = self.mock_api.letters_list(color=True)\n self.assertIsNotNone(letters)\n self.assertEqual(len(letters[\"data\"]), 2)\n\n def test_letters_list_with_scheduled_param(self):\n \"\"\"Test case for listing letter with scheduled parameter\"\"\"\n self.mock_api.letters_list = self.mock_list_of_letters\n letters = self.mock_api.letters_list(scheduled=True)\n self.assertIsNotNone(letters)\n self.assertEqual(len(letters[\"data\"]), 2)\n\n def test_letters_list_with_send_date_param(self):\n \"\"\"Test case for listing letter with send_date parameter\"\"\"\n self.mock_api.letters_list = self.mock_list_of_letters\n letters = self.mock_api.letters_list(send_date={ \"gt\": \"2012-01-01\", \"lt\": \"2012-01-31T12:34:56Z\" })\n self.assertIsNotNone(letters)\n self.assertEqual(len(letters[\"data\"]), 2)\n\n def test_letters_list_with_mail_type_param(self):\n \"\"\"Test case for listing letter with mail_type parameter\"\"\"\n self.mock_api.letters_list = self.mock_list_of_letters\n letters = self.mock_api.letters_list(mail_type=MailType('usps_first_class'))\n 
self.assertIsNotNone(letters)\n self.assertEqual(len(letters[\"data\"]), 2)\n\n def test_letters_list_with_sort_by_param(self):\n \"\"\"Test case for listing letter with sort_by parameter\"\"\"\n self.mock_api.letters_list = self.mock_list_of_letters\n letters = self.mock_api.letters_list(sort_by=SortBy3(date_created = 'asc'))\n\n self.assertIsNotNone(letters)\n self.assertEqual(len(letters[\"data\"]), 2)\n\n def test_letter_cancel(self):\n \"\"\"Test case for canceling letter\"\"\"\n self.mock_api.letter_cancel = MagicMock(return_value={\n \"id\": \"ltr_fakeId\", \"deleted\": True\n })\n canceled_letter = self.mock_api.letter_cancel(\"ltr_fakeId\")\n self.assertTrue(canceled_letter[\"deleted\"])\n\n def test_letter_cancel_with_custom_headers(self):\n \"\"\"Test case for canceling letter\"\"\"\n self.mock_api.letter_cancel = MagicMock(return_value={\n \"id\": \"ltr_fakeId\", \"deleted\": True\n })\n canceled_letter = self.mock_api.letter_cancel(\"ltr_fakeId\", _content_type=\"application/json\")\n self.assertTrue(canceled_letter[\"deleted\"])\n\n def test_letter_cancel_error_handle(self):\n \"\"\"Test case for handling cancel error\"\"\"\n self.mock_api.letter_cancel = Mock(side_effect=NotFoundException(status=404, reason=\"Not Found\"))\n\n with self.assertRaises(Exception) as context:\n self.mock_api.letter_cancel(\"ltr_fakeId\")\n self.assertTrue(\"Not Found\" in context.exception.__str__())\n\n def test_letter_create_error_handle(self):\n \"\"\"Test case for handling create error\"\"\"\n self.mock_api.letter_create = Mock(side_effect=UnauthorizedException(status=401, reason=\"Unauthorized\"))\n\n with self.assertRaises(Exception) as context:\n self.mock_api.letter_create(self.letter_editable)\n self.assertTrue(\"Unauthorized\" in context.exception.__str__())\n\n def test_letter_create(self):\n \"\"\"Test case for creating new letter\"\"\"\n self.mock_api.letter_create = MagicMock(return_value={\n \"id\": \"ltr_fakeId\"\n })\n created_letter = self.mock_api.letter_create(self.letter_editable)\n self.assertIsNotNone(created_letter)\n self.assertIsNotNone(created_letter[\"id\"])\n\n def test_letter_create_with_custom_headers(self):\n \"\"\"Test case for creating new letter with custom headers\"\"\"\n self.mock_api.letter_create = MagicMock(return_value={\n \"id\": \"ltr_fakeId\"\n })\n created_letter = self.mock_api.letter_create(self.letter_editable, _content_type=\"application/json\")\n self.assertIsNotNone(created_letter)\n self.assertIsNotNone(created_letter[\"id\"])\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"lob/lob-python","sub_path":"test/Unit/test_letters_api.py","file_name":"test_letters_api.py","file_ext":"py","file_size_in_byte":10429,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"87"} +{"seq_id":"744618598","text":"# coding=utf-8\n\"\"\"\nInaSAFE Disaster risk assessment tool developed by AusAid and World Bank\n- **Import Dialog Test Cases.**\n\nContact : ole.moller.nielsen@gmail.com\n\n.. 
note:: This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.\n\n\"\"\"\n__author__ = 'tim@kartoza.com'\n__date__ = '13/11/2014'\n__copyright__ = ('Copyright 2013, Australia Indonesia Facility for '\n 'Disaster Reduction')\n\n# noinspection PyUnresolvedReferences\nimport unittest\nimport logging\nimport os\n\nfrom qgis.core import QgsRectangle, QgsCoordinateReferenceSystem\nfrom qgis.PyQt import QtGui, QtWidgets\nfrom qgis.PyQt.QtTest import QTest\nfrom qgis.PyQt.QtCore import Qt, QPoint\n\nfrom safe.definitions.constants import INASAFE_TEST\nfrom safe.test.utilities import get_qgis_app\n\n# In our tests, we need to have this line below before importing any other\n# safe_qgis.__init__ to load all the configurations that we make for testing\nQGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app(qsetting=INASAFE_TEST)\n\nfrom safe.gui.tools.extent_selector_dialog import ExtentSelectorDialog\n\nLOGGER = logging.getLogger('InaSAFE')\n\n\nclass ExtentSelectorTest(unittest.TestCase):\n \"\"\"Test Import Dialog widget\n \"\"\"\n # noinspection PyPep8Naming\n\n def setUp(self):\n \"\"\"Runs before each test.\"\"\"\n self.extent = QgsRectangle(10.0, 10.0, 20.0, 20.0)\n self.crs = QgsCoordinateReferenceSystem('EPSG:4326')\n CANVAS.setExtent(self.extent)\n self.dialog = ExtentSelectorDialog(\n IFACE,\n PARENT,\n self.extent,\n self.crs)\n self.signal_received = False\n\n self.dialog.extent_defined.connect(self.extent_defined)\n\n self.dialog.show()\n QTest.qWaitForWindowExposed(self.dialog)\n\n def tearDown(self):\n \"\"\"Runs after each test.\"\"\"\n self.dialog.reject()\n self.dialog = None\n self.extent = None\n self.crs = None\n\n def extent_defined(self, extent, crs):\n \"\"\"Slot for when extents are changed in dialog.\n\n :param extent: Rectangle that was created.\n :type extent: QgsRectangle\n\n :param crs: Coordiate reference system.\n :type crs: QgsCoordinateReferenceSystem\n \"\"\"\n self.extent = extent\n self.crs = crs\n self.signal_received = True\n\n def canvas_mouse_moved(self, point):\n \"\"\"Slot for when the mouse moves on the canvas.\"\"\"\n # print point.toString()\n\n def test_spinboxes(self):\n \"\"\"Test validate extent method.\"\"\"\n self.dialog.x_maximum.clear()\n self.dialog.extent_defined.connect(self.extent_defined)\n QTest.mouseClick(self.dialog.x_maximum, Qt.LeftButton)\n QTest.keyClick(self.dialog.x_maximum, '3')\n QTest.keyClick(self.dialog.x_maximum, '0')\n ok = self.dialog.button_box.button(QtWidgets.QDialogButtonBox.Ok)\n QTest.mouseClick(ok, Qt.LeftButton)\n\n expected_extent = QgsRectangle(10.0, 10.0, 30.0, 20.0)\n self.assertEqual(self.extent.toString(), expected_extent.toString())\n\n @unittest.skip(\"This currently fails as QTest does not properly do the mouse interaction\")\n def test_mouse_drag(self):\n \"\"\"Test setting extents by dragging works.\n\n This currently fails as QTest does not properly do the mouse\n interactions with the canvas.\n\n \"\"\"\n # Imported here because it is not available in OSX QGIS bundle\n # pylint: disable=redefined-outer-name\n from qgis.PyQt.QtTest import QTest\n\n # Click the capture button\n QTest.mouseClick(self.dialog.capture_button, Qt.LeftButton)\n\n # drag a rect on the canvas\n QTest.mousePress(CANVAS, Qt.LeftButton, pos=QPoint(0, 0), delay=500)\n QTest.mouseRelease(\n CANVAS, Qt.LeftButton,\n pos=QPoint(300, 300),\n delay=-1)\n\n # on 
drag the extents selector window should appear again\n        QTest.qWaitForWindowExposed(self.dialog)\n        # Click ok to dispose of the window again\n        ok = self.dialog.button_box.button(QtWidgets.QDialogButtonBox.Ok)\n        QTest.mouseClick(ok, Qt.LeftButton)\n\n        # Check the extent emitted on closing the dialog is correct\n        expected_extent = QgsRectangle(10.0, 10.0, 30.0, 20.0)\n        self.assertEqual(self.extent.toString(), expected_extent.toString())\n\n\nif __name__ == '__main__':\n    suite = unittest.makeSuite(ExtentSelectorTest, 'test')\n    runner = unittest.TextTestRunner(verbosity=2)\n    runner.run(suite)\n","repo_name":"inasafe/inasafe","sub_path":"safe/gui/tools/test/test_extent_selector.py","file_name":"test_extent_selector.py","file_ext":"py","file_size_in_byte":4673,"program_lang":"python","lang":"en","doc_type":"code","stars":247,"dataset":"github-code","pt":"87"} +{"seq_id":"10366506426","text":"import socket\nimport time\nfrom uphone.logging import getLogger\n\n\nlogger = getLogger(__name__)\n\n\nclass Listener(object):\n\n    def __init__(self, url, port=9999):\n\n        self.url = url\n        self.port = port\n\n    def __iter__(self, n_messages=-1):\n        with socket.socket() as socket_uphone:\n            logger.info('Connect to uphone at {url}:{port}'.format(\n                url=self.url,\n                port=self.port)\n            )\n            socket_uphone.connect((self.url, self.port))\n            # Tell server to send data to socket\n            socket_uphone.send(b'Hi uPhone, please gimme some noise')\n            result = socket_uphone.recv(2)\n\n            logger.info('Received {} from Phone'.format(result))\n            if result != b'OK':\n                raise Exception('Could not connect')\n            while True:\n                data = socket_uphone.recv(3).decode('utf-8')\n                logger.info('Received {}'.format(data))\n                if data:\n                    yield int(data)\n                else:\n                    break\n","repo_name":"windisch/uPhone","sub_path":"uphone/listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"87"} +{"seq_id":"75129337882","text":"#In a programming class there are two students, Ana and Luis, who have attended different class sessions.\n#The information is stored in a dictionary where the keys are the students' names and the values are tuples with the sessions each one attended.\n\n#3. Show the sessions attended by one of the two students, but not those attended by both.\n\n\n\ndef principal():\n\n    asistencias = {\"Ana\": (1, 2, 3, 5, 6), \"Luis\": (2, 3, 4, 6, 7)}\n\n    #3\n\n    asistenciasAna = set(asistencias[\"Ana\"])\n    asistenciasLuis = set(asistencias[\"Luis\"])\n    asistenciasUnAlumno = []\n    \n    for x in asistenciasAna:\n        if x not in asistenciasLuis:\n            asistenciasUnAlumno.append(x)\n\n    for x in asistenciasLuis:\n        if x not in asistenciasAna:\n            asistenciasUnAlumno.append(x)\n\n    asistenciasUnAlumno.sort()\n    print()\n    print(f\"3 - The sessions attended by only one of the students and not both were: {asistenciasUnAlumno}\")\n\n\n    #3B\n    asistenciasUnAlumno2 = [x for x in asistenciasAna if x not in asistenciasLuis]\n    asistenciasUnAlumno3 = [x for x in asistenciasLuis if x not in asistenciasAna]\n    asistenciasUnAlumnoFinal = asistenciasUnAlumno2 + asistenciasUnAlumno3\n\n    asistenciasUnAlumnoFinal.sort()\n    print()\n    print(f\"3B - The sessions attended by only one of the students and not both were: {asistenciasUnAlumnoFinal}\")","repo_name":"DanielRomero29/MiddlePython","sub_path":"Colecciones/Ejercicio3.py","file_name":"Ejercicio3.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"22365525741","text":"from __future__ import annotations\n\nfrom colors import bg_color, fg_color\nfrom conftest import ShinyAppProc\nfrom controls import Sidebar, _expect_class_value\nfrom playwright.sync_api import Page, expect\n\n\ndef test_colors_are_rgb() -> None:\n    assert bg_color.startswith(\"rgb(\")\n    assert fg_color.startswith(\"rgb(\")\n\n\ndef test_sidebar_bg_colors(page: Page, local_app: ShinyAppProc) -> None:\n    page.goto(local_app.url)\n\n    for i in range(1, 5):\n        content = page.locator(f\"#m{i}\")\n        sidebar = page.locator(f\"#s{i}\")\n\n        main_layout = sidebar.locator(\"..\")\n\n        open_val = \"always\" if i <= 2 else \"desktop\"\n        position_val = \"left\" if i % 2 == 1 else \"right\"\n\n        expect(main_layout).to_have_attribute(\"data-bslib-sidebar-open\", open_val)\n\n        _expect_class_value(main_layout, \"sidebar-right\", position_val == \"right\")\n\n        expect(content).to_have_text(f\"Main content - {i}\")\n        expect(sidebar).to_have_text(f\"Sidebar content - {i}\")\n\n        # Only works if css file is loaded\n        expect(sidebar).to_have_css(\"background-color\", bg_color)\n        expect(sidebar).to_have_css(\"color\", fg_color)\n\n    s1 = Sidebar(page, \"s1\")\n    s1.expect_position(\"left\")\n    s2 = Sidebar(page, \"s2\")\n    s2.expect_position(\"right\")\n","repo_name":"posit-dev/py-shiny","sub_path":"tests/playwright/shiny/bugs/0666-sidebar/test_sidebar_colors.py","file_name":"test_sidebar_colors.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":734,"dataset":"github-code","pt":"87"} +{"seq_id":"72903363162","text":"import numpy as np\nimport torchvision, torchvision.transforms\nfrom sklearn.model_selection import GroupShuffleSplit\n\n\nimport torchxrayvision as xrv\nfrom functools import reduce\nimport os\n\nall_xray_ids = ['nih', 'pc', 'chex']\n\n\ndef find_useful_subset(dataset, only_include=['Atelectasis', 'Effusion', 'Cardiomegaly', 'No Finding'],\n                       multiclass=False):\n    ind_list = []\n    if only_include:\n        for label in dataset.pathologies:\n            if label == 'No Finding': continue\n            if label in only_include:\n                ind_list.append(list(np.where(dataset.labels[:, list(dataset.pathologies).index(label)] == 1)[0]))\n\n    if 
len(ind_list) > 0:\n if multiclass:\n res = reduce(np.setdiff1d, ind_list)\n else:\n res = reduce(np.union1d, ind_list)\n else:\n res = list(np.where(dataset.labels[:, list(dataset.pathologies).index(\"No Finding\")] != 1)[0])\n\n try:\n no_findings = list(np.where(dataset.labels[:, list(dataset.pathologies).index(\"No Finding\")] == 1)[0]) # add those with no findings\n except:\n no_findings = []\n\n return np.union1d(no_findings, res).astype(int)\n\n\ndef add_no_finding_to_labels(dataset):\n if \"No Finding\" not in dataset.pathologies:\n print('add No Finding to labels')\n no_finding = np.array(np.all(dataset.labels == 0, axis=1), dtype=float)[:, None]\n dataset.labels = np.concatenate([dataset.labels, no_finding], axis=1)\n try:\n dataset.pathologies.append(\"No Finding\")\n except AttributeError:\n temp = dataset.pathologies.tolist()\n temp.append(\"No Finding\")\n dataset.pathologies = temp\n\n\ndef get_data_augmentation(data_aug_rot=45, data_aug_trans=0.15, data_aug_scale=0.15,\n xray_img_size=224, train=True):\n augs = []\n augs.append(xrv.datasets.ToPILImage())\n augs.append(torchvision.transforms.Resize((xray_img_size, xray_img_size)))\n if train:\n # Apply data augmentation only during training\n affine = torchvision.transforms.RandomAffine(\n data_aug_rot,\n translate=(data_aug_trans, data_aug_trans),\n scale=(1.0 - data_aug_scale, 1.0 + data_aug_scale))\n augs.append(affine)\n\n augs.append(torchvision.transforms.ToTensor())\n data_aug = torchvision.transforms.Compose(augs)\n return data_aug\n\n\ndef get_ind_save_name(dataset_name, view=\"\", unique_patients=0):\n return f\"{dataset_name}_view{view}\" \\\n f\"_uniquePatients{unique_patients}\"\n\n\ndef get_single_xray_data(dataset_path, dataset_name, train=True, xray_views='AP-PA',\n xray_img_size=224, data_aug_rot=45, data_aug_trans=0.15,\n data_aug_scale=0.15, unique_patients=0,\n preparing=False, only_include=['No Finding']\n ):\n if preparing:\n train = False\n views = xray_views.split(\"-\")\n transforms = torchvision.transforms.Compose([xrv.datasets.XRayCenterCrop(),\n ])\n\n data_aug = get_data_augmentation(data_aug_rot=data_aug_rot, data_aug_trans=data_aug_trans,\n data_aug_scale=data_aug_scale, xray_img_size=xray_img_size,\n train=train)\n\n if 'nih' in dataset_name:\n dataset = xrv.datasets.NIH_Dataset(\n imgpath=dataset_path + \"/NIH/images-224\",\n transform=transforms, data_aug=data_aug,\n unique_patients=unique_patients, views=views, )\n elif 'pc' in dataset_name:\n dataset = xrv.datasets.PC_Dataset(\n imgpath=dataset_path + \"/padchest/images-224\",\n transform=transforms, data_aug=data_aug,\n unique_patients=unique_patients, views=views, )\n elif 'chex' in dataset_name:\n dataset = xrv.datasets.CheX_Dataset(\n imgpath=dataset_path + \"/chexpert/CheXpert-v1.0-small\",\n csvpath=dataset_path + \"/chexpert/CheXpert-v1.0-small/train.csv\",\n transform=transforms, data_aug=data_aug,\n unique_patients=unique_patients, views=views, )\n else:\n raise NotImplementedError(f\"{dataset_name} not supported\")\n\n add_no_finding_to_labels(dataset)\n useful_subset = find_useful_subset(dataset, only_include)\n xrv.datasets.relabel_dataset(only_include, dataset, silent=True)\n dataset = xrv.datasets.SubsetDataset(dataset, useful_subset)\n return dataset\n\n\ndef prepare_xray_data(dataset_path, split_info_path, xray_views, unique_patients=0,\n kfold=5, seed=0, recreate_data=True, only_include=[\"No Finding\"]):\n\n if not os.path.exists(f\"{split_info_path}/exp/fold{kfold - 1}\") or recreate_data:\n for dataset_id in 
all_xray_ids:\n dataset = get_single_xray_data(dataset_path, dataset_id, train=False,\n xray_views=xray_views,\n unique_patients=unique_patients,\n preparing=True, only_include=only_include)\n print(dataset)\n if \"patientid\" not in dataset.csv:\n dataset.csv[\"patientid\"] = [\"{}-{}\".format(dataset.__class__.__name__, i) for i in range(len(dataset))]\n gss = GroupShuffleSplit(train_size=(1.-1./kfold), test_size=1./kfold, random_state=seed)\n for k, (train_index, test_index) in enumerate(gss.split(X=range(len(dataset)), groups=dataset.csv.patientid)):\n if not os.path.exists(f\"{split_info_path}/exp/fold{k}\"):\n os.makedirs(f\"{split_info_path}/exp/fold{k}\")\n np.save(f\"{split_info_path}/exp/fold{k}/{dataset_id}_ind_train.npy\", train_index)\n np.save(f\"{split_info_path}/exp/fold{k}/{dataset_id}_ind_test.npy\", test_index)\n print(train_index.shape, test_index.shape)\n\n\ndef get_xray_dataset(dataset_path, split_info_path, xray_id='all', xray_views='AP-PA',\n xray_img_size=224, data_aug_rot=45, data_aug_trans=0.15,\n data_aug_scale=0.15, unique_patients=0,\n train=True, exp_id=0, only_include=[\"No Finding\"]):\n if train:\n file_name_suffix = 'train'\n else:\n file_name_suffix = 'test'\n\n if xray_id == 'all':\n datas = []\n for dataset_id in all_xray_ids:\n subset_ind = np.load(f\"{split_info_path}/exp/fold{exp_id}/{dataset_id}_ind_{file_name_suffix}.npy\")\n individual_dataset = get_single_xray_data(\n dataset_path, dataset_id, train=train, xray_views=xray_views,\n xray_img_size=xray_img_size, data_aug_rot=data_aug_rot, data_aug_trans=data_aug_trans,\n data_aug_scale=data_aug_scale, unique_patients=unique_patients,\n preparing=False, only_include=only_include,)\n train_test_subset = xrv.datasets.SubsetDataset(individual_dataset, subset_ind)\n datas.append(train_test_subset)\n dataset = xrv.datasets.Merge_Dataset(datas)\n\n elif xray_id in all_xray_ids:\n subset_ind = np.load(f\"{split_info_path}/exp/fold{exp_id}/{xray_id}_ind_{file_name_suffix}.npy\")\n individual_dataset = get_single_xray_data(\n dataset_path, xray_id, train=train, xray_views=xray_views,\n xray_img_size=xray_img_size, data_aug_rot=data_aug_rot, data_aug_trans=data_aug_trans,\n data_aug_scale=data_aug_scale, unique_patients=unique_patients, preparing=False,\n only_include=only_include,\n )\n useful_subset = xrv.datasets.SubsetDataset(individual_dataset, subset_ind)\n dataset = useful_subset\n\n else:\n raise NotImplementedError(f\"{xray_id} not supported\")\n return dataset\n\n\ndef get_list_private_data_xray(dataset_path, split_info_path, xray_views='AP-PA',\n xray_img_size=224, data_aug_rot=45, data_aug_trans=0.15,\n data_aug_scale=0.15, unique_patients=0,\n exp_id=0, train=True, only_include=[\"No Finding\"]):\n all_private_data = []\n for xray_id in all_xray_ids:\n all_private_data.append(get_xray_dataset(\n dataset_path, split_info_path, xray_id=xray_id, xray_views=xray_views,\n xray_img_size=xray_img_size, data_aug_rot=data_aug_rot, data_aug_trans=data_aug_trans,\n data_aug_scale=data_aug_scale, unique_patients=unique_patients,\n train=train, exp_id=exp_id, only_include=only_include))\n return all_private_data\n","repo_name":"cleverhans-lab/DeCaPH","sub_path":"datasets_utils/xray/xray_utils.py","file_name":"xray_utils.py","file_ext":"py","file_size_in_byte":8532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"9702216237","text":"class Person(object):\n # 这里的属性世界上属于类属性\n name = \"person\"\n def __init__(self,name):\n # 对象属性\n self.name = 
name\nprint(Person.name) # class attribute\n# instance attributes take priority over class attributes\nper = Person(\"tom\")\n# dynamically add an instance attribute to the object; it only takes effect on this object and does nothing for other objects created from the class\nper.age = 18\nprint(per.age)\nprint(per.name)\n\n# delete the name attribute from the object; later access falls back to the class attribute of the same name\ndel per.name\nprint(per.name)\n'''\nNote: never give an instance attribute the same name as a class attribute, because the instance attribute shadows the class attribute.\nOnce the instance attribute is deleted, the class attribute becomes usable again.\n'''\n","repo_name":"adminwj1/-","sub_path":"对象属性与类属性/对象属性与类属性.py","file_name":"对象属性与类属性.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"87"} +{"seq_id":"70824931160","text":"#!/usr/bin/env python3\nfrom os.path import join, basename, abspath, dirname\nfrom setuptools import setup\n\nwith open(join(dirname(abspath(__file__)), 'requirements.txt')) as f:\n    requirements = f.readlines()\n\nPLUGIN_ENTRY_POINT = ('deepspeech_stt_plug = '\n                      'mycroft_stt_plugin_deepspeech:DeepspeechSTTPlugin')\n\nsetup(\n    name='mycroft-stt-plugin-deepspeech',\n    version='0.2',\n    description='A STT plugin for mycroft',\n    url='http://github.com/MycroftAI/mycroft-stt-plugin-deepspeech',\n    author='Joshua Watts',\n    author_email='',\n    license='Apache-2.0',\n    packages=['mycroft_stt_plugin_deepspeech'],\n    install_requires=requirements,\n    zip_safe=True,\n    classifiers=[\n        'Development Status :: 3 - Alpha',\n        'Intended Audience :: Developers',\n        'Topic :: STT',\n        'License :: OSI Approved :: Apache Software License',\n\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.4',\n        'Programming Language :: Python :: 3.5',\n        'Programming Language :: Python :: 3.6',\n    ],\n    keywords='mycroft plugin stt',\n    entry_points={'mycroft.plugin.stt': PLUGIN_ENTRY_POINT}\n)\n","repo_name":"forslund/mycroft-stt-plugin-deepspeech","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"87"}
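The adminwj1 record above explains that an instance attribute shadows a class attribute of the same name until the instance attribute is deleted. Below is a minimal, runnable sketch of that lookup order; the class Pet and its values are illustrative assumptions, not taken from any record in this file.

# Sketch (assumed names): instance attributes shadow class attributes
class Pet:
    kind = "generic"              # class attribute, shared by every instance

    def __init__(self, kind=None):
        if kind is not None:
            self.kind = kind      # instance attribute, stored in self.__dict__

p = Pet("dog")
print(p.kind)                     # dog      -> found on the instance first
del p.kind                        # drop the instance attribute
print(p.kind)                     # generic  -> lookup falls back to Pet.kind
print("kind" in p.__dict__)       # False    -> nothing left on the instance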
+{"seq_id":"71299609880","text":"# built in functions\n# print()\n# input()\n# type()\n# round()\n\n\"\"\"\n# anatomy of a function\ndef function_name(parameters):\n '''docstring'''\n statement1\n statement2\n '''\n '''\n return [expression]\n\"\"\"\n\ndef greet(first_name, last_name=\"\"):\n '''\n print a welcome message to the console\n '''\n print(f'hello {first_name} {last_name}')\n\n\ngreet('Barry', 'harry')\ngreet(' ')\ngreet('Ron', 'Barry')\n\ndef subtract(num1, num2):\n result = num1 - num2\n return result\n\nsubtract(7, 5)\n\n# create function that says the temperature in english\ndef get_temperature(temp: float) -> str:\n '''function takes in temp as number returns temp in english'''\n\n if temp > 90:\n return \"Hot\"\n elif temp >75 and temp <=90:\n return \"Warm\"\n elif 60 < temp <=75:\n return \"Comfortable\"\n elif 45 < temp <=60:\n return \"Chilly\"\n elif temp <= 45:\n return \"Freezing\"","repo_name":"PdxCodeGuild/class_kiwi","sub_path":"Code/joseph/python/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"87"} +{"seq_id":"14647673709","text":"import re\nfrom faraday_plugins.plugins.plugin import PluginBase\n\n\n__author__ = \"Francisco Amato\"\n__copyright__ = \"Copyright 2013, Faraday Project\"\n__credits__ = [\"Francisco Amato\"]\n__license__ = \"\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Francisco Amato\"\n__email__ = \"famato@infobytesec.com\"\n__status__ = \"Development\"\n\n\nclass MedusaParser:\n \"\"\"\n The objective of this class is to parse an xml file generated by the medusa tool.\n\n @param medusa_filepath A proper simple report generated by medusa\n \"\"\"\n\n def __init__(self, xml_output, resolve_hostname):\n self.resolve_hostname = resolve_hostname\n self.srv = {\n 'ftp': '21', 'http': '80', 'imap': '143', 'mssql': '1433', 'mysql': '3306',\n 'ncp': '524', 'nntp': '119', 'pcanywhere': '5631', 'pop3': '110', 'postgres': '5432',\n 'rexec': '512', 'rlogin': '513', 'rsh': '514', 'smbnt': 'smbnt', 'smtp': '25',\n 'smtp-vrfy': 'smtp-vrfy', 'snmp': '161', 'ssh': '22', 'svn': '3690',\n 'telnet': '23', 'vmauthd': 'vmauthd', 'vnc': '5900', 'web-form': 'web-form',\n 'wrapper': 'wrapper'\n }\n\n lines = xml_output.splitlines()\n self.items = []\n\n for line in lines:\n\n reg = re.search(r\"ACCOUNT FOUND: \\[([^$]+)\\] Host: ([^$]+) User: ([^$]+) Password: ([^$]+) \\[SUCCESS\\]\", line)\n if reg:\n\n item = {\n 'service': reg.group(1),\n 'host': reg.group(2),\n 'user': reg.group(3),\n 'pass': reg.group(4)\n }\n item['ip'] = resolve_hostname(item['host'])\n item['port'] = self.srv[item['service']]\n self.items.append(item)\n\n\nclass MedusaPlugin(PluginBase):\n \"\"\"\n Example plugin to parse medusa output.\n \"\"\"\n\n def __init__(self, *arg, **kwargs):\n super().__init__(*arg, **kwargs)\n self.id = \"Medusa\"\n self.name = \"Medusa Output Plugin\"\n self.plugin_version = \"0.0.1\"\n self.version = \"2.1.1\"\n self.options = None\n self._command_regex = re.compile(r'^(sudo medusa|sudo \\.\\/medusa|medusa|\\.\\/medusa)\\s+.*?')\n self.host = None\n self.port = \"\"\n self._use_temp_file = True\n self._temp_file_extension = \"txt\"\n self.xml_arg_re = re.compile(r\"^.*(-O\\s*[^\\s]+).*$\")\n\n def parseOutputString(self, output):\n \"\"\"\n This method will discard the output the shell sends, it will read it from\n the xml where it expects it to be present.\n\n NOTE: if 'debug' is true then it is being run from a test case and the\n output 
being sent is valid.\n \"\"\"\n parser = MedusaParser(output, resolve_hostname=self.resolve_hostname)\n\n for item in parser.items:\n\n h_id = self.createAndAddHost(item['ip'], hostnames=[item['host']])\n\n port = self.port if self.port else item['port']\n\n s_id = self.createAndAddServiceToHost(\n h_id,\n item['service'],\n ports=[port],\n protocol=\"tcp\",\n status=\"open\")\n\n self.createAndAddCredToService(\n h_id,\n s_id,\n item['user'],\n item['pass'])\n\n self.createAndAddVulnToService(h_id,\n s_id,\n \"Weak Credentials\",\n f\"[medusa found the following credentials]\\nuser:{item['user']}\\npass:{item['pass']}\",\n severity=\"high\")\n\n del parser\n\n def processCommandString(self, username, current_path, command_string):\n super().processCommandString(username, current_path, command_string)\n self.port = \"\"\n arg_match = self.xml_arg_re.match(command_string)\n\n mreg = re.search(r\"\\-n( |)([\\d]+)\", command_string)\n if mreg:\n self.port = mreg.group(2)\n\n if arg_match is None:\n return re.sub(r\"(^.*?medusa?)\", r\"\\1 -O %s\" % self._output_file_path, command_string)\n else:\n return re.sub(arg_match.group(1), r\"-O %s\" % self._output_file_path, command_string)\n\n def _isIPV4(self, ip):\n if len(ip.split(\".\")) == 4:\n return True\n else:\n return False\n\n\n\n\ndef createPlugin(*args, **kwargs):\n return MedusaPlugin(*args, **kwargs)\n","repo_name":"infobyte/faraday_plugins","sub_path":"faraday_plugins/plugins/repo/medusa/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"87"} +{"seq_id":"36305891284","text":"\"\"\"Message model tests\"\"\"\n\nimport os\nfrom unittest import TestCase\n\nfrom models import db, User, Message, Like\n\n\nos.environ['DATABASE_URL'] = \"postgresql:///warbler_test\"\n\nfrom app import app\n\ndb.create_all()\n\nclass MessageModelTestCase(TestCase):\n def setUp(self):\n User.query.delete()\n\n u1 = User.signup(\"u1\", \"u1@email.com\", \"password\", None)\n u2 = User.signup(\"u2\", \"u2@email.com\", \"password\", None)\n\n db.session.commit()\n self.u1_id = u1.id\n self.u2_id = u2.id\n\n msg1 = Message(text = \"Msg1\", user_id = self.u1_id)\n msg2 = Message(text = \"Msg2\", user_id = self.u2_id)\n\n db.session.add(msg1)\n db.session.add(msg2)\n db.session.commit()\n\n self.msg1_id = msg1.id\n self.msg2_id = msg2.id\n\n self.client = app.test_client()\n\n def tearDown(self):\n db.session.rollback()\n\n def test_message_model(self):\n \"\"\"Test Message is created\"\"\"\n msg1 = Message.query.get(self.msg1_id)\n\n self.assertEqual(msg1.user_id, self.u1_id)\n self.assertIn(\"Msg1\", msg1.text)\n\n def test_is_liked(self):\n \"\"\"Test is_liked function\"\"\"\n u1 = User.query.get(self.u1_id)\n u2 = User.query.get(self.u2_id)\n\n msg1 = Message.query.get(self.msg1_id)\n msg2 = Message.query.get(self.msg2_id)\n\n u1.likes.append(msg2)\n db.session.commit()\n\n liked_msg = msg2.is_liked(u1)\n unliked_msg = msg1.is_liked(u2)\n\n self.assertTrue(liked_msg)\n self.assertFalse(unliked_msg)\n\n def test_message_liked(self):\n\n u1 = User.query.get(self.u1_id)\n u2 = User.query.get(self.u2_id)\n\n msg1 = Message.query.get(self.msg1_id)\n msg2 = Message.query.get(self.msg2_id)\n\n # are we making a new instance of Like?\n u1.likes.append(msg2)\n db.session.commit()\n\n fav_list = msg2.users\n\n self.assertIsInstance(fav_list, list)\n self.assertIn(u1, 
fav_list)\n\n","repo_name":"brendaliu2/warbler","sub_path":"test_message_model.py","file_name":"test_message_model.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"30442829092","text":"#!/usr/bin/env python\nimport os\n\nfrom setuptools import find_packages, setup\n\nPROJECT_DIR = os.path.dirname(__file__)\nREQUIREMENTS_DIR = os.path.join(PROJECT_DIR, \"requirements\")\nVERSION = \"1.20.2\"\n\n\ndef get_requirements(env):\n with open(os.path.join(REQUIREMENTS_DIR, f\"{env}.txt\")) as fp:\n return [x.strip() for x in fp.read().split(\"\\n\") if not x.startswith(\"#\")]\n\n\ninstall_requires = get_requirements(\"base\")\n\n\nsetup(\n name=\"baserow\",\n version=VERSION,\n url=\"https://baserow.io\",\n scripts=[\"baserow\"],\n author=\"Bram Wiepjes (Baserow)\",\n author_email=\"bram@baserow.io\",\n license=\"MIT\",\n description=\"Baserow: open source no-code database backend.\",\n long_description=\"Baserow is an open source no-code database tool and Airtable \"\n \"alternative. Easily create a relational database without any \"\n \"technical expertise. Build a table and define custom fields \"\n \"like text, number, file and many more.\",\n platforms=[\"linux\"],\n package_dir={\"\": \"src\"},\n packages=find_packages(\"src\"),\n include_package_data=True,\n install_requires=install_requires,\n)\n","repo_name":"bram2w/baserow","sub_path":"backend/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":1782,"dataset":"github-code","pt":"87"} +{"seq_id":"42328884612","text":"import pytest\n\nfrom shuup.admin.modules.attributes.views.edit import AttributeEditView\nfrom shuup.core.models import Attribute, AttributeType\nfrom shuup.testing.factories import get_default_shop\nfrom shuup.testing.utils import apply_request_middleware\n\n\n@pytest.mark.django_db\ndef test_attribute_edit_view(rf, admin_user):\n # create choices attribute\n shop = get_default_shop()\n data = {\n \"base-name__en\": \"Attribute Name\",\n \"base-identifier\": \"attr-id\",\n \"base-searchable\": \"on\",\n \"base-type\": AttributeType.CHOICES.value,\n \"base-min_choices\": 3,\n \"base-max_choices\": 10,\n \"base-visibility_mode\": 1,\n \"base-ordering\": 0,\n }\n request = apply_request_middleware(rf.post(\"/\", data=data), shop=shop, user=admin_user)\n response = AttributeEditView.as_view()(request)\n assert response.status_code == 302\n attribute = Attribute.objects.get(identifier=\"attr-id\")\n\n # Create the options on next save\n data.update(\n {\n \"choice_options-TOTAL_FORMS\": 2,\n \"choice_options-INITIAL_FORMS\": 0,\n \"choice_options-MIN_NUM_FORMS\": 0,\n \"choice_options-MAX_NUM_FORMS\": 100,\n \"choice_options-0-name__en\": \"Option A\",\n \"choice_options-1-name__en\": \"Option B\",\n }\n )\n request = apply_request_middleware(rf.post(\"/\", data=data), shop=shop, user=admin_user)\n response = AttributeEditView.as_view()(request, pk=attribute.pk)\n assert response.status_code == 302\n\n options = list(sorted(attribute.choices.values_list(\"translations__name\", flat=True)))\n assert options[0] == \"Option A\"\n assert options[1] == \"Option B\"\n\n options_ids = list(sorted(attribute.choices.values_list(\"pk\", flat=True)))\n # change options\n data.update(\n {\n \"choice_options-TOTAL_FORMS\": 3,\n \"choice_options-INITIAL_FORMS\": 2,\n \"choice_options-0-id\": options_ids[0],\n \"choice_options-0-name__en\": \"Option AZ\", # 
change name\n \"choice_options-1-id\": options_ids[1],\n \"choice_options-1-DELETE\": \"on\", # delete option\n \"choice_options-2-name__en\": \"Option C\", # new one\n }\n )\n request = apply_request_middleware(rf.post(\"/\", data=data), shop=shop, user=admin_user)\n response = AttributeEditView.as_view()(request, pk=attribute.pk)\n assert response.status_code == 302\n\n attribute = Attribute.objects.get(identifier=\"attr-id\")\n options = list(sorted(attribute.choices.values_list(\"translations__name\", flat=True)))\n assert options[0] == \"Option AZ\"\n assert options[1] == \"Option C\"\n assert len(options) == 2\n","repo_name":"shuup/shuup","sub_path":"shuup_tests/admin/test_attributes_view.py","file_name":"test_attributes_view.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":2106,"dataset":"github-code","pt":"87"} +{"seq_id":"70825904280","text":"\ntemplates_path = ['_template']\nexclude_patterns = ['_build']\n\nsource_suffix = '.rst'\nmaster_doc = 'index'\n\nproject = u'nextgisweb'\ncopyright = u'2014-2016, NextGIS'\n\nversion = '2.2'\nrelease = '2'\n\nlanguage = 'ru'\n\npygments_style = 'sphinx'\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinxcontrib.httpdomain',\n 'sphinx.ext.todo'\n]\n\nautodoc_member_order = 'bysource'\nautoclass_content = 'both'\n\nhttp_index_shortname = 'api'\n\n# -- Options for HTML output ----------------------------------------------\n\nhtml_theme = 'nature'\n","repo_name":"nextgis/nextgisweb","sub_path":"doc/developer/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":197,"dataset":"github-code","pt":"87"} +{"seq_id":"22107253459","text":"import hashlib\nimport hmac\nfrom datetime import datetime\n\nfrom boto3 import Session\nfrom botocore.client import Config\nfrom minio import Minio\nfrom minio.compat import queryencode, urlsplit\nfrom minio.error import InvalidArgumentError\nfrom minio.signer import (\n collections,\n generate_canonical_request,\n generate_credential_string,\n generate_signing_key,\n generate_string_to_sign,\n get_signed_headers,\n)\n\nfrom shub.apps.main.models import Container\nfrom shub.settings import (\n DISABLE_MINIO_CLEANUP,\n MINIO_BUCKET,\n MINIO_EXTERNAL_SERVER,\n MINIO_REGION,\n MINIO_ROOT_PASSWORD,\n MINIO_ROOT_USER,\n MINIO_SERVER,\n MINIO_SSL,\n)\n\n# Signature version '4' algorithm.\n_SIGN_V4_ALGORITHM = \"AWS4-HMAC-SHA256\"\n\nMINIO_HTTP_PREFIX = \"https://\" if MINIO_SSL else \"http://\"\n\nminioClient = Minio(\n MINIO_SERVER,\n region=MINIO_REGION,\n access_key=MINIO_ROOT_USER,\n secret_key=MINIO_ROOT_PASSWORD,\n secure=MINIO_SSL,\n)\n\nminioExternalClient = Minio(\n MINIO_EXTERNAL_SERVER,\n region=MINIO_REGION,\n access_key=MINIO_ROOT_USER,\n secret_key=MINIO_ROOT_PASSWORD,\n secure=MINIO_SSL,\n)\n\nif not minioClient.bucket_exists(MINIO_BUCKET):\n minioClient.make_bucket(MINIO_BUCKET)\n\nsession = Session(\n aws_access_key_id=MINIO_ROOT_USER,\n aws_secret_access_key=MINIO_ROOT_PASSWORD,\n region_name=MINIO_REGION,\n)\n\n\n# https://github.com/boto/boto3/blob/develop/boto3/session.py#L185\ns3 = session.client(\n \"s3\",\n verify=MINIO_SSL,\n use_ssl=MINIO_SSL,\n endpoint_url=MINIO_HTTP_PREFIX + MINIO_SERVER,\n region_name=MINIO_REGION,\n config=Config(signature_version=\"s3v4\", s3={\"addressing_style\": \"path\"}),\n)\n\n# signature_versions\n# https://github.com/boto/botocore/blob/master/botocore/auth.py#L846\ns3_external = session.client(\n \"s3\",\n verify=MINIO_SSL,\n 
use_ssl=MINIO_SSL,\n endpoint_url=MINIO_HTTP_PREFIX + MINIO_EXTERNAL_SERVER,\n region_name=MINIO_REGION,\n config=Config(signature_version=\"s3v4\", s3={\"addressing_style\": \"path\"}),\n)\n\n\ndef sregistry_presign_v4(\n method,\n url,\n credentials,\n content_hash_hex,\n region=None,\n headers=None,\n expires=None,\n response_headers=None,\n):\n \"\"\"\n Calculates signature version '4' for regular presigned URLs.\n :param method: Method to be presigned examples 'PUT', 'GET'.\n :param url: URL to be presigned.\n :param credentials: Credentials object with your AWS s3 account info.\n :param region: region of the bucket, it is optional.\n :param headers: any additional HTTP request headers to\n be presigned, it is optional.\n :param expires: final expiration of the generated URL. Maximum is 7days.\n :param response_headers: Specify additional query string parameters.\n :param content_hash_hex: sha256sum of the object.\n \"\"\"\n\n # Validate input arguments.\n if not credentials.get().access_key or not credentials.get().secret_key:\n raise InvalidArgumentError(\"Invalid access_key and secret_key.\")\n\n if region is None:\n region = MINIO_REGION\n\n if headers is None:\n headers = {}\n\n # 7 days\n if expires is None:\n expires = \"604800\"\n\n request_date = datetime.utcnow()\n\n parsed_url = urlsplit(url)\n host = remove_default_port(parsed_url)\n headers[\"Host\"] = host\n iso8601Date = request_date.strftime(\"%Y%m%dT%H%M%SZ\")\n\n headers_to_sign = headers\n # Construct queries.\n query = {}\n query[\"X-Amz-Algorithm\"] = _SIGN_V4_ALGORITHM\n query[\"X-Amz-Credential\"] = generate_credential_string(\n credentials.get().access_key, request_date, region\n )\n query[\"X-Amz-Date\"] = iso8601Date\n query[\"X-Amz-Expires\"] = str(expires)\n if credentials.get().session_token is not None:\n query[\"X-Amz-Security-Token\"] = credentials.get().session_token\n\n signed_headers = get_signed_headers(headers_to_sign)\n query[\"X-Amz-SignedHeaders\"] = \";\".join(signed_headers)\n\n if response_headers is not None:\n query.update(response_headers)\n\n # URL components.\n url_components = [parsed_url.geturl()]\n if query is not None:\n ordered_query = collections.OrderedDict(sorted(query.items()))\n query_components = []\n for component_key in ordered_query:\n single_component = [component_key]\n if ordered_query[component_key] is not None:\n single_component.append(\"=\")\n single_component.append(queryencode(ordered_query[component_key]))\n else:\n single_component.append(\"=\")\n query_components.append(\"\".join(single_component))\n\n query_string = \"&\".join(query_components)\n if query_string:\n url_components.append(\"?\")\n url_components.append(query_string)\n new_url = \"\".join(url_components)\n # new url constructor block ends.\n new_parsed_url = urlsplit(new_url)\n\n canonical_request = generate_canonical_request(\n method, new_parsed_url, headers_to_sign, signed_headers, content_hash_hex\n )\n string_to_sign = generate_string_to_sign(request_date, region, canonical_request)\n signing_key = generate_signing_key(\n request_date, region, credentials.get().secret_key\n )\n signature = hmac.new(\n signing_key, string_to_sign.encode(\"utf-8\"), hashlib.sha256\n ).hexdigest()\n new_parsed_url = urlsplit(new_url + \"&X-Amz-Signature=\" + signature)\n return new_parsed_url.geturl()\n\n\ndef remove_default_port(parsed_url):\n default_ports = {\"http\": 80, \"https\": 443}\n if any(\n parsed_url.scheme == scheme and parsed_url.port == port\n for scheme, port in default_ports.items()\n ):\n # 
omit default port (i.e. 80 or 443)\n host = parsed_url.hostname\n else:\n host = parsed_url.netloc\n return host\n\n\ndef delete_minio_container(container):\n \"\"\"A helper function to delete a container in Minio based on not finding\n more than one count for it (indicating that it is not in use by other\n container collections).\n\n Parameters\n ==========\n container: the container object to get Minio storage from.\n \"\"\"\n # Ensure that we don't have the container referenced by another collection\n # The verison would be the same, regardless of the collection/container name\n count = Container.objects.filter(version=container.version).count()\n storage = container.get_storage()\n\n # only delete from Minio not same filename, and if there is only one count\n if count == 1 and not DISABLE_MINIO_CLEANUP:\n print(\"Deleting no longer referenced container %s from Minio\" % storage)\n minioClient.remove_object(MINIO_BUCKET, storage)\n return True\n return False\n","repo_name":"singularityhub/sregistry","sub_path":"shub/apps/library/views/minio.py","file_name":"minio.py","file_ext":"py","file_size_in_byte":6671,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"87"} +{"seq_id":"23727838651","text":"# Camden Bruce\nplu_numbers = [1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000]\nplu_conversions = [\"Spoons\", \"Forks\", \"Plates\", \"Knifes\", \"Teaspoons\", \"Pots\", \"Pans\", \"Bowls\", \"Cups\", \"Mugs\"]\nelement_num = len(plu_numbers)\nif element_num == 9:\n print(\"camd\")\n\n\n\n\n","repo_name":"carbonatedcaffeine/Squible-Squabal","sub_path":"2022-PTN-TRL/Python/Inventory_Program/list_test.py","file_name":"list_test.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"21047054587","text":"#\n## Python Library for self Project\n## Create system-wide usable libraries\n#\n\nimport re\nfrom config.CrudModule import UserCrud\n\n# TODO @asperoph || INTRODUCE BINNING TO COMPARE PERFORMANCE\n \nclass RadixSort:\n def __init__(self,word_list):\n\n #\n ## Manipulate Word List to remove non-word character\n ## Replaces all special_characters_and_spaces\n #\n self.word_list=word_list\n self.max_size=max([len(word) for word in word_list])\n self.buckets=self.get_buckets(37) \n\n #\n ## During sorting, a new problem was detected. 
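The sregistry minio.py record above builds SigV4 presigned URLs by hand, and its remove_default_port helper keeps a port in the signed Host header only when it differs from the scheme's default (80 or 443). Below is a stdlib-only sketch of that rule; the function name host_for_signing is an illustrative assumption, not part of the record.

# Sketch of the default-port rule used when signing the Host header
from urllib.parse import urlsplit

def host_for_signing(url):
    parts = urlsplit(url)
    default = {"http": 80, "https": 443}.get(parts.scheme)
    # keep the port only when it is not the scheme's default
    return parts.hostname if parts.port == default else parts.netloc

assert host_for_signing("https://minio.example.com:443/bucket") == "minio.example.com"
assert host_for_signing("http://minio.example.com:9000/bucket") == "minio.example.com:9000"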
i.e \n ## names can contain uppercase as well as lowercase characters.\n ## \n ## Bucket Size is 26 alphabets + 10 numbers \n #\n ## Total Buckets Needed = [a-z]+[#]\n # \n self.buckets_directory=self.assign_numeric_values(\n self.get_numeric_characters()+\n self.get_alphabet_characters()) \n #\n ## Simply Add Function to include Numeric character for binning\n #\n self.current_index=self.max_size\n\n @staticmethod\n def lsd_numeric(sort):\n pass\n\n def lsd_sort(self):\n # Maintain Length of all words\n self.word_list=self.maintain_size(self.word_list)\n # Sort in Iterative Method\n self.flat_list=self.word_list.copy()\n for index in range(1,self.max_size+1):\n for word in self.flat_list:\n self.add_to_bucket(word,index)\n self.flat_list=self.flatten()\n return(self.remove_dont_care_character(self.flat_list))\n\n def flatten(self):\n temp_list=list()\n for each_list in self.buckets:\n if not each_list:\n continue\n for each_word in each_list:\n temp_list.append(each_word)\n #\n ## Reset Buckets \n #\n self.empty_buckets()\n return temp_list\n \n def empty_buckets(self):\n for each_bucket in self.buckets:\n while len(each_bucket)>0:\n each_bucket.pop()\n\n def add_to_bucket(self,word,index):\n self.buckets[self.get_dict_info(word[-index])].append(word)\n\n def get_dict_info(self,character):\n #\n ## Return invalid number so we know invalid character has been judged\n ## Not Handled || Breaks Code\n #\n index=self.buckets_directory.get(character,-9999)\n if(index==-9999):\n print(f'Invalid Index for {character}')\n return index\n\n @staticmethod\n def get_alphabet_characters():\n #\n ## Recieve 26 alphabets from 'a'=> 26\n #\n return [chr(i) for i in range(97,97+26)] \n \n @staticmethod\n def get_numeric_characters():\n #\n ## Recieve 10 numeric character from '0'\n #\n return [chr(i) for i in range(ord('0'),ord('0')+10)]\n \n @staticmethod\n def maintain_size(old_word_list,single_case=True):\n\n ## Add functionality to accept both cases. Ignored due to high no. 
of buckets\n\n fixlen=list()\n max_size=max(len(word) for word in old_word_list)\n for word in old_word_list:\n add=['#' for _ in range(max_size-len(word))]\n word+=''.join(add)\n fixlen.append(word)\n return fixlen\n\n @staticmethod\n def remove_dont_care_character(word_list,dont_care_character='#'):\n #\n ## Use re to sub matching criteria from back\n ## for faster manipulation\n #\n removed_list=list()\n for each_word in word_list:\n removed_list.append(re.sub('[#]+$','',each_word))\n return removed_list\n\n @staticmethod\n def get_buckets(no_buckets):\n #\n ## Create multiple empty buckets / bins\n #\n return [list() for _ in range(no_buckets)]\n \n @staticmethod\n def assign_numeric_values(char_list,dont_care_character='#'):\n #\n ## Assigns numeric values in dictionary\n ## for recieved list starting from 1.\n ## 0 is used for dont_care_character\n #\n assigned_values={character:index+1 for index,character in enumerate(char_list)}\n assigned_values[dont_care_character]=0\n return assigned_values\n\nclass SearchMethods:\n\n @staticmethod\n def growth_function(current_item, search_item):\n pass\n\n @staticmethod\n def binary_search(unsorted_list,item):\n sorted_list=RadixSort(unsorted_list).lsd_sort()\n last=len(sorted_list)-1\n first=0\n while(first<=last):\n mid = int((first+last)//2)\n if(sorted_list[mid]==item):\n return mid\n else:\n if item destination)\n inverse_rate : float\n The inverse of the previous rate (destination -> origin)\n date : str\n Date when the conversion rate was recorded\n api : Frankfurter\n Instance of Frankfurter class\n \"\"\"\n def __init__(self, from_currency, to_currency, date):\n # define self parameters\n self.from_currency = from_currency\n self.to_currency = to_currency\n self.date = date\n self.amount = None\n self.rate = None\n self.inverse_rate = None\n #instantiation of Frankfurter class\n self.frankfurter = Frankfurter()\n\n\n def check_currencies(self):\n \"\"\"\n Method that will check if currency codes stored in the class attributes are valid.\n Otherwise the program will exit and display the relevant message provided in the assignment brief\n\n Parameters\n ----------\n self: \n The function receives all self parameters from the CurrencyConverter class\n\n Pseudo-code\n ----------\n Create a Boolean variable \"is_valid\" as False\n\n Create a boolean variable to save the checked from_currency. It will generate a frankfurter object from the class Frankfurter and access the method check_currency to evaluate the currency.\n\n Create a boolean variable to save the checked to_currency. 
Generate a frankfurter object, access the method check_currency and evaluate the currency.\n\n if checked from_currency and checked to_currency are false:\n print from_currency and to_currency are not valid currency codes\n elif checked from_currency is false:\n print from_currency is not a valid currency code\n elif checked to_currency is false:\n print to_currency is not a valid currency code\n else:\n assign is_valid to True\n return is_valid\n\n Returns\n -------\n is_valid: Boolean\n The function returns a Boolean result (False or True) based on the currency code's validity.\n \"\"\"\n #check if currency codes stored in the class attributes are valid\n is_valid = False\n is_from_currency_valid = self.frankfurter.check_currency(self.from_currency)\n is_to_currency_valid = self.frankfurter.check_currency(self.to_currency)\n \n if not is_from_currency_valid and not is_to_currency_valid:\n print(f\"{self.from_currency} and {self.to_currency} are not valid currency codes\") \n elif not is_from_currency_valid:\n print(f\"{self.from_currency} is not a valid currency code\")\n elif not is_to_currency_valid:\n print(f\"{self.to_currency} is not a valid currency code\")\n else:\n is_valid = True\n \n return is_valid\n \n \n def reverse_rate(self):\n \"\"\"\n Method that will calculate the inverse rate from the value stored in the class attribute, round it to 4 decimal places and save it back in the class attribute inverse_rate.\n\n Parameters\n ----------\n self: \n The function receives all self parameters from the CurrencyConverter class\n\n Pseudo-code\n ----------\n if parameter self.rate from CurrencyConverter is different from zero:\n Save parameter inverse_rate as a result of applying round_rate function to 1/ self.rate\n else:\n inverse_rate equal to zero\n return empty\n\n Returns \n -------\n The function doesn't return a result. It is saving the self parameter inverse_rate from the CurrencyConverter class\n \"\"\"\n # calculate the inverse rate, use round_rate and save inverse rate\n if self.rate != 0:\n self.inverse_rate = self.round_rate(1/self.rate)\n else:\n self.inverse_rate = 0\n return \n\n\n def round_rate(self, rate):\n \"\"\"\n Method that will round an input argument to 4 decimal places.\n\n Parameters\n ----------\n self: \n The function receives all self parameters from the CurrencyConverter class\n rate: float\n The function receives a rate to round\n\n Pseudo-code\n ----------\n return the rate rounded to 4 decimals\n\n Returns\n -------\n round_rate: float with 4 decimals\n \"\"\"\n\n # round rate to 4 decimal places\n return round(rate, 4)\n \n\n def get_historical_rate(self):\n \"\"\"\n Method that will call the Frankfurter API and get the historical conversion rate for the currencies (rounded to 4 decimals) and date stored in the class attributes.\n Then it will calculate the inverse rate and will exit by displaying the relevant message provided in the assignment brief\n\n Parameters\n ----------\n self: \n The function receives all self parameters from the CurrencyConverter class\n\n Pseudo-code\n ----------\n Create a variable json_conversion_rate to save the dictionary from the response object calling the method get_historical_rate of the Frankfurter class with from_currency, to_currency and date as parameters\n\n get the dictionary of the historical rates\n\n Calculate the rate as rate to_currency/ rate from_currency\n\n Save the rate parameter with 4 decimals\n\n calculate the inverse rate \n \n return empty\n\n Returns\n -------\n The function doesn't return a result. 
It is saving the self parameter rate from the CurrencyConverter class\n \"\"\"\n\n # => To be filled by student\n #call the Frankfurter API - dict object with base, date and rates \n json_conversion_rate = self.frankfurter.get_historical_rate(self.from_currency, self.to_currency, self.date).json()\n #get the historical conversion rate - dict with the rates\n historical_conversion_rate = json_conversion_rate.get(\"rates\")\n #get rate\n rate = historical_conversion_rate.get(self.to_currency) / historical_conversion_rate.get(self.from_currency)\n self.rate = self.round_rate(rate)\n #calculate the inverse rate\n self.reverse_rate()\n return\n \n ","repo_name":"amedinabe/conversion-rate-python-app","sub_path":"currency.py","file_name":"currency.py","file_ext":"py","file_size_in_byte":6838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"27722827704","text":"import errno\nimport logging\nfrom typing import Optional, List, Set, Any\nfrom collections import deque\nfrom datetime import datetime\n\nfrom datastore import DatastoreException\nfrom freenas.dispatcher.rpc import (\n RpcException,\n SchemaHelper as h,\n accepts,\n description,\n returns,\n private,\n generator\n)\nfrom task import Provider, Task, TaskException, TaskDescription, query\nfrom freenas.dispatcher.model import BaseStruct, BaseEnum, BaseVariantType\nfrom freenas.dispatcher.model.typing import Range\nfrom freenas.utils import normalize, query as q\nfrom debug import AttachRPC\n\n\nlogger = logging.getLogger('AlertPlugin')\nregistered_alerts = {}\npending_alerts = deque()\npending_cancels = deque()\n\n\nclass AlertSeverity(BaseEnum):\n CRITICAL = 'CRITICAL'\n WARNING = 'WARNING'\n INFO = 'INFO'\n\n\nclass Alert(BaseStruct):\n id: int\n clazz: str\n subtype: str\n severity: AlertSeverity\n target: str\n title: str\n description: str\n user: str\n happened_at: datetime\n cancelled_at: Optional[datetime]\n dismissed_at: Optional[datetime]\n last_emitted_at: Optional[datetime]\n active: bool\n dismissed: bool\n one_shot: bool\n send_count: int\n properties: dict\n\n\nclass AlertClass(BaseStruct):\n id: str\n type: str\n subtype: str\n severity: AlertSeverity\n\n\nclass AlertEmitterConfig(BaseVariantType):\n pass\n\n\nclass AlertEmitterParameters(BaseVariantType):\n pass\n\n\nclass AlertEmitter(BaseStruct):\n id: str\n name: str\n config: AlertEmitterConfig\n\n\nclass AlertPredicateOperator(BaseEnum):\n EQ = '=='\n NE = '!='\n LE = '<='\n GE = '>='\n LT = '<'\n GT = '>'\n MATCH = '~'\n\n\nclass AlertPredicate(BaseStruct):\n property: str\n operator: AlertPredicateOperator\n value: Any\n\n\nclass AlertFilter(BaseStruct):\n id: str\n index: Range[int, 0]\n clazz: str\n emitter: str\n parameters: AlertEmitterParameters\n predicates: List[AlertPredicate]\n\n\n@description('Provides access to the alert system')\nclass AlertsProvider(Provider):\n @query('Alert')\n @generator\n def query(self, filter=None, params=None):\n return self.datastore.query_stream('alerts', *(filter or []), **(params or {}))\n\n @private\n @accepts(str, str)\n @returns(h.one_of(h.ref('Alert'), None))\n def get_active_alert(self, cls, target):\n return self.datastore.query(\n 'alerts',\n ('clazz', '=', cls), ('target', '=', target), ('active', '=', True),\n single=True\n )\n\n @description(\"Dismisses an alert\")\n def dismiss(self, id: int) -> None:\n alert = self.datastore.get_by_id('alerts', id)\n if not alert:\n raise RpcException(errno.ENOENT, 'Alert {0} not found'.format(id))\n\n if 
alert['dismissed']:\n raise RpcException(errno.ENOENT, 'Alert {0} is already dismissed'.format(id))\n\n if alert['one_shot']:\n alert['active'] = False\n\n alert.update({\n 'dismissed': True,\n 'dismissed_at': datetime.utcnow()\n })\n\n self.datastore.update('alerts', id, alert)\n self.dispatcher.dispatch_event('alert.changed', {\n 'operation': 'update',\n 'ids': [id]\n })\n\n @description(\"Dismisses/Deletes all alerts from the database\")\n def dismiss_all(self) -> None:\n alert_list = self.query([('dismissed', '=', False)])\n alert_ids = []\n for alert in alert_list:\n alert.update({\n 'dismissed': True,\n 'dismissed_at': datetime.utcnow()\n })\n self.datastore.update('alerts', alert['id'], alert)\n alert_ids.append(alert['id'])\n\n if alert_ids:\n self.dispatcher.dispatch_event('alert.changed', {\n 'operation': 'update',\n 'ids': alert_ids\n })\n\n @private\n @description(\"Emits an event for the provided alert\")\n @accepts(h.all_of(\n h.ref('Alert'),\n h.required('clazz')\n ))\n @returns(int)\n def emit(self, alert):\n cls = self.datastore.get_by_id('alert.classes', alert['clazz'])\n if not cls:\n raise RpcException(errno.ENOENT, 'Alert class {0} not found'.format(alert['clazz']))\n\n normalize(alert, {\n 'when': datetime.utcnow(),\n 'dismissed': False,\n 'active': True,\n 'one_shot': False,\n 'severity': cls['severity']\n })\n\n alert.update({\n 'type': cls['type'],\n 'subtype': cls['subtype'],\n 'send_count': 0\n })\n\n id = self.datastore.insert('alerts', alert)\n self.dispatcher.dispatch_event('alert.changed', {\n 'operation': 'create',\n 'ids': [id]\n })\n\n try:\n self.dispatcher.call_sync('alertd.alert.emit', id)\n except RpcException as err:\n if err.code == errno.ENOENT:\n # Alertd didn't start yet. Add alert to the pending queue\n pending_alerts.append(id)\n else:\n raise\n\n return id\n\n @private\n @description(\"Cancels already scheduled alert\")\n def cancel(self, id: int) -> int:\n alert = self.datastore.get_by_id('alerts', id)\n if not alert:\n raise RpcException(errno.ENOENT, 'Alert {0} not found'.format(id))\n\n if not alert['active']:\n raise RpcException(errno.ENOENT, 'Alert {0} is already cancelled'.format(id))\n\n alert.update({\n 'active': False,\n 'cancelled_at': datetime.utcnow()\n })\n\n self.datastore.update('alerts', id, alert)\n self.dispatcher.dispatch_event('alert.changed', {\n 'operation': 'update',\n 'ids': [id]\n })\n\n try:\n self.dispatcher.call_sync('alertd.alert.cancel', id)\n except RpcException as err:\n if err.code == errno.ENOENT:\n # Alertd didn't start yet. 
Add alert to the pending queue\n pending_cancels.append(id)\n else:\n raise\n\n return id\n\n @description(\"Returns list of registered alerts\")\n def get_alert_classes(self) -> List[AlertClass]:\n return self.datastore.query('alert.classes')\n\n @description(\"Returns list of registered alert severities\")\n def get_alert_severities(self) -> Set[AlertSeverity]:\n alert_classes = self.get_alert_classes()\n return {alert_class['severity'] for alert_class in alert_classes}\n\n\n@description('Provides access to the alerts filters')\nclass AlertFiltersProvider(Provider):\n @query('AlertFilter')\n @generator\n def query(self, filter=None, params=None):\n order = self.configstore.get('alert.filter.order')\n\n def extend(obj):\n obj['index'] = order.index(obj['id'])\n return obj\n\n filters = self.datastore.query('alert.filters', ('id', 'in', order), callback=extend)\n return q.query(filters, *(filter or []), **(params or {}))\n\n\n@description('Provides access to the alert emitter configuration')\nclass AlertEmitterProvider(Provider):\n @query('AlertEmitter')\n @generator\n def query(self, filter=None, params=None):\n def collect():\n for p in list(self.dispatcher.plugins.values()):\n if p.metadata and p.metadata.get('type') == 'alert_emitter':\n config = self.dispatcher.call_sync('alert.emitter.{0}.get_config'.format(p.metadata['name']))\n yield {\n 'id': p.metadata['id'],\n 'name': p.metadata['name'],\n 'config': config\n }\n\n return q.query(collect(), *(filter or []), **(params or {}))\n\n\n@description(\"Creates an Alert Filter\")\n@accepts(h.all_of(\n h.ref('AlertFilter'),\n h.forbidden('id'),\n h.required('emitter', 'parameters')\n))\nclass AlertFilterCreateTask(Task):\n @classmethod\n def early_describe(cls):\n return 'Creating alert filter'\n\n def describe(self, alertfilter):\n return TaskDescription('Creating alert filter')\n\n def verify(self, alertfilter):\n return ['system']\n\n def run(self, alertfilter):\n normalize(alertfilter, {\n 'clazz': None,\n 'predicates': []\n })\n\n order = self.configstore.get('alert.filter.order')\n index = alertfilter.pop('index', len(order))\n id = self.datastore.insert('alert.filters', alertfilter)\n order.insert(index, id)\n self.configstore.set('alert.filter.order', order)\n\n self.dispatcher.dispatch_event('alert.filter.changed', {\n 'operation': 'create',\n 'ids': [id]\n })\n\n self.dispatcher.dispatch_event('alert.filter.changed', {\n 'operation': 'update',\n 'ids': list(set(order) - {id})\n })\n\n return id\n\n\n@description(\"Deletes the specified Alert Filter\")\n@accepts(str)\nclass AlertFilterDeleteTask(Task):\n @classmethod\n def early_describe(cls):\n return 'Deleting alert filter'\n\n def describe(self, id):\n return TaskDescription('Deleting alert filter')\n\n def verify(self, id):\n return ['system']\n\n def run(self, id):\n alertfilter = self.datastore.get_by_id('alert.filters', id)\n if not alertfilter:\n raise RpcException(errno.ENOENT, 'Alert filter doesn\\'t exist')\n\n try:\n order = self.configstore.get('alert.filter.order')\n order.remove(id)\n self.datastore.delete('alert.filters', id)\n self.configstore.set('alert.filter.order', order)\n except DatastoreException as e:\n raise TaskException(\n errno.EBADMSG,\n 'Cannot delete alert filter: {0}'.format(str(e))\n )\n\n self.dispatcher.dispatch_event('alert.filter.changed', {\n 'operation': 'delete',\n 'ids': [id]\n })\n\n self.dispatcher.dispatch_event('alert.filter.changed', {\n 'operation': 'update',\n 'ids': list(set(order) - {id})\n })\n\n\n@description(\"Updates the 
specified Alert Filter\")\n@accepts(str, h.ref('AlertFilter'))\nclass AlertFilterUpdateTask(Task):\n @classmethod\n def early_describe(cls):\n return 'Updating alert filter'\n\n def describe(self, id, updated_fields):\n return TaskDescription('Updating alert filter')\n\n def verify(self, id, updated_fields):\n return ['system']\n\n def run(self, id, updated_fields):\n alertfilter = self.datastore.get_by_id('alert.filters', id)\n order = self.configstore.get('alert.filter.order')\n if not alertfilter:\n raise RpcException(errno.ENOENT, 'Alert filter doesn\\'t exist')\n\n if 'id' in updated_fields and updated_fields['id'] != alertfilter['id']:\n raise TaskException(errno.EINVAL, 'Cannot change alert filter id')\n\n try:\n if 'index' in updated_fields:\n index = updated_fields.pop('index')\n order.remove(id)\n order.insert(index, id)\n self.configstore.set('alert.filter.order', order)\n\n alertfilter.update(updated_fields)\n self.datastore.update('alert.filters', id, alertfilter)\n except DatastoreException as e:\n raise TaskException(\n errno.EBADMSG,\n 'Cannot update alert filter: {0}'.format(str(e))\n )\n\n self.dispatcher.dispatch_event('alert.filter.changed', {\n 'operation': 'update',\n 'ids': order,\n })\n\n\n@accepts(str, h.ref('AlertEmitter'))\n@description('Configures global parameters of an alert emitter')\nclass AlertEmitterUpdateTask(Task):\n @classmethod\n def early_describe(cls):\n return 'Updating alert emitter configuration'\n\n def describe(self, id, updated_params):\n return\n\n def verify(self, id, updated_params):\n return ['system']\n\n def run(self, id, updated_params):\n emitter = self.dispatcher.call_sync('alert.emitter.query', [('id', '=', id)], {'single': True})\n if not emitter:\n raise TaskException(errno.ENOENT, 'Emitter not found')\n\n if 'config' in updated_params:\n self.run_subtask_sync('alert.emitter.{0}.update'.format(emitter['name']), updated_params['config'])\n\n self.dispatcher.emit_event('alert.emitter.changed', {\n 'operation': 'update',\n 'ids': [emitter['id']]\n })\n\n\n@accepts(str, h.ref('AlertSeverity'))\n@description('Sends user alerts')\nclass SendAlertTask(Task):\n @classmethod\n def early_describe(cls):\n return 'Sending user alert'\n\n def describe(self, message, priority=None):\n return TaskDescription('Sending user alert')\n\n def verify(self, message, priority=None):\n return []\n\n def run(self, message, priority=None):\n if not priority:\n priority = 'WARNING'\n\n return self.dispatcher.call_sync('alert.emit', {\n 'clazz': 'UserMessage',\n 'severity': priority,\n 'title': 'Message from user {0}'.format(self.user),\n 'description': message,\n 'one_shot': True\n })\n\n\ndef collect_debug(dispatcher):\n yield AttachRPC('alert-filter-query', 'alert.filter.query')\n yield AttachRPC('alert-emitter-query', 'alert.emitter.query')\n\n\ndef _init(dispatcher, plugin):\n # Register providers\n plugin.register_provider('alert', AlertsProvider)\n plugin.register_provider('alert.filter', AlertFiltersProvider)\n plugin.register_provider('alert.emitter', AlertEmitterProvider)\n\n # Register task handlers\n plugin.register_task_handler('alert.send', SendAlertTask)\n plugin.register_task_handler('alert.filter.create', AlertFilterCreateTask)\n plugin.register_task_handler('alert.filter.delete', AlertFilterDeleteTask)\n plugin.register_task_handler('alert.filter.update', AlertFilterUpdateTask)\n plugin.register_task_handler('alert.emitter.update', AlertEmitterUpdateTask)\n\n def on_alertd_started(args):\n if args['service-name'] != 'alertd.alert':\n 
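# a service other than alertd.alert registered; nothing to flush here\n 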
return\n\n while pending_alerts:\n id = pending_alerts[-1]\n try:\n dispatcher.call_sync('alertd.alert.emit', id)\n except RpcException:\n logger.warning('Failed to emit alert {0}'.format(id))\n else:\n pending_alerts.pop()\n\n while pending_cancels:\n id = pending_cancels[-1]\n try:\n dispatcher.call_sync('alertd.alert.cancel', id)\n except RpcException:\n logger.warning('Failed to cancel alert {0}'.format(id))\n else:\n pending_cancels.pop()\n\n plugin.register_event_handler('plugin.service_registered', on_alertd_started)\n\n # Register event types\n plugin.register_event_type('alert.changed')\n plugin.register_event_type('alert.filter.changed')\n plugin.register_event_type('alert.emitter.changed')\n\n plugin.register_debug_hook(collect_debug)\n","repo_name":"mactanxin/middleware","sub_path":"src/dispatcher/plugins/AlertPlugin.py","file_name":"AlertPlugin.py","file_ext":"py","file_size_in_byte":15063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"90"} +{"seq_id":"31707360547","text":"from datetime import datetime\nfile_object = open(\"02-22-2019.txt\", \"w\")\nfile_object.close()\n\nclass PoolTable:\n def __init__(self, table_num):\n self.table_num = table_num\n self.is_available = \"Not Occupied\"\n\n def __repr__(self):\n return (f\"\\n Pool Table {self.table_num} - {self.is_available}\")\n\nclass TableManager:\n def __init__(self):\n self.open = \"Open\"\n self.pool_tables = []\n self.tables()\n self.welcome()\n self.menu()\n\n def tables(self):\n for table_num in range(1, 13):\n pool_table = PoolTable(table_num)\n self.pool_tables.append(pool_table)\n\n def welcome(self):\n print(\"\\n\")\n print(\"Welcome to the Pool Table Manager! \\n\")\n\n def menu(self):\n while self.open == \"Open\":\n print(\"Press 1 to view tables\")\n print(\"Press 2 to reserve a table\")\n print(\"Press 3 to close a table \")\n print(\"Press 4 to quit\")\n\n user_input = int(input(\"Select menu option (1, 2, 3, 4): \"))\n print(\"\\n\")\n\n if user_input == 1:\n self.view_all()\n elif user_input == 2:\n self.reserve_table()\n elif user_input == 3:\n self.close_table()\n elif user_input == 4:\n self.quit()\n\n def view_all(self):\n for pool_table in self.pool_tables:\n print(f\"Pool Table {pool_table.table_num} - {pool_table.is_available}\")\n if pool_table.is_available == \"Occupied\":\n print(f\"Start time: {pool_table.start_time}\")\n pool_table.current_time = datetime.now()\n pool_table.play_time = pool_table.current_time - pool_table.start_time\n print(f\"Play Time: {pool_table.play_time} \\n\")\n\n def reserve_table(self):\n user_input = int(input(\"Enter the pool table that you would like to reserve: \"))\n pool_table = self.pool_tables[user_input - 1]\n if pool_table.is_available == \"Occupied\":\n print(f\"Pool Table {pool_table.table_num} is occupied. Please choose another table. \\n\")\n else:\n pool_table.is_available = \"Occupied\"\n pool_table.start_time = datetime.now()\n print(f\"Table {pool_table.table_num} is reserved. 
\\n\")\n\n def close_table(self):\n user_input = int(input(\"Enter your table number: \"))\n pool_table = self.pool_tables[user_input - 1]\n pool_table.is_available = \"Not Occupied\"\n pool_table.end_time = datetime.now()\n pool_table.play_time = pool_table.end_time - pool_table.start_time\n print(f\"Pool Table {pool_table.table_num} has been closed.\")\n print(f\"Time Played: {pool_table.play_time} \\n\")\n\n with open(\"02-22-2019.txt\", \"a\") as file_object:\n file_object.write(f\"---------------------------------------------- \\n\")\n file_object.write(f\"Pool Table {pool_table.table_num} \\n\")\n file_object.write(f\"Start Date & Time: {pool_table.start_time} \\n\")\n file_object.write(f\"End Date & Time: {pool_table.end_time} \\n\")\n file_object.write(f\"Total Time Played: {pool_table.play_time} \\n\")\n file_object.write(f\"---------------------------------------------- \\n\")\n\n def quit(self):\n self.open = \"Closed\"\n\npool_management = TableManager()\n","repo_name":"spfeiler/python-assignments","sub_path":"pool_table.py","file_name":"pool_table.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18464087199","text":"#dt = {} for i in x: dt[i] = dt.get(i,0)+1\nimport sys;input = sys.stdin.readline\ninp,ip = lambda :int(input()),lambda :[int(w) for w in input().split()]\n\nimport sys\nsys.setrecursionlimit(10**6)\n\ndef dfs(u):\n global grpah,vis,dp\n vis[u] = 1\n for i in graph[u]:\n if not vis[i]:\n dfs(i)\n dp[u] = max(dp[u],dp[i] + 1)\n\nn,m = ip()\ngraph = [[] for i in range(n+1)]\nfor i in range(m):\n a,b = ip()\n graph[a].append(b)\n\ndp = [0]*(n+1)\nvis = [0]*(n+1)\nfor i in range(1,n+1):\n if not vis[i]:\n dfs(i)\n#print(dp)\nprint(max(dp))","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03166/s507632860.py","file_name":"s507632860.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18428888549","text":"\"\"\"\n?AGC\nAG?C\nA?GC\n?ACG\n?CAG\n\"\"\"\n\nn = int(input())\n\n# dp i文字目までのパターン 前の3文字を保存する\nimport itertools\nl = [\"A\", \"G\", \"C\", \"T\"]\npl = itertools.product(l, repeat=3)\n\ndic = dict()\ndic2 = dict()\nfrom collections import defaultdict\nng_list = defaultdict(int)\n\n\nfor idx, p in enumerate(pl):\n dic[idx] = p\n dic2[p] = idx\n\npl = itertools.product(l, repeat=4)\nfor idx, p in enumerate(pl):\n if p[1] == \"A\" and p[2] == \"G\" and p[3] == \"C\":\n ng_list[p] = 1\n elif p[0] == \"A\" and p[1] == \"G\" and p[3] == \"C\":\n ng_list[p] = 1\n elif p[1] == \"A\" and p[2] == \"C\" and p[3] == \"G\":\n ng_list[p] = 1\n elif p[1] == \"G\" and p[2] == \"A\" and p[3] == \"C\":\n ng_list[p] = 1\n elif p[0] == \"A\" and p[2] == \"G\" and p[3] == \"C\":\n ng_list[p] = 1\n\npl = itertools.product(l, repeat=3)\ndic3 = dict()\ndic4 = dict()\nfor idx, p in enumerate(pl):\n dic3[idx] = p\n dic4[p] = idx\n\ndp = [[0 for _ in range(4**3)] for _ in range(n+1)]\ndp[0][dic4[(\"T\", \"T\", \"T\")]] = 1\nMOD = 10 ** 9 + 7\nfor i in range(n):\n for j in range(4**3):\n for k in l:\n if ng_list[dic3[j] + (k,)] == 0:\n dp[i+1][dic4[dic3[j][1:] + (k,)]] += dp[i][j]\n dp[i+1][dic4[dic3[j][1:] + (k,)]] %= MOD\n\nprint(sum(dp[-1]) % 
MOD)\n\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03088/s872664734.py","file_name":"s872664734.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18531972039","text":"A,B,C,K = map(int,input().split())\ns1 = 0\ns2 = 0\nfor i in range(2):\n a = A + 2 * B\n b = B + 2 * A\n A = a\n B = b\n if i == 0:\n s1 = A - B\n else:\n s2 = A - B\nif K % 2 == 1:\n if s1 < 10 ** 18:\n print(s1)\n else:\n print(\"Unfair\")\nelse:\n if s2 < 10 ** 18:\n print(s2)\n else:\n print(\"Unfair\")\n\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03345/s613866262.py","file_name":"s613866262.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"12863555616","text":"from graphics import Canvas\nimport random\n \nCANVAS_WIDTH = 400\nCANVAS_HEIGHT = 400\n\n\n\ndef main():\n canvas = Canvas(CANVAS_WIDTH, CANVAS_HEIGHT)\n\n # TODO: your code here!\n \n rect = canvas.create_rectangle(0, 0, 260, 30, 'yellow')\n text = canvas.create_text(10,10, font_size = 15, text='Play PaperScissorsRock with Karel')\n \n #set player's seat\n canvas.create_image_with_size(60, 50, 80,80, \"karel.png\")\n text = canvas.create_text(180, 80, font_size = 25, text='VS')\n text = canvas.create_text(240, 80, font_size = 25, text='You')\n \n title = canvas.create_text(60, 150, font_size = 20, text=\"Click to make your choice:\")\n \n text = canvas.create_text(30, 320, font_size = 15, text=\"Karel's Choice:\")\n text = canvas.create_text(240, 320, font_size = 15, text=\"Your Choice:\")\n \n \n \n rock = canvas.create_image_with_size(60, 200, 60, 60, \"rock.png\")\n paper = canvas.create_image_with_size(160, 200, 60, 60, \"paper.png\")\n scissors = canvas.create_image_with_size(260, 200, 60, 60, \"scissors.png\")\n \n #wait fot player to choose\n canvas.wait_for_click()\n \n #show karel's choice\n karel_choice = get_karel_choice()\n if karel_choice == 'rock':\n karel_rock = canvas.create_image_with_size(60, 350, 30, 30, \"rock.png\")\n elif karel_choice == 'paper':\n karel_paper = canvas.create_image_with_size(60, 350, 30, 30, \"paper.png\")\n elif karel_choice == 'scissors':\n karel_scissors = canvas.create_image_with_size(60, 350, 30, 30, \"scissors.png\")\n \n #show player's choice\n player_choice = get_player_choice(canvas)\n if player_choice == 'rock':\n player_rock = canvas.create_image_with_size(270, 350, 30, 30, \"rock.png\")\n elif player_choice == 'paper':\n player_paper = canvas.create_image_with_size(270, 350, 30, 30, \"paper.png\")\n elif player_choice == 'scissors':\n player_scissors = canvas.create_image_with_size(270, 350, 30, 30, \"scissors.png\")\n \n #show result\n canvas.delete(rock) # Remove rock\n canvas.delete(paper) # Remove paper\n canvas.delete(scissors) # Remove scissors\n canvas.delete(title) # Remove old title\n title = canvas.create_text(60, 150, font_size = 20, text=\"The result:\")\n result= caculate_result(karel_choice, player_choice)\n text = canvas.create_text(130, 220,font_size = 25, text=result)\n \n\ndef get_karel_choice():\n random_number = random.randint(1, 3)\n if random_number == 1:\n return 'rock'\n elif random_number == 2:\n return 'paper'\n elif random_number == 3:\n return 'scissors'\n \n \ndef get_player_choice(canvas):\n click = canvas.get_last_click()\n if click[1]> 200 and click[1] < 260:\n if click[0]>60 and click[0]<120:\n return 'rock'\n elif click[0]>160 and 
click[0]<220:\n return 'paper'\n elif click[0]>260 and click[0]<320:\n return 'scissors'\n else:\n return 0\n else:\n return 0\n\n \ndef caculate_result(karel_choice, player_choice):\n if karel_choice == player_choice:\n return \"It's a tie!\"\n if karel_choice == 'rock' and player_choice == 'paper':\n return 'You win!'\n if karel_choice == 'rock' and player_choice == 'scissors':\n return 'You lose!'\n if karel_choice == 'paper' and player_choice == 'rock':\n return 'You lose!'\n if karel_choice == 'paper' and player_choice == 'scissors':\n return 'You win!'\n if karel_choice == 'scissors' and player_choice == 'rock':\n return 'You win!'\n if karel_choice == 'scissors' and player_choice == 'paper':\n return 'You lose!'\n if player_choice == 0:\n return 'Invalid Choice'\n\nif __name__ == '__main__':\n main()\n","repo_name":"akiraafu/python-paper-scissors-rock","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"15713621378","text":"# Python web scraping lecture 5\n# https://nadocoding.tistory.com/10\n# \"Gaus Electronics\" webtoon episode list\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nurl = 'https://comic.naver.com/webtoon/list.nhn?titleId=675554'\nres = requests.get(url)\nres.raise_for_status()\n\nsoup = BeautifulSoup(res.text, 'lxml')\n\ncartoons = soup.find_all('td', attrs={'class':'title'})\n# print(cartoons)\ntitle = cartoons[0].a.get_text()\nlink = cartoons[0].a['href']\nprint(title)\nprint('https://comic.naver.com' + link)\n\n# fetch webtoon title + link\n# for cartoon in cartoons:\n# print(cartoon.a.get_text(), 'https://comic.naver.com' + cartoon.a['href'])\n\n# fetch ratings\n# total_rate = 0\n# cartoons = soup.find_all('div', attrs={'class':'rating_type'})\n# for cartoon in cartoons:\n# rate = cartoon.find('strong').get_text()\n# print(rate)\n# total_rate += float(rate)\n# print('total score :', total_rate)\n# print('average score : ', total_rate / len(cartoons))","repo_name":"zoro6908/PY_acamedy","sub_path":"ExPyT0107.py","file_name":"ExPyT0107.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"1097334217","text":"import tkinter\nfrom tkinter import *\nfrom tkinter.ttk import *\nimport time\nimport semestral\nimport tryit\n\n\nclass Main:\n def __init__(self,first,second):\n self.first = \" \"+first\n self.second = \" \"+second\n self.can = Tk()\n self.step = 0\n self.boolen = True\n self.a = None\n self.b = None\n self.pole_b= []\n self.pole_label = []\n self.left = tkinter.PhotoImage(file='icon/left.png')\n self.up = tkinter.PhotoImage(file='icon/up.png')\n self.diag = tkinter.PhotoImage(file='icon/diag.png')\n self.img1 = tkinter.PhotoImage(file='icon/img1.png')\n self.img2 = tkinter.PhotoImage(file='icon/img2.png')\n self.can.title(\"Longest common subsequence\")\n self.can.configure(background=\"white\")\n self.can.minsize(900,550)\n self.menu()\n self.g = Canvas(bg = \"white\")\n self.g.place(height = 500, width = 900 , x =0 ,y = 0)\n self.g.create_text(400,25 ,text = \"LCS-VIZUALIZATION\", font = \"Calibri 12 bold\")\n self.g.create_text(50,440 ,text = \"first-word: \"+str(first), anchor = \"w\",font = \"Calibri 12 bold\")\n self.g.create_text(50,470 ,text = \"second-word: \"+str(second),anchor = \"w\", font = \"Calibri 12 bold\")\n self.buttons()\n self.table()\n self.pole = [[[0,\"n\",False] for x in range(len(second)+2)] for y in range(len(first)+2)]\n self.can.mainloop()\n \n \n\n def 
menu(self):\n menubar = Menu(self.can)\n filemenu = Menu(menubar, tearoff=0)\n filemenu.add_command(label=\"Help\", command=self.help)\n filemenu.add_command(label=\"Back\", command=self.back_w)\n filemenu.add_separator()\n filemenu.add_command(label=\"Exit\", command=self.quit)\n menubar.add_cascade(label=\"File\", menu=filemenu)\n self.can.config(menu= menubar)\n\n def buttons(self):\n styl=Style()\n styl.configure('My.TButton',background=\"white\")\n self.button1=Button(self.can,text=\"PREVIOUS\",command=self.back,style = \"My.TButton\")\n self.button1.place(height=30, width=150, x=50, y=510)\n self.button1.config(state = \"disabled\")\n self.button2=Button(self.can,text=\"NEXT\",command=self.next,style = \"My.TButton\")\n self.button2.place(height=30, width=150, x=650, y=510)\n self.button3=Button(self.can,text=\"TRY IT!\",command=self.tryit,style = \"My.TButton\")\n\n \n def next(self):\n self.step+=1\n [self.g.delete(x) for x in self.pole_label]\n self.pole_label=[]\n self.g.delete(self.b)\n self.g.update()\n if len(self.first)-1>=self.step:\n self.button1.config(state = \"disabled\")\n self.button2.config(state = \"disabled\")\n self.table()\n self.button2.config(state = \"normal\")\n if self.step <=1:\n self.button1.config(state = \"disabled\")\n else:\n self.button1.config(state = \"normal\")\n \n else:\n self.button1.config(state = \"disabled\")\n self.button2.config(state = \"disabled\")\n self.g.move(self.a,6000,0)\n self.solve()\n \n\n def back(self):\n self.step-=1\n if self.step <=1:\n self.button1.config(state = \"disabled\")\n else:\n self.button1.config(state = \"normal\")\n [self.g.delete(x) for x in self.pole_label]\n self.pole_label=[]\n for x in range(len(self.second)-2):\n a = self.pole_b.pop()\n self.g.delete(a[0])\n self.g.delete(a[1])\n self.g.move(self.a,0,-30)\n self.g.update()\n \n def table_cell(self,x,y):\n self.g.create_rectangle(x,y,x+30,y+30)\n \n def table_circle(self,x,y,color):\n c = self.g.create_oval(x,y,x+30,y+30,outline=color)\n return c \n\n def table_text(self,x,y,text,color = \"black\"):\n t= self.g.create_text(x,y,text =text,fill = color)\n return t \n def arrow(self,x,y,draw):\n if draw ==\"n\":\n return\n elif draw==\"l\":\n image = self.g.create_image(x,y+15,image=self.left)\n elif draw==\"u\":\n image = self.g.create_image(x+15,y,image=self.up)\n elif draw==\"d\":\n image = self.g.create_image(x,y,image=self.diag)\n return image\n\n def table_img(self,x,y,draw):\n if draw == \"1\":\n image = self.g.create_image(x,y+15,image=self.img1)\n else:\n image = self.g.create_image(x,y+15,image=self.img2)\n \n \n\n \n\n def table(self):\n x,y = 50 , 50\n dy = 0\n lenx,leny = len(self.first), len(self.second)\n booly =True\n for i in range(lenx):\n indexA = self.first[i]\n if i!=0 and self.step==0:\n self.table_text(x+15,y+15,self.first[i])\n if self.step>1 and self.step==i:\n if self.boolen:\n self.a = self.table_circle(x,y,\"red\")\n self.boolen = False \n else:\n self.g.move(self.a,0,30)\n for j in range(leny):\n indexB = self.second[j]\n if self.step==0:\n self.table_cell(x,y)\n if i==0 and j!=0 and self.step==0:\n self.table_text(x+15,y+15,self.second[j])\n if self.step>1 and j>1 and i==self.step:\n if booly:\n self.b = self.table_circle(x,50,\"blue\")\n booly = False\n else:\n self.g.move(self.b,30,0)\n if self.step>=1 and ((j==1 and i!=0) or (i==1 and j!=0)) and self.pole[i][j][2]==False:\n self.table_text(x+15,y+15,str(self.pole[i][j][0]))\n self.pole[i][j][2]=True\n if i<self.step and i>1 and j>1 and self.pole[i][j][2]==False:\n 
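# cell already computed in an earlier step: redraw its stored value and arrow\n 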
self.table_text(x+15,y+15,str(self.pole[i][j][0]))\n self.arrow(x,y,self.pole[i][j][1])\n self.pole[i][j][2]=True\n if self.step==i and i>1 and j>1:\n if indexA==indexB:\n self.pole[i][j][0] = self.pole[i-1][j-1][0] +1\n self.pole[i][j][1]=\"d\"\n self.pole_label.append(self.table_text(650,80+40*dy,\"\\\"\"+indexA.upper()+\"\\\" is match with \\\"\"+ indexB.upper() + \"\\\" then \\\"\"+ indexA.upper() + \"\\\" is appended to he upper left sequence\",\"#de00ff\"))\n self.pole_label.append(self.table_text(650,95+40*dy,\"LCS(i,j)=LCS(i-1,j-1)+1\")) \n dy+=1\n elif self.pole[i-1][j][0]0:\n if first[i-1] == second[j-1]:\n self.table_img(x+30*(j),y+30*(i),\"1\")\n self.table_cell(765-30*w,235)\n self.table_text(780-30*w,250,first[i-1])\n solv =first[i-1] + solv\n i-=1\n j-=1\n w+=1\n elif self.pole[i-1][j][0]>= self.pole[i][j-1][0]:\n self.table_img(x+30*(j),y+30*(i),\"0\")\n i-=1\n else:\n self.table_img(x+30*(j),y+30*(i),\"0\")\n j-=1\n self.g.update()\n time.sleep(0.2)\n self.button3.place(height=30, width=200, x=796, y=300,anchor ='e')\n\n def help(self):\n tkhelp = Tk()\n tkhelp.minsize(550,350)\n l = Label(tkhelp,text = \"\"\"\\n The longest common subsequence (LCS)\\n\n problem is the problem of finding the longest subsequence common\\n\n to all sequences in a set of sequences (often just two sequences).\\n\n It differs from problems of finding common substrings: unlike substrings,\\n\n subsequences are not required to occupy consecutive positions within the original sequences.\\n\n VIZUALIZATION \\n\n Watch and learn how algoritm works \\n\n CONTROLS \\n\n NEXT button : make next step \\n\n PREVIOUS button: make previous step \\n\n \"\"\",anchor = \"w\")\n l.pack()\n\n def back_w(self):\n self.can.destroy()\n semestral.Main()\n\n def quit(self):\n self.can.destroy()\n\n def tryit(self):\n self.can.destroy()\n tryit.Main(self.first[2:],self.second[2:])\n \n \n \n \n \n\n \n \n \n\n\n\n\n\n \n \n \n\n \n \n","repo_name":"shokker/LCS-Project","sub_path":"vizual.py","file_name":"vizual.py","file_ext":"py","file_size_in_byte":10068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"36123170546","text":"import ipywidgets as widgets\nimport plotly.express as px\nfrom plotly.subplots import make_subplots\nimport pandas as pd\nfrom IPython.core.display import display, HTML\n\n'''This function will return 4 tables: overview, descriptive stats, frequency top and frequency bottom and two graphs: histogram and boxplot for each numerical feature'''\n\n\ndef eda_num_univ_binary(dataset):\n '''dataset:pd.DataFrame'''\n dataset_num = dataset.select_dtypes(exclude='object')\n if len(dataset_num[dataset_num.columns.values[-1]].unique())==2:\n dataset_num = dataset_num.iloc[:,:-1]\n else:\n dataset_num = dataset.select_dtypes(exclude='object')\n \n tab_contents = [i for i in dataset_num.columns.values]\n children = [widgets.Output() for value in tab_contents]\n tab = widgets.Tab(children=children)\n [tab.set_title(num,name) for num,name in enumerate(tab_contents)]\n display(tab)\n\n for i,k in enumerate(dataset_num.columns.values):\n # overview table\n count_num = dataset_num[k].count()\n distinct_count_num = dataset_num[k].nunique()\n unique_num = round(100*(dataset[k].nunique()/len(dataset)),2)\n missing_num = dataset_num[k].isnull().sum().sum()\n missing_num_perc = 100*(missing_num/len(dataset_num))\n zeros_num_count = (dataset_num[k]==0).sum()\n zeros_num_count_perc = 100*(zeros_num_count/len(dataset))\n \n df1_num = 
pd.DataFrame([count_num,distinct_count_num,unique_num,missing_num,missing_num_perc,zeros_num_count,zeros_num_count_perc],columns=[''],\n index=['Total count','Distinct count','Unique (%)','Missing','Missing (%)','Zeros','Zeros (%)'])\n df1_num.index.name = 'Overview'\n # descriptive stats table\n df2_num = pd.DataFrame(dataset_num[k].describe().round(2))\n df2_num.columns = ['']\n df2_num = df2_num.drop('count')\n df2_num.loc['kurtosis'] = dataset_num[k].kurtosis(axis=0)\n df2_num.loc['skew'] = dataset_num[k].skew(axis=0)\n df2_num.index.name = 'Descriptive Statistics'\n df2_num = df2_num.style.set_precision(2)\n # Frequency top 7\n freq_top = pd.DataFrame(dataset_num[k].value_counts().sort_values(ascending=False).head(10)).rename(columns={k:'Frequency'}).reset_index()\n freq_top['Frequency (%)'] = 100*(freq_top['Frequency']/len(dataset))\n freq_top = freq_top.rename(columns={'index':k,'Frequency':'Frequency'})\n freq_top = freq_top.style.hide_index().set_caption(\"Frequency top\").set_precision(2)\n \n # Frequency bottom 7\n freq_bottom = pd.DataFrame(dataset_num[k].value_counts().sort_values(ascending=False).tail(10)).rename(columns={k:'Frequency'}).reset_index()\n freq_bottom['Frequency (%)'] = 100*(freq_bottom['Frequency']/len(dataset))\n freq_bottom = freq_bottom.rename(columns={'index':k,'Frequency':'Frequency'})\n freq_bottom = freq_bottom.style.hide_index().set_caption(\"Frequency bottom\").set_precision(3)\n \n out1 = widgets.Output()\n out2 = widgets.Output()\n out3 = widgets.Output()\n out4 = widgets.Output()\n with out1:\n display(df1_num)\n with out2:\n display(df2_num)\n with out3:\n display(freq_top)\n with out4:\n display(freq_bottom)\n hbox = widgets.HBox([out1,out2,out3,out4])\n \n with children[i]:\n display(hbox)\n \n x_titles = list(dataset_num.columns)\n \n fig1=px.histogram(dataset_num,x=dataset_num[k],labels={'x':x_titles[i],'y':'count'},histnorm='percent')\n fig2=px.box(dataset_num,y=dataset_num[k],labels={'x':x_titles[i],'y':'percent'})\n \n trace1 = fig1['data'][0]\n trace2 = fig2['data'][0]\n fig = make_subplots(rows=1,cols=2,subplot_titles=['Histogram','Box plot'])\n fig.layout[\"xaxis\"].title.text = k\n fig.layout[\"xaxis2\"].title.text = ''\n fig.layout[\"yaxis\"].title.text = 'percent'\n fig.layout[\"yaxis2\"].title.text = k\n \n fig.add_trace(trace1,1,1)\n fig.add_trace(trace2,1,2)\n fig.update_layout(template='plotly_dark')\n fig.show()","repo_name":"MTereM/Bank-Marketing","sub_path":"eda_num_univ_binary.py","file_name":"eda_num_univ_binary.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"8216967825","text":"class Criterial:\n \"\"\"\n Module for solving a multi-criteria optimization problem.\n Task: from a list of menu templates, using the client's parameters, select the optimal menu template variants.\n\n The module contains a class implementing the following methods:\n Loading a list of menu templates in JSON format (see data_specification.md) from disk\n Adding a new template\n Modifying an existing template\n Saving the templates to disk\n Selecting the N optimal variants from the previously loaded list according to the client's criteria\n\n All methods should be made @classmethod, since the \"Singleton\" pattern is used.\n\n The following values are suggested as the client's parameters:\n Age\n Height\n Weight\n Amount of physical activity\n Purpose of the diet\n Allergic reactions\n Favorite products\n\n Any value for which there is a clearly 
defined number of variants (for example, the purpose of the diet)\n must be implemented as a corresponding Python entity, so that it is impossible to substitute a value\n not defined in the code without an error.\n \"\"\"\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self.menu_list = {}\n\n def set_menu_list(self, menu_list):\n self.menu_list = menu_list\n\n def pareto(self, restricted_prod: list, loved_prod: list, diet_type=\"health\", epd=3, ned=1):\n \"\"\"Builds the Pareto-optimal list of menu templates.\n Criteria:\n 1) difference between the actual and the desired number of meals per day (negative) (epd)\n 2) difference between the actual and the desired number of fasting days (negative) (ned)\n 3) favorite products: the number of matches between loved_prod and general->all_products (positive) (love)\n\n Constraints:\n 1) diet type (general->type)\n 2) allergies (products from restricted_prod are not contained in general->all_products)\n \"\"\"\n pareto_list = self.menu_list.copy()\n i = 0\n while i < len(pareto_list):\n j = i + 1\n c_epd = abs(pareto_list[i]['general']['eats_per_day'] - epd)\n c_ned = abs(pareto_list[i]['general']['no_eats_day'] - ned)\n c_love = len(set(pareto_list[i]['general']['all_products']) & set(loved_prod))\n pareto_list[i]['criterias'] = {\n 'epd': c_epd,\n 'ned': c_ned,\n 'love': c_love\n }\n if ( # Constraints\n (len(set(pareto_list[i]['general']['all_products']) & set(restricted_prod)) != 0) or\n (pareto_list[i]['general']['type'] != diet_type)\n ):\n pareto_list.pop(i)\n continue\n while j < len(pareto_list):\n if (\n # Criteria\n (\n (c_epd > abs(pareto_list[j]['general']['eats_per_day'] - epd)) and\n (c_ned > abs(pareto_list[j]['general']['no_eats_day'] - ned)) and\n (c_love < len(set(pareto_list[j]['general']['all_products']) & set(loved_prod)))\n )\n ):\n pareto_list.pop(i)\n i -= 1\n break\n j += 1\n i += 1\n return pareto_list\n\n def optimization(self, pareto_list: list, weight=[1 / 3, 1 / 3, 1 / 3]):\n \"\"\" Construction of the generalized criterion \"\"\"\n general_criteria = [0 for _ in pareto_list]\n for index, menu in enumerate(pareto_list):\n general_criteria[index] = ( menu['criterias']['epd'] * weight[0] +\n menu['criterias']['ned'] * weight[1] +\n menu['criterias']['love'] * weight[2])\n return pareto_list[general_criteria.index(max(general_criteria))]\n","repo_name":"tunsmm/fitness_guide","sub_path":"fitness_guide/menu/services/criterial/criterial.py","file_name":"criterial.py","file_ext":"py","file_size_in_byte":4947,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"41394846855","text":"import argparse\nimport os\nimport shutil\nimport tarfile\n\nscript_directory = os.path.dirname(os.path.abspath(__file__))\n\nfbx_tgz_filename = 'fbx20161_2_fbxsdk_clang_mac.pkg.tgz'\nfbx_tgz_filepath = os.path.join(script_directory, fbx_tgz_filename)\n\nfbx_pkg_filename = 'fbx20161_2_fbxsdk_clang_macos.pkg'\nfbx_pkg_filepath = os.path.join(script_directory, fbx_pkg_filename)\n\nparser = argparse.ArgumentParser(description='Install and setup FBXSDK on the Mac for Lumberyard.')\n\nparser.add_argument('fbxthirdpartypath',\n default='',\n help='Path to your 3rdParty folder.')\nparser.add_argument('--installerPath', \n default=fbx_tgz_filepath,\n help='The path to the installer archive file.')\n\nargs, unknown = parser.parse_known_args()\n\ninstaller_filepath = args.installerPath\n\nif os.path.exists(installer_filepath) is False:\n raise Exception('Cannot find file: {}'.format(installer_filepath))\n\n_, 
file_extension = os.path.splitext(installer_filepath)\n\n# Unzip if input is a tgz file\nif file_extension == '.tgz':\n tar = tarfile.open(installer_filepath)\n tar.extractall(script_directory)\n tar.close()\n installer_filepath = fbx_pkg_filepath\n\nif not os.path.exists(installer_filepath) or not installer_filepath.endswith('.pkg'):\n raise Exception('Invalid file extracted: {}'.format(installer_filepath))\n\n# Remove quarantine mark on the installer file before opening it\nos.system('xattr -dr com.apple.quarantine {}'.format(installer_filepath))\n\n# Run the installer package\nos.system('open -W {}'.format(installer_filepath))\n\n# Clean up extracted file if input is a tgz file\nif file_extension == '.tgz':\n shutil.rmtree(installer_filepath)\n\n# This points to the default location where the pkg installs the files\ninstalled_path = os.path.join(os.path.abspath(os.sep), 'Applications', 'Autodesk', 'FBX SDK', '2016.1.2') \n\nif os.path.exists(installed_path) is False:\n raise Exception('Failed to install FBX SDK, cannot find installed files: {}'.format(installed_path))\n\n# Move the installed files to the 3rdParty location\nshutil.move(installed_path, args.fbxthirdpartypath)\n","repo_name":"santosh90n/lumberyard-1","sub_path":"dev/Tools/Redistributables/FbxSdk/2016.1.2/fbx_install_mac.py","file_name":"fbx_install_mac.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"42927313035","text":"#hours, minutes and seconds\nfrom time import sleep, strftime\n\n#while True: #will run forever\n# hora = strftime('%H:%M:%S')\n# print(hora)\n# sleep(1) #wait 1 second\nn = 0\n\nwhile n < 10:\n print(n)\n n = n + 1\n\n \n ","repo_name":"Vogel1212/PythonExamples","sub_path":"simpleExamples/PythonCode-3-3.py","file_name":"PythonCode-3-3.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"13306663099","text":"#!/usr/bin/env python3\n\nimport json\nimport mq\nfrom sys import argv\nfrom time import sleep\n\n\ndef pull(filename):\n with open(filename, 'r') as fp:\n return json.load(fp)\n\n\ndef check(tweets):\n assert len(tweets) > 0\n first_batch = tweets[0]\n assert len(first_batch) > 0\n first_tweet = first_batch[0]\n EXPECT_KEYS = {'content', 'hashtags', 'id', 'image', 'name',\n 'partycolor', 'retweet', 'sound', 'time', 'twitterName'}\n # Implicit assertion: first_tweet is a dict\n assert EXPECT_KEYS.issubset(first_tweet.keys()), first_tweet.keys()\n\n\n# Waiting period, in milliseconds, between each sent batch\nPERIOD_MS = 50\n\n\ndef vomit(tweets):\n print('Now vomiting {} tweet-batches all over the place.'.format(len(tweets)))\n q = mq.RealQueue('tweets')\n for batch in tweets:\n for t in batch:\n q.post([t])\n sleep(PERIOD_MS / 1000.0)\n\n\ndef transfer_file(filename):\n tweets = pull(filename)\n check(tweets)\n vomit(tweets)\n\n\nif __name__ == '__main__':\n if len(argv) == 1:\n transfer_file('all_tweets.json') # argv[0] is the program name\n elif len(argv) == 2:\n transfer_file(argv[1]) # argv[0] is the program name\n else:\n print('{}: need precisely one argument: the name of the tweets JSON file.'.format(argv[0]))\n exit(1)\n","repo_name":"Schwenger/House-Of-Tweets","sub_path":"backend/vomit.py","file_name":"vomit.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} 
+{"seq_id":"40231209431","text":"#!/usr/bin/env python\n\nfrom collections import namedtuple\nimport re\nfrom .helpers import read\n\n\"\"\"\nhttps://www.raspberrypi.org/documentation/hardware/raspberrypi/revision-codes/README.md\nuuuuuuuuFMMMCCCCPPPPTTTTTTTTRRRR\n\nNote: As of the 4.9 kernel, all Pis report BCM2835, even those with BCM2836,\nBCM2837 and BCM2711 processors. You should not use this string to detect the\nprocessor. Decode the revision code using the information below, or cat\n/sys/firmware/devicetree/base/model\n\"\"\"\n\nRPiInfo = namedtuple(\"RPiInfo\", \"type processor memory revision manufacturer flag\")\n\ndef revision(n):\n return 0b1111 & n\n\ndef name(n):\n rpi = {\n 0x0: \"A\",\n 0x1: \"B\",\n 0x2: \"A+\",\n 0x3: \"B+\",\n 0x4: \"2B\",\n 0x5: \"Alpha (early prototype)\",\n 0x6: \"CM1\",\n 0x8: \"3B\",\n 0x9: \"Zero\",\n 0xa: \"CM3\",\n 0xc: \"Zero W\",\n 0xd: \"3B+\",\n 0xe: \"3A+\",\n 0xf: \"Internal use only\",\n 0x10: \"CM3+\",\n 0x11: \"4B\"\n }\n val = 0b11111111 & (n >> 4)\n return rpi[val]\n\ndef processor(n):\n p = {\n 0: \"BCM2835\",\n 1: \"BCM2836\",\n 2: \"BCM2837\",\n 3: \"BCM2711\"\n }\n val = 0b1111 & (n >> 12)\n return p[val]\n\ndef manufacturer(n):\n m = {\n 0: \"Sony UK\",\n 1: \"Egoman\",\n 2: \"Embest\",\n 3: \"Sony Japan\",\n 4: \"Embest\",\n 5: \"Stadium\"\n }\n val = 0b1111 & (n >> 16)\n return m[val]\n\ndef memory(n):\n m = {\n 0: \"256MB\",\n 1: \"512MB\",\n 2: \"1GB\",\n 3: \"2GB\",\n 4: \"4GB\"\n }\n val = 0b111 & (n >> 20)\n return m[val]\n\ndef flag(n):\n f = {\n 1: \"new-style revision\",\n 0: \"old-style revision\"\n }\n val = 0b1 & (n >> 23)\n return f[val]\n\ndef decode(n):\n return RPiInfo(\n name(n),\n processor(n),\n memory(n),\n revision(n),\n manufacturer(n),\n flag(n)\n )\n\ndef find(key, info):\n match = re.search('^{}\\s+:\\s+(\\w+)$'.format(key), info, flags=(re.MULTILINE | re.IGNORECASE))\n if match is None:\n return None\n return match.group(1)\n\n\ndef pi_info():\n cpuinfo = read('/proc/cpuinfo')\n if cpuinfo is None:\n return None\n\n n = find(\"Revision\", cpuinfo)\n if n is None:\n return None\n\n return decode(n)\n","repo_name":"pombredanne/rpi-info","sub_path":"rpi-info/rpi2.py","file_name":"rpi2.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"90"} +{"seq_id":"12084370142","text":"import itertools\nimport peutil\n\nFIND = 50000000\nPRIME_LIMIT = 100000\n\ndef f(a, b, c):\n return a**2 + b**3 + c**4\n\nprimes = peutil.get_primes(PRIME_LIMIT)\nfor i, p in enumerate(primes):\n if f(p, primes[0], primes[0]) > FIND:\n primes = primes[:i + 1]\n break\n\npossible = []\nfor a in primes:\n a_pos_len = len(possible)\n for b in primes:\n b_pos_len = len(possible)\n for c in primes:\n val = f(a, b, c)\n if val > FIND:\n break\n possible.append(val)\n if len(possible) == b_pos_len:\n break\n if len(possible) == a_pos_len:\n break\n\nprint(len(set(p for p in possible)))\n\n","repo_name":"mishajw/projecteuler","sub_path":"src/p087_prime_power_triples.py","file_name":"p087_prime_power_triples.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"25542257217","text":"import urllib\nimport time\nimport hmac\nimport base64\nimport datetime\nimport urllib\nimport hashlib\nimport logging\nfrom xml.dom import minidom\nfrom google.appengine.api import urlfetch\n\ndef productSearch(keywords):\n # Amazon is kind of obnoxious with the API and there's very little 
available online to help.\n # their page is like a maze.\n # hopefully this won't have to change any time soon.\n pairs = [\n \"AssociateTag=wbor07-20\",\n \"AWSAccessKeyId=AKIAJIXECWA77X5XX4DQ\",\n \"Keywords=\" + keywords,\n \"Operation=ItemSearch\",\n \"ResponseGroup=Images%2CTracks%2CItemAttributes\",\n \"SearchIndex=Music\",\n \"Service=AWSECommerceService\",\n \"Timestamp=\" + urllib.quote_plus(datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\")),\n \"Version=2011-08-01\",\n ]\n string = \"&\".join(pairs)\n hashstring = \"GET\\necs.amazonaws.com\\n/onca/xml\\n\" + string\n dig = hmac.new(\"6oYjAsiXTz8xZzpKZC8zkqXnkYV72CNuCRh9hUsQ\", \n msg=hashstring, \n digestmod=hashlib.sha256).digest()\n coded = dig.encode(\"base64\").strip()\n finalurl = (\"https://ecs.amazonaws.com/onca/xml?\" + string + \n \"&Signature=\" + urllib.quote_plus(coded))\n logging.warning(\"Final URL: \" + finalurl)\n xmldata = urlfetch.fetch(unicode(finalurl)).content\n logging.warning(\"XML Data: \" + xmldata)\n xmldoc = minidom.parseString(xmldata)\n items = xmldoc.getElementsByTagName(\"Item\")\n # makes sure we only look at items with images, otherwise bad things can happen\n items = filter(lambda i: len(i.getElementsByTagName(\"SmallImage\")) > 0, items)\n # same with medium image\n items = filter(lambda i: len(i.getElementsByTagName(\"MediumImage\")) > 0, items)\n # same with large image\n items = filter(lambda i: len(i.getElementsByTagName(\"LargeImage\")) > 0, items)\n # and track\n items = filter(lambda i: len(i.getElementsByTagName(\"Track\")) > 0, items)\n # and artist\n items = filter(lambda i: len(i.getElementsByTagName(\"Artist\")) > 0, items)\n # and title\n items = filter(lambda i: len(i.getElementsByTagName(\"Title\")) > 0, items)\n return items\n","repo_name":"cmds4410/WBOR","sub_path":"amazon.py","file_name":"amazon.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"90"} +{"seq_id":"18043815049","text":"h, w = map(int, input().split())\nl = []\nfor i in range(h):\n l.append(list(input()) + [\".\"])\nl[-1][w-1] = \"g\"\nl.append([\".\"]*(w+1))\n\ndef dfs(x, y):\n if x == w or y == h or l[y][x] == \".\":\n return\n elif l[y][x] == \"g\":\n print(\"Possible\")\n exit()\n else:\n dfs(x+1, y)\n dfs(x, y+1)\n\ncnt = 0\nfor i in l:\n for j in i:\n if j == \"#\":\n cnt += 1\n\nif cnt == h + w - 2:\n dfs(0, 0)\nprint(\"Impossible\")","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03937/s849371408.py","file_name":"s849371408.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"39065226156","text":"#coding: utf-8\nimport numpy as np\nimport cmath as cm\nimport matplotlib.pyplot as plt\nimport wave\nimport struct\nimport warnings\nimport time\nwarnings.filterwarnings('ignore')\n\nfile = \"test.wav\"\nspan = 15000\ndt = 0.01 # sampling interval\nfq1, fq2 = 5, 40 # frequencies\nfc, fc2 = 25, 75 # cutoff frequencies\n\ndef plot(locate, f, a, b, lab): #plot\n plt.subplot(locate)\n plt.plot(f, label=lab)\n plt.xlabel(a, fontsize=20)\n plt.ylabel(b, fontsize=20)\n plt.grid()\n \ndef out_wav(name, outd): # wav file output\n outf = name\n wave_file = wave.open(outf, 'w')\n wave_file.setnchannels(ch)\n wave_file.setsampwidth(width)\n wave_file.setnframes(fn)\n wave_file.setframerate(fr)\n wave_file.writeframes(outd)\n wave_file.close()\n\ndef dft(data):\n res = []\n N = len(data)\n for k in range(N):\n w = cm.exp(-1j * 2 * cm.pi * k / float(N))\n 
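# accumulate the k-th DFT bin: X_k = sum over n of data[n] * w**n\n 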
X_k = 0\n for n in range(N):\n X_k += data[n] * (w ** n)\n res.append(abs(X_k))\n return res\n\ndef ifft_data(F, locate, wav): # ifft + plot + output\n f = np.fft.ifft(F)\n outd = [int(x) for x in f]\n outd = struct.pack(\"h\" * len(outd), *outd)\n Amp = np.abs(F)\n plot(locate, f, \"time\", \"signal\", \"f(n)\") # time-domain signal (original)\n plot(locate+1, Amp, \"freq.\", \"amp.\", \"|F(k)|\")# frequency-domain signal (original)\n out_wav(wav, outd)\n \n# graph\nplt.figure()\nplt.rcParams['font.family'] = 'Times New Roman'\nplt.rcParams['font.size'] = 10\nleg = plt.legend(loc=1, fontsize=25)\n# leg.get_frame().set_alpha(1)\n \n# read\nwave_file = wave.open(file, \"rb\")\nch = wave_file.getnchannels()\nwidth = wave_file.getsampwidth()\nfr = wave_file.getframerate()\nfn = wave_file.getnframes()\nf = wave_file.readframes(wave_file.getnframes())\nwave_file.close()\nf = np.frombuffer(f, dtype = \"int16\")\nF = np.fft.fft(f)\nN = len(F) # number of samples\nt = np.arange(0, N*dt, dt) # time axis\nfreq = np.linspace(0, 1.0/dt, N) # frequency axis\n\n# filter\nF2 = F.copy()\nF3 = F.copy()\nF4 = F.copy()\n\nF2[(freq < fc)] = 0 # high\nF3[(freq > fc2)] = 0 #low\nF4[(freq < fc/2)] = 0 #band\nF4[(fc2/2 < freq)] = 0\n\nifft_data(F, 421, \"original.wav\")\nifft_data(F2, 423, \"high.wav\")\nifft_data(F3, 425, \"low.wav\")\nifft_data(F4, 427, \"band.wav\")\n\n# draw\n# leg.get_frame().set_alpha(1)\nplt.show()\n\n\n\n# measure execution time\nstart = time.time()\nnp.fft.fft(f[10000:11024])\nend = time.time()\nt1 = end - start\nprint(\"1024fft\", t1)\nstart = time.time()\ndft(f[10000:11024])\nend = time.time()\nt2 = end - start\nprint(\"1024dft\", t2)\nprint(\"1024 fft - dft\", t2 - t1) #1024\n\nstart = time.time()\nnp.fft.fft(f[10000:12048])\nend = time.time()\nt1 = end - start\nprint(\"2048fft\", t1)\nstart = time.time()\ndft(f[10000:12048])\nend = time.time()\nt2 = end - start\nprint(\"2048dft\", t2)\nprint(\"2048 fft - dft\", t2 - t1) #2048\n\nstart = time.time()\nnp.fft.fft(f[10000:14096])\nend = time.time()\nt1 = end - start\nprint(\"4096fft\", t1)\nstart = time.time()\ndft(f[10000:14096])\nend = time.time()\nt2 = end - start\nprint(\"4096dft\", t2)\nprint(\"4096 fft - dft\", t2 - t1) #4096\n\n# 1024fft 0.00017452239990234375\n# 1024dft 2.41331148147583\n# 1024 fft - dft 2.4131369590759277\n# 2048fft 0.0002422332763671875\n# 2048dft 9.947956800460815\n# 2048 fft - dft 9.947714567184448\n# 4096fft 0.0004248619079589844\n# 4096dft 39.19393801689148\n# 4096 fft - dft 39.19351315498352\n\n","repo_name":"NaokiOsako/Fourier","sub_path":"Fourier.py","file_name":"Fourier.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18557553939","text":"# import math\n# import statistics\n# a=input()\n#b,c=int(input()),int(input())\n# c=[]\n# for i in a:\n# c.append(i)\ne1,e2 = map(str,input().split())\n# f = list(map(int,input().split()))\n#g = [input() for _ in range(a)]\n# h = []\n# for i in range(e1):\n# h.append(list(map(int,input().split())))\nc=[]\ncount=0\nans=0\nfor i in range(int(e1),int(e2)+1):\n count=0\n for k in range(len(str(i))//2):\n if str(i)[k]==str(i)[-(k+1)]:\n count+=1\n if len(str(i))//2 == count:\n ans+=1\n\nprint(ans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03416/s284674937.py","file_name":"s284674937.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"8386394942","text":"# !/usr/bin/python3\n\nfrom math import log2\nfrom numpy import tri\nimport pandas as 
pd\nimport re\nfrom pathlib import Path\n\nclass LanguageModel:\n\n    def __init__(self):\n        self.unigram = pd.DataFrame(columns=[\"cnt\"])\n        self.bigram = pd.DataFrame(columns=[\"word1\", \"word2\", \"cnt\"])\n        self.trigram = pd.DataFrame(columns=[\"word1\", \"word2\", \"word3\", \"cnt\"])\n\n    def read_data(self, corpus):\n        # adapt code from bigram.py\n        lines = corpus.readlines()\n        # extra start on first line\n        entire_file = \"<s> \"\n        for line in lines:\n            line = line.strip()\n            # get rid of most punctuation\n            line = re.sub(pattern=r'[^a-zA-Z0-9\\s]', repl=\"\", string=line)\n            # add beginning and end of sentence tokens\n            entire_file += \"<s> \" + line + \" </s> \"\n        # extra stop on last line\n        entire_file += \"</s>\"\n\n        # unigram df - adapted from bigram.py\n        entire_file = entire_file.split()\n        for word in entire_file:\n            if word in self.unigram.index:\n                self.unigram.loc[word, \"cnt\"] += 1\n            else:\n                row = pd.Series(data={\"cnt\": 1}, name=word)\n                self.unigram = self.unigram.append(row, ignore_index=False)\n        # bigram df - adapted from bigram.py\n        for i in range(len(entire_file)-1):\n            first_word = entire_file[i]\n            second_word = entire_file[i+1]\n            gram = first_word + \" \" + second_word\n            if gram in self.bigram.index:\n                self.bigram.loc[gram, \"cnt\"] += 1\n            else:\n                row = pd.Series(data={\"cnt\": 1, \"word1\": first_word, \"word2\": second_word}, name=gram)\n                self.bigram = self.bigram.append(row, ignore_index=False)\n        # trigram df - adapted from bigram.py\n        for i in range(len(entire_file)-2):\n            w1 = entire_file[i]\n            w2 = entire_file[i+1]\n            w3 = entire_file[i+2]\n            gram = w1 + \" \" + w2 + \" \" + w3\n            if gram in self.trigram.index:\n                self.trigram.loc[gram, \"cnt\"] += 1\n            else:\n                row = pd.Series(data={\"cnt\": 1, \"word1\": w1, \"word2\": w2, \"word3\": w3}, name=gram)\n                self.trigram = self.trigram.append(row, ignore_index=False)\n\n    # changes tokens only seen once into <UNK> and updates all dataframes\n    def train_unk(self):\n        # adapt unigram train_unk to UNK unigram_df\n        num_unk = self.unigram.loc[self.unigram[\"cnt\"] == 1].size\n        unked_words = self.unigram[self.unigram[\"cnt\"] == 1].index\n        df = self.unigram[self.unigram[\"cnt\"] != 1]\n        row = pd.Series(data={\"cnt\": num_unk}, name=\"<UNK>\")\n        self.unigram = df.append(row, ignore_index=False)\n\n        # adapt bigram train_unk to UNK bigram_df\n        self.bigram = self.bigram.replace(unked_words, \"<UNK>\")\n        self.bigram = self.bigram.groupby(['word1', 'word2']).sum()\n        unked_bigram = pd.DataFrame()\n        for tup in self.bigram.index:\n            w1, w2 = tup\n            row = pd.Series(data=[w1, w2, self.bigram.loc[w1, w2][\"cnt\"]], name=w1 + \" \" + w2)\n            unked_bigram = unked_bigram.append(row, ignore_index=False)\n        unked_bigram.columns = [\"word1\", \"word2\", \"cnt\"]\n        unked_bigram[\"cnt\"] = unked_bigram.cnt.apply(int)\n        self.bigram = unked_bigram\n\n        # trigram\n        self.trigram = self.trigram.replace(unked_words, \"<UNK>\")\n        self.trigram = self.trigram.groupby(['word1', 'word2', 'word3']).sum()\n        unked_trigram = pd.DataFrame()\n        for tup in self.trigram.index:\n            w1, w2, w3 = tup\n            row = pd.Series(data=[w1, w2, w3, self.trigram.loc[w1, w2, w3][\"cnt\"]], \\\n                name=w1 + \" \" + w2 + \" \" + w3)\n            unked_trigram = unked_trigram.append(row, ignore_index=False)\n        unked_trigram.columns = [\"word1\", \"word2\", \"word3\", \"cnt\"]\n        unked_trigram[\"cnt\"] = unked_trigram.cnt.apply(int)\n        self.trigram = unked_trigram\n\n    def train_prob(self):\n        # folded smoothing into this method\n        for index, row in self.trigram.iterrows():\n            count = row['cnt'] + 1\n            denom = self.bigram.loc[row['word1'] + \" \" + row['word2'], 
\"cnt\"] + len(self.unigram.index) - 1\n self.trigram.loc[index, 'MLE'] = log2(float(count)/denom)\n\n def print_ngram(self):\n \"\"\"\n prints out each trigram with its logged MLE to the third decimal place\n \"\"\"\n # \n # highest to lowest prob, 3 decimal place rounded\n # then alphabetical\n # adapted from both bigram and unigram\n self.trigram.index.name = \"index\"\n self.trigram.sort_values(by=['MLE', \"index\"], ascending = [False, True], inplace = True)\n for index, row in self.trigram.iterrows():\n \t print(index, round(row['MLE'], 3))\n\n def train(self, train_corpus):\n \"\"\"\n pass in the training corpus. prints each trigram with its logged MLE.\n creates trigram_df.csv, trigram_bi_df.csv, trigram_uni_df.csv for efficiency after training the model once\n \"\"\"\n filename1 = \"trigram_df.csv\"\n filename2 = \"trigram_bi_df.csv\"\n filename3 = \"trigram_uni_df.csv\"\n if Path(filename1).exists() and Path(filename2).exists() and Path(filename3).exists():\n self.trigram = pd.read_csv(filename1, index_col=0)\n self.bigram = pd.read_csv(filename2, index_col=0)\n self.unigram = pd.read_csv(filename3, index_col=0)\n else:\n file = open(train_corpus, 'r')\n self.read_data(file)\n file.close()\n self.train_unk()\n self.train_prob()\n self.trigram.to_csv(filename1)\n self.bigram.to_csv(filename2)\n self.unigram.to_csv(filename3)\n self.print_ngram()\n\n def score_unk(self, sent):\n unked_sent = \"\"\n for w in sent.split():\n if w in self.unigram.index:\n unked_sent += w + \" \"\n else:\n unked_sent += \" \"\n return unked_sent\n\n def score_prob(self, sent):\n \"\"\"\n takes in a string sentence, returns the probability of the sentence\n \"\"\"\n prob = 0\n sent_list = sent.split()\n for i in range(len(sent_list)):\n if (i + 2) < len(sent_list):\n bi_index = sent_list[i] + \" \" + sent_list[i+1]\n index = bi_index + \" \" + sent_list[i+2]\n MLE = 0\n if index in self.trigram.index:\n MLE = self.trigram.loc[index, 'MLE']\n elif bi_index in self.bigram.index:\n denom = self.bigram.loc[bi_index, \"cnt\"] + len(self.unigram.index) - 1\n MLE = log2(1.0/denom)\n else:\n denom = len(self.unigram.index) - 1\n MLE = log2(1.0/denom)\n prob += MLE\n return prob\n\n def calc_perplex(self, sum, N):\n H = (-1/N) * sum\n return round(2 ** H, 3)\n\n def score(self, test_corpus):\n total_prob = 0\n num_words = 0\n\n # read in file\n f = open(test_corpus, 'r')\n lines = f.readlines()\n f.close()\n\n # iterate through lines, outputting individual prob\n for i in range(len(lines)):\n line = lines[i]\n unked_line = self.score_unk(re.sub(pattern=r'[^a-zA-Z0-9\\s]', repl=\"\", string=line))\n\n if i == 0:\n prob_line = \" \" + unked_line + \" \"\n else:\n prob_line = \" \" + unked_line + \" \"\n if i != len(lines) - 1:\n prob_line += \" \"\n\n # +1 for \n num_words += len(unked_line.split()) + 1\n\n prob = self.score_prob(prob_line)\n total_prob += prob\n print(line.strip() + \" \" + str(prob))\n\n # determine perplexity\n perp = self.calc_perplex(total_prob, num_words)\n print(\"Perplexity = \" + str(perp))\n\n\n def generate(self):\n \"\"\"\n Generates a sentence using trigram probabilities.\n Ignores all . 
Randomizes for valid starting word,\n then randomizes the top three probabilities for each next\n word.\n \"\"\"\n # loop until creation of a valid sentence\n start_new_sentence = True\n while start_new_sentence:\n # change boolean to indicate we are now continuing an existing sentence\n start_new_sentence = False\n # df with all word2 as , ignore UNK\n start = self.trigram[(self.trigram.word2 == \"\") & (self.trigram.word3 != \"\")]\n # randomize for one row\n start_row = start.sample()\n sent_list = [\"\"]\n # retrieve word3 and add to sentence list\n word = start_row.word3.iloc[0]\n sent_list.append(word)\n # keep track of index in sentence list\n i = 1\n\n while word != \"\" and not start_new_sentence:\n first = sent_list[i-1]\n # df with all word1 as first and word2 as word, ignore UNK\n trigrams = self.trigram[(self.trigram.word1 == first) & (self.trigram.word2 == word) & (self.trigram.word3 != \"\")]\n # if trigrams is empty, we need to start over\n start_new_sentence = trigrams.index.size == 0\n # if trigrams isn't empty, add the next word\n if not start_new_sentence:\n # finds rows of top 3 prob\n top_3 = trigrams.nlargest(3, \"MLE\")\n # randomizes for one row and retrieves word3, appending to sentence list\n word = top_3.sample().word3.iloc[0]\n sent_list.append(word)\n i += 1\n sent = \" \".join(sent_list[1:len(sent_list)-1]) + \".\"\n print(sent)\n","repo_name":"bnk7/ling472-project","sub_path":"trigram.py","file_name":"trigram.py","file_ext":"py","file_size_in_byte":9930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"44686295883","text":"import json\nfrom datetime import datetime\n\nimport telebot\n\nfrom app import app, db\nfrom app.models import Connection, ConnectType, LinkType, User\nfrom app.telegram_function import (append_sheet, delete_row, delete_rows,\n get_sheet)\nfrom app.utils import extract_values\n\nbot = telebot.TeleBot(app.config[\"BOT_TOKEN\"])\ndef create_user(tg_data):\n\tuser = User.query.filter_by(id=tg_data['id']).first()\n\tif user is None:\n\t\tuser = User(id=tg_data['id'],\n\t\t\tusername=tg_data['username'],\n\t\t\tfirst_name=tg_data['first_name'],\n\t\t\tlast_name=tg_data['last_name'],\n\t\t\tphoto_url=tg_data['photo_url'],\n\t\t\tauth_date=tg_data['auth_date'])\n\t\tdb.session.add(user)\n\t\tdb.session.commit()\n\treturn user\ndef connect_link(user_id,connect_type_id,link):\n\tif link is None:\n\t\treturn False\n\tif user_id is None:\n\t\treturn False\n\tif connect_type_id is None:\n\t\treturn False\n\tuser_id = int(user_id)\n\tconnection = Connection(user_id=user_id,connect_type=connect_type_id,connect_link=link)\n\tdb.session.add(connection)\n\tdb.session.commit()\n\treturn True\ndef get_connections(user_id):\n\tconnections = Connection.query.filter_by(user_id=user_id).all()\n\treturn connections\ndef get_connection_types():\n\tconnection_types = ConnectType.query.all()\n\treturn connection_types\ndef get_connection_type(connection_type_id):\n\tconnection_type = ConnectType.query.filter_by(id=connection_type_id).first()\n\treturn connection_type\ndef get_link(user_id,type):\n\tconnection = Connection.query.filter_by(user_id=user_id,connect_type=type).first()\n\tif connection is None:\n\t\treturn None\n\treturn connection.connect_link\ndef add_spending(user_id,name,amount,type,desc,sheet_name=None):\n\tlink = get_link(user_id,LinkType.GGSHEET.value)\n\tdate = datetime.now().strftime(\"%m/%d/%Y\")\n\tdata = [name, amount,type,date,desc]\n\tif sheet_name is None:\n\t\tsheet_name = 
\"{}/{}\".format(datetime.now().month, datetime.now().year)\n\tappend_sheet(link, sheet_name,data)\ndef remove_spending(user_id,row_index,sheet_name=None):\n\tlink = get_link(user_id,LinkType.GGSHEET.value)\n\tif sheet_name is None:\n\t\tsheet_name = \"{}/{}\".format(datetime.now().month, datetime.now().year)\n\trow_index = int(row_index)\n\tdelete_row(link, sheet_name,row_index)\ndef remove_spendings(user_id,row_indexes,sheet_name=None):\n\tlink = get_link(user_id,LinkType.GGSHEET.value)\n\tif sheet_name is None:\n\t\tsheet_name = \"{}/{}\".format(datetime.now().month, datetime.now().year)\n\tdelete_rows(link, sheet_name,extract_values(row_indexes, \"id\"))\ndef get_records(user_id,sheet_name=None):\n\tif sheet_name is None:\n\t\tsheet_name = \"{}/{}\".format(datetime.now().month, datetime.now().year)\n\tlink = get_link(user_id,LinkType.GGSHEET.value)\n\treturn get_sheet(link, sheet_name)","repo_name":"xdnuos/finance","sub_path":"app/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18676051129","text":"\nfrom users.views import main_calculation\nfrom OTP.models import Emails\nfrom django.shortcuts import render\nfrom django.core import serializers\nfrom django.http import HttpResponse, JsonResponse\nfrom django.contrib.auth.decorators import user_passes_test\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.mail import EmailMessage\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils import timezone\nfrom rest_framework import pagination\nfrom .models import supplier\nfrom .permissions import IsSupplier\nfrom config.settings import ADMIN_EMAILS\nfrom contract.models import contract\nfrom contract.serializers import ContractSerializer,ContractCustSerializer\nfrom customer.permissions import IsCustomer\nfrom customer.serializers import *\nfrom rest_framework.status import HTTP_400_BAD_REQUEST, HTTP_200_OK, HTTP_302_FOUND, HTTP_404_NOT_FOUND, \\\nHTTP_201_CREATED\nfrom rest_framework.response import Response\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.views import APIView\nfrom rest_framework import status, viewsets\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.decorators import action, api_view , schema , permission_classes\nfrom rest_framework.schemas import AutoSchema\nfrom datetime import datetime,date\nfrom .serializers import *\nimport json\nfrom extensions import jalali\nfrom customer.models import Guarantee\n\n# Create your views here.\n@api_view(['POST'])\n@permission_classes([IsSupplier])\n@schema(AutoSchema())\ndef old_specialCalc(request):\n s = request.user.supplier\n data = dict(request.data.items())\n data['additional_costs'] = data.get('additional_costs',s.additional_costs)\n data['downpayment_rate'] = data.get('downpayment_rate',s.downpayment_rate)\n data['discount'] = data.get('discount',s.discount)\n data['financial_source_rate'] = data.get('financial_source_rate',s.coffer.financial_source_rate1)\n data['company_gain_rate'] = data.get('company_gain_rate',s.company_gain_rate_one)\n data['investor_gain_rate'] = data.get('investor_gain_rate',s.investor_gain_rate)\n data['warranty_gain_rate'] = data.get('warranty_gain_rate',s.issuer.warranty_gain_rate)\n data['share_rate'] = data.get('share_rate',s.issuer.share_rate)\n serializer = SpecialCalcInputSerializer(data = data)\n if 
serializer.is_valid():\n Input = serializer.save()\n calc_out = {}\n discounted_net_amount = int(round(Input.net_amount - Input.face_net_amount * Input.discount , -4)) #مبلغ فاکتور پس از تخفیف\n\n calc_out['downpayment'] = int(round(Input.net_amount * Input.downpayment_rate , -4)) # پیش پرداخت\n\n loan_face_value = Input.net_amount - calc_out['downpayment']\n\n supplier_balance = int(round(discounted_net_amount - calc_out['downpayment'] , -4)) #تسویه ی فروشگاه\n\n company_gain = int(round(Input.face_net_amount * Input.company_gain_rate))\n\n investor_gain = int(round(Input.face_net_amount * Input.investor_gain_rate))\n\n month = Input.num_of_pay\n head = (month + 1 ) / 24\n source_percent = Input.financial_source_rate * 100\n source = source_percent / 1200\n rate = (1 + source) ** month\n warranty_rate = source * rate * Input.warranty_gain_rate * month * head\n warranty_gain = int(round( warranty_rate * (company_gain + investor_gain + supplier_balance) \n / (rate * (1 - source * month * Input.warranty_gain_rate * head * Input.share_rate) - 1)))\n\n complete_gain = company_gain + investor_gain + warranty_gain * Input.share_rate\n\n loan_amount = complete_gain + supplier_balance + Input.additional_costs\n calc_out['loan_amount'] = loan_amount\n instalment_amount = loan_amount * source * rate / (rate - 1)\n\n calc_out['total_amount_of_instalments'] = instalment_amount * Input.num_of_pay\n calc_out['instalment_amount'] = calc_out['total_amount_of_instalments'] / Input.num_of_pay\n\n temp = int(round(calc_out['instalment_amount'] , -4))\n if temp - calc_out['instalment_amount'] < -1000:\n calc_out['instalment_amount'] = temp + 5000\n else:\n calc_out['instalment_amount'] = temp\n calc_out['total_amount_of_instalments'] = calc_out['instalment_amount'] * Input.num_of_pay\n calc_out['total_amount'] = int(calc_out['total_amount_of_instalments'] + calc_out['downpayment'])\n calc_out['check_dates'] = []\n pdate = jalali.Gregorian(Input.sign_date).persian_tuple()\n persian_date_format = '{}/{}/{}'\n for i in NUMBER_PAY_DICT[Input.duration][1][Input.num_of_pay]:\n year,month,day = pdate\n month += i\n if month > 12:\n month -= 12\n year += 1\n if month==12 and day > 28:\n day = 28\n calc_out['check_dates'].append(persian_date_format.format(year,month,day))\n return JsonResponse(calc_out)\n else:\n return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)\n\n\n###########################################################\n@api_view(['POST'])\n@permission_classes([IsSupplier])\n@schema(AutoSchema())\ndef specialCalc(request):\n fac = {\n '1': 'max_fac1',\n '2': 'max_fac2',\n '3': 'max_fac3',\n '4':'max_fac4'\n }\n s = request.user.supplier\n data = dict(request.data.items())\n data['additional_costs'] = data.get('additional_costs',s.additional_costs)\n data['downpayment_rate'] = data.get('downpayment_rate',s.downpayment_rate)\n data['discount'] = data.get('discount',s.discount)\n data['financial_source_rate'] = data.get('financial_source_rate',s.coffer.financial_source_rate1)\n data['finance_gain_rate'] = data.get('finance_gain_rate',s.coffer.finance_gain_rate)\n data['nokol_rate'] = data.get('nokol_rate',s.coffer.nokol_rate)\n data['company_gain_rate'] = data.get('company_gain_rate',s.company_gain_rate_one)\n data['investor_gain_rate'] = data.get('investor_gain_rate',s.investor_gain_rate)\n data['warranty_gain_rate'] = data.get('warranty_gain_rate',s.issuer.warranty_gain_rate)\n data['share_rate'] = data.get('share_rate',s.issuer.share_rate)\n serializer = SpecialCalcInputSerializer(data = data)\n 
if serializer.is_valid():\n Input = serializer.save()\n loan = getattr(s,fac[data.get('level','4')])\n max_loan = 1000000 * loan // 0.75\n \n if int(data['net_amount']) < max_loan:\n data['face_net_amount'] = int(data['net_amount'])\n else:\n data['face_net_amount'] = max_loan\n my_calc = main_calculation(data)\n x = my_calc.supp()\n return JsonResponse(x)\n else:\n return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)\n###########################################################\n\n@api_view(['POST'])\n@permission_classes([IsSupplier])\n@schema(AutoSchema())\ndef old_calculator(request):\n calc_out = {}\n try:\n s = request.user.supplier\n if request.method == 'POST':\n data = json.loads(request.body)\n net_amount = data['net_amount'] #مبلغ فاکتور\n number_of_instalment = data['number_of_instalment'] #تعداد اقساط\n additional_costs = s.additional_costs #هزینه های اضافی\n downpayment_rate = data.get('downpayment_rate',s.downpayment_rate) #(نسبت پیش پرداخت (بین ۰-۱\n discount = s.discount # تخفیف\n financial_source_rate = s.coffer.financial_source_rate1 # نرخ خرید پول\n if number_of_instalment <=12:\n company_gain_rate = s.company_gain_rate_one # کارمزد توسعه\n financial_source_rate = s.coffer.financial_source_rate1 # نرخ خرید پول\n else:\n company_gain_rate = s.company_gain_rate_two\n financial_source_rate = s.coffer.financial_source_rate2 # نرخ خرید پول\n investor_gain_rate = s.investor_gain_rate # کارمزد بازاریاب\n warranty_gain_rate = s.issuer.warranty_gain_rate # کارمزد صدور ضمانت نامه\n share_rate = s.issuer.share_rate # سهم توسعه از کارمزد صدور ضمانت نامه\n\n calc_out['discounted_net_amount'] = int(round(net_amount *(1 - discount) , -4)) #مبلغ فاکتور پس از تخفیف\n\n calc_out['downpayment'] = int(round(net_amount * downpayment_rate , -4)) # پیش پرداخت\n\n calc_out['loan_face_value'] = net_amount - calc_out['downpayment']\n\n calc_out['supplier_balance'] = int(round(calc_out['discounted_net_amount'] - calc_out['downpayment'] , -4)) #تسویه ی فروشگاه\n\n calc_out['company_gain'] = int(round(net_amount * company_gain_rate))\n\n calc_out['investor_gain'] = int(round(net_amount * investor_gain_rate))\n\n month = number_of_instalment\n head = (month + 1 ) / 24\n source_percent = financial_source_rate * 100\n source = source_percent / 1200\n rate = (1 + source) ** month\n warranty_rate = source * rate * warranty_gain_rate * month * head\n calc_out['warranty_gain'] = int(round( warranty_rate * (calc_out['company_gain'] + calc_out['investor_gain']+ calc_out['supplier_balance']) \n / (rate * (1 - source * month * warranty_gain_rate * head * share_rate) - 1)))\n\n calc_out['complete_gain'] = calc_out['company_gain'] + calc_out['investor_gain'] + calc_out['warranty_gain']* share_rate\n\n calc_out['loan_amount'] = calc_out['complete_gain'] + calc_out['supplier_balance'] + additional_costs\n\n calc_out['instalment_amount'] = calc_out['loan_amount'] * source * rate / (rate - 1)\n temp = int(round(calc_out['instalment_amount'] , -4))\n if temp - calc_out['instalment_amount'] < -1000:\n calc_out['instalment_amount'] = temp + 5000\n else:\n calc_out['instalment_amount'] = temp\n\n calc_out['total_amount_of_instalments'] = calc_out['instalment_amount'] * number_of_instalment\n\n calc_out['customer_check'] = int(round(calc_out['total_amount_of_instalments'] * 1.1 , -6)) + 1000000\n\n \n calc_out['surety_check'] = int(round(calc_out['total_amount_of_instalments'] * 1.5 , -6)) + 1000000\n\n calc_out['total_amount'] = int(calc_out['instalment_amount'] * number_of_instalment + \n 
calc_out['downpayment'])\n return JsonResponse(calc_out)\n\n except Exception as e:\n return HttpResponse(str(e))\n\n@api_view(['POST'])\n@permission_classes([IsSupplier])\n@schema(AutoSchema())\ndef calculator(request):\n try:\n s = request.user.supplier\n if request.method == 'POST':\n data = json.loads(request.body)\n net_amount = data['net_amount'] #مبلغ فاکتور\n number_of_instalment = data['number_of_instalment'] #تعداد اقساط\n data['additional_costs'] = s.additional_costs #هزینه های اضافی\n data['downpayment_rate'] = data.get('downpayment_rate',s.downpayment_rate) #(نسبت پیش پرداخت (بین ۰-۱\n data['discount'] = s.discount # تخفیف\n data['financial_source_rate'] = s.coffer.financial_source_rate1 # نرخ خرید پول\n if number_of_instalment <=12:\n data['company_gain_rate'] = s.company_gain_rate_one # کارمزد توسعه\n else:\n data['company_gain_rate'] = s.company_gain_rate_two\n data['investor_gain_rate'] = s.investor_gain_rate # کارمزد بازاریاب\n data['warranty_gain_rate'] = s.issuer.warranty_gain_rate # کارمزد صدور ضمانت نامه\n data['share_rate'] = s.issuer.share_rate # سهم توسعه از کارمزد صدور ضمانت نامه\n\n my_calc = main_calculation(data)\n x = my_calc.supplier()\n return JsonResponse(x)\n\n except Exception as e:\n return HttpResponse(str(e))\n\n\nclass CustomerViewSet(viewsets.ModelViewSet):\n\n pagination_class = PageNumberPagination\n permission_classes = [IsSupplier, IsAuthenticated]\n serializer_class = CustomerDetailSerializer\n queryset = supplier.objects.filter(status=True)\n\n def get_serializer_context(self):\n context = super().get_serializer_context()\n request = context.get('request')\n try:\n s = request.user.supplier\n context['supplier_id'] = s.id\n except:\n pass\n return context\n def list(self,request):\n c_status = str(request.GET.get(\"customerStatus\"))\n try:\n s = request.user.supplier\n if (c_status == '1'):\n initial_queryset = s.customer.filter(status = c_status).order_by('-id')\n queryset = []\n for c in initial_queryset:\n try:\n contract.objects.get(supplier = s , customer = c , status__iexact = '0')\n except Exception as e:\n queryset.append(c)\n elif (c_status in ['0','2']):\n queryset = s.customer.filter(status = c_status).order_by('-id')\n else:\n queryset = s.customer.order_by('-id')\n serializer = self.get_serializer(queryset , many = True)\n return Response(serializer.data)\n except ObjectDoesNotExist:\n return Response({\"Error\",\"تامین کننده مورد نظر یافت نشد\"} , status.HTTP_404_NOT_FOUND)\nLEVEL = {\n '0' : 'مشخص نشده',\n '1' : 'سطح ۱',\n '2' : 'سطح ۲',\n '3' : 'سطح ۳',\n}\nclass ContractViewSet(viewsets.ModelViewSet):\n\n pagination_class = PageNumberPagination\n permission_classes = [IsSupplier, IsAuthenticated]\n serializer_class = ContractSerializer\n queryset = supplier.objects.filter(status=True)\n \n def list(self,request):\n try:\n page_size = request.GET.get(\"page_size\")\n if page_size != None:\n self.pagination_class.page_size = page_size\n s = request.user.supplier\n if request.GET.get(\"customerID\") is None:\n queryset = s.contract_set.all().order_by(\"-id\")\n \n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page,many = True)\n return self.get_paginated_response(serializer.data)\n serializer = self.get_serializer(queryset, many = True)\n else:\n customer_id = int(request.GET.get(\"customerID\"))\n queryset = s.contract_set.filter(customer__pk = customer_id).order_by(\"-id\")\n page = self.paginate_queryset(queryset)\n if page is not None: \n serializer = 
ContractCustSerializer(page , many = True)\n return self.get_paginated_response(serializer.data)\n serializer = ContractCustSerializer(queryset , many = True)\n return Response(serializer.data)\n except Exception as e:\n return Response({\"Error\" : str(e)} , status.HTTP_404_NOT_FOUND)\n \n def create(self, request):\n s = request.user.supplier\n data = dict(request.data.items())\n data[\"supplier\"] = s.id\n try:\n customer = s.customer.get(pk = data[\"customer\"])\n Guarantee_objs = Guarantee.objects.filter(customer = customer)\n sureties_have_permission = Guarantee_objs.filter(surety__again_purchase = True)\n sureties_have_not_permission = Guarantee_objs.filter(surety__again_purchase = False) \n false_surety = []\n for item in sureties_have_not_permission:\n false_surety += [item.surety.user.get_full_name()]\n false_surety = ','.join(false_surety)\n \n if len(Guarantee_objs) != len(sureties_have_permission):\n return Response({\"Error\" : \"ضامن {} قابلیت عقد مجدد قرارداد را ندارد \".format(false_surety)} , status.HTTP_400_BAD_REQUEST)\n if customer.again_purchase == False:\n return Response({\"Error\" : \"متقاضی اجازه خرید مجدد ندارد.\"} , status.HTTP_403_FORBIDDEN)\n if data.get(\"appoint_time\") != None and data.get(\"appoint_time\") != '' :\n appoint_time = datetime.strptime(data['appoint_time'],\"%Y-%m-%dT%H:%M\")\n if appoint_time < datetime.now():\n return Response({\"Error\" : \"رزرو زمان گذشته ممکن نیست.\"} , status.HTTP_400_BAD_REQUEST)\n elif s.Type == '0':\n return Response({\"Error\" : \"تعیین قرارملاقات ضروری است.\"} , status.HTTP_400_BAD_REQUEST)\n else:\n try:\n del(data[\"appoint_time\"])\n del(data[\"sign_date\"])\n except:\n pass\n \n if s.contract_set.filter(customer__pk = data[\"customer\"]).count() > 0:\n return Response({\"Error\" : \"اجازه تولید قرارداد با این متقاضی را ندارید\"} , status.HTTP_400_BAD_REQUEST)\n if (s.Type == '0' and customer.status not in ['1','3']) or (s.Type == '1' and customer.status not in ['0','1','3']):\n return Response({\"Error\" : \"متقاضی تایید نشده است.\"} , status.HTTP_400_BAD_REQUEST)\n serializer = self.get_serializer(data = data)\n if serializer.is_valid():\n \n Contract = serializer.save()\n for item in Guarantee_objs:\n surt = item.surety\n surt.again_purchase = False\n surt.save()\n customer.again_purchase = False\n customer.save()\n \n if Contract.appoint_time != None:\n appoint.contract = Contract\n appoint.save()\n else:\n email_dict = {\n 'name' : Contract.customer.full_name(),\n 's_name' : s.name,\n 'level' : LEVEL[Contract.customer.level],\n 'time' : persian_numbers_converter(jalali.Gregorian(date.today()).persian_string('{}/{}/{}'))\n }\n emaill = Emails.objects.get(email_type = '4')\n too = [x.strip() for x in emaill.TO.split(',')]\n email = EmailMessage(subject = emaill.ST.format(**email_dict),body = emaill.ET.format(**email_dict) ,from_email = 'admin@test.com', to = too,headers= {'Content_Type' :'text/plain'})\n email.send()\n return Response(serializer.data, status.HTTP_201_CREATED)\n return Response({\"Error\" : serializer.errors['non_field_errors'][0]}, status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n return Response({\"Error\" : str(e)},status.HTTP_404_NOT_FOUND)\n\n def update(self, request, pk = None):\n s = request.user.supplier\n data = dict(request.data.items())\n data[\"supplier\"] = s.id\n \n try:\n c = s.contract_set.get(pk = pk)\n serializer = self.get_serializer(c,data = data,partial = True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, 
status.HTTP_200_OK)\n except Exception as e:\n return Response({\"error\":str(e)} , status.HTTP_404_NOT_FOUND)\n\ndef categories(request):\n pass\n\ndef brands(request):\n pass\n\nclass SuppliersForCustomer(GenericAPIView):\n pagination_class = PageNumberPagination\n permission_classes = [IsCustomer, IsAuthenticated]\n serializer_class = SupplierShowcaseSerializer\n queryset = supplier.objects.filter(status=True)\n\n def get(self, request):\n queryset = self.filter_queryset(self.get_queryset())\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializers = self.get_serializer(page, many=True)\n result = self.get_paginated_response(serializers.data)\n data = result.data\n else:\n serializers = self.get_serializer(page, many=True)\n data = serializers.data\n return Response(data, HTTP_200_OK)\n\nclass SupplierRegistrationView(APIView):\n\n def post(self,request):\n data = dict(request.data.items())\n serializer = InitialSupplierSerializer(data = data)\n if serializer.is_valid():\n try:\n serializer.save()\n email_text = \"\"\"new supplier {brand} with phonenumber {phone_number} \n agent {agent_firstname} {agent_lastname} {agent_mobile_number} {province} {city} {address} \n details : {description} \"\"\"\n emaill = Emails.objects.get(email_type = '5')\n too = [x.strip() for x in emaill.TO.split(',')]\n email = EmailMessage(subject = emaill.ST,body = emaill.ET.format(**serializer.validated_data),from_email = 'admin@test.com', to = too,headers= {'Content_Type' :'text/plain'})\n email.send()\n return Response ({\"message\" : 'ثبت نام شما به مدیر سایت اطلاع داده شد'} , HTTP_200_OK)\n except Exception as e:\n return Response({\"error\": str(e)}, HTTP_404_NOT_FOUND)\n else:\n return Response(serializer.errors, HTTP_400_BAD_REQUEST)\n\n\nclass SupplierView(APIView):\n permission_classes = [IsAuthenticated]\n\n def get(self, request):\n user = request.user\n try:\n my_supplier = user.supplier\n serializer = SupplierSerializer(my_supplier)\n return Response(serializer.data, HTTP_200_OK)\n except Exception as e:\n return Response({\"error\": str(e)}, HTTP_404_NOT_FOUND)\n","repo_name":"khoji2001/Django-project","sub_path":"supplier/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"71319334696","text":"import pygame\nfrom center_asset import CenterAsset\n\n\nclass StaticObject:\n def __init__(self, background: CenterAsset, image: pygame.Surface, x: int, y: int, game_settings, name: str = \"\",\n text: dict = None):\n self.name = name\n self.text = text\n self.image = image\n self.image = pygame.transform.scale_by(self.image, game_settings.SCALE)\n self.image.set_colorkey(game_settings.COLOURKEY)\n self.rectangle = self.image.get_rect()\n self.x = background.x + x * game_settings.SCALE\n self.y = background.y + y * game_settings.SCALE\n self.rectangle.x, self.rectangle.y = (self.x, self.y)\n","repo_name":"xMoneMone/RPG-Package","sub_path":"static_object.py","file_name":"static_object.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"71326208296","text":"from collections import OrderedDict\n\nimport numpy as np\nfrom scipy.optimize import minimize\nfrom joblib import Parallel, delayed\n\nfrom bo.logger import EventLogger\n\n\nclass BO:\n def __init__(self, surrogate, acquisition, f, parameter_dict, n_jobs=1):\n \"\"\"Bayesian Optimization class.\n\n 
:type surrogate: surrogate model instance\n        :param surrogate: Gaussian Process surrogate model instance\n        :type acquisition: acquisition instance\n        :param acquisition: acquisition function instance\n        :param f: target function to maximize over parameters specified by `parameter_dict`\n        :type parameter_dict: dict\n        :param parameter_dict: dictionary specifying parameters, their types and bounds\n        :type n_jobs: int\n        :param n_jobs: parallel threads to use during acquisition optimization\n        \"\"\"\n        self.GP = surrogate\n        self.A = acquisition\n        self.f = f\n        self.parameters = parameter_dict\n        self.n_jobs = n_jobs\n\n        self.tau = None\n        self.init_evals = None\n\n        self.parameter_key = list(parameter_dict.keys())\n        self.parameter_value = list(parameter_dict.values())\n        self.parameter_type = [p[0] for p in self.parameter_value]\n        self.parameter_range = [p[1] for p in self.parameter_value]\n\n        self.history = []\n        self.logger = EventLogger(self)\n\n    def sample_param(self):\n        \"\"\"Randomly samples parameters over bounds.\n\n        :return: a random sample of specified parameters\n        \"\"\"\n        d = OrderedDict()\n        for index, param in enumerate(self.parameter_key):\n            if self.parameter_type[index] == 'int':\n                d[param] = np.random.randint(self.parameter_range[index][0], self.parameter_range[index][1])\n            elif self.parameter_type[index] == 'cont':\n                d[param] = np.random.uniform(self.parameter_range[index][0], self.parameter_range[index][1])\n            else:\n                raise ValueError('Unsupported variable type.')\n        return d\n\n    def _first_run(self, n_eval=3):\n        \"\"\"Performs initial evaluations before fitting GP.\n\n        :type n_eval: int\n        :param n_eval: the number of initial evaluations to perform, default is 3\n        \"\"\"\n        self.X = np.empty((n_eval, len(self.parameter_key)))\n        self.y = np.empty((n_eval,))\n        for i in range(n_eval):\n            s_param = self.sample_param()\n            s_param_val = list(s_param.values())\n            self.X[i] = s_param_val\n            self.y[i] = self.f(**s_param)\n        self.GP.fit(self.X, self.y)\n        self.tau = np.max(self.y)\n        self.history.append(self.tau)\n\n    def _acq_wrapper(self, x_new):\n        \"\"\"Evaluates the acquisition function on a point\n\n        :type x_new: np.ndarray, shape=((len(self.parameter_key),))\n        :param x_new: point to evaluate the acquisition function on\n        :return: the acquisition function value for `x_new`\n        \"\"\"\n        new_mean, new_var = self.GP.predict(x_new, return_std=True)\n        new_std = np.sqrt(new_var + 1e-6)\n        return -self.A.eval(self.tau, new_mean, new_std)\n\n    def _optimize_acq(self, method='L-BFGS-B', n_start=100):\n        \"\"\"Optimizes the acquisition function using a multi-start approach.\n\n        :type method: str\n        :param method: any `scipy.optimize` method that admits bounds and gradients, default is 'L-BFGS-B'\n        :type n_start: int\n        :param n_start: number of starting points for the optimization procedure, default is 100\n        \"\"\"\n        start_points_dict = [self.sample_param() for _ in range(n_start)]\n        start_points_arr = np.array([list(s.values()) for s in start_points_dict])\n        x_best = np.empty((n_start, len(self.parameter_key)))\n        f_best = np.empty((n_start,))\n        if self.n_jobs == 1:\n            for index, start_point in enumerate(start_points_arr):\n                res = minimize(self._acq_wrapper, x0=start_point, method=method,\n                               bounds=self.parameter_range)\n                x_best[index], f_best[index] = res.x, np.atleast_1d(res.fun)[0]\n        else:\n            opt = Parallel(n_jobs=self.n_jobs)(delayed(minimize)(self._acq_wrapper,\n                                                                 x0=start_point,\n                                                                 method=method,\n                                                                 bounds=self.parameter_range) for start_point in\n                                               start_points_arr)\n            x_best = np.array([res.x for res in opt])\n            f_best = 
np.array([np.atleast_1d(res.fun)[0] for res in opt])\n\n self.best = x_best[np.argmin(f_best)]\n\n def update_gp(self):\n \"\"\"Updates the internal model with the next acquired point and its evaluation.\n \"\"\"\n kw = {param: self.best[i] for i, param in enumerate(self.parameter_key)}\n f_new = self.f(**kw)\n self.GP.update(np.atleast_2d(self.best), np.atleast_1d(f_new))\n self.tau = np.max(self.GP.y)\n self.history.append(self.tau)\n\n def get_result(self):\n \"\"\"Prints best result in the Bayesian Optimization procedure.\n\n :type: OrderedDict\n :return: the point yielding best evaluation in the procedure\n :type: float\n :return: the best function evaluation\n \"\"\"\n arg_tau = np.argmax(self.GP.y)\n opt_x = self.GP.x[arg_tau]\n res_d = OrderedDict()\n for i, key in enumerate(self.parameter_key):\n res_d[key] = opt_x[i]\n return res_d, self.tau\n\n def run(self, max_iter=10, init_evals=3, resume=False):\n \"\"\"Runs the Bayesian Optimization procedure.\n\n :type max_iter: int\n :param max_iter: number of iterations to run, default is 10\n :type init_evals: int\n :param init_evals: initial function evaluations before fitting a GP, default is 3\n :type resume: bool\n :param resume: whether to resume the optimization procedure from the last evaluation, default is `False`\n \"\"\"\n if not resume:\n self.init_evals = init_evals\n self._first_run(self.init_evals)\n self.logger.print_init(self)\n for iteration in range(max_iter):\n self._optimize_acq()\n self.update_gp()\n self.logger.print_current(self)\n","repo_name":"xuedong/hpo","sub_path":"source/bo/bo.py","file_name":"bo.py","file_ext":"py","file_size_in_byte":6344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"86586200917","text":"from web.companies.services import DnbServiceClient, CompaniesHouseClient\nfrom web.core.exceptions import DnbServiceClientException\n\n\nclass SupportingInformationContent:\n msgs = {\n 'contact-admin': 'Please try again later or contact the site administrator.'\n }\n\n def __init__(self, grant_application):\n super().__init__()\n self.grant_application = grant_application\n self.dnb_client = DnbServiceClient()\n self.ch_client = CompaniesHouseClient()\n self._dnb_company_data = None\n\n def _make_table(self, headers=None, rows=None, col_tags=None):\n return {\n 'headers': headers or [],\n 'rows': rows or [],\n 'col_tags': col_tags or []\n }\n\n @property\n def dnb_company_data(self):\n if self._dnb_company_data is None:\n # Look in local DB Cache first.\n dnb_company_response = self.grant_application.company.last_dnb_get_company_response\n if dnb_company_response:\n self._dnb_company_data = dnb_company_response.dnb_data\n\n # If not available then go to dnb-service\n if not self._dnb_company_data:\n try:\n self._dnb_company_data = self.dnb_client.get_company(\n duns_number=self.grant_application.company.duns_number\n )\n except DnbServiceClientException:\n # leave self._dnb_company as None if not available\n pass\n return self._dnb_company_data\n\n @property\n def verify_business_entity_content(self):\n if self.dnb_company_data:\n return {\n 'employee_number': self.dnb_company_data['employee_number'],\n 'annual_sales': int(self.dnb_company_data['annual_sales'] or 0),\n 'annual_sales_currency': self.dnb_company_data['annual_sales_currency']\n }\n\n @property\n def verify_state_aid_content(self):\n rows = [\n [s.authority, s.amount, s.description, s.date_received.strftime('%B %Y')]\n for s in self.grant_application.stateaid_set.all()\n ]\n 
return {\n 'table': self._make_table(\n headers=['Authority', 'Amount', 'Description', 'Date received'],\n rows=rows or [['—', '—', '—', '—']]\n )\n }\n\n @property\n def decision_content(self):\n grant_management_process = self.grant_application.grant_management_process\n return {\n 'table': self._make_table(\n rows=[\n [\n 'Eligibility criteria',\n f'{grant_management_process.total_verified}/4'\n ],\n [\n 'Suitability score',\n f'{grant_management_process.suitability_score}/15'\n ],\n [\n 'Is the trade show appropriate?',\n 'Yes' if grant_management_process.event_is_appropriate else 'No'\n ]\n ],\n col_tags=[\"style=width:75%\", \"style=width:25%\"]\n )\n }\n","repo_name":"uktrade/trade-access-program","sub_path":"backoffice/web/grant_management/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"15832796448","text":"from SharedInterfaces.RegistryAPI import *\nimport requests\nfrom helpers.type_aliases import GetAuthFunction\nfrom rich import print\nimport json\n\n\ndef get_model_run_by_handle(\n handle_id: str,\n auth: GetAuthFunction,\n registry_endpoint: str,\n) -> ItemModelRun:\n \"\"\"\n get_model_run_by_handle\n\n Given the handle, stage and auth, will pull the given handle's model run\n record\n\n Parameters\n ----------\n handle_id : str\n The handle of the model run record\n stage : str\n The application stage\n auth : GetAuthFunction\n The auth function\n\n Returns\n -------\n ItemModelRun\n The complete ItemModelRun, or an error raised\n \"\"\"\n base = registry_endpoint\n postfix = \"/registry/activity/model_run/fetch\"\n full_endpoint = base + postfix\n params = {\n 'id': handle_id\n }\n\n # fetch the model run record\n print(f\"Fetching record id {handle_id} ...\")\n response = requests.get(\n url=full_endpoint,\n params=params,\n auth=auth()\n )\n\n # check that the response was successful\n assert response.status_code == 200, f\"Non 200 status code from registry for handle {handle_id}. Code: {response.status_code}.\"\n\n # parse as model run response\n model_run_response = ModelRunFetchResponse.parse_obj(response.json())\n\n # check success\n assert model_run_response.status.success, f\"Success is false on model run fetch response, details: {model_run_response.status.details}.\"\n\n # check the object exists\n assert model_run_response.item, f\"Item was none despite successful response!\"\n\n # check it is not a seed item\n assert model_run_response.item.record_type == RecordType.COMPLETE_ITEM, f\"Item was not complete! 
Cannot restore into neo4j.\"\n\n # All good\n assert isinstance(model_run_response.item, ItemModelRun)\n return model_run_response.item\n\n\ndef list_model_run_records_from_registry(\n auth: GetAuthFunction,\n registry_endpoint: str\n) -> List[ItemModelRun]:\n \"\"\"\n list_model_run_records_from_registry \n\n Retrieves a complete list of all model run records in the registry for\n relodging.\n\n Parameters\n ----------\n stage : str\n The stage to target\n auth : GetAuthFunction\n The auth function\n registry_endpoint_override : Optional[str], optional\n Optional override of registry endpoint, by default None\n\n Returns\n -------\n List[ItemModelRun]\n The list of complete item model runs\n\n Raises\n ------\n Various exceptions for failure conditions\n\n\n\n \"\"\"\n base = registry_endpoint\n postfix = \"/registry/activity/model_run/list\"\n full_endpoint = base + postfix\n\n list_request = SubtypeListRequest()\n\n pagination_key: Optional[PaginationKey] = None\n\n complete_items: List[ItemModelRun] = []\n exhausted = False\n\n while not exhausted:\n list_request.pagination_key = pagination_key\n fetch_response = requests.post(\n url=full_endpoint, json=json.loads(list_request.json()), auth=auth())\n\n # check the status of the response\n status_code = fetch_response.status_code\n if status_code != 200:\n # try and get details then raise HTTPException\n try:\n detail = fetch_response.json()['detail']\n raise Exception(\n f\"Registry API responded with non 200 code: {status_code}. Error: {detail}\")\n except: # unable to get details\n raise Exception(\n f\"Registry API responded with non 200 code: {status_code}. \")\n\n # 200 code meaning that parse model will be valid\n try:\n model_run_list_response: ModelRunListResponse = ModelRunListResponse.parse_obj(\n fetch_response.json())\n except Exception as e:\n raise Exception(\n f\"Tried to parse successful response from registry API model run list endpoint but failed. 
Parse error {e}.\")\n\n # We now have a parsed model response\n if model_run_list_response.status.success:\n if model_run_list_response.items:\n complete_items.extend([ItemModelRun.parse_obj(i)\n for i in model_run_list_response.items])\n else:\n raise Exception(\n \"Response from registry API was successful but had no item property!\"\n )\n else:\n raise Exception(\n f\"Response from registry API was unsuccessful, error: {model_run_list_response.status.details}.\"\n )\n\n pagination_key = model_run_list_response.pagination_key\n if pagination_key is None:\n exhausted = True\n\n return complete_items\n","repo_name":"provena/provena","sub_path":"admin-tooling/prov-store/helpers/registry_helpers.py","file_name":"registry_helpers.py","file_ext":"py","file_size_in_byte":4745,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} +{"seq_id":"38685879669","text":"# Replace with your TooGoodToGo API key\nfrom tgtg import TgtgClient\n\nAPI_KEY = \"your-api-key\"\n\n# Replace with the store name or ID you want to check\nSTORE_NAME = \"store-name\"\n\n# Create a TooGoodToGo client using your API key\n\nclient = TgtgClient(email=\"\")\ncredentials = client.get_credentials()\n\n# Get the list of stores from the API\nstores = client.get_partners()\n\n# Find the store with the given name or ID\nstore = None\nfor s in stores:\n if s.name == STORE_NAME or s.id == STORE_NAME:\n store = s\n break\n\n# Check if the store was found\nif store is None:\n print(f\"Error: store with name or ID '{STORE_NAME}' not found.\")\n exit(1)\n\n# Get the availability of magic bags at the store\nmagic_bag_data = client.get_magic_bag(store.id)\n\n# Check if magic bags are available\nif magic_bag_data.available:\n print(\"Magic bags are available at the store.\")\nelse:\n print(\"Magic bags are not available at the store.\")","repo_name":"kzhekov/Idea-Pod","sub_path":"2g2g/notify_on_available_2g2g.py","file_name":"notify_on_available_2g2g.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"22090226025","text":"from . 
import thorlabs_apt as apt\nfrom .TLPMall.TLPM import TLPM\nfrom ctypes import c_uint32, byref, create_string_buffer, c_bool, c_int, c_double # , c_void\nimport serial\n# from driwers.newport import Controller\nimport time\n\n\ndef isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\n\nclass motor(apt.Motor):\n def __init__(self, motor_id):\n apt.Motor.__init__(self, motor_id)\n self.backlash_distance = 0.0\n self.set_hardware_limit_switches(2, 1)\n self.set_move_home_parameters(2, 1, 1.0, 0.5)\n\n def __del__(self):\n self.disable()\n\n def list_available_devices():\n return apt.list_available_devices()\n\n\nclass powerMeter(TLPM):\n def __init__(self):\n TLPM.__init__(self)\n self.Connect()\n\n def __del__(self):\n self.Close()\n\n def Connect(self):\n deviceName = self.FindSingleDevice()\n\n if (deviceName == -1):\n print(\"Unable to Connect\\n\")\n return -1\n\n self.open(deviceName, c_bool(True), c_bool(True))\n self.isConnected = True\n return self.isConnected\n\n def FindSingleDevice(self):\n deviceCount = c_uint32()\n self.findRsrc(byref(deviceCount))\n\n if (deviceCount.value == 0):\n print(\"Device Not Found\\n\")\n return -1\n\n deviceName = create_string_buffer(1024)\n self.getRsrcName(c_int(0), deviceName)\n return deviceName.value\n\n def read(self):\n power = c_double()\n self.measPower(byref(power))\n return power.value\n\n def Close(self):\n if (self.isConnected == False):\n print(\"Device not Connected\\n\")\n return -1\n self.close()\n self.isConnected = False\n return 0\n\n\nclass tensionGauge():\n def __init__(self):\n self.Connect()\n\n def Connect(self):\n self.port = serial.Serial(port=\"COM3\", baudrate=115200)\n time.sleep(1) # действительно нужно\n self.port.write(1)\n time.sleep(1)\n self.isConnected = True\n return 0\n\n def Close(self):\n self.port.close()\n\n def read(self):\n self.port.write(1)\n string = ''\n i = 0\n while len(string) < 3 or not isfloat(string):\n t0 = time.time()\n while self.port.in_waiting == 0:\n if time.time() - t0 > 2:\n print('tg read waiting problem')\n return 'problem'\n string = self.port.readline()\n if len(string) < 3:\n i += 1\n if i > 10:\n print('tg read problem')\n return 'problem'\n # print(string)\n weight = float(string[0:-2])\n return weight\n\n\n# class tikalka_base():\n# _controller = Controller(idProduct=0x4000, idVendor=0x104d)\n# ides = {'x': 3, 'y': 2, 'z': 1}\n\n# def __init__(self, name):\n# self.id = tikalka_base.ides[name]\n# self.coord = 0\n\n# def IsInMotion(self):\n# motor_done_cmd = '{}MD?'.format(self.id)\n# resp = tikalka_base._controller.command(motor_done_cmd)\n# return not int(resp[2]) # True if motor in motion\n\n# def move(self, value):\n# while self.IsInMotion():\n# pass\n# move_motor_cmd = '{}PR{}'.format(self.id, int(value))\n# # print(move_motor_cmd)\n# tikalka_base._controller.command(move_motor_cmd)\n# self.coord += int(value)\n\n# def move_to(self, value):\n# self.move(value - self.coord)\n\n # def move_absolute(self, motor_id, value):\n # move_motor_cmd = '{}PA{}'.format(motor_id, value)\n # self._controller.command(move_motor_cmd)\n #\n # def get_home_position(self, motor_id):\n # return int(self._controller.command('{}DH?'.format(motor_id))[2:])\n #\n # def get_position(self, motor_id):\n # return int(self._controller.command('{}TP?'.format(motor_id))[2:])\n #\n # def set_home_position(self, motor_id, value):\n # self._controller.command('{}DH{}'.format(motor_id, value))\n #\n # def get_target(self, motor_id):\n # return 
int(self._controller.command('{}PA?'.format(motor_id))[2:])\n","repo_name":"korbash/table_control","sub_path":"mylib/driwers/win.py","file_name":"win.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"37930538354","text":"from unittest import TestCase\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n # Straightforward DFS for problem without repeated values.\n # Time Complexity: O(|V|).\n # Space Complexity: O(log|V|) or O(|V|) in worst case.\n def getTargetCopy(\n self, original: TreeNode, cloned: TreeNode, target: TreeNode\n ) -> TreeNode:\n def _dfs(node: TreeNode):\n if not node or node.val == target.val:\n return node\n return _dfs(node=node.left) or _dfs(node=node.right)\n\n return _dfs(node=cloned)\n\n\nclass SolutionTwo:\n # Modified DFS for both trees.\n # Time Complexity: O(|V|).\n # Space Complexity: O(log|V|) or O(|V|) in worst case.\n def getTargetCopy(\n self, original: TreeNode, cloned: TreeNode, target: TreeNode\n ) -> TreeNode:\n def _dfs(original_node: TreeNode, cloned_node: TreeNode):\n if not original_node or original_node is target:\n return cloned_node\n return _dfs(\n original_node=original_node.left, cloned_node=cloned_node.left\n ) or _dfs(original_node=original_node.right, cloned_node=cloned_node.right)\n\n return _dfs(original_node=original, cloned_node=cloned)\n\n\nclass TestGetTargetCopy(TestCase):\n def test_example_1(self):\n root = TreeNode(x=7)\n root.left = TreeNode(x=4)\n root.right = TreeNode(x=3)\n root.right.left = TreeNode(x=6)\n root.right.right = TreeNode(x=19)\n\n clone = TreeNode(x=7)\n clone.left = TreeNode(x=4)\n clone.right = TreeNode(x=3)\n clone.right.left = TreeNode(x=6)\n clone.right.right = TreeNode(x=19)\n\n target = root.right\n expected = clone.right\n\n assert (\n Solution().getTargetCopy(original=root, cloned=clone, target=target)\n is expected\n )\n assert (\n Solution().getTargetCopy(original=root, cloned=clone, target=target)\n is expected\n )\n","repo_name":"saubhik/leetcode","sub_path":"problems/find_a_corresponding_node_of_a_binary_tree_in_a_clone_of_that_tree.py","file_name":"find_a_corresponding_node_of_a_binary_tree_in_a_clone_of_that_tree.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} +{"seq_id":"18568625979","text":"n=int(input())\n\nprint(0)\nlowg = input()\nprint(n-1)\nhighg = input()\n\nif lowg==\"Vacant\":\n quit()\nif highg==\"Vacant\":\n quit()\n\nlow =0 \nhigh=n-1\nmid = (n-1)//2\nfor i in range(20):\n print(mid)\n temp=input()\n if temp==\"Vacant\":\n quit()\n if (mid-low)%2==0:\n if lowg==temp:\n lowg=temp\n low=mid\n else:\n high=mid\n else:\n if lowg==temp:\n high=mid\n else:\n lowg=temp\n low=mid\n #print(\"high \" + str(high) + \" low \"+str(low))\n mid=(high+low)//2","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03439/s018799631.py","file_name":"s018799631.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"26109613186","text":"\"\"\"\nA model that builds the values to display for a brew.\n\"\"\"\nfrom core.view_models.base_view_model import BaseViewModel\nfrom data.models.recipe_model import Ingredient, Recipe\n\n\nclass RecipeViewModel(BaseViewModel):\n def __init__(self, recipe: 
Recipe, **kwargs):\n super(RecipeViewModel, self).__init__(recipe, **kwargs)\n\n if recipe is not None:\n self.steps = [\n BaseViewModel(step)\n for step in recipe.steps\n if recipe is not None and recipe.steps is not None\n ]\n self.ingredients = [\n BaseViewModel(ingredient)\n for ingredient in recipe.ingredients\n if recipe is not None and recipe.ingredients is not None\n ]\n","repo_name":"JosephTTurner/chubfactor","sub_path":"app/view_models/recipe_view_model.py","file_name":"recipe_view_model.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"72741628137","text":"import pygame as pg\r\nimport numpy as np\r\n\r\npg.init()\r\npg.mixer.init()\r\nscreen = pg.display.set_mode((1280, 720))\r\nfont = pg.font.SysFont(\"Impact\", 48)\r\n\r\ndef synth(frequency, duration=1.5, sampling_rate=44100):\r\n frames = int(duration*sampling_rate)\r\n arr = np.cos(2*np.pi*frequency*np.linspace(0,duration, frames))\r\n arr = arr + np.cos(4*np.pi*frequency*np.linspace(0,duration, frames))\r\n arr = arr - np.cos(6*np.pi*frequency*np.linspace(0,duration, frames))\r\n## arr = np.clip(arr*10, -1, 1) # squarish waves\r\n## arr = np.cumsum(np.clip(arr*10, -1, 1)) # triangularish waves pt1\r\n## arr = arr+np.sin(2*np.pi*frequency*np.linspace(0,duration, frames)) # triangularish waves pt1\r\n arr = arr/max(np.abs(arr)) # triangularish waves pt1\r\n sound = np.asarray([32767*arr,32767*arr]).T.astype(np.int16)\r\n sound = pg.sndarray.make_sound(sound.copy())\r\n \r\n return sound\r\n\r\n\r\nkeylist = '123456789qwertyuioasdfghjklzxcvbnm,.'\r\nnotes_file = open(\"noteslist.txt\")\r\nfile_contents = notes_file.read()\r\nnotes_file.close()\r\nnoteslist = file_contents.splitlines()\r\n\r\nkeymod = '0-='\r\nnotes = {} # dict to store samples\r\nfreq = 16.3516 # start frequency\r\nposx, posy = 25, 25 #start position\r\n\r\n\r\nfor i in range(len(noteslist)):\r\n mod = int(i/36)\r\n key = keylist[i-mod*36]+str(mod) \r\n sample = synth(freq)\r\n color = np.array([np.sin(i/25+1.7)*130+125,np.sin(i/30-0.21)*215+40, np.sin(i/25+3.7)*130+125])\r\n color = np.clip(color, 0, 255)\r\n notes[key] = [sample, noteslist[i], freq, (posx, posy), 255*color/max(color)]\r\n notes[key][0].set_volume(0.33)\r\n notes[key][0].play()\r\n notes[key][0].fadeout(100)\r\n freq = freq * 2 ** (1/12)\r\n posx = posx + 140\r\n if posx > 1220:\r\n posx, posy = 25, posy+56\r\n \r\n screen.blit(font.render(notes[key][1], 0, notes[key][4]), notes[key][3])\r\n pg.display.update()\r\n \r\n\r\nrunning = 1\r\nmod = 1\r\npg.display.set_caption(\"FinFET Synth - Change range: 0 - = // Play with keys: \"+keylist )\r\n\r\nkeypresses = []\r\nwhile running:\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):\r\n running = False\r\n if event.type == pg.KEYDOWN:\r\n key = str(event.unicode)\r\n if key in keymod:\r\n mod = keymod.index(str(event.unicode))\r\n elif key in keylist:\r\n key = key+str(mod)\r\n notes[key][0].play()\r\n keypresses.append([1, notes[key][1], pg.time.get_ticks()])\r\n screen.blit(font.render(notes[key][1], 0, (255,255,255)), notes[key][3])\r\n if event.type == pg.KEYUP and str(event.unicode) != '' and str(event.unicode) in keylist:\r\n key = str(event.unicode)+str(mod)\r\n notes[key][0].fadeout(100)\r\n keypresses.append([0, notes[key][1], pg.time.get_ticks()])\r\n screen.blit(font.render(notes[key][1], 0, notes[key][4]), notes[key][3])\r\n\r\n 
pg.display.update()\r\n\r\npg.display.set_caption(\"Exporting sound sequence\")\r\nif len(keypresses) > 1:\r\n    for i in range(len(keypresses)-1):\r\n        keypresses[-i-1][2] = keypresses[-i-1][2] - keypresses[-i-2][2]\r\n    keypresses[0][2] = 0 # first at zero\r\n\r\n    with open(\"test.txt\", \"w\") as file:\r\n        for i in range(len(keypresses)):\r\n            file.write(str(keypresses[i])+'\\n') # separate lines for readability\r\n        file.close()\r\n    \r\npg.mixer.quit()\r\npg.quit()\r\n","repo_name":"FinFetChannel/Python_Synth","sub_path":"keyboard_synth.py","file_name":"keyboard_synth.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"90"} +{"seq_id":"34290833927","text":"## written by xiongbiao\n## date 2020-6-3\n\nfrom Tree.node import TreeNode\n'''\nMaximum tree definition: a tree in which every node's value is greater than any other value in its subtree.\nYou are given the root node of such a maximum tree, root.\nAs in the previous problem, the given tree was constructed recursively from a list A (root = Construct(A)) using the Construct(A) routine below:\nIf A is empty, return null\nOtherwise, let A[i] be the largest element of A. Create a root node root with value A[i]\nThe left subtree of root will be built as Construct([A[0], A[1], ..., A[i-1]])\nThe right subtree of root will be built as Construct([A[i+1], A[i+2], ..., A[A.length - 1]])\nReturn root\nNote that we are not given A directly, only a root node root = Construct(A).\n'''\n\nclass Solution(object):\n    def insertIntoMaxTree(self, root, val):\n        \"\"\"\n        :type root: TreeNode\n        :type val: int\n        :rtype: TreeNode\n        \"\"\"\n        if root is None:\n            return TreeNode(val)\n\n        if root.val > val:\n            right = self.insertIntoMaxTree(root.right, val)\n            root.right = right\n            return root\n        else:\n            left = TreeNode(val)\n            left.left = root\n            return left","repo_name":"xb2342996/Algorithm-and-Data-Structure","sub_path":"LeetCode_vII/Tree/998. 最大二叉树 II.py","file_name":"998. 最大二叉树 II.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"32254624979","text":"\nfrom __future__ import division, print_function\n\nimport tensorflow as tf\nslim = tf.contrib.slim\n\nfrom utils.layer_utils import conv2d, darknet53_body, yolo_block, upsample_layer\n\nclass yolov3(object):\n    '''\n    yolov3 model, used for license plate detection and recognition\n    '''\n    def __init__(self, class_num, anchors, use_label_smooth=False, use_focal_loss=True, batch_norm_decay=0.999, weight_decay=5e-4, use_static_shape=True):\n\n        self.class_num = class_num\n        self.anchors = anchors\n        self.batch_norm_decay = batch_norm_decay\n        self.use_label_smooth = use_label_smooth\n        self.use_focal_loss = use_focal_loss\n        self.weight_decay = weight_decay\n        self.use_static_shape = use_static_shape\n\n    def forward(self, inputs, is_training=False, reuse=False):\n        '''\n        Forward pass: takes the image tensor as input and outputs confidence scores and bounding boxes\n        '''\n        self.img_size = tf.shape(inputs)[1:3]\n\n        batch_norm_params = {\n            'decay': self.batch_norm_decay,\n            'epsilon': 1e-05,\n            'scale': True,\n            'is_training': is_training,\n            'fused': None,\n        }\n\n        with slim.arg_scope([slim.conv2d, slim.batch_norm], reuse=reuse):\n            with slim.arg_scope([slim.conv2d],\n                                normalizer_fn=slim.batch_norm,\n                                normalizer_params=batch_norm_params,\n                                biases_initializer=None,\n                                activation_fn=lambda x: tf.nn.leaky_relu(x, alpha=0.1),\n                                weights_regularizer=slim.l2_regularizer(self.weight_decay)):\n                with tf.variable_scope('darknet53_body'):\n                    route_1, route_2, route_3 = darknet53_body(inputs)\n\n                with tf.variable_scope('yolov3_head'):\n                    inter1, net = yolo_block(route_3, 512)\n                    feature_map_1 = slim.conv2d(net, 3 * (5 + self.class_num), 1,\n                                                stride=1, normalizer_fn=None,\n                                                activation_fn=None, biases_initializer=tf.zeros_initializer())\n                    feature_map_1 = tf.identity(feature_map_1, name='feature_map_1')\n\n                    inter1 = 
conv2d(inter1, 256, 1)\n                    inter1 = upsample_layer(inter1, route_2.get_shape().as_list() if self.use_static_shape else tf.shape(route_2))\n                    concat1 = tf.concat([inter1, route_2], axis=3)\n\n                    inter2, net = yolo_block(concat1, 256)\n                    feature_map_2 = slim.conv2d(net, 3 * (5 + self.class_num), 1,\n                                                stride=1, normalizer_fn=None,\n                                                activation_fn=None, biases_initializer=tf.zeros_initializer())\n                    feature_map_2 = tf.identity(feature_map_2, name='feature_map_2')\n\n                    inter2 = conv2d(inter2, 128, 1)\n                    inter2 = upsample_layer(inter2, route_1.get_shape().as_list() if self.use_static_shape else tf.shape(route_1))\n                    concat2 = tf.concat([inter2, route_1], axis=3)\n\n                    _, feature_map_3 = yolo_block(concat2, 128)\n                    feature_map_3 = slim.conv2d(feature_map_3, 3 * (5 + self.class_num), 1,\n                                                stride=1, normalizer_fn=None,\n                                                activation_fn=None, biases_initializer=tf.zeros_initializer())\n                    feature_map_3 = tf.identity(feature_map_3, name='feature_map_3')\n\n        return feature_map_1, feature_map_2, feature_map_3\n\n    def reorg_layer(self, feature_map, anchors):\n        '''\n        Region proposal network: extracts ROIs\n        '''\n        grid_size = feature_map.get_shape().as_list()[1:3] if self.use_static_shape else tf.shape(feature_map)[1:3] # [13, 13]\n        ratio = tf.cast(self.img_size / grid_size, tf.float32)\n\n        rescaled_anchors = [(anchor[0] / ratio[1], anchor[1] / ratio[0]) for anchor in anchors]\n\n        feature_map = tf.reshape(feature_map, [-1, grid_size[0], grid_size[1], 3, 5 + self.class_num])\n\n        box_centers, box_sizes, conf_logits, prob_logits = tf.split(feature_map, [2, 2, 1, self.class_num], axis=-1)\n        box_centers = tf.nn.sigmoid(box_centers)\n\n        grid_x = tf.range(grid_size[1], dtype=tf.int32)\n        grid_y = tf.range(grid_size[0], dtype=tf.int32)\n        grid_x, grid_y = tf.meshgrid(grid_x, grid_y)\n        x_offset = tf.reshape(grid_x, (-1, 1))\n        y_offset = tf.reshape(grid_y, (-1, 1))\n        x_y_offset = tf.concat([x_offset, y_offset], axis=-1)\n        x_y_offset = tf.cast(tf.reshape(x_y_offset, [grid_size[0], grid_size[1], 1, 2]), tf.float32)\n\n        box_centers = box_centers + x_y_offset\n        box_centers = box_centers * ratio[::-1]\n\n        box_sizes = tf.exp(box_sizes) * rescaled_anchors\n\n        box_sizes = box_sizes * ratio[::-1]\n\n        boxes = tf.concat([box_centers, box_sizes], axis=-1)\n\n        return x_y_offset, boxes, conf_logits, prob_logits\n\n\n    def predict(self, feature_maps):\n        '''\n        CNN detector module: takes feature maps as input and outputs predictions\n        '''\n        feature_map_1, feature_map_2, feature_map_3 = feature_maps\n\n        feature_map_anchors = [(feature_map_1, self.anchors[6:9]),\n                               (feature_map_2, self.anchors[3:6]),\n                               (feature_map_3, self.anchors[0:3])]\n        reorg_results = [self.reorg_layer(feature_map, anchors) for (feature_map, anchors) in feature_map_anchors]\n\n        def _reshape(result):\n            x_y_offset, boxes, conf_logits, prob_logits = result\n            grid_size = x_y_offset.get_shape().as_list()[:2] if self.use_static_shape else tf.shape(x_y_offset)[:2]\n            boxes = tf.reshape(boxes, [-1, grid_size[0] * grid_size[1] * 3, 4])\n            conf_logits = tf.reshape(conf_logits, [-1, grid_size[0] * grid_size[1] * 3, 1])\n            prob_logits = tf.reshape(prob_logits, [-1, grid_size[0] * grid_size[1] * 3, self.class_num])\n\n            return boxes, conf_logits, prob_logits\n\n        boxes_list, confs_list, probs_list = [], [], []\n        for result in reorg_results:\n            boxes, conf_logits, prob_logits = _reshape(result)\n            confs = tf.sigmoid(conf_logits)\n            probs = tf.sigmoid(prob_logits)\n            boxes_list.append(boxes)\n            confs_list.append(confs)\n            probs_list.append(probs)\n\n        boxes = tf.concat(boxes_list, axis=1)\n        confs = tf.concat(confs_list, axis=1)\n        probs = tf.concat(probs_list, axis=1)\n\n        center_x, 
center_y, width, height = tf.split(boxes, [1, 1, 1, 1], axis=-1)\n x_min = center_x - width / 2\n y_min = center_y - height / 2\n x_max = center_x + width / 2\n y_max = center_y + height / 2\n\n boxes = tf.concat([x_min, y_min, x_max, y_max], axis=-1)\n\n return boxes, confs, probs\n \n def loss_layer(self, feature_map_i, y_true, anchors):\n '''\n 损失函数,用于反向传播算法\n '''\n grid_size = tf.shape(feature_map_i)[1:3]\n ratio = tf.cast(self.img_size / grid_size, tf.float32)\n N = tf.cast(tf.shape(feature_map_i)[0], tf.float32)\n\n x_y_offset, pred_boxes, pred_conf_logits, pred_prob_logits = self.reorg_layer(feature_map_i, anchors)\n\n object_mask = y_true[..., 4:5]\n\n ignore_mask = tf.TensorArray(tf.float32, size=0, dynamic_size=True)\n def loop_cond(idx, ignore_mask):\n return tf.less(idx, tf.cast(N, tf.int32))\n def loop_body(idx, ignore_mask):\n # shape: [13, 13, 3, 4] & [13, 13, 3] ==> [V, 4]\n # V: num of true gt box of each image in a batch\n valid_true_boxes = tf.boolean_mask(y_true[idx, ..., 0:4], tf.cast(object_mask[idx, ..., 0], 'bool'))\n # shape: [13, 13, 3, 4] & [V, 4] ==> [13, 13, 3, V]\n iou = self.box_iou(pred_boxes[idx], valid_true_boxes)\n # shape: [13, 13, 3]\n best_iou = tf.reduce_max(iou, axis=-1)\n # shape: [13, 13, 3]\n ignore_mask_tmp = tf.cast(best_iou < 0.5, tf.float32)\n # finally will be shape: [N, 13, 13, 3]\n ignore_mask = ignore_mask.write(idx, ignore_mask_tmp)\n return idx + 1, ignore_mask\n _, ignore_mask = tf.while_loop(cond=loop_cond, body=loop_body, loop_vars=[0, ignore_mask])\n ignore_mask = ignore_mask.stack()\n # shape: [N, 13, 13, 3, 1]\n ignore_mask = tf.expand_dims(ignore_mask, -1)\n\n # shape: [N, 13, 13, 3, 2]\n pred_box_xy = pred_boxes[..., 0:2]\n pred_box_wh = pred_boxes[..., 2:4]\n\n # shape: [N, 13, 13, 3, 2]\n true_xy = y_true[..., 0:2] / ratio[::-1] - x_y_offset\n pred_xy = pred_box_xy / ratio[::-1] - x_y_offset\n\n # numerical range: 0 ~ 1\n # shape: [N, 13, 13, 3, 2]\n true_tw_th = y_true[..., 2:4] / anchors\n pred_tw_th = pred_box_wh / anchors\n # for numerical stability\n true_tw_th = tf.where(condition=tf.equal(true_tw_th, 0),\n x=tf.ones_like(true_tw_th), y=true_tw_th)\n pred_tw_th = tf.where(condition=tf.equal(pred_tw_th, 0),\n x=tf.ones_like(pred_tw_th), y=pred_tw_th)\n true_tw_th = tf.log(tf.clip_by_value(true_tw_th, 1e-9, 1e9))\n pred_tw_th = tf.log(tf.clip_by_value(pred_tw_th, 1e-9, 1e9))\n\n # shape: [N, 13, 13, 3, 1]\n box_loss_scale = 2. 
- (y_true[..., 2:3] / tf.cast(self.img_size[1], tf.float32)) * (y_true[..., 3:4] / tf.cast(self.img_size[0], tf.float32))\n\n # mix_up weight\n # [N, 13, 13, 3, 1]\n mix_w = y_true[..., -1:]\n # shape: [N, 13, 13, 3, 1]\n xy_loss = tf.reduce_sum(tf.square(true_xy - pred_xy) * object_mask * box_loss_scale * mix_w) / N\n wh_loss = tf.reduce_sum(tf.square(true_tw_th - pred_tw_th) * object_mask * box_loss_scale * mix_w) / N\n\n # shape: [N, 13, 13, 3, 1]\n conf_pos_mask = object_mask\n conf_neg_mask = (1 - object_mask) * ignore_mask\n conf_loss_pos = conf_pos_mask * tf.nn.sigmoid_cross_entropy_with_logits(labels=object_mask, logits=pred_conf_logits)\n conf_loss_neg = conf_neg_mask * tf.nn.sigmoid_cross_entropy_with_logits(labels=object_mask, logits=pred_conf_logits)\n conf_loss = conf_loss_pos + conf_loss_neg\n if self.use_focal_loss:\n alpha = 1.0\n gamma = 2.0\n focal_mask = alpha * tf.pow(tf.abs(object_mask - tf.sigmoid(pred_conf_logits)), gamma)\n conf_loss *= focal_mask\n conf_loss = tf.reduce_sum(conf_loss * mix_w) / N\n\n # shape: [N, 13, 13, 3, 1]\n if self.use_label_smooth:\n delta = 0.01\n label_target = (1 - delta) * y_true[..., 5:-1] + delta * 1. / self.class_num\n else:\n label_target = y_true[..., 5:-1]\n class_loss = object_mask * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_target, logits=pred_prob_logits) * mix_w\n class_loss = tf.reduce_sum(class_loss) / N\n\n return xy_loss, wh_loss, conf_loss, class_loss\n \n\n def box_iou(self, pred_boxes, valid_true_boxes):\n '''\n 计算两个坐标框的重叠度(iou)\n '''\n # [13, 13, 3, 2]\n pred_box_xy = pred_boxes[..., 0:2]\n pred_box_wh = pred_boxes[..., 2:4]\n\n # shape: [13, 13, 3, 1, 2]\n pred_box_xy = tf.expand_dims(pred_box_xy, -2)\n pred_box_wh = tf.expand_dims(pred_box_wh, -2)\n\n # [V, 2]\n true_box_xy = valid_true_boxes[:, 0:2]\n true_box_wh = valid_true_boxes[:, 2:4]\n\n # [13, 13, 3, 1, 2] & [V, 2] ==> [13, 13, 3, V, 2]\n intersect_mins = tf.maximum(pred_box_xy - pred_box_wh / 2.,\n true_box_xy - true_box_wh / 2.)\n intersect_maxs = tf.minimum(pred_box_xy + pred_box_wh / 2.,\n true_box_xy + true_box_wh / 2.)\n intersect_wh = tf.maximum(intersect_maxs - intersect_mins, 0.)\n\n # shape: [13, 13, 3, V]\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n # shape: [13, 13, 3, 1]\n pred_box_area = pred_box_wh[..., 0] * pred_box_wh[..., 1]\n # shape: [V]\n true_box_area = true_box_wh[..., 0] * true_box_wh[..., 1]\n # shape: [1, V]\n true_box_area = tf.expand_dims(true_box_area, axis=0)\n\n # [13, 13, 3, V]\n iou = intersect_area / (pred_box_area + true_box_area - intersect_area + 1e-10)\n\n return iou\n\n \n def compute_loss(self, y_pred, y_true):\n '''\n 计算不同的损失函数,包括:总损失,xy坐标损失,wh长/高损失,置信度损失和类别损失\n '''\n loss_xy, loss_wh, loss_conf, loss_class = 0., 0., 0., 0.\n anchor_group = [self.anchors[6:9], self.anchors[3:6], self.anchors[0:3]]\n\n # calc loss in 3 scales\n for i in range(len(y_pred)):\n result = self.loss_layer(y_pred[i], y_true[i], anchor_group[i])\n loss_xy += result[0]\n loss_wh += result[1]\n loss_conf += result[2]\n loss_class += result[3]\n total_loss = loss_xy + loss_wh + loss_conf + loss_class\n return [total_loss, loss_xy, loss_wh, loss_conf, loss_class]\n","repo_name":"q629988171/yolov3-vehicle-plate-recognition-","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":13333,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"90"} +{"seq_id":"18373939919","text":"from collections import deque\n\n\ndef perm(n, k, a=1, mod=10**9+7):\n if n < k or 
+{"seq_id":"18373939919","text":"from collections import deque\n\n\ndef perm(n, k, a=1, mod=10**9+7):\n    if n < k or k < 0:\n        a = 0\n    else:\n        for i in range(n, n-k, -1):\n            a = a*i % mod\n    \n    return a\n\n\nMOD = 10**9 + 7\nN, K = map(int, input().split())\ntree = [[] for i in range(N)]\nfor i in range(N-1):\n    a, b = map(int, input().split())\n    tree[a-1].append(b-1)\n    tree[b-1].append(a-1)\n\n\nans = K\nd = deque([[0, -1]])\nwhile d:\n    node, parent = d.pop()\n    children = tree[node]\n    if parent == -1:\n        ans = ans*perm(K-1, len(children)) % MOD\n    else:\n        ans = ans*perm(K-2, len(children)-1) % MOD\n    \n    for child in children:\n        if child == parent:\n            continue\n        \n        d.append([child, node])\n\nprint(ans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02985/s756017553.py","file_name":"s756017553.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18357727289","text":"from heapq import heappush, heappop\n \nN,M = map(int,input().split())\nA = [0]*N\nB = [0]*N\nfor i in range(N):\n    A[i],B[i] = map(int,input().split())\nAB = [[] for _ in range(M+1)] # B values grouped by A\nfor a,b in zip(A,B):\n    if a <= M:\n        AB[a].append(b)\n \nq = []\nanswer = 0\n\nfor day in AB:\n    # push all candidates (including ones with shorter limits) so they stay selectable, then pop only the best one\n    for pay in day:\n        heappush(q,-pay)\n    if len(q) > 0:\n        answer += -heappop(q)\n \nprint(answer)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02948/s665654701.py","file_name":"s665654701.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"19334694785","text":"from django.shortcuts import render,HttpResponse, HttpResponseRedirect\nfrom django.template import loader\nfrom django.conf import settings\nfrom rameniaapp.forms import AddNoodleForm\nfrom rameniaapp.models import Noodle, NoodleImage, Edit, Tag\nfrom .edit_util import apply_change\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\n@login_required(login_url=\"/app/login\")\ndef ramen_create_view(request):\n    '''View for handling add noodle form'''\n    form = AddNoodleForm()\n    # If this is a POST request then process the Form data\n    if request.method == 'POST':\n        user = request.user\n        # Create a form instance and populate it with data from the request of the user\n        form = AddNoodleForm(request.POST or None, request.FILES)\n        # Check if the form is valid:\n        if form.is_valid():\n            print(form.cleaned_data)\n            # Helps with clean format\n            metadata = { \"Name\": form.cleaned_data[\"name\"], \"Description\": form.cleaned_data[\"description\"], \\\n            \"Flavor\": form.cleaned_data[\"flavor\"], \\\n            \"Manufacturer\": form.cleaned_data[\"manufacturer\"], \\\n            \"Released\": form.cleaned_data[\"released\"], \"Line\": form.cleaned_data[\"line\"], \\\n            \"Tags\": form.cleaned_data[\"tags\"] }\n            edit = Edit(editor = user, change = metadata)\n            # Standard code to get file from request and set as edit's image\n            if request.FILES:\n                file = list(request.FILES.keys())[0]\n                edit.image = request.FILES[file]\n            edit.save()\n            messages.add_message(request, messages.SUCCESS, \"Entry submitted successfully- please wait for moderator approval\")\n            # redirect to a new URL:\n            return HttpResponseRedirect('/app/')\n    # fall through and re-render the form for GET requests or invalid submissions\n    template = loader.get_template('add_ramen.html')\n    context = {\n        'form': form,\n    }\n    return HttpResponse(template.render(context, request))\r\n\r\n","repo_name":"awlane/ramenia","sub_path":"rameniaapp/views/add_ramen.py","file_name":"add_ramen.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"72497082537","text":"from flask import Blueprint, render_template, request, redirect, url_for, flash, abort\nfrom flask_login import login_required, current_user\nfrom . import db\nfrom .models import User, Child, Measurement\nfrom .plots import setup_figure, get_data, get_reference_data, colors\nimport json\nfrom bokeh.embed import json_item\nfrom bokeh.models import Span, Label, ColumnDataSource\nfrom bokeh.resources import CDN\nimport pandas as pd\nimport sqlalchemy.orm.exc\n\nfrom datetime import datetime\n\nmain = Blueprint('main', __name__)\n\nPLOTS = {\"weight\": \"Weight [g]\",\n         \"height\": \"Height [cm]\",\n         \"sleep\": \"Time [h]\",\n         \"amount\": \"Amount [ml]\"}\n\n\n@main.route('/')\ndef index():\n    return render_template('index.html', plots=PLOTS)\n\n\n@main.errorhandler(404)\ndef page_not_found(error):\n    return render_template('404.html', title='404', plots=PLOTS), 404\n\n\n@main.route('/profile')\n@login_required\ndef profile():\n    return render_template('profile.html', name=current_user.name, children=current_user.children, plots=PLOTS)\n\n\n@main.route('/profile', methods=[\"POST\"])\n@login_required\ndef profile_post():\n    name = request.form.get('name')\n    date = pd.to_datetime(request.form.get('date'), errors=\"raise\")\n    gender = request.form.get('gender').lower()\n    color = request.form.get('color')\n    new_child = Child(name=name, birth_date=date,\n                      gender=gender, color=color)\n    current_user.children.append(new_child)\n    db.session.commit()\n    return redirect(url_for('main.profile'))\n\n@main.route('/profile/edit/<id>')\n@login_required\ndef profile_edit(id):\n    try:\n        child = current_user.children.filter_by(id=id).one()\n    except sqlalchemy.orm.exc.NoResultFound:\n        abort(404)\n    return render_template('profile_edit.html', child=child, plots=PLOTS)\n\n@main.route('/profile/edit/<id>', methods=[\"POST\"])\n@login_required\ndef profile_edit_post(id):\n    try:\n        child = current_user.children.filter_by(id=id).one()\n    except sqlalchemy.orm.exc.NoResultFound:\n        abort(404)\n    child.name = request.form.get('name')\n    child.birth_date = pd.to_datetime(request.form.get('date'), errors=\"raise\")\n    child.gender = request.form.get('gender').lower()\n    child.color = request.form.get('color')\n    print(child)\n    db.session.commit()\n    return redirect(url_for('main.profile'))\n\n\n@main.route('/plots')\n@login_required\ndef plots():\n    return render_template('plots.html', plots=PLOTS, resources=CDN.render())\n\n\n@main.route('/plots/<y>')\n@login_required\ndef plot(y):\n    if y not in PLOTS:\n        abort(404)\n    name = request.args.get(\"name\")\n    file_name = request.args.get(\"file_name\")\n    if name and file_name:\n        df = pd.read_csv(request.args.get(\"file_name\"))\n        df[\"Date\"] = pd.to_datetime(df[\"Date\"])\n        child = current_user.children.filter_by(name=name).one()\n        print(df)\n        child.measurements.extend(\n            [Measurement(m_type=y, date=t[1], value=t[2]) for t in df.itertuples()])\n        db.session.commit()\n    return render_template('plot.html', plot=y, resources=CDN.render(), children=current_user.children, plots=PLOTS)\n\n\n@main.route('/plots/<y>', methods=[\"POST\"])\n@login_required\ndef plot_post(y):\n    if y not in PLOTS:\n        abort(404)\n    try:\n        date = pd.to_datetime(request.form.get('date'), errors=\"raise\")\n        value = float(request.form.get('value'))\n        child_name = request.form.get('child')\n        child = current_user.children.filter_by(name=child_name).one()\n    except ValueError:\n        flash('Please enter a date and a number')\n    else:\n        child.measurements.append(Measurement(\n            m_type=y, date=date, value=value))\n        db.session.commit()\n    return redirect(url_for('main.plot', y=y))\n\n\ndef line_plot(p, y, child):\n    ds = child.get_data(y)\n    p.circle(x=\"date\", y=y, size=10, color=child.color,\n             source=ds, legend_label=child.name)\n    p.line(x=\"date\", y=y, color=child.color,\n           line_width=3, line_dash=\"dashed\", source=ds)\n\n\ndef reference(p, y):\n    try:\n        ref = get_reference_data(y)\n    except FileNotFoundError:\n        return\n    options = dict(x=\"Date\", color=\"gray\", source=ref)\n    p.line(y=\"P50\", line_width=2, legend_label=\"Median\", **options)\n    p.line(y=\"P5\", line_width=1, line_dash=\"dotted\", **options)\n    p.line(y=\"P10\", line_width=1, line_dash=\"dashed\", **options)\n    p.line(y=\"P90\", line_width=1, line_dash=\"dashed\",\n           legend_label=\"90%\", **options)\n    p.line(y=\"P95\", line_width=1, line_dash=\"dotted\",\n           legend_label=\"95%\", **options)\n\n\n@main.route('/bokeh/<y>')\n@login_required\ndef plot_bokeh(y):\n    if y not in PLOTS:\n        abort(404)\n    p = setup_figure(\"Date\", PLOTS[y])\n    for child in current_user.children:\n        line_plot(p, y, child)\n    reference(p, y)\n    p.legend.location = \"top_left\"\n    return json.dumps(json_item(p, y))\n","repo_name":"graipher/baby_dashboard","sub_path":"project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"5599285784","text":"import sys\n\nstring = sys.stdin.readline().strip()\n\nstack = []\n# answer: number of laser-cut pieces + number of sticks\ncnt_lazer = 0 # pieces cut by lasers\ncnt_stick = 0 # number of original sticks\nbefore = \"\" # when ')' appears: remember the previous char to tell a laser from a stick end\n\n# string = \"()(((()())(())()))(())\"\nfor char in string:\n\n    if char == '(':\n        stack.append(char)\n        before = char\n        continue\n\n    # from here on, char == ')'\n    # decide whether this is a laser or the end of a stick\n    stack.pop()\n\n    if before == '(': # laser\n        cnt_lazer += len(stack)\n    \n    else: # stick end\n        cnt_stick += 1\n    \n    before = char\n\nprint(cnt_lazer + cnt_stick)\n\n","repo_name":"jinhyung-noh/algorithm-ps","sub_path":"BaekJoon/10799_쇠막대기.py","file_name":"10799_쇠막대기.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"28104592507","text":"iN1 = int(input(\"Ingrese el primer numero: \"))\niN2 = int(input(\"Ingrese el segundo numero: \"))\n\niCon1 = 0\niCon2 = 0\nfor i in range(1, iN1):\n    if iN1 % i == 0:\n        iCon1 = iCon1 + i\n\nfor h in range(1, iN2):\n    if iN2 % h == 0:\n        iCon2 = iCon2 + h\n\nif iN1 == iCon2 and iN2 == iCon1:\n    print(f\"Los numeros {iN1} y {iN2} son amigos\")\nelse:\n    print(f\"Los numeros {iN1} y {iN2} no son amigos\")","repo_name":"AlejandroP75/CampusAP75","sub_path":"Python/Software Review/04-Ejercicio7_Estructuras_Repetitivas_For.py","file_name":"04-Ejercicio7_Estructuras_Repetitivas_For.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"31610351077","text":"from selenium.webdriver import Chrome\nfrom selenium.webdriver import ChromeOptions\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.common.by import By\nimport time\nfrom amazoncaptcha import AmazonCaptcha\nimport random\nimport pandas as pd\n\n\nclass Spider:\n\n    asinSet = None\n    index = 0\n    curParseCount = 0\n\n    # where the scraped pages are stored\n    saveFileUrl='./movie_crawler'\n\n    # start and end indices of the crawl\n    startIndex=0\n    endIndex=0\n\n    def getNextUrl(self):\n        # look for the next URL that still needs crawling\n        while self.asinSet.iloc[self.index].hasDeal and self.index < self.endIndex:\n            self.index += 1\n\n        # crawl finished\n        if self.index >= self.asinSet.shape[0]:\n            print('====全部数据集成功获取====')\n            return -1\n\n        print(\"{0}:{1}\".format(self.index,\n                               self.asinSet.iloc[self.index].asinID))\n\n        return 'https://www.amazon.com/-/zh/dp/'+self.asinSet.iloc[self.index].asinID\n\n    def __init__(self):\n        options = ChromeOptions()\n        prefs = {\n            'profile.default_content_setting_values': {\n                'images': 2,\n            }\n        }\n        options.add_experimental_option('prefs', prefs)\n        # options.add_argument(\"--headless\")\n        self.driver = Chrome(options=options)\n\n        # read the input file\n        self.asinSet = pd.read_csv('./movie_crawler/asin.csv')\n        \n        self.index=self.startIndex\n        if self.endIndex == 0:\n            self.endIndex=self.asinSet.shape[0]\n        \n        self.nextUrl = self.getNextUrl()\n\n        while self.nextUrl != -1:\n            self.getNextPage()\n            self.nextUrl = self.getNextUrl()\n            # save the csv file after every 10 crawls\n            self.curParseCount += 1\n            if self.curParseCount >= 10:\n                self.saveAsinSet()\n                self.curParseCount = 0\n\n    '''\n    Process a page\n    '''\n\n    def getNextPage(self):\n        self.wait = WebDriverWait(self.driver, 0.8, 0.5)\n        # visit the page\n        self.driver.get(self.nextUrl)\n        time.sleep(random.random())\n        # mark as already fetched\n        self.asinSet.iloc[self.index, 1] = True\n        self.total = self.driver.page_source\n        try:\n            title = self.driver.find_element_by_xpath(\n                '//span[@id=\"productTitle\"]')\n            print(title.text.replace('\\n', ''))\n            with open(self.saveFileUrl+self.nextUrl[-10:]+'.txt', 'w', encoding='utf-8') as fp:\n                fp.write(self.total)\n        except:\n            # handle the captcha\n            self.handleCaptcha()\n        \n\n    '''\n    Handle the captcha\n    '''\n\n    def handleCaptcha(self):\n        try:\n            self.total = self.driver.page_source\n            soup = BeautifulSoup(self.total, features=\"lxml\")\n            src = soup.find(\n                class_=\"a-row a-text-center\").findChild(name=\"img\").attrs[\"src\"]\n            captcha = AmazonCaptcha.fromlink(src)\n            solution = captcha.solve(keep_logs=True)\n            print(solution)\n            # type the solution into the captcha input box and submit\n            input_element = self.driver.find_element_by_id(\"captchacharacters\")\n            input_element.send_keys(solution)\n\n            button = self.driver.find_element_by_xpath(\"//button\")\n            button.click()\n\n            print(\"已解决验证码√\")\n\n            # try this one again\n            self.asinSet.iloc[self.index, 1] = False\n        except:\n            #404\n            return\n\n    def saveAsinSet(self):\n        print('====存储asinSet.csv====')\n        self.asinSet.to_csv('./movie_crawler/asin.csv', index=False)\n\n'''\ndef run():\n    spider = Spider()\n\n\nif __name__ == \"__main__\":\n    run()\n'''","repo_name":"Shotray/DataWarehouse","sub_path":"ETL/movie_crawler/selenium.py","file_name":"selenium.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"40442188738","text":"import json\nfrom flask import Flask, request, jsonify \nimport requests\nimport objectpath\nimport os\nfrom googletrans import Translator\n\napp = Flask(__name__) \nport = int(os.getenv(\"PORT\", 9009)) # output port definition\n\n@app.route('/casos', methods=['POST']) \ndef index():\n\n\tpostRece = json.loads(request.get_data())\n\tlanguage = str(postRece['conversation']['language'])\n\tpais = str(postRece['conversation']['memory']['pais']['raw']).capitalize()\n\n\ttranslator = Translator()\n\ttranslated = translator.translate(pais, src='es', dest='en')\n\tcountry = translated.text.capitalize()\n\n\tURL = \"https://services1.arcgis.com/0MSEUqKaxRlEPj5g/arcgis/rest/services/ncov_cases/FeatureServer/1/query?f=json&where=(Confirmed > 0) AND (Deaths>0) AND (Recovered > 0) AND (Country_Region='{}')&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&orderByFields=Deaths desc,Country_Region asc,Province_State asc&outSR=102100&resultOffset=0&resultRecordCount=250&cacheHint=true\".format(country)\n\tHEADERS = {'Content-Type': 'application/json'}\n\n\ts = requests.session()\n\n\tr = s.get(url=URL,headers=HEADERS)\n\n\tprint(r)\n\n\tcountry_exists = r.json()['features']\n\n\tif country_exists is not None and len(country_exists)>=1:\n\n\t\tcases = str(r.json()['features'][0]['attributes']['Confirmed'])\n\t\tdeaths = str(r.json()['features'][0]['attributes']['Deaths'])\n\t\trecovered = str(r.json()['features'][0]['attributes']['Recovered'])\n\n\t\tif language == 'es':\n\n\t\t\tmessageCAI = 'La cantidad de casos confirmados de COVID19 en {} son: {}, contanto con {} recuperados y {} fallecidos.'.format(pais,cases,recovered,deaths) \n\t\t\tprint(messageCAI)\n\n\t\telse:\n\t\t\tmessageCAI = 'The total confirmed cases of COVID19 in {} are: {}, with {} recovered and {} deaths.'.format(country,cases,recovered,deaths)\n\t\t\tprint(messageCAI)\n\n\n\t\treturn jsonify( \n\t\t\t status=200, \n\t\t\t replies=[{'type': 'text','content': messageCAI}],\n\t\t\t conversation= postRece['conversation']\n\t\t\t )\n\telse:\n\n\t\tif language == 'es':\n\t\t\tmessageCAI = 'Aún no se cuentan con casos confirmados para {}, o el nombre del país es incorrecto.'.format(pais)\n\n\t\telse:\n\t\t\tmessageCAI = 'There is not confirmed cases yet in {}, or the country name is incorrect.'.format(country)\n\n\t\treturn jsonify( \n\t\t\t status=200, \n\t\t\t replies=[{'type': 'text','content': messageCAI}],\n\t\t\t conversation= postRece['conversation']\n\t\t\t )\n\n\n@app.route('/errors', methods=['POST']) \ndef errors(): \n    json.loads(request.get_data())\n    return jsonify(status=200) \n \napp.run(host='0.0.0.0', port=port) # important: set the host for deployment on the platform","repo_name":"mariajosemq/covid19caibot","sub_path":"covid19cai.py","file_name":"covid19cai.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"74324819817","text":"n = int(input())\r\nprice_lst = []\r\n\r\nfor _ in range(n):\r\n    a,b,c = map(int, input().split())\r\n    if a == b == c:\r\n        s = 10000 + a * 1000\r\n    elif a != b and a != c and b != c:\r\n        s = max(a,b,c) * 100\r\n    else:\r\n        if a == b or a == c:\r\n            s = 1000 + a * 100\r\n        else:\r\n            s = 1000 + b * 100\r\n    price_lst.append(s)\r\n\r\nprint(max(price_lst))","repo_name":"Siabel/Beakjoon_solving","sub_path":"백준/Bronze/2476. 주사위 게임/주사위 게임.py","file_name":"주사위 게임.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"34553894484","text":"\"\"\"\nCreated by Piotr R. 2018-2019\nApplication is searching for the nearest measurement point (MPoint) of the pollution.\nIt is searching a place from the arguments or from user input\nProvides information about the place (latitude and longitude) and measures from the nearest point.\nrunning script without arguments will lead to menu which user can choose search for MPoints\nFirst arg - place, city where it should search for MPoint. Argument should be by one word or with quotation marks\nExample 1: main.py Gdansk\nExample 2: main.py \"Gdansk, dluga\"\nExample 3: main.py \"Sopot, Haffnera 20\"\nSecond arg - interval to repeat measurement request (in seconds)\nExample 1: main.py Gdansk, 10\nExample 2: main.py \"Gdansk, dluga\", 60\nExample 3: main.py \"Sopot, Haffnera 20\", 900\n\n\"\"\"\nimport sys\n\nimport advanced\nimport settings\nimport simple\nimport variables\n\nz = 0\nif len(sys.argv) == 1:\n    print(variables.menuIntro)\n    print(variables.menuOptions)\n    while z != 4:\n        try:\n            z = int(input(variables.yourChoice))\n        except ValueError:\n            print(\"Podana wartość nie jest liczbą\")\n\n        if z == 1:\n            print(simple.simpleChose)\n            simple.GetData()\n        elif z == 2:\n            print(advanced.advancedChose)\n        elif z == 3:\n            print(settings.settingChose)\n        elif z == 4:\n            print(variables.menuEnd)\n            break\n        else:\n            print(\"nie wybrano poprawnej cyfry, wybierz jedną z opcji\")\n\nelse:\n    arguments = sys.argv[1:]\n    try:\n        simple.GetData(*arguments)\n    except Exception:\n        print(\"niepoprawne parametry\")\n\n","repo_name":"gdaPythonProjects/smogDetector","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"35524440956","text":"# Python 3.7\n\n\ndef make_recipes(elf1, elf2, recipes):\n    # type: (int, int, list) -> list\n    combo = list(map(int, list(str(recipes[elf1] + recipes[elf2]))))\n    # print(f\"combo: {combo}, elf1: {elf1}, elf2: {elf2}, recipes: {recipes}\")\n    return combo\n\n\ndef run_elves_1(num: int) -> str:\n    scores = [3, 7]\n    elf1 = 0\n    elf2 = 1\n    while len(scores) <= (num+10):\n        new = make_recipes(elf1, elf2, scores)\n        scores.extend(new)\n        elf1 = (elf1 + scores[elf1] + 1) % len(scores)\n        elf2 = (elf2 + scores[elf2] + 1) % len(scores)\n\n    return ''.join(map(str, scores[num:num+10]))\n\n\n# Check that run_elves works with the examples\n# assert run_elves_1(9) == \"5158916779\"\n# assert run_elves_1(5) == \"0124515891\"\n# assert run_elves_1(18) == \"9251071085\"\n# assert run_elves_1(2018) == \"5941429882\"\n\np1 = run_elves_1(793031)\nprint(f\"Part 1 - {p1}\")  # 4910101614\n\n\ndef run_elves_2(num_str: str) -> int:\n    scores = [3, 7]\n    score_str = ''.join(map(str, scores))\n    num_len = len(num_str)\n    elf1 = 0\n    elf2 = 1\n    while True:\n        new = make_recipes(elf1, elf2, scores)\n        scores.extend(new)\n        score_str += ''.join(map(str, new))\n        elf1 = (elf1 + scores[elf1] + 1) % len(scores)\n        elf2 = (elf2 + scores[elf2] + 1) % len(scores)\n\n        if score_str[-num_len:] == num_str:\n            return len(scores) - len(num_str)\n\n        if score_str[-num_len-1:-1] == num_str:\n            return len(scores) - len(num_str) - 1\n\n\n# assert run_elves_2(\"51589\") == 9\n# assert run_elves_2(\"01245\") == 5\n# assert run_elves_2(\"92510\") == 18\n# assert run_elves_2(\"59414\") == 2018\n\np2 = run_elves_2(\"793031\")\nprint(f\"Part 2 - {p2}\")  # 20253137\n","repo_name":"urianchang/Algorithms","sub_path":"AdventOfCode/2018/14_chocolate_charts/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"90"}
+{"seq_id":"40389489025","text":"def make_sandwich(*foods):\n    for food in foods:\n        print(\"food: \" + str(food))\n    print(\"add \" + str(foods) + \" to sandwich\")\n\n\ndef get_foods():\n    foods = []\n    while True:\n        food = input(\"Please input food which you want to add to this sandwich: \")\n        if food == \"quit\":\n            break\n        foods.append(food)\n    return foods\n\nmake_sandwich('apple', 'banana')\nfor idx in range(0, 3):\n    foods = get_foods()\n    make_sandwich(*foods)  # unpack the list into separate arguments\n\n\n","repo_name":"yuansx/project","sub_path":"python/test_file/sandwich.py","file_name":"sandwich.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"15801527805","text":"# -*- coding: utf-8 -*-\n\"\"\"\n1086. High Five\n\nGiven a list of the scores of different students, items, where items[i] = [IDi, scorei] represents one score from\na student with IDi, calculate each student's top five average.\n\nReturn the answer as an array of pairs result, where result[j] = [IDj, topFiveAveragej] represents\nthe student with IDj and their top five average. Sort result by IDj in increasing order.\n\nA student's top five average is calculated by taking the sum of their top five scores and dividing it by 5\nusing integer division.\n\nConstraints:\n\n1 <= items.length <= 1000\nitems[i].length == 2\n1 <= IDi <= 1000\n0 <= scorei <= 100\nFor each IDi, there will be at least five scores.\n\"\"\"\n\n\nclass Solution:\n    def highFive(self, items):\n        cache = {}\n        for sid, score in items:\n            if sid not in cache:\n                cache[sid] = [score]\n            else:\n                cache[sid].append(score)\n\n        return [[sid, sum(sorted(cache[sid])[-5:]) // 5] for sid in sorted(cache.keys())]\n","repo_name":"tjyiiuan/LeetCode","sub_path":"solutions/python3/problem1086.py","file_name":"problem1086.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"43597757725","text":"from __future__ import absolute_import\n#!/usr/bin/env python\nimport sys\nimport unittest\nsys.path.append('xypath')\nimport xypath\n\nimport messytables\nfrom os.path import dirname, abspath, join as pjoin, splitext\n\nFIXTURE_DIR = pjoin(abspath(dirname(__file__)), '..', 'fixtures')\n\ndef get_extension(filename):\n    \"\"\"\n    >>> get_extension('/foo/bar/test.xls')\n    'xls'\n    \"\"\"\n    return splitext(filename)[1].strip('.')\n\ndef get_fixture_filename(name):\n    return pjoin(FIXTURE_DIR, name)\n\ndef get_messytables_fixture(name, table_index=0, memoized={}):\n    \"\"\"\n    Memoized function for loading fixtures\n    \"\"\"\n\n    if name not in memoized:\n        with open(name, \"rb\") as fd:\n            messy = messytables.any.any_tableset(fd)\n            messytable = messy.tables[table_index]\n            memoized[name] = (messy, xypath.Table.from_messy(messytable))\n\n    return memoized[name]\n\nclass TCore(unittest.TestCase):\n    @classmethod\n    def setup_class(cls):\n        cls.wpp_filename = get_fixture_filename(\"wpp.xls\")\n        cls.messy, cls.table = get_messytables_fixture(cls.wpp_filename)\n\n    def setUp(self):\n        pass\n\n    # A special version of assertRaises() that tests both the\n    # exception class, and the exception message. Based on:\n    # http://stackoverflow.com/questions/8672754\n    def assertRaisesWithMessage(self, func, exception_type, msg, *args, **kwargs):\n        try:\n            func(*args, **kwargs)\n            self.fail('No exception was raised')\n        except Exception as inst:\n            self.assertIsInstance(inst, exception_type)\n            self.assertEqual(str(inst), msg)\n\nclass TMissing(unittest.TestCase):\n    @classmethod\n    def setup_class(cls):\n        cls.wpp_filename = get_fixture_filename(\"missingcell.csv\")\n        cls.messy, cls.table = get_messytables_fixture(cls.wpp_filename)\n\n    def setUp(self):\n        pass\n","repo_name":"sensiblecodeio/xypath","sub_path":"test/tcore.py","file_name":"tcore.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"90"}
+{"seq_id":"17241710850","text":"# LeetCode 937 Reorder Log Files\nfrom typing import List\n\nclass Solution:\n    def reorderLogFiles(self, logs: List[str]) -> List[str]:\n        logs_str = []\n        logs_num = []\n\n        for l in logs:\n            l_split = l.split(\" \")\n            identifier = l_split.pop(0)\n            s = \"\".join(l_split)\n\n            if s.isalpha():\n                logs_str.append(l)\n            else:\n                logs_num.append(l)\n\n        logs_str.sort(key=lambda x: (x.split()[1:], x.split()[0]))\n\n        return logs_str + logs_num\n\nsol = Solution()\nprint(sol.reorderLogFiles([\"dig1 8 1 5 1\",\"let1 art can\",\"dig2 3 6\",\"let2 own kit dig\",\"let3 art zero\"]))","repo_name":"luboolu/PythonAlgorithmInterview","sub_path":"6장_문자열조작/3_로그파일재정렬/solution_my.py","file_name":"solution_my.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"40330123335","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nfrom keras.models import Sequential, tf, Model\nfrom keras.layers import Dense, Flatten, Embedding, Input, Bidirectional, Dropout, LSTM, GRU, TimeDistributed, RepeatVector\nfrom keras.optimizers import adam_v2\nfrom keras.losses import MSE, mean_squared_error\nfrom keras import layers, activations, losses, metrics, optimizers\nfrom keras.callbacks import EarlyStopping\n\nimport pickle as pkl\nimport os\nimport numpy as np\n\n# X_train_pada_seq.shape\n# (28840, 999)\n\ninput_length = 312\n\n# https://www.5axxw.com/questions/content/6ymxhz\n\n# An autoencoder encodes the input x into new features y, and aims to reconstruct the original input x from y.\n# We want the autoencoder to learn the features of normal data, so that at inference time input and output are similar. For anomalies, the model has never learned such features, so input and output will differ.\n# During training, only unlabeled normal data is used.\n# The advantage of this approach is that it allows unsupervised learning.\n# In anomaly detection, negative (anomalous) samples are usually scarce and sometimes depend on manual labels, so this is a class-imbalance problem.\n# Noise: anomalies and noise are sometimes hard to tell apart. A point in a sparse region of the data, very different from the rest, can be judged anomalous; but if many points are distributed around it, it is hard to single out.\n\nwith open(os.path.join(os.path.split(EXCEL_DATA_FILE)[0], 'jd_api_train_dataset.pkl'), 'rb') as f:\n    train_dataset = pkl.load(f)\n    x_train = train_dataset['x_train']\n    y_train = train_dataset['y_train']\n\n# Only the majority class is used for training; minority-class samples are excluded from the training set.\nx_test, y_test = [], []\nX, Y = [], []\n\nfor x, y in zip(x_train, y_train):\n    if y[0] == 1:\n        x_test.append(x)\n        y_test.append(y[0])\n    else:\n        X.append(x)\n        Y.append(y[0])\n\nX = np.array(X)\nY = np.array(Y)\nx_test = np.array(x_test)\ny_test = np.array(y_test)\n\n\nimport keras.backend as K\n\ndef reshape_squared_error(y_true, y_pred):\n    '''Custom loss function'''\n    y_true = K.mean(y_true, axis=1)\n    y_pred = K.mean(y_pred, axis=1)\n    return mean_squared_error(y_true, y_pred)\n\ndef build_model(x_train, epochs=5):\n    n_features = 312\n    encoder = Sequential(name='encoder')\n    encoder.add(layer=layers.Dense(units=20, activation=activations.relu, input_shape=x_train.shape[1:]))\n    encoder.add(layers.Dropout(0.1))\n    encoder.add(layer=layers.Dense(units=10, activation=activations.relu))\n    encoder.add(layer=layers.Dense(units=n_features, activation=activations.relu))\n\n    decoder = Sequential(name='decoder')\n    decoder.add(layer=layers.Dense(units=10, activation=activations.relu, input_shape=x_train.shape[1:]))\n    decoder.add(layer=layers.Dense(units=20, activation=activations.relu))\n    decoder.add(layers.Dropout(0.1))\n    decoder.add(layer=layers.Dense(units=n_features, activation=activations.sigmoid))\n\n    autoencoder = Sequential([encoder, decoder])\n\n    es = EarlyStopping(monitor='val_loss', min_delta=0.00001, patience=20, restore_best_weights=True)\n    autoencoder.compile(\n    \tloss=losses.MSE,\n    \toptimizer='Adam',\n    \tmetrics=[metrics.mean_squared_error])\n    autoencoder.summary()\n\n    history = autoencoder.fit(\n        x_train,\n        x_train,\n        epochs=epochs,\n        batch_size=8,\n        validation_split=0.1,\n        callbacks = [es]\n        # validation_data=(X_test_pada_seq, X_test_pada_seq)\n    )\n    return autoencoder\n\n# autoencoder = build_model(X, epochs=20)\n\nencoder_inputs = Input(shape=(x_train.shape[1], x_train.shape[2]))\n# encoder_emb = Embedding(input_dim=len(word_index)+1, output_dim=20, input_length=input_length)(encoder_inputs)\nencoder_emb = encoder_inputs\n\nencoder_LSTM_1 = Bidirectional(LSTM(32, activation='relu', return_sequences=True))(encoder_emb)\nencoder_drop = Dropout(0.2)(encoder_LSTM_1)\nencoder_LSTM_2 = Bidirectional(GRU(16, activation='relu', return_sequences=False, name = 'bottleneck'))(encoder_drop)\n\ndecoder_repeated = RepeatVector(10)(encoder_LSTM_2)\ndecoder_LSTM = Bidirectional(LSTM(32, activation='relu', return_sequences=True))(decoder_repeated)\ndecoder_drop = Dropout(0.2)(decoder_LSTM)\n# decoder_time = TimeDistributed(Dense(1, activation='softmax'))(decoder_drop) # sigmoid\n# decoder_output = tf.math.reduce_mean(decoder_time, axis=1)\n\ndecoder_output = TimeDistributed(Dense(X.shape[2], activation='softmax'))(decoder_drop)\n\nautoencoder = Model(encoder_inputs, decoder_output)\n# autoencoder.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])\nautoencoder.compile(loss=reshape_squared_error, optimizer='adam', metrics=['accuracy'])\nautoencoder.summary()\n\nhistory = autoencoder.fit(\n    X,\n    X,\n    epochs=5,\n    batch_size=8,\n    validation_split = 0.1,\n    # validation_data=(X_test_pada_seq, X_test_pada_seq)\n    )\n\n# With this model we can compute the reconstruction MSE on normal samples, and find the threshold that covers 95% of them.\ntrain_predicted_x = autoencoder.predict(x=X)\ntrain_events_mse = mean_squared_error(X.mean(axis=1), train_predicted_x.mean(axis=1))\ncut_off = np.percentile(train_events_mse, 95)\n\n\ntest_predicted_x = autoencoder.predict(x=x_test)\ntest_events_mse = mean_squared_error(x_test.mean(axis=1), test_predicted_x.mean(axis=1))\n\n# The threshold is cut_off: if the reconstruction error exceeds cut_off, the sample is treated as anomalous (i.e. fraud).\n# Take size anomalous samples and size normal samples and plot them together with the threshold:\n\n# plotting\nimport matplotlib.pyplot as plt\n\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n# set the figure size\nplt.figure(figsize=(8, 4), dpi=80)\nsize = x_test.shape[0]\nplt.plot(range(x_test.shape[0]), [t for t in train_events_mse[:size]], ls='-', lw=2, c='r', label='正常值')\nplt.plot(range(x_test.shape[0]), [t for t in test_events_mse[:size]], ls='-.', lw=2, c='b', label='异常值')\nplt.plot(range(x_test.shape[0]), [cut_off] * size, ls='-', lw=2, c='y', label='临界值')\n\nplt.legend()\nplt.xlabel('测试样本') # set the x-axis label text\nplt.ylabel('mse') # set the y-axis label text\n# plt.ylim(0, 5) # set the y-axis display range\nplt.title(\"正常值与异常值对比\")\nplt.show()\n\n\ndef main():\n    pass\n\n\nif __name__ == '__main__':\n    main()","repo_name":"gswyhq/hello-world","sub_path":"算法/文本异常检测的Keras自动编码器模型.py","file_name":"文本异常检测的Keras自动编码器模型.py","file_ext":"py","file_size_in_byte":6564,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"90"}
+{"seq_id":"25858065833","text":"\nimport sys\nfrom future.standard_library import install_aliases\ninstall_aliases()\n\nfrom urllib import request, parse\nfrom rdflib import Graph, term\nfrom lxml import etree\n\nif len(sys.argv) < 2:\n    print('Usage: python {} <url>'.format(sys.argv[0]))\n    print('')\n    print('Extract rdfa, microdata and json-ld annotations from a website')\n    exit(1)\n\nurl = sys.argv[1]\n\ng = Graph()\ng.parse(url, format='rdfa')\ng.parse(url, format='microdata')\n\n\ndef sanitize_triple(t):\n    \"\"\"Function to remove bad URIs from the graph that would otherwise\n    make the serialization fail.\"\"\"\n    def sanitize_triple_item(item):\n        if isinstance(item, term.URIRef) and '/' not in item:\n            return term.URIRef(parse.quote(str(item)))\n        return item\n\n    return (sanitize_triple_item(t[0]),\n            sanitize_triple_item(t[1]),\n            sanitize_triple_item(t[2]))\n\n\nwith request.urlopen(url) as response:\n    # Get all json-ld objects embedded in the html file\n    html = response.read().decode('utf-8', errors='ignore')\n    parser = etree.XMLParser(recover=True)\n    root = etree.fromstring(html, parser=parser)\n    if root:\n        for jsonld in root.findall(\".//script[@type='application/ld+json']\"):\n            g.parse(data=jsonld.text, publicID=url, format='json-ld')\n\n\nfixedgraph = Graph()\nfixedgraph += [sanitize_triple(s) for s in g]\n\nprint(g.serialize(format='turtle').decode('utf-8', errors='ignore'))\n","repo_name":"carlosv5/LOD-RDF","sub_path":"extract_data.py","file_name":"extract_data.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"22596311866","text":"# Program to check if element exists in list\n\n\"\"\"list1=[1,2,3,4,5]\n\nele = int(input('Enter any element to check exist in list:'))\nif ele in list1:\n    print(ele,'exists in a list')\nelse:\n    print(ele,'not exists in a list.')\"\"\"\n\n\n# Different ways to clear a list in Python\n\n\"\"\"list1 = [1,3,4,2,5,6]\nprint(list1)\nlist1.clear()\nprint(list1)\"\"\"\n\n# Reversing a List\n\"\"\"\nlist1 = [1,2,4,5,6,7]\nprint(\"Before Reversing:\",list1)\nlist1.reverse()\nprint(\"After Reversing using method:\",list1)\nlist1 = list1[::-1]\nprint(\"Reversing list using slicing:\",list1)\nlis=[1,2,3]\nl2 = []\nfor i in reversed(lis):\n    l2.append(i)\nprint(l2)\n\"\"\"\n\ntuples = [(), ('ram','15','8'), (), ('laxman', 'sita'), ('krishna', 'akbar', '45'), ('',''),()]\n\ndef Remove(tuples):\n    list1 = [t for t in tuples if t]\n    return list1\n\nprint(Remove(tuples))","repo_name":"RoshanSagvekar/Study-Material","sub_path":"Practice/list-exercise.py","file_name":"list-exercise.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"1839015460","text":"import argparse\nfrom datetime import datetime\nimport csv\nimport os\n\ndef fn_to_dt(fn):\n    dt_str = os.path.basename(fn)\n    return datetime.strptime(dt_str, \"%Y-%m\")\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--monthly_fns', nargs=\"+\", help=\"CSV files with monthly editors\")\n    args = parser.parse_args()\n\n    if len(args.monthly_fns) == 1:\n        dir = args.monthly_fns[0]\n        args.monthly_fns = sorted([os.path.join(dir, f) for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))])\n    else:\n        args.monthly_fns = sorted(args.monthly_fns)\n\n    editors_by_month = {}\n    editor_starts = {}\n    for fn in args.monthly_fns:\n        print(\"Processing\", fn)\n        editors_by_month[fn] = {}\n        with open(fn, 'r') as fin:\n            csvreader = csv.reader(fin)\n            header = next(csvreader)\n            assert header == ['editor_name', 'edit_count', 'first_edit_dt']\n            for line in csvreader:\n                name = line[0]\n                count = int(line[1])\n                first_edit = line[2]\n                editors_by_month[fn][name] = count\n                editor_starts[name] = datetime.strptime(first_edit, \"%Y-%m\")\n\n    # stats for editors / edits by whether they are made by new or old editors\n    print(\"Date  \\tTotal Editors\\tNew\\tOld\\t\\tTotal Edits\\tNew\\tOld\")\n    for fn in args.monthly_fns:\n        new_edits = {}\n        old_edits = {}\n        dt = fn_to_dt(fn)\n        for e in editors_by_month[fn]:\n            count_edits = editors_by_month[fn][e]\n            if editor_starts[e] == dt:\n                new_edits[e] = count_edits\n            else:\n                old_edits[e] = count_edits\n        count_new_edits = sum(new_edits.values())\n        count_new_editors = len(new_edits)\n        count_old_edits = sum(old_edits.values())\n        count_old_editors = len(old_edits)\n        total_edits = count_new_edits + count_old_edits\n        total_editors = count_new_editors + count_old_editors\n        print(\"{0}:\\t{1}\\t{2:.2f}\\t{3:.2f}\\t\\t{4}\\t{5:.2f}\\t{6:.2f}\".format(dt,\n                                                                            total_editors,\n                                                                            count_new_editors / total_editors,\n                                                                            count_old_editors / total_editors,\n                                                                            total_edits,\n                                                                            count_new_edits / total_edits,\n                                                                            count_old_edits / total_edits))\n    # stats for what proportion of editors / edits in a month are new / old\n    print(\"\\n==========\")\n    start_month = args.monthly_fns[0]\n    start_dt = fn_to_dt(start_month)\n    start_editors = editors_by_month[start_month].keys()\n    print(\"{0}:\\t{1} editors\".format(os.path.basename(start_month), len(start_editors)))\n    for i in range(1, len(args.monthly_fns)):\n        compare_month = args.monthly_fns[i]\n        compare_editors = editors_by_month[compare_month].keys()\n        overlap = list(start_editors & compare_editors)\n        start_since = 0\n        edits_since = 0\n        edits_common = 0\n        total_edits = sum(editors_by_month[compare_month].values())\n        editors_common = len(overlap)\n        total_editors = len(compare_editors)\n        for e in compare_editors:\n            num_edits = editors_by_month[compare_month][e]\n            if editor_starts[e] > start_dt:\n                start_since += 1\n                edits_since += num_edits\n            elif e in overlap:\n                edits_common += num_edits\n        print(\"{0}:\\t{1} editors;\\t{2} ({3:.2f}) in common;\\t{4} ({5:.2f}) new since;\"\n              \"\\t{6} edits;\\t{7} ({8:.2f}) in common;\\t{9} ({10:.2f}) new since\".format(\n            os.path.basename(compare_month),\n            total_editors,\n            editors_common, editors_common / total_editors,\n            start_since, start_since / total_editors,\n            total_edits,\n            edits_common, edits_common / total_edits,\n            edits_since, edits_since / total_edits))\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"geohci/miscellaneous-wikimedia","sub_path":"editor-turnover/compute_overlap.py","file_name":"compute_overlap.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"}
+{"seq_id":"20823047711","text":"__version__ = '1.0.0'\n\nimport json\nimport os\n\nfrom flask import Flask\n\n\nkwargs = {}\napp_path = os.path.dirname(__file__)\nkwargs.update({\n    'static_url_path': '',\n    'static_folder': os.path.join(\n        os.path.abspath(app_path),\n        'client'\n    )\n})\n\napp = Flask(__name__, **kwargs)\n\n\n@app.route('/')\ndef index():\n    return app.send_static_file('index.html')\n\n\n@app.route('/get')\n@app.route('/get/<item>')\ndef get(item=None):\n    if item:\n        with open(os.path.join(app_path, '_baseline', item)) as f:\n            item = f.read()\n        return item\n\n    items = os.listdir(os.path.join(app_path, '_baseline'))\n    return json.dumps([i for i in items if not i[0] == '.'])\n","repo_name":"sivel/ansible-perfchart","sub_path":"perfchart/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"}
+{"seq_id":"1115757933","text":"\"\"\"binary_node.py: BinaryNode class\"\"\"\n\nfrom copy import deepcopy\nfrom genetic_programming.ast.nodes import Node\n\n\nclass BinaryNode(Node):\n    \"\"\"\n    BinaryNode class, a Node with two arguments\n    \"\"\"\n    def __init__(self, function: callable, left: Node, right: Node):\n        super().__init__(function, 2)\n        self.arguments.append(deepcopy(left))\n        self.arguments.append(deepcopy(right))\n        self.type = float\n\n    def evaluate(self, **kwargs) -> float or [float]:\n        \"\"\"\n        Evaluate Binary Node\n\n        :return: Value of binary nodes\n        \"\"\"\n        try:\n            ready, variables, values = self._receive_values(**kwargs)\n            answer = list()\n            left = self.arguments[0].evaluate(ready=ready, values=(variables, values))\n            right = self.arguments[1].evaluate(ready=ready, values=(variables, values))\n            for l, r in zip(left, right):\n                try:\n                    answer.append(self.function(l, r))\n                except ZeroDivisionError:\n                    answer.append(float('NaN'))\n            return answer\n        except ValueError:\n            return self.function(self.arguments[0].evaluate(),\n                                 self.arguments[1].evaluate())\n\n    def __repr__(self) -> str:\n        first_child = \"\\n|-{}\".format(str(self.arguments[0]))\n        second_child = \"\\n|-{}\".format(str(self.arguments[1]))\n        return \"({})\" + (first_child + second_child).replace(\"\\n\", \"\\n  \")\n\n    @classmethod\n    def get_arguments(cls) -> int:\n        \"\"\"\n        :return: 2 arguments\n        \"\"\"\n        return 2\n","repo_name":"StarBrand/CC5114-Tareas","sub_path":"code/genetic_programming/ast/nodes/binary_node.py","file_name":"binary_node.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"}
+{"seq_id":"14912463962","text":"\"\"\"\nDijkstra from node 1 + Dijkstra from node K\nor\njust use Floyd-Warshall and keep it simple\n\"\"\"\nimport sys\nimport heapq\n\n\ndef dijkstra(graph, begin, destination):\n    distance_to = [int(1e9)] * len(graph)\n    distance_to[begin] = 0\n\n    dijk_q = [(0, begin)]\n\n    while dijk_q:\n        dist, cur = heapq.heappop(dijk_q)\n        if distance_to[cur] < dist:\n            continue\n\n        for next_node in graph[cur]:\n            if distance_to[next_node] > dist + 1:\n                distance_to[next_node] = dist + 1\n                heapq.heappush(dijk_q, (dist + 1, next_node))\n\n    return distance_to[destination]\n\n\ndef solution():\n    sys_input = sys.stdin.readline\n\n    n, m = map(int, sys_input().split())\n    graph = [[] for _ in range(n + 1)]\n    for _ in range(m):\n        begin, end = map(int, sys_input().split())\n        graph[begin].append(end)\n        graph[end].append(begin)\n    x, k = map(int, sys_input().split())\n\n    print(dijkstra(graph, 1, k) + dijkstra(graph, k, x))\n\n\nsolution()\n","repo_name":"Python-Algorithm-Practice/YJH_practice","sub_path":"01. 이코테/Chapter 09 최단 경로/실전_01_미래 도시.py","file_name":"실전_01_미래 도시.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"}
+{"seq_id":"21187760437","text":"from typing import Union, Optional\n\nfrom mwcleric.template_modifier import TemplateModifierBase as MwclericTemplateModifier\nfrom mwparserfromhell.nodes import Template\n\nfrom mwrogue.esports_client import EsportsClient\n\n\nclass TemplateModifierBase(MwclericTemplateModifier):\n    def __init__(self, site: EsportsClient, template, page_list=None, title_list=None, limit=-1, summary=None,\n                 quiet=False, lag=0, tags=None, skip_pages=None,\n                 recursive=True,\n                 startat_page=None,\n                 namespace: Optional[Union[int, str]] = None,\n                 **data):\n        super().__init__(site, template, page_list=page_list, title_list=title_list, limit=limit, summary=summary,\n                         quiet=quiet, lag=lag, tags=tags, skip_pages=skip_pages,\n                         recursive=recursive,\n                         startat_page=startat_page,\n                         namespace=namespace,\n                         **data)\n\n        # redo this assignment just for the type hint because it doesn't seem to get it otherwise\n        self.site = site\n\n    def backup(self, key):\n        if self.current_page.name.startswith('Backup:'):\n            return\n        self.site.backup_template(template=self.current_template, page=self.current_page, key=key)\n\n    def restore(self, key):\n        self.current_template: Template\n        if self.current_page.name.startswith('Backup:'):\n            return\n        to_restore = self.site.get_restored_template(self.current_template, self.current_page, key)\n        if to_restore is None:\n            if not self.quiet:\n                if isinstance(key, str):\n                    key = [key]\n                print('Could not find restore data for template on page \"{}\" with key: {}'.format(\n                    self.current_page.name,\n                    ', '.join([str(self.current_template.get(_, _)) for _ in key])))\n            return\n        to_restore.remove('backup_key')\n\n        for param in self.current_template.params:\n            self.current_template.remove(param.name.strip())\n\n        for param in to_restore.params:\n            name = param.name.strip()\n            self.current_template.add(param.name, to_restore.get(name).value, preserve_spacing=False)\n","repo_name":"RheingoldRiver/mwrogue","sub_path":"mwrogue/template_modifier.py","file_name":"template_modifier.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"89"}
+{"seq_id":"74916001249","text":"import torch.nn as nn\nimport torch\n\nfrom gym.spaces.dict import Dict as SpaceDict\nfrom gym.spaces.box import Box\nfrom gym.spaces.discrete import Discrete\nimport numpy as np\nimport torch.nn.functional as F\nfrom env_utils.env_wrapper.env_wrapper import EnvWrapper\nfrom model.policy import *\nfrom env_utils import *\n\n\nclass BaseRunner(nn.Module):\n    def __init__(self, args, config, return_features=False):\n        # super().__init__()\n        super().__init__()\n        observation_space = SpaceDict({\n            'panoramic_rgb': Box(low=0, high=256, shape=(64, 256, 3), dtype=np.float32),\n            'target_goal': Box(low=0, high=256, shape=(64, 256, 3), dtype=np.float32),\n            'step': Box(low=0, high=500, shape=(1,), dtype=np.float32),\n            'prev_act': Box(low=0, high=3, shape=(1,), dtype=np.int32),\n            'gt_action': Box(low=0, high=3, shape=(1,), dtype=np.int32)\n        })\n        action_space = Discrete(config.ACTION_DIM)\n        agent = eval(config.POLICY)(\n            observation_space=observation_space,\n            action_space=action_space,\n            hidden_size=config.features.hidden_size,\n            rnn_type=config.features.rnn_type,\n            num_recurrent_layers=config.features.num_recurrent_layers,\n            backbone=config.features.backbone,\n            goal_sensor_uuid=config.TASK_CONFIG.TASK.GOAL_SENSOR_UUID,\n            normalize_visual_inputs=True,\n            cfg=config\n        )\n        self.agent = agent\n        self.torch_device = 'cpu' if args.gpu == '-1' else 'cuda:{}'.format(args.gpu)\n        # self.torch_device = 'cuda'\n        self.return_features = return_features\n        self.need_env_wrapper = True\n        self.num_agents = 1\n        return\n\n    def reset(self):\n        self.B = 1\n        self.hidden_states = torch.zeros(self.agent.net.num_recurrent_layers, self.B,\n                                         self.agent.net._hidden_size).to(self.torch_device)\n        self.actions = torch.zeros([self.B], dtype=torch.long).to(self.torch_device)\n        self.time_t = 0\n\n    def step(self, obs, reward, done, info, env=None):\n        new_obs = {}\n        for k, v in obs.items():\n            if isinstance(v, np.ndarray):\n                new_obs[k] = torch.from_numpy(v).float().to(self.torch_device).unsqueeze(0)\n            if not isinstance(v, torch.Tensor):\n                new_obs[k] = torch.tensor(v).float().to(self.torch_device).unsqueeze(0)\n            else:\n                new_obs[k] = v\n        obs = new_obs\n        (\n            values,\n            actions,\n            actions_log_probs,\n            hidden_states,\n            actions_logits,\n            *_\n        ) = self.agent.act(\n            obs,\n            self.hidden_states,\n            self.actions,\n            torch.ones(self.B).unsqueeze(1).to(self.torch_device) * (1-done),\n            deterministic=False,\n            return_features=self.return_features\n        )\n        self.hidden_states.copy_(hidden_states)\n        self.actions.copy_(actions)\n        self.time_t += 1\n        return self.actions.item()\n\n    def visualize(self, env_img):\n        return NotImplementedError\n\n    def setup_env(self):\n        return\n\n    def wrap_env(self, env, config):\n        self.env = EnvWrapper(env, config)\n        return self.env\n\n    def load(self, state_dict):\n        self.agent.load_state_dict(state_dict)","repo_name":"rllab-snu/TopologicalSemanticGraphMemory","sub_path":"runner/base_runner.py","file_name":"base_runner.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"89"}
\n","repo_name":"luiseleazar/soccer_team_stats","sub_path":"team_stats.py","file_name":"team_stats.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"36870535755","text":"import requests\nimport json\nimport re\nimport numpy as np\nfrom langdetect import detect\n\n\ndef import_courses(coursesList):\n index = 0\n if coursesList.last_valid_index() is not None:\n index = 1 + coursesList.last_valid_index()\n has_next_page = True\n page = 1\n\n while has_next_page:\n\n courses_json = requests.get(\"https://stepic.org/api/courses\", params={'page': page, 'language': 'ru', 'is_paid': False}).json()\n has_next_page = courses_json['meta']['has_next']\n page += 1\n courses_json = courses_json['courses']\n\n for i in range(0, len(courses_json)):\n try:\n link = 'https://stepic.org/course/' + str(courses_json[i]['id'])\n accessibility = requests.head(link)\n if accessibility.status_code == 404:\n continue\n name = courses_json[i]['title'].lower()\n name_split = re.search(r'\\b\\d-\\d\\b|\\d+.*класс\\w*|\\b\\d{1,2}\\s\\w\\b', name)\n stop_words = ['ЕГЭ', 'егэ', 'лет', 'детей', 'школьников', 'старшеклассников', 'ОГЭ', 'огэ']\n mask = [courses_json[i]['title'].find(j) != -1 for j in stop_words]\n if name_split is not None or np.any(mask):\n continue\n\n description = ''\n\n if courses_json[i]['summary'] is not None:\n description += courses_json[i]['summary']\n if courses_json[i]['requirements'] is not None:\n description += courses_json[i]['requirements']\n if courses_json[i]['target_audience'] is not None:\n description += courses_json[i]['target_audience']\n if courses_json[i]['description'] is not None:\n description += courses_json[i]['description']\n sections_url = 'https://stepik.org/api/sections?{}'.format(\n '&'.join('ids[]={}'.format(obj_id) for obj_id in courses_json[i]['sections']))\n sections_json = requests.get(sections_url).json()\n sections_json = sections_json['sections']\n for j in range(0, len(sections_json)):\n description += sections_json[j]['title'] + '. '\n if courses_json[i]['total_units'] != 0:\n lessons = requests.get('https://stepic.org:443/api/lessons',\n params={'language': 'ru', 'course': courses_json[i]['id']}).json()\n lessons = lessons['lessons']\n for k in range(0, len(lessons)):\n description += lessons[k]['title'] + '. 
'\n\n len_words = description.split()\n\n if len(len_words) < 50:\n continue\n\n language = detect(description)\n\n if language != 'ru':\n continue\n\n target_aud = courses_json[i]['target_audience'].lower()\n target_aud_split = re.search(r'\\d+.*класс\\w*|\\b\\d{1,2}\\s\\w\\b|дет\\w{1,2}', target_aud)\n mask = [courses_json[i]['target_audience'].find(j) != -1 for j in stop_words]\n if target_aud_split is not None or np.any(mask):\n continue\n\n coursesList.loc[index, 'name'] = courses_json[i]['title']\n coursesList.loc[index, 'parameters'] = str(courses_json[i]['id'])\n coursesList.loc[index, 'link'] = link\n coursesList.loc[index, 'description'] = description\n\n index += 1\n except json.decoder.JSONDecodeError:\n continue\n\n return coursesList\n","repo_name":"Galentin/RecommenderSystem","sub_path":"ImportStepik.py","file_name":"ImportStepik.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"75063639649","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom app_mgr import views\nfrom app_mgr import distillviews\n\nurlpatterns = [\n # USER AUTHENTICATION\n url(r'^register/', views.register, name='register'),\n url(r'^login/$', views.login_user, name='login'),\n url(r'^logout/$', views.logout_user, name='logout'),\n url(r'^user_profile/$', views.view_profile, name='view_profile'),\n url(r'^reset/confirm/(?P[0-9A-Za-z]+)-(?P.+)/$', views.reset_confirm, name='reset_confirm'),\n url(r'^reset/$', views.reset, name='reset'),\n url(r'^reset/sent/$', views.reset_sent, name='reset_sent'),\n\n #RESTFUL API\n url(r'^users/$', views.UserProfileListView.as_view(), name='user-list'),\n url(r'^orgs/$', views.OrganizationListView.as_view(), name='org-list'),\n url(r'^apps/$', views.ApplicationListView.as_view(), name='app-list'),\n\n url(r'^user/(?P[\\d]+)/$', views.UserProfileInstanceView.as_view(), name='user-instance'),\n url(r'^user/(?Pcurrent)/$', views.UserProfileInstanceView.as_view(), name='user-current'),\n url(r'^org/(?P[\\d]+)/$', views.OrganizationInstanceView.as_view(), name='org-instance'),\n url(r'^app/(?P[\\d]+)/$', views.ApplicationInstanceView.as_view(), name='app-instance'),\n \n #url(r'^app/(?Pcurrent+)/$', views.AliasListView.as_view(), name='alias-list'),\n\n url(r'^appresults/(?P[0-9]{1,2})/(?P\\w+)/$', distillviews.app_results, name='app-results'),\n url(r'^appresults/(?P\\w+)/(?P\\w+)/$', distillviews.app_results_byname, name='app-results'),\n \n # url(r'^appresults/(?P[\\d]+)/fields/$', views.get_app_result_fields, name='data-fields'),\n # url(r'^appresults/(?P[\\d]+)/data/$', views.get_app_results, name='data'),\n \n \n]\n","repo_name":"ep-infosec/incubator-flagon-tap","sub_path":"app_mgr/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"24811451446","text":"from typing_extensions import TypeAlias\nfrom typing import Callable, List\n\n\nInt: TypeAlias = int\n\n\ndef func(step: Int) -> None:\n print(f\"step: {step}\")\n\n\ndef create_handlers(callback: Callable) -> List[Callable]:\n handlers = []\n for step in range(5):\n # добавляем обработчики для каждого шага (от 0 до 4)\n # * lambda func definition fixed\n handlers.append(lambda s=step: callback(s))\n # handlers.append(lambda: callback(s))\n return handlers\n\n\ndef 
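The Stepik importer above pages through `/api/courses` until `meta['has_next']` goes false. Stripped of the filtering logic, the pagination pattern it uses looks like this (the endpoint and params are taken straight from the script; the generator wrapper is my own framing):

```python
import requests

def iter_courses():
    """Yield raw course dicts, one API page at a time."""
    page, has_next = 1, True
    while has_next:
        payload = requests.get(
            "https://stepic.org/api/courses",
            params={'page': page, 'language': 'ru', 'is_paid': False},
        ).json()
        has_next = payload['meta']['has_next']
        page += 1
        yield from payload['courses']
```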
execute_handlers(handlers: List[Callable]) -> None:\n # запускаем добавленные обработчики (шаги от 0 до 4)\n for handler in handlers:\n handler()\n\n\nif __name__ == \"__main__\":\n execute_handlers(create_handlers(func))\n # before after\n # step: 4 step: 0\n # ... ...\n # step: 4 step: 4\n\n","repo_name":"demetrius404/jetlend_interview","sub_path":"q7.py","file_name":"q7.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"30193143544","text":"import sys\nimport os\nimport json\nimport emojis\nfrom yr.libyr import Yr\n\ncwd = os.path.dirname(os.path.realpath(__file__))\n\nwith open(cwd + '/emojis.json') as file:\n symbols_emojis = json.load(file)\n\nif len(sys.argv) > 1:\n city = sys.argv[1].split('/')[2]\n weather = Yr(location_name=str(sys.argv[1] + '/' + city))\nelse:\n weather = Yr(location_name='Norway/Vestland/Bergen/Bergen')\nnow = weather.now()\n\nweather_location = weather.location_name.split('/')\nprint('Weather in ' + weather_location[0] + ', ' + weather_location[1] + ', ' + weather_location[len(\n weather_location) - 1])\n\nweather_type = emojis.encode(symbols_emojis[now['symbol']['@number']])\nweather_precipitation = now['precipitation']['@value'] + 'mm'\nweather_wind = now['windSpeed']['@mps'] + 'm/s'\nweather_temp = now['temperature']['@value'] + u'\\u00B0C'\nprint(weather_type + ' · ' + weather_temp + ' · ' + weather_wind + ' · ' + weather_precipitation)\n","repo_name":"timharek/p-yr","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"} +{"seq_id":"29057503390","text":"import turtle\nfrom turtle import color\nimport random\n\nscreen = turtle.Screen()\nscreen.setup(width=500, height=400)\ncolours = [\"red\", \"pink\", \"blue\", \"purple\", \"black\", \"green\"]\ny_pos = [100, 60, 20, -20, -60, -100]\nuser_bet = screen.textinput(title=\"Make your bet\",\n prompt=\"Which turtle will win? Choose a colour: \")\nis_race_on = False\nall_racers = []\n\n\nclass Racer(turtle.Turtle):\n # def __init__(self, color, x, y):\n def __init__(self, color, x, y):\n super().__init__(shape=\"turtle\")\n self.color(color)\n self.penup()\n self.goto(x=x, y=y)\n\n def race(self):\n self.forward(random.randint(0, 10))\n\n\nfor i in range(0, 6):\n racer = Racer(colours[i], -230, y_pos[i])\n all_racers.append(racer)\n\nif user_bet:\n is_race_on = True\n\nwhile is_race_on:\n for racer in all_racers:\n if racer.xcor() > 230:\n is_race_on = False\n winning_colour = racer.pencolor()\n if winning_colour == user_bet:\n print(\n f\"You won! The winning turtle colour was {winning_colour}.\")\n else:\n print(\n f\"You lost! 
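q7.py above demonstrates the late-binding pitfall its comments hint at: a bare `lambda: callback(step)` captures the variable, not its value, so every handler would report `step: 4`. A self-contained illustration of why the `s=step` default-argument fix works:

```python
# Each lambda in `broken` closes over the loop variable itself; by the time
# the lambdas run, the loop has finished and i == 2 for all of them.
broken = [lambda: print(i) for i in range(3)]
# Default arguments are evaluated at definition time, freezing the current value.
fixed = [lambda i=i: print(i) for i in range(3)]

for f in broken:
    f()   # 2, 2, 2
for f in fixed:
    f()   # 0, 1, 2
```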
The winning turtle colour was {winning_colour}.\")\n racer.race()\n\nscreen.exitonclick()\n","repo_name":"urbanfog/python","sub_path":"100DaysofPython/Day19/turtle_race.py","file_name":"turtle_race.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"43405239857","text":"import pandas as pd\nfrom s3_joao import io\nimport os\nimport numpy as np\n\n# Faz o download dos dados bruto de qualidade da água do hidroweb\n\n\nDir = '/media/joao/HD-jao/Artigo_Temp_Ar_Agua/Dados_temp/bruto_hidroweb/'\nf_ests = '/home/joao/Dropbox/Scripts/Python/s3_joao/relacao_estacoes_HidroWeb.txt'\nest_baixadas = '/media/joao/HD-jao/Artigo_Temp_Ar_Agua/Dados_temp/est_baixadas.txt'\n\ntipo_esp = 'qualagua'\n\ndt = pd.read_csv(f_ests, sep=\"\\t\")\ndt = dt[dt['Tipo'] == 1]\n\nif os.path.isfile(est_baixadas):\n dt_executados = pd.read_csv(est_baixadas, sep='\\t', index_col=0)\nelse:\n dt_executados = pd.DataFrame()\n dt_executados['Codigo'] = np.nan\n\ncods_nao_rodados = []\nfor cod in dt['Codigo'].tolist():\n if cod not in dt_executados['Codigo'].values:\n cods_nao_rodados.append(cod)\n\nfor cod in cods_nao_rodados:\n io.download_station(cod, formato=2, dir=Dir, tipo_especifico='qualagua', save_zip=False)\n dt_executados.loc[dt_executados.shape[0]] = str(cod)\n dt_executados.to_csv(est_baixadas, sep='\\t')\n","repo_name":"joaohuf/Estima_Temp_Agua_com_Temp_Ar","sub_path":"Temp_Ar_Agua_todo_Brasil/0_pre_process/1_Download_hidroweb_temp.py","file_name":"1_Download_hidroweb_temp.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"13773630854","text":"import threading\nimport pygame\nimport socket\n\nfrom networking import receive\nfrom gui_text import Text\n\npygame.init()\n\nCOLOR_INACTIVE = pygame.Color('lightskyblue3')\nCOLOR_ACTIVE = pygame.Color('dodgerblue2')\nFONT = pygame.font.SysFont(\"Calibri Light\", 30, (255, 255, 255))\n\n\nclass InputBox:\n def __init__(self, x, y, w, h, text=\"\"):\n self.rect = pygame.Rect(x, y, w, h)\n self.color = COLOR_INACTIVE\n self.text = text\n self.txt_surface = FONT.render(text, True, (255, 255, 255))\n self.active = False\n\n def handle_event(self, event):\n if event.type == pygame.MOUSEBUTTONDOWN:\n # If the user clicked on the input_box rect.\n self.active = not self.active if self.rect.collidepoint(event.pos) else False\n\n # Change the current color of the input box.\n self.color = COLOR_ACTIVE if self.active else COLOR_INACTIVE\n\n if self.active and event.type == pygame.KEYDOWN and event.key != pygame.K_RETURN:\n if event.key == pygame.K_BACKSPACE:\n self.text = self.text[:-1]\n\n else:\n self.text += event.unicode\n\n # Re-render the text.\n self.txt_surface = FONT.render(self.text, True, self.color)\n\n def update(self):\n width = max(200, self.txt_surface.get_width() + 10)\n self.rect.w = width\n\n def draw(self, screen):\n screen.blit(self.txt_surface, (self.rect.x+5, self.rect.y+5))\n pygame.draw.rect(screen, self.color, self.rect, 2)\n\n def get_text(self):\n return self.text\n\n\nclass IPConnectionScreen:\n def __init__(self, surface, width, player_offset, client_socket, default_info_text=\"Press enter to connect\"):\n self.surface = surface\n self.client_socket = client_socket\n\n self.width = width\n self.player_offset = player_offset\n\n # Text\n self.ip_text = Text(\"Server IP:\", FONT, (255, 255, 255),\n (self.width // 2, self.player_offset))\n\n 
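`1_Download_hidroweb_temp.py` above resumes interrupted runs by diffing the full station list against the codes already appended to `est_baixadas.txt`. The skeleton of that resume pattern, with the pandas plumbing replaced by a plain set and hypothetical `download`/`save_log` helpers:

```python
def resume_downloads(all_codes, done, download, save_log):
    """Process only codes not seen before; log each success immediately."""
    for code in (c for c in all_codes if c not in done):
        download(code)     # stand-in for io.download_station(...)
        done.add(code)
        save_log(done)     # persist progress so a crash costs at most one item
```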
self.port_text = Text(\"Server Port:\", FONT, (255, 255, 255),\n (self.width // 2, self.player_offset + 180))\n\n self.info_text = Text(default_info_text, FONT, (255, 255, 255),\n (self.width // 2, self.player_offset + 300))\n\n # Input boxes\n self.ip_input = InputBox(self.width // 2 - 100, self.player_offset + 20, 200, 40)\n self.port_input = InputBox(self.width // 2 - 100, self.player_offset + 200, 200, 40, \"9850\")\n\n self.start_message = \"\"\n self.connected = False\n\n def get_start_message(self):\n self.start_message = receive(self.client_socket)\n\n def draw(self):\n self.surface.fill((0, 0, 0))\n\n self.ip_text.draw(self.surface)\n self.port_text.draw(self.surface)\n self.info_text.draw(self.surface)\n\n self.ip_input.draw(self.surface)\n self.port_input.draw(self.surface)\n pygame.display.update()\n\n \"\"\"\n Runs the GUI for the IP connection screen.\n \n Returns: if the connection succeeded\n \"\"\"\n def run(self):\n clock = pygame.time.Clock()\n\n while not self.start_message:\n events = pygame.event.get()\n\n for event in events:\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n\n if event.type == pygame.KEYDOWN and not self.connected and event.key == pygame.K_RETURN:\n self.connected = self.connect()\n\n if self.connected is False: # if the connection failed\n return False # the connection failed\n\n self.client_socket.settimeout(1000)\n\n t = threading.Thread(target=self.get_start_message)\n t.start()\n\n self.ip_input.handle_event(event)\n self.port_input.handle_event(event)\n\n self.draw()\n clock.tick(60)\n\n print(self.start_message)\n\n return True # the connection succeeded\n\n def connect(self): # returns: if the client successfully connected\n self.info_text.change_text(\"Connecting...\")\n self.draw()\n\n try:\n self.client_socket.connect((self.ip_input.get_text(), int(self.port_input.get_text())))\n\n except (TypeError, socket.error, ConnectionRefusedError, TimeoutError, ValueError):\n self.info_text.change_text(\"Failed\")\n self.draw()\n return False\n\n self.info_text.change_text(\"Connected\")\n self.draw()\n return True\n","repo_name":"AlexanderJCS/multiplayer-snake","sub_path":"client/ip_connection_screen.py","file_name":"ip_connection_screen.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"} +{"seq_id":"22698954094","text":"\n\n\"\"\"Example TK.\"\"\"\n\n\ndef workday_count(days: int) -> int:\n \"\"\"Great!\"\"\"\n a = days // 7\n b = days % 7\n c = 7 * a - 2 * a + b\n if days % 7 == 6:\n return c - 1\n return c\n\n\nprint(workday_count(14))\n\n\ndef sorta_sum(a: int, b: int) -> int:\n \"\"\"Great!\"\"\"\n c = a + b\n if c in range(10, 20):\n return 20\n else:\n return c\n\n\nprint(sorta_sum(5, 56))\n\n\ndef extra_end(s: str) -> str:\n if len(s) >= 2:\n a = (s[-2] + s[-1]) * 3\n return a\n\n\nprint(extra_end(\"car\"))\n\n\ndef last_indices_elements_sum(nums: list) -> int:\n \"\"\"Great!\"\"\"\n a = nums[-1]\n b = nums[-2]\n if a >= len(nums):\n aa = 0\n else:\n aa = nums[a]\n if b >= len(nums):\n bb = 0\n else:\n bb = nums[b]\n return aa + bb\n\n\nprint(last_indices_elements_sum([0, 1, 7, 2]))\n\n\ndef divisions(numbers: list) -> int:\n \"\"\"Great!\"\"\"\n result = 0\n for i in range(len(numbers)):\n for j in range(len(numbers)):\n if i != j:\n if numbers[i] % numbers[j] == 0:\n result += 1\n return result\n\n\nprint(divisions([3, 14, 12, 
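`IPConnectionScreen.connect()` above funnels every plausible failure (`socket.error`, `ConnectionRefusedError`, `TimeoutError`, `ValueError` from a bad port string) into a single "Failed" state. The same pattern as a standalone helper; the function name is hypothetical, and the class's GUI plumbing is omitted:

```python
import socket
from typing import Optional

def try_connect(host: str, port: str, timeout: float = 5.0) -> Optional[socket.socket]:
    """Return a connected socket, or None if anything about the attempt fails."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(timeout)
    try:
        s.connect((host, int(port)))   # int() can raise ValueError, like the GUI's port box
    except (OSError, ValueError):      # OSError covers socket.error and timeouts
        s.close()
        return None
    return s
```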
6]))\n","repo_name":"martin1712/iti0102-2022","sub_path":"TK/tk0/exam.py","file_name":"exam.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"73146138850","text":"\n# A RegEx, or Regular Expression, is a sequence of characters that forms a search pattern.\n#\n# RegEx can be used to check if a string contains the specified search pattern.\n#\n# RegEx Module\n# Python has a built-in package called re, which can be used to work with Regular Expressions.\n#\n# Import the re module:\n\n# functions\n# findall\tReturns a list containing all matches\n# search\tReturns a Match object if there is a match anywhere in the string\n# split\tReturns a list where the string has been split at each match\n# sub\tReplaces one or many matches with a string\n# match Returns a Match object only for a match at the beginning of the string\n\n\n\n# RegEx Functions\n# The re module offers a set of functions that allows us to search a string for a match:\n#\n# Function\tDescription\n# findall\tReturns a list containing all matches\n# search\tReturns a Match object if there is a match anywhere in the string\n# split\tReturns a list where the string has been split at each match\n# sub\tReplaces one or many matches with a string\n# match\tReturns a Match object only for a match at the beginning of the string\n\n\n# syn: modulename.function_name('pattern',string,flags = 0)\n\nimport re\n\nm1 = re.match('hello','hello python')\nprint(m1)\n# <re.Match object; span=(0, 5), match='hello'>\n\nString1 = \"RegEx can be used to check if a string contains the specified search pattern.\"\nm2 = re.match('can',String1)\nprint(m2)\n# None\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Mani015/PFS-Aug21","sub_path":"Python_Notes/Day55(RE)/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"89"} +{"seq_id":"41956284811","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import fields, models, api, _\nfrom odoo.exceptions import ValidationError\n\n\nclass ProjectTemplate(models.Model):\n    _name = 'project.template'\n    _description = \"Project Template\"\n\n    name = fields.Char(string='Project Type', track_visibility='always', required=True, index=True)\n    task_ids = fields.One2many(\n        'project.template.task',\n        'tasks_id',\n        string='Project Template',\n    )\n    \n    duration = fields.Integer(string='Duration')\n\n\n    @api.one\n    @api.constrains('task_ids')\n    def _check_task_weightage(self):\n        # Check the total task weightage and raise a ValidationError if the total is not 100.\n        total = [x.stage_weightages for x in self.task_ids]\n        if (total and sum(total) != 100):\n            raise ValidationError('Total Stage Weightage should be 100')\n\n\nclass ProjectTemplateTask(models.Model):\n    _name = 'project.template.task'\n    _description = \"Project Template Task\"\n    _order = \"sequence\"\n\n    tasks_id = fields.Many2one(\n        'project.template',\n        string='Parent Task',\n        readonly=True\n    )\n    stage_number = fields.Integer(\n        # related='stage_id.sequence',\n        string='Stage Number')\n    sequence = fields.Integer(string='sequence', default=10)\n    stage_weightages = fields.Float(string=\"Stage Weightage (%)\")\n    # stage_id = fields.Many2one('project.task.type', string=\"Stage Name\", required=True)\n    stage = fields.Char(string=\"Stage Name\", required=True)\n    task_template_id = fields.Many2many('task.template', 'project_template_task_id', 'task_temp_id', string=\"Task Name\")\n\n    def default_get(self, context=None):\n        res = {}\n        if self.env.context:\n            context_keys = self.env.context.keys()\n            next_sequence = 1\n            if 'task_ids' in 
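ex1.py above is really a note about `re.match` anchoring at position 0, which is why `m2` prints `None`. `re.search` is the function that scans the whole string; the contrast in two lines (the output reprs are from CPython 3.7+):

```python
import re

s = "RegEx can be used to check if a string contains the specified search pattern."
print(re.match('can', s))    # None -- 'can' is not at the start of s
print(re.search('can', s))   # <re.Match object; span=(6, 9), match='can'>
```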
context_keys:\n if len(self.env.context.get('task_ids')) > 0:\n next_sequence = len(self.env.context.get('task_ids')) + 1\n res.update({'stage_number': next_sequence})\n return res\n\n # for parser sequence to the Stages from project templates'task\n @api.model\n def create(self, vals):\n stages = self.env['project.task.type'].search([('id', '=', vals.get('stage_id'))])\n stages.write({'sequence': vals.get('stage_number')})\n result = super(ProjectTemplateTask, self).create(vals)\n return result\n\n # for parser sequence to the Stages from project templates'task\n @api.multi\n def write(self, vals):\n stages = self.env['project.task.type'].search([('id', '=', vals.get('stage_id'))])\n stages.write({'sequence': vals.get('stage_number')})\n return super(ProjectTemplateTask, self).write(vals)","repo_name":"yosaraharjo88/IEM","sub_path":"project_template_job_order/models/project_template.py","file_name":"project_template.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"26680872412","text":"#!/usr/bin/python3\n\n# This scripts converts a folder of pcaps (e.g., cluster-150-3/pcaps/) to a bunch of netflows (cluster-150-3/netflows/) using nfpcapd/nfdump\n# WARNING: sometimes nfpcapd hangs, I don't know why. Keep an eye on the terminal, and kill using `pkill -f -9 nfpcapd` if stuck for too long.\n\nimport sys\nimport os\nimport glob\nimport subprocess\nimport multiprocessing as mp\nimport numpy as np\nfrom pathlib import Path\nimport json\nimport socket\nfrom scapy.all import *\nfrom scapy.layers.tls.all import *\nimport subprocess\nimport shlex\n\nSAMPLE_RATES = [1,10,100,1000] # 100%, 10%, 1%, 0.1%\n\nif True:\n sys.path.append(\"../lib\")\n import utils\n\ndef run_and_wait(cmd, stdout=None):\n parts = shlex.split(cmd)\n p = None\n if stdout is None:\n try:\n p = subprocess.run(parts, timeout=3)\n except:\n pass\n print(\"Ran\", cmd)\n else:\n with open(stdout, \"w\") as outfile:\n subprocess.run(parts, stdout=outfile, timeout=3)\n print(\"Ran\", cmd, \"redirected to\", stdout)\n try:\n os.waitpid(p.pid, 0)\n except:\n pass\n\ndef pcap_to_netflow(pcap_in, destination_folder, sample_rate=1):\n\n # if needs be, sample the pcap\n if sample_rate > 1:\n sampled_pcap = pcap_in.replace(\".pcap\", f\"_{sample_rate}.pcap\")\n if not os.path.isfile(sampled_pcap):\n print(f\"Sampling {pcap_in} -> {sampled_pcap}\")\n pkts = rdpcap(pcap_in)\n sampled_pkts = pkts[::sample_rate]\n wrpcap(sampled_pcap, sampled_pkts)\n else:\n print(f\"Skipping {sampled_pcap}\")\n pcap_in = sampled_pcap\n\n # output to netflows_samplerate if samplerate>1\n destination_folder = destination_folder.replace(\"/netflows/\", f\"/netflows_{sample_rate}/\")\n\n Path(destination_folder).mkdir(parents=True, exist_ok=True)\n print(f\"Processing {pcap_in} -> {destination_folder}\")\n\n run_and_wait(f\"rm -f {destination_folder}nfcapd*\")\n run_and_wait(f\"nfpcapd -r {pcap_in} -l {destination_folder}\")\n fnames = glob(f\"{destination_folder}nfcapd*\")\n fname = None\n \n # if nfpcapd only generated one file, pick it. 
Otherwise, pick the largest\n if len(fnames) == 0:\n print(\"No files in\", f\"{destination_folder}\")\n sys.exit(0)\n elif len(fnames)==1:\n fname=fnames[0]\n else:\n sizes = [(f, os.path.getsize(f)) for f in fnames]\n sizes.sort(key=lambda row: row[1], reverse=True)\n fname = sizes[0][0]\n\n run_and_wait(f\"nfdump -r {fname} -O tstart -q -o json\", f\"{destination_folder}nfdump.json\")\n run_and_wait(f\"rm -f {destination_folder}nfcapd*\")\n run_and_wait(f\"sed -i '1s/^/[/' {destination_folder}nfdump.json\") #somehow the json file lacks the initial [, insert it\n\ndef process_pcap(pcap_in):\n destination_folder = pcap_in.replace('/pcaps/', '/netflows/').replace('capture.pcap', '')\n\n for sample_rate in SAMPLE_RATES:\n pcap_to_netflow(pcap_in, destination_folder, sample_rate)\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"Usage: script DATASET_PATH/pcaps/\")\n sys.exit(1)\n\n dataset_path = sys.argv[1].strip()\n if not dataset_path[-1] == '/':\n dataset_path += '/'\n\n if not dataset_path.endswith('/pcaps/'):\n print(\"DATASET_PATH should end with /pcaps/\")\n sys.exit(1)\n\n pcaps = glob(dataset_path + '**/capture.pcap', recursive=True)\n pcaps.sort()\n for p in pcaps:\n process_pcap(p)\n","repo_name":"spring-epfl/quic-wf-defenses","sub_path":"code/process-capture/parse_pcaps_into_netflows.py","file_name":"parse_pcaps_into_netflows.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"36514756352","text":"\nimport requests, csv\n\ndef mdata():\n response = requests.get(\"http://api.nbp.pl/api/exchangerates/tables/C?format=json\")\n data = response.json()\n return data\n\n\n\nif __name__ == '__main__':\n \n \n data = mdata()\n\n rates_len = len(data[0]['rates'])\n rates = data[0]['rates']\n \n tradingdate = data[0]['tradingDate']\n print(tradingdate)\n\n rates_header = list([rates[0].keys()][0])\n rates_data = []\n\n \n for i in range(rates_len):\n rates_data.append([rates[i]['currency'],rates[i]['code'],rates[i]['bid'],rates[i]['ask']])\n\n print(rates_header)\n print(rates_data)\n\n filename = f'rates{tradingdate}.csv'\n with open(filename, 'w', encoding=\"utf-8\", newline='') as file:\n csvwriter = csv.writer(file, delimiter=';')\n csvwriter.writerow(rates_header)\n csvwriter.writerows(rates_data)\n\n\n \n\n\n","repo_name":"m1122b/api_nbp","sub_path":"CSVfile.py","file_name":"CSVfile.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"7699428103","text":"from igraph import *\r\nimport itertools\r\ndef mineSearch(r, c, m):\r\n if m==r*c-1:\r\n return (r,c,range(m),m)\r\n else:\r\n g=Graph.Lattice([r, c], circular=False)\r\n additionalEdges=[(v, v+r+1) for v in [cPos*r+rPos for cPos in range(0, c-1) for rPos in range(0, r-1)]]+[(v, v+r-1) for v in [cPos*r+rPos for cPos in range(0, c-1) for rPos in range(1,r)]]\r\n g.add_edges(additionalEdges)\r\n answer=(r, c, None, None)\r\n for mines in itertools.combinations(range(r*c), m): \r\n minesAndNaturals=set([item for sublist in g.neighborhood(vertices=mines) for item in sublist])\r\n justNaturals=minesAndNaturals-set(mines)\r\n zeros=list(set(range(r*c))-set(minesAndNaturals))\r\n zerosAndZeroNeighbours=set([item for sublist in g.neighborhood(vertices=zeros) for item in sublist])\r\n justZeroNeighbours=zerosAndZeroNeighbours-set(zeros)\r\n zeroGraph=g.induced_subgraph(zeros)\r\n if zeroGraph.is_connected() and 
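A side note on `run_and_wait()` in `parse_pcaps_into_netflows.py` above: `subprocess.run()` already blocks until the child exits (or `timeout` fires) and returns a `CompletedProcess`, which has no `.pid` attribute, so the trailing `os.waitpid(p.pid, 0)` can never succeed and is silently swallowed by the bare `except`. A leaner sketch of what the function seems intended to do (making both branches tolerate timeouts is my harmonization, not the repo's behavior):

```python
import shlex
import subprocess

def run_and_wait(cmd, stdout=None, timeout=3):
    """Run a command and block until it exits or the timeout fires."""
    parts = shlex.split(cmd)
    try:
        if stdout is None:
            subprocess.run(parts, timeout=timeout)
        else:
            with open(stdout, "w") as outfile:
                subprocess.run(parts, stdout=outfile, timeout=timeout)
    except subprocess.TimeoutExpired:
        pass  # tolerate hung nfpcapd runs, as the original's bare except did
    print("Ran", cmd if stdout is None else f"{cmd} redirected to {stdout}")
```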
zeroGraph.vcount()>0 and justZeroNeighbours==justNaturals:\r\n for zero in zeros:\r\n if set(g.neighbors(zero)) <= zerosAndZeroNeighbours:\r\n source=zero\r\n break\r\n answer=(r, c, mines, source)\r\n break\r\n return answer\r\n\r\ndef prettyPrint(answer):\r\n (r, c, mines, source)=answer\r\n if mines==None:\r\n return 'Impossible'\r\n else:\r\n output=[['.']*c for _ in range(r)]\r\n for v in mines:\r\n output[v%r][v/r]='*'\r\n output[source%r][source/r]='c'\r\n output='\\n'.join([''.join(x) for x in output])\r\n return output\r\n\r\ndef minesweeperMaster(inputFileName):\r\n data = [i.strip() for i in open(inputFileName).readlines()]\r\n data = [x.split() for x in data]\r\n caseCount=int(data[0][0])\r\n # change based on how many lines there are in each case\r\n caseLength=1\r\n\r\n def extractCases(data, caseCount, caseLength):\r\n return [data[1+caseID*caseLength:1+(caseID+1)*caseLength] for caseID in range(caseCount)]\r\n \r\n cases=extractCases(data, caseCount, caseLength)\r\n \r\n def caseText(caseID, case):\r\n (r, c, m)=map(int, case[0])\r\n \r\n solution='\\n'+prettyPrint(mineSearch(r, c, m))\r\n\r\n #monitoring progress:\r\n print(caseID)\r\n \r\n #boilerplate for pretty printing\r\n caseText='Case #'+str(caseID)+': '\r\n return caseText+str(solution)+'\\n'\r\n\r\n output=[]\r\n for (caseID, case) in enumerate(cases):\r\n output.append(caseText(caseID+1, case))\r\n\r\n outputFileName=inputFileName.replace('.in','-output.txt')\r\n\r\n f=open(outputFileName, 'wb')\r\n f.write('')\r\n f.writelines(output)\r\n f.close()\r\n\r\n67, 73, 87, 142\r\n","repo_name":"alexandraback/datacollection","sub_path":"solutions_5690574640250880_0/Python/vbat/BFS2.py","file_name":"BFS2.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"} +{"seq_id":"73923838049","text":"#!/usr/bin/env python3\n# All units are in cL\nimport json\n\nimport texttable as tt\n\nfrom solera_log import SoleraLog\n\n\ndef main() -> None:\n\n log = SoleraLog()\n with open('entries.json') as json_file:\n entries = json.load(json_file)\n for entry in entries:\n log.add(**entry)\n\n tab = tt.Texttable()\n tab.header(['Name', 'Amount (cL)', 'Percentage'])\n\n total = log.total()\n summary = log.summary()\n for entry in summary:\n entry['percent'] = 100 * (entry.get('amount') / total)\n tab.add_row((\n entry.get('desc'),\n entry.get('amount'),\n entry.get('percent')\n ))\n\n print(tab.draw())\n\nif __name__ == '__main__':\n main()\n","repo_name":"AubreySLavigne/solera-calc","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"38871987295","text":"import os\nimport pickle\n\na = 123\nb = 234\nc = 345\nd = 456\nwith open(\"ceshi.pk\", \"wb\") as f:\n pickle.dump(a, f)\n pickle.dump(b, f)\n pickle.dump(c, f)\n pickle.dump(d, f)\n\n# e = 5\n# g = 6\n# with open(\"ceshi.pk\", \"rb+\") as f:\n# print(pickle.load(f))\n# pickle.dump(e, f)\n# print(pickle.load(f))\n# print(pickle.load(f))\n# pickle.dump(g, f)\n\ne = 567\ng = 678\nsize = os.path.getsize(\"ceshi.pk\")\nprint(size)\nmark = []\nwith open(\"ceshi.txt\", \"rb+\") as f:\n while 0 < size:\n size = size - 1\n f.seek(size) # 指向文件最后一个字节\n try:\n print(pickle.load(f))\n print(f.tell())\n except Exception: # 无法load时退一个字节,继续尝试load\n continue\n else: # load成功时,\n mark.append(size)\n print(mark)\n # f.seek(16)\n # print(pickle.load(f))\n # pickle.dump(e, f)\n# 
结论:(1)每个load有两个位置可以成功读取,其他位置不可读\n# (2)每次load完,自动跳到下一个load对象的开始位置\n# (3)在指定位置写入,会覆盖当前位置的load对象,并且,如果新load的对象的原load对象长度不匹配时,会造成之后的所有load对象无法读取\n\n\nprint(\"\")\n\nwith open(\"ceshi.txt\", \"rb\") as f:\n while True:\n try:\n a = pickle.load(f)\n except:\n break\n a = pickle.load(f)\nprint(a)\n","repo_name":"skymoonfp/python_learning","sub_path":"python_project/test/pickle_test.py","file_name":"pickle_test.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"7533756383","text":"debug = True\r\nimport itertools\r\n\r\ndef change(pattern, value):\r\n for i,v in enumerate(pattern):\r\n if v == 1:\r\n if value[i-1] == 1:\r\n value[i-1] = 0\r\n else:\r\n value[i-1] = 1\r\n return(value)\r\n\r\n \r\nwith open(\"A-small-attempt1.in\",\"r\") as f:\r\n with open(\"out.txt\",\"w\") as o:\r\n count = 0\r\n case = int(f.readline())\r\n while case-count>0:\r\n count += 1\r\n result = 'NOT POSSIBLE'\r\n \r\n n,l = map(int,f.readline().split())\r\n print(n,l)\r\n current = list(map(tuple,map(list, f.readline().split())))\r\n need = list(map(tuple,map(list, f.readline().split())))\r\n print(current)\r\n print(need)\r\n\r\n switch_count = 99999\r\n\r\n for switch_pattern in itertools.product([0,1], repeat=l):\r\n new_current = []\r\n for value in current:\r\n data = ()\r\n for i,v in enumerate(switch_pattern):\r\n if v == 1:\r\n if value[i] == '1':\r\n data += ('0',)\r\n else:\r\n data += ('1',)\r\n else:\r\n data += (value[i],)\r\n new_current.append(data)\r\n\r\n if len(set(new_current) & set(need)) == n:\r\n move = switch_pattern.count(1)\r\n if move < switch_count:\r\n switch_count = move\r\n \r\n if switch_count < 99999:\r\n result = switch_count\r\n \r\n ########################################\r\n # Output\r\n case_result = \"Case #%s: %s\" % (count,result)\r\n if debug:\r\n print(case_result)\r\n o.write(case_result+'\\n')\r\n \r\n \r\n","repo_name":"alexandraback/datacollection","sub_path":"solutions_5634947029139456_0/Python/TheGU/codejam-magic.py","file_name":"codejam-magic.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"} +{"seq_id":"2147389154","text":"from functools import reduce\n\nstart = '69415089'\n#start = raw_input('input factory code:\\n')\nfor i in range(10000):\n midle = ''\n if i < 1000:\n midle += '0'\n if i < 100:\n midle += '0'\n if i < 10:\n midle += '0'\n midle += str(i)\n c1 = reduce(lambda x, y: x+y,\n [int(start[j]) for j in range(len(start)) if j%2==0])\n c1 += int(midle[0]) + int(midle[2])\n c2 = reduce(lambda x, y: x+y,\n [int(start[j]) for j in range(len(start)) if j%2==1])\n c2 += int(midle[1]) + int(midle[3])\n c = 10 - (c1 + 3*c2) % 10\n if c == 10:\n c = 0\n upc = '{}{}{}'.format(start, midle, str(c))\n print(upc)\n","repo_name":"yanheven/learn-python","sub_path":"generate_upc.py","file_name":"generate_upc.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"} +{"seq_id":"7176726046","text":"import networkx as nx\nimport argparse\nimport matplotlib.pyplot as plt\nimport matplotlib.cbook as cbook\n\n\ndescription = \"\"\"Plot degree distribution\"\"\"\n\n################################\n# parse command line arguments #\n################################\nparser = argparse.ArgumentParser(\n description=description)\n\nparser.add_argument('gpickle',\n type=argparse.FileType('r'),\n help='a 
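pickle_test.py above pokes at how consecutive `pickle.dump()` calls share one file; its own conclusion (translated: each successful load advances the file position to the start of the next object) matches the standard read-until-`EOFError` idiom:

```python
import pickle

# Write several objects back-to-back, as the script does with a, b, c, d.
with open("ceshi.pk", "wb") as f:
    for obj in (123, 234, 345, 456):
        pickle.dump(obj, f)

# Read them back: each load() consumes exactly one pickled object.
objects = []
with open("ceshi.pk", "rb") as f:
    while True:
        try:
            objects.append(pickle.load(f))
        except EOFError:
            break
print(objects)  # [123, 234, 345, 456]
```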
pickled networkx graph')\n\nargs = parser.parse_args()\n\ng = nx.gpickle.read_gpickle(args.gpickle)\n\nP = {}\nfor n in g.nodes():\n if float(g.degree(n))/len(g.nodes()) in P:\n P[float(g.degree(n))/len(g.nodes())] += 1\n else:\n P[float(g.degree(n))/len(g.nodes())] = 1\n\n\nfig, ax = plt.subplots()\nax.scatter([P[k] for k in P],\n P.keys(),\n alpha=0.5)\n\nfrom pprint import pprint\n\npprint(P)\nax.set_xlabel(r'$k$', fontsize=15)\nax.set_ylabel(r'$p(k)$', fontsize=15)\n\nax.set_title('Degree vs count')\n\nax.grid(True)\nfig.tight_layout()\n\nplt.show()\n","repo_name":"rgarcia-herrera/furry-spoon","sub_path":"degree_dist.py","file_name":"degree_dist.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"32982780981","text":"import numpy as np\nfrom tensorflow.contrib import learn\n\nx_text = ['This is a cat','This must be boy', 'This is a a dog']\nmax_document_length = max([len(x.split(\" \")) for x in x_text])\n\n## Create the vocabularyprocessor object, setting the max lengh of the documents.\nvocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)\n\n## Transform the documents using the vocabulary.\nx = np.array(list(vocab_processor.fit_transform(x_text)))\n\n## Extract word:id mapping from the object.\nvocab_dict = vocab_processor.vocabulary_._mapping\n\n## Sort the vocabulary dictionary on the basis of values(id).\n## Both statements perform same task.\n#sorted_vocab = sorted(vocab_dict.items(), key=operator.itemgetter(1))\nsorted_vocab = sorted(vocab_dict.items(), key = lambda x : x[1])\n\n## Treat the id's as index into list and create a list of words in the ascending order of id's\n## word with id i goes at index i of the list.\nvocabulary = list(list(zip(*sorted_vocab))[0])\n\nr_label=[]\nlabel=[1,2,3,4,5]\n\nfor l_num in label:\n lst = [0 for _ in range(5)]\n lst[-l_num] = 1\n r_label.append(lst) # lst 범위 max_num까지 다 1로 바꾸고 r_label에 저장\nr_label = np.array(r_label)\ny = r_label\n\nprint(vocabulary)\nprint(x)\n\nnp.random.seed(10)\nshuffle_indices = np.random.permutation(np.arange(len(y)))\nx_shuffled = x[shuffle_indices]\ny_shuffled = y[shuffle_indices]\n\nprint (\"x : \",x_shuffled)\nprint (\"y : \",y_shuffled)\n","repo_name":"annie522/kitri","sub_path":"PROJ_COD/Machine/lee/test_tensor.py","file_name":"test_tensor.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"17213619163","text":"\"\"\"Self-service admin mockup.\"\"\"\n\nfrom dataclasses import asdict\nfrom dataclasses import dataclass\n\nimport fastapi\nfrom fastapi import APIRouter\nfrom fastapi import Form\nfrom fastapi import Request\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.responses import RedirectResponse\nfrom starlette.responses import Response\n\nfrom consent_api import forms\nfrom consent_api.jinja import templates\n\nrouter = APIRouter(include_in_schema=False)\nget = router.get\npost = router.post\nurl_for = router.url_path_for\n\n\n# @docs.ignore()\n@get(\"/\")\nasync def home(request: Request) -> Response:\n \"\"\"Display a welcome page and self-service sign up CTA.\"\"\"\n return templates.TemplateResponse(\n \"selfservice/home.html\",\n {\n \"num_orgs\": 1234,\n \"num_services\": 2468,\n \"request\": request,\n },\n )\n\n\n# @docs.ignore()\n@get(\"/create-account\")\ndef create_account(request: Request) -> Response:\n \"\"\"Show a signup form.\"\"\"\n return 
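In `degree_dist.py` above, `P` maps the fraction `degree/n` to a raw count, and the `scatter` call puts the counts on x and the fractions on y, so the `$k$` / `$p(k)$` labels do not match what is drawn. A sketch of a version whose axes mean what they say, using `networkx.degree_histogram` (same libraries as the script; the function name is mine):

```python
import networkx as nx
import matplotlib.pyplot as plt

def plot_degree_dist(g: nx.Graph) -> None:
    n = g.number_of_nodes()
    hist = nx.degree_histogram(g)      # hist[k] = number of nodes with degree k
    ks = range(len(hist))
    pk = [count / n for count in hist]
    fig, ax = plt.subplots()
    ax.scatter(ks, pk, alpha=0.5)
    ax.set_xlabel(r'$k$', fontsize=15)
    ax.set_ylabel(r'$p(k)$', fontsize=15)
    ax.grid(True)
    plt.show()
```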
templates.TemplateResponse(\n \"selfservice/create_account.html\",\n {\n \"form\": forms.SignUpForm(),\n \"request\": request,\n },\n )\n\n\n@dataclass\nclass UserDetails:\n \"\"\"User details form data.\"\"\"\n\n name: str = Form(...)\n email: str = Form(...)\n phone: str = Form(...)\n password: str = Form(...)\n\n\n# @docs.ignore()\n@post(\"/create-account\")\ndef post_create_account_form(\n request: Request,\n user_details: UserDetails = fastapi.Depends(),\n) -> Response:\n \"\"\"Handle create account form submission.\"\"\"\n request.session[\"user_details\"] = asdict(user_details)\n return RedirectResponse(url_for(\"signup_continue\"), status_code=302)\n\n\n# @docs.ignore()\n@get(\"/continue-signup\")\ndef signup_continue(request: Request) -> Response:\n \"\"\"Let the user know their signup confirmation email is sent.\"\"\"\n return templates.TemplateResponse(\n \"selfservice/signup_continue.html\",\n {\n \"request\": request,\n \"session\": request.session,\n },\n )\n\n\n# @docs.ignore()\n@get(\"/login\")\ndef login(request: Request) -> Response:\n \"\"\"Show a login form.\"\"\"\n return templates.TemplateResponse(\n \"selfservice/login.html\",\n {\n \"form\": forms.SignInForm(),\n \"request\": request,\n },\n )\n\n\n# @docs.ignore()\n@post(\"/login\")\ndef post_login_form() -> Response:\n \"\"\"Handle login form submission.\"\"\"\n return RedirectResponse(url_for(\"dashboard\"), status_code=302)\n\n\n# @docs.ignore()\n@get(\"/forgot_password\")\ndef forgot_password() -> Response:\n \"\"\"Show a password reset request form.\"\"\"\n return HTMLResponse(\"Password reset coming soon\")\n\n\nservices = {\n \"haas\": {\n \"name\": \"Hexagrams as a Service\",\n \"domain\": \"haas-j4f7bdslta-nw.a.run.app\",\n },\n \"juggling_licence\": {\n \"name\": \"Apply for a Juggling Licence\",\n \"domain\": \"apply-juggling-licence-j4f7bdslta-nw.a.run.app\",\n },\n}\n\n\n# @docs.ignore()\n@get(\"/dashboard\")\ndef dashboard(request: Request) -> Response:\n \"\"\"Show dashboard.\"\"\"\n return templates.TemplateResponse(\n \"selfservice/dashboard.html\",\n {\n \"services\": services,\n \"request\": request,\n },\n )\n\n\n# @docs.ignore()\n@get(\"/services/{service_id}\")\ndef service(request: Request, service_id: str) -> Response:\n \"\"\"Show service details.\"\"\"\n return templates.TemplateResponse(\n \"selfservice/service.html\",\n {\n \"request\": request,\n \"service\": services[service_id],\n },\n )\n\n\n# @docs.ignore()\n@get(\"/add-service\")\ndef add_service(request: Request) -> Response:\n \"\"\"Add a service.\"\"\"\n return templates.TemplateResponse(\n \"selfservice/add_service.html\",\n {\n \"form\": forms.ServiceForm(),\n \"request\": request,\n },\n )\n\n\n# @docs.ignore()\n@post(\"/add-service\")\ndef post_add_service_form() -> Response:\n \"\"\"Handle add service form submission.\"\"\"\n return RedirectResponse(url_for(\"service\", service_id=1), status_code=302)\n\n\n# @docs.ignore()\n@get(\"/clients\")\ndef list_clients() -> HTMLResponse:\n \"\"\"Show a list of existing client services and organisations.\"\"\"\n return HTMLResponse(\"Client list coming soon\")\n\n\n# @docs.ignore()\n@get(\"/contact\")\ndef contact_us() -> HTMLResponse:\n \"\"\"Show a contact form.\"\"\"\n return HTMLResponse(\"Contact form coming soon\")\n\n\n@get(\"/test-cors\")\ndef test_cors(request: Request) -> Response:\n \"\"\"Test CORS policy is working.\"\"\"\n return templates.TemplateResponse(\n \"selfservice/test_cors.html\",\n {\"request\": request},\n 
)\n","repo_name":"alphagov/consent-api","sub_path":"consent_api/routers/self_service.py","file_name":"self_service.py","file_ext":"py","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"89"} +{"seq_id":"21694841574","text":"import pygame\nimport sys\nimport time\nimport random\nimport numpy as np\nfrom math import inf\n\n\nclass SnakeGame:\n def __init__(self):\n # Difficulty settings\n # Easy -> 10\n # Medium -> 25\n # Hard -> 40\n # Harder -> 60\n # Impossible-> 120\n self.difficulty = 25\n self.frame_size_x = 720\n self.frame_size_y = 480\n self.games = 0\n self.bestScore = 0\n check_errors = pygame.init()\n if check_errors[1] > 0:\n print(\n f'[!] Had {check_errors[1]} errors when initialising game, exiting...')\n sys.exit(-1)\n else:\n print('[+] Game successfully initialised')\n pygame.display.set_caption('SnAIke')\n self.game_window = pygame.display.set_mode(\n (self.frame_size_x, self.frame_size_y))\n\n # Colors (R, G, B)\n self.black = pygame.Color(43, 54, 193)\n self.white = pygame.Color(255, 255, 255)\n self.red = pygame.Color(255, 0, 0)\n self.green = pygame.Color(94, 0, 9)\n self.blue = pygame.Color(0, 0, 255)\n\n # FPS (frames per second) controller\n self.fps_controller = pygame.time.Clock()\n\n # Game variables\n self.snake_pos = [360, 240]\n self.snake_body = [[360, 240], [360-10, 240], [360-(2*10), 240]]\n\n self.food_pos = [random.randrange(1, (self.frame_size_x//10))\n * 10, random.randrange(1, (self.frame_size_y//10)) * 10]\n self.food_spawn = True\n\n self.direction = 'RIGHT'\n self.change_to = self.direction\n\n self.score = 0\n self.reset_next = False\n\n self.dist = inf\n\n self.steps = 0\n self.maxsteps = 200\n self.bonus_steps = 100\n self.timed_out = False\n\n\n def reset(self):\n self.games += 1\n if(self.score > self.bestScore):\n self.bestScore = self.score\n self.difficulty = 25\n self.frame_size_x = 720\n self.frame_size_y = 480\n\n # Colors (R, G, B)\n self.black = pygame.Color(43, 54, 193)\n self.white = pygame.Color(255, 255, 255)\n self.red = pygame.Color(255, 0, 0)\n self.green = pygame.Color(94, 0, 9)\n self.blue = pygame.Color(0, 0, 255)\n\n # Game variables\n self.snake_pos = [360, 240]\n self.snake_body = [[360, 240], [360-10, 240], [360-(2*10), 240]]\n\n self.food_pos = [random.randrange(1, (self.frame_size_x//10))\n * 10, random.randrange(1, (self.frame_size_y//10)) * 10]\n\n self.food_spawn = True\n\n self.direction = 'RIGHT'\n self.change_to = self.direction\n\n self.score = 0\n self.reset_next = False\n\n self.dist = inf\n\n self.steps = 0\n self.timed_out = False\n\n def getScore(self):\n return self.score\n # Score\n\n def show_score(self, choice, color, font, size):\n score_font = pygame.font.SysFont(font, size)\n score_surface = score_font.render(\n 'Score: ' + str(self.score) + ' game ' + str(self.games) + ' best ' + str(self.bestScore), True, color)\n score_rect = score_surface.get_rect()\n if choice == 1:\n score_rect.midtop = (self.frame_size_x/10, 15)\n else:\n score_rect.midtop = (self.frame_size_x/2, self.frame_size_y/1.25)\n self.game_window.blit(score_surface, score_rect)\n\n def show(self):\n # GFX\n self.game_window.fill(self.black)\n for pos in self.snake_body:\n # Snake body\n # .draw.rect(play_surface, color, xy-coordinate)\n # xy-coordinate -> .Rect(x, y, size_x, size_y)\n pygame.draw.rect(self.game_window, self.green,\n pygame.Rect(pos[0], pos[1], 10, 10))\n\n # Snake food\n pygame.draw.rect(self.game_window, self.white, pygame.Rect(\n self.food_pos[0], 
self.food_pos[1], 10, 10))\n\n self.show_score(1, self.white, 'consolas', 20)\n # Refresh game screen\n pygame.display.update()\n # Refresh rate\n self.fps_controller.tick(self.difficulty)\n\n def takeAction(self, direction):\n self.change_to = direction\n return direction\n\n def update(self):\n # if(self.reset_next):\n # self.reset()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n # Whenever a key is pressed down\n elif event.type == pygame.KEYDOWN:\n # W -> Up; S -> Down; A -> Left; D -> Right\n if event.key == pygame.K_UP or event.key == ord('w'):\n self.change_to = 'UP'\n if event.key == pygame.K_DOWN or event.key == ord('s'):\n self.change_to = 'DOWN'\n if event.key == pygame.K_LEFT or event.key == ord('a'):\n self.change_to = 'LEFT'\n if event.key == pygame.K_RIGHT or event.key == ord('d'):\n self.change_to = 'RIGHT'\n # Esc -> Create event to quit the game\n if event.key == pygame.K_ESCAPE:\n pygame.event.post(pygame.event.Event(pygame.QUIT))\n\n # Making sure the snake cannot move in the opposite self.direction instantaneously\n if self.change_to == 'UP' and self.direction != 'DOWN':\n self.direction = 'UP'\n if self.change_to == 'DOWN' and self.direction != 'UP':\n self.direction = 'DOWN'\n if self.change_to == 'LEFT' and self.direction != 'RIGHT':\n self.direction = 'LEFT'\n if self.change_to == 'RIGHT' and self.direction != 'LEFT':\n self.direction = 'RIGHT'\n\n # Moving the snake\n if self.direction == 'UP':\n self.snake_pos[1] -= 10\n if self.direction == 'DOWN':\n self.snake_pos[1] += 10\n if self.direction == 'LEFT':\n self.snake_pos[0] -= 10\n if self.direction == 'RIGHT':\n self.snake_pos[0] += 10\n\n # Snake body growing mechanism\n self.snake_body.insert(0, list(self.snake_pos))\n if self.snake_pos[0] == self.food_pos[0] and self.snake_pos[1] == self.food_pos[1]:\n self.score += 500\n self.food_spawn = False\n self.steps -= self.bonus_steps\n else:\n self.snake_body.pop()\n # self.score -= 1\n\n # Game Over conditions\n # Getting out of bounds\n if self.snake_pos[0] < 0 or self.snake_pos[0] > self.frame_size_x-10:\n self.reset_next = True\n \n if self.snake_pos[1] < 0 or self.snake_pos[1] > self.frame_size_y-10:\n self.reset_next = True\n\n # Touching the snake body\n for block in self.snake_body[1:]:\n if self.snake_pos[0] == block[0] and self.snake_pos[1] == block[1]:\n self.reset_next = True\n\n #ran out of time\n if self.steps >= self.maxsteps:\n self.reset_next = True\n self.timed_out = True\n self.steps += 1\n\n # Spawning food on the screen\n if not self.food_spawn:\n self.food_pos = [random.randrange(1, (self.frame_size_x//10))\n * 10, random.randrange(1, (self.frame_size_y//10)) * 10]\n\n self.food_spawn = True\n\n def getStates(self):\n feature_arr = np.zeros(11)\n feature_arr[0] = 1 if self.snake_pos[0] >= self.food_pos[0] else 0\n feature_arr[1] = 1 if self.snake_pos[1] >= self.food_pos[1] else 0\n # Check if obstacle directly above\n if self.snake_pos[1] - 10 < 0:\n feature_arr[2] = 1\n else:\n for i in range(len(self.snake_body)):\n if(self.snake_body[i][0] == self.snake_pos[0] and self.snake_body[i][1] == self.snake_pos[1] - 10):\n feature_arr[2] = 1\n # Check if obstacle directly below\n if self.snake_pos[1] + 10 > self.frame_size_y:\n feature_arr[3] = 1\n else:\n for i in range(len(self.snake_body)):\n if(self.snake_body[i][0] == self.snake_pos[0] and self.snake_body[i][1] == self.snake_pos[1] + 10):\n feature_arr[3] = 1\n # Check if obstacle directly to the left\n if self.snake_pos[0] - 10 < 0:\n 
feature_arr[4] = 1\n else:\n for i in range(len(self.snake_body)):\n if(self.snake_body[i][1] == self.snake_pos[1] and self.snake_body[i][0] == self.snake_pos[0] - 10):\n feature_arr[4] = 1\n # Check if obstacle directly to the right\n if self.snake_pos[0] + 10 > self.frame_size_x:\n feature_arr[5] = 1\n else:\n for i in range(len(self.snake_body)):\n if(self.snake_body[i][1] == self.snake_pos[1] and self.snake_body[i][0] == self.snake_pos[0] + 10):\n feature_arr[5] = 1\n\n feature_arr[6] = 1 if self.direction == \"UP\" else 0\n feature_arr[7] = 1 if self.direction == \"RIGHT\" else 0\n feature_arr[8] = 1 if self.direction == \"DOWN\" else 0\n feature_arr[9] = 1 if self.direction == \"LEFT\" else 0\n\n feature_arr[10] = abs(self.food_pos[0]//10-self.snake_pos[0]//10) + \\\n abs(self.food_pos[1]//10-self.snake_pos[1]//10)\n\n return feature_arr\n\n\nif __name__ == '__main__':\n game = SnakeGame()\n while True:\n game.update()\n game.show()\n","repo_name":"lws4701/snakeAI","sub_path":"SnakeGame.py","file_name":"SnakeGame.py","file_ext":"py","file_size_in_byte":9464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"3441093373","text":"\"\"\"\ndweezil.authorizer\n~~~~~~~~~~~~~~~~~~~~~\nDweezil Authorizer Lambda\n\"\"\"\n\nimport json\nimport logging\n\nimport credstash\nfrom awacs.aws import Policy, Allow, Statement\nfrom awacs.execute_api import Invoke\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef lambda_handler(event=None, _=None):\n if not event:\n unauthorized('event is null')\n\n bearer_token = event.get('authorizationToken', '')\n token = bearer_token[7:] # strip 'Bearer '\n method_arn = event.get('methodArn', '')\n method_arn_parts = method_arn.split(':')\n if len(method_arn_parts) < 6:\n unauthorized(f\"methodArn not valid: '{method_arn}'\")\n region = method_arn_parts[3]\n acct_id = method_arn_parts[4]\n apig_parts = method_arn_parts[5].split('/')\n if len(apig_parts) < 2:\n unauthorized(f\"methodArn not valid, apig invalid: '{method_arn}'\")\n apig_id = apig_parts[0]\n stage = apig_parts[1]\n credstash_entry_name = 'dev' if stage.startswith('dev_') else stage\n try:\n secret = get_credstash_token(credstash_entry_name)\n except Exception:\n logger.error(f\"Credstash token not found for arn '${method_arn}'\")\n raise Exception('Unauthorized')\n\n if secret == token:\n logger.info('Authorization successful.')\n response = build_response(\n principal_id='dweezil-webhook',\n region=region,\n acct_id=acct_id,\n apig_id=apig_id,\n stage=stage\n )\n return response\n else:\n logger.error('Authorization has failed.')\n\n unauthorized('unknown')\n\n\ndef unauthorized(msg):\n logger.error(msg)\n raise Exception('Unauthorized')\n\n\ndef get_credstash_token(stage):\n credstash_key = 'dweezil-webhook-token-' + stage\n logger.info('Stage: ' + stage)\n logger.info('Checking token with credstash key: ' + credstash_key)\n return credstash.getSecret(credstash_key)\n\n\ndef build_response(*, principal_id, region, acct_id, apig_id, stage):\n resource_arn = \\\n f'arn:aws:execute-api:{region}:{acct_id}:{apig_id}/{stage}/*/responses'\n policy = Policy(Version=\"2012-10-17\",\n Statement=[Statement(\n Effect=Allow,\n Action=[Invoke],\n Resource=[resource_arn]\n )]\n )\n response = {'principalId': principal_id,\n 'policyDocument': json.loads(policy.to_json())}\n return 
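A minimal agent-in-the-loop sketch for the `SnakeGame` class above. `choose_action` is a placeholder policy; the point is how `getStates()` (relative food position, four danger flags, four heading flags, Manhattan distance to the food) pairs with `takeAction()`/`update()` each frame:

```python
import random

def choose_action(state):
    # Stand-in policy; a trained agent would map the 11 features to a move.
    return random.choice(['UP', 'DOWN', 'LEFT', 'RIGHT'])

game = SnakeGame()
while True:
    state = game.getStates()
    game.takeAction(choose_action(state))
    game.update()
    game.show()
    if game.reset_next:   # the class sets this on death or timeout
        game.reset()
```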
response\n","repo_name":"philvarner/zappa-example-project","sub_path":"dweezil/authorizer.py","file_name":"authorizer.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"89"} +{"seq_id":"26450296287","text":"import collections\r\n\r\n#kahn's algorithm\r\n\r\nv,e = map(int ,input().split())\r\n\r\ngraph = [[] for _ in range(v)]\r\n\r\nfor _ in range(e):\r\n src, dest = map(int, input().split())\r\n graph[src].append(dest)\r\n\r\n\r\n#implementing kahn's algorithm here...\r\n#lets find in degree\r\nin_degress = [0] * v\r\n\r\nfor i in range(v):\r\n for n in graph[i]:\r\n in_degress[n] += 1\r\n\r\n\r\n#here put node in queque whose value is 0 in in_degress array\r\nq = collections.deque([])\r\n\r\nfor i, val in enumerate(in_degress):\r\n if val == 0:\r\n q.append(i)\r\n\r\nres = [] #will be storing result here...\r\n#lets implement bfs here\r\n\r\nwhile q:\r\n temp = q.popleft()\r\n\r\n res.append(temp)\r\n\r\n for t in graph[temp]:\r\n in_degress[t] -= 1\r\n if in_degress[t] == 0:\r\n q.append(t)\r\n\r\n\r\nprint(res)","repo_name":"rishav-ish/MyCertificates","sub_path":"topologicalSort_bfs.py","file_name":"topologicalSort_bfs.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"15540441690","text":"import torch\nimport pandas as pd\n\nfrom transformers import BertTokenizer, BertModel\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\ndf = pd.read_csv('datasets/republican_comments.csv')\n\nprint('dataframe head:')\nprint(df.head())\n\ncomment1 = df.loc[0]['comment']\nprint('comment[0]', comment1)\nwrapped_comment = f\"[CLS] {comment1} [SEP]\"\ntokenized_text = tokenizer.tokenize(wrapped_comment)\nprint(tokenized_text, len(tokenized_text))\n\nindexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\nprint('token indexes')\nfor tup in zip(tokenized_text, indexed_tokens):\n print('{:<12} {:>6,}'.format(tup[0], tup[1]))\n\nsegments_ids = [1] * len(tokenized_text)\ntokens_tensor = torch.tensor([indexed_tokens]).to(device)\nsegments_tensors = torch.tensor([segments_ids]).to(device)\n\nmodel = BertModel.from_pretrained(\n 'bert-base-uncased',\n output_hidden_states=True,\n # Whether the model returns all hidden-states.\n).to(device)\n\nwith torch.no_grad():\n outputs = model(tokens_tensor, segments_tensors)\n # Evaluating the model will return a different number of objects based on\n # how it's configured in the `from_pretrained` call earlier. In this case,\n # because we set `output_hidden_states = True`, the third item will be the\n # hidden states from all layers. 
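The Kahn's-algorithm script above has a useful property worth noting: it doubles as a cycle detector. On a DAG the BFS eventually pushes every vertex into `res`; if the graph has a cycle, the vertices on it never reach in-degree 0. A check that could follow the loop in `topologicalSort_bfs.py` (it reuses the script's `res` and `v`):

```python
if len(res) == v:
    print(res)  # a valid topological order
else:
    print(f"cycle detected: only {len(res)} of {v} vertices could be ordered")
```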
See the documentation for more details:\n # https://huggingface.co/transformers/model_doc/bert.html#bertmodel\n hidden_states = outputs[2]\n print(hidden_states)\n\n token_embeddings = torch.stack(hidden_states, dim=0)\n print('embeddings shape', token_embeddings.size())\n # embeddings shape torch.Size([13, 1, 209, 768])\n # [# layers, # batches, # tokens, # features]\n # Desired dimensions:\n # [# tokens, # layers, # features]\n\n # Let’s get rid of the “batches” dimension since we don’t need it.\n token_embeddings = torch.squeeze(token_embeddings, dim=1)\n # swap layers and tokens dimensions\n token_embeddings = token_embeddings.permute(1, 0, 2)\n print(token_embeddings.size())\n\n embedding = token_embeddings[12, -2, :]\n print(embedding, embedding.size())\n # torch.Size([209, 13, 768])","repo_name":"milselarch/02-137DH","sub_path":"bert_test.py","file_name":"bert_test.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"} +{"seq_id":"7241463586","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef main():\n signal = np.random.rand(100)\n convoluted_signal = signal\n\n figure, axes = plt.subplots(2, 2, layout=\"constrained\")\n figure.suptitle(\"Repeated convolutions of a randomly generated signal\")\n figure.set_size_inches(20, 15)\n\n axes[0][0].plot(signal)\n axes[0][0].set_title(\"Original signal\")\n\n for i in range(3):\n convoluted_signal = np.convolve(convoluted_signal, signal, mode=\"full\")\n\n axes[(i+1)//2][(i+1)%2].plot(convoluted_signal)\n axes[(i+1)//2][(i+1)%2].set_title(f\"Iteration #{i+1}\")\n\n plt.savefig(\"exercise-1.png\")\n plt.savefig(\"exercise-1.pdf\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"playback0022/Signal-Processing","sub_path":"Lab 6/exercise-1/exercise-1.py","file_name":"exercise-1.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"33694848350","text":"from shapes.shape import Shape\n\nimport numpy as np\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\n\nclass Sphere(Shape):\n def __init__(self, **lis):\n super().__init__()\n \n \n self.title = '{0:}'.format(\n Shape.store_fg['объём'][0]\n )\n\n @staticmethod\n def random_sphere(fig, ax):\n u = np.linspace(0, 2 * np.pi, 100)\n v = np.linspace(0, np.pi, 100)\n\n x = 2 * np.outer(np.cos(u), np.sin(v))\n y = 2 * np.outer(np.sin(u), np.sin(v))\n z = 2 * np.outer(np.ones(np.size(u)), np.cos(v))\n\n ax.plot_surface(x, y, z)\n return fig\n\n\nclass Cube(Shape):\n def __init__(self, **lis):\n super().__init__()\n \n \n self.title = '{0:}'.format(\n Shape.store_fg['объём'][1]\n )\n\n @staticmethod\n def random_cube(fig, ax):\n # Создание списка вершин куба\n vertices = np.array([(x, y, z)\n for x in [0, 1]\n for y in [0, 1]\n for z in [0, 1]])\n # Создание списка граней куба\n faces = np.array([(0, 1, 3, 2),\n (4, 5, 7, 6),\n (0, 1, 5, 4),\n (2, 3, 7, 6),\n (0, 2, 6, 4),\n (1, 3, 7, 5)])\n # Использование созданных списков вершин и граней для построения куба\n ax.add_collection3d(\n Poly3DCollection([vertices[face] for face in faces], alpha=.25, facecolor='b')\n )\n return fig\n\n\nclass Paralld(Shape):\n def __init__(self, **lis):\n super().__init__()\n \n \n self.title = '{0:}'.format(\n Shape.store_fg['объём'][2]\n )\n\n @staticmethod\n def random_paralld(fig, ax):\n # Создание списка вершин параллелепипеда\n vertices = np.array([(0, 0, 0),\n (1, 0, 0),\n (1, 1, 
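bert_test.py above permutes `hidden_states` into `token_embeddings` of shape `[tokens, layers, features]` and then slices a single layer. A common alternative, not in the script itself, is summing the last four hidden layers per token; a sketch that reuses the script's `token_embeddings` and `torch`:

```python
token_vecs = []
for token in token_embeddings:             # token has shape [13, 768]
    summed = torch.sum(token[-4:], dim=0)  # sum the last 4 encoder layers
    token_vecs.append(summed)
print(len(token_vecs), token_vecs[0].size())  # n_tokens torch.Size([768])
```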
0),\n (0, 1, 0),\n (0, 0, 1),\n (1, 0, 1),\n (1, 1, 1),\n (0, 1, 1)])\n\n # Создание списка граней параллелепипеда\n faces = np.array([(0, 1, 2, 3),\n (0, 1, 5, 4),\n (1, 2, 6, 5),\n (2, 3, 7, 6),\n (3, 0, 4, 7),\n (4, 5, 6, 7)])\n\n # Использование созданных списков вершин и граней для построения параллелепипеда\n ax.add_collection3d(\n Poly3DCollection([vertices[face] for face in faces], alpha=.25, facecolor='b')\n )\n return fig\n\n\nclass Pyramd(Shape):\n def __init__(self, **lis):\n super().__init__()\n \n \n self.title = '{0:}'.format(\n Shape.store_fg['объём'][3]\n )\n\n @staticmethod\n def random_pyramd(fig, ax):\n # Координаты вершин пирамиды\n vertices = np.array([(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0), (0.5, 0.5, 1)])\n\n # Номера вершин для построения каждой грани\n faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 4), (1, 2, 4), (2, 3, 4), (3, 0, 4)])\n\n # Использование созданных списков вершин и граней для построения пирамиды\n ax.add_collection3d(Poly3DCollection([vertices[face] for face in faces], alpha=.25, facecolor='b'))\n return fig\n\n\nclass Cilind(Shape):\n def __init__(self, **lis):\n super().__init__()\n \n \n self.title = '{0:}'.format(\n Shape.store_fg['объём'][4]\n )\n\n @staticmethod\n def random_cilind(fig, ax):\n # Определение параметров цилиндра\n r = 1\n h = 2\n resolution = 50\n\n # Генерация координат вершин цилиндра\n theta = np.linspace(0, 2 * np.pi, resolution)\n z = np.linspace(0, h, resolution)\n theta, z = np.meshgrid(theta, z)\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n\n # Использование созданных координат для построения поверхности цилиндра\n ax.plot_surface(x, y, z, alpha=0.7)\n return fig\n\n\nclass Conus(Shape):\n def __init__(self, **lis):\n super().__init__()\n \n \n self.title = '{0:}'.format(\n Shape.store_fg['объём'][5]\n )\n\n @staticmethod\n def random_conus(fig, ax):\n # Определение параметров конуса\n r = 1\n h = 2\n resolution = 50\n\n # Генерация координат вершин конуса\n theta = np.linspace(0, 2 * np.pi, resolution)\n z = np.linspace(0, h, resolution)\n theta, z = np.meshgrid(theta, z)\n x = r * (h - z) / h * np.cos(theta)\n y = r * (h - z) / h * np.sin(theta)\n\n # Использование созданных координат для построения поверхности конуса\n ax.plot_surface(x, y, z, alpha=0.7)\n return fig","repo_name":"PenguinLW/geometr_calc","sub_path":"shapes/v_shapes.py","file_name":"v_shapes.py","file_ext":"py","file_size_in_byte":5528,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"6751649474","text":"import smtplib\nimport time\nimport sys\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\nif __name__ == '__main__':\n \n # 发送邮件服务器地址\n smtp_server = 'smtp.163.com'\n # 发送方账号\n sender = 'lewamlyn@163.com'\n # 发送方密码(或授权密码)\n password = str(sys.argv[1])\n\n # 收件方邮箱\n # jin mei ju hao jiao\n names = ['12207137@zju.edu.cn','852654283@qq.com','982135472@qq.com','854694132@qq.com','lxj123@nwafu.edu.cn']\n if time.localtime().tm_wday > 4:\n change = 0 \n # 获取轮班信息\n try:\n with open('./week.txt', 'r') as f:\n for line in f.readlines():\n change = int(line)\n receiver = names[change]\n change = (change + 1) % 5\n print(change)\n f.close\n with open('./week.txt', 'w') as f:\n f.write(str(change))\n f.close()\n except:\n print('无本地存储信息')\n with open('./week.txt', 'w') as f:\n f.write(str(time.localtime().tm_wday)) \n f.close()\n else:\n receiver = names[time.localtime().tm_wday]\n\n # 邮件标题\n subject = 'SCDA小助手上班啦!(●ˇ∀ˇ●)'\n # 邮件内容\n print(time.localtime().tm_hour)\n if 
time.localtime().tm_hour < 6:\n        mail_msg = \"\"\"\n        每日日报:每日新闻 + 宣讲会\n        浙大就业指导与服务中心\n        \"\"\"\n    else: \n        mail_msg = \"\"\"\n        社群招聘信息汇总\n        \"\"\"\n    print(receiver)\n\n    message = MIMEText(mail_msg, 'html', 'utf-8') # 发送内容 (文本内容,发送格式,编码格式)\n    # 发送地址\n    message['From'] = sender\n    # 接受地址\n    message['To'] = receiver\n    # 邮件标题\n    message['Subject'] = Header(subject,'utf-8')\n\n    try:\n        smtp = smtplib.SMTP()\n        smtp.connect(smtp_server)\n        smtp.login(sender, password)\n        smtp.sendmail(sender, receiver, message.as_string())\n        print('success:发送成功')\n    except smtplib.SMTPException:\n        print('error:邮件发送失败')\n    finally:\n        smtp.quit()\n","repo_name":"lewamlyn/spider","sub_path":"mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"}
+{"seq_id":"11374113364","text":"def prime_numbers_generator(n):\r\n    if not isinstance(n, int):\r\n        return \"Only integers allowed\"\r\n\r\n    elif n < 0:\r\n        return \"Invalid input, only positive numbers allowed\"\r\n\r\n    elif n == 0:\r\n        return \"No prime numbers between 0 and 0\"\r\n\r\n    else:\r\n        for x in range(2, n + 1):\r\n            for y in range(2, x):\r\n                if (x % y == 0):\r\n                    break\r\n\r\n            \r\n            else:\r\n                print(x)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"IamMutevu/andela-self-learning-clinic","sub_path":"Day 1/prime_numbers_generator.py","file_name":"prime_numbers_generator.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"}
+{"seq_id":"16256673186","text":"import os\n\n\ndef test_log(tmpdir):\n    from robocorp_ls_core.robotframework_log import get_logger, configure_logger\n    from robocorp_ls_core.unittest_tools.fixtures import wait_for_test_condition\n    import io\n\n    somedir = str(tmpdir.join(\"somedir\"))\n    configure_logger(\"test\", 2, os.path.join(somedir, \"foo.log\"))\n\n    log = get_logger(\"my_logger\")\n    log.info(\"something\\nfoo\\nbar\")\n\n    try:\n        raise AssertionError(\"someerror\")\n    except:\n        log.exception(\"rara: %s - %s\", \"str1\", \"str2\")\n\n    def get_log_files():\n        log_files = [\n            x for x in os.listdir(somedir) if x.startswith(\"foo\") and x.endswith(\".log\")\n        ]\n        return log_files if log_files else None\n\n    wait_for_test_condition(\n        get_log_files, msg=lambda: \"Found: %s in %s\" % (get_log_files(), somedir)\n    )\n    log_files = get_log_files()\n\n    with open(os.path.join(somedir, log_files[0]), \"r\") as stream:\n        contents = stream.read()\n        assert \"someerror\" in contents\n        assert \"something\" in contents\n        assert \"rara\" in contents\n        assert \"rara: str1 - str2\" in contents\n\n    log_file = io.StringIO()\n    with configure_logger(\"\", 2, log_file):\n        log.info(\"in_context\")\n\n    log.info(\"out_of_context\")\n\n    with open(os.path.join(somedir, log_files[0]), \"r\") as stream:\n        contents = stream.read()\n        assert \"out_of_context\" in contents\n        assert \"in_context\" not in contents\n\n    assert \"out_of_context\" not in log_file.getvalue()\n    assert \"in_context\" in log_file.getvalue()\n","repo_name":"robocorp/robotframework-lsp","sub_path":"robocorp-python-ls-core/tests/robocorp_ls_core_tests/test_log.py","file_name":"test_log.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":179,"dataset":"github-code","pt":"89"}
+{"seq_id":"35137449453","text":"import requests\n\nfor i in range (0,1000):\n    \n\n    tgturl = \"http://10.10.11.125/wp-content/plugins/ebook-download/filedownload.php?ebookdownloadurl=/proc/\" + str(i) + \"/cmdline\"\n    req = requests.get(tgturl)\n    length = len(req.content)\n    if (length > 90):\n        print(\"PID: \" + 
str(i))\n print(\" URL: \" + tgturl+'\\n')\n print(\" Response: \" + str(req.content) + '\\n')\n\n\n","repo_name":"ths18/career-info-tracking","sub_path":"HackTheBox Tracking/Machine Completion/Backdoor/BackingFiles/brutepid.py","file_name":"brutepid.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"89"} +{"seq_id":"26648180758","text":"class StatusItem:\n def __init__(self, word : str = str(), value = None):\n self.content = {\n \"aborted\" : {\"title\": \"Aborted\", \"icon\": \"⛔️\"},\n \"cancelled\" : {\"title\": \"Cancelled\", \"icon\": \"❌\"},\n \"cleaning\" : {\"title\": \"Cleaning\", \"icon\": \"🧹\"},\n \"compiling\" : {\"title\": \"Compiling\", \"icon\": \"🔨\"},\n \"complete\" : {\"title\": \"Complete\", \"icon\": \"✅\"},\n \"connecting\" : {\"title\": \"Connecting\", \"icon\": \"🔗\"},\n \"deleting\" : {\"title\": \"Deleting\", \"icon\": \"🗑\"},\n \"disconnecting\": {\"title\": \"Disconnecting\", \"icon\": \"🔌\"},\n \"downloading\" : {\"title\": \"Downloading\", \"icon\": \"⬇️\"},\n \"error\" : {\"title\": \"Error\", \"icon\": \"❗️\"},\n \"exporting\" : {\"title\": \"Exporting\", \"icon\": \"📤\"},\n \"failure\" : {\"title\": \"Failure\", \"icon\": \"❌\"},\n \"finished\" : {\"title\": \"Finished\", \"icon\": \"🎉\"},\n \"idle\" : {\"title\": \"Idle\", \"icon\": \"🕛\"},\n \"importing\" : {\"title\": \"Importing\", \"icon\": \"📥\"},\n \"installing\" : {\"title\": \"Installing\", \"icon\": \"🔧\"},\n \"loading\" : {\"title\": \"Loading\", \"icon\": \"⏳\"},\n \"paused\" : {\"title\": \"Paused\", \"icon\": \"⏸\"},\n \"pending\" : {\"title\": \"Pending\", \"icon\": \"🕒\"},\n \"progress\" : {\"title\": \"Progress\", \"icon\": \"🔄\"},\n \"receiving\" : {\"title\": \"Receiving\", \"icon\": \"📩\"},\n \"refreshing\" : {\"title\": \"Refreshing\", \"icon\": \"🔄\"},\n \"rendering\" : {\"title\": \"Rendering\", \"icon\": \"🎨\"},\n \"restarting\" : {\"title\": \"Restarting\", \"icon\": \"🔄\"},\n \"resuming\" : {\"title\": \"Resuming\", \"icon\": \"▶️\"},\n \"running\" : {\"title\": \"Running\", \"icon\": \"🏃\"},\n \"saving\" : {\"title\": \"Saving\", \"icon\": \"💾\"},\n \"scanning\" : {\"title\": \"Scanning\", \"icon\": \"🔍\"},\n \"sending\" : {\"title\": \"Sending\", \"icon\": \"📤\"},\n \"size\" : {\"title\": \"Size\", \"icon\": \"📐\"},\n \"started\" : {\"title\": \"Started\", \"icon\": \"🚀\"},\n \"success\" : {\"title\": \"Success\", \"icon\": \"✅\"},\n \"syncing\" : {\"title\": \"Syncing\", \"icon\": \"🔄\"},\n \"uninstalling\" : {\"title\": \"Uninstalling\", \"icon\": \"🔧\"},\n \"updating\" : {\"title\": \"Updating\", \"icon\": \"🔃\"},\n \"uploading\" : {\"title\": \"Uploading\", \"icon\": \"⬆️\"},\n \"validating\" : {\"title\": \"Validating\", \"icon\": \"✅\"},\n \"verifying\" : {\"title\": \"Verifying\", \"icon\": \"✅\"},\n \"waiting\" : {\"title\": \"Waiting\", \"icon\": \"⌛️\"},\n }\n self.display = {\"status\": \"\", \"value\": \"\"}\n self.set(word or \"\", value)\n\n def set(self, word: str, value : str|float|int|bool = None):\n \"\"\"\n outtputto display: an icon and status word, as well as the state if exists\n \"\"\"\n if word in self.content:\n self.display[\"status\"] = f\"{ self.content[word]['icon']} {self.content[word]['title']}\"\n else:\n self.display[\"status\"] = word or \"\"\n\n if value in self.content:\n self.display[\"value\"] = f\"{ self.content[str(value)]['title']}\"\n else:\n self.display[\"value\"] = str(value)\n return self.display\n\n def response_code(self, code : int) -> str:\n 
\"\"\"\n Returns the HTTP response code as a string representation.\n \"\"\"\n response_codes: dict[int, str] = {\n 100: \"Continue\",\n 101: \"Switching Protocols\",\n 102: \"Processing\",\n 103: \"Early Hints\",\n 200: \"OK\",\n 201: \"Created\",\n 202: \"Accepted\",\n 203: \"Non-Authoritative Information\",\n 204: \"No Content\",\n 205: \"Reset Content\",\n 206: \"Partial Content\",\n 207: \"Multi-Status\",\n 208: \"Already Reported\",\n 226: \"IM Used\",\n 300: \"Multiple Choice\",\n 301: \"Moved Permanently\",\n 302: \"Found\",\n 303: \"See Other\",\n 304: \"Not Modified\",\n 305: \"Use Proxy\",\n 307: \"Temporary Redirect\",\n 308: \"Permanent Redirect\",\n 400: \"Bad Request\",\n 401: \"Unauthorized\",\n 402: \"Payment Required\",\n 403: \"Forbidden\",\n 404: \"Not Found\",\n 405: \"Method Not Allowed\",\n 406: \"Not Acceptable\",\n 407: \"Proxy Authentication Required\",\n 408: \"Request Timeout\",\n 409: \"Conflict\",\n 410: \"Gone\",\n 411: \"Length Required\",\n 412: \"Precondition Failed\",\n 413: \"Payload Too Large\",\n 414: \"URI Too Long\",\n 415: \"Unsupported Media Type\",\n 416: \"Range Not Satisfiable\",\n 417: \"Expectation Failed\",\n 418: \"I'm a teapot\",\n 421: \"Misdirected Request\",\n 422: \"Unprocessable Entity\",\n 423: \"Locked\",\n 424: \"Failed Dependency\",\n 425: \"Too Early\",\n 426: \"Upgrade Required\",\n 428: \"Precondition Required\",\n 429: \"Too Many Requests\",\n 431: \"Request Header Fields Too Large\",\n 451: \"Unavailable For Legal Reasons\",\n 500: \"Internal Server Error\",\n 501: \"Not Implemented\",\n 502: \"Bad Gateway\",\n 503: \"Service Unavailable\",\n 504: \"Gateway Timeout\",\n 505: \"HTTP Version Not Supported\",\n 506: \"Variant Also Negotiates\",\n 507: \"Insufficient Storage\",\n 508: \"Loop Detected\",\n 510: \"Not Extended\",\n 511: \"Network Authentication Required\",\n }\n\n if code in response_codes:\n self.display['status'] = 'HTTP Respopnse Code'\n self.display['value'] = response_codes[code]\n else:\n return \"Unknown\"\n\n return self.display\n\nclass Items:\n \"\"\" List of Items Container \"\"\"\n def __init__(self):\n self.items : list[StatusItem] = []\n\n def add_item(self, item : StatusItem):\n self.items.append(item)\n\n def remove_item(self, item : StatusItem):\n self.items.remove(item)\n\n def clear_items(self):\n self.items.clear()\n\n def __len__(self):\n return len(self.items)\n\n def __iter__(self):\n return iter(self.items)\n","repo_name":"kaigouthro/minorscrapes","sub_path":"statwords.py","file_name":"statwords.py","file_ext":"py","file_size_in_byte":6528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"35768227639","text":"def get_next_direction(directions):\n if len(directions) == 0:\n return ('','')\n if len(directions) >= 2:\n if directions[:2] == 'nw':\n return ('ne', directions[1:])\n elif directions[:2] == 'se':\n return ('sw', directions[1:])\n if directions[0] == 'n' or directions[0] == 's':\n return (directions[:2], directions[2:])\n else:\n return (directions[:1], directions[1:])\n\ndef find_tile_coords(directions):\n coords = (0,0)\n while directions != \"\":\n nextdir, directions = get_next_direction(directions)\n if nextdir == 'e':\n coords = (coords[0]+1, coords[1])\n elif nextdir == 'ne':\n coords = (coords[0]+1, coords[1]+1)\n elif nextdir == 'w':\n coords = (coords[0]-1, coords[1])\n elif nextdir == 'sw':\n coords = (coords[0]-1, coords[1]-1)\n return coords\n\n\ntiles = set()\nwith open(\"input.txt\") as inp:\n for line in 
inp.readlines():\n coords = find_tile_coords(line.strip())\n if coords in tiles:\n tiles.remove(coords)\n else:\n tiles.add(coords)\n\nprint(len(tiles))","repo_name":"colopop/Advent2020","sub_path":"24/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"33658232075","text":"from dotenv import dotenv_values\n\ndotenv_config = dotenv_values(\".env\")\n\n# If there is no key in the .env file, then use an empty string\nAPI_KEY = dotenv_config.get(\"API_KEY\") or \"\"\n\ncache_config = {\n \"CACHE_TYPE\": \"SimpleCache\", # Flask-Caching related configs\n \"CACHE_DEFAULT_TIMEOUT\": 60 * 5, # 5 minutes of default timeout for Flask-Caching\n}\n","repo_name":"kseikyo/weather-app","sub_path":"server/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"70845938530","text":"import copy\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.compat.v1 as tfv1\nfrom tensorflow.keras import backend as K\nfrom collections import OrderedDict\nfrom ...common.defs import FLOAT_EQUAL\nfrom ...logger import INFO, DEBUG, WARN, ERROR, FATAL\n\n\ndef get_nodes_input_and_attr(configs):\n '''Get the inputs of attr of all the nodes from model config.\n For constants, it's not saved as a layer by tf, so record all the nodes' name\n to create Const node for them later.\n '''\n def flatten(mixed_nested_list):\n if isinstance(mixed_nested_list, list):\n for item in mixed_nested_list:\n yield from flatten(item)\n else:\n yield mixed_nested_list\n\n input_nodes_dict = {}\n if not configs or 'layers' not in configs.keys():\n return input_nodes_dict\n\n for layer_configs in configs['layers']:\n inbound_nodes = layer_configs.get('inbound_nodes', [])\n node_name = layer_configs.get('name', '')\n if not inbound_nodes or not node_name:\n continue\n # input_info_dict is a dict, whose key is kwarg name or index, and value is a tuple with 5 items:\n # input node name, dst out port, whether is control edge, whether is constant, value(None if not constant)\n input_info_dict = {}\n flatten_inbound_nodes = list(flatten(inbound_nodes))\n nodes_iter = iter(flatten_inbound_nodes)\n for node in nodes_iter:\n if isinstance(node, str):\n next(nodes_iter)\n if node == '_CONSTANT_VALUE':\n const_value = next(nodes_iter)\n const_node_name = node_name + '/' + node\n input_node_info = (const_node_name, 0, False, True, const_value)\n input_info_dict.update({node: input_node_info})\n continue\n tensor_index = next(nodes_iter)\n # TODO: Consider control edge(the third arg means is_control)\n input_node_info = (node, tensor_index, False, False, None)\n input_info_dict.update({node: input_node_info})\n elif isinstance(node, dict):\n for key, value in node.items():\n if key == 'name':\n continue\n if isinstance(value, list) and len(value) == 3 and isinstance(value[0], str):\n # node name and its parent's out port\n input_node_info = (value[0], value[2], False, False, None)\n input_info_dict.update({key: input_node_info})\n else:\n input_node_info = (node_name + '/' + key, 0, False, True, value)\n input_info_dict.update({key: input_node_info})\n input_nodes_dict.update({node_name: input_info_dict})\n return input_nodes_dict\n\n\ndef get_node_attr(layer):\n ret = {}\n try:\n layer_config = layer.get_config()\n if isinstance(layer_config, dict):\n ret.update(layer_config)\n except Exception 
as e:\n DEBUG('[Parser]: Fail to get config for layer (%s) because %s' % (layer.name, str(e)))\n\n if type(layer).__name__ == 'Lambda':\n from ..tf.load import get_possible_outputs, parse_graph_def\n graph_def = get_lambda_graph_def(layer)\n output_names = get_possible_outputs(graph_def)\n params = {'input_names': [], 'input_shapes': {},\n 'output_names': output_names}\n ret.update({'subgraph_nodes': parse_graph_def(graph_def, params)[0],\n 'subgraph_output_names': output_names})\n\n for key in layer.__dir__():\n if key.startswith('_'):\n # Ignore internal attributes\n continue\n if key in ('activity_regularizer', 'build', 'built', 'call', 'compute_dtype', 'count_params',\n 'dynamic', 'dtype_policy', 'finalize_state', 'get_config',\n 'inbound_nodes', 'input', 'input_mask', 'input_spec',\n 'losses', 'metric', 'metrics', 'name_scope', 'non_trainable_variables', 'non_trainable_weights',\n 'outbound_nodes', 'output', 'output_mask', 'OVERLOADABLE_OPERATORS',\n 'symbol', 'stateful', 'states', 'state_spec', 'submodules', 'supports_masking',\n 'trainable_weights', 'trainable_variables',\n 'updates', 'variables', 'variable_dtype', 'with_name_scope'):\n # Ignore inputs/outputs and other attributes that are not used\n continue\n try:\n value = eval('layer.' + key)\n if eval('callable(layer.' + key + ')'):\n if any(key.startswith(func) for func in ('add_', 'apply', 'compute_', 'from_', 'get_', 'reset_', 'set_')) \\\n or any(key.endswith(func) for func in ('_initializer', '_constraint')) \\\n or '__name__' not in dir(value):\n # Ignore functions that are not used\n continue\n func_name = value.__name__\n ret.update({key: func_name})\n elif key == 'weights' and isinstance(value, list):\n weights_list = []\n for variable in value:\n weights_list.append(variable.numpy())\n key = 'weights_list'\n ret.update({key: weights_list})\n if len(value) == 2:\n try:\n biases = layer.bias.numpy()\n except:\n biases = None\n if biases is not None and FLOAT_EQUAL(weights_list[1], biases):\n ret.update({'weights': weights_list[0]})\n elif len(value) == 1:\n ret.update({'weights': weights_list[0]})\n elif 'numpy' in dir(value):\n ret.update({key: value.numpy()})\n else:\n ret.update({key: copy.deepcopy(value)})\n except Exception as e:\n DEBUG('[Parser]: Fail to get key (%s) for layer (%s) because %s' % (key, layer.name, str(e)))\n continue\n DEBUG('[Parser]: layer: %s, key: %s, value: %s' % (layer.name, key, str(ret[key])))\n return ret\n\n\ndef get_node_type(layer):\n layer_type = type(layer).__name__\n if layer_type == 'TFOpLambda':\n node_type = layer.get_config().get('function', layer_type)\n else:\n node_type = layer_type\n\n opcode_version = 1 if 'compat.v1' in node_type else 2\n\n # Add prefix 'Keras' for keras op type to distiguish from raw ops\n if node_type in dir(tf.keras.layers):\n node_type = 'Keras' + node_type\n return (node_type, opcode_version)\n\n # Remove prefix like 'math.', 'nn.' 
and etc\n node_type = node_type.split('.')[-1]\n\n return (node_type, opcode_version)\n\n\ndef get_node_input(layer, input_info_dict):\n assert isinstance(input_info_dict, dict), 'Expect input_info_dict to be a dict!'\n arg_pos_dict = layer._call_fn_arg_positions\n arg_defaults_dict = layer._call_fn_arg_defaults\n\n if not input_info_dict:\n return []\n\n if not arg_pos_dict:\n return [value for _, value in input_info_dict.items()]\n\n node_input_info = []\n inbound_nodes_cnt = len(layer.inbound_nodes)\n\n for arg_name, arg_pos in arg_pos_dict.items():\n if arg_pos < inbound_nodes_cnt:\n input_tensors = layer.get_input_at(arg_pos)\n if isinstance(input_tensors, (list, tuple)):\n if len(input_tensors) == 0:\n continue\n else:\n if not tf.is_tensor(input_tensors):\n continue\n elif 'numpy' in dir(input_tensors) and '_CONSTANT_VALUE' in input_info_dict:\n input_info = input_info_dict['_CONSTANT_VALUE']\n node_input_info.append(input_info)\n continue\n inbound_layers = layer.inbound_nodes[arg_pos].inbound_layers\n inbound_layers = inbound_layers if isinstance(inbound_layers, (list, tuple)) else [inbound_layers]\n inbound_nodes = [node.name for node in inbound_layers]\n for node_name in inbound_nodes:\n if node_name in input_info_dict:\n input_info = input_info_dict[node_name]\n node_input_info.append(input_info)\n else:\n WARN('[Parser]: Meet invalid node (%s) in get_node_input!' % node_name)\n elif arg_name in input_info_dict \\\n and len(input_info_dict[arg_name]) == 5:\n input_info = input_info_dict[arg_name]\n node_input_info.append(input_info)\n elif arg_name in arg_defaults_dict:\n value = arg_defaults_dict[arg_name]\n input_info = (layer.name + '/' + arg_name, 0, False, True, value)\n node_input_info.append(input_info)\n else:\n WARN('[Parser]: Missing node (%s) in get_node_input!' 
% arg_name)\n return node_input_info\n\n\ndef get_const_node_content(node_name, const_value):\n '''Create Const node for inputs that are not shown as model layer.\n '''\n ret = {}\n if const_value is not None:\n const_value = np.array(const_value)\n else:\n const_value = np.array(None)\n out_tensor_name = node_name + ':0'\n ret = {'name': node_name,\n 'type': 'constant',\n 'input': [],\n 'output': [(out_tensor_name, list(const_value.shape))],\n 'attr': {'value': const_value, 'dtype': const_value.dtype.name},\n 'opcode_version': 2\n }\n return ret\n\n\ndef get_lambda_graph_def(layer):\n def _get_input_tensor(layer_input):\n input_shape = list(layer_input.shape)\n input_dtype = layer_input.dtype.as_numpy_dtype\n func_input = np.random.ranf(input_shape).astype(input_dtype)\n return func_input\n\n lambda_func = tf.function(layer)\n layer_input = layer.input\n if isinstance(layer_input, (list, tuple)):\n func_input = []\n for inp in layer_input:\n func_input.append(_get_input_tensor(inp))\n else:\n func_input = _get_input_tensor(layer_input)\n tfv1.reset_default_graph()\n graph_def = lambda_func.get_concrete_function(func_input).graph.as_graph_def()\n return graph_def\n\n\ndef get_node_content(layer):\n layer_outputs = layer.output if isinstance(\n layer.output, (list, tuple)) else [layer.output]\n output_name_shape = [(out.name, out.shape.as_list() if out.shape is not None else [])\n for out in layer_outputs if hasattr(out, 'shape')]\n DEBUG('layer: %s, output: %s' % (layer.name, str(output_name_shape)))\n node_type, opcode_version = get_node_type(layer)\n ret = {'name': layer.name,\n 'type': node_type,\n 'output': output_name_shape,\n 'attr': get_node_attr(layer),\n 'opcode_version': opcode_version\n }\n return ret\n\n\ndef get_nodes_content(layers, model_configs):\n layers = layers if isinstance(layers, list) else [layers]\n inputs_info_dict = get_nodes_input_and_attr(model_configs)\n nodes_content = []\n for layer in layers:\n node_content = get_node_content(layer)\n input_info_dict = inputs_info_dict.get(layer.name, {})\n node_input_info = get_node_input(layer, input_info_dict)\n node_content.update({\n 'input': [(name, src_out_port, control_edge) for name, src_out_port, control_edge, _, _ in node_input_info]})\n nodes_content.append(node_content)\n const_nodes = [(name, value) for name, _, _, is_const,\n value in node_input_info if is_const]\n for name, value in const_nodes:\n nodes_content.append(get_const_node_content(name, value))\n return nodes_content\n\n\ndef parse_keras(model_path, params):\n nodes = list()\n nodes_dict, tensors, np_tensors = OrderedDict(), OrderedDict(), OrderedDict()\n input_shapes = params['input_shapes'].copy()\n try:\n load_options = tf.saved_model.LoadOptions(\n allow_partial_checkpoint=True)\n model = tf.keras.models.load_model(\n model_path, compile=False, options=load_options)\n except Exception as e:\n WARN('[Parser]: Reading saved model/h5 file (%s) meets error (%s)!' 
%\n (model_path, str(e)))\n return nodes, nodes_dict, tensors, np_tensors, input_shapes\n\n nodes = get_nodes_content(model.layers, model.get_config())\n model_inputs = model.inputs\n model_inputs_names = model.input_names\n\n for n in nodes:\n if n['name'] in model_inputs_names and n['name'] not in input_shapes:\n tensor_shape = n['output'][0][1]\n input_shapes.update({n['name']: tensor_shape})\n if n['type'] == 'constant' and n['attr'].get('value', None) is not None:\n const_tensor_name = n['output'][0][0]\n const_tensor_value = n['attr']['value']\n np_tensors.update({const_tensor_name: const_tensor_value})\n nodes_dict.update({n['name']: n})\n\n feed_model_inputs = []\n for model_input_name, model_input in zip(model_inputs_names, model_inputs):\n model_input_shape = input_shapes[model_input_name]\n if any([d is None for d in model_input_shape]):\n WARN(\n '[Parser]: Found None in the shape of Input (%s): %s!' %\n (model_input_name, str(model_input_shape)))\n feed_model_inputs.append([])\n continue\n try:\n type_str = model_input.dtype.name\n except Exception as e:\n WARN('[Parser]: Meets error when getting dtype of input tensor (%s): %s!' %\n (model_input, str(e)))\n type_str = 'float32'\n np_tensor = np.random.randint(0, 1, size=model_input_shape).astype(type_str) \\\n if 'int' in type_str \\\n else np.random.ranf(model_input_shape).astype(type_str)\n feed_model_inputs.append(np_tensor)\n np_tensors.update({model_input.name: np_tensor})\n outputs = []\n for layer in model.layers:\n if layer.name in input_shapes.keys():\n continue\n if isinstance(layer.output, (list, tuple)):\n outputs.extend(layer.output)\n else:\n outputs.append(layer.output)\n try:\n functors = K.function([model.input], outputs)\n outputs_value = functors(feed_model_inputs)\n except Exception as e:\n outputs_value = [None] * len(outputs)\n DEBUG('Fail to get outputs of tensors: %s' % str(e))\n outputs = (out for out in outputs if hasattr(out, 'shape'))\n for out, out_value in zip(outputs, outputs_value):\n tensors.update({out.name: out})\n if out_value is None:\n out_value = np.random.ranf(\n out.shape.as_list()).astype(out.dtype.name)\n np_tensors.update({out.name: np.array(out_value)})\n\n return nodes, nodes_dict, tensors, np_tensors, input_shapes\n","repo_name":"Arm-China/Compass_Unified_Parser","sub_path":"AIPUBuilder/Parser/front_end/tf2/buffer.py","file_name":"buffer.py","file_ext":"py","file_size_in_byte":14932,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"89"} +{"seq_id":"41181215104","text":"from typing import Tuple\nfrom torch import nn, Tensor\nimport torch.nn.functional as nnf\n\n\n__all__ = ('preresnet20',)\n\n\nclass GlobalAvgPool(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, t: Tensor) -> Tensor:\n return nnf.avg_pool2d(t, t.size()[2:])\n\n\nclass BaseBasic(nn.Module):\n def __init__(self, inchannel: int, outchannel: int, stride: int = 1,\n activfunc=nn.ReLU) -> None:\n super().__init__()\n self.activ_h = activfunc()\n self.bn_h = nn.BatchNorm2d(outchannel)\n self.conv_io = nn.Conv2d(inchannel, outchannel, kernel_size=3,\n stride=stride, padding=1, bias=False)\n self.conv_ih = nn.Conv2d(inchannel, outchannel, kernel_size=3,\n stride=stride, padding=1, bias=False)\n self.conv_ho = nn.Conv2d(outchannel, outchannel, kernel_size=3,\n padding=1, bias=False)\n\n def forward(self, t: Tensor) -> Tensor:\n return self.conv_io(t) + \\\n self.conv_ho(self.activ_h(self.bn_h(self.conv_ih(t))))\n\n\nclass RefineBasic(nn.Module):\n def 
__init__(self, channel: int, activfunc=nn.ReLU) -> None:\n super().__init__()\n self.activ_i = activfunc()\n self.bn_i = nn.BatchNorm2d(channel)\n self.activ_h = activfunc()\n self.bn_h = nn.BatchNorm2d(channel)\n self.conv_ih = nn.Conv2d(\n channel, channel, kernel_size=3, padding=1, bias=False)\n self.conv_ho = nn.Conv2d(\n channel, channel, kernel_size=3, padding=1, bias=False)\n\n def forward(self, t: Tensor) -> Tensor:\n return t + self.conv_ho(self.activ_h(self.bn_h(self.conv_ih(\n self.activ_i(self.bn_i(t))))))\n\n\nclass ResNetHead(nn.Sequential):\n def __init__(self, outchannel: int, activfunc=nn.ReLU):\n super().__init__(\n nn.Conv2d(3, outchannel, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(outchannel),\n activfunc())\n\n\nclass BasicTrunk(nn.Sequential):\n def __init__(self, inchannel: int, outchannel: int, blocks: int,\n stride: int = 1, activfunc=nn.ReLU):\n assert blocks >= 1\n super().__init__(\n BaseBasic(inchannel, outchannel, stride, activfunc),\n *[RefineBasic(outchannel, activfunc) for _ in range(blocks-1)],\n nn.BatchNorm2d(outchannel),\n activfunc())\n\n\nclass ResNetClassifier(nn.Sequential):\n def __init__(self, inchannel: int, outclass: int):\n super().__init__(\n GlobalAvgPool(),\n nn.Flatten(),\n nn.Linear(inchannel, outclass))\n\n\n# depth = 3 * sum(blocks) + 2\ndef _resnet(outclass: int, trunktype, blocks: Tuple[int, int, int],\n activfunc=nn.ReLU) -> nn.Sequential:\n c0, c1, c2, c3 = 16, 16, 32, 64\n return nn.Sequential(\n ResNetHead(c0, activfunc),\n trunktype(c0, c1, blocks[0], 1, activfunc),\n trunktype(c1, c2, blocks[1], 2, activfunc),\n trunktype(c2, c3, blocks[2], 2, activfunc),\n ResNetClassifier(c3, outclass))\n\n\nclass ResNet(nn.Sequential):\n def __init__(\n self, outclass: int, trunktype, blocks: Tuple[int, int, int],\n activfunc=nn.ReLU):\n c0, c1, c2, c3 = 16, 16, 32, 64\n super().__init__(\n ResNetHead(c0, activfunc),\n trunktype(c0, c1, blocks[0], 1, activfunc),\n trunktype(c1, c2, blocks[1], 2, activfunc),\n trunktype(c2, c3, blocks[2], 2, activfunc),\n ResNetClassifier(c3, outclass)\n )\n\n\ndef preresnet20(outclass: int) -> nn.Sequential:\n return _resnet(outclass, BasicTrunk, (3, 3, 3))\n\n\ndef preresnet20_model(outclass: int) -> nn.Sequential:\n return ResNet(outclass, BasicTrunk, (3, 3, 3))\n","repo_name":"tum-vision/dca","sub_path":"dca/models32/standard.py","file_name":"standard.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"89"} +{"seq_id":"14414961995","text":"import socket\nimport json\nimport requests\n\ndef enroll():\n enroll_data = json.dumps({'group':'debug','device_ID':'debug'})\n temp = requests.post(url = 'http://13.55.147.2/enroll', json = enroll_data, timeout = 5)\n print(temp.text)\n\ndef send_dummy():\n print(\"in\")\n UDP_IP_ADDRESS = \"69.4.20.69\"\n UDP_PORT_NO = 420\n \n data = {\n #'ports': {'80/tcp': 6,'42915/tcp': 9},\n 'name': 'samsung',\n 'cloud_ip': '42915'\n }\n\n data_json = json.dumps(data)\n\n clientSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n clientSock.sendto(bytes(data_json,'utf-8'), (UDP_IP_ADDRESS, UDP_PORT_NO))\n\nif __name__ == \"__main__\":\n enroll()\n send_dummy()","repo_name":"jcvaldez1/inband_controller","sub_path":"iot_objects/updated samsung/dev_register_new.py","file_name":"dev_register_new.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"70818237090","text":"#Panoramix's 
Prediction\n#Brute force solution\n#Given and n and m number, we need to know if m is the next prime number\n#There will be a counter adding 1 by 1 to n until it gets the next prime number\n#if the m number is equal to the next prime it will be printed YES else NO\n\ndef solution(n,m):\n while n != m:\n n+=1\n #if its prime and is the m number\n if is_prime(n) and n == m:\n return 'YES'\n elif is_prime(n):\n return 'NO' \n return 'NO'\n \ndef is_prime(n):\n counter = 0\n #knowing if a number is prime by counting factors\n for number in range(1, n+1):\n if n % number == 0:\n counter+=1\n if counter == 2:\n return True\n return False\n \ndef main():\n n , m = input().split()\n print(solution(int(n),int(m)))\n\nif __name__ == \"__main__\":\n main()","repo_name":"alexismrosales/cpp","sub_path":"codeforces/python/80A.py","file_name":"80A.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"16890985781","text":"# Задача 1\na = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\n#\n# # Выведите все элементы, которые меньше 5.\n# a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89] # мое решение\n# for number in a:\n# if number <5:\n# print(f\"числа меньше 5: {number}\")\n#\n# print([number for number in a if number < 5]) # вот это прикольное решение списковое включение:\n#\n# b = (1, 3, 2, 2, 3, 5, 8, 13, 21, 34, 55, 89,100)\n# print([number for number in b if number < 5]) # работат и для кортежей\n\nresult = [number for number in a if number <20]\nprint (result)\n\n\n#Решение из учебника\n# for elem in a:\n# if elem < 5:\n# print(elem)\n# Также можно воспользоваться функцией filter, которая фильтрует элементы согласно заданному условию:\n#\n# print(list(filter(lambda elem: elem < 5, a)))\n# И, вероятно, наиболее предпочтительный вариант решения этой задачи — списковое включение:\n#\n# # print([elem for elem in a if elem < 5])\n# person = {\n#\n# 'name':'Victor',\n# 'surname':'Vampire',\n# 'age': 300,\n# 'Location':'Hotel Transilvania',\n# 'Любимое блюдо': ['кровь', 'девственицы', 'карбонара' ],\n# 'Машина': 'Porsche',\n# 'Hobby':'Кусать девок за сиськи',\n# }\n#\n# print([key for key in person.items()])\n\n# Задача 2\n# Даны списки:\n#\n# a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89];\n#\n# b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13].\n#\n# Нужно вернуть список, который состоит из элементов, общих для этих двух списков.\n\na = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\n\nb = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n\nc = a+b\nprint(c) #тут я тупо сложил списки а надо было сделать не так сделать один список в котором бы встречались значения\nresult = [number for number in a if number in b] # отличное решение которое можно прочитать так\n# резулттиат числа для числа в а если число есть в б а потом принтим результат\n# можно было сразу распринтить но тут был вариант который придумал ранее\nprint (result)\n\n\n# Вариант решения\n# Можем воспользоваться функцией filter:\n#\n# result = list(filter(lambda elem: elem in b, a))\n# Или списковым включением:\n#\n# result = [elem for elem in a if elem in b]\n\n\n\n# Три словаря с цифрами сливаем в один\ndict_a = {1:10, 2:20}\ndict_b = {3:30, 4:40}\ndict_c = {5:50, 6:60}\n\nnewSlovar ={} # создаем новый дикт\nfor sborka_slovarey in (dict_a,dict_b,dict_c): # с помошбю фора делаем пременную собирающщую\n # словарь Newsloavr и переменную счетчик sborka_slovarey и запускаем сборку\n\n newSlovar.update(sborka_slovarey)\nprint(newSlovar)\n\n\n# Задача 5\n# Найдите три ключа с самыми 
высокими значениями в\n# словаре my_dict = {'a':500, 'b':5874, 'c': 560,'d':400, 'e':5874, 'f': 20}.\n\n#Вот тут важно, я хотел колхозить какой то велосипед с ифами сравнивая между собой все подряд\n# а в пайтоне кончено же есть встроенный сортировщик для таких как я\n\n# итак берем словарь и применяем к нему sorted\n# my_dict = {'a':500, 'b':5874, 'c': 560,'d':400, 'e':5874, 'f': 20}\n# resultat = sorted(my_dict.values())\n# print(resultat) # это просто сортировка а вот найти три ключа с самыми высокими значениями\n#\n# resultat = sorted(my_dict, key=my_dict.get, reverse=True)[:3]\n# print(resultat)\n# resultat = sorted(my_dict, val=my_dict.get, reverse=True)[:3]\n# print(resultat)\n\n\n\nliset_ot_baldy = [1,2,3, 27, 64, 0, ]\n\nprint(sorted(liset_ot_baldy))\n\nliset_ot_baldy = ['a','b','c', 'a', 'e',' f' ]\nprint(sorted(liset_ot_baldy))\n","repo_name":"AshykhminGit/Python2022","sub_path":"Zero_level/zadachi_razobrat.py","file_name":"zadachi_razobrat.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"22386042543","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\ndef CorrelationMatrix (df: pd.DataFrame, columns : list, name : str, fillNA=False, savePath=None):\n ''' Color map correlation matrix '''\n\n corr = df[columns].corr()\n\n if fillNA:\n corr = corr.fillna(0)\n\n sns.heatmap(\n corr,\n vmin=-1, vmax=1, center=0,\n cmap=sns.diverging_palette(20, 220, n=200),\n square=True\n )\n\n plt.title(name)\n\n if savePath:\n plt.savefig(savePath,dpi=300, bbox_inches='tight')\n else:\n plt.show()\n plt.close()\n\n\ndef CorrelationMatrix2(df: pd.DataFrame, columns : list, name : str, fillNA=False, savePath=None):\n ''' Square size correlation matrix '''\n\n corr = df[columns].corr()\n\n if fillNA:\n corr = corr.fillna(0)\n\n corr = pd.melt(corr.reset_index(), id_vars='index')\n corr.columns = ['x', 'y', 'value']\n\n heatmap(\n x=corr['x'],\n y=corr['y'],\n size=corr['value'].abs()\n )\n\n plt.title(name)\n\n if savePath:\n plt.savefig(savePath, dpi=300, bbox_inches='tight')\n else:\n plt.show()\n plt.close()\n\n\ndef heatmap(x, y, size):\n fig, ax = plt.subplots()\n\n # Mapping from column names to integer coordinates\n x_labels = [v for v in sorted(x.unique())]\n y_labels = [v for v in sorted(y.unique())]\n x_to_num = {p[1]: p[0] for p in enumerate(x_labels)}\n y_to_num = {p[1]: p[0] for p in enumerate(y_labels)}\n\n size_scale = 50\n ax.scatter(\n x=x.map(x_to_num),\n y=y.map(y_to_num),\n s=size * size_scale,\n marker='s'\n )\n\n # Show column labels on the axes\n ax.set_xticks([x_to_num[v] for v in x_labels])\n ax.set_xticklabels(x_labels, rotation=45, horizontalalignment='right')\n ax.set_yticks([y_to_num[v] for v in y_labels])\n ax.set_yticklabels(y_labels)\n\ndef scatterMatrix(df: pd.DataFrame, columns : list, name : str, fillNA=False, savePath=None):\n ''' Scatter plot '''\n\n Axes = pd.plotting.scatter_matrix(df [columns], alpha=0.2, figsize=(10, 10), s=100)\n\n # y ticklabels\n [plt.setp(item.yaxis.get_majorticklabels(), 'size', 4) for item in Axes.ravel()]\n # x ticklabels\n [plt.setp(item.xaxis.get_majorticklabels(), 'size', 4) for item in Axes.ravel()]\n # y labels\n [plt.setp(item.yaxis.get_label(), 'size', 6) for item in Axes.ravel()]\n # x labels\n [plt.setp(item.xaxis.get_label(), 'size', 6) for item in Axes.ravel()]\n\n plt.suptitle(name)\n\n if savePath:\n plt.savefig(savePath, dpi=300)\n else:\n plt.show()\n 
plt.close()\n","repo_name":"ilopezgazpio/PhrasIS-baselines","sub_path":"src/Correlations/Correlations.py","file_name":"Correlations.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"} +{"seq_id":"40271661464","text":"import os\r\nimport datetime\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.distributed as dist\r\nimport torch.optim as optim\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom nets.deeplabv3_plus import DeepLab\r\nfrom nets.deeplabv3_training import (get_lr_scheduler, set_optimizer_lr,\r\n weights_init)\r\nfrom utils.callbacks import LossHistory, EvalCallback\r\nfrom utils.dataloader import DeeplabDataset, deeplab_dataset_collate\r\nfrom utils.utils import download_weights, show_config\r\nfrom utils.utils_fit import fit_one_epoch\r\n\r\n'''\r\n训练自己的语义分割模型一定需要注意以下几点:\r\n1、训练前仔细检查自己的格式是否满足要求,该库要求数据集格式为VOC格式,需要准备好的内容有输入图片和标签\r\n 输入图片为.jpg图片,无需固定大小,传入训练前会自动进行resize。\r\n 灰度图会自动转成RGB图片进行训练,无需自己修改。\r\n 输入图片如果后缀非jpg,需要自己批量转成jpg后再开始训练。\r\n\r\n 标签为png图片,无需固定大小,传入训练前会自动进行resize。\r\n 由于许多同学的数据集是网络上下载的,标签格式并不符合,需要再度处理。一定要注意!标签的每个像素点的值就是这个像素点所属的种类。\r\n 网上常见的数据集总共对输入图片分两类,背景的像素点值为0,目标的像素点值为255。这样的数据集可以正常运行但是预测是没有效果的!\r\n 需要改成,背景的像素点值为0,目标的像素点值为1。\r\n 如果格式有误,参考:https://github.com/bubbliiiing/segmentation-format-fix\r\n\r\n2、损失值的大小用于判断是否收敛,比较重要的是有收敛的趋势,即验证集损失不断下降,如果验证集损失基本上不改变的话,模型基本上就收敛了。\r\n 损失值的具体大小并没有什么意义,大和小只在于损失的计算方式,并不是接近于0才好。如果想要让损失好看点,可以直接到对应的损失函数里面除上10000。\r\n 训练过程中的损失值会保存在logs文件夹下的loss_%Y_%m_%d_%H_%M_%S文件夹中\r\n \r\n3、训练好的权值文件保存在logs文件夹中,每个训练Epoch(Epoch)包含若干训练步长(Step),每个训练步长(Step)进行一次梯度下降。\r\n 如果只是训练了几个Step是不会保存的,Epoch和Step的概念要捋清楚一下。\r\n'''\r\nif __name__ == \"__main__\":\r\n Cuda = True\r\n\r\n \"\"\"\r\n distributed:用于指定是否使用单机多卡分布式运行\r\n 终端指令仅支持Linux。CUDA_VISIBLE_DEVICES用于在Ubuntu下指定显卡。\r\n 系统下默认使用DP模式调用所有显卡,不支持DDP。\r\n \r\n DP模式:\r\n 设置 distributed = False\r\n 在终端中输入 CUDA_VISIBLE_DEVICES=0,1 python train.py\r\n DDP模式:\r\n 设置 distributed = True\r\n 在终端中输入 CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 train.py\r\n \"\"\"\r\n distributed = True\r\n\r\n # 是否使用同步的BN,DDP模式多卡可用\r\n sync_bn = False\r\n\r\n # 是否使用混合精度训练(可减少约一半的显存)\r\n fp16 = True\r\n\r\n # num_classes 训练自己的数据集必须要修改的(自己需要的分类个数+1)\r\n num_classes = 21\r\n\r\n \"\"\"\r\n 所使用的的主干网络\r\n 1. MobileNet v2\r\n 2. 
Xception\r\n \"\"\"\r\n backbone = \"mobilenet\"\r\n\r\n \"\"\"\r\n 是否使用主干网络的预训练权重,此处使用的是主干的权重,因此是在模型构建的时候进行加载的。\r\n 如果设置了model_path,则主干的权值无需加载,pretrained的值无意义。\r\n 如果不设置model_path,pretrained = True,此时仅加载主干开始训练。\r\n 如果不设置model_path,pretrained = False,Freeze_Train = Fasle,此时从0开始训练,且没有冻结主干的过程。\r\n \"\"\"\r\n pretrained = False\r\n\r\n \"\"\"\r\n 权值文件的下载请看README,可以通过网盘下载。模型的 预训练权重 对不同数据集是通用的,因为特征是通用的。\r\n 模型的 预训练权重 比较重要的部分是 主干特征提取网络的权值部分,用于进行特征提取。\r\n 预训练权重对于99%的情况都必须要用,不用的话主干部分的权值太过随机,特征提取效果不明显,网络训练的结果也不会好\r\n 训练自己的数据集时提示维度不匹配正常,预测的东西都不一样了自然维度不匹配\r\n 如果训练过程中存在中断训练的操作,可以将model_path设置成logs文件夹下的权值文件,将已经训练了一部分的权值再次载入。\r\n 同时修改下方的 冻结阶段 或者 解冻阶段 的参数,来保证模型epoch的连续性。\r\n \r\n 当model_path = ''的时候不加载整个模型的权值。\r\n 此处使用的是整个模型的权重,因此是在train.py进行加载的,pretrain不影响此处的权值加载。\r\n 如果想要让模型从主干的预训练权值开始训练,则设置model_path = '',pretrain = True,此时仅加载主干。\r\n 如果想要让模型从0开始训练,则设置model_path = '',pretrain = Fasle,Freeze_Train = Fasle,此时从0开始训练,且没有冻结主干的过程。 \r\n \r\n 一般来讲,网络从0开始的训练效果会很差,因为权值太过随机,特征提取效果不明显,因此非常、非常、非常不建议大家从0开始训练!\r\n 如果一定要从0开始,可以了解imagenet数据集,首先训练分类模型,获得网络的主干部分权值,分类模型的 主干部分 和该模型通用,基于此进行训练。\r\n \"\"\"\r\n model_path = \"model_data/deeplab_mobilenetv2.pth\"\r\n\r\n \"\"\"\r\n 下采样的倍数,可选 16 或 8\r\n 8下采样的倍数较小、理论上效果更好,但也要求更大的显存\r\n \"\"\"\r\n downsample_factor = 16\r\n\r\n # 输入图片的大小\r\n input_shape = [512, 512]\r\n\r\n # epoch设置\r\n Init_Epoch = 0\r\n Freeze_Epoch = 0\r\n Freeze_batch_size = 32\r\n\r\n UnFreeze_Epoch = 50 # 模型总共训练的epoch\r\n Unfreeze_batch_size = 32 # 模型在解冻后的batch_size\r\n\r\n # 是否进行冻结训练(默认先冻结主干训练后解冻训练)\r\n Freeze_Train = False\r\n\r\n \"\"\"\r\n Init_lr:模型的最大学习率\r\n 当使用Adam优化器时建议设置Init_lr=5e-4\r\n 当使用SGD优化器时建议设置Init_lr=7e-3\r\n Min_lr:模型的最小学习率,默认为最大学习率的0.01\r\n \"\"\"\r\n Init_lr = 7e-3\r\n Min_lr = Init_lr * 0.01\r\n\r\n \"\"\"\r\n optimizer_type:使用到的优化器种类,可选的有adam、sgd\r\n 当使用Adam优化器时建议设置 Init_lr=5e-4\r\n 当使用SGD优化器时建议设置 Init_lr=7e-3\r\n momentum: 优化器内部使用到的momentum参数\r\n weight_decay:权值衰减,可防止过拟合\r\n adam会导致weight_decay错误,使用adam时建议设置为0。\r\n \"\"\"\r\n optimizer_type = \"sgd\"\r\n momentum = 0.9\r\n weight_decay = 1e-4\r\n\r\n # 使用到的学习率下降方式,可选的有'step'、'cos'\r\n lr_decay_type = 'cos'\r\n\r\n # 多少个epoch保存一次权值\r\n save_period = 5\r\n\r\n # 权值与日志文件保存的文件夹\r\n save_dir = 'logs'\r\n\r\n \"\"\"\r\n eval_flag 是否在训练时进行评估,评估对象为验证集\r\n eval_period 代表多少个epoch评估一次,不建议频繁的评估\r\n 评估需要消耗较多的时间,频繁评估会导致训练非常慢\r\n 此处获得的mAP会与get_map.py获得的会有所不同,原因有二:\r\n (一)此处获得的mAP为验证集的mAP。\r\n (二)此处设置评估参数较为保守,目的是加快评估速度。\r\n \"\"\"\r\n eval_flag = True\r\n eval_period = 1\r\n\r\n VOCdevkit_path = '../Datasets/VOCdevkit'\r\n\r\n \"\"\"\r\n 建议选项:\r\n 种类少(几类)时,设置为True\r\n 种类多(十几类)时,如果batch_size比较大(10以上),那么设置为True\r\n 种类多(十几类)时,如果batch_size比较小(10以下),那么设置为False\r\n \"\"\"\r\n dice_loss = True\r\n\r\n # 是否使用focal loss来防止正负样本不平衡\r\n focal_loss = False\r\n\r\n \"\"\"\r\n 是否给不同种类赋予不同的损失权值,默认是平衡的。\r\n 设置的话,注意设置成numpy形式的,长度和num_classes一样。\r\n 如:\r\n num_classes = 3\r\n cls_weights = np.array([1, 2, 3], np.float32)\r\n \"\"\"\r\n cls_weights = np.ones([num_classes], np.float32)\r\n\r\n num_workers = 8\r\n\r\n # 设置用到的显卡\r\n ngpus_per_node = torch.cuda.device_count()\r\n if distributed:\r\n dist.init_process_group(backend=\"nccl\")\r\n local_rank = int(os.environ[\"LOCAL_RANK\"])\r\n rank = int(os.environ[\"RANK\"])\r\n device = torch.device(\"cuda\", local_rank)\r\n if local_rank == 0:\r\n print(\r\n f\"[{os.getpid()}] (rank = {rank}, local_rank = {local_rank}) training...\")\r\n print(\"Gpu Device Count : \", ngpus_per_node)\r\n else:\r\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n local_rank = 0\r\n\r\n # 下载预训练权重\r\n if 
pretrained:\r\n if distributed:\r\n if local_rank == 0:\r\n download_weights(backbone)\r\n dist.barrier()\r\n else:\r\n download_weights(backbone)\r\n\r\n model = DeepLab(num_classes=num_classes, backbone=backbone,\r\n downsample_factor=downsample_factor, pretrained=pretrained)\r\n if not pretrained:\r\n weights_init(model)\r\n if model_path != '':\r\n if local_rank == 0:\r\n print('Load weights {}.'.format(model_path))\r\n\r\n # 根据预训练权重的Key和模型的Key进行加载\r\n model_dict = model.state_dict()\r\n pretrained_dict = torch.load(model_path, map_location=device)\r\n load_key, no_load_key, temp_dict = [], [], {}\r\n for k, v in pretrained_dict.items():\r\n if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):\r\n temp_dict[k] = v\r\n load_key.append(k)\r\n else:\r\n no_load_key.append(k)\r\n model_dict.update(temp_dict)\r\n model.load_state_dict(model_dict)\r\n\r\n # 显示没有匹配上的Key\r\n if local_rank == 0:\r\n print(\"\\nSuccessful Load Key:\", str(load_key)[\r\n :500], \"……\\nSuccessful Load Key Num:\", len(load_key))\r\n print(\"\\nFail To Load Key:\", str(no_load_key)[\r\n :500], \"……\\nFail To Load Key num:\", len(no_load_key))\r\n print(\r\n \"\\n\\033[1;33;44m温馨提示,head部分没有载入是正常现象,Backbone部分没有载入是错误的。\\033[0m\")\r\n\r\n # 记录Loss\r\n if local_rank == 0:\r\n time_str = datetime.datetime.strftime(\r\n datetime.datetime.now(), '%Y_%m_%d_%H_%M_%S')\r\n log_dir = os.path.join(save_dir, \"loss_\" + str(time_str))\r\n loss_history = LossHistory(log_dir, model, input_shape=input_shape)\r\n else:\r\n loss_history = None\r\n\r\n if fp16:\r\n from torch.cuda.amp import GradScaler as GradScaler\r\n scaler = GradScaler()\r\n else:\r\n scaler = None\r\n\r\n model_train = model.train()\r\n\r\n # 多卡同步Bn\r\n if sync_bn and ngpus_per_node > 1 and distributed:\r\n model_train = torch.nn.SyncBatchNorm.convert_sync_batchnorm(\r\n model_train)\r\n elif sync_bn:\r\n print(\"Sync_bn is not support in one gpu or not distributed.\")\r\n\r\n if Cuda:\r\n if distributed:\r\n\r\n # 多卡平行运行\r\n model_train = model_train.cuda(local_rank)\r\n model_train = torch.nn.parallel.DistributedDataParallel(\r\n model_train, device_ids=[local_rank], find_unused_parameters=True)\r\n else:\r\n model_train = torch.nn.DataParallel(model)\r\n cudnn.benchmark = True\r\n model_train = model_train.cuda()\r\n\r\n # 读取数据集对应的txt\r\n with open(os.path.join(VOCdevkit_path, \"VOC2012/ImageSets/Segmentation/train.txt\"), \"r\") as f:\r\n train_lines = f.readlines()\r\n with open(os.path.join(VOCdevkit_path, \"VOC2012/ImageSets/Segmentation/val.txt\"), \"r\") as f:\r\n val_lines = f.readlines()\r\n num_train = len(train_lines)\r\n num_val = len(val_lines)\r\n\r\n if local_rank == 0:\r\n show_config(\r\n num_classes=num_classes, backbone=backbone, model_path=model_path, input_shape=input_shape,\r\n Init_Epoch=Init_Epoch, Freeze_Epoch=Freeze_Epoch, UnFreeze_Epoch=UnFreeze_Epoch, Freeze_batch_size=Freeze_batch_size, Unfreeze_batch_size=Unfreeze_batch_size, Freeze_Train=Freeze_Train,\r\n Init_lr=Init_lr, Min_lr=Min_lr, optimizer_type=optimizer_type, momentum=momentum, lr_decay_type=lr_decay_type,\r\n save_period=save_period, save_dir=save_dir, num_workers=num_workers, num_train=num_train, num_val=num_val\r\n )\r\n\r\n \"\"\"\r\n 总Epoch指的是遍历全部数据的总次数\r\n 总训练步长指的是梯度下降的总次数 \r\n 每个训练Epoch包含若干训练步长,每个训练步长进行一次梯度下降。\r\n \r\n 此处仅建议最低训练Epoch,上不封顶,计算时只考虑了解冻部分\r\n \"\"\"\r\n wanted_step = 1.5e4 if optimizer_type == \"sgd\" else 0.5e4\r\n total_step = num_train // Unfreeze_batch_size * UnFreeze_Epoch\r\n if total_step <= wanted_step:\r\n if num_train // 
Unfreeze_batch_size == 0:\r\n raise ValueError('数据集过小,无法进行训练,请扩充数据集。')\r\n wanted_epoch = wanted_step // (num_train //\r\n Unfreeze_batch_size) + 1\r\n print(\"\\n\\033[1;33;44m[Warning] 使用%s优化器时,建议将训练总步长设置到%d以上。\\033[0m\" % (\r\n optimizer_type, wanted_step))\r\n print(\"\\033[1;33;44m[Warning] 本次运行的总训练数据量为%d,Unfreeze_batch_size为%d,共训练%d个Epoch,计算出总训练步长为%d。\\033[0m\" % (\r\n num_train, Unfreeze_batch_size, UnFreeze_Epoch, total_step))\r\n print(\"\\033[1;33;44m[Warning] 由于总训练步长为%d,小于建议总步长%d,建议设置总Epoch为%d。\\033[0m\" % (\r\n total_step, wanted_step, wanted_epoch))\r\n\r\n # 主干特征提取网络特征通用,冻结训练可以加快训练速度,也可以在训练初期防止权值被破坏。\r\n if True:\r\n UnFreeze_flag = False\r\n\r\n # 冻结一定部分训练\r\n if Freeze_Train:\r\n for param in model.backbone.parameters():\r\n param.requires_grad = False\r\n\r\n # 如果不冻结训练的话,直接设置batch_size为Unfreeze_batch_size\r\n batch_size = Freeze_batch_size if Freeze_Train else Unfreeze_batch_size\r\n\r\n # 判断当前batch_size,自适应调整学习率\r\n nbs = 16\r\n lr_limit_max = 5e-4 if optimizer_type == 'adam' else 1e-1\r\n lr_limit_min = 3e-4 if optimizer_type == 'adam' else 5e-4\r\n if backbone == \"xception\":\r\n lr_limit_max = 1e-4 if optimizer_type == 'adam' else 1e-1\r\n lr_limit_min = 1e-4 if optimizer_type == 'adam' else 5e-4\r\n Init_lr_fit = min(max(batch_size / nbs * Init_lr,\r\n lr_limit_min), lr_limit_max)\r\n Min_lr_fit = min(max(batch_size / nbs * Min_lr,\r\n lr_limit_min * 1e-2), lr_limit_max * 1e-2)\r\n\r\n # 根据optimizer_type选择优化器\r\n optimizer = {\r\n 'adam': optim.Adam(model.parameters(), Init_lr_fit, betas=(momentum, 0.999), weight_decay=weight_decay),\r\n 'sgd': optim.SGD(model.parameters(), Init_lr_fit, momentum=momentum, nesterov=True, weight_decay=weight_decay)\r\n }[optimizer_type]\r\n\r\n # 获得学习率下降的公式\r\n lr_scheduler_func = get_lr_scheduler(\r\n lr_decay_type, Init_lr_fit, Min_lr_fit, UnFreeze_Epoch)\r\n\r\n # 判断每一个Epoch的长度\r\n epoch_step = num_train // batch_size\r\n epoch_step_val = num_val // batch_size\r\n\r\n if epoch_step == 0 or epoch_step_val == 0:\r\n raise ValueError(\"数据集过小,无法继续进行训练,请扩充数据集。\")\r\n\r\n train_dataset = DeeplabDataset(\r\n train_lines, input_shape, num_classes, True, VOCdevkit_path)\r\n val_dataset = DeeplabDataset(\r\n val_lines, input_shape, num_classes, False, VOCdevkit_path)\r\n\r\n if distributed:\r\n train_sampler = torch.utils.data.distributed.DistributedSampler(\r\n train_dataset, shuffle=True,)\r\n val_sampler = torch.utils.data.distributed.DistributedSampler(\r\n val_dataset, shuffle=False,)\r\n batch_size = batch_size // ngpus_per_node\r\n shuffle = False\r\n else:\r\n train_sampler = None\r\n val_sampler = None\r\n shuffle = True\r\n\r\n gen = DataLoader(train_dataset, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers, pin_memory=True,\r\n drop_last=True, collate_fn=deeplab_dataset_collate, sampler=train_sampler)\r\n gen_val = DataLoader(val_dataset, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers, pin_memory=True,\r\n drop_last=True, collate_fn=deeplab_dataset_collate, sampler=val_sampler)\r\n\r\n # 记录eval的map曲线\r\n if local_rank == 0:\r\n eval_callback = EvalCallback(model, input_shape, num_classes, val_lines, VOCdevkit_path, log_dir, Cuda,\r\n eval_flag=eval_flag, period=eval_period)\r\n else:\r\n eval_callback = None\r\n\r\n # 开始模型训练\r\n for epoch in range(Init_Epoch, UnFreeze_Epoch):\r\n\r\n # 如果模型有冻结学习部分,则解冻,并设置参数\r\n if epoch >= Freeze_Epoch and not UnFreeze_flag and Freeze_Train:\r\n batch_size = Unfreeze_batch_size\r\n\r\n # 判断当前batch_size,自适应调整学习率\r\n nbs = 16\r\n lr_limit_max = 5e-4 if optimizer_type 
== 'adam' else 1e-1\r\n lr_limit_min = 3e-4 if optimizer_type == 'adam' else 5e-4\r\n if backbone == \"xception\":\r\n lr_limit_max = 1e-4 if optimizer_type == 'adam' else 1e-1\r\n lr_limit_min = 1e-4 if optimizer_type == 'adam' else 5e-4\r\n Init_lr_fit = min(\r\n max(batch_size / nbs * Init_lr, lr_limit_min), lr_limit_max)\r\n Min_lr_fit = min(max(batch_size / nbs * Min_lr,\r\n lr_limit_min * 1e-2), lr_limit_max * 1e-2)\r\n\r\n # 获得学习率下降的公式\r\n lr_scheduler_func = get_lr_scheduler(\r\n lr_decay_type, Init_lr_fit, Min_lr_fit, UnFreeze_Epoch)\r\n\r\n for param in model.backbone.parameters():\r\n param.requires_grad = True\r\n\r\n epoch_step = num_train // batch_size\r\n epoch_step_val = num_val // batch_size\r\n\r\n if epoch_step == 0 or epoch_step_val == 0:\r\n raise ValueError(\"数据集过小,无法继续进行训练,请扩充数据集。\")\r\n\r\n if distributed:\r\n batch_size = batch_size // ngpus_per_node\r\n\r\n gen = DataLoader(train_dataset, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers, pin_memory=True,\r\n drop_last=True, collate_fn=deeplab_dataset_collate, sampler=train_sampler)\r\n gen_val = DataLoader(val_dataset, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers, pin_memory=True,\r\n drop_last=True, collate_fn=deeplab_dataset_collate, sampler=val_sampler)\r\n\r\n UnFreeze_flag = True\r\n\r\n if distributed:\r\n train_sampler.set_epoch(epoch)\r\n\r\n set_optimizer_lr(optimizer, lr_scheduler_func, epoch)\r\n\r\n fit_one_epoch(model_train, model, loss_history, eval_callback, optimizer, epoch,\r\n epoch_step, epoch_step_val, gen, gen_val, UnFreeze_Epoch, Cuda, dice_loss, focal_loss, cls_weights, num_classes, fp16, scaler, save_period, save_dir, local_rank)\r\n\r\n if distributed:\r\n dist.barrier()\r\n\r\n if local_rank == 0:\r\n loss_history.writer.close()\r\n","repo_name":"Le0v1n/ml_code","sub_path":"Segmentation/DeepLab v3+/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":21121,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"89"} +{"seq_id":"7929068020","text":"import pathlib\n\nimport scipy.stats\nimport pandas\nimport numpy\nimport pylab\n\nimport studies\n\ntissue = snakemake.wildcards.tissue\noutdir = pathlib.Path(f\"results/{tissue}/assess_jtk\")\noutdir.mkdir(exist_ok=True)\n\njtk = pandas.read_csv(f\"results/{tissue}/jtk.results.txt\", sep=\"\\t\", index_col=[0,1])\nrobustness = pandas.read_csv(f\"results/{tissue}/robustness_score.txt\", sep=\"\\t\", index_col=0)['0'].sort_index()\n\n# Find studies with at least 8 timepoints per day, since these have the resolution to detect\n# periods other than 24. And we require all to be 'highlighted' studies, i.e. from consistent\n# conditions and so comparable\nsample_info = pandas.read_csv(f\"results/{tissue}/all_samples_info.txt\", sep=\"\\t\", index_col=0)\nnum_timepoints = sample_info.groupby(\"study\").time.apply(lambda x: (x%24).nunique())\nhighres_study = num_timepoints.index[num_timepoints >= 8]\nselected_study = [study for study in highres_study if studies.targets[study].get(\"highlight\", False)]\nif len(selected_study) < 2:\n print(\"No suitable studies. 
Aborting\")\n (outdir / \"period_statistics.txt\").touch()\n exit()\n\n# Select just data when JTK significant and from high resolution datasets\nsig = (~jtk.dropped) & (jtk['qvalue'] < 0.05)\ngood_jtk = jtk[sig]\nperiod = good_jtk.reset_index().pivot(index=\"ID\", columns=\"study\", values=\"PER\")[highres_study]\nperiod = period.sort_index()\n\n# Compute the Cramer's V and Spearman correlation statistics between the selected studies\nstatistics_list = []\nfor studyA in selected_study:\n for studyB in selected_study:\n if studyA == studyB:\n continue\n contingency = pandas.crosstab(period[studyA], period[studyB])\n chi2, p, ddof, expected = scipy.stats.chi2_contingency(contingency)\n cramerV = numpy.sqrt(chi2 / (contingency.sum().sum() * (numpy.min(contingency.shape) - 1)))\n spearmanr, p = scipy.stats.spearmanr(period[studyA], period[studyB], nan_policy=\"omit\")\n statistics_list.append({\n \"studyA\": studyA,\n \"studyB\": studyB,\n \"CramerV\": cramerV,\n \"SpearmanR\": spearmanr,\n })\nstatistics = pandas.DataFrame(statistics_list)\nstatistics.to_csv(outdir / \"period_statistics.txt\", sep=\"\\t\", index=False)\nprint(\"Median PER statistics comparing pairs of {len(selected_study)} studies:\")\nprint(statistics.median(axis=0))\nprint(\"Max PER statistics comparing pairs of {len(selected_study)} studies:\")\nprint(statistics[['CramerV', 'SpearmanR']].max(axis=0))\n\n# Check if any genes are consistently non-24\n# Just look at < 24 since few studies can detect >24\n# Require no >=24 periods and at least four <24 hours periods among those significant\ndef find_low_period(period):\n return period[ ((period < 24) | (period.isna())).all(axis=1) & (period.count(axis=1) >= 4)]\nlow_period = find_low_period(period)\nprint(f\"Identified {len(low_period)} low-period genes\")\nlow_period.to_csv(outdir/ \"low_period_genes.txt\", sep=\"\\t\")\nrandom_num_low_period = []\ndef permute_finite(x):\n ''' permutation of finite elements of x, leaving nans unchanged '''\n out = x.copy()\n finite = ~pandas.isna(x)\n out[finite] = numpy.random.permutation(out[finite])\n return out\nfor i in range(100):\n shuffled_period = pandas.DataFrame({key: permute_finite(col.values)\n for key,col in period.items()})\n random_num_low_period.append(len(find_low_period(shuffled_period)))\nprint(\"Random permutations give low periods:\")\nprint(pandas.Series(random_num_low_period).describe())\n\n## By robustness\n# Categories of robustness\nhigh = (robustness >= 35)\nmid = (robustness < 35) & (robustness >= 20)\nlow = (robustness < 20)\n\n# Period histograms by robustness\nbins = numpy.arange(19,30)\nfig, ax = pylab.subplots(figsize=(4,4))\nax.hist(period[robustness >= 35].unstack(), bins=bins, density=True, alpha=0.5, label=\">=35\")\nax.hist(period[(robustness < 35) & (robustness >= 20)].unstack(), bins=bins, density=True, alpha=0.5, label=\"20-34\")\nax.hist(period[(robustness < 20)].unstack(), bins=bins, density=True, alpha=0.5, label=\"0-19\")\nax.set_xlabel(\"JTK Period\")\nax.legend()\nfig.savefig(outdir/\"period_distributions.png\", dpi=300)\n\n# Tabular form: period by robustness\ncounts_by_period = pandas.DataFrame({\n \">=35\": period[high].unstack().value_counts() / period[high].count().sum(),\n \"20-34\": period[mid].unstack().value_counts() / period[mid].count().sum(),\n \"0-19\": period[low].unstack().value_counts() / period[low].count().sum(),\n}).fillna(0)\ncounts_by_period.index.name = \"period\"\ncounts_by_period.index = counts_by_period.index.astype(int)\n(counts_by_period*100).to_csv(outdir / 
\"period_counts.txt\", sep=\"\\t\", float_format=\"%0.1f\")\n\n# Phase differences\n#\n## Compute the differences between studies\n## Using two ways:\n## 1. mean absolute difference of phase (done cyclicly, so 24=0)\n## 2. number of highly discordant genes (per difference > 6 hrs)\n#mad_dict = {}\n#discordant_dict = {}\n#for study1, jtk1 in jtk.groupby(\"study\"):\n# mad_dict[study1] = {}\n# discordant_dict[study1] = {}\n# for study2, jtk2 in jtk.groupby(\"study\"):\n# in_both = (jtk1.qvalue < 0.1) & (jtk2.qvalue < 0.1)\n# #mad = scipy.stats.circmean(jtk1[in_both].LAG - jtk2[in_both].LAG, 0, 24)\n# diff = (jtk1.LAG[in_both] - jtk2.LAG[in_both])%24\n# diff[diff > 12] = 24 - diff[diff > 12]\n# mad_dict[study1][study2] = diff.mean()\n# discordant_dict[study1][study2] = sum(diff >= 6)\n#mean_abs_diff = pandas.DataFrame(mad_dict)\n#num_discordant_by_study = pandas.DataFrame(discordant_dict)\n#\n#\n#def circ_abs_diff(x,y):\n# ''' circular absolute difference between two phases '''\n# diff = (x - y) % 24\n# if diff > 12:\n# return 24 - diff\n# else:\n# return diff\n#\n#def count_discordant_phases(phases):\n# ''' number of phase pairs that are discordant (>= 6 hrs apart) '''\n# return len([x for x in phases for y in phases if circ_abs_diff(x,y) >= 6 and x > y])\n#\n#num_discordant_by_gene = jtk.groupby(jtk.index).apply(lambda x: count_discordant_phases(x[x.qvalue < 0.1].LAG))\n","repo_name":"tgbrooks/circadian_comparison","sub_path":"scripts/assess_period_differences.py","file_name":"assess_period_differences.py","file_ext":"py","file_size_in_byte":6052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"15779001651","text":"from distutils.core import setup\n\nDISTNAME = 'novo-muta'\nMAINTAINER = 'Melissa Ip'\nMAINTAINER_EMAIL = 'mip [at] asu [dot] asu'\nDESCRIPTION = ('A set of Python modules for detecting de novo mutations in' + \n ' a set of genome sequencing data')\nLICENSE = 'new BSD' \nURL = 'https://github.com/maip/novo-muta' \nVERSION = '0.0.1' \nLONG_DESCRIPTION = open('README.md').read()\nPACKAGES = ['family']\nREQUIRES = ['numpy', 'scipy']\n\nsetup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n long_description=LONG_DESCRIPTION,\n packages=PACKAGES,\n requires=REQUIRES)\n","repo_name":"reedacartwright/novo-muta","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"38680244183","text":"# -*- coding: UTF-8 -*-\n\"\"\"\ntitle: 重建序列\n给定一个长度为 n 的整数数组 nums ,其中 nums 是范围为 [1,n] 的整数的排列。还提供了一个 2D 整数数组 sequences ,其中 sequences[i] 是 nums 的子序列。\n检查 nums 是否是唯一的最短 超序列 。最短 超序列 是 长度最短 的序列,并且所有序列 sequences[i] 都是它的子序列。对于给定的数组 sequences ,可能存在多个有效的 超序列 。\n 例如,对于 sequences = [[1,2],[1,3]] ,有两个最短的 超序列 ,[1,2,3] 和 [1,3,2] 。\n 而对于 sequences = [[1,2],[1,3],[1,2,3]] ,唯一可能的最短 超序列 是 [1,2,3] 。[1,2,3,4] 是可能的超序列,但不是最短的。\n如果 nums 是序列的唯一最短 超序列 ,则返回 true ,否则返回 false 。\n子序列 是一个可以通过从另一个序列中删除一些元素或不删除任何元素,而不改变其余元素的顺序的序列。\n\n\n示例 1:\n输入:nums = [1,2,3], sequences = [[1,2],[1,3]]\n输出:false\n解释:有两种可能的超序列:[1,2,3]和[1,3,2]。\n序列 [1,2] 是[1,2,3]和[1,3,2]的子序列。\n序列 [1,3] 是[1,2,3]和[1,3,2]的子序列。\n因为 nums 不是唯一最短的超序列,所以返回false。\n\n示例 2:\n输入:nums = [1,2,3], sequences = [[1,2]]\n输出:false\n解释:最短可能的超序列为 [1,2]。\n序列 [1,2] 是它的子序列:[1,2]。\n因为 nums 不是最短的超序列,所以返回false。\n\n示例 3:\n输入:nums = [1,2,3], sequences = 
[[1,2],[1,3],[2,3]]\n输出:true\n解释:最短可能的超序列为[1,2,3]。\n序列 [1,2] 是它的一个子序列:[1,2,3]。\n序列 [1,3] 是它的一个子序列:[1,2,3]。\n序列 [2,3] 是它的一个子序列:[1,2,3]。\n因为 nums 是唯一最短的超序列,所以返回true。\n\n\n提示:\nn == nums.length\n1 <= n <= 10^4\nnums 是 [1, n] 范围内所有整数的排列\n1 <= sequences.length <= 10^4\n1 <= sequences[i].length <= 10^4\n1 <= sum(sequences[i].length) <= 10^5\n1 <= sequences[i][j] <= n\nsequences 的所有数组都是 唯一 的\nsequences[i] 是 nums 的一个子序列\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def sequenceReconstruction(self, nums: List[int], sequences: List[List[int]]) -> bool:\n \"\"\"拓扑排序 + BFS\"\"\"\n n = len(nums)\n graph = [[] for _ in range(n + 1)]\n in_degree = [0] * (n + 1)\n for s in sequences:\n pre = s[0]\n for cur in s[1:]:\n graph[pre].append(cur)\n in_degree[cur] += 1\n pre = cur\n # 若nums中的元素个数比sequences中出现的元素个数多,则多出来的那些元素,它们的入度为0。若存在多个入度为0的节点,则下面会返回False\n queue = [i for i in range(1, n + 1) if in_degree[i] == 0]\n # BFS的起始节点有且只能有一个,否则拓扑序列就不是唯一的\n if len(queue) != 1:\n return False\n idx = 0\n for u in queue:\n # 若nums中的元素个数比sequences中出现的元素个数少,则这里会返回False\n if not (idx < n and nums[idx] == u):\n return False\n idx += 1\n next_cnt = 0\n for v in graph[u]:\n in_degree[v] -= 1\n if in_degree[v] == 0:\n queue.append(v)\n next_cnt += 1\n # 若当前节点u的下一层节点存在多个,则拓扑序列不唯一\n if next_cnt > 1:\n return False\n return True\n\n\nif __name__ == '__main__':\n print(Solution().sequenceReconstruction(nums=[1, 2, 3, 4], sequences=[[1, 2], [1, 3], [2, 3]]))\n","repo_name":"atm1992/LeetCode_in_Python3","sub_path":"jian_zhi_offer_2/a115_sequenceReconstruction.py","file_name":"a115_sequenceReconstruction.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"13976467598","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\nfrom itertools import accumulate\n\n# Complete the arrayManipulation function below.\ndef arrayManipulation(n):\n arr=[0]*(n+1)\n for _ in range(m):\n a,b,c=map(int, input().split(' '))\n arr[a-1]+=c\n arr[b]-=c\n return(max(accumulate(arr)))\n \n \n \n \n \n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n nm = input().split()\n\n n = int(nm[0])\n\n m = int(nm[1])\n\n \n\n result = arrayManipulation(n)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"Priyankasaggu11929/Hacker-rank-practice","sub_path":"Array_Manipulation.py","file_name":"Array_Manipulation.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"87"} +{"seq_id":"38002859031","text":"import pygame\r\nimport os\r\nimport math\r\nimport time\r\n\r\nimport numpy as np\r\nfrom scipy.interpolate import griddata\r\n\r\nfrom colour import Color\r\n\r\n#low range of the sensor (this will be blue on the screen)\r\nMINTEMP = 20\r\n\r\n#high range of the sensor (this will be red on the screen)\r\nMAXTEMP = 26\r\n\r\n#how many color values we can have\r\nCOLORDEPTH = 1024\r\n\r\nos.putenv('SDL_FBDEV', '/dev/fb1')\r\npygame.init()\r\n\r\n\r\npoints = [(math.floor(ix / 8), (ix % 8)) for ix in range(0, 64)]\r\ngrid_x, grid_y = np.mgrid[0:7:32j, 0:7:32j]\r\n\r\n#sensor is an 8x8 grid so lets do a square\r\nheight = 480\r\nwidth = 480\r\n\r\n#the list of colors we can choose from\r\nblue = Color(\"indigo\")\r\ncolors = list(blue.range_to(Color(\"red\"), COLORDEPTH))\r\n\r\n#create the array of colors\r\ncolors = [(int(c.red * 255), int(c.green * 255), int(c.blue * 255)) for c in 
colors]\r\n\r\ndisplayPixelWidth = width / 30\r\ndisplayPixelHeight = height / 30\r\n\r\nlcd = pygame.display.set_mode((width, height))\r\n\r\nlcd.fill((255,0,0))\r\n\r\npygame.display.update()\r\npygame.mouse.set_visible(False)\r\n\r\nlcd.fill((0,0,0))\r\npygame.display.update()\r\n\r\n#some utility functions\r\ndef constrain(val, min_val, max_val):\r\n return min(max_val, max(min_val, val))\r\n\r\ndef map(x, in_min, in_max, out_min, out_max):\r\n return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min\r\n\r\n#let the sensor initialize\r\ntime.sleep(.1)\r\n\r\nf = open(\"data14.txt\", \"r\")\r\npixels_str = f.read().splitlines()\r\n#x_list = []\r\n#y_list = []\r\npixels = []\r\nfor line in pixels_str:\r\n #x_list.append(line.split(\" \")[0])\r\n #y_list.append(line.split(\" \")[1])\r\n pixels.append(float(line.split(\" \")[2])-273.15)\r\n#pixels = [float(item) for item in pixels_str]\r\n#print(pixels)\r\nnew_pixels = []\r\n\r\nfor y in range(0,480):\r\n for x in range(0,480):\r\n #print(pixels[480*x+y])\r\n #pygame.draw.rect(screen, [red, blue, green], [left, top, width, height], filled)\r\n pygame.draw.rect(lcd, [int(pixels[480*x+y]*5),int(pixels[480*x+y]*5),int(pixels[480*x+y]*5)],[x,y,1,1])\r\n new_pixels.append(str(x-60)+\" \"+str(y)+\" \"+str(pixels[480*x+y])+\"\\n\")\r\n\r\n'''\r\nf2 = open(\"1_BottleFLIR.txt\",\"w\")\r\nf2.writelines(new_pixels)\r\nf2.close()'''\r\n\r\npygame.display.update()\r\n\r\n","repo_name":"KuuGary/FLIR","sub_path":"raw data/drawHighReso.py","file_name":"drawHighReso.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"15385446634","text":"import config\nfrom schemas import CurrencySchema, ValueSchema\n\nimport requests\nimport logging\nimport time\nfrom datetime import datetime\nimport xmltodict\n\n\ndef today_is(date_format: str) -> str:\n return datetime.today().strftime(date_format)\n\n\ndef convert_xml_to_list(xml_data: str) -> list:\n converted = xmltodict.parse(xml_data)['rates']['item']\n return converted\n\n\ndef make_project_msg(cur_obj: CurrencySchema, value_obj: ValueSchema, previous: str) -> str:\n msg = ''\n float_cur = float(value_obj.value)\n float_prev = float(previous)\n if float_cur > float_prev:\n msg += f\"⬆ {cur_obj.code} {cur_obj.quant} - {float_cur} ({float_prev})\"\n elif float_cur < float_prev:\n msg += f\"⬇ {cur_obj.code} {cur_obj.quant} - {float_cur} ({float_prev})\"\n elif float_cur == float_prev:\n msg += f\"↔ {cur_obj.code} {cur_obj.quant} - {float_cur}\"\n if value_obj.percent >= 5:\n msg += f\" ‼️\"\n elif value_obj.percent <= -5:\n msg += f\" ✅\"\n return msg\n\n\ndef make_msg(msg: list) -> str:\n message = '\\n'.join(msg)\n message += f\"\\n\\n{config.TG_CHANNEL}\"\n return message\n\n\ndef get_percentage(current: float, previous: float) -> float:\n if current < previous:\n current, previous = previous, current\n return ((current - previous) / current) * 100\n # a < b = ((b - a) / a) * 100\n # a > b = ((a - b) / a) * 100\n\n\ndef is_holiday() -> bool:\n today = int(today_is('%Y%m%d'))\n holiday = requests.get(config.URL_HOLIDAYS).json()\n if today in holiday.get('holidays'):\n return True\n return False\n\n\ndef send_to_tg(message: str):\n url = f'https://api.telegram.org/bot{config.TG_TOKEN}/sendMessage'\n params = {\n 'chat_id': config.TG_CHANNEL,\n 'text': message,\n 'parse_mode': 'HTML'\n }\n r = requests.post(url, data=params)\n if r.status_code != 200:\n data = r.json()\n logging.info(f\"TG MSG: 
{data}\")\n time_to_sleep = data['parameters']['retry_after']\n time.sleep(time_to_sleep)\n send_error(message)\n\n\ndef send_error(message):\n url = f'https://api.telegram.org/bot{config.TG_TOKEN}/sendMessage'\n params = {\n 'chat_id': config.TG_CHANNEL_ERROR,\n 'text': message\n }\n r = requests.post(url, data=params)\n if r.status_code != 200:\n data = r.json()\n logging.info(f\"TG ERROR: {data}\")\n time_to_sleep = data['parameters']['retry_after']\n time.sleep(time_to_sleep)\n send_error(message)\n\n\nif __name__ == '__main__':\n print(is_holiday())\n","repo_name":"daradan/currency_to_tg_chnl","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"40563975679","text":"import random\nchavez = 0\nnum = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]\nfor i in range(0, 30):\n num[i] = random.randint(1, 15)\n\nchave = int(input('Digite a chave: '))\nprint(num)\nfor i in range(0, 30):\n if num[i] == chave:\n print(f'A chave foi encontrada na posição {i}.')\n chavez += 1\nprint(f'A chave apareceu {chavez} vezes')\n","repo_name":"Megelado/exercicios-portugol-para-python","sub_path":"ex080.py","file_name":"ex080.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"41517378931","text":"import socket\r\nimport time\r\nimport pickle\r\n\r\nHEADER_SIZE = 10\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.bind((socket.gethostname(), 1236))\r\ns.listen(5)\r\n\r\nwhile True:\r\n client_socket, address = s.accept()\r\n print(f\"connection from {address} has been established\")\r\n\r\n test_dict = {\"1\": \"Hey\", \"2\": \"There\"}\r\n msg = pickle.dumps(test_dict)\r\n # print(msg)\r\n msg = bytes(f\"{len(msg) :< {HEADER_SIZE}}\", \"utf-8\") + msg\r\n\r\n client_socket.send(msg)\r\n","repo_name":"SindujaVijayakumar/python_networking","sub_path":"simple_server.py","file_name":"simple_server.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"39314788032","text":"import numpy as np\nimport math as m\nimport time\nimport sys\nimport matplotlib\nimport matplotlib.pylab as pylab\n\npfile = '/home/rumbaugh/LFC/sc1322.lfc.newIDsandoldIds.radecmag.cat'\nsfile= '/home/rumbaugh/FINAL.cl1322.lrisplusdeimos.cat'\n\ncr = read_file(pfile)\ncrs = read_file(sfile)\n\nRA = copy_colvals(cr,'col3')\nDec = copy_colvals(cr,'col4')\nsRA = copy_colvals(crs,'col4')\nsDec = copy_colvals(crs,'col5')\nsz = copy_colvals(crs,'col9')\nsq = copy_colvals(crs,'col11')\n\nFILE = open('/home/rumbaugh/Cl1324+3013.companion.anal.reg','w')\n\nFILE.write('global color=green font=\"helvetica 10 normal\" select=1 highlite=1 edit=1 move=1 delete=1 include=1 fixed=0 width=2 source\\nfk5\\n')\nfor i in range(0,len(RA)):\n FILE.write('circle(%f,%f,0.8\") # color=cyan\\n'%(RA[i],Dec[i]))\ng = np.where((sq < 0.2) | (sq > 1.2))\ng = g[0]\nfor i in range(0,len(g)):\n FILE.write('circle(%f,%f,3\") # color=magenta\\n'%(sRA[g[i]],sDec[g[i]]))\nFILE.close()\n","repo_name":"takkyon/runs","sub_path":"run.6.2.12.1755.py","file_name":"run.6.2.12.1755.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"2456946029","text":"import poek as 
pk\n\n\nmodel = pk.model()\n\nN = 165\n\ngamma = 9.81\ntmass = 500.0\nbl = 1.0\nfract = 0.6\n\nlength = bl * (N + 1) * fract\nmass = tmass / (N + 1)\nmg = mass * gamma\n\nS = list(range(0, N + 2))\n\nx = model.add_variable(index=S)\nfor i in S:\n x[i].value = i * length / (N + 1)\ny = model.add_variable(index=S, value=0)\nz = model.add_variable(index=S, value=0.0)\n\nmodel.add_objective(mg * y[0] / 2.0 + sum(mg * y[i] for i in range(1, N + 1)) + mg * y[N + 1] / 2.0)\n\nfor i in range(1, N + 2):\n model.add_constraint(\n (x[i] - x[i - 1]) ** 2 + (y[i] - y[i - 1]) ** 2 + (x[i] - z[i - 1]) ** 2 == bl**2\n )\n\nx[0].value = 0.0\nx[0].fixed = True\ny[0].value = 0.0\ny[0].fixed = True\nz[0].value = 0.0\nz[0].fixed = True\nx[N + 1].value = length\nx[N + 1].fixed = True\n","repo_name":"sandialabs/coek","sub_path":"test/cute/poek/catenary.py","file_name":"catenary.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"87"} +{"seq_id":"820274712","text":"import gzip\nimport math\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport csv\nimport numpy as np\nimport PySimpleGUI as sg\nimport pandas as pd\nimport os\nimport matplotlib.image as mpimg\nimport json\nimport datetime\nimport tkinter as tk\n\n\ndef convert_unix_to_seconds(unix_timestamp):\n return datetime.datetime.fromtimestamp(unix_timestamp)\n\ndef delta_unix_respect_to_video_start(video_start_unix, actual_unix):\n d1 = convert_unix_to_seconds(video_start_unix)\n d2 = convert_unix_to_seconds(actual_unix)\n delta_seconds = round((d2 - d1).total_seconds(), 0)\n return delta_seconds\n\n\nn=int(sg.popup_get_text(\"Quanti pazienti vuoi analizzare(grafico immagini)?\"))\n\n\n#hypersex\nImgTask4 = []\nTime1=[]\nTime1_2=[]\nriga1=0\n\n#gambling\nImgTask42 = []\nTime2=[]\nTime2_2=[]\nriga2=0\n\n#eating\nImgTask43 = []\nTime3=[]\nTime3_2=[]\nriga3=0\n\n\n#shopping\nImgTask44 = []\nTime4=[]\nTime4_2=[]\nriga4=0\n\n\ndur1_1=[]\ndur1_2=[]\ndur2_1=[]\ndur2_2=[]\ndur3_1=[]\ndur3_2=[]\ndur4_1=[]\ndur4_2=[]\n\n\n\n\nvalorii = []\nvalneutro = []\nstimolo = []\nvaloreneu = 0\ndur_stimolo=[]\ndur_neutro=[]\n\ndxx2=[]\nsxx2=[]\ndxx3=[]\nsxx3=[]\ndxx4=[]\nsxx4=[]\ndxx5=[]\nsxx5=[]\n\n\n\n\n\n\n\n\ndef Fisstask(time1, time2, time3, csv_filef):\n listTime = []\n listTime.append(time1)\n inter = []\n Timeimg = [time1, time2, time3]\n # print(Timeimg)\n num = 2\n\n for i in range(num):\n\n if i == num - 1:\n helpTime = time3\n else:\n helpTime = Timeimg[i]\n numTime = (str(listTime[i]) + '-' + str(helpTime))\n listTime.append(helpTime)\n inter.append(numTime)\n\n # dati del grafico fixation\n csv_filee = csv_filef\n dataFrame = pd.read_csv(csv_filee)\n data = dataFrame.iloc[:, [0, 1, 3, 4]].values # Prendo i valori che mi serviranno\n times = [element for element in data[:, 1]]\n time2 = []\n smin = 0\n numF1 = []\n numFix1 = []\n\n # calco numFix delle foto\n for i in range(1, len(listTime)):\n for x in times:\n if x <= listTime[i]:\n time2.append(x)\n numFix1 = [int(element) for element in data[smin:len(time2), 1]]\n diff = len(time2) - smin\n smin = smin + diff\n numF1.append(len(numFix1))\n numFix1.clear()\n # print(\"VALORIIIIIII\")\n # print(numF1)\n time2.clear()\n\n return numF1\n\n\n\n\n\n\n\n\n\n\nroot = tk.Tk() # Libreria tkinter\n\n\ndef calcolofissdxsx(time1, time2, time3, riga, csv_file):\n fix = Fisstask(time1, time2, time3, csv_file)\n dataFrame = pd.read_csv(csv_file)\n data = dataFrame.iloc[:, [0, 1, 2, 3, 4]].values\n posX = [element for element in data[:, 3]]\n 
posY = [element for element in data[:, 4]]\n displayWidth = root.winfo_screenwidth()\n displayHeight = root.winfo_screenheight()\n print(displayHeight, displayHeight)\n\n posXPix = []\n posYPix = []\n for x, y in zip(posX, posY):\n posXPix.append(round(x * displayWidth))\n posYPix.append(round(y * displayHeight))\n\n center = (displayHeight*posXPix[riga]) - (displayWidth*posYPix[riga])\n fix.remove(fix[0])\n print(fix)\n sx_fixations = [i for i in fix if posX[riga] <= center]\n dx_fixations = [i for i in fix if posX[riga] > center]\n\n print(\"Fissazioni a sinistra:\", sx_fixations)\n print(\"Fissazioni a destra:\", dx_fixations)\n\n return sx_fixations, dx_fixations\n\n\ndef durataFiss(csv_file, time1, line,dx,sx):\n fileFix = pd.read_csv(csv_file, sep=',', engine='python', header=None)\n dataFix = fileFix.values.tolist()\n duration_stim = []\n duration_neu = []\n parameter_to_find = int(time1)\n print(time1)\n\n\n with open(csv_file, 'r') as file:\n csv_reader = csv.reader(file)\n next(csv_reader)\n line_count = 0\n for row in csv_reader:\n line_count += 1\n float_row = [float(value) for value in row]\n print(float_row)\n if (float_row[1]) >= round(parameter_to_find):\n print(\"TROVATOO\")\n print(f\"La riga {line_count} contiene il parametro {parameter_to_find}.\")\n line=line_count\n print(line)\n break\n\n\n\n for l in range(sum(dx + sx)):\n if (sum(sx) == 0):\n duration_neu.append(dataFix[line + l][2])\n else:\n duration_stim.append(dataFix[line + l][2])\n print(time1)\n print(line)\n print(sx, dx)\n print(\"VALORIIIIII\")\n print(duration_stim, duration_neu)\n return duration_stim, duration_neu\n\n\n\n\n\n\n\n\ndef RecupImg(dataTime):\n # hypersex\n for i in dataTime:\n\n if i[0] == \"['Decision_making_image'\" and i[1].startswith(\" 'T4_09/T4_09_AB.jpg\") and i[4].endswith(\"]\"):\n ImgTask4.append(i[1])\n Time1.append(i[2])\n Time1_2.append(i[3])\n for j, row in enumerate(dataTime):\n if \" 'T4_09/T4_09_AB.jpg'\" in row:\n print(f'Riga {j + 1}: {row}')\n print('La riga è:', j + 1)\n riga1 = j + 1\n\n # gambling\n for k in dataTime:\n\n if k[0] == \"['Decision_making_image'\" and k[1].startswith(\" 'T4_05/T4_05_AB.jpg\") and k[4].endswith(\"]\"):\n ImgTask42.append(k[1])\n Time1.append(k[2])\n Time1_2.append(k[3])\n for j, row in enumerate(dataTime):\n if \" 'T4_05/T4_05_AB.jpg'\" in row:\n print(f'Riga {j + 1}: {row}')\n print('La riga è:', j + 1)\n riga2 = j + 1\n\n # eating\n for t in dataTime:\n\n if t[0] == \"['Decision_making_image'\" and t[1].startswith(\" 'T4_03/T4_03_AB.jpg\") and t[4].endswith(\"]\"):\n ImgTask43.append(t[1])\n Time3.append(t[2])\n Time3_2.append(t[3])\n for j, row in enumerate(dataTime):\n if \" 'T4_03/T4_03_AB.jpg'\" in row:\n print(f'Riga {j + 1}: {row}')\n print('La riga è:', j + 1)\n riga3 = j + 1\n\n # shopping\n for s in dataTime:\n\n if s[0] == \"['Decision_making_image'\" and s[1].startswith(\" 'T4_13/T4_13_AB.jpg\") and s[4].endswith(\"]\"):\n ImgTask44.append(s[1])\n Time4.append(s[2])\n Time4_2.append(s[3])\n for j, row in enumerate(dataTime):\n if \" 'T4_13/T4_13_AB.jpg'\" in row:\n print(f'Riga {j + 1}: {row}')\n print('La riga è:', j + 1)\n riga4 = j + 1\n\n print(riga1)\n print(ImgTask4)\n print(Time1)\n print(Time1_2)\n\n # hpesex time\n delta1 = delta_unix_respect_to_video_start(dataTime[0][2], dataTime[riga1][2])\n delta2 = delta_unix_respect_to_video_start(dataTime[0][2], dataTime[riga1][3])\n Hypersex = calcolofissdxsx(delta1, delta1, delta2, riga1, csv_file)\n sx2 = Hypersex[0]\n if (len(sx2) == 0): sx2.append(0)\n\n dx2 = 
Hypersex[1]\n if (len(dx2) == 0): dx2.append(0)\n print(Hypersex)\n\n # gambling\n delta2 = delta_unix_respect_to_video_start(dataTime[0][2], dataTime[riga2][2])\n delta3 = delta_unix_respect_to_video_start(dataTime[0][2], dataTime[riga2][3])\n Gamb = calcolofissdxsx(delta2, delta2, delta3, riga2, csv_file)\n sx3 = Gamb[0]\n if (len(sx3) == 0): sx3.append(0)\n\n dx3 = Gamb[1]\n if (len(dx3) == 0): dx3.append(0)\n print(Gamb)\n\n # EATING\n delta3 = delta_unix_respect_to_video_start(dataTime[0][2], dataTime[riga3][2])\n delta4 = delta_unix_respect_to_video_start(dataTime[0][2], dataTime[riga3][3])\n eat = calcolofissdxsx(delta3, delta3, delta4, riga3, csv_file)\n sx4 = eat[0]\n if (len(sx4) == 0): sx4.append(0)\n\n dx4 = eat[1]\n if (len(dx4) == 0): dx4.append(0)\n print(eat)\n\n # SHOPPING\n delta4 = delta_unix_respect_to_video_start(dataTime[0][2], dataTime[riga4][2])\n delta5 = delta_unix_respect_to_video_start(dataTime[0][2], dataTime[riga4][3])\n shop = calcolofissdxsx(delta4, delta4, delta5, riga4, csv_file)\n sx5 = shop[0]\n if (len(sx5) == 0): sx5.append(0)\n\n dx5 = shop[1]\n if (len(dx5) == 0): dx5.append(0)\n print(shop)\n\n line1 = \"\"\n line2 = \"\"\n line3 = \"\"\n line4 = \"\"\n\n dura1_1,dura1_2 = durataFiss(csv_file, delta1, line1, dx2, sx2)\n dura2_1,dura2_2 = durataFiss(csv_file, delta2, line2, dx3, sx3)\n dura3_1,dura3_2 = durataFiss(csv_file, delta3, line3, dx4, sx4)\n dura4_1,dura4_2 = durataFiss(csv_file, delta4, line4, dx5, sx5)\n\n dura1_1=PiuPazientiDur(dura1_1,n)\n dura1_2=PiuPazientiDur(dura1_2,n)\n\n dura2_1 = PiuPazientiDur(dura2_1, n)\n dura2_2 = PiuPazientiDur(dura2_2, n)\n\n dura3_1 = PiuPazientiDur(dura3_1, n)\n dura3_2 = PiuPazientiDur(dura3_2, n)\n\n dura4_1 = PiuPazientiDur(dura4_1, n)\n dura4_2 = PiuPazientiDur(dura4_2, n)\n\n return sx2, dx2, sx3, dx3, sx4, dx4, sx5, dx5, dura1_1,dura1_2,dura2_1,dura2_2,dura3_1,dura3_2,dura4_1,dura4_2\n\n\ndef PiuPazientiDur(Listfix1, n):\n for i in range(len(Listfix1)):\n Listfix1[i] = float(Listfix1[i])\n if n == 1:\n Listfix1[i] = Listfix1[i]\n if n >= 2:\n Listfix1[i] = Listfix1[i] + Listfix1[i]\n\n return Listfix1\n\n\n\n\n\nfor i in range(n):\n sg.popup(\"Inserire i file del paziente n:\", i + 1)\n pathTime = sg.popup_get_file(sg.FileBrowse(), title=\"RECUPERA TEMPI.TXT del paziente\")\n fileTime = pd.read_csv(pathTime, sep=',', engine='python', header=None)\n dataTime = fileTime.values.tolist()\n # print(dataTime)\n csv_file = sg.popup_get_file(sg.FileBrowse(), title=\"RECUPERA FILE FIX.CSV del paziente\")\n sxx2, dxx2, sxx3, dxx3, sxx4, dxx4, sxx5, dxx5,dur1_1,dur1_2,dur2_1,dur2_2,dur3_1,dur3_2,dur4_1,dur4_2=RecupImg(dataTime)\n\n dur1_1 = PiuPazientiDur(dur1_1, n)\n dur1_2 = PiuPazientiDur(dur1_2, n)\n\n dur2_1 = PiuPazientiDur(dur2_1, n)\n dur2_2 = PiuPazientiDur(dur2_2, n)\n\n dur3_1 = PiuPazientiDur(dur3_1, n)\n dur3_2 = PiuPazientiDur(dur3_2, n)\n\n dur4_1 = PiuPazientiDur(dur4_1, n)\n dur4_2 = PiuPazientiDur(dur4_2, n)\n\n\n\n\n\n\n\n\n\n\n\n valorii = [\n int(dxx2[0]), int(sxx2[0]),\n int(dxx3[0]), int(sxx3[0]),\n int(dxx4[0]), int(sxx4[0]),\n int(dxx5[0]), int(sxx5[0])]\n\n valneutro = [int(dxx2[0]), int(dxx3[0]), int(dxx4[0]), int(dxx5[0])]\n stimolo = [int(sxx2[0]), int(sxx3[0]), int(sxx4[0]), int(sxx5[0])]\n\n\n\n valoreneu = sum(valneutro)\n\n # durate delle fissazioni in ms\n\n print(\"durateeeeeeeee\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#FUNZIONI PER RECUPERARE LE DURATE NETURE E CON STIMOLO DI OGNI IMMAGINE DICOTOMICA\n#prendiamo la riga e il tempo, per prendere la durate delle 
fix che si sono verifcate quando c'è stata la foto\n\n\n\n\n\n\n\nfig, ax = plt.subplots(num='Conteggio Fissazioni', figsize=(12,8))\nax.set_xticks(np.arange(-4, 16, 1))\nax.tick_params(axis='x', which='major', labelsize=10, pad=4)\nplt.xticks(rotation=20)\n\n\nplt.bar(\"Hypersexuality\", stimolo[0],label=ImgTask4[0],color=\"red\")\nplt.bar(\"Gambling\", stimolo[1],label=ImgTask42[0],color=\"blue\")\nplt.bar(\"Eating\", stimolo[2],label=ImgTask43[0],color=\"green\")\nplt.bar(\"Shopping\", stimolo[3],label=ImgTask44[0],color=\"pink\")\nplt.bar(\"Neutra\",valoreneu,label=\"Immagine neutra\",color=\"yellow\")\nplt.ylabel('Valore totale delle fissazioni')\nplt.suptitle('Grafico delle frequenze assolute rispetto al task 4 dicotomico',fontweight='bold',fontsize=16)\nplt.title('Il valore totale delle fissazioni fa riferimento alle foto con AB,dove:la foto neutra è a dx, e la foto con stimolo è a sx',fontsize=10)\nplt.legend(loc=\"best\")\nfig.savefig('grafic/STIMOLONEU_AB.png')\n\nplt.show()\n\ndur_stimolo=dur1_1+dur2_1+dur3_1+dur4_1\ndur_neutro=dur1_2+dur2_2+dur3_2+dur4_2\n\n\n#BOXPLOT\n\ndata=[dur_stimolo,dur_neutro]\nfig, ax = plt.subplots(figsize=(12,8))\nbp = ax.boxplot(data,labels=[\"Immagini con stimolo\",\"Neutre\"])\n\n# Personalizzazione dell'asse y\n\nplt.ylabel(\"Tempo(sec)\",size=9)\nplt.suptitle(\"Boxplot task 4 dicotomico:\\nRappresentazione visuale della durata delle fissazioni\",size=11,fontweight='bold')\nplt.title(\"Le durate delle fissazioni fanno riferimento alle foto che hanno la sigla AB,dove:la foto neutra è a dx, e la foto con stimolo è a sx\", transform=ax.transAxes,\n fontsize=10, va='top', ha='center')\n\n\n\n# Mostra il grafico\nfig.savefig('grafic/boxplotsk4IMG.png')\nplt.show()\n\n\n\n\n","repo_name":"Paolog98/Tobi_Statistica","sub_path":"TASK4STIMOLI_AB.py","file_name":"TASK4STIMOLI_AB.py","file_ext":"py","file_size_in_byte":11129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"15723627978","text":"__author__ = 'nico'\n\nf = open('../input/ATS_TOA_1PRBCM20030102_113342_000000482012_00337_04399_0003.N1', 'r')\noutput = open('../output/catalogue.xml', 'w')\n\nwrite = output.write\nwrite('')\nwrite('\\n')\n\nfor i in range(16):\n line = f.readline()\n\n\n\n a = 'PRODUCT='\n if line.startswith(a):\n write(' ')\n write('')\n startIndex = line.index('\\\"')+1\n endIndex = len(line)-2\n write(line[startIndex:endIndex])\n write('')\n write('\\n')\n\n b = 'ACQUISITION_STATION='\n if line.startswith(b):\n write(' ')\n write('')\n startIndex = line.index('\\\"')+1\n endIndex = len(line)-2\n write(line[startIndex:endIndex].strip())\n write('')\n write('\\n')\n\n c = 'PROC_CENTER='\n if line.startswith(c):\n write(' ')\n write('')\n startIndex = line.index('\\\"')+1\n endIndex = len(line)-2\n write(line[startIndex:endIndex])\n write('')\n write('\\n')\n\n d = 'PROC_TIME='\n if line.startswith(d):\n write(' ')\n write('')\n startIndex = line.index('\\\"')+1\n endIndex = len(line)-2\n write(line[startIndex:endIndex])\n write('')\n write('\\n')\n\n e = 'SOFTWARE_VER='\n if line.startswith(e):\n write(' ')\n write('')\n startIndex = line.index('\\\"')+1\n endIndex = len(line)-2\n write(line[startIndex:endIndex])\n write('')\n write('\\n')\n\n j = 'SENSING_START='\n if line.startswith(j):\n write(' ')\n write('')\n startIndex = line.index('\\\"')+1\n endIndex = len(line)-2\n write(line[startIndex:endIndex])\n write('')\n write('\\n')\n\n g = 'SENSING_STOP='\n if line.startswith(g):\n write(' ')\n write('')\n startIndex = 
line.index('\\\"')+1\n endIndex = len(line)-2\n write(line[startIndex:endIndex])\n write('')\n write('\\n')\n\n h = 'ABS_ORBIT='\n if line.startswith(h):\n write(' ')\n write('')\n startIndex = line.index('=')+1\n endIndex = len(line)-1\n write(line[startIndex:endIndex])\n write('')\n write('\\n')\n\n\n\n\nwrite('')\nf.close()","repo_name":"giconarming/metadata-conversion","sub_path":"XML-Datei_programmieren/logic/catalogue_XML.py","file_name":"catalogue_XML.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"20935828464","text":"# -*- encoding: utf-8 -*-\nimport unittest\nfrom supriya.tools import nonrealtimetools\nfrom supriya.tools import requesttools\n\n\nclass TestCase(unittest.TestCase):\n\n def test_Session(self):\n session = nonrealtimetools.Session()\n request = requesttools.BufferReadChannelRequest(\n buffer_id=1,\n channel_indices=[4, 5],\n file_path=session,\n frame_count=512,\n leave_open=True,\n starting_frame_in_buffer=0,\n starting_frame_in_file=0,\n )\n assert request.file_path is session\n osc_message = request.to_osc_message(with_textual_osc_command=True)\n assert osc_message.address == '/b_readChannel'\n assert osc_message.contents == (1, session, 0, 512, 0, 1, 4, 5)\n","repo_name":"bsdpunk/supriya","sub_path":"supriya/tools/requesttools/test/test_BufferReadChannelRequest.py","file_name":"test_BufferReadChannelRequest.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"87"} +{"seq_id":"13460695003","text":"from datapackage_pipelines_knesset.common.processors.base_processor import BaseProcessor\nfrom knesset_data.protocols.committee import CommitteeMeetingProtocol\nfrom knesset_data.protocols.exceptions import AntiwordException\nimport os, csv, json, subprocess, logging, shutil\n\n\nclass ParseCommitteeMeetingProtocolsProcessor(BaseProcessor):\n\n def __init__(self, *args, **kwargs):\n super(ParseCommitteeMeetingProtocolsProcessor, self).__init__(*args, **kwargs)\n self._schema[\"fields\"] = [\n {\"name\": \"kns_committee_id\", \"type\": \"integer\", \"description\": \"primary key from kns_committee table\"},\n {\"name\": \"kns_session_id\", \"type\": \"integer\", \"description\": \"primary key from kns_committeesession table\"},\n {\"name\": \"protocol_url\", \"type\": \"string\"},\n {\"name\": \"text_url\", \"type\": \"string\"},\n {\"name\": \"parts_url\", \"type\": \"string\"},]\n self._schema[\"primaryKey\"] = [\"kns_session_id\"]\n self._all_filenames = []\n\n def _process(self, datapackage, resources):\n return self._process_filter(datapackage, resources)\n\n def _get_filename(self, relpath):\n return os.path.join(self._parameters[\"out-path\"], relpath)\n\n def _filter_row(self, meeting_protocol, **kwargs):\n committee_id = meeting_protocol[\"kns_committee_id\"]\n meeting_id = meeting_protocol[\"kns_session_id\"]\n parts_relpath = os.path.join(str(committee_id), \"{}.csv\".format(meeting_id))\n text_relpath = os.path.join(str(committee_id), \"{}.txt\".format(meeting_id))\n parts_filename = self._get_filename(parts_relpath)\n text_filename = self._get_filename(text_relpath)\n protocol_filename = meeting_protocol[\"protocol_file\"].strip()\n protocol_ext = \".docx\" if protocol_filename.endswith(\".docx\") else protocol_filename[-4:]\n if not os.path.exists(parts_filename) or os.path.getsize(parts_filename) < 5:\n self._ensure_parts_path_exists(parts_filename, parts_relpath)\n if 
protocol_ext == \".doc\":\n parse_res = self._parse_doc_protocol(committee_id, meeting_id, protocol_filename, parts_filename,\n text_filename)\n elif protocol_ext == \".rtf\":\n parse_res = self._parse_rtf_protocol(committee_id, meeting_id, protocol_filename, parts_filename,\n text_filename)\n elif protocol_ext == \".docx\":\n parse_res = None\n else:\n raise Exception(\"unknown extension: {}\".format(protocol_ext))\n if not parse_res:\n if os.path.exists(text_filename):\n os.unlink(text_filename)\n if os.path.exists(parts_filename):\n os.unlink(parts_filename)\n text_filename = None\n parts_filename = None\n if parts_filename:\n self._all_filenames += [parts_relpath]\n if text_filename:\n self._all_filenames += [text_relpath]\n parsed_url = lambda f: \"https://next.oknesset.org/data/committee-meeting-protocols-parsed/{}\".format(f)\n yield {\"kns_committee_id\": committee_id,\n \"kns_session_id\": meeting_id,\n \"protocol_url\": meeting_protocol[\"protocol_url\"],\n \"text_url\": parsed_url(text_relpath) if text_filename is not None else None,\n \"parts_url\": parsed_url(parts_relpath) if parts_filename is not None else None,}\n\n def _ensure_parts_path_exists(self, parts_filename, parts_relpath):\n if parts_relpath not in self._all_filenames:\n os.makedirs(os.path.dirname(parts_filename), exist_ok=True)\n\n def _parse_rtf_protocol(self, committee_id, meeting_id, protocol_filename, parts_filename, text_filename):\n rtf_extractor = os.environ.get(\"RTF_EXTRACTOR_BIN\")\n if rtf_extractor:\n cmd = rtf_extractor + ' ' + protocol_filename + ' ' + text_filename\n try:\n subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)\n with open(text_filename) as f:\n protocol_text = f.read()\n with CommitteeMeetingProtocol.get_from_text(protocol_text) as protocol:\n self._parse_protocol_parts(parts_filename, protocol)\n except subprocess.SubprocessError:\n logging.exception(\"committee {} meeting {}: failed to parse rtf file, skipping\".format(committee_id,\n meeting_id))\n return False\n return True\n else:\n logging.warning(\"missing RTF_EXTRACTOR_BIN environment variable, skipping rtf parsing\")\n return False\n\n def _parse_doc_protocol(self, committee_id, meeting_id, protocol_filename, parts_filename, text_filename):\n try:\n with CommitteeMeetingProtocol.get_from_filename(protocol_filename) as protocol:\n with open(text_filename, \"w\") as f:\n f.write(protocol.text)\n logging.info(\"parsed doc to text -> {}\".format(text_filename))\n self._parse_protocol_parts(parts_filename, protocol)\n except (AntiwordException, subprocess.SubprocessError):\n logging.exception(\"committee {} meeting {}: failed to parse doc file, skipping\".format(committee_id,\n meeting_id))\n return False\n return True\n\n def _parse_protocol_parts(self, parts_filename, protocol):\n with open(parts_filename, \"w\") as f:\n csv_writer = csv.writer(f)\n csv_writer.writerow([\"header\", \"body\"])\n for part in protocol.parts:\n csv_writer.writerow([part.header, part.body])\n logging.info(\"parsed parts file -> {}\".format(parts_filename))\n\n def _process_cleanup(self):\n filename = self._get_filename(\"datapackage.json\")\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, \"w\") as f:\n descriptor = {\"name\": \"_\", \"path\": self._all_filenames}\n descriptor.update(**self._parameters.get(\"data-resource-descriptor\", {}))\n json.dump({\"name\": \"_\", \"resources\": [descriptor]}, f)\n\nif __name__ == '__main__':\n 
ParseCommitteeMeetingProtocolsProcessor.main()\n","repo_name":"mayasch/knesset-data-pipelines","sub_path":"datapackage_pipelines_knesset/committees/processors/parse_committee_meeting_protocols.py","file_name":"parse_committee_meeting_protocols.py","file_ext":"py","file_size_in_byte":6482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"87"} +{"seq_id":"36946313109","text":"\"\"\" update wake-up flag status \"\"\"\r\nimport numpy as np\r\n\r\n\r\ndef update_flag_wake_up(flag,\r\n current_flag,\r\n threshold):\r\n \"\"\"\r\n The update_flag_wake_up function is used to update the flag and\r\n index values for each channel. The function takes in a flag, current_flag,\r\n and threshold value as inputs. The function then updates the flag_value array\r\n by setting all the values that are equal to True in current_flag to 0. It then\r\n sets all the values that are equal to True in flag_value (which will be any\r\n peaks detected) to 1 more than their previous value. If this new value is greater\r\n than a threshold, it sets both its index and flag_value\r\n back down to - 1 (so it can start over).\r\n\r\n :param flag: Keep track of the previous state of the flag\r\n :param current_flag: Indicate the index of the current flag\r\n :param threshold: Determine how many samples must pass before the flag is reset\r\n :return: The flag and the index of the changed flags\r\n\r\n \"\"\"\r\n flag_value = np.copy(flag[0])\r\n index = flag[1]\r\n\r\n # if a peak is detected\r\n index[current_flag] = 0\r\n flag_value[current_flag] = True\r\n index[flag_value] += 1\r\n\r\n # if the last peak was threshold samples ago\r\n flag_time_threshold = index > threshold\r\n flag_value[flag_time_threshold] = False\r\n index[flag_time_threshold] = -1\r\n\r\n # One flag has changed\r\n event = np.arange(len(flag[0]))[flag_value != flag[0]].tolist()\r\n\r\n return [flag_value, index], event\r\n","repo_name":"astrolabe-expeditions/LittObs-LAMOS","sub_path":"python/deps/update_flag_wake_up.py","file_name":"update_flag_wake_up.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"41190425644","text":"import argparse\nimport copy\nimport random\nfrom datetime import datetime\nfrom generator import makeBasicProject, makeElement, makeColBorder, makeCol, genQuestions, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk\n\ndef vijayaGame():\n\n # Set up a barebones project\n project = makeBasicProject()\n\n # Create sprite sheet for the player sprite\n player_sprite_sheet = addSpriteSheet(\n project, \"player.png\", \"player\", \"player\")\n project.settings[\"playerSpriteSheetId\"] = player_sprite_sheet[\"id\"]\n\n # add sprites\n a_rock_sprite = addSpriteSheet(project, \"rock.png\", \"rock\", \"static\")\n doorway_sprite = addSpriteSheet(project, \"tower.png\", \"tower\", \"static\")\n duck_sprite = addSpriteSheet(project, \"duck.png\", \"duck\", \"animated\", 2)\n a_dog_sprite = addSpriteSheet(project, \"dog.png\", \"dog\", \"static\")\n\n # Adding actors\n actor = makeActor(a_rock_sprite, 9, 8)\n actor2 = makeActor(a_rock_sprite, 2, 3)\n actor3 = makeActor(duck_sprite, 9, 10, \"animated\", True)\n\n # Testing genQuestions with dog actor\n dog_actor = makeActor(a_dog_sprite, 7, 8)\n dog_script = []\n genQuestions('mc.txt', dog_script)\n dog_actor[\"script\"] = dog_script\n\n # Add a background 
image\n default_bkg = makeBackground(\"placeholder.png\", \"placeholder\")\n project.backgrounds.append(default_bkg)\n\n # Add scenes with some actors\n a_scene2 = copy.deepcopy(makeScene(f\"Scene\", default_bkg))\n a_scene2[\"actors\"].append(dog_actor)\n project.scenes.append(copy.deepcopy(a_scene2))\n \n # Randomly generate num of scenes\n random.seed(datetime.now())\n num = random.randint(1, 20)\n for y in range(num):\n a_scene = copy.deepcopy(makeScene(f\"Scene\", default_bkg)) \n makeColBorder(a_scene) \n if y%2 == 0:\n a_scene[\"actors\"].append(actor)\n if y%3 == 0:\n a_scene[\"actors\"].append(actor3)\n project.scenes.append(copy.deepcopy(a_scene))\n\n # Adding connections\n scene_connections_translations = {\"right\":0, \"left\":1, \"up\":2, \"down\":3}\n scene_connections = [[True, True, True, True] for n in range(num)]\n for y in range(num):\n for attempts in range(num):\n other_scene = random.randint(0, num - 2)\n if other_scene >= y:\n other_scene += 1\n chosen_direction = random.choice([\"right\", \"left\", \"up\", \"down\"])\n if scene_connections[y][scene_connections_translations[chosen_direction]]:\n if scene_connections[other_scene][scene_connections_translations[reverse_direction[chosen_direction]]]:\n scene_connections[y][scene_connections_translations[chosen_direction]] = False\n scene_connections[other_scene][scene_connections_translations[reverse_direction[chosen_direction]]] = False\n addSymmetricSceneConnections(project, project.scenes[y], project.scenes[other_scene], chosen_direction, doorway_sprite)\n break\n\n # Get information about the background\n bkg_x = default_bkg[\"imageWidth\"]\n bkg_y = default_bkg[\"imageHeight\"]\n bkg_width = default_bkg[\"width\"]\n bkg_height = default_bkg[\"height\"]\n\n # add a sprite to indicate the location of a doorway\n # a better way to do this in the actual levels is to alter the background image instead\n doorway_sprite = addSpriteSheet(project, \"tower.png\", \"tower\", \"static\")\n\n # Add some music\n project.music.append(makeMusic(\"template\", \"template.mod\"))\n\n # Set the starting scene\n project.settings[\"startSceneId\"] = project.scenes[0][\"id\"]\n return project\n\n# Utilities\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Generate a Game Boy ROM via a GB Studio project file.\")\n parser.add_argument('--destination', '-d', type=str, help=\"destination folder name\", default=\"../../gbprojects/projects/\")\n parser.add_argument('--assets', '-a', type=str, help=\"asset folder name\", default=\"../assets/\")\n parser.add_argument('--subfolder', '-s', type=bool, help=\"asset folder name\", default=False)\n args = parser.parse_args()\n initializeGenerator(asset_folder=args.assets)\n project = vijayaGame()\n writeProjectToDisk(project, output_path = args.destination)\n\n if args.destination == \"../gbprojects/projects/\":\n print(f\"{bcolors.WARNING}NOTE: Used default output directory, change with the -d flag{bcolors.ENDC}\")\n print(f\"{bcolors.OKBLUE}See generate.py --help for more options{bcolors.ENDC}\")\n","repo_name":"ikarth/game-boy-rom-generator","sub_path":"rom_generator/individual/vijaya_generator.py","file_name":"vijaya_generator.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"87"} 
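The generator record above wires scenes together with addSymmetricSceneConnections: a door placed on one side of a scene pair must consume the mirrored slot on the other side (right pairs with left, up with down), which is what its scene_connections bookkeeping tracks. Below is a minimal self-contained sketch of that pairing logic, using only the standard library and assuming the same four directions the record uses; the connect and free_slots names here are illustrative, not part of the generator's actual API.

import random

# Each room starts with one free door slot per direction.
REVERSE = {"right": "left", "left": "right", "up": "down", "down": "up"}

def connect(free_slots, a, b, direction):
    # Claim `direction` on room a and the mirrored slot on room b; fail if either is taken.
    if direction in free_slots[a] and REVERSE[direction] in free_slots[b]:
        free_slots[a].remove(direction)
        free_slots[b].remove(REVERSE[direction])
        return True
    return False

rooms = range(5)
free_slots = {r: set(REVERSE) for r in rooms}
random.seed(0)
for a in rooms:
    # Try random partners until one still has the mirrored slot free.
    for b in random.sample([r for r in rooms if r != a], len(rooms) - 1):
        if connect(free_slots, a, b, random.choice(sorted(REVERSE))):
            break

Like the record's loop, this retries over candidate partners rather than directions, so a room that cannot pair simply stays unconnected, the same graceful failure the original code relies on when slots run out.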
+{"seq_id":"39833161785","text":"\"\"\"\n헤드라인을 엔터를 기준으로 입력하여 테스트 파일 생성\ntext : keyword has news 테이블에 들어갈 내용\nfile : hadoop wordcount 처리용\n\"\"\"\n\nfrom app import keyword_processing as keyproc, morphological_analysis as morph\n\ni = 1\ntext = []\nwhile True :\n line = input()\n if line == '':\n break\n else:\n text.append(tuple([line, i]))\n i += 1\ntext = tuple(text)\nprint(text)\n\nanalysis_result = morph.morphological_analysis(text)\nprint(analysis_result)\nkeyproc.save_as_file(analysis_result)","repo_name":"tmddnrdl333/TrendIT","sub_path":"python/utils/test_morphological_analysis.py","file_name":"test_morphological_analysis.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"87"} +{"seq_id":"27393907766","text":"\r\n# *****************************************************************************************************************************\r\n#\r\n#\t\t\t\t\t\t\t\t\t\t\tManages core operations data\r\n#\r\n# *****************************************************************************************************************************\r\n\r\nclass CoreOperations:\r\n\tdef __init__(self):\r\n\t\tcore = \"\"\"\t\t\r\n\t\t\t\t_temp ; ! ?exec @ *wordsize &signmask + >r 0= 2/ c! c@ depth nand r> repeat\r\n\t\t\t\"\"\"\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# core words.\r\n\t\tself.coreList = core.lower().split()\t\t\t\t\t\t\t\t\t\t\t\t# make a list of words\r\n\t\tself.coreList.sort()\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# alphabetical order\r\n\t\tself.coreDictionary = {} \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# convert to dictionary\r\n\t\tfor n in range(0,len(self.coreList)):\r\n\t\t\tself.coreDictionary[self.coreList[n]] = (n+1) * 2\r\n\r\n\tdef getName(self,id):\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# id -> name\r\n\t\treturn self.coreList[id]\r\n\tdef getID(self,word):\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# name -> id\r\n\t\treturn self.coreDictionary(word.lower().strip())\r\n\r\n\tdef createFiles(self):\r\n\t\th = open(\"__primitives.h\",\"w\")\t\t\t\t\t\t\t\t\t\t\t\t\t\t# create any files which depend on IDs\r\n\t\th.write(\"/* Automatically generated */\\n\\n\")\r\n\t\th.write(\"#ifndef __PRIMITIVES\\n#define __PRIMITIVES\\n\\n\")\r\n\t\th.write(\"#define COP_COUNT ({0})\\n\\n\".format(len(self.coreList)))\r\n\t\tfor name in self.coreDictionary.keys():\r\n\t\t\th.write(\"#define COP_{0} ({1})\\n\".format(self.toIdentifier(name).upper(),self.coreDictionary[name]))\r\n\t\th.write(\"\\n#ifdef STATIC_WORD_NAMES\\n\\n\")\r\n\t\ts = \",\".join(['\"'+x+'\"' for x in self.coreList])\r\n\t\th.write(\"static const char *__primitives[] = {\"+'\"\",'+s+\"};\\n\")\r\n\t\th.write(\"#endif\\n\\n\")\r\n\t\th.write(\"#endif\\n\\n\")\r\n\t\th.close()\r\n\r\n\tdef toIdentifier(self,s):\r\n\t\ts = s.replace(\"@\",\"_READ_\").replace(\"!\",\"_STORE_\").replace(\"+\",\"_ADD_\")\t\t\t\t# convert word to valid C identifier\r\n\t\ts = s.replace(\"-\",\"_SUB_\").replace(\"/\",\"_DIV_\").replace(\">\",\"_GREATER_\")\r\n\t\ts = s.replace(\"*\",\"_MUL_\").replace(\"<\",\"_LESS_\").replace(\"=\",\"_EQUAL_\")\r\n\t\ts = s.replace(\";\",\"_RETURN_\").replace(\"$\",\"_SYS_\").replace(\"&\",\"_AMPERSAND_\")\r\n\t\ts = s.replace(\"?\",\"_QUESTION_\").replace(\"__\",\"_\")\r\n\t\ts = s[1:] if s[0] == \"_\" else s\r\n\t\ts = s[:-1] if s[-1] == \"_\" else s\r\n\t\treturn s\r\n\r\nc = 
CoreOperations()\r\nc.createFiles()\r\n","repo_name":"paulscottrobson/one-word-forth","sub_path":"primitives/fc.py","file_name":"fc.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"33647939884","text":"from typing import NamedTuple, Union, Tuple, Optional, Generator\n\nfrom rdflib import Graph, URIRef, Literal, BNode\nfrom rdflib.term import Node\n\nQueryTriple = Tuple[Optional[URIRef], Optional[URIRef], Optional[Union[Literal, URIRef]]]\n\nSUBJ = Union[URIRef, BNode]\nPRED = URIRef\nOBJ = Node\n\n\nclass RDFTriple(NamedTuple):\n s: SUBJ = None\n p: PRED = None\n o: OBJ = None\n\n\nclass SortOGraph(Graph):\n \"\"\" rdflib Graph wrapper that sorts the outputs\n \"\"\"\n\n def triples(self,\n pattern: Optional[Union[QueryTriple, SUBJ]]) -> Generator[RDFTriple, None, None]:\n for t in sorted(super().triples(pattern)):\n yield t\n","repo_name":"hsolbrig/PyShEx","sub_path":"tests/utils/SortoGraph.py","file_name":"SortoGraph.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"87"} +{"seq_id":"37978890986","text":"'''\nDuck Donuts Nutrition\nAuthor: Rishi Masand\nYear: 2020\n'''\n\nimport numpy as np\n\nfrom donuts.collections import *\nfrom donuts.components import *\nfrom donuts.replacements import *\nfrom utils import *\n\n\nclass DuckDonutsNutrition():\n '''\n Duck Donuts Nutrition\n\n Calculates nutrition facts for Duck Donuts donuts and donut collections\n '''\n\n def __init__(self):\n self._nutrition_data_file = 'data/obtained/nutrition_info.txt'\n\n def _add_nutrition_values(self, n1, n2):\n '''\n Adds nutrition dictionaries n1 and n2 by key-wise values\n and stores sum in n1\n '''\n for key in n1:\n if key in n2:\n n1_val = float(n1[key][:first_non_num_idx(n1[key])])\n n2_val = float(n2[key][:first_non_num_idx(n2[key])])\n unit = n1[key][first_non_num_idx(n1[key]):]\n n1[key] = str(n1_val + n2_val) + unit\n return n1\n\n def _get_sanitized_line(self, line):\n '''\n Sanitizes data file line\n '''\n return line.replace('<', '').replace('**', '0')\n\n def _get_item_from_line(self, line):\n '''\n Gets item from data file line\n '''\n idx = first_num_idx(line)\n return line[:idx].replace(' - ', ' ').strip()\n\n def _get_units_from_line(self, line):\n '''\n Gets units from data file line\n '''\n idx = first_num_idx(line)\n values = line[idx:].split()\n units = [v[first_non_num_idx(v):] for v in values]\n return units\n\n def _get_values_from_line(self, line):\n '''\n Gets values from data file line\n '''\n idx = first_num_idx(line)\n values = line[idx:].split()\n values = [float(v[:first_non_num_idx(v)]) for v in values]\n return values\n\n def _get_item_nutritions(self):\n '''\n Gets item nutrition facts from nutrition data file\n '''\n with open(self._nutrition_data_file, 'r') as f:\n item_nutritions = {}\n fields = f.readline()[:-1].split()[1:]\n units = None\n line = f.readline()[:-1]\n while line:\n if len(line) > 100:\n line = f.readline()[:-1]\n continue\n line = self._get_sanitized_line(line)\n item = self._get_item_from_line(line)\n if not units:\n units = self._get_units_from_line(line)\n values = self._get_values_from_line(line)\n item_nutritions[item.lower()] = values\n line = f.readline()[:-1]\n f.close()\n return item_nutritions, fields, units\n\n def _get_well_formatted_donut_nutrition(self, donut, item_nutritions,\n fields, units):\n '''\n Gets nutrition facts from donut that is in order 
summary format\n        order summary format: \"A, B, C\", where\n        A = bare/glazed/icing selection\n        B = topping selection\n        C = drizzle selection\n        '''\n        # copy into a float array so += adds element-wise instead of extending the shared list\n        nutrition = np.array(item_nutritions['donut (bare)'], dtype=float)\n        components = donut.lower().split(', ')\n        for component in components:\n            if component == 'glazed':\n                nutrition = np.array(item_nutritions['donut glazed'], dtype=float)\n                continue\n            elif component == 'bare':\n                continue\n            elif component in item_nutritions:\n                nutrition += np.array(item_nutritions[component])\n                continue\n            elif component == components[-1]:\n                component = component + ' drizzle'\n                if component in item_nutritions:\n                    nutrition += np.array(item_nutritions[component])\n                elif component in REPLACEMENTS and \\\n                        REPLACEMENTS[component] in item_nutritions:\n                    nutrition += np.array(item_nutritions[REPLACEMENTS[component]])\n        return {fields[i]: str(v) + units[i] for i, v in enumerate(nutrition)}\n\n    def _sanitize_ill_formatted_donut(self, donut):\n        '''\n        Sanitizes ill-formatted donut\n        '''\n        if ' - ' in donut:\n            donut = donut.split(' - ')[1]\n        if ': ' in donut:\n            donut = donut.split(': ')[1]\n        donut = donut.replace(' w/ ', ' with ').replace(', and ', ' with ') \\\n            .replace(', ', ' with ').replace(' and ', ' with ') \\\n            .replace(' & ', ' with ')\n        return donut\n\n    def _get_ill_formatted_donut_components(self, donut):\n        '''\n        Gets components from ill-formatted donut\n        '''\n        return donut.lower().split(' with ')\n\n    def _get_base_nutrition(self, fields):\n        '''\n        Gets base (zeroed) nutrition facts\n        '''\n        return np.array([0 for _ in fields]).astype(float)\n\n    def _get_ill_formatted_donut_nutrition(self, donut, item_nutritions,\n                                           fields, units):\n        '''\n        Gets nutrition facts from donuts in arbitrary format\n        '''\n        donut = self._sanitize_ill_formatted_donut(donut)\n        components = self._get_ill_formatted_donut_components(donut)\n        nutrition = self._get_base_nutrition(fields)\n        found_donut_type = False\n        while components:\n            component = components[0]\n            if component in item_nutritions:\n                if 'donut' in component:\n                    found_donut_type = True\n                nutrition += np.array(item_nutritions[component])\n            elif component in REPLACEMENTS:\n                replacement = REPLACEMENTS[component]\n                if 'donut' in replacement:\n                    found_donut_type = True\n                nutrition += np.array(item_nutritions[replacement])\n            elif ' '.join(component.split()[1:]) in item_nutritions:\n                reformatted = ' '.join(component.split()[1:])\n                nutrition += np.array(item_nutritions[reformatted])\n            elif component.split()[0] in item_nutritions:\n                nutrition += np.array(item_nutritions[component.split()[0]])\n            elif component == components[-1]:\n                component = component + ' drizzle'\n                if component in item_nutritions:\n                    nutrition += np.array(item_nutritions[component])\n                elif component in REPLACEMENTS:\n                    replacement = REPLACEMENTS[component]\n                    nutrition += np.array(item_nutritions[replacement])\n            components = components[1:]\n        if not found_donut_type:\n            nutrition += np.array(item_nutritions['donut (bare)'])\n        return {fields[i]: str(v) + units[i] for i, v in enumerate(nutrition)}\n\n    def _get_collection_nutrition(self, collection, item_nutritions,\n                                  fields, units):\n        '''\n        Gets nutrition facts for collection of donuts\n        '''\n        collection_nutrition = {}\n        for donut in collection:\n            nutrition = self._get_ill_formatted_donut_nutrition(\n                donut, item_nutritions, fields, units\n            )\n            collection_nutrition[donut] = nutrition\n            if 'Total' in collection_nutrition:\n                collection_nutrition['Total'] = \\\n                    self._add_nutrition_values(\n                        collection_nutrition['Total'], nutrition\n                    )\n            else:\n                collection_nutrition['Total'] = 
dict(nutrition)\n\n return collection_nutrition\n\n def _format_collection_nutrition(self, collection, collection_nutrition):\n '''\n Format nutrition facts for collection of donuts for writing to file\n '''\n formatted_str = COLLECTION_NAMES[tuple(collection)] + ','\n for n_key in collection_nutrition['Total']:\n formatted_str += collection_nutrition['Total'][n_key] + ','\n formatted_str = formatted_str[:-1]\n formatted_str += '\\n'\n del collection_nutrition['Total']\n for donut in collection_nutrition:\n formatted_str += donut.replace(',', '') + ','\n for n_key in collection_nutrition[donut]:\n formatted_str += collection_nutrition[donut][n_key] + ','\n formatted_str = formatted_str[:-1]\n formatted_str += '\\n'\n return formatted_str\n\n def save_collection_nutritions_to_file(self, file):\n '''\n Saves nutrition facts for collection of donuts to file\n '''\n item_nutritions, fields, units = self._get_item_nutritions()\n data = 'Item,' + ','.join(fields) + '\\n'\n for collection in FEATURED_COLLECTIONS:\n collection_nutrition = self._get_collection_nutrition(\n collection, item_nutritions, fields, units)\n data += self._format_collection_nutrition(\n collection, collection_nutrition)\n with open(file, 'w') as f:\n f.write(data)\n f.close()\n\n def get_custom_donut_nutrition(self, donut, well_formatted=False):\n '''\n Gets nutrition facts for custom donuts\n '''\n item_nutritions, fields, units = self._get_item_nutritions()\n if well_formatted:\n return self._get_well_formatted_donut_nutrition(\n donut, item_nutritions, fields, units)\n return self._get_ill_formatted_donut_nutrition(\n donut, item_nutritions, fields, units)\n","repo_name":"darthbatman/duck-donuts-nutrition","sub_path":"duck_donuts_nutrition.py","file_name":"duck_donuts_nutrition.py","file_ext":"py","file_size_in_byte":9431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"26610781523","text":"t = int(input())\n\nfor tc in range(1, t+1):\n n = int(input())\n li = list(map(int, input().split()))\n result = 1e9\n\n for i in range(7):\n if li[i] == 1:\n index = i\n temp_n = n\n temp_result = 0\n\n while temp_n:\n if li[index] == 1 :\n temp_n -= 1\n temp_result += 1\n index = (index + 1) % 7\n\n if result > temp_result:\n result = temp_result\n\n print(f'#{tc} {result}')","repo_name":"min486/Algorithm-Python","sub_path":"SWEA/D3/13038_교환학생.py","file_name":"13038_교환학생.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"87"} +{"seq_id":"29569776208","text":"import urllib.request, urllib.parse, urllib.error\nimport re\nimport pandas as pd\nimport string\n\nurl='https://fantasy.premierleague.com/api/bootstrap-static/'\nfhand=urllib.request.urlopen(url)\n\ninfo=''\nfor line in fhand:\n info=info+line.decode().strip()\n \n#Gameweek Summaries\ndata=re.findall('\"name\":.+?},{\"id\"', info)\ndatadict={}\nfor i in data:\n if 'Gameweek' in i:\n if 'Gameweek 1\"' in i:\n GW=i.split(',')\n GW[12]=GW[12]+','+GW[13]\n GW[14]=GW[14]+','+GW[15]\n GW[19]=GW[19]+','+GW[20]\n \n GW.pop(13)\n GW.pop(14)\n GW.pop(18)\n datadict[i[i.find('G'):i.find('\",\"')]]=GW[1:3]+GW[5:9]+list(GW[12:13])+[\"\",\"\"]+GW[13:21]\n \n elif 'Gameweek 30+' in i:\n GW=i.split(',')\n GW[12]=GW[12]+','+GW[13]\n GW[14]=GW[14]+','+GW[15]\n GW[16]=GW[16]+','+GW[17]\n GW[21]=GW[21]+','+GW[22]\n \n GW.pop(13)\n GW.pop(14)\n GW.pop(15)\n GW.pop(19)\n 
datadict[i[i.find('G'):i.find('\",\"')]]=GW[1:3]+GW[5:9]+GW[12:14]+[\"\"]+GW[14:22]\n \n else:\n GW=i.split(',')\n GW[12]=GW[12]+','+GW[13]\n GW[14]=GW[14]+','+GW[15]\n GW[16]=GW[16]+','+GW[17]\n GW[18]=GW[18]+','+GW[19]\n GW[23]=GW[23]+','+GW[24]\n \n GW.pop(13)\n GW.pop(14)\n GW.pop(15)\n GW.pop(16)\n GW.pop(20)\n datadict[i[i.find('G'):i.find('\",\"')]]=GW[1:3]+GW[5:9]+GW[12:23]\n\ndf=pd.DataFrame(datadict)\ndf=df.transpose()\ndf=df.drop([3,4], axis=1)\ndf.columns=['Deadline Time', 'Average Entry Score', 'Highest Scoring Entry', 'Highest Score', 'Bench Boosts Played', 'Free Hits Played', 'Wildcards Played', 'Triple Captains Played', 'Most Selected', 'Most Transferred', 'Top Player', 'Top Player Points', 'Transfers Made', 'Most Captained', 'Most Vice-Captained']\n\n#removed unnecessary data in df cells\nfor column in df.columns:\n for i in range(0,len(df[column])):\n if column in ['Bench Boosts Played', 'Free Hits Played', 'Wildcards Played', 'Triple Captains Played']:\n df[column][i]=df[column][i][df[column][i].find('played\":')+8:df[column][i].find('}')]\n \n elif column=='Top Player Points':\n df[column][i]=df[column][i][df[column][i].find('points\":')+8:df[column][i].find('}')]\n \n else:\n df[column][i]=df[column][i][df[column][i].find(':')+1:]\n \ndf['Deadline Time']=df['Deadline Time'].str.replace('\"','')\ndf['Most Vice-Captained']=df['Most Vice-Captained'].str.replace('}','')\ndf.to_excel(\"Gameweek Summaries.xlsx\")\n\ndef renamecolumns():\n columns=[]\n for i in list(df.iloc[0]):\n title=i[:i.find(':')]\n title=title.replace('\"','')\n title=title.replace('_',' ')\n title=string.capwords(title)\n columns.append(title)\n df.columns=columns\n \ndef dataremoval():\n for column in df.columns:\n for i in range(0,len(df[column])):\n df[column][i]=df[column][i][df[column][i].find(':')+1:]\n df[column][i]=df[column][i].replace('\"','') \n\n#Teams\ndata=re.findall('\"code\":.+?},', info)\ndata=data[:20]\ndata[19]=data[19][:data[19].find('\"pulse_id\":')+14]+','\n\ndatadict={}\nfor i in data:\n team=i.split(',')\n datadict[i[i.find(\"name\")+7:i.find('\",\"played\"')]]=team\n \ndf=pd.DataFrame(datadict)\ndf=df.transpose()\ndf=df.drop([0,5,11,12,20,21], axis=1)\nrenamecolumns()\ndataremoval() \n\ndf.to_excel(\"Teams.xlsx\")\n\n#Playerstats\ndata=re.findall('\"chance_of_playing_next_round\":.+?},', info)\ndata[-1]=data[-1][:data[-1].find('}]')+1]+','\n\ndatadict={}\nfor i in data:\n player=i.split(',')\n fullname=i[i.find(\"first_name\")+13:i.find('\",\"form\"')]+' '+i[i.find(\"second_name\")+14:i.find('\",\"selected_by_percent\"')]\n datadict[fullname]=player\n \ndf=pd.DataFrame(datadict)\ndf=df.transpose()\nrenamecolumns()\ndataremoval()\n\ndf['Ict Index Rank Type']=df['Ict Index Rank Type'].str.replace('}','')\ndf=df.drop(['Photo',''], axis=1)\ndf.index.rename('Full Name', inplace=True)\n\ndf.to_excel(\"Players Stats.xlsx\")\nPlayerstats=df\n\n#Player Performances\nPlayerstats.reset_index(inplace=True)\nPlayerstats['Id']=Playerstats['Id'].astype(int)\nPlayerstats.set_index('Id', inplace=True)\nperfdb=pd.DataFrame()\n\nfor j in list(Playerstats.index):\n url='https://fantasy.premierleague.com/api/element-summary/'\n url=url+str(j)+'/'\n \n fhand=urllib.request.urlopen(url)\n info=''\n for line in fhand:\n info=info+line.decode().strip()\n \n data=re.findall('\"element\":.+?}', info)\n datadict={}\n for i in data:\n GW=i.split(',')\n datadict[i[i.find(\"fixture\"):i.find(',\"opponent_team\"')].replace('\":',' ')]=GW\n \n df=pd.DataFrame(datadict)\n df=df.transpose()\n renamecolumns()\n 
dataremoval() \n df['Transfers Out']=df['Transfers Out'].str.replace('}','')\n \n name=Playerstats.loc[j,'Full Name']\n df['Index']=name+' '+df['Fixture']\n perfdb=pd.concat([perfdb,df])\n\nperfdb.set_index('Index', inplace=True) \nperfdb.to_excel(\"Player Performance DB.xlsx\")","repo_name":"dozieuwakwe/Fantasy-Premier-League","sub_path":"FPL Data Extraction.py","file_name":"FPL Data Extraction.py","file_ext":"py","file_size_in_byte":5170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"38926139658","text":"from collections import deque\n\n\ndef solution(priorities, location):\n cnt = 0\n d = deque([(v, i) for i, v in enumerate(priorities)])\n\n while True:\n temp = d.popleft()\n\n if len(list(filter(lambda x: x[0] > temp[0], d))) > 0:\n d.append(temp)\n else:\n cnt += 1\n if temp[1] == location:\n return cnt","repo_name":"CSOS-Study/Python_Algorithm","sub_path":"Programmers/알고리즘/Level2/프린터/강문영_복습.py","file_name":"강문영_복습.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"87"} +{"seq_id":"72830140442","text":"import subprocess\nimport tempfile\nimport shutil\nimport sys\nimport os\n\nclass Document:\n \"\"\" represents a document as topic-distribution\"\"\"\n def __init__(self, source, topicDistribution):\n self.source = source\n self.filename = self.findFileName(source)\n self.topicDistribution = topicDistribution\n self.numTopics = len(topicDistribution.keys())\n\n def findFileName(self, source):\n splitted = source.split(\"/\")\n filename = splitted[-1]\n return filename\n\n def printDistr(self):\n print(\"len: {}\".format(len(self.topicDistribution)))\n for x in self.topicDistribution:\n print(\"{0} {1}\".format(x, self.topicDistribution[x]))\n\n def getTopicDistribution(self):\n return self.topicDistribution\n\n\nclass Topicmodel:\n \"\"\" a topic model estimated with mallet \"\"\"\n def __init__(self):\n self.documents = []\n self.tmpDir=\"/dev/shm\"\n\n def setFromFile(self,filename):\n self.filename = filename\n self.topicmodel = self._readMalletFile(filename)\n\n def addDocument(self,document):\n self.documents.append(document)\n\n def getNumTopics(self):\n if len(self.documents)>0:\n doc = self.documents[0]\n return doc.numTopics\n else:\n return 0\n \n def getDocument(self,filename):\n for d in self.documents:\n if d.filename == filename:\n return d\n print(\"Document {0} not found.\".format(filename),file=sys.stderr)\n sys.exit()\n\n def _readMalletFile(self,fileName):\n \"\"\" read mallet file and construct TopicModel object\n #doc source topic proportion ...\n (first line contains this comment)\n \"\"\"\n topicModel = [] # list of topic distributions\n FILE = open(fileName)\n for l in FILE:\n l = l.strip()\n if not l.startswith(\"#\"):\n lineArray = l.split(\" \")\n docno = lineArray[0]\n source = lineArray[1]\n i=0\n topicDistr = {}\n topic = \"init\"\n for item in lineArray[2:]:\n if i % 2 == 0:\n topic = item\n else:\n proportion = float(item)\n #if proportion != 0.0: #only add if value diff from 0.0 === no add all\n # topicDistr[topic] = proportion\n i+=1\n document = Document(source, topicDistr)\n self.addDocument(document)\n FILE.close()\n return topicModel\n\n\n def estimate(self,targetCorpus,directory,fileNames):\n \"\"\" estimate Mallet topic model - article level \"\"\"\n print('# (experimental code: use --mallet-topicmodel) ')\n d = tempfile.mkdtemp(prefix='tmp',dir=self.tmpDir)\n # create temporary directory with 
files\n shutil.copy(targetCorpus.getPath(),d)\n for fileName in fileNames:\n path = directory + \"/\" + fileName\n shutil.copy(path,d)\n d_sents = tempfile.mkdtemp(prefix='tmp',dir=self.tmpDir)\n print(\"# tmp file: \",d_sents)\n for f in os.listdir(d):\n filein = d + \"/\" + f\n fileout = d_sents + \"/\" + f\n subprocess.call(\"cat \"+ filein + \" | awk '{if (NF>0) printf \\\"%s \\\",$2; else print}' > \" + fileout,shell=True)\n shutil.rmtree(d)\n cdir = subprocess.call(\"pwd\",shell=True)\n self.__callMallet(directory,d_sents)\n\n def __callMallet(self,directory,d_sents):\n \n myhome = os.environ.get(\"MEASURES_HOME\")\n if directory.endswith(\"/\"):\n directory = directory[:-1]\n subprocess.call(myhome+\"/mallet-2.0.6/bin/mallet import-dir --remove-stopwords --extra-stopwords \"+myhome+\"/punct.txt --input \"+d_sents+\" --output \"+directory+\".mallet --keep-sequence\",shell=True)\n #subprocess.call(myhome+\"/mallet-2.0.6/bin/mallet import-dir --input \"+d_sents+\" --output \"+directory+\".mallet --keep-sequence\",shell=True)\n subprocess.call(myhome+\"/mallet-2.0.6/bin/mallet train-topics --input \"+directory+\".mallet --num-topics 100 --output-state \"+directory+\".mallet.state.gz --output-doc-topics \"+directory+\".mallet.doc-topics.gz --output-topic-keys \"+directory+\".mallet.topic-keys.gz\",shell=True)\n print(\"# mallet file saved in: \",directory+\".mallet.doc-topics.gz\")\n shutil.rmtree(d_sents)\n self.setFromFile(directory+\".mallet.doc-topics.gz\")\n\n def estimateFromSents(self,targetCorpus,corpora):\n \"\"\" estimate Mallet topic model - every sentence is a doc \"\"\"\n d = tempfile.mkdtemp(prefix='tmp',dir=self.tmpDir)\n print(\"# Temp dir: \",d)\n for corpusName in corpora:\n corpus = corpora[corpusName]\n FILE = open(d+\"/\"+corpusName,\"w\")\n for instance in corpus.getInstances():\n sent = \" \".join(instance.getSentence()) + \"\\n\"\n FILE.write(sent)\n FILE.close()\n shutil.copy(targetCorpus.getPath(),d)\n self.__callMallet(\"topicmodel\",d)\n \n\n\ndef main():\n test=\"topicmodel.mallet.doc-topics.gz\"\n t = Topicmodel()\n t.setFromFile(test)\n print(t.getNumTopics())\n d = t.documents[0] #get first document\n print(d.filename)\n d.printDistr()\n\n d2 = t.documents[1]\n print(d2.filename)\n d2.printDistr()\n\n\n #d3 = t.getDocument(\"wsj_2300\")\n #d4 = t.getDocument(\"wsj_2168\") # js: 0.137698\n #d5 = t.getDocument(\"wsj_1208\") # js: 0.182491\n #d3.printDistr()\n #print(d3.filename)\n #d3.printDistr()\n #print(d4.filename)\n #d4.printDistr()\n #print(d5.filename)\n #d5.printDistr()\n #subprocess.call(\"mallet\")\n#main()\n","repo_name":"bplank/domainsim","sub_path":"Topicmodel.py","file_name":"Topicmodel.py","file_ext":"py","file_size_in_byte":5722,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"87"} +{"seq_id":"35203152692","text":"#\n# This module contains an example wasmer runtime\n# https://github.com/wasmerio/wasmer-python\n#\n\nimport json\nimport os\nimport threading\nimport time\n\nimport eea_api\nimport eea_utils\nimport eea_registered_functions\n\nfrom wasmer import engine, Store, ImportObject, Function, Module, Instance, Memory, MemoryType\nfrom wasmer_compiler_cranelift import Compiler\n\n# Initialize an instance of the EEA.\ndef init(device_id, eea_queue, mqtt_queue,\n persisted_bundle_path=\"./bundle-wasmer.wasm\",\n persisted_storage_path=\"./storage.json\"):\n\n # These are set when the EEA calls eea_set_message_buffers.\n # These will point to pre-allocated buffers These are the buffers\n # used when 
sending data to the EEA for direct triggers, commands, etc.\n ptr_buffer_message_topic = None\n ptr_buffer_message_payload = None\n buffer_message_topic_length = 0\n buffer_message_payload_length = 0\n\n # EAA trace log level.\n # 0 = None, 1 = Errors Only, 2 = All/Verbose\n EEA_TRACE_LOG_LEVEL = 2\n\n # https://wasmerio.github.io/wasmer-python/api/wasmer/#wasmer.Instance\n wasm_instance = None\n\n # https://wasmerio.github.io/wasmer-python/api/wasmer/#wasmer.Module\n wasm_module = None\n\n # https://wasmerio.github.io/wasmer-python/api/wasmer/#wasmer.Memory\n wasm_memory = None\n\n # https://wasmerio.github.io/wasmer-python/api/wasmer/#wasmer.Store\n wasm_store = None\n\n # https://wasmerio.github.io/wasmer-python/api/wasmer/#wasmer.MemoryType\n wasm_memory_type = None\n\n #\n # Sends the hello message to the cloud to track which bundle\n # is deployed to this device.\n #\n def send_hello_message(bundle_id):\n print(\"send_hello_message\")\n\n hello_message = {\n \"service\": \"embeddedWorkflowAgent\",\n \"version\": \"1.0.0\",\n \"bundle\": bundle_id,\n \"compilerOptions\": {\n \"traceLevel\": 2\n }\n }\n\n mqtt_queue.put({\n \"topic\": \"losant/\" + device_id + \"/fromAgent/hello\",\n \"payload\": json.dumps(hello_message)\n })\n\n print(hello_message)\n\n # Called by the EEA to provide pre-allocated message buffers.\n # These buffers are used to send data to the EEA for \n # direct triggers, commands, etc.\n #\n # Even though this is an EEA API function, it is not defined\n # in eea_api.py because these messages buffers are only used\n # by the EEA worker thread in this module.\n #\n def eea_set_message_buffers(buffer_topic:int, topic_buffer_len:int, buffer_payload:int, payload_buffer_len:int) -> int:\n print(\"eea_set_message_buffers\")\n\n nonlocal ptr_buffer_message_topic\n nonlocal ptr_buffer_message_payload\n nonlocal buffer_message_topic_length\n nonlocal buffer_message_payload_length\n\n ptr_buffer_message_topic = buffer_topic\n ptr_buffer_message_payload = buffer_payload\n\n buffer_message_topic_length = topic_buffer_len\n buffer_message_payload_length = payload_buffer_len\n return 0\n\n #\n # Gets the bundle identifier from globals.\n # The identifier is encoded as two globals:\n # BUNDLE_IDENTIFIER_LENGTH: pointer to the length, in bytes, of the bundle ID string (int).\n # BUNDLE_IDENTIFIER: pointer to the bundle ID string.\n #\n def get_bundle_identifier(wasm_instance):\n bundle_id_length_ptr = wasm_instance.exports.BUNDLE_IDENTIFIER_LENGTH.value\n\n bundle_id_str_ptr = wasm_instance.exports.BUNDLE_IDENTIFIER.value\n bundle_id_str_length = eea_utils.decode_int(wasm_memory, 1, bundle_id_length_ptr)\n print(bundle_id_str_length)\n return eea_utils.decode_string(wasm_memory, bundle_id_str_ptr, bundle_id_str_length)\n\n #\n # Loads a WASM module from a byte array.\n #\n def load_wasm_bundle(bundle):\n print(\"Loading wasm bundle...\")\n\n # Persist bundle so it will automatically load on a restart.\n f = open(persisted_bundle_path, \"wb\")\n f.write(bundle)\n f.close()\n\n nonlocal wasm_memory\n nonlocal wasm_module\n nonlocal wasm_instance\n nonlocal wasm_store\n nonlocal wasm_memory_type\n\n wasm_store = Store(engine.JIT(Compiler))\n wasm_memory_type = MemoryType(minimum=5, shared=False)\n wasm_memory = Memory(wasm_store, wasm_memory_type)\n wasm_module = Module(wasm_store, bundle)\n\n reg_funcs = eea_registered_functions.init(wasm_memory)\n eea_api_funcs = eea_api.init(device_id, wasm_memory, mqtt_queue, persisted_storage_path)\n\n imports = {\n \"memory\": wasm_memory,\n 
\"eea_set_message_buffers\": Function(wasm_store, eea_set_message_buffers)\n }\n\n # Add EEA API functions.\n for name, func in eea_api_funcs.items():\n imports[name] = Function(wasm_store, func[\"func\"])\n\n # Add the registered functions.\n for name, func in reg_funcs.items():\n imports[name] = Function(wasm_store, func[\"func\"])\n\n # https://wasmerio.github.io/wasmer-python/api/wasmer/#wasmer.ImportObject\n import_object = ImportObject()\n import_object.register(\n \"env\",\n imports\n )\n\n # Now the module is compiled, we can instantiate it.\n wasm_instance = Instance(wasm_module, import_object)\n\n # Set EEA configuration.\n wasm_instance.exports.eea_config_set_trace_level(EEA_TRACE_LOG_LEVEL)\n wasm_instance.exports.eea_config_set_storage_size(32768)\n wasm_instance.exports.eea_config_set_storage_interval(30000)\n\n # Initialize the EEA.\n wasm_instance.exports.eea_init()\n\n # Send the Hello message to the platform so it can\n # keep track of what bundle this device has received.\n send_hello_message(get_bundle_identifier(wasm_instance))\n\n # The main EEA worker thread.\n def thread():\n # Attempt to load a previously persisted WASM bundle.\n if os.path.exists(persisted_bundle_path):\n f = open(persisted_bundle_path, 'rb')\n bundle = f.read()\n f.close()\n load_wasm_bundle(bundle)\n else:\n send_hello_message(\"nullVersion\")\n\n while(True):\n\n # Check the queue to see if any messages were received\n # from another thread that we need to handle.\n while not eea_queue.empty():\n message = eea_queue.get()\n\n # Messages that are dictionaries instead of MQTTMessage are\n # internal messages sent between threads. In this case\n # it a message sent whenever the MQTT connection status changes.\n if type(message) is dict and message[\"topic\"] == \"connection_status\":\n # The MQTT connection status has changed.\n if not wasm_instance == None:\n wasm_instance.exports.eea_set_connection_status(message[\"payload\"])\n elif message.topic == \"losant/\" + device_id + \"/toAgent/flows\":\n # This message is a new WASM bundle. 
Make sure to call eea_shutdown\n # before loading a new bundle.\n if not wasm_instance == None:\n wasm_instance.exports.eea_shutdown()\n\n load_wasm_bundle(message.payload)\n else:\n # All other messages need to be sent into the EEA using\n # the message buffers.\n topic_length = eea_utils.encode_string(\n wasm_memory, message.topic,\n ptr_buffer_message_topic, buffer_message_topic_length)\n\n message_length = eea_utils.encode_string(wasm_memory,\n message.payload.decode(\"utf-8\"),\n ptr_buffer_message_payload, buffer_message_payload_length)\n\n if not wasm_instance == None:\n wasm_instance.exports.eea_message_received(topic_length, message_length)\n \n # Pump the EEA.\n if not wasm_instance == None:\n wasm_instance.exports.eea_loop(int(time.time() * 1000))\n\n time.sleep(0.1)\n\n # Start the EEA thread.\n threading.Thread(target=thread, args=()).start()","repo_name":"Losant/eea-examples","sub_path":"python/eea_runtime_wasmer.py","file_name":"eea_runtime_wasmer.py","file_ext":"py","file_size_in_byte":7503,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"87"} +{"seq_id":"8538091842","text":"import unittest\nimport os\nimport datetime\n\nfrom sql_connection_manager import SqlConnectionManager\nfrom vaccine_caregiver import VaccineCaregiver\nfrom enums import *\nfrom utils import *\nfrom COVID19_vaccine import COVID19Vaccine as covid\nfrom vaccine_patient import VaccinePatient as patient\nfrom vaccine_reservation_scheduler import VaccineReservationScheduler\n\nclass TestDB(unittest.TestCase):\n\n def test_db_connection(self):\n try:\n self.connection_manager = SqlConnectionManager(Server=os.getenv(\"Server\"),\n DBname=os.getenv(\"DBName\"),\n UserId=os.getenv(\"UserID\"),\n Password=os.getenv(\"Password\"))\n self.conn = self.connection_manager.Connect()\n except Exception:\n self.fail(\"Connection to database failed\")\n\n\nclass TestPatient(unittest.TestCase):\n def test_init(self):\n with SqlConnectionManager(Server=os.getenv(\"Server\"),\n DBname=os.getenv(\"DBName\"),\n UserId=os.getenv(\"UserID\"),\n Password=os.getenv(\"Password\")) as sqlClient:\n with sqlClient.cursor(as_dict=True) as cursor:\n try:\n # clear the tables before testing\n clear_tables(sqlClient)\n\n # create a new Patient object\n self.patient_a = patient(name='dj',cursor=cursor)\n \n sqlQuery = '''\n SELECT *\n FROM Patients\n WHERE PatientName = 'dj'\n '''\n cursor.execute(sqlQuery)\n rows = cursor.fetchall()\n\n if len(rows) < 1:\n self.fail(\"Patient not found\")\n\n # clear the tables after testing, just in case\n clear_tables(sqlClient)\n print(rows[0])\n except Exception:\n # clear the tables if an exception occurred\n # clear_tables(sqlClient)\n self.fail(\"Creating Patient failed\")\n\n def test_reservation(self):\n with SqlConnectionManager(Server=os.getenv(\"Server\"),\n DBname=os.getenv(\"DBName\"),\n UserId=os.getenv(\"UserID\"),\n Password=os.getenv(\"Password\")) as sqlClient:\n with sqlClient.cursor(as_dict=True) as cursor:\n cursor.connection.autocommit(False)\n try:\n # clear the tables before testing\n clear_tables(sqlClient)\n\n # initialize vaccines\n self.vaccine_1 = covid(\"Pfizer\",\"Biotech\",2,21,cursor)\n self.vaccine_2 = covid('Moderna','Moderna',2,28, cursor)\n self.vaccines = [self.vaccine_1,self.vaccine_2]\n\n self.vaccine_1.AddDoses(\"Pfizer\",2,cursor)\n self.vaccine_2.AddDoses(\"Moderna\",3,cursor)\n\n # create a new VaccineCaregiver object\n self.caregiver_a = VaccineCaregiver(name=\"John\",\n cursor=cursor)\n self.caregiver_b = 
VaccineCaregiver(name=\"Steve\",\n cursor=cursor)\n # create a new Patient object\n\n self.patients = [patient(name='Marc',cursor=cursor),\n patient(name='Marc2',cursor=cursor),\n patient(name='Marc3',cursor=cursor),\n patient(name='Marc4',cursor=cursor),\n patient(name='Marc5',cursor=cursor)\n ]\n # for each patient:\n for patient_a in self.patients:\n # See what vaccines are available\n for vaccine_a in self.vaccines:\n sqlQuery = '''\n SELECT *\n FROM Vaccines\n WHERE VaccineName = '{name}'\n '''.format(name = vaccine_a.name)\n cursor.execute(sqlQuery)\n rows = cursor.fetchall()\n if len(rows)>0:\n if rows[0]['AvailableDoses']>=rows[0]['DosesPerPatient']:\n # if enough doses are available\n # 1) create a reservation \n self.reservation_a = VaccineReservationScheduler()\n # 2) get first caregiver slot ID & reserve it & schedule it\n self.reservedId = self.reservation_a.PutHoldOnAppointmentSlot(cursor =cursor)\n # if no slot is available, rollback commit\n if self.reservedId in [0,-1]: \n cursor.connection.rollback()\n patient_a.first_VaccineAppointmentId = 0\n print(\"No slots available in the next 3 weeks\")\n break\n else:\n patient_a.first_VaccineAppointmentId = patient_a.ReserveAppointment(self.reservedId,vaccine_a.name,cursor)\n patient_a.vaccine_name = vaccine_a.name\n \n # 3) get second slot & reserve it \n self.reservation_a.ScheduleAppointmentSlot(slotid = self.reservedId,cursor = cursor)\n patient_a.ScheduleAppointment(Vaccine = vaccine_a,cursor = cursor)\n \n days_between_doses = int(rows[0]['DaysBetweenDoses'])\n if int(rows[0]['DosesPerPatient'])==2:\n self.reservedId = self.reservation_a.PutHoldOnAppointmentSlot(cursor =cursor,date = datetime.datetime.now()+ datetime.timedelta(days=days_between_doses))\n if self.reservedId in [0,-1]: \n \n cursor.connection.rollback()\n patient_a.first_VaccineAppointmentId = 0\n patient_a.second_VaccineAppointmentId = 0\n patient_a.vaccine_name = ''\n # if second slot is not available try next vaccine\n print(\"second slot not available for, cancelling first appointment & checking other vaccines\",vaccine_a.name)\n continue\n else:\n patient_a.second_VaccineAppointmentId = patient_a.ReserveAppointment(self.reservedId,vaccine_a.name,cursor)\n patient_a.vaccine_name = vaccine_a.name\n self.reservation_a.ScheduleAppointmentSlot(slotid = self.reservedId,cursor = cursor)\n patient_a.ScheduleAppointment(Vaccine = vaccine_a,cursor = cursor)\n\n break\n \n else:\n print(vaccine_a.name, \"not enough doses available\")\n\n if patient_a.first_VaccineAppointmentId!=0:\n print(\"Reservation Successful for Patient!!!!!!!!!!!\" ,patient_a.name)\n cursor.connection.commit()\n else:\n print(\"not successful\")\n \n\n\n\n except Exception:\n # clear the tables if an exception occurred\n clear_tables(sqlClient)\n self.fail(\"Reservation failed\")\n\n\n# class Test(unittest.TestCase):\n# def test(self):\n# #allocate 2 caregivers\n\n# #add 2 doses of the vaccine\n\n# #Initialize a patient\n\n# #check for first \n# pass\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"marcm97/vaccineScheduler","sub_path":"test_week2.py","file_name":"test_week2.py","file_ext":"py","file_size_in_byte":8882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"29175769215","text":"import sys\nimport pickle\nimport numpy as np\nimport pandas as pd\n\nfrom modules.core.vars.bool_man import valid_bool\nfrom modules.core.vars import string_man as sm, char_man as cm\nfrom modules.core.loadsave import file_dir as fd, 
file_types as ft\n\nslash = fd.slash()\nvalid_types = ft.types\n\n\ndef validate(\n filename,\n path=fd.dir_path(),\n warning=True,\n):\n filename = sm.valid_string(filename)\n path = sm.slash_check(sm.valid_string(path))\n warning = valid_bool(warning)\n\n if fd.dir_exists(path):\n if fd.file_exists(path + filename):\n if warning:\n h = 0\n while h < 1:\n usr_inp = input(\"Do you wish to override the current file? (y/n)\")\n if cm.lower_all_letter(sm.valid_string(usr_inp)) == 'y':\n h = 1\n elif cm.lower_all_letter(sm.valid_string(usr_inp)) == 'n':\n sys.exit()\n return path + filename\n elif not fd.file_exists(filename):\n return path + filename\n else:\n raise ValueError(\n f\"Error: Location {path} does not exist, please try again.\"\n )\n\n\ndef save_csv(\n data,\n filename,\n path=fd.dir_path(),\n warning=True,\n):\n loc = validate(filename, path, warning)\n if not isinstance(data, pd.DataFrame):\n raise TypeError(\n \"Error: Incorrect data type for save, expected a DataFrame, \"\n f\"instead got {type(data)}.\"\n )\n else:\n return data.to_csv(loc)\n\n\ndef save_pkl(\n data,\n filename,\n path=fd.dir_path(),\n warning=True,\n):\n loc = validate(filename, path, warning)\n if ft.type_check(loc, '.pkl'):\n with open(loc, 'wb') as handle:\n return pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef save_txt(\n data,\n filename,\n path=fd.dir_path(),\n warning=True,\n):\n loc = validate(filename, path, warning)\n if ft.type_check(loc, '.txt'):\n return np.savetxt(loc, data)\n\n\ndef save_xlsx(\n data,\n filename,\n path=fd.dir_path(),\n warning=True,\n):\n loc = validate(filename, path, warning)\n if not isinstance(data, pd.DataFrame):\n raise TypeError(\n \"Error: Incorrect data type for save, expected a DataFrame, \"\n f\"instead got {type(data)}.\"\n )\n else:\n return data.to_excel(loc, sheet_name=filename, index=False)\n\n\nsave = {}\nfor ftype in valid_types.valid_file_types:\n save[ftype] = globals()[str('save_' + cm.remove_begin(ftype))]\n\n\ndef file_export(\n data,\n filename,\n path=fd.dir_path(),\n warning=True,\n):\n warning = valid_bool(warning)\n loc = validate(filename, path, warning)\n\n if fd.file_ext(loc) in valid_types.valid_file_types:\n save[fd.file_ext(loc)](data, filename, path, False)\n return 'Save Successful.'\n else:\n raise ValueError(\n f\"Error: The following file type {fd.file_ext(loc)}, is \"\n \"not supported, please try the following extensions, \"\n f\"{valid_types.valid_file_types}\"\n )\n","repo_name":"ejohnson-96/Collisionality","sub_path":"modules/core/loadsave/file_export.py","file_name":"file_export.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"9172458539","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 18 23:35:39 2016\r\n\r\n@author: meichengshih\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom xgboost.sklearn import XGBClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nimport time\r\nfrom sklearn.metrics import log_loss\r\nfrom sklearn.cross_validation import StratifiedKFold\r\nfrom sklearn.ensemble import ExtraTreesClassifier\r\n\r\nstime = time.time()\r\n\r\ncdf = pd.read_pickle(\"cat_data.pkl\")\r\ncdftest = pd.read_pickle(\"cat_datatest.pkl\")\r\nddf = pd.read_pickle(\"dog_data.pkl\")\r\nddftest = pd.read_pickle(\"dog_datatest.pkl\")\r\nnf=25\r\n\r\n############ need to change back to cat parameters #############\r\n\r\n## x, 
y:\ncy=cdf[\"OutcomeType\"]\ncx=cdf.drop(\"OutcomeType\",axis=1)\n\ncx=np.array(cx)\ncy=np.array(cy)\ncdftest=np.array(cdftest)\n\n### K-Fold with Shufffle\nskf = list(StratifiedKFold(cy, nf))\n\n## Classifiers\n# XGB optimallog loss: 0.44981 \ncclf1=XGBClassifier(max_depth=22, \n learning_rate=0.0519, n_estimators=402, \n objective='multi:softprob', \n nthread=8, gamma=0.8170,min_child_weight=1.039,\n subsample=0.9892, colsample_bytree=0.5972)\n \n# XGB all log_loss: 0.72068\ncclf2=XGBClassifier(max_depth=11, \n learning_rate=0.0346, n_estimators=285, \n objective='multi:softprob', \n nthread=8, gamma=0.7249,min_child_weight=1,\n subsample=0.8012, colsample_bytree=0.8087)\n\n#optimal RF accuracy 0.82630\ncclf3 = RandomForestClassifier(n_estimators=397, \n min_samples_leaf=1, \n min_samples_split=1,\n max_features=0.4653, \n max_depth=None, \n criterion=\"gini\")\n\n#optimal ERT accuracy 0.81220\ncclf4 = ExtraTreesClassifier(n_estimators=748,\n min_samples_split=4,\n max_features=0.6237,\n min_samples_leaf=1,\n criterion=\"gini\")\n\n# XGB All accuracy: 0.70927\ncclf5=XGBClassifier(max_depth=27, \n learning_rate=0.0463, n_estimators=277, \n objective='multi:softprob', \n nthread=8, gamma=0.4552,min_child_weight=1.3451,\n subsample=0.8215, colsample_bytree=0.8179)\n\n#optimal RF All accuracy 0.70122\ncclf6 = RandomForestClassifier(n_estimators=449, \n min_samples_leaf=1, \n min_samples_split=2,\n max_features=0.3873, \n max_depth=None, \n criterion=\"gini\") \n\n#optimal ERT All accuracy 0.71046\ncclf7 = ExtraTreesClassifier(n_estimators=118,\n min_samples_split=14,\n max_features=0.2676,\n min_samples_leaf=9,\n criterion=\"gini\") \n\ncclfs=[cclf1, cclf2, cclf3, cclf4, cclf5, cclf6, cclf7]\n\n\n\n## KFold\nprint (\"Cat Turn\")\neval_rec=np.zeros((nf,len(cclfs)))\nblend_temp=np.zeros((cx.shape[0],5))\nblend_sub_temp=np.zeros((cdftest.shape[0],5))\nblend_train=np.zeros((cx.shape[0],5*len(cclfs)))\nblend_sub=np.zeros((cdftest.shape[0],5*len(cclfs)))\n\nfor j, clf in enumerate(cclfs):\n print (str(j)+\"th Classifier\")\n for i in xrange(nf):\n train, test=skf[i]\n cxtrain, xtest = cx[train], cx[test]\n cytrain, ytest = cy[train], cy[test]\n clf.fit(cxtrain, cytrain)\n ytest_pred = clf.predict_proba(xtest)\n blend_temp[test]=ytest_pred\n sub_pred = clf.predict_proba(cdftest)\n \n if i==0:\n blend_sub_temp=sub_pred\n else:\n blend_sub_temp=blend_sub_temp+sub_pred\n \n print (i, log_loss(ytest,ytest_pred))\n eval_rec[i,j]=log_loss(ytest,ytest_pred)\n blend_train[:,5*j:5*j+5]=blend_temp\n blend_sub[:,5*j:5*j+5]=blend_sub_temp/float(nf)\n \nnp.savetxt(\"cat_blend_train7.txt\",blend_train) \nnp.savetxt(\"cat_blend_pred7.txt\",blend_sub) \nnp.savetxt(\"cy7.txt\",cy) \n\n\n## dx, dy:\ndy=ddf[\"OutcomeType\"]\ndx=ddf.drop(\"OutcomeType\",axis=1)\n\ndx=np.array(dx)\ndy=np.array(dy)\nddftest=np.array(ddftest)\n\n### K-Fold with Shufffle\nskf = list(StratifiedKFold(dy, nf))\n\n## Classifiers\n# XGB optimal dog log loss: 0.883xx \ndclf1=XGBClassifier(max_depth=9, \n learning_rate=0.0185, n_estimators=553, \n objective='multi:softprob', \n nthread=8, gamma=0.7152,min_child_weight=1.416,\n subsample=0.8861, colsample_bytree=0.6017)\n\n# XGB all log_loss: 0.72068\ndclf2=XGBClassifier(max_depth=11, \n learning_rate=0.0346, n_estimators=285, \n objective='multi:softprob', \n nthread=8, gamma=0.7249,min_child_weight=1,\n subsample=0.8012, colsample_bytree=0.8087)\n\n#optimal RF accuracy 0.61500, \ndclf3 = RandomForestClassifier(n_estimators=522, \n min_samples_leaf=1, \n min_samples_split=5,\n 
max_features=0.2948, \n max_depth=None, \n criterion=\"gini\")\n\n#optimal RF accuracy 0.60750, \ndclf4 = ExtraTreesClassifier(n_estimators=520,\n min_samples_split=12,\n max_features=0.4047,\n min_samples_leaf=1,\n criterion=\"gini\") \n# XGB accuracy: 0.70927\ndclf5=XGBClassifier(max_depth=27, \n learning_rate=0.0463, n_estimators=277, \n objective='multi:softprob', \n nthread=8, gamma=0.4552,min_child_weight=1.3451,\n subsample=0.8215, colsample_bytree=0.8179)\n\n#optimal RF accuracy 0.70122\ndclf6 = RandomForestClassifier(n_estimators=449, \n min_samples_leaf=1, \n min_samples_split=2,\n max_features=0.3873, \n max_depth=None, \n criterion=\"gini\") \n\n#optimal ERT All accuracy 0.71046\ndclf7 = ExtraTreesClassifier(n_estimators=118,\n min_samples_split=14,\n max_features=0.2676,\n min_samples_leaf=9,\n criterion=\"gini\") \n\ndclfs=[dclf1, dclf2, dclf3, dclf4, dclf5, dclf6, dclf7]\n\n\n\n## KFold\nprint (\"Dog Turn\")\neval_rec=np.zeros((nf,len(dclfs)))\nblend_temp=np.zeros((dx.shape[0],5))\nblend_sub_temp=np.zeros((ddftest.shape[0],5))\nblend_train=np.zeros((dx.shape[0],5*len(dclfs)))\nblend_sub=np.zeros((ddftest.shape[0],5*len(dclfs)))\nfor j, clf in enumerate(dclfs):\n print (str(j)+\"th Classifier\")\n for i in xrange(nf):\n train, test=skf[i]\n dxtrain, dxtest = dx[train], dx[test]\n dytrain, dytest = dy[train], dy[test]\n clf.fit(dxtrain, dytrain)\n ytest_pred = clf.predict_proba(dxtest)\n blend_temp[test]=ytest_pred \n sub_pred = clf.predict_proba(ddftest) \n \n if i==0:\n blend_sub_temp=sub_pred\n else:\n blend_sub_temp=blend_sub_temp+sub_pred\n \n print (i, log_loss(dytest,ytest_pred))\n eval_rec[i,j]=log_loss(dytest,ytest_pred)\n \n blend_train[:,5*j:5*j+5]=blend_temp\n blend_sub[:,5*j:5*j+5]=blend_sub_temp/float(nf)\n \nnp.savetxt(\"dog_blend_train7.txt\",blend_train) \nnp.savetxt(\"dog_blend_pred7.txt\",blend_sub) \nnp.savetxt(\"dy7.txt\",dy)\n\netime = float(time.time()-stime)","repo_name":"mshih2/Data-Science-Projects","sub_path":"Classification Project Animal Shelter Outcome/Stacking&Blending/Stacking-1-Stage.py","file_name":"Stacking-1-Stage.py","file_ext":"py","file_size_in_byte":7880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"87"} +{"seq_id":"21407778140","text":"\nimport django\n\n\nif django.VERSION < (1, 8):\n from django.contrib.sessions.tests import SessionTestsMixin\n\nelse:\n # This Mixin is no more in django core, it has been moved in django test suite (not part of django distribution)\n # Copy/pasting the whole logic...\n # We will have to take care of that on every Django upgrade.\n import base64\n import unittest\n from datetime import timedelta\n\n from django.conf import settings\n from django.test.utils import override_settings, patch_logger\n from django.utils import six, timezone\n\n\n class SessionTestsMixin(object):\n # This does not inherit from TestCase to avoid any tests being run with this\n # class, which wouldn't work, and to allow different TestCase subclasses to\n # be used.\n\n backend = None # subclasses must specify\n\n def setUp(self):\n self.session = self.backend()\n\n def tearDown(self):\n # NB: be careful to delete any sessions created; stale sessions fill up\n # the /tmp (with some backends) and eventually overwhelm it after lots\n # of runs (think buildbots)\n self.session.delete()\n\n def test_new_session(self):\n self.assertFalse(self.session.modified)\n self.assertFalse(self.session.accessed)\n\n def test_get_empty(self):\n self.assertEqual(self.session.get('cat'), None)\n\n def 
test_store(self):\n self.session['cat'] = \"dog\"\n self.assertTrue(self.session.modified)\n self.assertEqual(self.session.pop('cat'), 'dog')\n\n def test_pop(self):\n self.session['some key'] = 'exists'\n # Need to reset these to pretend we haven't accessed it:\n self.accessed = False\n self.modified = False\n\n self.assertEqual(self.session.pop('some key'), 'exists')\n self.assertTrue(self.session.accessed)\n self.assertTrue(self.session.modified)\n self.assertEqual(self.session.get('some key'), None)\n\n def test_pop_default(self):\n self.assertEqual(self.session.pop('some key', 'does not exist'),\n 'does not exist')\n self.assertTrue(self.session.accessed)\n self.assertFalse(self.session.modified)\n\n def test_setdefault(self):\n self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')\n self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')\n self.assertTrue(self.session.accessed)\n self.assertTrue(self.session.modified)\n\n def test_update(self):\n self.session.update({'update key': 1})\n self.assertTrue(self.session.accessed)\n self.assertTrue(self.session.modified)\n self.assertEqual(self.session.get('update key', None), 1)\n\n def test_has_key(self):\n self.session['some key'] = 1\n self.session.modified = False\n self.session.accessed = False\n self.assertIn('some key', self.session)\n self.assertTrue(self.session.accessed)\n self.assertFalse(self.session.modified)\n\n def test_values(self):\n self.assertEqual(list(self.session.values()), [])\n self.assertTrue(self.session.accessed)\n self.session['some key'] = 1\n self.assertEqual(list(self.session.values()), [1])\n\n def test_iterkeys(self):\n self.session['x'] = 1\n self.session.modified = False\n self.session.accessed = False\n i = six.iterkeys(self.session)\n self.assertTrue(hasattr(i, '__iter__'))\n self.assertTrue(self.session.accessed)\n self.assertFalse(self.session.modified)\n self.assertEqual(list(i), ['x'])\n\n def test_itervalues(self):\n self.session['x'] = 1\n self.session.modified = False\n self.session.accessed = False\n i = six.itervalues(self.session)\n self.assertTrue(hasattr(i, '__iter__'))\n self.assertTrue(self.session.accessed)\n self.assertFalse(self.session.modified)\n self.assertEqual(list(i), [1])\n\n def test_iteritems(self):\n self.session['x'] = 1\n self.session.modified = False\n self.session.accessed = False\n i = six.iteritems(self.session)\n self.assertTrue(hasattr(i, '__iter__'))\n self.assertTrue(self.session.accessed)\n self.assertFalse(self.session.modified)\n self.assertEqual(list(i), [('x', 1)])\n\n def test_clear(self):\n self.session['x'] = 1\n self.session.modified = False\n self.session.accessed = False\n self.assertEqual(list(self.session.items()), [('x', 1)])\n self.session.clear()\n self.assertEqual(list(self.session.items()), [])\n self.assertTrue(self.session.accessed)\n self.assertTrue(self.session.modified)\n\n def test_save(self):\n if (hasattr(self.session, '_cache') and 'DummyCache' in\n settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND']):\n raise unittest.SkipTest(\"Session saving tests require a real cache backend\")\n self.session.save()\n self.assertTrue(self.session.exists(self.session.session_key))\n\n def test_delete(self):\n self.session.save()\n self.session.delete(self.session.session_key)\n self.assertFalse(self.session.exists(self.session.session_key))\n\n def test_flush(self):\n self.session['foo'] = 'bar'\n self.session.save()\n prev_key = self.session.session_key\n self.session.flush()\n self.assertFalse(self.session.exists(prev_key))\n 
self.assertNotEqual(self.session.session_key, prev_key)\n self.assertTrue(self.session.modified)\n self.assertTrue(self.session.accessed)\n\n def test_cycle(self):\n self.session['a'], self.session['b'] = 'c', 'd'\n self.session.save()\n prev_key = self.session.session_key\n prev_data = list(self.session.items())\n self.session.cycle_key()\n self.assertNotEqual(self.session.session_key, prev_key)\n self.assertEqual(list(self.session.items()), prev_data)\n\n def test_invalid_key(self):\n # Submitting an invalid session key (either by guessing, or if the db has\n # removed the key) results in a new key being generated.\n try:\n session = self.backend('1')\n try:\n session.save()\n except AttributeError:\n self.fail(\"The session object did not save properly. Middleware may be saving cache items without namespaces.\")\n self.assertNotEqual(session.session_key, '1')\n self.assertEqual(session.get('cat'), None)\n session.delete()\n finally:\n # Some backends leave a stale cache entry for the invalid\n # session key; make sure that entry is manually deleted\n session.delete('1')\n\n def test_session_key_is_read_only(self):\n def set_session_key(session):\n session.session_key = session._get_new_session_key()\n self.assertRaises(AttributeError, set_session_key, self.session)\n\n # Custom session expiry\n def test_default_expiry(self):\n # A normal session has a max age equal to settings\n self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)\n\n # So does a custom session with an idle expiration time of 0 (but it'll\n # expire at browser close)\n self.session.set_expiry(0)\n self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)\n\n def test_custom_expiry_seconds(self):\n modification = timezone.now()\n\n self.session.set_expiry(10)\n\n date = self.session.get_expiry_date(modification=modification)\n self.assertEqual(date, modification + timedelta(seconds=10))\n\n age = self.session.get_expiry_age(modification=modification)\n self.assertEqual(age, 10)\n\n def test_custom_expiry_timedelta(self):\n modification = timezone.now()\n\n # Mock timezone.now, because set_expiry calls it on this code path.\n original_now = timezone.now\n try:\n timezone.now = lambda: modification\n self.session.set_expiry(timedelta(seconds=10))\n finally:\n timezone.now = original_now\n\n date = self.session.get_expiry_date(modification=modification)\n self.assertEqual(date, modification + timedelta(seconds=10))\n\n age = self.session.get_expiry_age(modification=modification)\n self.assertEqual(age, 10)\n\n def test_custom_expiry_datetime(self):\n modification = timezone.now()\n\n self.session.set_expiry(modification + timedelta(seconds=10))\n\n date = self.session.get_expiry_date(modification=modification)\n self.assertEqual(date, modification + timedelta(seconds=10))\n\n age = self.session.get_expiry_age(modification=modification)\n self.assertEqual(age, 10)\n\n def test_custom_expiry_reset(self):\n self.session.set_expiry(None)\n self.session.set_expiry(10)\n self.session.set_expiry(None)\n self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)\n\n def test_get_expire_at_browser_close(self):\n # Tests get_expire_at_browser_close with different settings and different\n # set_expiry calls\n with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):\n self.session.set_expiry(10)\n self.assertFalse(self.session.get_expire_at_browser_close())\n\n self.session.set_expiry(0)\n self.assertTrue(self.session.get_expire_at_browser_close())\n\n 
self.session.set_expiry(None)\n self.assertFalse(self.session.get_expire_at_browser_close())\n\n with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):\n self.session.set_expiry(10)\n self.assertFalse(self.session.get_expire_at_browser_close())\n\n self.session.set_expiry(0)\n self.assertTrue(self.session.get_expire_at_browser_close())\n\n self.session.set_expiry(None)\n self.assertTrue(self.session.get_expire_at_browser_close())\n\n def test_decode(self):\n # Ensure we can decode what we encode\n data = {'a test key': 'a test value'}\n encoded = self.session.encode(data)\n self.assertEqual(self.session.decode(encoded), data)\n\n def test_decode_failure_logged_to_security(self):\n bad_encode = base64.b64encode(b'flaskdj:alkdjf')\n with patch_logger('django.security.SuspiciousSession', 'warning') as calls:\n self.assertEqual({}, self.session.decode(bad_encode))\n # check that the failed decode is logged\n self.assertEqual(len(calls), 1)\n self.assertTrue('corrupted' in calls[0])\n\n\n def test_actual_expiry(self):\n # this doesn't work with JSONSerializer (serializing timedelta)\n with override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'):\n self.session = self.backend() # reinitialize after overriding settings\n\n # Regression test for #19200\n old_session_key = None\n new_session_key = None\n try:\n self.session['foo'] = 'bar'\n self.session.set_expiry(-timedelta(seconds=10))\n self.session.save()\n old_session_key = self.session.session_key\n # With an expiry date in the past, the session expires instantly.\n new_session = self.backend(self.session.session_key)\n new_session_key = new_session.session_key\n self.assertNotIn('foo', new_session)\n finally:\n self.session.delete(old_session_key)\n self.session.delete(new_session_key)\n","repo_name":"iwoca/django-seven","sub_path":"django_seven/compat/test/sessions.py","file_name":"sessions.py","file_ext":"py","file_size_in_byte":12530,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"87"} +{"seq_id":"4962186215","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n\ndef visualise(imagelist):\n fig = plt.figure() # make figure\n\n # make axesimage object\n # the vmin and vmax here are very important to get the color map correct\n im = plt.imshow(imagelist[0], cmap=plt.get_cmap('jet'), vmin=0, vmax=255)\n\n # function to update figure\n def updatefig(j):\n # set the data in the axesimage object\n im.set_array(imagelist[j])\n # return the artists set\n return [im]\n # kick off the animation\n ani = animation.FuncAnimation(fig, updatefig, frames=len(imagelist), \n interval=60, blit=True)\n plt.show(block=True)","repo_name":"cberkery/neural-snake","sub_path":"Visualise.py","file_name":"Visualise.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"29540861332","text":"\"\"\"\n=================================================\nDemo of affinity propagation clustering algorithm\n\"\"\"\n\nprint(__doc__)\n\nfrom sklearn.cluster import AffinityPropagation\nfrom sklearn import metrics\nfrom sklearn.datasets import make_blobs\n\n# ############################################################################\n# Generate sample data\ncenters = [[1, 1], [-1, -1], [1, -1]]\nX, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,\n random_state=0)\n\n# ############################################################################\n# Compute Affinity Propagation\naf = AffinityPropagation(preference=-50).fit(X)\ncluster_centers_indices = af.cluster_centers_indices_\nlabels = af.labels_\n\nn_clusters_ = len(cluster_centers_indices)\n\nprint('Estimated number of clusters: %d' % n_clusters_)\nprint('Homogeneity: %0.3f' % metrics.homogeneity_score(labels_true, labels))\nprint(('Completeness: %0.3f' % metrics.completeness_score(labels_true, labels)))\nprint(('V-measure: %0.3f' % metrics.v_measure_score(labels_true, labels)))\nprint(('Adjusted Rand Index: %0.3f')\n % metrics.adjusted_rand_score(labels_true, labels))\nprint('Adjusted Mutual Information: %0.3f'\n % metrics.adjusted_mutual_info_score(labels_true, labels))\nprint('Silhouette Coefficient: %0.3f'\n % metrics.silhouette_score(X, labels, metric='sqeuclidean'))\n\n# ###########################################################################\n# Plot the result\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\n\nplt.close('all')\nplt.figure(1)\nplt.clf()\n\ncolors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')\nfor k, col in zip(range(n_clusters_), colors):\n class_members = labels == k\n cluster_center = X[cluster_centers_indices[k]]\n plt.plot(X[class_members, 0], X[class_members, 1], col + '.')\n plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,\n markeredgecolor='k', markersize=14)\n for x in X[class_members]:\n plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)\n\nplt.title('Estimated number of clusters: %d' % n_clusters_)\nplt.show()\n","repo_name":"minhtoando0899/ML-scikit-learn","sub_path":"5.Clustering/7.DemoOfAffinityPropagationClusteringAlgorithm.py","file_name":"7.DemoOfAffinityPropagationClusteringAlgorithm.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"17829785074","text":"import os\n\nimport openai\nfrom flask import Flask, redirect, render_template, request, url_for,jsonify\nimport requests\n\n\napp = Flask(__name__)\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n\"\"\"\n@app.route(\"/\", methods=(\"GET\", \"POST\"))\ndef index():\n if request.method == \"POST\":\n animal = request.form[\"animal\"]\n response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=generate_prompt(animal),\n temperature=0.6,\n )\n return redirect(url_for(\"index\", result=response.choices[0].text))\n\n result = request.args.get(\"result\")\n return render_template(\"index.html\", result=result)\n\"\"\"\n@app.route('/')\ndef hello_world():\n return 'Hello, World!'\n# Your ChatGPT code goes here\ndef generate_response(message):\n response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=message,\n temperature=0.6,\n )\n try:\n return response.choices[0].text\n except:\n return \"Error generating response\"\n\n@app.route('/chat', methods=['POST'])\ndef chat():\n message = request.json['message']\n response = generate_response(message)\n print(response)\n return jsonify({'message': response})\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"andricio360/chatgpt_flask","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"29250134198","text":"from src.base import AdventOfCode\n\n\nAOC = AdventOfCode().load(\"input\")\n\nrock, paper, scissors = 
1, 2, 3\ndraw, win = 3, 6\n\nrps = {}\nscore = {}\nwol = {}\n\n\ndef letter_to_value(letter: str) -> int:\n if letter in ['A', 'X']:\n return rock\n elif letter in ['B', 'Y']:\n return paper\n elif letter in ['C', 'Z']:\n return scissors\n\n\ndef calculate_score(turn: dict, index: int):\n if (turn['player'] == turn['elf'] + 1) or (turn['player'] == 1 and turn['elf'] == 3):\n score[index] = turn['player'] + win\n elif turn['elf'] == turn['player']:\n score[index] = turn['player'] + draw\n else:\n score[index] = turn['player']\n\n\ndef calculate_win(turn: dict, index: int):\n if turn['player'] == 1:\n wol[index] = (turn['elf'] - 1) if turn['elf'] != 1 else 3\n elif turn['player'] == 2:\n wol[index] = turn['elf'] + draw\n else:\n wol[index] = ((turn['elf'] + 1) if turn['elf'] != 3 else 1) + win\n\n\nfor count, line in enumerate(AOC.input):\n values = str(line).split(\" \")\n\n rps[count] = {'elf': letter_to_value(values[0]), 'player': letter_to_value(values[1])}\n calculate_score(rps[count], count)\n calculate_win(rps[count], count)\n\n\nprint(rps)\nprint(wol)\n\n\nprint(\"Part 1:\")\nprint(sum(score.values()))\n\nprint()\nprint(\"Part 2:\")\nprint(sum(wol.values()))\n","repo_name":"Ewan-Selkirk/Advent-of-Code-2022","sub_path":"src/day02/day02.py","file_name":"day02.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"87"} +{"seq_id":"24014848131","text":"\"\"\"\nMinimally improved noise for python. Pragmatic, minimal improved logging for python.\n\n* Provides two separate log levels.\n* One log level for dependencies and one for the application.\n* Improved default format string.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nimport logging\nimport sys\nfrom . import version\n\n__title__ = 'threepio'\n__version__ = version.get_version(form='short')\n__author__ = 'J. Matt Peterson'\n__license__ = 'Apache 2.0'\n__copyright__ = 'Copyright 2013-2014 J. 
Matt Peterson'\n\n\nlogger = None\n\nVERSION = version.VERSION\nversion = version.get_version(form='verbose')\n\nLOGGER_NAME = \"threepio\"\nLOG_FILENAME = \"./threepio.log\"\nAPP_LOGGING_LEVEL = logging.DEBUG\nDEP_LOGGING_LEVEL = logging.INFO\n\ndef initialize(logger_name=LOGGER_NAME,\n log_filename=LOG_FILENAME,\n app_logging_level=APP_LOGGING_LEVEL,\n dep_logging_level=DEP_LOGGING_LEVEL,\n format=None,\n logger_class=None,\n handlers=[],\n global_logger=True):\n \"\"\"\n Constructs and initializes a `logging.Logger` object.\n\n Returns :class:`logging.Logger` object.\n\n :param logger_name: name of the new logger.\n :param log_filename: The log file location :class:`str` or None.\n :param app_logging_level: The logging level to use for the application.\n :param dep_logging_level: The logging level to use for dependencies.\n :param format: The format string to use :class: `str` or None.\n :param logger_class: The logger class to use\n :param handlers: List of handler instances to add.\n :param global_logger: If true set threepio's global logger variable to this logger.\n \"\"\"\n # If there is no format, use a default format.\n if not format:\n format = \"%(asctime)s %(name)s-%(levelname)s \"\\\n + \"[%(pathname)s %(lineno)d] %(message)s\"\n formatter = logging.Formatter(format)\n\n # Setup the root logging for dependencies, etc.\n if log_filename:\n logging.basicConfig(\n level=dep_logging_level,\n format=format,\n filename=log_filename,\n filemode='a+')\n else:\n logging.basicConfig(\n level=dep_logging_level,\n format=format)\n\n # Setup and add separate application logging.\n if logger_class:\n original_class = logging.getLoggerClass()\n logging.setLoggerClass(logger_class)\n new_logger = logging.getLogger(logger_name)\n logging.setLoggerClass(original_class)\n else:\n new_logger = logging.getLogger(logger_name)\n\n # Set the app logging level.\n new_logger.setLevel(app_logging_level) # required to get level to apply.\n\n # Set the global_logger by default.\n if global_logger:\n global logger\n logger = new_logger\n\n for handler in handlers:\n handler.setFormatter(formatter)\n handler.setLevel(app_logging_level)\n new_logger.addHandler(handler)\n return new_logger\n","repo_name":"jmatt/threepio","sub_path":"threepio/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"87"} +{"seq_id":"1007375039","text":"import pytest\nimport requests\nimport vcf\n\nfrom utils import RESOURCE_FOLDER, BASE_URI, run_command, get_cookies\n\nDB_NAME = \"db_test_variant-annotator\"\n\n\ndef _read_vcf(filepath):\n # type: (str) -> list\n reader = vcf.Reader(open(filepath, \"r\"))\n return [record for record in reader]\n\n\ndef _request_getAnnotatedVariants(data):\n url = \"{}/getAnnotatedVariants\".format(BASE_URI)\n return requests.post(url=url, cookies=get_cookies(), json=data, headers={\"Content-type\": \"application/json\"})\n\n\n@pytest.mark.first\ndef test_post_vcfs_success():\n def convert_cookies(cookies):\n return [\"{}={}\".format(key, value) for key, value in cookies.items()]\n\n url = \"{}/{}\".format(BASE_URI, \"vcfs\")\n cookies = convert_cookies(get_cookies())\n headers = 'content-type: multipart/form-data'\n file = \"{}/{}\".format(RESOURCE_FOLDER, \"test_vcf_file.vcf\")\n command = \"curl -w \\\"%{{http_code}}\\\" -X POST -b {} '{}?db_name={}' -H \\'{}\\' -F vcf_file=@{}\".format(cookies[0],\n url,\n DB_NAME,\n headers,\n file)\n out, err = run_command(command)\n assert 
int(out) == 200, {\"OUT: \": out, \"ERROR: \": err, \"COMMAND: \": command}\n\n\ndef test_post_getAnnotatedVariants_empty():\n data = {\n \"variants\": []\n }\n response = _request_getAnnotatedVariants(data)\n assert response.status_code // 100 == 2, (response.status_code, response.json())\n assert response.json() == data\n\n\ndef test_post_getAnnotatedVariants():\n vcf_data = _read_vcf(\"{}/test_vcf_file.vcf\".format(RESOURCE_FOLDER))\n data = {\n \"variants\": [{\n \"pos\": int(vcf_data[0].POS),\n \"chrom\": str(vcf_data[0].CHROM),\n \"ref\": str(vcf_data[0].REF),\n \"alt\": str(vcf_data[0].ALT[0])\n }]\n }\n info = {}\n for key, value in vcf_data[0].INFO.items():\n info[key] = float(value[0])\n\n expected_data = {\n \"variants\": [\n {\n \"pos\": int(vcf_data[0].POS),\n \"chrom\": str(vcf_data[0].CHROM),\n \"ref\": str(vcf_data[0].REF),\n \"alt\": str(vcf_data[0].ALT[0]),\n \"annotations\": [\n {\n \"info\": info,\n \"dbName\": DB_NAME\n }]\n }\n ]\n }\n\n response = _request_getAnnotatedVariants(data)\n assert response.status_code // 100 == 2, (response.status_code, response.json())\n response_body = response.json()\n assert len(response_body[\"variants\"]) == 1\n r_info = {}\n for key, value in response_body['variants'][0]['annotations'][0]['info'].items():\n r_info[key] = float(value)\n response_body['variants'][0]['annotations'][0]['info'] = r_info\n assert response_body == expected_data\n","repo_name":"Bioinformatics-internship-EPAM/variant-annotator","sub_path":"tests/end2end/test_vcfs.py","file_name":"test_vcfs.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"20618889913","text":"import cv2\r\nimport numpy as np\r\n\r\nsource_image = cv2.imread('scene.jpg')\r\ntemplate = cv2.imread('house.jpg')\r\n\r\n# Define a range of scales to consider\r\nscales = np.linspace(0.5, 1.5, 5)\r\n\r\n\r\n# Variables to store best case values\r\nbest_match_value = float('inf')\r\nbest_match_location = (0, 0)\r\nbest_scale = 1.0 \r\n\r\nfor scale in scales:\r\n print(scale)\r\n # Resize the template according to the current scale\r\n scaled_template = cv2.resize(template, None, fx=scale, fy=scale)\r\n \r\n template_height, template_width = scaled_template.shape[:2]\r\n\r\n for y in range(0, source_image.shape[0] - template_height):\r\n for x in range(0, source_image.shape[1] - template_width):\r\n region = source_image[y:y + template_height, x:x + template_width]\r\n \r\n # Mean squared difference between the region and template\r\n diff = np.sum((region - scaled_template)**2)\r\n \r\n if diff < best_match_value:\r\n best_match_value = diff\r\n best_match_location = (x, y)\r\n best_scale = scale\r\n\r\n# Actual size of the detected object\r\ndetected_width = int(template.shape[1] * best_scale)\r\ndetected_height = int(template.shape[0] * best_scale)\r\n\r\ncv2.rectangle(source_image, best_match_location, (best_match_location[0] + detected_width, best_match_location[1] + detected_height), (0, 255, 0), 2)\r\n\r\ncv2.imshow('Object Detection Result', source_image)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","repo_name":"OrangeAVA/Mastering-OpenCV-with-Python","sub_path":"Chapter 11/Object Detection using Sliding Windows.py","file_name":"Object Detection using Sliding Windows.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"3164760351","text":"\"\"\"Define the module containing the function 
used to scrape data from the APD website.\"\"\"\nimport asyncio\nfrom pathlib import Path\nimport re\nfrom urllib.parse import urljoin\n\nimport aiohttp\nfrom loguru import logger\nfrom tenacity import retry\nfrom tenacity import stop_after_attempt\nfrom tenacity import wait_exponential\n\nfrom scrapd.core import article\nfrom scrapd.core import constant\nfrom scrapd.core import date_utils\nfrom scrapd.core import model\nfrom scrapd.core.regex import match_pattern\n\nAPD_URL = 'http://austintexas.gov/department/news/296'\nPAGE_DETAILS_URL = 'http://austintexas.gov/'\n\n\n@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=3), reraise=True)\nasync def fetch_text(session, url, params=None):\n \"\"\"\n Fetch the data from a URL as text.\n\n :param aiohttp.ClientSession session: aiohttp session\n :param str url: request URL\n :param dict params: request parameters, defaults to None\n :return: the data from a URL as text.\n :rtype: str\n \"\"\"\n if not params:\n params = {}\n try:\n async with session.get(url, params=params) as response:\n logger.debug(response.url)\n return await response.text()\n except (\n aiohttp.ClientError,\n aiohttp.http_exceptions.HttpProcessingError,\n ) as e:\n logger.error(f'aiohttp exception for {url} -> {e}')\n raise e\n\n\nasync def fetch_news_page(session, page=1):\n \"\"\"\n Fetch the content of a specific news page from the APD website.\n\n The page number starts at 1.\n\n :param aiohttp.ClientSession session: aiohttp session\n :param int page: page number to fetch, defaults to 1\n :return: the page content.\n :rtype: str\n \"\"\"\n params = {}\n if page > 1:\n params['page'] = page - 1\n return await fetch_text(session, APD_URL, params)\n\n\nasync def fetch_detail_page(session, url):\n \"\"\"\n Fetch the content of a detail page.\n\n :param aiohttp.ClientSession session: aiohttp session\n :param str url: request URL\n :return: the page content.\n :rtype: str\n \"\"\"\n return await fetch_text(session, url)\n\n\ndef extract_traffic_fatalities_page_details_link(news_page):\n \"\"\"\n Extract the fatality detail page links from the news page.\n\n :param str news_page: HTML content of the news page\n :return: a list of links.\n :rtype: list or `None`\n \"\"\"\n regex = re.compile(\n r'''\n (?:\n (/news/traffic-fatality-(\\d{1,3})(?:-(\\d?|[a-z]+))?)\\\"\n |\n (/news/fatality-crash-(\\d{1,3})-(\\d))\n )\n ''',\n re.VERBOSE | re.MULTILINE,\n )\n matches = regex.findall(news_page, re.MULTILINE)\n compact_matches = []\n for match in matches:\n parts = tuple(part for part in match if part != '')\n compact_matches.append(parts)\n return compact_matches\n\n\ndef generate_detail_page_urls(titles):\n \"\"\"\n Generate the full URLs of the fatality detail pages.\n\n :param list titles: a list of partial links\n :return: a list of full links to the fatality detail pages.\n :rtype: list\n \"\"\"\n return [urljoin(PAGE_DETAILS_URL, title[0]) for title in titles]\n\n\ndef has_next(news_page):\n \"\"\"\n Return `True` if there is another news page available.\n\n :param str news_page: the news page to parse\n :return: `True` if there is another news page available, `False` otherwise.\n :rtype: bool\n \"\"\"\n if not news_page:\n return False\n\n pattern = re.compile(\n r'''\n \n (››) # Test indicating a next page\n \n ''',\n re.VERBOSE | re.MULTILINE,\n )\n element = match_pattern(news_page, pattern)\n return bool(element)\n\n\ndef parse_page(page, url, dump=False):\n \"\"\"\n Parse the page using all parsing methods available.\n\n :param str page: the content of 
the fatality page\n :param str url: detail page URL\n :return: a dictionary representing a fatality.\n :rtype: dict\n \"\"\"\n report = model.Report(case='19-123456')\n\n # Parse the page.\n article_report, artricle_err = article.parse_content(page)\n report.update(article_report)\n if artricle_err: # pragma: no cover\n article_err_str = f'\\nArticle fields:\\n\\t * ' + \"\\n\\t * \".join(artricle_err) if artricle_err else ''\n logger.debug(f'Errors while parsing {url}:{article_err_str}')\n\n # Dump the file.\n if dump:\n dumpr_dir = Path(constant.DUMP_DIR)\n dumpr_dir.mkdir(parents=True, exist_ok=True)\n dump_file_name = url.split('/')[-1]\n dump_file = dumpr_dir / dump_file_name\n dump_file.write_text(page)\n\n return report\n\n\n@retry()\nasync def fetch_and_parse(session, url, dump=False):\n \"\"\"\n Parse a fatality page from a URL.\n\n :param aiohttp.ClientSession session: aiohttp session\n :param str url: detail page URL\n :return: a dictionary representing a fatality.\n :rtype: dict\n \"\"\"\n # Retrieve the page.\n page = await fetch_detail_page(session, url)\n if not page:\n raise ValueError(f'The URL {url} returned a 0-length content.')\n\n # Parse it.\n report = parse_page(page, url, dump)\n if not report:\n raise ValueError(f'No data could be extracted from the page {url}.')\n\n # Add the report link.\n report.link = url\n\n return report\n\n\nasync def async_retrieve(pages=-1, from_=None, to=None, attempts=1, backoff=1, dump=False):\n \"\"\"\n Retrieve fatality data.\n\n :param str pages: number of pages to retrieve or -1 for all\n :param str from_: the start date\n :param str to: the end date\n :param int attempts: number of attempts per report\n :param int backoff: initial backoff time (second)\n :param bool dump: dump reports with parsing issues\n :return: the list of fatalities and the number of pages that were read.\n :rtype: tuple\n \"\"\"\n res = {}\n page = 1\n has_entries = False\n no_date_within_range_count = 0\n from_date = date_utils.from_date(from_)\n to_date = date_utils.to_date(to)\n\n logger.debug(f'Retrieving fatalities from {from_date} to {to_date}.')\n\n async with aiohttp.ClientSession() as session:\n while True:\n # Fetch the news page.\n logger.info(f'Fetching page {page}...')\n try:\n news_page = await fetch_news_page(session, page)\n except Exception:\n raise ValueError(f'Cannot retrieve news page #{page}.')\n\n # Looks for traffic fatality links.\n page_details_links = extract_traffic_fatalities_page_details_link(news_page)\n\n # Generate the full URL for the links.\n links = generate_detail_page_urls(page_details_links)\n logger.debug(f'{len(links)} fatality page(s) to process.')\n\n # Fetch and parse each link.\n tasks = [\n fetch_and_parse.retry_with(\n stop=stop_after_attempt(attempts),\n wait=wait_exponential(multiplier=backoff),\n reraise=True,\n )(session, link, dump) for link in links\n ]\n page_res = await asyncio.gather(*tasks)\n\n if page_res:\n # If the page contains fatalities, ensure all of them happened within the specified time range.\n entries_in_time_range = [\n entry for entry in page_res if date_utils.is_between(entry.date, from_date, to_date)\n ]\n\n # If 2 pages in a row:\n # 1) contain results\n # 2) but none of them contain dates within the time range\n # 3) and we did not collect any valid entries\n # Then we can stop the operation.\n past_entries = all([date_utils.is_before(entry.date, from_date) for entry in page_res])\n if from_ and past_entries and not has_entries:\n no_date_within_range_count += 1\n if 
no_date_within_range_count > 1:\n logger.debug(f'{len(entries_in_time_range)} fatality page(s) within the specified time range.')\n break\n\n # Check whether we found entries in the previous pages.\n if not has_entries:\n has_entries = not has_entries and bool(entries_in_time_range)\n logger.debug(f'{len(entries_in_time_range)} fatality page(s) is/are within the specified time range.')\n\n # If there are none in range, we do not need to search further, and we can discard the results.\n if has_entries and not entries_in_time_range:\n logger.debug(f'There are no data within the specified time range on page {page}.')\n break\n\n # Store the results if the ID number is new.\n res.update({entry.case: entry for entry in entries_in_time_range if entry.case not in res})\n\n # Stop if there is no further pages.\n if not has_next(news_page) or page >= pages > 0:\n break\n\n page += 1\n\n return list(res.values()), page\n","repo_name":"scrapd/scrapd","sub_path":"scrapd/core/apd.py","file_name":"apd.py","file_ext":"py","file_size_in_byte":9146,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"87"} +{"seq_id":"16638055815","text":"import time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass AutoLogger:\n\n def __init__(self, student):\n s = Service('/Users/konstantinasam/PycharmProjects/chromedriver_98')\n self.web = webdriver.Chrome(service=s)\n self.wait = WebDriverWait(self.web, 10)\n self.website = \"https://eur-nl.libcal.com/r\"\n self.student = student\n self.code = None\n\n def get_code(self):\n # To retrieve the code we have to check the outlook e-mail account\n outlookLogin = \"https://login.live.com/login.srf?wa=wsignin1.0&rpsnv=13&ct=1643724024&rver=7.0.6737.0&wp\" \\\n \"=MBI_SSL&wreply=https%3a%2f%2foutlook.live.com%2fowa%2f%3fnlp%3d1%26RpsCsrfState%3db6ec5ca0\" \\\n \"-ed2e-16f2-8709-1c6e3fa784b3&id=292841&aadredir=1&CBCXT=out&lw=1&fl=dob%2cflname%2cwld\" \\\n \"&cobrandid=90015 \"\n self.web.get(outlookLogin)\n\n # Log in procedure\n self.web.find_element(By.NAME, \"loginfmt\").send_keys(self.student.get_user_name())\n self.web.find_element(By.ID, \"idSIButton9\").click()\n\n # Redirected to the EUR site we fill in the password\n self.wait.until(EC.element_to_be_clickable((By.NAME, \"Password\")))\n self.web. 
find_element(By.NAME, \"Password\").send_keys(self.student.get_password())\n        self.web.find_element(By.XPATH, \"//*[@id=\\\"submitButton\\\"]\").click()\n        self.wait.until(EC.element_to_be_clickable((By.ID, \"idSIButton9\"))).click()\n\n        time.sleep(0.5)\n        self.web.find_element(By.CSS_SELECTOR, 'div[title=\"Library reservations\"]').click()\n        time.sleep(0.5)\n        title = self.web.find_element(By.CSS_SELECTOR, 'div[role=\"option\"]').get_attribute(\"aria-label\")\n        print(title.split(\"Enter this code: \")[1].split(\" \")[0])\n\n        while True:\n            time.sleep(3)\n\n    def log_in(self):\n        self.web.get(self.website)\n\n        # Fill in the code\n        self.wait.until(EC.element_to_be_clickable((By.ID, \"s-lc-new-reservation-checkin\"))).click()\n        fill_in_code_here = self.wait.until(EC.element_to_be_clickable((By.NAME, \"code\")))\n        fill_in_code_here.click()\n        fill_in_code_here.send_keys(self.code)\n\n        # Submit and we are done\n        self.wait.until(EC.element_to_be_clickable((By.ID, \"s-lc-checkin-button\"))).click()\n        time.sleep(2)\n        self.web.close()\n","repo_name":"brammesland/testinglibbot","sub_path":"AutoLogger.py","file_name":"AutoLogger.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"27250786459","text":"txt1 = \"울릉도\"\ntxt2 = \"동남쪽\"\ntxt3 = \"뱃길따라\"\ntxt4 = \"87k\"\ntxt5 = \"외로운\"\ntxt6 = \"섬하나\"\ntxt7 = \"새들의 고향\"\nprint(f\"{txt1} {txt2} {txt3} {txt4} {txt5} {txt6} {txt7}\")\n\n# expression vs. syntax (statement)\n# expression : anything that can be represented as a value, anything that can be assigned as the value of a variable\n# statement : anything that cannot be assigned as a value\nanswer = 40 + 2\n\n\ndef add(a, b):\n    \"\"\"\n    A function that performs addition\n    :param a:\n    :param b:\n    :return:\n    \"\"\"\n    return a + b\n\n\n# an expression, because the result of calling the function is assigned to answer2\nanswer2 = add(41, 1)\n\n\n# assigning a function to a variable\nfn = add\n\n\nprint(f\"{42 + 1}\")\nprint(f\"{answer}\")\nprint(f\"{add(10, 10)}\")\nprint(f\"{answer2}\")\nprint(f\"{add}\")\nprint(f\"{fn}\")\nprint(f\"{add.__doc__}\")\n\n# statements : if, for, while; they cannot be assigned\n# an = for x in range(10): pass -> cannot be assigned\n\n","repo_name":"Happiness44/just_python","sub_path":"7_day_1/f_strings_1.py","file_name":"f_strings_1.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"73654455965","text":"\nimport unittest\nfrom pyd.maze import *\n\n\nclass MirrorTest(unittest.TestCase):\n\n\tdef test_mirror_types(self):\n\t\tself.assertEqual(set(Mirror.get_mirrors().keys()), {'/', '\\\\'})\n\t\tself.assertEqual(Mirror.get_by_name('/').name, '/')\n\t\tself.assertEqual(Mirror.get_by_name('\\\\').name, '\\\\')\n\n\tdef test_direction_forward_mirror(self):\n\t\tmirror = Mirror.get_by_name('/')\n\t\tmirror = mirror((1, 5))\n\t\texpected = (\n\t\t\t(Direction.North, Direction.East),\n\t\t\t(Direction.South, Direction.West),\n\t\t)\n\t\tfor incident, redirect in expected:\n\t\t\tself.assertEqual(mirror.get_direction(incident), redirect)\n\t\t\tself.assertEqual(mirror.get_direction(redirect), incident)\n\n\nclass LaserTest(unittest.TestCase):\n\n\tdef test_no_loop(self):\n\n\t\tlaser = Laser((1, 1), Direction.North)\n\t\tlaser.set_pos((0, 1))\n\n\t\tself.assertEqual(laser.get_trail(), [(1, 1), (0, 1)])\n\t\tself.assertEqual(laser.directions, [Direction.North] * 2)\n\t\t# no mirror\n\t\tlaser.set_direction(Direction.South)\n\t\tself.assertFalse(laser._revisiting((1, 1)))\n\n\n\tdef test_loop(self):\n\t\t'''\n\t\t\t/ \t\t\\\n\n\t\t\t\\ \t\t/\n\n\t\t'''\n\t\t_ = [(2, 1), (1, 1), (1, 2), (3, 2), (3, 1),]\n\n\t\tlaser = 
Laser(_[0], Direction.North)\n\t\tlaser.set_pos(_[1])\n\t\tlaser.set_direction(Direction.East)\n\t\tlaser.set_pos(_[2])\n\t\tlaser.set_direction(Direction.South)\n\t\tlaser.set_pos(_[3])\n\t\tlaser.set_direction(Direction.West)\n\t\tlaser.set_pos(_[4])\n\n\t\tself.assertEqual(laser.get_trail(), _)\n\t\tself.assertEqual(laser.directions, list('NNESW'))\n\n\t\tlaser.set_pos((1, 1))\n\t\tlaser.set_direction(Direction.East)\n\t\tself.assertTrue(laser._revisiting((1, 1)))\n\n\tdef test_loop2(self):\n\t\t'''\n\t\t\t\t\t/ - - - \\\n\t\t\t\t\t|\t\t|\n\t\t\t/ - -> /\t\t|\n\t\t\t|\t\t\t\t|\n\t\t\t\\ - - - - - - - /\n\n\t\t'''\n\t\t_ = [(3, 2), (3, 3), (1, 3), (1, 4), (4, 4), (4, 1), (3, 1), (3, 3)]\n\t\tdirections = list('EENESWNE')\n\t\tlaser = None\n\t\tfor pos, direction in zip(_, directions):\n\t\t\tif laser is None:\n\t\t\t\tlaser = Laser(pos, direction)\n\t\t\telse:\n\t\t\t\tlaser.set_direction(direction)\n\t\t\t\tlaser.set_pos(pos)\n\n\t\tself.assertEqual(laser.get_trail(), _)\n\t\tself.assertEqual(laser.directions, list('EENESWNE'))\n\n\t\t# change direction not called, so no loop\n\t\tself.assertFalse(laser._revisiting((3, 3)))\n\t\t# set direction called\n\t\tlaser.set_direction(Direction.North)\n\t\tself.assertTrue(laser._revisiting((3, 3)))\n\n\n\nclass BoardTest(unittest.TestCase):\n\n\tdef test_board_simple(self):\n\t\tfor pos, direction in zip([(3, 3), (4, 2), (3, 1), (2, 2)], 'NESW'):\n\t\t\tboard = Board(6, 6)\n\t\t\tboard.add_laser((3, 2), direction)\n\t\t\tself.assertEqual(next(board.get_next_pos()), pos)\n\n\n\tdef test_laser_position(self):\n\t\t'''\n\t\t\tno mirrors\n\t\t'''\n\t\tboard = Board(6, 6)\n\t\tboard.add_laser((3, 2), Direction.East)\n\n\t\ttrails = board.get_laser_position()\n\t\tself.assertEqual(list(trails), [(4, 2), (5, 2)])\n\n\tdef test_laser_with_mirror(self):\n\n\t\tboard = Board(6, 6)\n\t\tboard.add_laser((2, 3), Direction.East)\n\t\tboard.add_mirror(Forward((4, 3)))\n\n\t\ttrails = board.get_laser_position()\n\t\tself.assertEqual(next(trails), (3, 3))\n\t\tself.assertEqual(board.laser.direction, Direction.East)\n\t\tself.assertEqual(next(trails), (4, 3))\n\t\tself.assertEqual(board.laser.direction, Direction.East)\n\t\tself.assertEqual(next(trails), (4, 4))\n\t\tself.assertEqual(board.laser.direction, Direction.North)\n\n\tdef test_mirror_loop(self):\n\t\t'''\n\t\t\t\t\t/ - - - \\\n\t\t\t\t\t|\t\t|\n\t\t\t/ - -> /\t\t|\n\t\t\t|\t\t\t\t|\n\t\t\t\\ - - - - - - - /\n\n\t\t'''\n\t\tF, B = Forward, Backward\n\t\tmirrors = [F((3, 2)), F((3, 4)), B((4, 4)), F((4, 1)), B((1, 1)), F((1, 2))]\n\t\tboard = Board(6, 6)\n\t\tboard.add_laser((2, 2), Direction.East)\n\t\tfor mirror in mirrors:\n\t\t\tboard.add_mirror(mirror)\n\n\t\ttrails = list(board.get_laser_position())\n\t\t# has loop\n\t\tself.assertEqual(trails[-1], (-1, -1))\n\t\tself.assertEqual(len(trails) - 1, 13)\n\n\tdef test_mirror_loop2(self):\n\t\t'''\n\t\t\t\t\t/ - - - \\\n\t\t\t\t\t|\t\t|\n\t\t\t/ - -> / - - - /\n\t\t\t|\t\t|\n\t\t\t\\ - - - /\n\n\t\t'''\n\t\tF, B = Forward, Backward\n\t\tmirrors = [F((3, 2)), F((3, 4)), B((4, 4)), F((4, 2)), F((3, 1)), B((1, 1)), F((1, 2))]\n\t\tboard = Board(6, 6)\n\t\tboard.add_laser((2, 2), Direction.East)\n\t\tfor mirror in mirrors:\n\t\t\tboard.add_mirror(mirror)\n\n\t\ttrails = list(board.get_laser_position())\n\t\t# has loop\n\t\tself.assertEqual(trails[-1], (-1, -1))\n\t\tself.assertEqual(len(trails) - 1, 13)\n\n\tdef test_boundary_loop(self):\n\t\tF, B = Forward, Backward\n\t\tmirrors = [B((0, 0)), F((0, 3)), B((3, 3)), F((3, 0))]\n\t\tboard = Board(4, 
4)\n\t\tboard.add_laser((0, 1), Direction.North)\n\t\tfor mirror in mirrors:\n\t\t\tboard.add_mirror(mirror)\n\n\t\ttrails = list(board.get_laser_position())\n\t\t# has loop\n\t\tself.assertEqual(trails[-1], (-1, -1))\n\t\tself.assertEqual(len(trails) - 1, 14)\n\n\tdef test_boundary(self):\n\t\t'''\n\t\t\t- - -> /\n\t\t'''\n\t\tF, B = Forward, Backward\n\t\tmirrors = [B((0, 2))]\n\t\tboard = Board(4, 4)\n\t\tboard.add_laser((0, 1), Direction.North)\n\t\tfor mirror in mirrors:\n\t\t\tboard.add_mirror(mirror)\n\n\t\ttrails = list(board.get_laser_position())\n\t\tself.assertEqual(trails, [(0, 2),])\n\n\nif '__main__' == __name__:\n\tunittest.main()\n","repo_name":"arunkumarpatange/fib100","sub_path":"tests/ut_maze.py","file_name":"ut_maze.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"25411587694","text":"# get a line of raw bitmap and plot the components\nimport serial\nser = serial.Serial('COM9',230400) # the name of your Pico port\nprint('Opening port: ')\nprint(ser.name)\n\nser.write(b'hi\\r\\n') # send a newline to request data\ndata_read = ser.read_until(b'\\n',50) # read the echo\n\nsampnum = 0\nindex = 0\nraw = []\nreds = []\ngreens = []\nblues = []\nbright = []\n\n\n# Function for finding the COM of the line based on the red, green and blue input values from image\ndef bw_image(red, green, blue):\n filtered = []\n\n #inverting the data... high values = black areas in the image\n for i in range(len(red)):\n if (red[i] + green[i] + blue[i]) > 175:\n filtered.append(0)\n else:\n filtered.append(100)\n \n return filtered\n\ndef grayscale(red, green, blue, bright):\n filtered = []\n \n #inverting the data... high values = black areas in the image\n for i in range(len(red)):\n gray = (red[i] + green[i] + blue[i])/3\n filtered.append(gray)\n \n return filtered\n\n\ndef find_COM(filtered_data):\n numerator = 0\n denominator = 0\n\n for i in range(len(filtered_data)):\n numerator += (filtered_data[i] * i)\n denominator += filtered_data[i]\n \n COM = round(numerator/denominator)\n return COM\n\n# Pico sends back index and raw pixel value\nwhile sampnum < 60: # width of bitmap\n data_read = ser.read_until(b'\\n',50) # read until newline\n data_text = str(data_read,'utf-8') # convert bytes to string\n data = list(map(int,data_text.split())) # convert string to values\n\n if(len(data)==2):\n index = data[0]\n raw.append(data[1])\n reds.append(((data[1]>>5)&0x3F)/0x3F*100) # red value is middle 6 bits\n greens.append((data[1]&0x1F)/0x1F*100) # green value is rightmost 5 bits\n blues.append(((data[1]>>11)&0x1F)/0x1F*100) # blue vale is leftmost 5 bits\n bright.append((data[1]&0x1F)+((data[1]>>5)&0x3F)+((data[1]>>11)&0x1F)) # sum of colors\n sampnum = sampnum + 1\n\n# print the raw color as a 16bit binary to double check bitshifting\nfor i in range(len(reds)):\n print(f\"{raw[i]:#018b}\")\n\nfiltered_image = grayscale(reds, blues, greens, bright)\ncom = find_COM(filtered_image)\nprint(com)\n\n# plot the colors \nimport matplotlib.pyplot as plt \nx = range(len(reds)) # time array\nplt.plot(x,reds,'r*-',x,greens,'g*-',x,blues,'b*-', x,filtered_image,'k*-')\nplt.ylabel('color')\nplt.xlabel('position')\nplt.show()\n\n# be sure to close the 
port\nser.close()\n\n\n","repo_name":"tylerhummer/ME433","sub_path":"HW15/color_plotting.py","file_name":"color_plotting.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"16239828144","text":"import click\nfrom click.core import Context, Option\nfrom termcolor import colored\n\nfrom huggingface_tool import __AUTHOR__, __EMAIL__, __TITLE__, __VERSION__\nfrom huggingface_tool.utils.util import get_system_info\n\n\ndef red(text: str):\n return colored(text, \"red\")\n\n\ndef print_version(\n ctx: Context,\n param: Option,\n value: bool,\n) -> None:\n if not value or ctx.resilient_parsing:\n return\n click.secho(f\"{__TITLE__.upper()} version: {red(__VERSION__)}\")\n click.secho(f\"Developed by {__AUTHOR__}, Email: {red(__EMAIL__)}\")\n ctx.exit()\n\n\ndef print_system_info(\n ctx: Context,\n param: Option,\n value: bool,\n) -> None:\n if not value or ctx.resilient_parsing:\n return\n info_dict = get_system_info()\n for key, value in info_dict.items():\n click.secho(f\"- {key}: {red(value)}\")\n ctx.exit()\n\n\nCONTEXT_SETTINGS = dict(help_option_names=[\"-h\", \"--help\"])\n\n\n@click.group(invoke_without_command=True)\n@click.option(\n \"--version\",\n is_flag=True,\n callback=print_version,\n expose_value=False,\n is_eager=True,\n help=\"Show package's version information.\",\n)\n@click.option(\n \"--system_info\",\n is_flag=True,\n callback=print_system_info,\n expose_value=False,\n is_eager=True,\n help=\"Show system information.\",\n)\n@click.pass_context\ndef cli(ctx):\n if ctx.invoked_subcommand is None:\n pass\n else:\n pass\n # click.echo(f\"I am about to invoke {ctx.invoked_subcommand}\")\n\n\n@cli.command()\n@click.argument(\"model_name\")\n@click.argument(\"save_dir\")\ndef save_dm(model_name, save_dir):\n from huggingface_tool.savers.diffusion_model_saver import DiffusionModelSaver\n\n saver = DiffusionModelSaver(model_name)\n if saver.load():\n saver.save(save_dir)\n else:\n saver.logger.info(\"Model not found\")\n\n\n@cli.command()\n@click.argument(\"file_name\")\n@click.argument(\"save_dir\")\n@click.option(\n \"--repo_type\",\n \"-r\",\n type=click.Choice(\n [\n \"model\",\n \"dataset\",\n ]\n ),\n default=\"model\",\n help=\"repo type\",\n)\ndef save_file(file_name, save_dir, repo_type):\n from huggingface_tool.savers.file_saver import FileSaver\n\n saver = FileSaver(file_name, repo_type)\n if saver.load():\n saver.save(save_dir)\n else:\n saver.logger.info(\"File not found\")\n\n\n@cli.command()\n@click.argument(\"repo_name\")\n@click.argument(\"save_dir\")\n@click.option(\n \"--repo_type\",\n \"-r\",\n type=click.Choice(\n [\n \"model\",\n \"dataset\",\n ]\n ),\n default=\"model\",\n help=\"repo type\",\n)\ndef save_repo(repo_name, save_dir, repo_type):\n from huggingface_tool.savers.repo_saver import RepoSaver\n\n saver = RepoSaver(repo_name, repo_type)\n if saver.load():\n saver.save(save_dir)\n else:\n saver.logger.info(\"Repo not found\")\n\n\n@cli.command()\n@click.argument(\"tokenizer_name\")\n@click.argument(\"save_dir\")\ndef save_tk(tokenizer_name, save_dir):\n from huggingface_tool.savers.tokenizer_saver import TokenizerSaver\n\n saver = TokenizerSaver(tokenizer_name)\n if saver.load():\n saver.save(save_dir)\n else:\n saver.logger.info(\"Tokenizer not found\")\n\n\n@cli.command()\n@click.argument(\"dataset_name\")\n@click.argument(\"save_dir\")\ndef save_data(dataset_name, save_dir):\n from huggingface_tool.savers.dataset_saver import DatasetSaver\n\n saver 
= DatasetSaver(dataset_name)\n if saver.load():\n saver.save(save_dir)\n else:\n saver.logger.info(\"Dataset not found\")\n\n\n@cli.command()\n@click.argument(\"model_class\")\n@click.argument(\"model_name\")\n@click.argument(\"save_dir\")\ndef save_model(model_class, model_name, save_dir):\n from huggingface_tool.savers.model_saver import ModelSaver\n\n saver = ModelSaver(model_class, model_name)\n if saver.load():\n saver.save(save_dir)\n else:\n saver.logger.info(\"Dataset not found\")\n\n\n@cli.command()\n@click.argument(\"dataset_dir\")\n@click.argument(\"dataset_name\")\ndef upload_data(dataset_dir, dataset_name):\n from huggingface_tool.uploaders.dataset_uploader import DatasetUploader\n\n uploader = DatasetUploader(dataset_dir, dataset_name)\n if uploader.check():\n uploader.push()\n else:\n uploader.logger.info(\"Dataset not valid\")\n\n\n@cli.command()\n@click.argument(\"file_path\")\n@click.argument(\"remote_file_path\")\n@click.option(\n \"--repo_type\",\n \"-r\",\n type=click.Choice(\n [\n \"model\",\n \"dataset\",\n ]\n ),\n default=\"model\",\n help=\"repo type\",\n)\ndef upload_file(file_path, remote_file_path, repo_type):\n from huggingface_tool.uploaders.file_uploader import FileUploader\n\n uploader = FileUploader(file_path, remote_file_path, repo_type)\n if uploader.check():\n uploader.push()\n else:\n uploader.logger.info(\"File not valid\")\n\n\n@cli.command()\n@click.argument(\"dir_path\")\n@click.argument(\"remote_dir_path\")\n@click.option(\n \"--repo_type\",\n \"-r\",\n type=click.Choice(\n [\n \"model\",\n \"dataset\",\n ]\n ),\n default=\"model\",\n help=\"repo type\",\n)\ndef upload_dir(dir_path, remote_dir_path, repo_type):\n from huggingface_tool.uploaders.dir_uploader import DirUploader\n\n uploader = DirUploader(dir_path, remote_dir_path, repo_type)\n if uploader.check():\n uploader.push()\n else:\n uploader.logger.info(\"Directory not valid\")\n\n\n@cli.command()\n@click.argument(\"model_dir\")\n@click.argument(\"model_name\")\ndef upload_model(model_dir, model_name):\n from huggingface_tool.uploaders.model_uploader import ModelUploader\n\n uploader = ModelUploader(model_dir, model_name)\n if uploader.check():\n uploader.push()\n else:\n uploader.logger.info(\"Model not valid\")\n","repo_name":"OpenRL-Lab/huggingface_tool","sub_path":"huggingface_tool/cli/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":5739,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"86"} +{"seq_id":"34887002346","text":"# -- coding: utf-8 --\n# @info:\n# @Author : liyahui\n# @Time : 2023/6/2 上午8:33\n# @File : performance_testing.py\n# @Software: PyCharm\n\n\"\"\"电池性能测试\"\"\"\nimport datetime\nimport configparser\nimport os.path\nimport sys\nimport threading\nimport time\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import *\nimport tkinter.messagebox as msgbox\nfrom LogFormat import LogFormat\nfrom UPS3072Adapter import UPS3072Adapter\nimport openpyxl\n\nlog = LogFormat('performance_testing', \"performance_testing.log\")\nlogger = log.logger\n\n\nclass Performance(tk.Frame):\n def __init__(self, config_path, master=tk.Tk()):\n super().__init__(master)\n self.log_text = None\n self.treeview_scrollbar = None\n self.root = master\n self.exit_button = None\n self.tree_view = None\n self.clear_text_button = None\n self.entry_box = None\n self.background = \"#8FBC8F\"\n self.font = \"微软雅黑\"\n self.clear_button = None\n self.execute_button = None\n self.start_height = None\n self.start_width = None\n self.box_width 
= None\n self.filepath = None\n self.text_box = None\n self.adapter_object = UPS3072Adapter('./conf/UPS3072Adapter.conf')\n self.config_path = config_path\n self.init_conf()\n self.run()\n\n def init_conf(self):\n \"\"\"加载配置文件,初始化配置信息\"\"\"\n config = configparser.ConfigParser()\n try:\n config.read(self.config_path, encoding='utf-8')\n self.start_width = config.getint('performance_testing', 'width')\n self.start_height = config.getint('performance_testing', 'height')\n self.box_width = config.getint('performance_testing', 'box_width')\n self.filepath = config.get('performance_testing', 'filepath')\n except Exception as e:\n error_msg = f'init config file error:{e}'\n logger.error(error_msg)\n return False, error_msg\n\n def run(self):\n \"\"\"界面初始化\"\"\"\n self.root.title(\"Performance testing\")\n self.root.geometry(f\"{self.start_width}x{self.start_height}+100+50\")\n # Create the menu\n menu_bar = tk.Menu(self.root)\n # Create the file menu\n file_menu = tk.Menu(menu_bar, tearoff=0)\n file_menu.add_command(label=\"退出\", command=self.exit)\n\n # Create the help menu\n help_menu = tk.Menu(menu_bar, tearoff=0)\n help_menu.add_command(label=\"关于\", command=self.about)\n\n # Add the menus to the menu bar\n menu_bar.add_cascade(label=\"文件\", menu=file_menu)\n menu_bar.add_cascade(label=\"帮助\", menu=help_menu)\n\n # Add the menu bar to the window\n self.root.config(menu=menu_bar)\n # 第一行\n label = tk.Label(self.root, text=\"SN:\", font=(self.font, 12))\n\n self.entry_box = tk.Entry(self.root, width=self.box_width, highlightcolor='red', highlightthickness=1)\n self.execute_button = tk.Button(self.root, text=\"执行测试\", command=self.execute, width=10,\n background=self.background, font=(self.font, 12))\n self.execute_button.bind('', lambda event=None: self.execute_button.invoke())\n self.clear_button = tk.Button(self.root, text=\"清空\", command=self.clear_input, width=10,\n font=(self.font, 12), background=self.background)\n\n label.grid(row=0, column=0, padx=5, pady=5, sticky=\"w\")\n self.entry_box.grid(row=0, column=1, padx=5, pady=5, sticky=\"w\")\n self.execute_button.grid(row=0, column=2, padx=5, pady=5)\n self.clear_button.grid(row=0, column=3, padx=5, pady=5)\n # 第二行\n result_label = tk.Label(self.root, text=\"测试结果\", font=(self.font, 12))\n result_label.grid(row=1, column=0, padx=5, pady=5)\n # Create the result box\n # 第三行\n self.clear_text_button = tk.Button(self.root, text=\"全部清空\", command=self.clear_text, width=10,\n background=self.background,\n font=(self.font, 12))\n self.exit_button = tk.Button(self.root, text=\"退出程序\", command=self.exit, width=10,\n background=self.background,\n font=(self.font, 12))\n\n style = ttk.Style(self.root)\n style.configure('Treeview.Heading', font=('Helvetica', 12, 'bold'))\n columns_list = [\"序号\", \"测试项目\", \"方法\", \"3.2\", \"3.3\", \"3.4\", \"结果判断\", \"描述\"]\n self.tree_view = ttk.Treeview(self.root, columns=columns_list, height=20,\n show=\"headings\")\n self.tree_view.grid(row=2, column=1, columnspan=3, padx=5, pady=5, sticky=\"w\") #\n # 设置样式\n self.tree_view.configure(style='Treeview')\n self.clear_text_button.grid(row=3, column=2, padx=5, pady=5)\n self.exit_button.grid(row=3, column=3, padx=5, pady=5)\n # 第四行\n self.log_text = tk.Text(self.root,height=10)\n self.log_text.grid(row=4, column=1, columnspan=3, padx=5, pady=5,sticky=\"nwse\")\n # Start the main loop\n self.root.mainloop()\n\n def clear_input(self):\n \"\"\"清空输入框\"\"\"\n self.entry_box.delete(0, \"end\")\n logger.info(\"清空输入框\")\n\n def clear_text(self):\n \"\"\"全部清空\"\"\"\n 
self.entry_box.delete(0, \"end\")\n self.log_text.delete('1.0',\"end\")\n for column in self.tree_view[\"columns\"]:\n self.tree_view.heading(column, text=\"\")\n self.tree_view.delete(*self.tree_view.get_children())\n logger.info(\"全部清空\")\n\n def get_log(self):\n \"\"\"获取日志信息\"\"\"\n q = self.adapter_object.log_q\n while True:\n if not q.empty():\n log_msg = q.get()\n date_info = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n self.log_text.insert(1.0, f\"{date_info} {log_msg}\" + \"\\r\\n\")\n else:\n pass\n time.sleep(1)\n\n def execute(self):\n \"\"\"执行测试\"\"\"\n self.execute_button['state'] = 'disable'\n sn_text = self.entry_box.get().strip() # 获取输入框中的文本,去掉两侧的换行符\n logger.info(f\"设备:{sn_text},开始测试\")\n th = threading.Thread(target=self.thread_execute, args=(sn_text,), daemon=True)\n th.start()\n get_log_th = threading.Thread(target=self.get_log, daemon=True)\n get_log_th.start()\n\n def thread_execute(self, sn_text):\n \"\"\"线程执行测试\"\"\"\n time_str = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n time_file_str = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n filename = f\"SN_{sn_text}_{time_file_str}.xlsx\"\n log_msg = \"\"\n is_ok = False\n if not os.path.exists(self.filepath):\n log_msg = f\"找不到文件路径:{self.filepath}\"\n else:\n abs_filepath = os.path.join(self.filepath, filename)\n # abs_filepath = os.path.join(r\"C:\\Users\\lyh\\Desktop\\工作文件\\UPS3702\\测试报告\",\n # \"data.xlsx\")\n try:\n self.adapter_object.connect()\n is_ok, log_msg = self.adapter_object.read_hold_register(abs_filepath, time_str, sn_text)\n # is_ok = True\n except Exception as e:\n log_msg = f\"执行测试出错:{e}\"\n logger.info(log_msg)\n\n else:\n if is_ok: # 测试成功\n # 读取xlsx文件内容\n rows = []\n if os.path.exists(abs_filepath):\n try:\n workbook = openpyxl.load_workbook(abs_filepath)\n worksheet = workbook.active\n except Exception as e:\n log_msg = f\"打开测试报告出错:{e}\"\n else:\n if worksheet.max_row <= 2 or worksheet.max_column <= 4:\n log_msg = f\"测试报告内容为空\"\n else:\n for row in worksheet.iter_rows(values_only=True):\n rows.append(row)\n # 设置列名\n self.tree_view['columns'] = tuple(rows[1])\n self.treeview_scrollbar = ttk.Scrollbar(self.root, orient='vertical',\n command=self.tree_view.yview)\n self.treeview_scrollbar.grid(row=2, column=4, sticky=(tk.N, tk.S))\n self.tree_view.configure(yscrollcommand=self.treeview_scrollbar.set)\n\n # 设置 heading\n for i, column in enumerate(rows[0]):\n self.tree_view.heading(i, text=column)\n for row_index, row in enumerate(rows[1:]):\n if row_index == 0:\n table_head_list = list(row)\n table_head_list = [item.replace(\"\\n\", \"\") for item in table_head_list]\n row = tuple(table_head_list)\n self.tree_view.insert(\"\", tk.END, values=row)\n else:\n self.tree_view.insert(\"\", tk.END, values=row)\n log_msg = f\"测试完成!\"\n else:\n log_msg = f\"找不到报告:{abs_filepath}\"\n\n logger.info(log_msg)\n msgbox.showinfo(title=\"提示\", message=log_msg)\n self.execute_button['state'] = 'normal'\n\n def exit(self):\n \"\"\"退出\"\"\"\n self.root.destroy()\n # sys.exit(0)# 终止线程\n logger.info(f\"退出\")\n\n @staticmethod\n def about(self):\n \"\"\"返回版本号\"\"\"\n version = \"1.0\"\n show_msg = f\"版本:{version}\"\n msgbox.showinfo(title=\"提示\", message=show_msg)\n logger.info(f\"关于:{show_msg}\")\n\n\nif __name__ == \"__main__\":\n performance = Performance('./conf/performance_testing.conf')\n 
performance.mainloop()\n","repo_name":"yibeihaifeng/tkinter","sub_path":"performance_testing.py","file_name":"performance_testing.py","file_ext":"py","file_size_in_byte":10520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"26852300530","text":"\n# 팩토리얼 구하기\ndef f(n):\n if n <= 1:\n return 1\n else:\n return n * f(n-1)\n\na = f(int(input()))\na = str(a) # 문자로 만든 후 뒤집기\na = a[::-1]\ncnt = 0\ni = 0\nwhile True: # 0 개수 세기\n if a[i] != '0':\n break\n else:\n cnt += 1\n i += 1\nprint(cnt)\n","repo_name":"soheemoon37/TIL","sub_path":"algorithm/algo_solved/BOJ/10_1/1676_팩토리얼0개수.py","file_name":"1676_팩토리얼0개수.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"9605432724","text":"from sortedcontainers import SortedList\n\nclass Solution:\n def nthSuperUglyNumber(self, n: int, primes: List[int]) -> int:\n cnt = 1\n curr = 1\n nums = SortedList(primes)\n seen = set()\n while cnt < n:\n curr = nums.pop(0)\n cnt += 1\n for p in primes:\n if p*curr not in seen:\n seen.add(p*curr)\n nums.add(p*curr)\n return curr","repo_name":"heidenrei/leetcode","sub_path":"super-ugly-number/super-ugly-number.py","file_name":"super-ugly-number.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"413899091","text":"# 회문 확인용 함수 만들기 (회문이면 1 return, 아니면 0 return)\ndef palindrome(word):\n n = len(word) // 2\n for i in range(n):\n if word[i] != word[-i-1]:\n return 0\n else:\n return 1\n\n# test_case 갯수\nT = int(input())\n\nfor test_case in range(1,T+1):\n\n N, M = map(int,input().split())\n result = []\n\n # 글자판 만들기\n arr = []\n for _ in range(N):\n arr.append(list(input()))\n\n # 행방향 회문 확인하기\n for row in arr:\n if result:\n break\n for i in range(N-M+1):\n word = row[i:i+M]\n if palindrome(word):\n result = word\n break\n \n # 글자판 transpose\n for i in range(N):\n for j in range(N):\n if i > j:\n arr[i][j], arr[j][i] = arr[j][i], arr[i][j]\n\n # 열방향 회문 확인하기\n for col in arr:\n if result:\n break\n for i in range(N-M+1):\n word = col[i:i+M]\n if palindrome(word):\n result = word\n break\n\n # 출력\n print('#{} {}'.format(test_case,''.join(result)))","repo_name":"adj5672/self_study","sub_path":"Algorithm/TIL/0218/회문_안동준.py","file_name":"회문_안동준.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"38745113329","text":"from queue import PriorityQueue\r\n\r\nclass Graph:\r\n def __init__(self, num):\r\n self.vertex = num\r\n self.edges = [[-1 for i in range(num)] for j in range(num)]\r\n self.visited = []\r\n #adding vals\r\n def add_edge(self, u, v, weight):\r\n self.edges[u][v] = weight\r\n self.edges[v][u] = weight\r\n\r\n\r\ndef dijkstra(graph, source):\r\n\r\n distance = [10 ** 10] * graph.vertex\r\n distance[source] = 0\r\n queue = PriorityQueue()\r\n queue.put((distance[source], source))\r\n #check if queue not empty\r\n\r\n while not queue.empty():\r\n dist, m = queue.get()\r\n graph.visited.append(m)\r\n for j in range(graph.vertex):\r\n if j not in graph.visited and graph.edges[m][j] != -1:\r\n if distance[j] > distance[m] + graph.edges[m][j]:\r\n distance[j] = distance[m] + graph.edges[m][j]\r\n queue.put((distance[j], j))\r\n return distance[m]\r\n\r\n\r\nfile1 = open('input1.txt', 'r').read().splitlines()\r\n\r\np = int(file1[0])\r\nvertex = []\r\nedges = 
[]\r\n\r\n\r\n\r\nfor i in range(1, len(file1)):\r\n x = file1[i].split()\r\n if len(x) == 2:\r\n vertex.append(x)\r\n if len(x) > 2:\r\n edges.append(x)\r\noutput = open('output1.txt', 'w+')\r\ndef call():\r\n for i in vertex:\r\n temp = int(i[1])\r\n g = Graph(temp+2)\r\n\r\n for i in range(temp):\r\n m = edges.pop(0)\r\n g.add_edge(int(m[0]), int(m[1]), int(m[2]))\r\n\r\n x = dijkstra(g, 1)\r\n #print(x)\r\n output.write(f\"{x}\\n\")\r\ncall()\r\n","repo_name":"legaldomain/CSE-221---Algorithm","sub_path":"LAB 4/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"6341158126","text":"#!/usr/bin/python\r\n# service_employer.py\r\n#\r\n# Copyright (C) 2008-2018 Veselin Penev, https://bitdust.io\r\n#\r\n# This file (service_employer.py) is part of BitDust Software.\r\n#\r\n# BitDust is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU Affero General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# BitDust Software is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU Affero General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU Affero General Public License\r\n# along with BitDust Software. If not, see .\r\n#\r\n# Please contact us if you have any questions at bitdust.io@gmail.com\r\n#\r\n#\r\n#\r\n#\r\n\r\n\"\"\"\n..\n\nmodule:: service_employer\n\"\"\"\r\n\r\nfrom services.local_service import LocalService\r\n\r\n\r\ndef create_service():\r\n return EmployerService()\r\n\r\n\r\nclass EmployerService(LocalService):\r\n\r\n service_name = 'service_employer'\r\n config_path = 'services/employer/enabled'\r\n\r\n def dependent_on(self):\r\n return ['service_customer',\r\n 'service_nodes_lookup',\r\n ]\r\n\r\n def start(self):\r\n from customer import fire_hire\r\n from main.config import conf\r\n from main import events\r\n from raid import eccmap\r\n eccmap.Update()\r\n fire_hire.A('init')\r\n conf().addCallback('services/customer/suppliers-number',\r\n self._on_suppliers_number_modified)\r\n conf().addCallback('services/customer/needed-space',\r\n self._on_needed_space_modified)\r\n events.add_subscriber(self._on_supplier_modified, 'supplier-modified')\r\n self._do_cleanup_dht_suppliers()\r\n return True\r\n\r\n def stop(self):\r\n from customer import fire_hire\r\n from main.config import conf\r\n from main import events\r\n events.remove_subscriber(self._on_supplier_modified)\r\n conf().removeCallback('services/customer/suppliers-number')\r\n conf().removeCallback('services/customer/needed-space')\r\n fire_hire.Destroy()\r\n return True\r\n\r\n def _on_suppliers_number_modified(self, path, value, oldvalue, result):\r\n from customer import fire_hire\r\n from raid import eccmap\r\n eccmap.Update()\r\n fire_hire.ClearLastFireTime()\r\n fire_hire.A('restart')\r\n\r\n def _on_needed_space_modified(self, path, value, oldvalue, result):\r\n from customer import fire_hire\r\n fire_hire.ClearLastFireTime()\r\n fire_hire.A('restart')\r\n\r\n def _do_cleanup_dht_suppliers(self):\r\n from logs import lg\r\n from services import driver\r\n if driver.is_on('service_entangled_dht'):\r\n from dht import dht_relations\r\n from userid import my_id\r\n d = 
dht_relations.scan_customer_supplier_relations(my_id.getLocalID())\r\n d.addCallback(self._on_my_dht_relations_discovered)\r\n d.addErrback(self._on_my_dht_relations_failed)\r\n else:\r\n lg.warn('service service_entangled_dht is not ready')\r\n\r\n def _on_supplier_modified(self, evt):\r\n self._do_cleanup_dht_suppliers()\r\n\r\n def _on_my_dht_relations_discovered(self, discovered_suppliers_list):\r\n from p2p import p2p_service\r\n from contacts import contactsdb\r\n from logs import lg\r\n suppliers_to_be_dismissed = set()\r\n # clean up old suppliers\r\n for idurl in discovered_suppliers_list:\r\n if not idurl:\r\n continue\r\n if not contactsdb.is_supplier(idurl):\r\n lg.warn('dht relation with %s is not valid anymore' % idurl)\r\n suppliers_to_be_dismissed.add(idurl)\r\n for idurl in suppliers_to_be_dismissed:\r\n p2p_service.SendCancelService(\r\n remote_idurl=idurl,\r\n service_name='service_supplier',\r\n )\r\n p2p_service.SendCancelService(\r\n remote_idurl=idurl,\r\n service_name='service_supplier_relations',\r\n )\r\n if suppliers_to_be_dismissed:\r\n lg.info('found %d suppliers to be cleaned and sent CancelService() packets' % len(suppliers_to_be_dismissed))\r\n\r\n def _on_my_dht_relations_failed(self, err):\r\n from logs import lg\r\n lg.err(err)\r\n","repo_name":"vesellov/bitdust.public.old","sub_path":"services/service_employer.py","file_name":"service_employer.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"86"} +{"seq_id":"27192112033","text":"#!/usr/bin/env python\n\nprint(\"please w8\")\n\nimport numpy as np\nfrom RPLCD.i2c import CharLCD\n\nimport RPi.GPIO as GPIO\nimport time\nimport os\nimport json\nimport requests\n\nGPIO.setmode(GPIO.BCM)\n\nimport sys\nimport Adafruit_DHT\n\nTRIG = 23\nECHO = 24\nSENSOR_PIN = 12\n\nsensor = Adafruit_DHT.DHT11\npin = 4\naccessToken = \"accessToken\"\n\nroomId = \"roomId\"\n\nGPIO.setup(SENSOR_PIN, GPIO.IN)\n\nlcd = CharLCD(\"PCF8574\", 0x3f)\n\ndef display_humidity():\n print(\"Fetch Temperature and Humidity\")\n sensor = Adafruit_DHT.DHT11\n pin = 4\n humidity, temperature = Adafruit_DHT.read_retry(11, 4)\n\n lcd.cursor_pos = (0, 0)\n lcd.write_string(\"Temp: %d C\" % temperature)\n lcd.cursor_pos = (1, 0)\n lcd.write_string(\"Humidity: %d %%\" % humidity)\n\n print(\"Temperature: %d C\" % temperature)\n print(\"Humidity: %d %%\" % humidity)\n\n return (humidity, temperature)\n\ndef toggleLED(channel, onoff):\n GPIO.output(channel, onoff)\n\ndef post_data(roomId, accessToken, temperature, humidity):\n data = {\n 'roomId': str(roomId),\n 'accessToken': str(accessToken),\n 'text': '{\"Temperature\": '+str(temperature)+', \"Humidity\": '+str(humidity)+'}'\n }\n url = 'https://api.ciscospark.com/v1/messages'\n headers = {\"Authorization\":\"Bearer \" + accessToken,\n \"Content-Type\":\"application/json; charset=utf-8\"}\n response = requests.post(url, headers=headers, verify=True, json=data)\n print(response.status_code)\n print(response.text)\n print(response.json())\n return response\n\ndef callback(channel):\n print(\"motion detected\")\n humidity, temperature = display_humidity()\n\n post_data(roomId, accessToken, temperature, humidity)\n\ntry:\n GPIO.add_event_detect(SENSOR_PIN, GPIO.RISING, callback=callback)\n while True:\n time.sleep(10)\n\nexcept KeyboardInterrupt:\n print(\"Done...\")\n\nfinally:\n 
GPIO.cleanup()\n","repo_name":"arte7/iot_school","sub_path":"measurements.py","file_name":"measurements.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"73020436765","text":"import csv\nimport datetime\nfrom collections import defaultdict\n\nfrom annotation.schema.annotations_rds import create_session\n\nsession = create_session()\n\nmain_list = session.execute(\n \"\"\"\n SELECT\n claim.id AS claim_id,\n concat(verdict_line.page, '---', line_number) AS oracle,\n annotation.user\n FROM claim\n INNER JOIN annotation ON annotation.claim_id = claim.id\n INNER JOIN annotation_verdict ON annotation_verdict.annotation_id = annotation.id\n INNER JOIN verdict_line ON verdict_line.verdict_id = annotation_verdict.id\n WHERE annotation.isOracleMain = 1 and annotation.isForReportingOnly = 0 \n and annotation.verifiable<2\n and annotation.isTestMode = 0\n and annotation.version = 4\n \"\"\").fetchall()\n\n\nsubordinate_list = session.execute(\n \"\"\"\n select\n annotation.user,\n annotation.claim_id,\n concat(verdict_line.page, '---', line_number) AS annotator,\n date_format(annotation.created, '%Y-%m-%d') as Date,\n date_format(annotation.created, '%Y-%U') AS Week,\n claimtext\n from annotation\n inner join (\n SELECT\n claim.id AS claimid,\n claim.text AS claimtext\n FROM claim\n INNER JOIN annotation ON annotation.claim_id = claim.id\n WHERE annotation.isOracleMain = 1 \n and annotation.isForReportingOnly = 0 \n and annotation.verifiable<2\n and annotation.isTestMode = 0\n and annotation.version = 4\n group by claim.id\n ) as t on t.claimid = annotation.claim_id\n INNER JOIN annotation_verdict ON annotation_verdict.annotation_id = annotation.id\n INNER JOIN verdict_line ON verdict_line.verdict_id = annotation_verdict.id\n where annotation.isOracleMain = 0 \n and annotation.isForReportingOnly = 0\n and annotation.verifiable<2\n and annotation.isTestMode = 0\n and annotation.version = 4\n \"\"\").fetchall()\n\nmain_evidence_all = defaultdict(set)\nsubordinate_evidence_all = defaultdict(set)\nsubordinate_evidence_texts = {}\n\nmain_evidence = defaultdict(lambda: defaultdict(set))\nsubordinate_evidence = defaultdict(lambda: defaultdict(set))\n\nsubordinate_day_evidence = defaultdict(lambda: defaultdict(lambda: defaultdict(set)))\nsubordinate_day_evidence_all = defaultdict(lambda: defaultdict(set))\n\nsubordinate_week_evidence = defaultdict(lambda: defaultdict(lambda: defaultdict(set)))\nsubordinate_week_evidence_all = defaultdict(lambda: defaultdict(set))\n\nfor item in main_list:\n main_evidence_all[item['claim_id']].add(item['oracle'])\n main_evidence[item['user']][item['claim_id']].add(item['oracle'])\n\nfor item in subordinate_list:\n subordinate_evidence_all[item['claim_id']].add(item['annotator'])\n subordinate_evidence_texts[item['claim_id']] = item['claimtext']\n subordinate_day_evidence_all[item['Date']][item['claim_id']].add(item['annotator'])\n subordinate_week_evidence_all[item['Week']][item['claim_id']].add(item['annotator'])\n\n subordinate_evidence[item['user']][item['claim_id']].add(item['annotator'])\n subordinate_day_evidence[item['user']][item['Date']][item['claim_id']].add(item['annotator'])\n subordinate_week_evidence[item['user']][item['Week']][item['claim_id']].add(item['annotator'])\n\n\nusers = set()\nusers.update(main_evidence.keys())\nusers.update(subordinate_evidence.keys())\n\n\ndef prec_rec(relevant, retrieved):\n precision = len(relevant.intersection(retrieved)) / 
len(retrieved) if len(retrieved) > 0 else 1\n    recall = len(relevant.intersection(retrieved)) / len(relevant) if len(relevant) > 0 else 1\n    return precision, recall\n\n\ndef evidence_missed_by_all(dataset):\n    report_html = \"\"\n    missed_claims = defaultdict(list)\n    for claim in dataset:\n        relevant = main_evidence_all[claim]\n        retrieved = subordinate_evidence_all[claim]\n        diff = relevant.difference(retrieved)\n        missed_claims[claim].extend(list(diff))\n    missed_sorted_keys = sorted(missed_claims.keys(), key=lambda k: len(missed_claims[k]), reverse=True)\n    for key in missed_sorted_keys:\n        if len(missed_claims[key]) == 0:\n            continue\n        claim_link = 'https://fever-annotate.corp.amazon.com/#!/label-claims/%d' % key\n        report_html += '%s %s\n' % (claim_link, subordinate_evidence_texts[key])\n        for missed_claim in sorted(missed_claims[key]):\n            report_html += '  • %s\n' % missed_claim\n        report_html += '    '\n    return report_html\n\n\ndef evidence_missed_by_user(dataset, user_set):\n    report_html = ''\n    for user in user_set:\n        total_missed = 0\n        missed_claims = defaultdict(list)\n        for claim in dataset:\n            relevant = main_evidence_all[claim]\n            retrieved = subordinate_evidence[user][claim]\n            diff = relevant.difference(retrieved)\n            missed_claims[claim].extend(list(diff))\n            total_missed += len(diff)\n        missed_sorted_keys = sorted(missed_claims.keys(), key=lambda k: len(missed_claims[k]), reverse=True)\n\n        report_html += '%s (%d claims)\n' % (user, total_missed)\n        report_html += '    '\n        for key in missed_sorted_keys:\n            if len(missed_claims[key]) == 0:\n                continue\n            claim_link = 'https://fever-annotate.corp.amazon.com/#!/label-claims/%d' % key\n            report_html += '%s %s\n' % (claim_link, subordinate_evidence_texts[key])\n            for missed_claim in sorted(missed_claims[key]):\n                report_html += '  • %s\n' % missed_claim\n            report_html += '    \n'\n        report_html += '    \n
    \\n'\n return report_html\n\n\ndef macro_pr(user, dataset):\n p = []\n r = []\n for claim in dataset:\n relevant = main_evidence_all[claim]\n retrieved = subordinate_evidence[user][claim]\n\n precision, recall = prec_rec(relevant, retrieved)\n p.append(precision)\n r.append(recall)\n\n return sum(p)/len(p) if len(p) > 0 else 0, sum(r)/len(r) if len(r) > 0 else 0\n\n\ndef micro_pr_all(dataset):\n p = []\n r = []\n for claim in dataset:\n relevant = main_evidence_all[claim]\n retrieved = subordinate_evidence_all[claim]\n\n precision, recall = prec_rec(relevant, retrieved)\n p.append(precision)\n r.append(recall)\n\n return sum(p)/len(p) if len(p) > 0 else 0, sum(r)/len(r) if len(r) > 0 else 0\n\n\ndef macro_pr_all_by_user(dataset, user_set):\n p = []\n r = []\n for user in user_set:\n for claim in dataset:\n if claim not in subordinate_evidence[user]:\n continue\n\n relevant = main_evidence_all[claim]\n retrieved = subordinate_evidence[user][claim]\n\n precision, recall = prec_rec(relevant, retrieved)\n p.append(precision)\n r.append(recall)\n\n return sum(p)/len(p) if len(p) > 0 else \"-\", sum(r)/len(r) if len(r) > 0 else \"-\"\n\n\ndef macro_pr_time_by_user(dataset, user_set, time):\n p = []\n r = []\n for user in user_set:\n for claim in dataset[user][time]:\n relevant = main_evidence_all[claim]\n retrieved = dataset[user][time][claim]\n\n precision, recall = prec_rec(relevant, retrieved)\n p.append(precision)\n r.append(recall)\n\n return sum(p)/len(p) if len(p) > 0 else \"-\", sum(r)/len(r) if len(r) > 0 else \"-\"\n\n\np_by_user = defaultdict(float)\nr_by_user = defaultdict(float)\n\np_by_day = defaultdict(float)\nr_by_day = defaultdict(float)\n\np_by_week = defaultdict(float)\nr_by_week = defaultdict(float)\n\np_by_day_by_user = defaultdict(lambda: defaultdict(float))\nr_by_day_by_user = defaultdict(lambda: defaultdict(float))\n\np_by_week_by_user = defaultdict(lambda: defaultdict(float))\nr_by_week_by_user = defaultdict(lambda: defaultdict(float))\n\n\nrracc = 0\nppacc = 0\n\nfor u in users:\n up, ur = macro_pr(u, subordinate_evidence[u])\n p_by_user[u] = up\n r_by_user[u] = ur\n\n for d in subordinate_day_evidence[u].keys():\n upd, urd = macro_pr(u, subordinate_day_evidence[u][d])\n\n p_by_day_by_user[d][u] = upd\n r_by_day_by_user[d][u] = urd\n\n for d in subordinate_week_evidence[u].keys():\n upw, urw = macro_pr(u, subordinate_week_evidence[u][d])\n\n p_by_week_by_user[d][u] = upw\n r_by_week_by_user[d][u] = urw\n\n\npp, rr = macro_pr_all_by_user(subordinate_evidence_all, users)\nprint(pp, rr)\n\nfor d in subordinate_day_evidence_all.keys():\n p_by_day[d], r_by_day[d] = macro_pr_time_by_user(subordinate_day_evidence, users, d)\n\nfor d in subordinate_week_evidence_all.keys():\n p_by_week[d], r_by_week[d] = macro_pr_time_by_user(subordinate_week_evidence, users, d)\n\n\ndef save_csv(filename, header, report):\n with open(\"data/reports/\"+filename+\".csv\", \"w+\") as f:\n writer = csv.writer(f)\n writer.writerow(header)\n for line in report:\n writer.writerow(line)\n\n\ndef save_html(filename, report):\n with open('data/reports/'+filename+'.html', 'w', encoding='utf-8') as f:\n f.write('\\n\\n')\n f.write('\\n\\n\\n'\n '\\n')\n f.write('\\n')\n f.write(report)\n f.write('\\n')\n f.write('\\n')\n\n\nusers = list(users)\n\n# Manually exclude users from the reports (in case they are not currently active)\n# NB: This is a duplicate of the list in annotation_reporting_service.py (line 23)\nexclude_list = ['esservis', 'flynna', 'guest', 'hjingnin',\n 'hokathle', 'mpearsal', 
'stefom', 'chrchrs']\n\nusers = [user for user in users if user not in exclude_list]\n\n\ndef daily_report(title, data, user_set):\n ds = data\n report = []\n for key in sorted(ds.keys()):\n report_line = [key]\n for user in user_set:\n report_line.append(ds[key][user])\n report.append(report_line)\n\n header = [title]\n header.extend(user_set)\n return header, report\n\n\ndef user_report(title, data, user_set):\n report = []\n\n for user in sorted(user_set):\n report_line = [user, data[user]]\n report.append(report_line)\n\n header = [\"User\", title]\n return header, report\n\n\ndef date_report(title, data, days):\n report = []\n\n for day in sorted(days):\n report_line = [day, data[day]]\n report.append(report_line)\n\n header = [\"Date\", title]\n return header, report\n\n\nsave_csv(\"p_user_by_day\", *daily_report(\"Date\", p_by_day_by_user, users))\nsave_csv(\"r_user_by_day\", *daily_report(\"Date\", r_by_day_by_user, users))\nsave_csv(\"p_user_by_week\", *daily_report(\"Week\", p_by_week_by_user, users))\nsave_csv(\"r_user_by_week\", *daily_report(\"Week\", r_by_week_by_user, users))\n\nsave_csv(\"p_by_user\", *user_report(\"Precision\", p_by_user, users))\nsave_csv(\"r_by_user\", *user_report(\"Recall\", r_by_user, users))\n\n\nsave_csv(\"p_by_day\", *date_report(\"Precision\", p_by_day, p_by_day.keys()))\nsave_csv(\"r_by_day\", *date_report(\"Recall\", r_by_day, r_by_day.keys()))\n\nsave_csv(\"p_by_week\", *date_report(\"Precision\", p_by_week, p_by_week.keys()))\nsave_csv(\"r_by_week\", *date_report(\"Recall\", r_by_week, r_by_week.keys()))\n\nsave_csv(\"pr\", [\"Precision\", \"Recall\"], [[pp, rr]])\n\nsave_html(\"missed_by_all\", evidence_missed_by_all(subordinate_evidence_all))\nsave_html(\"missed_by_user\", evidence_missed_by_user(subordinate_evidence_all, users))\n\ntoday = datetime.datetime.utcnow()\nlast_week = today + datetime.timedelta(weeks=-1)\n\nstart_date = datetime.datetime.strptime(\"2017-09-01\", \"%Y-%m-%d\")\nqdate = start_date\n\nlabels1 = []\n\nlabels2 = []\npdata = []\nrdata = []\npddata = []\nrddata = []\n\n\ndef get_date_week(date_obj):\n return datetime.datetime.strptime(date_obj.strftime(\"%Y-%U-1\"), \"%Y-%U-%w\")\n\n\nwhile get_date_week(qdate) <= get_date_week(today):\n labels1.append(qdate.strftime(\"%Y-%U\"))\n pdata.append(p_by_week[qdate.strftime(\"%Y-%U\")])\n rdata.append(r_by_week[qdate.strftime(\"%Y-%U\")])\n qdate = qdate+datetime.timedelta(weeks=1)\n\nqdate = start_date\n\n\ndef get_date_day(date_obj):\n return datetime.datetime.strptime(date_obj.strftime(\"%Y-%m-%d\"), \"%Y-%m-%d\")\n\n\nwhile get_date_day(qdate) <= get_date_day(today):\n labels2.append(qdate.strftime(\"%Y-%m-%d\"))\n print(qdate.strftime(\"%Y-%m-%d\"))\n pddata.append(p_by_day[qdate.strftime(\"%Y-%m-%d\")])\n rddata.append(r_by_day[qdate.strftime(\"%Y-%m-%d\")])\n qdate = qdate+datetime.timedelta(days=1)\n\n\nsdata = [pdata, rdata]\noracle_report = {\n \"p\": pp,\n \"r\": rr,\n \"p_tw\": p_by_week[today.strftime(\"%Y-%U\")],\n \"r_tw\": r_by_week[today.strftime(\"%Y-%U\")],\n \"p_lw\": p_by_week[last_week.strftime(\"%Y-%U\")],\n \"r_lw\": r_by_week[last_week.strftime(\"%Y-%U\")],\n \"chart\": {\n \"data\": sdata,\n \"series\": [\"Precision (Weekly)\", \"Recall (Weekly)\"],\n \"labels\": labels1\n },\n \"chart2\": {\n \"data\": [pddata, rddata],\n \"series\": [\"Precision (Daily)\", \"Recall (Daily)\"],\n \"labels\": labels2\n }\n\n}\n\nimport json\n\njson.dump(oracle_report, open(\"data/oracle.json\", 
\"w\"))\n","repo_name":"awslabs/fever","sub_path":"fever-annotations-platform/src/annotation/jobs/periodic_jobs/oracle_eval.py","file_name":"oracle_eval.py","file_ext":"py","file_size_in_byte":13407,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"86"} +{"seq_id":"25056287000","text":"import pandas as pd\n\nfrom data.Perno import Perno\nfrom manage.FileIniHandler import FileIniHandler\nfrom util.Globals import OK, KO, SKIP\nfrom util.Keys import *\n\n\nclass TestVerifier(object):\n\n # -------------------------------------------------------\n #\n # -------------------------------------------------------\n def __init__(self, json_data, sn_to_find=None):\n if json_data is None:\n return\n\n self.serial_number_to_find = sn_to_find\n self.file_ini_handler = FileIniHandler()\n\n self.data_frame = pd.read_json(json_data, orient='split')\n\n self.serial_numbers = self.data_frame[KEY_SN]\n self.num_perni_validi = 0\n\n # solo per i file out\n self.esito_ch1 = self.data_frame.get('ESITO_CH1')\n self.esito_ch2 = self.data_frame.get('ESITO_CH2')\n\n self.perni = []\n\n # serve per ricavare quanti valori non sono NA da cui si ricava poi il numero di perni\n series_as_booleans = self.serial_numbers.notna()\n channel_as_dec = 23 # 0x17\n\n for serial_number_index in range(self.serial_numbers.size):\n if series_as_booleans[serial_number_index]:\n serial_number = str(int(self.serial_numbers[serial_number_index]))\n\n ch1_as_hex = hex(channel_as_dec)\n channel_as_dec += 1\n ch2_as_hex = hex(channel_as_dec)\n channel_as_dec += 1\n\n perno = Perno(serial_number)\n perno.ch1 = ch1_as_hex\n perno.ch2 = ch2_as_hex\n perno.ch1_column_load = self.data_frame[perno.getTitleColumnLoadCh1()]\n perno.ch1_column_dldt = self.data_frame[perno.getTitleColumnDLDTCh1()]\n perno.ch2_column_load = self.data_frame[perno.getTitleColumnLoadCh2()]\n perno.ch2_column_dldt = self.data_frame[perno.getTitleColumnDLDTCh2()]\n\n if self.serial_number_to_find is not None and self.serial_number_to_find != serial_number:\n perno.skip = True\n continue\n\n self.num_perni_validi += 1\n\n self.perni.append(perno)\n\n if self.esito_ch1 is not None:\n esito = self.esito_ch1[serial_number_index]\n perno.ch1_esito = esito\n\n if self.esito_ch2 is not None:\n esito = self.esito_ch2[serial_number_index]\n perno.ch2_esito = esito\n\n self.column_sn = []\n self.column_ch = []\n\n self.initTest()\n\n # -------------------------------------------------------\n #\n # -------------------------------------------------------\n def initTest(self):\n\n num_channels = 2 * self.num_perni_validi\n\n self.error_test_1 = [OK for _ in range(num_channels)]\n self.error_test_2 = [OK for _ in range(num_channels)]\n self.error_test_3 = [OK for _ in range(num_channels)]\n self.error_test_4 = [OK for _ in range(num_channels)]\n self.error_test_5 = [KO for _ in range(num_channels)]\n self.error_test_6 = [KO for _ in range(num_channels)]\n self.error_test_7 = [OK for _ in range(num_channels)]\n self.error_test_8 = [KO for _ in range(num_channels)]\n self.esito_dei_test = [KO for _ in range(num_channels)]\n\n # -------------------------------------------------------\n #\n # -------------------------------------------------------\n def buildDictionary(self):\n\n row_index = 0\n\n for perno in self.perni:\n if perno.skip:\n row_index += 2\n continue\n\n serial_number = perno.serial_number\n channel_1 = perno.ch1\n channel_2 = perno.ch2\n\n self.column_sn.append(serial_number)\n self.column_sn.append(serial_number)\n 
self.column_ch.append(channel_1)\n            self.column_ch.append(channel_2)\n\n            ch1_column_load = perno.ch1_column_load\n            ch1_column_dldt = perno.ch1_column_dldt\n            ch1_esito = perno.ch1_esito\n\n            self.startTestChannel(row_index, ch1_column_load, ch1_column_dldt, ch1_esito)\n\n            row_index += 1\n\n            ch2_column_load = perno.ch2_column_load\n            ch2_column_dldt = perno.ch2_column_dldt\n            ch2_esito = perno.ch2_esito\n\n            self.startTestChannel(row_index, ch2_column_load, ch2_column_dldt, ch2_esito)\n\n            len1 = len(ch1_column_load)\n            len2 = len(ch2_column_load)\n\n            if len1 <= len2:\n                min_len = len1\n            else:\n                min_len = len2\n\n            for sub_index in range(min_len):\n                self.test3(row_index, ch2_column_load, ch1_column_load, sub_index)\n                self.test5(row_index, ch2_column_load, ch1_column_load, sub_index)\n\n            row_index += 1\n\n        self.controllaEsito()\n\n        data_dictionary = {\n            KEY_SN: self.column_sn,\n            KEY_CHANNEL: self.column_ch,\n            KEY_TEST_1: self.error_test_1,\n            KEY_TEST_2: self.error_test_2,\n            KEY_TEST_3: self.error_test_3,\n            KEY_TEST_4: self.error_test_4,\n            KEY_TEST_5: self.error_test_5,\n            KEY_TEST_6: self.error_test_6,\n            KEY_TEST_7: self.error_test_7,\n            KEY_TEST_8: self.error_test_8,\n            KEY_ESITO: self.esito_dei_test\n        }\n\n        return data_dictionary\n\n    # -------------------------------------------------------\n    #\n    # -------------------------------------------------------\n    def controllaEsito(self):\n        row_index = 0\n\n        for perno in self.perni:\n            if perno.skip:\n                continue\n\n            if self.areTestOK(row_index):\n                self.esito_dei_test[row_index] = OK\n\n            row_index += 1\n\n            if self.areTestOK(row_index):\n                self.esito_dei_test[row_index] = OK\n\n            row_index += 1\n\n    # -------------------------------------------------------\n    #\n    # -------------------------------------------------------\n    def startTestChannel(self, row_index, column_load, column_dldt, esito):\n\n        self.test1(row_index, column_load)\n\n        for sub_index in range(len(column_load)):\n            self.test2(row_index, column_load, sub_index)\n            self.test4(row_index, column_dldt, sub_index)\n            self.test6(row_index, column_load, sub_index)\n\n        self.test7(row_index, column_load)\n        self.test8(row_index, esito)\n\n    # -------------------------------------------------------\n    # TEST_1: initial value of the single channel within a given MAX threshold\n    # -------------------------------------------------------\n    def test1(self, row_index, column_load):\n        first_load = column_load[0]\n\n        th_max = int(self.file_ini_handler.test_1_th_max)\n        value = abs(first_load)\n\n        if value > th_max:\n            self.error_test_1[row_index] = KO\n\n    # -------------------------------------------------------\n    # TEST_2: single channel within a given MIN-MAX range with respect to the initial value\n    # -------------------------------------------------------\n    def test2(self, row_index, column_load, sub_index):\n        first_load = column_load[0]\n\n        th_max = int(self.file_ini_handler.test_2_th_max)\n        value = abs(column_load[sub_index]) - abs(first_load)\n\n        if value > th_max:\n            self.error_test_2[row_index] = KO\n\n    # -------------------------------------------------------\n    # TEST_3: absolute value of the channel 1-2 difference always within a MAX threshold\n    # -------------------------------------------------------\n    def test3(self, row_index, column_load, column_load_prev, sub_index):\n        th_max = int(self.file_ini_handler.test_3_th_max)\n        value = abs(column_load_prev[sub_index] - column_load[sub_index])\n\n        if value > th_max:\n            self.error_test_3[row_index - 1] = KO\n            self.error_test_3[row_index] = KO\n\n    # 
-------------------------------------------------------\n    # TEST_4: absolute value of the single channel derivative always within a MIN-MAX range\n    # -------------------------------------------------------\n    def test4(self, row_index, column_dldt, sub_index):\n        th_max = int(self.file_ini_handler.test_4_th_max)\n        value = abs(column_dldt[sub_index])\n\n        if value > th_max:\n            self.error_test_4[row_index] = KO\n\n    # -------------------------------------------------------\n    # TEST_5: maximum of the absolute difference between the 2 channels, which must NOT lie within a MIN-MAX range (typically 0 and 2)\n    # -------------------------------------------------------\n    def test5(self, row_index, column_load, column_load_prev, sub_index):\n        th_max = int(self.file_ini_handler.test_5_th_max)\n        value = abs(column_load_prev[sub_index] - column_load[sub_index])\n\n        if value > th_max:\n            self.error_test_5[row_index - 1] = OK\n            self.error_test_5[row_index] = OK\n\n    # -------------------------------------------------------\n    # TEST_6: maximum of the difference between the current value and the initial value, which MUST be greater than a minimum value (typically 0)\n    # -------------------------------------------------------\n    def test6(self, row_index, column_load, sub_index):\n        first_load = column_load[0]\n\n        th_min = int(self.file_ini_handler.test_6_th_min)\n        value = abs(column_load[sub_index] - first_load)\n\n        if value > th_min:\n            self.error_test_6[row_index] = OK\n\n    # -------------------------------------------------------\n    # TEST_7: maximum of the difference between the final value and the initial value, which MUST be below a MAX threshold\n    # -------------------------------------------------------\n    def test7(self, row_index, column_load):\n        first_load = column_load[0]\n\n        th_max = int(self.file_ini_handler.test_7_th_max)\n        value = abs(column_load[len(column_load) - 1] - first_load)\n\n        if value > th_max:\n            self.error_test_7[row_index] = KO\n\n    # -------------------------------------------------------\n    # TEST_8: thermal compensation\n    # -------------------------------------------------------\n    def test8(self, row_index, esito):\n        size = len(self.perni) * 2\n\n        if row_index >= size:\n            self.error_test_8[row_index] = SKIP\n        else:\n            if esito is None:\n                self.error_test_8[row_index] = SKIP\n            elif esito:\n                self.error_test_8[row_index] = OK\n            else:\n                self.error_test_8[row_index] = KO\n\n    # -------------------------------------------------------\n    #\n    # -------------------------------------------------------\n    def areTestOK(self, row_index):\n        return self.error_test_1[row_index] == OK and \\\n               self.error_test_2[row_index] == OK and \\\n               self.error_test_3[row_index] == OK and \\\n               self.error_test_4[row_index] == OK and \\\n               self.error_test_5[row_index] == OK and \\\n               self.error_test_6[row_index] == OK and \\\n               self.error_test_7[row_index] == OK and \\\n               (self.error_test_8[row_index] == OK or self.error_test_8[row_index] == SKIP)\n","repo_name":"Tirrel/PyDashboard","sub_path":"manage/TestVerifier.py","file_name":"TestVerifier.py","file_ext":"py","file_size_in_byte":11303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"71544620125","text":"from kivy.base import EventLoop\nfrom kivymd.uix.textfield import MDTextField\n\n\nclass RightClickTextInput(MDTextField):\n    def on_touch_down(self, touch):\n        super(RightClickTextInput, self).on_touch_down(touch)\n        if touch.button == 'right':\n            print(\"right mouse clicked\")\n            pos = super(RightClickTextInput, 
self).to_local(*self._touch_down.pos, relative=True)\n            self._show_cut_copy_paste(touch.pos, EventLoop.window, mode='paste')\n\n\n","repo_name":"GoBig87/Google-Places-Email-Extractor","sub_path":"ui/widgets/paste_textinput.py","file_name":"paste_textinput.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"14662648978","text":"from flask import render_template, url_for, redirect, request, send_from_directory\nfrom datetime import datetime\nimport os\nimport json\nimport time\nfrom app import app\nfrom pprint import pprint\nfrom operator import itemgetter\nimport redis\nimport pickle\nimport pymysql\nimport hashlib\n\n# import dstw\n# Global var, ugly. I know. :-(\nrconn = redis.Redis(host=os.environ[\"DCS_SERVER_TRACKER_REDIS_IP\"],port=os.environ[\"DCS_SERVER_TRACKER_REDIS_PORT\"],db=os.environ[\"DCS_SERVER_TRACKER_REDIS_DB\"])\n\ndef executeQuery(query,params=[],type=\"all\",expiry=240):\n    queryHash = \"query_\"+hashlib.md5(str(query+str(params)).encode()).hexdigest()\n    if rconn.exists(queryHash) == 1:\n        return pickle.loads(rconn.get(queryHash))\n    db = pymysql.connect(host=os.environ[\"DCS_SERVER_TRACKER_MYSQL_SERVER\"],\n                          port=int(os.environ[\"DCS_SERVER_TRACKER_MYSQL_PORT\"]),\n                          user=os.environ[\"DCS_SERVER_TRACKER_MYSQL_USERNAME\"],\n                          password=os.environ[\"DCS_SERVER_TRACKER_MYSQL_PASSWORD\"],\n                          db=os.environ[\"DCS_SERVER_TRACKER_MYSQL_DATABASE\"],\n                          cursorclass=pymysql.cursors.DictCursor,\n                          use_unicode=True,\n                          charset=\"utf8\")\n    db.autocommit(True)\n    cursor = db.cursor()\n    cursor.execute(query,params)\n    # fetch the rows before closing the cursor and connection: the DB-API does\n    # not guarantee that results are still readable after close()\n    if type==\"all\":\n        result = cursor.fetchall()\n    elif type==\"one\":\n        result = cursor.fetchone()\n    else:\n        cursor.close()\n        db.close()\n        return None\n    cursor.close()\n    db.close()\n    rconn.set(queryHash,pickle.dumps(result))\n    rconn.expire(queryHash,expiry)\n    return pickle.loads(rconn.get(queryHash))\n\n\nsql_servers_online = \"SELECT * FROM servers WHERE status='up'\"\nsql_servers_offline = \"SELECT * FROM servers WHERE status='down'\"\nsql_servers_both = \"SELECT * FROM servers\"\n\nsql_server_select = \"SELECT * FROM servers WHERE INSTANCE_ID=%s\"\nsql_server_history = \"SELECT * FROM scenarios WHERE instance_id=%s ORDER BY start DESC\"\nsql_server_players = \"SELECT * FROM players WHERE INSTANCE_ID=%s ORDER BY TIMESTAMP ASC\"\nsql_server_samehost = \"SELECT * FROM servers WHERE IP_ADDRESS=%s AND PORT!=%s ORDER BY PORT\"\n\n#sql_server_update_country_iso = \"UPDATE servers SET country_iso=%s WHERE INSTANCE_ID=%s\"\n#sql_server_update_country_name = \"UPDATE servers SET country_name=%s WHERE INSTANCE_ID=%s\"\n\nsql_stats_online = \"SELECT * FROM servers WHERE status='up'\"\nsql_stats_all = \"SELECT * FROM servers\"\n\n# The default/servers page\n@app.route(\"/\")\n@app.route(\"/servers/\")\ndef page_servers():\n    edJSON = fetchJSON()\n\n    search_prep={}\n    search_active = {}\n\n    search_prep['servers'] = request.args.get('servers', default = 'online', type = str)\n    search_prep['column'] = request.args.get('column', default = 'players', type = str)\n    search_prep['order'] = request.args.get('order', default = 'desc', type = str)\n\n    # Depending on search queries sort stuff\n    if search_prep['column'] == \"players\":\n        search_field = \"PLAYERS\"\n    elif search_prep['column'] == \"servers\":\n        search_field = \"NAME\"\n    elif search_prep['column'] == \"scenario\":\n        search_field = \"MISSION_NAME\"\n    elif search_prep['column'] == \"countries\":\n        search_field = 
\"country_iso\"\n else:\n search_field = \"PLAYERS\"\n\n if search_prep['order'] == \"desc\":\n search_order = True\n search_order_sql = \"DESC\"\n elif search_prep['order'] == \"asc\":\n search_order = False\n search_order_sql = \"ASC\"\n else:\n search_order_sql = \"DESC\"\n\n if search_prep['servers'] == \"online\":\n servers = executeQuery(sql_servers_online)\n search_active['servers_online'] = True\n elif search_prep['servers'] == \"offline\":\n servers = executeQuery(sql_servers_offline)\n search_active['servers_offline'] = True\n elif search_prep['servers'] == \"both\":\n servers = executeQuery(sql_servers_both)\n search_active['servers_both'] = True\n else:\n servers = edJSON['SERVERS']\n search_active['servers_online'] = True\n\n # Prepare the list of servers for publication\n for server in servers:\n server['name'] = str(server['NAME'])[:120]\n server['players'] = int(server['PLAYERS'])\n server['instance_id'] = str(server['IP_ADDRESS'])+\":\"+str(server['PORT'])\n if server['DESCRIPTION'] == \"No\":\n server['DESCRIPTION'] = \"\"\n else:\n server['DESCRIPTION'] = server['DESCRIPTION'].replace('
    ','')\n if server['PASSWORD']==\"No\":\n del server['PASSWORD']\n\n\n # Sort the list\n servers = sortListofDicts(servers,search_field,search_order)\n\n # Print everything to screen\n content = render_template(\"servers.html\", server_count=len(edJSON['SERVERS']), player_count=edJSON['PLAYERS_COUNT'], servers=servers, search_prep=search_prep, search_active=search_active)\n active = {\"servers\":True}\n edJSON = fetchJSON()\n return render_template(\"template.html\", title=\"DCS Server Tracker\", content=content, active=active, last_update=timestamp_pretty(edJSON['timestamp']))\n\n\n# Show data of a specific server\n@app.route(\"/servers/\")\ndef page_server(instance_id):\n server = executeQuery(sql_server_select, [instance_id],\"one\")\n\n server['DESCRIPTION'] = str(server['DESCRIPTION']).replace('%lt;','<')\n server['DESCRIPTION'] = str(server['DESCRIPTION']).replace('%gt;','>')\n server['DESCRIPTION'] = str(server['DESCRIPTION']).replace(' ','&')\n server['MISSION_TIME_FORMATTED'] = str(server['MISSION_TIME_FORMATTED']).replace(' ','&')\n if server['PASSWORD']==\"No\":\n del server['PASSWORD']\n if server['status']==\"down\":\n del server['status']\n\n server['samehost'] = executeQuery(sql_server_samehost,[server['IP_ADDRESS'],server['PORT']])\n\n results = executeQuery(sql_server_players, [instance_id])\n values = []\n for result in results:\n value = {}\n value['key'] = timestamp_timeonly(result['timestamp'])\n value['val'] = result['players']\n values.append(value)\n\n history = executeQuery(sql_server_history, [instance_id])\n for event in history:\n event['start'] = timestamp_pretty(event['start'])\n event['end'] = timestamp_timeonly(event['end'])\n\n content = render_template(\"server.html\", server=server, values=values, history=history)\n active = {\"servers\":True}\n edJSON = fetchJSON()\n return render_template(\"template.html\", title=server['NAME']+\" - DCS Server Tracker\", content=content, active=active, last_update=timestamp_pretty(edJSON['timestamp']))\n\n\n# The /stats-page with whatever stats\n@app.route(\"/stats/\")\ndef page_stats():\n\n metadata = pickle.loads(rconn.get(\"dcst_stats_metadata\"))\n countrydata = pickle.loads(rconn.get(\"dcst_stats_countrydata\"))\n allPlayers = pickle.loads(rconn.get(\"dcst_stats_allPlayers\"))\n countrydataall = pickle.loads(rconn.get(\"dcst_stats_countrydataall\"))\n\n content = render_template(\"stats.html\", metadata=metadata, countrydata=countrydata, allPlayers=allPlayers, countrydataall=countrydataall)\n active = {\"stats\":True}\n edJSON = fetchJSON()\n return render_template(\"template.html\", title=\"Stats - DCS Server Tracker\", content=content, active=active, last_update=timestamp_pretty(edJSON['timestamp']))\n\n\n# The /about-page is just a formality at this point really\n@app.route(\"/about/\")\ndef page_about():\n content = render_template(\"about.html\")\n active = {\"about\":True}\n edJSON = fetchJSON()\n return render_template(\"template.html\", title=\"About - DCS Server Tracker\", content=content, active=active, last_update=timestamp_pretty(edJSON['timestamp']))\n\n\n## Functions!\n\n# Latest timestamp\ndef data_latest_meta():\n metadata = {}\n metadata['dcst_meta_players_count'] = str(rconn.lrange(\"dcst_meta_players_count\",0,0)[0].decode()).split(\",\")[1]\n metadata['dcst_meta_servers_max_data'] = str(rconn.lrange(\"dcst_meta_servers_max_data\",0,0)[0].decode()).split(\",\")[1]\n metadata['dcst_meta_servers_online'] = str(rconn.lrange(\"dcst_meta_servers_online\",0,0)[0].decode()).split(\",\")[1]\n 
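# each entry in these redis lists is a comma-joined pair (presumably timestamp,value); index [1] keeps the value part\n    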
metadata['dcst_meta_servers_count'] = str(rconn.lrange(\"dcst_meta_servers_count\",0,0)[0].decode()).split(\",\")[1]\n metadata['dcst_meta_servers_max_count'] = str(rconn.lrange(\"dcst_meta_servers_max_count\",0,0)[0].decode()).split(\",\")[1]\n return metadata\n\n# Print a timestamp pretty\ndef timestamp_pretty(timestamp):\n return datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')\n\ndef timestamp_timeonly(timestamp):\n return datetime.utcfromtimestamp(timestamp).strftime('%H:%M:%S')\n\ndef fetchJSON():\n return pickle.loads( rconn.get(\"dcst_meta_servers_online\") )\n\ndef sortListofDicts(myList,myColumn,reverse=False):\n return sorted(myList, key=itemgetter(myColumn), reverse=reverse)\n\ndef getCountryISO(instance_id):\n server = executeQuery(sql_server_select, [instance_id],\"one\")\n return server['country_iso']\n\n\ndef getCountryName(instance_id):\n server = executeQuery(sql_server_select, [instance_id],\"one\")\n return server['country_name']\n","repo_name":"flying-huckleberry/dcs_server_tracker","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"38478316442","text":"import random as rand\n\n\nclass Grid:\n\n translate_to_dict = {\n 1: \"A \",\n 2: \"B \",\n 3: \"C \",\n 4: \"D \",\n 5: \"E \",\n 6: \"F \",\n 7: \"G \",\n 8: \"H \",\n 9: \"I \",\n 10: \"J \",\n 11: \"K \",\n 12: \"L \",\n 13: \"M \",\n 14: \"N \",\n 15: \"O \",\n 16: \"P \",\n 17: \"Q \",\n 18: \"R \",\n 19: \"S \",\n 20: \"T \",\n 21: \"U \",\n 22: \"V \",\n 23: \"W \",\n 24: \"X \",\n 25: \"Y \",\n 26: \"Z \",\n }\n translate_from_dict = {\n \"A\": 1,\n \"B\": 2,\n \"C\": 3,\n \"D\": 4,\n \"E\": 5,\n \"F\": 6,\n \"G\": 7,\n \"H\": 8,\n \"I\": 9,\n \"J\": 10,\n \"K\": 11,\n \"L\": 12,\n \"M\": 13,\n \"N\": 14,\n \"O\": 15,\n \"P\": 16,\n \"Q\": 17,\n \"R\": 18,\n \"S\": 19,\n \"T\": 20,\n \"U\": 21,\n \"V\": 22,\n \"W\": 23,\n \"X\": 24,\n \"Y\": 25,\n \"Z\": 26,\n }\n\n def __init__(self, rows, columns):\n self.griddy = []\n self.rows = rows\n self.columns = columns\n self.row_legend = []\n self.column_legend = []\n self.remaining_guess_list = []\n self.remaining_ship_list = []\n # create legends\n for i in range(1, rows + 1):\n self.row_legend.append([Grid.translate_to_dict.get(i)])\n\n for j in range(1, columns + 1):\n if j < 10:\n self.column_legend.append([\"\" + str(j) + \" \"])\n else:\n self.column_legend.append([\"\" + str(j) + \"\"])\n\n # create board matrix\n for i in range(rows):\n temp_list = []\n for j in range(columns):\n temp_list.append([\" \"])\n #create list of all coords for guesses\n self.remaining_guess_list.append([i,j])\n self.griddy.append(temp_list)\n\n\n def print_grid(self):\n column_string = \"[' ']\"\n for i in range(len(self.column_legend)):\n column_string += str(self.column_legend[i])\n print(column_string)\n for i in range(self.rows):\n temp_string = \"\"\n secondary_counter = 0 # to display the row_header\n for j in range(self.columns):\n if secondary_counter == 0:\n temp_string += str(self.row_legend[i])\n\n temp_string += str(self.griddy[i][j])\n secondary_counter += 1\n print(temp_string)\n\n def coord_conv(self, rows, columns):\n # takes alphanumeric grid coordinates true to Battleship Game Style\n # and converts to common zero-indexed list of row and column\n # indicies\n coord_list=[]\n coord_list.append(self.translate_from_dict.get(rows)-1)\n coord_list.append(columns-1)\n return coord_list\n \n\n\nclass 
Ship:\n movement_dict={'N':[-1, 0], 'S': [1, 0], 'E': [0, 1], 'W':[0, -1]}\n\n def __init__(self, ship_name, ship_size, player, icon):\n self.ship_name = ship_name\n self.ship_size = ship_size\n # self.hp = ship_size\n # self.owner = player\n self.is_placed = False\n self.abbreviation = icon\n\n def __repr__(self):\n return str(self.ship_name)\n \n def is_valid_placement(self, player, coords, direction):\n # if invalid location, i.e. if any positions along the length of the ship are not\n # locations in the grid or not empty, return False\n # print(\"Starting is_valid_placement coords\")\n # print(coords)\n coordy=coords.copy()\n # print(coordy)\n direction_vector = self.movement_dict[direction]\n # print(direction_vector)\n for i in range(self.ship_size):\n if coordy[0] < 0 or coordy[1] < 0 or coordy[0] > len(player.griddy)-1 or coordy[1] > len(player.griddy[0])-1:\n # note, len(player.griddy[0]) column length check assumes all columns of the list are the same length\n # as the first column to avoid an index out of range error\n return False\n if player.griddy[coordy[0]][coordy[1]] != [' ']:\n return False\n coordy[0] += direction_vector[0]\n coordy[1] += direction_vector[1]\n # print(coordy)\n # print(direction_vector)\n # print(\"Ending is_valid_placement coords\")\n # print(coords)\n # print(coordy)\n return True\n \n\n def place_ship(self, player, coord_list, direction):\n # place ship in grid\n # print(\"place_ships coordinates\")\n # print(coord_list)\n # print(direction)\n\n if direction == \"N\":\n for length in range(self.ship_size):\n copy_list=coord_list.copy()\n player.griddy[copy_list[0]][copy_list[1]] = [self.abbreviation]\n player.remaining_ship_list.append(copy_list)\n coord_list[0] -= 1\n elif direction == \"S\":\n for length in range(self.ship_size):\n copy_list=coord_list.copy()\n player.griddy[copy_list[0]][copy_list[1]] = [self.abbreviation]\n player.remaining_ship_list.append(copy_list)\n coord_list[0] += 1\n elif direction == \"E\":\n for length in range(self.ship_size):\n copy_list=coord_list.copy()\n player.griddy[copy_list[0]][copy_list[1]] = [self.abbreviation]\n player.remaining_ship_list.append(copy_list)\n coord_list[1] += 1\n elif direction == \"W\":\n for length in range(self.ship_size):\n copy_list=coord_list.copy()\n player.griddy[copy_list[0]][copy_list[1]] = [self.abbreviation]\n player.remaining_ship_list.append(copy_list)\n coord_list[1] -= 1\n self.is_placed = True\n\n\ndef guess(enemy_board, personal_guess_board, coords):\n # checks a grid location for a ship, marks hit or miss in that location on guess board and board\n # print(\"guess initializing\")\n coord_list = coords.copy()\n # print(coords)\n # print(\" ^coords passed to function\")\n # print(coord_list)\n # print(\"^ copy of coords passed to function\")\n # print(coord_list in personal_guess_board.remaining_guess_list)\n # print(\"^coord_list in personal_guess_board.remaining_guess_list \")\n if (not(coord_list[0] < 0 or coord_list[1] < 0 or coord_list[0] > len(enemy_board.griddy)-1 or coord_list[1] > len(enemy_board.griddy[0])-1) and coord_list in personal_guess_board.remaining_guess_list):\n # note, len(player.griddy[0]) column length check assumes all columns of the list are the same length\n # as the first column to avoid an index out of range error\n if enemy_board.griddy[coord_list[0]][coord_list[1]] != [' ']:\n print(\"{ship} was hit!\".format(ship = enemy_board.griddy[coord_list[0]][coord_list[1]]))\n enemy_board.griddy[coord_list[0]][coord_list[1]] = ['><']\n 
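# mark the hit on both grids and retire this coordinate from the bookkeeping lists\n            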
enemy_board.remaining_ship_list.remove(coord_list)\n personal_guess_board.griddy[coord_list[0]][coord_list[1]] = ['><']\n personal_guess_board.remaining_guess_list.remove(coord_list)\n else:\n print(\"Miss\")\n personal_guess_board.griddy[coord_list[0]][coord_list[1]] = ['()']\n enemy_board.griddy[coord_list[0]][coord_list[1]] = ['()']\n personal_guess_board.remaining_guess_list.remove(coord_list)\n\n\ndef rand_place_ships(board, ship_list):\n direction_list = ['N', 'S', 'E', 'W']\n while len(ship_list) > 0:\n x_rand = rand.randint(0,9)\n # print(x_rand)\n y_rand = rand.randint(0,9)\n # print(y_rand)\n dir_rand = rand.choice(direction_list)\n # print(dir_rand)\n coord = [x_rand, y_rand]\n # print(coord)\n if ship_list[0].is_valid_placement(board, coord, dir_rand):\n ship_list[0].place_ship(board, coord, dir_rand)\n ship_list.pop(0)\n # print(ship_list)\n # board.print_grid()\n\n# new_grid = Grid(4, 5)\n# print(new_grid.griddy)\n# print(\"/n/n/n\")\n# new_grid.print_grid()\n\n# player1.griddy[2][1] = \"['DD']\"\n# player1.griddy[3][1] = \"['DD']\"\n# carrier.place_ship(player2, player2.coord_conv('B',3), \"E\")\n# print(battleship.is_valid_placement(player2, player2.coord_conv('B', 9), \"N\"))\n# print(battleship.is_valid_placement(player2, player2.coord_conv('B', 9), \"S\"))\n# print(battleship.is_valid_placement(player2, player2.coord_conv('B', 9), \"E\"))\n# print(battleship.is_valid_placement(player2, player2.coord_conv('B', 9), \"W\"))\n# battleship.place_ship(player2, player2.coord_conv(\"E\", 4), \"S\")\n\n# player1.print_grid()\n# print(\"\")\n# player2.print_grid()\n\n# main gameplay loop\nstandard_board_size = [10, 10]\n\nplayer1 = Grid(standard_board_size[0], standard_board_size[1])\nplayer2 = Grid(standard_board_size[0], standard_board_size[1])\nplayer1_guesses = Grid(standard_board_size[0], standard_board_size[1])\nplayer2_guesses = Grid(standard_board_size[0], standard_board_size[1])\n\ncarrier = Ship(\"Carrier\", 5, player1, \"CV\")\nbattleship = Ship(\"Battleship\", 4, player1, \"BB\")\ncruiser = Ship(\"Cruiser\", 3, player1, \"CR\")\nsubmarine = Ship(\"Submarine\", 3, player1, \"SF\")\ndestroyer = Ship(\"Destroyer\", 2, player1, \"DD\")\n\nship_list_player1 = [carrier, battleship, cruiser, submarine, destroyer]\n\ncarrier = Ship(\"Carrier\", 5, player2, \"CV\")\nbattleship = Ship(\"Battleship\", 4, player2, \"BB\")\ncruiser = Ship(\"Cruiser\", 3, player2, \"CR\")\nsubmarine = Ship(\"Submarine\", 3, player2, \"SF\")\ndestroyer = Ship(\"Destroyer\", 2, player2, \"DD\")\n\nship_list_player2 = [carrier, battleship, cruiser, submarine, destroyer]\n\n\n# print(ship_list_player1)\nrand_place_ships(player1, ship_list_player1)\nrand_place_ships(player2, ship_list_player2)\n\nprint(\"Welcome to an electronic Battleship clone\")\nprint(\"this is a non-commercial copy created as a python educational product\")\n# print(\"Place your ships: 1: Randomly, 2: Manually\")\n# while ships left to print > 0: do the following\n# player1.print_grid()\n# #print list of ships to place\n# print(\"Please place your ships by entering the ship number, its starting location, and direction\")\n# print(\"in the form: 1, B, 7, S\")\n# # input_string = input()\n# # check validity of ship placement, then place the ship\n# # if not valid, print error message to user\n# # create CPU ship placement\n\n# at this point, both Grids have all ships placed\nwhile (len(player1.remaining_ship_list) > 0) and (len(player2.remaining_ship_list) > 0):\n # player2.print_grid()\n # print(\"------\")\n 
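# one turn: show both boards, take the human guess, then the CPU fires at a random untried square\n    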
player1_guesses.print_grid()\n    print(\"\\n\")\n    player1.print_grid()\n    print(\"Input the capital letter of your guess row: (or X for eXit)\")\n    guess_row = input()\n    if guess_row == 'X':\n        break\n    print(\"Input the number of your guess column:\")\n    guess_column = int(input())\n    guess(player2, player1_guesses, player1.coord_conv(guess_row, guess_column))\n    # perform cpu player guess\n    player2_guess_coord = rand.choice(player2_guesses.remaining_guess_list)\n    guess(player1, player2_guesses, player2_guess_coord)\n\nif len(player1.remaining_ship_list) == 0:\n    print(\"player2 wins\")\nelif len(player2.remaining_ship_list) == 0:\n    print(\"player1 wins\")\nelse:\n    print(\"Error: no winner found\")\nprint(\"Thanks for playing!\")\n\n# print(\"Here \")\n\n\n# left to do:\n# \n# done --add A-J, 1-10 elements to display in print_grid.\n# done --create list of ships in main method\n# done --create ship placement logic function to grid class\n# done -- logic test for ship.is_valid_placement()\n# create AI logic behavior for guessing\n# initialize boards for player and AI\n# create main gameplay loop\n# use [()] to register a miss in our two character display format\n\n# future options:\n# create salvo alternate playstyle feature that is 'canon' (hah) in Battleship\n# Rules\n# create logic to narrow down AI player guesses based on available grid space\n# aggregate various AI strategies into single game difficulty settings\n# to hide the ships left on the board\n\n\n# ship abbreviations are from https://www.history.navy.mil\n# /research/histories/ship-histories/abbreviations.html\n\n# started with a weird formatting of a string inside a list but then realized\n# it met the core functionality of displaying a grid pattern while still\n# having room for 2 character string representing icons for empty space, ships,\n# hits, and misses.\n\n# got a cyclomatic complexity warning for function Ship.place_ship. Originally\n# this function both checked for a valid placement and then placed the ship, to\n# reduce the complexity a new function Ship.is_valid_placement() was created to\n# split the complexity and remove the warning.\n\n# several times I thought is_valid_placement() worked, once I got the rand_\n# place_ships() function in place I discovered several out of index errors\n# and ship overwrites/incorrect placements. Using print() statements to track \n# location indexes throughout the two methods revealed that the location index \n# was modified during checks along the length of the proposed ship as specified\n# but what was unknown was that this was performed on the location variable and\n# not a copy of one. Tested individually, both functions worked fine but when\n# combined, tried to place a ship one space away from the end of the checked \n# location instead of the start. 
Changing is_valid_placement to work with a \n# copy of the location coordinates using b = a.copy() resolved the logic \n# errors.\n\n# game is initialized with a 10x10 board size but built with customizable size\n# logically sound up to 26x26 as-is but not visually practical printed to the\n# standard terminal past 22x12\n\n# looks like coord_conv() is a Facade pattern\n\n\n","repo_name":"aerostar105/singleplayergames","sub_path":"battleship.py","file_name":"battleship.py","file_ext":"py","file_size_in_byte":13730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"22224680019","text":"from os.path import dirname, join as pjoin\r\n\r\nfrom scipy.fft import fft\r\nfrom scipy.io import wavfile\r\nimport scipy.io\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom scipy.signal import butter, lfilter\r\nimport IPython\r\n#Ibrahim Nobani 1190278\r\n\r\ndef RangeIndex(BWrange,fcenIndex,frq_DS,maxMin):\r\n RangeIndex = (np.abs(frq_DS - BWrange)).argmin() - fcenIndex\r\n if (maxMin==0):\r\n RangeIndexMin = fcenIndex - RangeIndex\r\n if RangeIndexMin < 0:\r\n RangeIndexMin = 0\r\n return RangeIndexMin\r\n if(maxMin==1):\r\n RangeIndexMax = fcenIndex + RangeIndex\r\n if RangeIndexMax > len(frq_DS) - 1:\r\n RangeIndexMax = len(frq_DS) - 1\r\n return RangeIndexMax\r\n\r\ndef plotTimeFreq(y, Fs, BWrange):\r\n n = len(y) # length of the signal\r\n k = np.arange(n)\r\n T = n / Fs\r\n\r\n t = np.arange(0, n * Ts, Ts) # time vector\r\n\r\n frq = k / T # two sides frequency range\r\n fcen = frq[int(len(frq) / 2)]\r\n frq_DS = frq - fcen\r\n frq_SS = frq[range(int(n / 2))] # one side frequency range\r\n\r\n Y = np.fft.fft(y) # fft computing and normalization\r\n yinv = np.fft.ifft(Y).real # ifft computing and normalization\r\n Y_DS = np.roll(Y, int(n / 2))\r\n Y_SS = Y[range(int(n / 2))]\r\n\r\n fcenIndex = (np.abs(frq_DS)).argmin()\r\n RangeIndexMin = RangeIndex(BWrange,fcenIndex,frq_DS,0)\r\n RangeIndexMax = RangeIndex(BWrange, fcenIndex, frq_DS, 1)\r\n fig, ax = plt.subplots(2, 1, figsize=(16, 6))\r\n ax[0].plot(t, y)\r\n ax[0].set_xlabel('Time')\r\n ax[0].set_ylabel('Amplitude')\r\n ax[1].set_xlabel('Freq (Hz)')\r\n ax[1].set_ylabel('|Y(freq)|')\r\n ax[1].plot(frq_DS[RangeIndexMin:RangeIndexMax], abs(Y_DS[RangeIndexMin:RangeIndexMax]),\r\n 'r') # plotting the spectrum\r\n ax[1].set_xlabel('Freq (Hz)')\r\n ax[1].set_ylabel('|Y(freq)|')\r\n plt.show()\r\n return yinv\r\ndef BBfilter(B,B2,Y_DS,frq_DS,n,Fs,rate):\r\n fcenIndex = (np.abs(frq_DS)).argmin()\r\n fBWIndex = (np.abs(frq_DS - B)).argmin()\r\n fBWIndex2= (np.abs(frq_DS - B2)).argmin()\r\n B = frq_DS[fBWIndex]\r\n Mask_DS = np.ones(len(frq_DS))\r\n Yf_DS = np.copy(Y_DS)\r\n for cnt in range(len(frq_DS)):\r\n if ~((-1 * B2 > (frq_DS[cnt]) > -1 * B) or (B2 < (frq_DS[cnt]) < B)):\r\n Mask_DS[cnt] = 0;\r\n #print(B,frq_DS[cnt],Yf_DS[cnt])\r\n Yf_DS[cnt] = Y_DS[cnt] * 0;\r\n\r\n Yf = np.roll(Yf_DS, int(n /2))\r\n Yinv2 = np.fft.fft(Yf)\r\n yinv = np.fft.ifft(Yf).real # ifft computing and normalization\r\n yinv = np.array(yinv)\r\n yinv_int = yinv.astype(np.int16)\r\n RangeIndexMin = RangeIndex(B, fcenIndex, frq_DS, 0)\r\n RangeIndexMax = RangeIndex(B+50, fcenIndex, frq_DS, 1)\r\n fig, ax = plt.subplots(3, 1, figsize=(16, 9))\r\n ax[0].plot(frq_DS[RangeIndexMin:RangeIndexMax], abs(Mask_DS[RangeIndexMin:RangeIndexMax]),\r\n 'r') # plotting the spectrum\r\n ax[0].set_xlabel('Freq (Hz)')\r\n ax[0].set_ylabel('|H(freq)|')\r\n # print(abs(Yf_DS[RangeIndexMin:RangeIndexMax]))\r\n 
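# lower panel: magnitude of the spectrum kept by the mask, over the same frequency span as the filter response above\r\n    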
ax[1].plot(frq_DS[RangeIndexMin:RangeIndexMax], abs(Yf_DS[RangeIndexMin:RangeIndexMax]),\r\n               'r') # plotting the spectrum\r\n    ax[1].set_xlabel('Freq (Hz)')\r\n    ax[1].set_ylabel('|Y(freq)|')\r\n    plt.show()\r\n    return yinv\r\ndef demodulation(yinv, t, Fs, rate, n, frq_DS, FC):\r\n    yinv_int = yinv.astype(np.int16)\r\n    y2 = [float(x) for x in yinv]\r\n    carrier_signal = 0.1 * np.cos(2 * np.pi * FC * t)\r\n    output_signal = y2 * carrier_signal\r\n    plotTimeFreq(output_signal, Fs, BWrange)\r\n    Yf = np.fft.fft(output_signal)\r\n    Yf = np.roll(Yf, int(n / 2))\r\n    LPBW = input(\"Insert the low pass filter bandwidth: \")\r\n    BW = int(LPBW)\r\n    yinv2=BBfilter(BW,0,Yf,frq_DS,n,Fs,rate)\r\n    #yinv2 = upsampler(yinv2, 30)\r\n    plotTimeFreq(yinv2, Fs, BWrange)\r\n    yinv_int16 = yinv2.astype(np.int16)\r\n    wavfile.write(\"sound1.wav\", rate, yinv_int16)\r\n    IPython.display.Audio(yinv2,rate=rate)\r\nBWrange=10000\r\nTs=1\r\n\r\n########-------------------\r\ndef readMixedandGraph():\r\n    rate1, data1 = wavfile.read('FDMAMixedAudio12.wav')\r\n    length = data1.shape[0] / rate1\r\n    #print(rate1)\r\n    #print(f\"length = {length}s\")\r\n    time = np.linspace(0., length, data1.shape[0])\r\n    #Fs=rate1*upsamplerate;\r\n    Fs=rate1\r\n    #Fs=1.0/time[1]\r\n    Ts = 1.0/Fs\r\n    #print(len(time))\r\n    plotTimeFreq(data1, Fs, BWrange)\r\n    t = np.arange(0, len(data1) * Ts, Ts) # time vector\r\n    y = [float(x) for x in data1]\r\n    n = len(y) # length of the signal\r\n    k = np.arange(n)\r\n    T = n / Fs\r\n    frq = k / T # two sides frequency range\r\n    fcen = frq[int(len(frq) / 2)]\r\n    frq_DS = frq - fcen\r\n    frq_SS = frq[range(int(n / 2))] # one side frequency range\r\n\r\n    Y = np.fft.fft(y) # fft computing and normalization\r\n    yinv = np.fft.ifft(Y).real # ifft computing and normalization\r\n    Y_DS = np.roll(Y, int(n / 2))\r\n    Y_SS = Y[range(int(n / 2))]\r\n\r\n    fcenIndex = (np.abs(frq_DS)).argmin()\r\n    RangeIndex = (np.abs(frq_DS - BWrange)).argmin() - fcenIndex\r\n    LowerFrequency=input(\"Insert the lower frequency range: \")\r\n    LowerFrequency=int(LowerFrequency)\r\n    HigherFrequency = input(\"Insert the Higher frequency range: \")\r\n    HigherFrequency = int(HigherFrequency)\r\n    filteredY=BBfilter(HigherFrequency, LowerFrequency, Y_DS,frq_DS,n,Fs,rate1)\r\n    FC = input(\"Insert the carrier frequency: \")\r\n    FC = int(FC)\r\n    demodulation(filteredY,t,Fs,rate1*35,n,frq_DS,FC)\r\n\r\n\r\nans=1\r\nwhile ans:\r\n    print(\"\"\"\r\n    1.Read the FDMA signals and graph in time and frequency domain\r\n    2.Change Bandwidth Range\r\n    3.Exit/Quit\r\n    \"\"\")\r\n    ans=input(\"What would you like to do? 
\")\r\n if ans==\"1\":\r\n readMixedandGraph()\r\n elif ans==\"2\":\r\n BWrange=input(\"Insert the Bandwidth Range: \")\r\n BWrange = int(BWrange)\r\n elif ans==\"3\":\r\n print(\"\\n Goodbye\")\r\n ans = False\r\n elif ans !=\"\":\r\n print(\"\\n Not Valid Choice Try again\")","repo_name":"Ibrahim-Nobani/Communication-Systems","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":5901,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"13451960676","text":"# -*- coding: utf-8 -*-\n__all__ = ('CoveragePyCollector', 'CoveragePyCollectorConfig')\n\nfrom typing import Any, ClassVar, Dict, Mapping, Optional, Set\nimport json\nimport typing\n\nimport attr\n\nfrom .collector import CoverageCollector, CoverageCollectorConfig\nfrom ..core import FileLineSet\n\nif typing.TYPE_CHECKING:\n from ..container import ProgramContainer\n from ..environment import Environment\n from ..program import ProgramDescription\n\n\n@attr.s(frozen=True)\nclass CoveragePyCollectorConfig(CoverageCollectorConfig):\n NAME: ClassVar[str] = 'coverage.py'\n\n @classmethod\n def from_dict(cls,\n dict_: Mapping[str, Any],\n dir_: Optional[str] = None\n ) -> 'CoverageCollectorConfig':\n assert dict_['type'] == cls.NAME\n return CoveragePyCollectorConfig()\n\n def build(self,\n environment: 'Environment',\n program: 'ProgramDescription'\n ) -> 'CoverageCollector':\n return CoveragePyCollector(program=program)\n\n\n@attr.s(frozen=True, slots=True, auto_attribs=True)\nclass CoveragePyCollector(CoverageCollector):\n program: 'ProgramDescription'\n\n def _read_report_json(self, json_: Mapping[str, Any]) -> FileLineSet:\n filename_to_lines: Dict[str, Set[int]] = {}\n filename_to_json_report = json_['files']\n for filename, file_json in filename_to_json_report.items():\n filename_to_lines[filename] = set(file_json['executed_lines'])\n return FileLineSet(filename_to_lines)\n\n def _read_report_text(self, text: str) -> FileLineSet:\n json_ = json.loads(text)\n return self._read_report_json(json_)\n\n def _extract(self, container: 'ProgramContainer') -> FileLineSet:\n files = container.filesystem\n shell = container.shell\n temporary_filename = files.mktemp()\n command = (f'coverage json -o {temporary_filename} '\n '--omit=\"tests/* && coverage erase\"')\n shell.check_call(command, cwd=self.program.source_directory)\n report_text = files.read(temporary_filename)\n return self._read_report_text(report_text)\n","repo_name":"squaresLab/Darjeeling","sub_path":"src/darjeeling/coverage/coveragepy.py","file_name":"coveragepy.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"86"} +{"seq_id":"42241376324","text":"import pytest\nfrom django.urls import reverse\nfrom rest_framework import status\n\nfrom tests.goals.factories import CreateGoalRequest\n\n\n@pytest.mark.django_db()\nclass TestCreateGoalView:\n url = reverse('goals:create-goal')\n\n def test_create_goal_auth_required_fail(self, client):\n response = client.post(self.url)\n assert response.status_code == status.HTTP_403_FORBIDDEN\n\n def test_create_goal_not_owner_fail(self, auth_client, goal_category):\n data = CreateGoalRequest.build(category=goal_category.id)\n response = auth_client.post(self.url, data=data)\n assert response.status_code == status.HTTP_403_FORBIDDEN\n\n @pytest.mark.parametrize('role', [1, 2, 3])\n def test_create_goal_owner_or_writer(self, auth_client, board_participant, goal_category, 
role):\n board_participant.role = role\n board_participant.save(update_fields=['role'])\n data = CreateGoalRequest.build(category=goal_category.id)\n response = auth_client.post(self.url, data=data)\n if role in (1, 2):\n assert response.status_code == status.HTTP_201_CREATED\n else:\n assert response.status_code == status.HTTP_403_FORBIDDEN\n\n def test_create_goal_on_deleted_category_fail(self, auth_client, goal_category):\n goal_category.is_deleted = True\n goal_category.save(update_fields=['is_deleted'])\n data = CreateGoalRequest.build(category=goal_category.id)\n response = auth_client.post(self.url, data=data)\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n\n def test_create_goal_on_existing_category(self, auth_client):\n data = CreateGoalRequest.build(category=1)\n response = auth_client.post(self.url, data=data)\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n","repo_name":"karlovdaniil/todolist","sub_path":"tests/goals/goal_test.py","file_name":"goal_test.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"74571955483","text":"try:\n import simplejson as json\nexcept ImportError:\n import json\n\nfrom rest_framework.views import APIView\nfrom rest_framework import permissions, status\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import NotFound\n\nfrom django.db import IntegrityError\n\nfrom view_templates.views import GenericReadOnlyDetailView, GenericDetailView\n\nfrom slides_manager.models import Slide\nfrom reviews_manager.models import ROIsAnnotationStep\nfrom reviews_manager.serializers import ROIsAnnotationStepFullSerializer, \\\n ROIsAnnotationStepROIsTreeSerializer\nfrom rois_manager.models import Slice, Core, FocusRegion\nfrom rois_manager.serializers import SliceSerializer, SliceDetailsSerializer, \\\n CoreSerializer, CoreDetailsSerializer, FocusRegionSerializer\n\nimport logging\nlogger = logging.getLogger('promort')\n\n\nclass SlideROIsList(APIView):\n permission_classes = (permissions.IsAuthenticated,)\n\n def _serialize_rois_data(self, rois, roi_type, annotation_step):\n rois_data = []\n roi_parent_type = {\n 'slice': None,\n 'core': 'slice',\n 'focus_region': 'core'\n }\n for r in rois:\n roi_details = {\n 'roi_id': r.id,\n 'roi_type': roi_type,\n 'annotation_step': annotation_step,\n 'parent_type': roi_parent_type[roi_type],\n 'parent_id': None\n }\n if roi_type == 'core':\n roi_details['parent_id'] = r.slice.id\n elif roi_type == 'focus_region':\n roi_details['parent_id'] = r.core.id\n rois_data.append(roi_details)\n return rois_data\n\n def get(self, request, pk, format=None):\n try:\n slide_obj = Slide.objects.get(id=pk)\n except Slide.DoesNotExist:\n raise NotFound('There is no Slide with label {0}'.format(pk))\n roi_type = request.query_params.get('roi_type')\n rois_annotation_steps = ROIsAnnotationStep.objects.filter(slide=slide_obj, completion_date__isnull=False)\n rois = []\n if roi_type is None:\n for step in rois_annotation_steps:\n rois.extend(self._serialize_rois_data(step.slices.all(), 'slice', step.label))\n rois.extend(self._serialize_rois_data(step.cores, 'core', step.label))\n rois.extend(self._serialize_rois_data(step.focus_regions, 'focus_region', step.label))\n else:\n if roi_type == 'slice':\n for step in rois_annotation_steps:\n print(step.label)\n rois.extend(self._serialize_rois_data(step.slices.all(), 'slice', step.label))\n elif roi_type == 'core':\n for step in 
rois_annotation_steps:\n rois.extend(self._serialize_rois_data(step.cores, 'core', step.label))\n elif roi_type == 'focus_region':\n for step in rois_annotation_steps:\n rois.extend(self._serialize_rois_data(step.focus_regions, 'focus_region', step.label))\n else:\n return Response(\n '{0} is not a valid ROI type'.format(roi_type), \n status=status.HTTP_400_BAD_REQUEST\n )\n return Response(rois, status=status.HTTP_200_OK)\n\n\nclass ROIsTreeList(APIView):\n permission_classes = (permissions.IsAuthenticated,)\n\n def get(self, request, label, format=None):\n try:\n obj = ROIsAnnotationStep.objects.get(label=label)\n except ROIsAnnotationStep.DoesNotExist:\n raise NotFound('There is no ROIsAnnotationStep with label %s' % label)\n serializer = ROIsAnnotationStepROIsTreeSerializer(obj)\n return Response(serializer.data,\n status=status.HTTP_200_OK)\n\n def delete(self, request, label, format=None):\n slices = Slice.objects.filter(annotation_step__label=label)\n for s in slices:\n s.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass SliceList(APIView):\n model = ROIsAnnotationStep\n model_serializer = ROIsAnnotationStepFullSerializer\n permission_classes = (permissions.IsAuthenticated,)\n\n def _find_rois_annotation_step(self, label):\n try:\n return ROIsAnnotationStep.objects.get(label=label)\n except ROIsAnnotationStep.DoesNotExist:\n raise NotFound('There is no ROIsAnnotationStep with label %s' % label)\n\n def get(self, request, label, format=None):\n annotation_step = self._find_rois_annotation_step(label)\n serializer = self.model_serializer(annotation_step)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def post(self, request, label, format=None):\n annotation_step = self._find_rois_annotation_step(label)\n slice_data = request.data\n slice_data['annotation_step'] = annotation_step.id\n slice_data['author'] = request.user.username\n\n serializer = SliceSerializer(data=slice_data)\n if serializer.is_valid():\n try:\n serializer.save()\n except IntegrityError:\n return Response({\n 'status': 'ERROR',\n 'message': 'duplicated slice label %s' % (slice_data['label'])\n }, status=status.HTTP_409_CONFLICT)\n return Response(serializer.data,\n status=status.HTTP_201_CREATED)\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n\n\nclass SliceDetail(GenericDetailView):\n model = Slice\n model_serializer = SliceSerializer\n permission_classes = (permissions.IsAuthenticated,)\n\n\nclass CoreList(GenericReadOnlyDetailView):\n model = Slice\n model_serializer = SliceDetailsSerializer\n permission_classes = (permissions.IsAuthenticated,)\n\n def post(self, request, pk, format=None):\n core_data = request.data\n core_data['author'] = request.user.username\n core_data['slice'] = pk\n\n serializer = CoreSerializer(data=core_data)\n if serializer.is_valid():\n try:\n serializer.save()\n except IntegrityError:\n return Response({\n 'status': 'ERROR',\n 'message': 'duplicated core label %s for slice %s' % (core_data['label'], pk)\n }, status=status.HTTP_409_CONFLICT)\n return Response(serializer.data,\n status=status.HTTP_201_CREATED)\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n\n\nclass CoreDetail(GenericDetailView):\n model = Core\n model_serializer = CoreSerializer\n permission_classes = (permissions.IsAuthenticated,)\n\n\nclass FocusRegionList(GenericReadOnlyDetailView):\n model = Core\n model_serializer = CoreDetailsSerializer\n permission_classes = (permissions.IsAuthenticated,)\n\n def post(self, request, 
pk, format=None):\n focus_region_data = request.data\n focus_region_data['author'] = request.user.username\n focus_region_data['core'] = pk\n\n serializer = FocusRegionSerializer(data=focus_region_data)\n if serializer.is_valid():\n try:\n serializer.save()\n except IntegrityError:\n return Response({\n 'status': 'ERROR',\n 'message': 'duplicated focus region label %s for core %s' %\n (focus_region_data['label'], pk)\n }, status=status.HTTP_409_CONFLICT)\n return Response(serializer.data,\n status=status.HTTP_201_CREATED)\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n\n\nclass FocusRegionDetail(GenericDetailView):\n model = FocusRegion\n model_serializer = FocusRegionSerializer\n permission_classes = (permissions.IsAuthenticated,)\n","repo_name":"crs4/DigitalPathologyPlatform","sub_path":"promort/rois_manager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7991,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"71089864285","text":"#!/usr/bin/env python3\n#\n# external dependencies:\n# python-sh (provides module sh)\n# pacman\n# git\n# makepkg\n# wget\n# gunzip\n#\n# hacked: /etc/sudoers\n# /usr/share/polkit-1/rules.d/00-aur-build.rules\n\nimport argparse\nimport os\nimport sys\nimport time\nimport datetime\nimport csv\nimport pydoc\nimport glob\nimport sh\nimport shutil\nimport getpass\n\nVERSION = \"0.1\"\nLOCAL_DB_PATH = \"/var/cache/aur-build/\"\nLOCAL_DB = LOCAL_DB_PATH + \"db\"\n# PAMAC_BUILD_FOLDER = \"/var/tmp/pamac-build-\" + getpass.getuser()\n# PACMAN_PKG_FOLDER = \"/var/cache/pacman/pkg/\"\nBUILD_FOLDER = \"/var/tmp/aur-build-\" + getpass.getuser()\nPACKAGES_FOLDER = \"/mnt/packages\"\nSTOPFILE = LOCAL_DB_PATH + \"stop\" # touch this to stop execution\n\nSTATUS_NEW = \"NEW\"\nSTATUS_DOESNTBUILD = \"DOESNTBUILD\"\nSTATUS_BUILDS = \"BUILDS\"\nSTATUS_DELETED = \"DELETED\"\nSTATUS_OFFICIAL = \"OFFICIAL\"\n\nSKIP_PACKAGES = None # mainly for debug. None means no limit.\nMAX_PACKAGES = None # mainly for debug. None means no limit.\n\ndb = None\nargs = None\n\n\nclass Database:\n def __init__(self, path):\n self.path = path\n if not os.path.exists(self.path):\n self.create()\n\n def create(self):\n \"\"\"\n Create/recreate database. 
No backup is done.\n \"\"\"\n open(self.path, 'w').close()\n\n def write(self, pkgs_dict):\n \"\"\"\n Overwrite whole database with new data\n \"\"\"\n with open(self.path, 'w') as text_file:\n writer = csv.writer(text_file, delimiter=';')\n\n writer.writerow(['Name',\n 'Status',\n 'Build time (min)',\n 'Built on',\n 'Filename'])\n\n for pkg in sorted(pkgs_dict.values()):\n writer.writerow([pkg.pkgname,\n pkg.status,\n pkg.buildtime,\n pkg.builtwhen,\n pkg.filename])\n\n def load(self):\n \"\"\"\n Load data from \n :return: a dict pkgname -> Package object\n \"\"\"\n pkgs_dict = {}\n with open(self.path, 'r') as text_file:\n csv_reader = csv.reader(text_file, delimiter=';')\n header = True\n for row in csv_reader:\n if header:\n header = False\n else:\n if not row or not row[0]:\n continue\n pkgname = row[0]\n pkg = Package(pkgname,\n row[1] or STATUS_NEW,\n row[2] or None,\n row[3] or None,\n row[4] or None)\n pkgs_dict[pkgname] = pkg\n return pkgs_dict\n\n def show(self):\n \"\"\"\n This is essentially \n less /var/cache/aur-build/db\n \"\"\"\n file = open(self.path, 'r')\n pydoc.pager(file.read())\n file.close()\n\n\nclass Package:\n def __init__(self,\n pkgname,\n status=STATUS_NEW,\n buildtime=None,\n builtwhen=None,\n filename=None):\n self.pkgname = pkgname\n self.status = status\n self.filename = filename\n try:\n self.buildtime = int(buildtime)\n except:\n self.buildtime = None\n self.builtwhen = builtwhen\n\n def __lt__(self, other):\n \"\"\"\n This allows ordering a list of packages by name\n \"\"\"\n return self.pkgname < other.pkgname\n\n def become_official(self):\n \"\"\"\n Search the package in standard Manjaro repositories\n Also non-git packages are considered\n \"\"\"\n try:\n sh.pacman(\"-Ss\", \"^\" + self.pkgname + \"$\")\n # ^ and $ for exact match\n # if not found, pacman gives rc=1\n return True\n except sh.ErrorReturnCode_1:\n if self.pkgname.endswith(\"-git\"):\n try:\n sh.pacman(\"-Ss\", \"^\" + self.pkgname[:-4] + \"$\")\n return True\n except sh.ErrorReturnCode_1:\n return False\n return False\n # for all other exceptions, raise\n\n def build(self):\n \"\"\"\n Install the package, to have the .tar.xz\n Update the object itself\n WARNING: enable wheel in /etc/sudoers, and polkit\n \"\"\"\n start_time = time.time() # in seconds from Epoch\n self.filename = None\n self.buildtime = None\n self.builtwhen = get_iso_date() # es. 2008-11-22\n try:\n # sh.pamac(\"build\", \"--no-confirm\", self.pkgname,\n # _in=sys.stdin,\n # _out=sys.stdout,\n # _err=sys.stderr,\n # _timeout=7200) # max. two hours\n os.makedirs(BUILD_FOLDER, exist_ok=True)\n os.chdir(BUILD_FOLDER)\n sh.rm(\"-rf\", BUILD_FOLDER + \"/\" + self.pkgname)\n sh.git(\"clone\", \"https://aur.archlinux.org/\" + self.pkgname + \".git\",\n _in=sys.stdin,\n _out=sys.stdout,\n _err=sys.stderr,\n _timeout=7200) # max. two hours\n try:\n os.chdir(BUILD_FOLDER + \"/\" + self.pkgname)\n sh.makepkg(\"-sc\", \"--noconfirm\",\n _in=sys.stdin,\n _out=sys.stdout,\n _err=sys.stderr,\n _timeout=7200) # max. 
two hours\n # WARNING makepkg won't search for aur dependencies\n self.status = STATUS_BUILDS\n file_filter = BUILD_FOLDER + \"/\" + self.pkgname + \"/\" + self.pkgname + \"*.pkg.tar.xz\"\n \n try:\n filename = sorted(glob.glob(file_filter))[-1]\n self.filename = os.path.basename(filename)\n shutil.move(filename, PACKAGES_FOLDER + \"/\" + self.filename)\n except IndexError:\n self.filename = None\n except sh.ErrorReturnCode as e:\n print(\"makepkg aborted with status %d\" % e.exit_code)\n self.status = STATUS_DOESNTBUILD\n except sh.ErrorReturnCode as e1:\n print(\"git clone aborted with status %d\" % e1.exit_code)\n self.status = STATUS_DOESNTBUILD\n # for all other exceptions, raise\n\n self.buildtime = round((time.time() - start_time) / 60)\n\n # if self.status == STATUS_BUILDS:\n # try:\n # sh.pamac(\"remove\", \"--no-confirm\", self.pkgname,\n # _out=sys.stdout,\n # _err=sys.stderr)\n # # suggestion: don't use RemoveUnrequiredDeps\n # except sh.ErrorReturnCode as e:\n # print(\"Warning! cannot remove package. Pamac aborted with status %d\" %\n # e.exit_code)\n # # for all other exceptions, raise\n\n # clean build folder\n # why pamac does not clean it by itself ?!?\n # warning: shutil.rmtree fails because pkg subfolder is not readable\n # shutil.rmtree(PAMAC_BUILD_FOLDER)\n # sh.rm(\"-rf\", PAMAC_BUILD_FOLDER)\n sh.rm(\"-rf\", BUILD_FOLDER + \"/\" + self.pkgname)\n\n\ndef program_name():\n \"\"\"\n Return this file's name, without path\n \"\"\"\n return os.path.basename(__file__)\n\n\ndef create_arg_parser():\n \"\"\"\n Create parser for parsing CLI arguments\n \"\"\"\n parser = argparse.ArgumentParser(\n description='Build all AUR packages')\n group_actions = parser.add_argument_group(\"Actions\")\n group_actions.add_argument('--run',\n dest='run',\n action='store_true',\n help='Shorthand for -d -n -e')\n group_actions.add_argument('-i', '--init-db',\n dest='init_db',\n action='store_true',\n help='Initialize/clear database')\n group_actions.add_argument('-d', '--download',\n dest='download',\n action='store_true',\n help='Download packages list and update database')\n group_actions.add_argument('-n', '--build-new-packages',\n dest='build_new',\n action='store_true',\n help='Build all packages in status ' + STATUS_NEW)\n group_actions.add_argument('-e', '--build-packages-with-errors',\n dest='build_err',\n action='store_true',\n help='Build all packages in status ' + STATUS_DOESNTBUILD)\n group_actions.add_argument('-r', '--rebuild-built-packages',\n dest='rebuild',\n action='store_true',\n help='Rebuild all packages in status ' + STATUS_BUILDS)\n group_actions.add_argument('-b', '--build-all',\n dest='build_all',\n action='store_true',\n help='Re/build all packages. Shorthand for -n -e -r')\n group_actions.add_argument('--single',\n dest='build_single_pkg',\n action='store',\n help='Re/build a single package. 
Cannot be used with -n -e -r')\n    group_actions.add_argument('--show-log',\n                               dest='show_log',\n                               action='store_true',\n                               help='Print database content')\n    group_actions.add_argument('--stats',\n                               dest='stats',\n                               action='store_true',\n                               help='Print database statistics')\n    parser.add_argument('--skip',\n                        dest='skip_packages',\n                        action='store',\n                        type=int,\n                        default=SKIP_PACKAGES,\n                        help='Skip first n packages in db')\n    parser.add_argument('--max',\n                        dest='max_packages',\n                        action='store',\n                        type=int,\n                        default=MAX_PACKAGES,\n                        help='Analyse at most first n packages in db (after skipped ones)')\n    parser.add_argument('-v', '--version',\n                        action='version',\n                        version='%(prog)s ' + VERSION)\n    return parser\n\n\ndef get_aur_package_list():\n    \"\"\"\n    Retrieve full list of AUR packages\n    currently, 'packages' file is 900Kb with 59K rows\n    :see:\n    https://forum.manjaro.org/t/list-all-aur-packages-not-just-installed/39631/2\n    :return: unsorted list of strings (package names only)\n    \"\"\"\n    REMOTE_FILE = \"https://aur.archlinux.org/packages.gz\"\n\n    stdout = sh.gunzip(sh.wget(\"-q\", \"-O\", \"-\", REMOTE_FILE)).stdout\n    # stdout is a 'bytes' object\n    pkgnames_v0 = stdout.decode('utf-8').split('\\n')\n    pkgnames_v1 = [line.strip() for line in pkgnames_v0]\n    pkgnames = [line for line in pkgnames_v1 if line and line[0] != '#']\n    return pkgnames\n    # NO WAY TO GET THE PKG VERSION ????\n\n\ndef get_iso_date():\n    \"\"\"\n    e.g. 2008-11-22\n    \"\"\"\n    return str(datetime.date.today())\n\n\ndef get_iso_time():\n    \"\"\"\n    e.g. 2008-11-22 23:59:59\n    \"\"\"\n    return time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())\n\n\ndef stop_file_exists():\n    \"\"\"\n    Return true if somebody has created a stop file\n    \"\"\"\n    return os.path.exists(STOPFILE)\n\n\ndef build_all(pkgs_dict, build_status=[STATUS_NEW]):\n    \"\"\"\n    Build all packages with correct status\n    \"\"\"\n    num_packages = len(pkgs_dict)\n    num_skipped_packages = 0\n    num_analysed_packages = 0\n    for pkgname, pkg in pkgs_dict.items():\n        print(\"=== Reading package: %s =========\" % pkgname)\n\n        if args.skip_packages and num_skipped_packages < args.skip_packages:\n            print(\"Skipping.\")\n            num_skipped_packages += 1\n            continue\n\n        num_analysed_packages += 1\n\n        if pkg.status not in build_status:\n            must_build = False\n            # DELETED and OFFICIAL packages fall here\n            # We never build packages DELETED or OFFICIAL\n        elif pkg.become_official():\n            pkg.status = STATUS_OFFICIAL\n            db.write(pkgs_dict)\n            must_build = False\n        else:\n            must_build = True\n\n        if must_build:\n            pkg.build()\n            db.write(pkgs_dict)\n        else:\n            print(\"Skipping, package has status %s\" % pkg.status)\n\n        print(\"%d packages elaborated out of %d (%d%%)\" % (\n            num_skipped_packages + num_analysed_packages,\n            num_packages,\n            100 * (num_skipped_packages + num_analysed_packages) // num_packages))\n\n        if args.max_packages and num_analysed_packages >= args.max_packages:\n            print(\"====================\")\n            print(\"Exiting because we analysed MAX_PACKAGES=%d packages\" %\n                  num_analysed_packages)\n            return\n\n        if stop_file_exists():\n            print(\"====================\")\n            print(\"Exiting because stop file exists\")\n            try:\n                os.unlink(STOPFILE)\n            except:\n                print(\"Error deleting stop file, you *must* delete it on your own\")\n            return\n\n\ndef update_db():\n    \"\"\"\n    Update packages database\n    i.e. 
download packages list, add new packages, mark missing packages as DELETED\n \"\"\"\n pkgs_dict = db.load()\n new_pkg_names = set(get_aur_package_list())\n for pkgname in pkgs_dict:\n if pkgname not in new_pkg_names:\n pkg = pkgs_dict[pkgname]\n pkg.status = \"DELETED\"\n for pkgname in new_pkg_names:\n if pkgname not in pkgs_dict:\n pkgs_dict[pkgname] = Package(pkgname)\n db.write(pkgs_dict)\n\n\ndef format_minutes(minutes):\n \"\"\"\n Convert minutes into days/hours/minutes\n :param: minutes must be int\n \"\"\"\n if minutes < 60: # min/hour\n return str(minutes) + \"'\"\n elif minutes < 1440: # min/day\n return str(minutes // 60) + \"h\" + str(minutes % 60) + \"'\"\n else:\n return str(minutes // 1440) + \"d\" + str((minutes % 1440) // 60) + \"h\"\n\n\ndef format_size(bytes):\n if bytes < 1024:\n return str(bytes) + \"b\"\n elif bytes < 1048576:\n return str(bytes // 1024) + \"Kb\"\n elif bytes < 1073741824:\n return str(bytes // 1048576) + \"Mb\"\n else:\n return str(bytes // 1073741824) + \"Gb\"\n\n\ndef print_statistics(pkgs_dict):\n \"\"\"\n Print out statistics on database, grouped by status\n \"\"\"\n pkgcnt = {\n STATUS_NEW: 0,\n STATUS_DOESNTBUILD: 0,\n STATUS_BUILDS: 0,\n STATUS_DELETED: 0,\n STATUS_OFFICIAL: 0\n }\n buildtime = {\n STATUS_NEW: 0,\n STATUS_DOESNTBUILD: 0,\n STATUS_BUILDS: 0,\n STATUS_DELETED: 0,\n STATUS_OFFICIAL: 0\n }\n fsize = 0\n max_buildtime = 0\n sum_buildtime = 0\n max_fsize = 0\n\n for pkg in pkgs_dict.values():\n pkgcnt[pkg.status] = pkgcnt[pkg.status] + 1\n tm = pkg.buildtime or 0\n buildtime[pkg.status] = buildtime[pkg.status] + tm\n sum_buildtime = sum_buildtime + tm\n if tm > max_buildtime:\n max_buildtime = tm\n if pkg.filename is not None:\n sz = os.path.getsize(PACKAGES_FOLDER + \"/\" + pkg.filename) # TODO could be stored on db\n fsize = fsize + sz\n if sz > max_fsize:\n max_fsize = sz\n\n for status, time in buildtime.items():\n buildtime[status] = format_minutes(time)\n \n print(\"\\t\\tPckgs\\tBuild time (h)\\tSize\")\n print((\"Builds:\\t\\t%d\\t%s\\t\\t%s\\n\" +\n \"Doesn't build:\\t%d\\t%s\\n\" +\n \"New:\\t\\t%d\\t%s\\n\" +\n \"Deleted:\\t%d\\t%s\\n\" +\n \"Official:\\t%d\\t%s\\n\") %\n (pkgcnt[STATUS_BUILDS], buildtime[STATUS_BUILDS], format_size(fsize),\n pkgcnt[STATUS_DOESNTBUILD], buildtime[STATUS_DOESNTBUILD],\n pkgcnt[STATUS_NEW], buildtime[STATUS_NEW],\n pkgcnt[STATUS_DELETED], buildtime[STATUS_DELETED],\n pkgcnt[STATUS_OFFICIAL], buildtime[STATUS_OFFICIAL]))\n print(\"Max build time for a package is \" + str(max_buildtime) + \" min.\")\n print(\"Average build time for a package is \" + str(sum_buildtime // pkgcnt[STATUS_BUILDS]) + \" min.\")\n print(\"Max file size is \" + format_size(max_fsize))\n\n\ndef check_if_root():\n if os.geteuid() != 0:\n exit(\"You need to have root privileges to run this script.\")\n\n\nif __name__ == \"__main__\":\n parser = create_arg_parser()\n args = parser.parse_args()\n\n db = Database(LOCAL_DB)\n\n something_was_done = False\n\n if args.run:\n args.download = True\n args.build_new = True\n args.build_err = True\n\n if args.build_all:\n args.build_new = True\n args.build_err = True\n args.rebuild = True\n\n if args.build_single_pkg:\n if args.build_new or args.build_err or args.rebuild:\n print(\"Cannot --single with other kinds of build\")\n exit(2)\n\n if args.init_db:\n print(\"Cleaning database...\")\n db.create()\n print(\"Done.\")\n something_was_done = True\n\n if args.download:\n print(\"Updating package database...\")\n update_db()\n print(\"Done.\")\n something_was_done = True\n\n if 
args.build_new or args.build_err or args.rebuild:\n        print(\"Start building packages at %s ...\" % get_iso_time())\n        pkgs_dict = db.load()\n        \n        allowed_status = []\n        if args.build_new:\n            allowed_status.append(STATUS_NEW)\n        if args.build_err:\n            allowed_status.append(STATUS_DOESNTBUILD)\n        if args.rebuild:\n            allowed_status.append(STATUS_BUILDS)\n        \n        build_all(pkgs_dict, allowed_status)\n        print(\"Done.\")\n        something_was_done = True\n\n    if args.build_single_pkg:\n        print(\"Start building packages at %s ...\" % get_iso_time())\n        pkgs_dict = db.load()\n        # use .get() so a missing package name doesn't raise KeyError\n        pkg = pkgs_dict.get(args.build_single_pkg)\n        if not pkg:\n            print(\"Package not found on local db: %s\" % args.build_single_pkg)\n            exit(5)\n        # warning: we are not checking if OFFICIAL\n        pkg.build()\n        db.write(pkgs_dict)\n        print(\"Done.\")\n        something_was_done = True\n\n    if args.show_log:\n        db.show()\n        something_was_done = True\n\n    if args.stats:\n        pkgs_dict = db.load()\n        print_statistics(pkgs_dict)\n        something_was_done = True\n\n    if not something_was_done:\n        print(\"Missing arguments.\")\n        parser.print_help()\n        exit(2)\n","repo_name":"luca-vercelli/aur-build","sub_path":"aur-build.py","file_name":"aur-build.py","file_ext":"py","file_size_in_byte":19037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"46357923622","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\n\nimport gym \nfrom gym.envs.registration import register\nimport sys, os\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random as pr\n\n \nif __name__ == '__main__':\n    \n    env = gym.make(\"CartPole-v0\")\n    \n    # Input and output size based on the Env\n    input_size = env.observation_space.shape[0]    # 4\n    output_size = env.action_space.n               # 2\n    learning_rate = 1e-1\n    \n    # These lines establish the feed-forward part of the network used to choose actions\n    X = tf.placeholder(shape=[None,input_size], dtype=tf.float32, name=\"input_x\")    # state input\n    \n    # First layer of weights\n    W1 = tf.get_variable(\"W1\", shape=[input_size, output_size],\n                         initializer=tf.contrib.layers.xavier_initializer())    # weight   # (4, 2)\n    print(W1)    # (4, 2)\n\n    Qpred = tf.matmul(X, W1)    # Out Q prediction\n    \n    # We need to define the parts of the network needed for learning a policy\n    Y = tf.placeholder(shape=[None, output_size], dtype=tf.float32)    # Y label\n\n    # Cost function\n    loss = tf.reduce_sum(tf.square(Y-Qpred))\n    # Learning\n    train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n    \n    # Set Q-learning related parameters\n    gamma = 0.9    # discount factor\n    num_episodes = 200\n    \n    # create lists to contain total rewards and steps per episode\n    rList = [] \n    init = tf.global_variables_initializer()\n    with tf.Session() as sess:\n        sess.run(init)\n        for episode in range(num_episodes):\n            # Reset environment and get first new observation\n            state = env.reset()\n            e = 1. 
/ ((episode/10)+1) # decaying E-greedy\n rAll = 0\n step_count = 0\n terminal = False\n local_loss = []\n \n # The Q-Network training\n while not terminal:\n step_count += 1\n x = np.reshape(state, [1, input_size])\n # Choose an action by greedily (with e chance of random action) from the Q-network\n Qs = sess.run(Qpred, feed_dict={X: x})\n if np.random.rand(1) < e:\n action = env.action_space.sample()\n else:\n action = np.argmax(Qs)\n \n # Get new state and reward from environment\n new_state, reward, terminal, _ = env.step(action)\n if terminal:\n # Update Q, and no Qs+1, since it's a terminal state\n Qs[0,action] = -100\n else:\n x1 = np.reshape(new_state, [1, input_size])\n # Obtain the Q_sq values by feeding the new state through our network\n Qs1 = sess.run(Qpred, feed_dict={X: x1})\n # Update Q\n Qs[0,action] = reward + gamma * np.max(Qs1)\n #print(Qs)\n \n # Train our network using target (Y) and predicted Q (Qpred) values\n sess.run(train, feed_dict={X: x, Y: Qs})\n \n rAll += reward\n state = new_state\n \n rList.append(step_count)\n print(\"Episode: {} steps: {}\".format(episode, step_count))\n # If last 10's avg steps are 500, it's good enough\n if len(rList) > 10 and np.mean(rList[-10:]) > 500:\n break\n \n print(\"Success rate: \"+ str(sum(rList)/num_episodes))\n plt.bar(range(len(rList)), rList, color=\"blue\")\n plt.show()\n \n \n # see our trained network in action\n observation = env.reset()\n reward_sum = 0\n while True:\n env.render()\n \n x = np.reshape(observation, [1, input_size])\n Qs = sess.run(Qpred, feed_dict={X: x})\n action = np.argmax(Qs)\n \n observation, reward, terminal, _ = env.step(action)\n reward_sum += reward\n if terminal:\n print(\"Total score: {}\".format(reward_sum))\n break\n \n","repo_name":"sjk0709/Cartpole-DQN","sub_path":"CartPole_DQN2015_tf140/CartPole-Q-Network-Learning.py","file_name":"CartPole-Q-Network-Learning.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"8135420410","text":"'''\nMonitor the Windows Event Log\n'''\nfrom datetime import datetime\nimport calendar\ntry:\n import wmi\nexcept Exception:\n wmi = None\n\nfrom monagent.collector.checks import AgentCheck\n\nSOURCE_TYPE_NAME = 'event viewer'\nEVENT_TYPE = 'win32_log_event'\n\n\nclass Win32EventLog(AgentCheck):\n\n def __init__(self, name, init_config, agent_config):\n AgentCheck.__init__(self, name, init_config, agent_config)\n self.last_ts = {}\n self.wmi_conns = {}\n\n def _get_wmi_conn(self, host, user, password):\n key = \"%s:%s:%s\" % (host, user, password)\n if key not in self.wmi_conns:\n self.wmi_conns[key] = wmi.WMI(host, user=user, password=password)\n return self.wmi_conns[key]\n\n def check(self, instance):\n if wmi is None:\n raise Exception(\"Missing 'wmi' module\")\n\n host = instance.get('host')\n user = instance.get('username')\n password = instance.get('password')\n tags = instance.get('tags')\n notify = instance.get('notify', [])\n w = self._get_wmi_conn(host, user, password)\n\n # Store the last timestamp by instance\n instance_key = self._instance_key(instance)\n if instance_key not in self.last_ts:\n self.last_ts[instance_key] = datetime.utcnow()\n return\n\n # Find all events in the last check that match our search by running a\n # straight WQL query against the event log\n last_ts = self.last_ts[instance_key]\n q = EventLogQuery(ltype=instance.get('type'),\n user=instance.get('user'),\n source_name=instance.get('source_name'),\n 
log_file=instance.get('log_file'),\n message_filters=instance.get('message_filters', []),\n start_ts=last_ts)\n wql = q.to_wql()\n self.log.debug(\"Querying for Event Log events: %s\" % wql)\n events = w.query(wql)\n\n # Save any events returned to the payload as Datadog events\n for ev in events:\n log_ev = LogEvent(ev, self.agent_config.get('api_key', ''),\n self.hostname, tags, notify)\n\n # Since WQL only compares on the date and NOT the time, we have to\n # do a secondary check to make sure events are after the last\n # timestamp\n if log_ev.is_after(last_ts):\n self.event(log_ev.to_event_dict())\n else:\n self.log.debug('Skipping event after %s. ts=%s' % (last_ts, log_ev.timestamp))\n\n # Update the last time checked\n self.last_ts[instance_key] = datetime.utcnow()\n\n @staticmethod\n def _instance_key(instance):\n ''' Generate a unique key per instance for use with keeping track of\n state for each instance.\n '''\n return '%s' % (instance)\n\n\nclass EventLogQuery(object):\n\n def __init__(self, ltype=None, user=None, source_name=None, log_file=None,\n start_ts=None, message_filters=None):\n self.filters = [\n ('Type', self._convert_event_types(ltype)),\n ('User', user),\n ('SourceName', source_name),\n ('LogFile', log_file)\n ]\n self.message_filters = message_filters or []\n self.start_ts = start_ts\n\n def to_wql(self):\n ''' Return this query as a WQL string. '''\n wql = \"\"\"\n SELECT Message, SourceName, TimeGenerated, Type, User, InsertionStrings\n FROM Win32_NTLogEvent\n WHERE TimeGenerated >= \"%s\"\n \"\"\" % (self._dt_to_wmi(self.start_ts))\n for name, vals in self.filters:\n wql = self._add_filter(name, vals, wql)\n for msg_filter in self.message_filters:\n wql = self._add_message_filter(msg_filter, wql)\n return wql\n\n @staticmethod\n def _add_filter(name, vals, q):\n if not vals:\n return q\n # A query like (X = Y) does not work, unless there are multiple\n # statements inside the parentheses, such as (X = Y OR Z = Q)\n if len(vals) == 1:\n vals = vals[0]\n if not isinstance(vals, list):\n q += '\\nAND %s = \"%s\"' % (name, vals)\n else:\n q += \"\\nAND (%s)\" % (' OR '.join(\n ['%s = \"%s\"' % (name, l) for l in vals]\n ))\n return q\n\n @staticmethod\n def _add_message_filter(msg_filter, q):\n ''' Filter on the message text using a LIKE query. If the filter starts\n with '-' then we'll assume that it's a NOT LIKE filter.\n '''\n if msg_filter.startswith('-'):\n msg_filter = msg_filter[1:]\n q += '\\nAND NOT Message LIKE \"%s\"' % msg_filter\n else:\n q += '\\nAND Message LIKE \"%s\"' % msg_filter\n return q\n\n @staticmethod\n def _dt_to_wmi(dt):\n ''' A wrapper around wmi.from_time to get a WMI-formatted time from a\n time struct.\n '''\n return wmi.from_time(year=dt.year, month=dt.month, day=dt.day,\n hours=dt.hour, minutes=dt.minute, seconds=dt.second, microseconds=0,\n timezone=0)\n\n @staticmethod\n def _convert_event_types(types):\n ''' Detect if we are running on <= Server 2003. 
If so, we should convert\n the EventType values to integers\n '''\n return types\n\n\nclass LogEvent(object):\n\n def __init__(self, ev, api_key, hostname, tags, notify_list):\n self.event = ev\n self.api_key = api_key\n self.hostname = hostname\n self.tags = tags\n self.notify_list = notify_list\n self.timestamp = self._wmi_to_ts(self.event.TimeGenerated)\n\n def to_event_dict(self):\n return {\n 'timestamp': self.timestamp,\n 'event_type': EVENT_TYPE,\n 'api_key': self.api_key,\n 'msg_title': self._msg_title(self.event),\n 'msg_text': self._msg_text(self.event).strip(),\n 'aggregation_key': self._aggregation_key(self.event),\n 'alert_type': self._alert_type(self.event),\n 'source_type_name': SOURCE_TYPE_NAME,\n 'host': self.hostname,\n 'tags': self.tags\n }\n\n def is_after(self, ts):\n ''' Compare this event's timestamp to a give timestamp. '''\n if self.timestamp >= int(calendar.timegm(ts.timetuple())):\n return True\n return False\n\n @staticmethod\n def _wmi_to_ts(wmi_ts):\n ''' Convert a wmi formatted timestamp into an epoch using wmi.to_time().\n '''\n year, month, day, hour, minute, second, microsecond, tz = \\\n wmi.to_time(wmi_ts)\n dt = datetime(year=year, month=month, day=day, hour=hour, minute=minute,\n second=second, microsecond=microsecond)\n return int(calendar.timegm(dt.timetuple()))\n\n @staticmethod\n def _msg_title(event):\n return '%s/%s' % (event.Logfile, event.SourceName)\n\n def _msg_text(self, event):\n msg_text = \"\"\n if event.Message:\n msg_text = \"%s\\n\" % event.Message\n elif event.InsertionStrings:\n msg_text = \"\\n\".join([i_str for i_str in event.InsertionStrings\n if i_str.strip()])\n\n if self.notify_list:\n msg_text += \"\\n%s\" % ' '.join([\" @\" + n for n in self.notify_list])\n\n return msg_text\n\n @staticmethod\n def _alert_type(event):\n event_type = event.Type\n # Convert to a Datadog alert type\n if event_type == 'Warning':\n return 'warning'\n elif event_type == 'Error':\n return 'error'\n return 'info'\n\n @staticmethod\n def _aggregation_key(event):\n return event.SourceName\n","repo_name":"ghessler/mon-agent","sub_path":"monagent/collector/checks_d/win32_event_log.py","file_name":"win32_event_log.py","file_ext":"py","file_size_in_byte":7755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"30314973440","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"Demo\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 10000\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True))\n\nprocess.load(\"Configuration.StandardSequences.GeometryRecoDB_cff\")\n\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\n\nfrom Configuration.AlCa.GlobalTag import GlobalTag\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\ninputFilesMiniAOD = cms.untracked.vstring('/store/data/Run2017B/SingleElectron/MINIAOD/31Mar2018-v1/90000/FC89D712-AF37-E811-AD13-008CFAC93F84.root')\n\n# Set up input/output depending on the format\n# You can list here either AOD or miniAOD files, but not both types mixed\n#\nuseAOD = False\nif useAOD == True :\n inputFiles = inputFilesAOD\n outputFile = \"electron_ntuple.root\"\n print(\"AOD input files are used\")\nelse :\n inputFiles = inputFilesMiniAOD\n outputFile = \"electron_ntuple_mini.root\"\n print(\"MiniAOD input files are used\")\n\nprocess.source = cms.Source(\"PoolSource\", fileNames = inputFiles 
)\n\n#\n# Set up electron ID (VID framework)\n#\n\nfrom PhysicsTools.SelectorUtils.tools.vid_id_tools import *\n# turn on VID producer, indicate data format  to be\n# DataFormat.AOD or DataFormat.MiniAOD, as appropriate \nif useAOD == True :\n    dataFormat = DataFormat.AOD\nelse :\n    dataFormat = DataFormat.MiniAOD\n\nswitchOnVIDElectronIdProducer(process, dataFormat)\n# define which IDs we want to produce\nmy_id_modules = ['RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_Fall17_94X_V2_cff',\n                 'RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_Fall17_94X_V1_cff']\n\n#add them to the VID producer\nfor idmod in my_id_modules:\n    setupAllVIDIdsInModule(process,idmod,setupVIDElectronSelection)\n\n#\n# Configure the module\n#\nprocess.demo = cms.EDAnalyzer('DemoAnalyzer',\n                              electrons = cms.InputTag(\"slimmedElectrons\"),\n\n                              # ID decisions (common to all formats)\n                              #\n                              # all IDs listed below are available given the content of \"my_id_modules\" defined above.\n                              # only one is exercised for this example.\n                              #\n\n                              #eleIdMapLoose = cms.InputTag(\"egmGsfElectronIDs:cutBasedElectronID-Fall17-94X-V2-loose\"),\n                              #eleIdMapMedium = cms.InputTag(\"egmGsfElectronIDs:cutBasedElectronID-Fall17-94X-V2-medium\"),\n                              #eleIdMapTight = cms.InputTag(\"egmGsfElectronIDs:cutBasedElectronID-Fall17-94X-V2-tight\"),\n                              eleIdMapLoose = cms.InputTag(\"egmGsfElectronIDs:cutBasedElectronID-Fall17-94X-V1-loose\"),\n                              eleIdMapMedium = cms.InputTag(\"egmGsfElectronIDs:cutBasedElectronID-Fall17-94X-V1-medium\"),\n                              eleIdMapTight = cms.InputTag(\"egmGsfElectronIDs:cutBasedElectronID-Fall17-94X-V1-tight\")\n)\n\n\n\nprocess.TFileService = cms.Service(\"TFileService\",\nfileName = cms.string(outputFile)\n)\n\nprocess.p = cms.Path(process.egmGsfElectronIDSequence*process.demo)\n","repo_name":"saumyaphor4252/CMSSW_Exercises","sub_path":"Elelctron_VID_Exercise/python/ConfFile_cfg.py","file_name":"ConfFile_cfg.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"2741647552","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport time\nfrom multiprocessing import Pool, cpu_count\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom selenium import webdriver\n\nHEADERS = {\n    'X-Requested-With': 'XMLHttpRequest',\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '\n                  '(KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\n    'Referer': \"https://www.fanbus.pw/\"\n}\n\nDIR_PATH = r\"C:\\dmmbus\"  # local folder where downloaded images are saved\nDMM_BUS_PATH = \"https://www.fanbus.pw/\"\n\n\ndef create_folder(path, name):\n    # create the folder if it does not exist yet\n    folder_path = os.path.join(path, name)\n    if not os.path.exists(folder_path):\n        os.makedirs(folder_path)\n\n\ndef save_pic(pic_src, pic_title):\n    \"\"\"\n    Download the picture to the local folder\n    \"\"\"\n    try:\n        # headers/timeout must be passed as keyword arguments; passed\n        # positionally they would be misread as query params and break the call\n        img = requests.get(pic_src, headers=HEADERS, timeout=10)\n        img_name = pic_title + \".jpg\"\n        with open(img_name, 'ab') as f:\n            f.write(img.content)\n        print(img_name)\n    except Exception as e:\n        print(e)\n\n\n# write a text file\ndef save_file(title, text):\n    try:\n        file_name = title.replace('\\n', '').replace('\\t', '').replace(' ', '') + '.txt'\n        with open(file_name.encode('utf-8').decode('utf-8'), 'w') as f:\n            f.write(text)\n    except Exception as e:\n        print('Exception: ' + str(e))\n\n\ndef make_dir(folder_name):\n    \"\"\"\n    Create a folder for the picture set and change into it\n    \"\"\"\n    path = os.path.join(DIR_PATH, folder_name)\n    # if the folder already exists, the set was crawled before; skip it to\n    # avoid duplicates and save time. Returns True when newly created, False otherwise\n    if not os.path.exists(path):\n        os.makedirs(path)\n        print(path)\n        os.chdir(path)\n        return True\n
    print(\"Folder has existed!\")\n    return False\n\n\ndef delete_empty_dir(save_dir):\n    \"\"\"\n    If the program was interrupted part-way, a folder may already exist while\n    its pictures were never downloaded; since the folder exists, that set would\n    be skipped on the next run, so empty folders must be removed first\n    \"\"\"\n    if os.path.exists(save_dir):\n        if os.path.isdir(save_dir):\n            for d in os.listdir(save_dir):\n                path = os.path.join(save_dir, d)  # build the next-level path\n                if os.path.isdir(path):\n                    delete_empty_dir(path)  # recursively delete empty folders\n        if not os.listdir(save_dir):\n            os.rmdir(save_dir)\n            print(\"remove the empty dir: {}\".format(save_dir))\n    else:\n        print(\"Please start your performance!\")  # nothing to clean up yet\n\n\ndef selenium_request(url):\n    # C:\\Program Files (x86)\\Google\\Chrome\\Application\\chromedriver.exe\n    # browser = webdriver.Chrome('C:\\Program Files (x86)\\Google\\Chrome\\Application\\chromedriver.exe')\n    # browser = webdriver.Chrome('C:\\Users\\Administrator\\AppData\\Local\\Google\\Chrome\\Application\\chromedriver.exe')\n    browser = webdriver.Chrome(r'C:\\Users\\dalian\\AppData\\Local\\Google\\Chrome\\Application\\chromedriver.exe')\n    # browser.maximize_window()  # maximize the browser window\n    # minimize the browser window\n    # browser.minimize_window()\n    browser.set_window_size(2, 2)\n    browser.get(url)\n    return browser.page_source\n\n\ndef urls_crawler(url):\n    \"\"\"\n    Crawler entry point; does the main scraping work\n    \"\"\"\n    try:\n        # fetch the HTML document of the target URL (headers/timeout as keywords)\n        html_doc = requests.get(url, headers=HEADERS, timeout=10).text\n        # parse the HTML\n        folder_name = BeautifulSoup(html_doc, 'lxml')\n        waterfall = folder_name.find('div', 'waterfall')\n        all_waterfall_box = waterfall.find_all('a')\n        for water_item in all_waterfall_box:\n            href = water_item.get('href')\n            img = water_item.find('img')\n            img_title = img.get('title')\n            title = water_item.find('date').text\n            print(href)\n            print(img_title)\n            print(title)\n            if make_dir(title):\n\n                # enter the second-level page here\n                # https://www.dmmbus.us/SSNI-473\n                # html_doc = requests.get(href, headers=HEADERS, timeout=10).text\n                html_doc = selenium_request(href)\n                # print html_doc\n\n                second_folder_name = BeautifulSoup(html_doc, 'lxml')\n\n                # get the sample images ('樣品圖像' is the literal heading text on the page)\n                h4_box = second_folder_name.find_all('h4')\n                for h4_item in h4_box:\n                    h4_item_string = h4_item.text\n                    if '樣品圖像' == h4_item_string:\n                        row_movie = second_folder_name.find('div', class_='row movie')\n                        img = row_movie.find('img')\n                        sec_img_url = img.get('src')\n                        sec_img_title = img.get('title')\n                        # save the cover image\n                        save_pic(sec_img_url, sec_img_title)\n                        sample_waterfall = second_folder_name.find('div', id='sample-waterfall')\n                        all_sample_box = sample_waterfall.find_all('a')\n                        for sample_item in all_sample_box:\n                            href = sample_item.get('href')\n                            pic_title = sample_item.find('img').get('title')\n                            # save the full-size image\n                            save_pic(href, pic_title)\n                        break\n\n                # get the magnet links\n                movie_table = second_folder_name.find('table', id='magnet-table')\n                movie_box = movie_table.find_all('a')\n                print(movie_box)\n                for movie_item in movie_box:\n                    movie_href = movie_item.get('href')\n                    print(movie_href)\n                    movie_text = movie_item.text\n                    print(movie_text)\n                    save_file(movie_text, movie_href)\n                break\n    except Exception as e:\n        print('Exception: ' + str(e))\n\n\nif __name__ == \"__main__\":\n    # the {cnt} placeholder needs a keyword argument; .format(cnt) would raise\n    urls = [DMM_BUS_PATH + 'page/{cnt}'.format(cnt=cnt)\n            for cnt in range(1, 2)]\n    pool = Pool(cpu_count())\n    try:\n        delete_empty_dir(DIR_PATH)\n        pool.map(urls_crawler, urls)\n\n    except Exception:\n        time.sleep(3)\n        delete_empty_dir(DIR_PATH)\n        pool.map(urls_crawler, urls)\n","repo_name":"wenyuling24/dmmbus_crawer","sub_path":"dmmbus_img.py","file_name":"dmmbus_img.py","file_ext":"py","file_size_in_byte":6198,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"32675026048","text":"import requests, re, time, os, 
csv\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nimport pandas as pd\r\n\r\n# Initialize lists\r\naddress=[]\r\nneighbor=[]\r\narea=[]\r\nroom=[]\r\nbath=[]\r\npark=[]\r\nprice=[]\r\n\r\n# Get the number of pages to extract information\r\npages_number=int(input('How many pages ? '))\r\ntic = time.time()\r\n\r\n# Configure chromedriver\r\nchromedriver = \"./chromedriver\"\r\nos.environ[\"webdriver.chrome.driver\"] = chromedriver\r\ndriver = webdriver.Chrome(chromedriver)\r\n\r\n# Create a folder to save downloaded HTML pages\r\ndirName = 'SavedPages'\r\ntry:\r\n os.mkdir(dirName)\r\n print(\"Directory \" , dirName , \" Created \") \r\nexcept FileExistsError:\r\n print(\"Directory \" , dirName , \" already exists\")\r\n\r\n# Loop through the website's pages\r\nfor page in range(1,pages_number+1):\r\n \r\n # Get Link and Change page number - Edit if necessary !\r\n link = 'https://www.vivareal.com.br/venda/ceara/fortaleza/?pagina='+str(page)+'#onde=BR-Ceara-NULL-Fortaleza&tipos=apartamento_residencial'\r\n \r\n driver.get(link)\r\n time.sleep(2)\r\n data = driver.execute_script(\"return document.getElementsByTagName('html')[0].innerHTML\")\r\n soup_complete_source = BeautifulSoup(data.encode('utf-8'), \"lxml\")\r\n \r\n soup = soup_complete_source.find(class_='results-list js-results-list') \r\n \r\n # download page html\r\n with open('SavedPages//site'+str(page)+'.html', 'w', encoding='utf-16') as outf:\r\n outf.write(str(soup_complete_source))\r\n\r\n # Web-Scraping\r\n for line in soup.findAll(class_=\"property-card__main-content\"):\r\n # Get Full Address and Neighborhood\r\n try:\r\n full_address=line.find(class_=\"property-card__address js-property-card-address js-see-on-map\").text.strip()\r\n address.append(full_address) #Get all address\r\n if full_address[:3]=='Rua' or full_address[:7]=='Avenida' or full_address[:8]=='Travessa' or full_address[:7]=='Alameda':\r\n neighbor_first=full_address.strip().find('-')\r\n neighbor_second=full_address.strip().find(',', neighbor_first)\r\n if neighbor_second!=-1:\r\n neighbor_text=full_address.strip()[neighbor_first+2:neighbor_second]\r\n neighbor.append(neighbor_text) #Get all Neighborhood - Correct formatting\r\n else: # Neighbor can not be found\r\n neighbor_text='-'\r\n neighbor.append(neighbor_text) #Get all Neighborhood - Correct formatting\r\n else:\r\n get_comma=full_address.find(',')\r\n if get_comma!=-1:\r\n neighbor_text=full_address[:get_comma]\r\n neighbor.append(neighbor_text) #Get all Neighborhood - Problematic formatting \r\n else:\r\n get_hif=full_address.find('-')\r\n neighbor_text=full_address[:get_hif]\r\n neighbor.append(neighbor_text)\r\n \r\n # Get Apto's Area \r\n full_area=line.find(class_=\"property-card__detail-value js-property-card-value property-card__detail-area js-property-card-detail-area\").text.strip()\r\n area.append(full_area)\r\n\r\n # Get Apto's Rooms\r\n full_room=line.find(class_=\"property-card__detail-item property-card__detail-room js-property-detail-rooms\").text.strip()\r\n full_room=full_room.replace(' ','')\r\n full_room=full_room.replace('\\n','')\r\n full_room=full_room.replace('Quartos','')\r\n full_room=full_room.replace('Quarto','')\r\n room.append(full_room) #Get apto's rooms\r\n\r\n # Get Apto's Bathrooms\r\n full_bath=line.find(class_=\"property-card__detail-item property-card__detail-bathroom js-property-detail-bathroom\").text.strip() \r\n full_bath=full_bath.replace(' ','')\r\n full_bath=full_bath.replace('\\n','')\r\n 
full_bath=full_bath.replace('Banheiros','')\r\n full_bath=full_bath.replace('Banheiro','')\r\n bath.append(full_bath) #Get apto's Bathrooms\r\n\r\n # Get Apto's parking lot\r\n full_park=line.find(class_=\"property-card__detail-item property-card__detail-garage js-property-detail-garages\").text.strip() \r\n full_park=full_park.replace(' ','')\r\n full_park=full_park.replace('\\n','')\r\n full_park=full_park.replace('Vagas','')\r\n full_park=full_park.replace('Vaga','')\r\n park.append(full_park) #Get apto's parking lot\r\n\r\n # Get Apto's price\r\n full_price=line.find(class_=\"property-card__price js-property-card-prices js-property-card__price-small\").text.strip() \r\n full_price=full_price.replace(' ','')\r\n full_price=full_price.replace('\\n','')\r\n full_price=full_price.replace('R$','')\r\n full_price=full_price.replace('.','')\r\n full_price=full_price.replace('Apartirde','')\r\n full_price=full_price.replace('SobConsulta','-')\r\n price.append(full_price) #Get apto's parking lot\r\n\r\n except:\r\n continue\r\n \r\n# Close chromedriver\r\ndriver.quit()\r\n\r\n# Save as a CSV file\r\nfor i in range(0,len(neighbor)):\r\n combinacao=[address[i],neighbor[i],area[i],room[i],bath[i],park[i],price[i]]\r\n df=pd.DataFrame(combinacao)\r\n with open('VivaRealData.csv', 'a', encoding='utf-16', newline='') as f:\r\n df.transpose().to_csv(f, encoding='iso-8859-1', header=False)\r\n\r\n# Execution time\r\ntoc = time.time()\r\nget_time=round(toc-tic,3)\r\nprint('Finished in ' + str(get_time) + ' seconds')\r\nprint(str(len(price))+' results!')","repo_name":"luiseduardobr1/VivaRealWebScraping","sub_path":"vivaWebScraping.py","file_name":"vivaWebScraping.py","file_ext":"py","file_size_in_byte":5667,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"86"} +{"seq_id":"72948362204","text":"class Person():\r\n\tdef __init__(self, first_name, last_name, *args, **kwargs):\r\n\t\tself.name_attrs = ['first_name', 'middle_name', 'last_name', 'suffix']\r\n\t\tself.first_name = first_name\r\n\t\tself.last_name = last_name\r\n\t\tself.__dict__.update(kwargs)\r\n\r\n\tdef has_suffix(self):\r\n\t\tif hasattr(self, 'suffix'):\r\n\t\t\treturn bool(self.suffix)\r\n\t\telse:\r\n\t\t\treturn False\r\n\r\n\tdef print_legal_name(self):\r\n\t\tnames_parts = [getattr(self, x) for x in self.name_attrs if hasattr(self, x) and getattr(self, x)]\r\n\t\tfull_name = ' '.join(names_parts)\r\n\t\tprint('Legal name: %s' % full_name)\r\n\r\n\tdef print_age(self):\r\n\t\ttry:\r\n\t\t\tprint(self.age)\r\n\t\texcept AttributeError as err:\r\n\t\t\tprint(err)\r\n\r\ndef run_example_person():\r\n\tme = Person('Nicholas', 'Stanford', middle_name='John', age=28, title='The Awesome')\r\n\tme.name_attrs = ['title'] + me.name_attrs\r\n\tme.print_legal_name()\r\n\tprint('Has suffix: %s' % me.has_suffix())\r\n\tme.print_age()\r\n\r\nif __name__ == '__main__':\r\n\trun_example_person()\r\n","repo_name":"njs08008/miscelaneous","sub_path":"person_class.py","file_name":"person_class.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"36197395441","text":"# -*- coding: utf8 -*-\n\nfrom __future__ import unicode_literals\n\"\"\"\nDjango settings for idril project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n\n# Build paths inside the 
project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n\n'''\nGives the absolute path of x, relative to the root of the project.\nWorks when we execute \"python manage.py\", because manage.py is in the root and x is relative to the root.\n'''\nABSOLUTE_PATH = lambda x: os.path.abspath(os.path.dirname(x))\n\nMEDIA_ROOT = ABSOLUTE_PATH('media/')\nMEDIA_URL = '/media/'\n\nSTATIC_URL = '/static/'\n\nLOGIN_URL = '/member/login'\n\nSTATIC_ROOT = '/'\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '!$$!bsp86f7_7xyou@&&c*go8&405p$e$ffs0gdnw@o4e!9!2v'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\nTEMPLATE_DEBUG = True\nALLOWED_HOSTS = []\n\nTEMPLATE_DIRS = (\n 'templates/'\n)\n\nTINYMCE_DEFAULT_CONFIG = {\n 'plugins': \"table,spellchecker,paste,searchreplace\",\n 'theme': \"simple\",\n 'cleanup_on_startup': True,\n 'custom_undo_redo_levels': 10,\n}\n \n# Application definition\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'unique-snowflake'\n }\n}\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.formtools',\n 'base',\n 'member',\n 'project',\n 'forum',\n 'mangopay_idril',\n 'sekizai',\n 'tinymce',\n 'south',\n 'paypal.standard.pdt',\n 'payment',\n 'bleach',\n 'downtime',\n 'mangopaysdk',\n 'django_iban',\n 'celery',\n 'sphinx',\n 'model_utils',\n 'storages',\n 'mangopay',\n 'manager',\n 'legals',\n)\n\nMIDDLEWARE_CLASSES = (\n 'downtime.middleware.DowntimeMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.cache.UpdateCacheMiddleware',\n)\n\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.static\",\n \"django.core.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n \"sekizai.context_processors.sekizai\",\n )\n\nAUTHENTICATION_BACKENDS = (\n 'member.backends.EmailLoginBackend',\n #'django.contrib.auth.backends.ModelBackend',\n )\n\nROOT_URLCONF = 'idril.urls'\n\nWSGI_APPLICATION = 'idril.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n \t## POSTGRESQL ##\n # 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n # 'NAME': 'idrildbpsql',\n # 'HOST': '127.0.0.1',\n # 'USER': 'usrpsql',\n # 'PASSWORD': 'usrpsql',\n # 'PORT':'5432',\n\t\n # MYSQL ##\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'idrildb',\n 'HOST': '127.0.0.1',\n 'USER': 'root',\n 'PASSWORD': '',\n 'PORT':'3306',\n }\n}\n\nDOWNTIME_EXEMPT_PATHS = (\n '/admin',# Path not down when maintenance\n)\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 
'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Payment settings\n\nPAYPAL_RECEIVER_EMAIL = \"PAYPAL_RECEIVER_EMAIL\"\nPAYPAL_IDENTITY_TOKEN = \"PAYPAL_IDENTITY_TOKEN\"\n\n# Set l'adresse mail d'Idril\n\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_HOST_USER = ''\nEMAIL_HOST_PASSWORD = '???'\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\n\n\n# MangoPay\n\nMANGOPAY_CLIENT_ID = \"\"\nMANGOPAY_PASSPHRASE = \"\"\nMANGOPAY_BASE_URL = \"https://api.sandbox.mangopay.com\"\nMANGOPAY_DEBUG_MODE = 1\nMANGOPAY_PAGE_DEFAULT_STORAGE = True\n\nfrom django.contrib.auth.models import User\nUser._meta.get_field_by_name('email')[0]._unique = True\n\nDATE_INPUT_FORMATS = ('%d/%m/%Y','%Y/%m/%d')\n","repo_name":"Xodia/Idril","sub_path":"idril/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"38181114424","text":"import numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom e2cnn import gspaces\nfrom e2cnn import nn as e2nn\nfrom e2cnn.nn import GeometricTensor\n\nfrom models.helpers import EquivariantDebugReturn, StandardReturn, StandardReturnWithAuxInfo, get_solver, get_deq_layer\nfrom modules.pooling import GroupReducedMaxPooling\nfrom modules.DEQ import DEQLayer\n\n\nclass Planner(nn.Module):\n \"\"\"\n Implementation of the Symmetric Value Iteration Network, with DEQ layer\n \"\"\"\n\n def __init__(self, num_orient, num_actions, args):\n super(Planner, self).__init__()\n self.args = args\n\n self.num_orient = num_orient\n self.num_actions = num_actions\n self.enable_e2_pi = args.enable_e2_pi\n\n self.l_q = args.l_q\n self.l_h = args.l_h\n self.k = args.k\n self.f = args.f\n self.padding_mode = 'circular' if 'wrap' in args.mechanism else 'zeros'\n\n self.group = args.group\n # > Init symmetry group space\n assert self.group in ['c2', 'c4', 'c8', 'c16', 'd2', 'd4', 'd8']\n self.rot_num = int(self.group[1:])\n self.enable_reflection = 'd' in self.group # for dihedral group\n self.group_size = self.rot_num if not self.enable_reflection else (self.rot_num * 2)\n\n if not self.enable_reflection:\n self.r2_act = gspaces.Rot2dOnR2(N=self.rot_num)\n else:\n self.r2_act = gspaces.FlipRot2dOnR2(N=self.rot_num)\n\n print('> Group:', self.group)\n print('> Group space:', self.r2_act)\n\n self._init_layers(args, num_actions, num_orient)\n\n @property\n def residuals_forward(self):\n return self.deq_layer.residuals_forward\n\n @property\n def residuals_backward(self):\n return self.deq_layer.residuals_backward\n\n def _get_repr(self, name):\n name2repr = {\n 'trivial': self.r2_act.trivial_repr,\n 'regular': self.r2_act.regular_repr,\n }\n\n # > may also quotient repr for latent layer (same as action output); not verified\n if name == 'quotient':\n _, repr_out_pi, _ = self._get_action_repr()\n name2repr.update({\n 'quotient': repr_out_pi\n })\n\n return name2repr[name], name2repr[name].size\n\n def _get_action_repr(self):\n\n if self.num_actions == 4 and self.rot_num == 4:\n if self.enable_reflection:\n # > quotient out reflections and keep rotations\n repr_out_pi = self.r2_act.quotient_repr(subgroup_id=(0, 1))\n else:\n repr_out_pi = self.r2_act.regular_repr\n reprs_out_pi = 1 * [repr_out_pi]\n\n elif self.num_actions == 4 and self.rot_num in [8, 16]:\n if self.enable_reflection:\n repr_out_pi = self.r2_act.quotient_repr(subgroup_id=(0, self.rot_num // self.num_actions))\n else:\n repr_out_pi = self.r2_act.quotient_repr(subgroup_id=self.rot_num // self.num_actions)\n 
reprs_out_pi = 1 * [repr_out_pi]\n\n elif self.num_actions == 8 and self.rot_num == 4:\n # > TODO e.g., C4/D8 for 8 actions\n raise NotImplementedError\n\n elif self.num_actions == 8 and self.rot_num == 8:\n raise NotImplementedError\n\n else:\n raise ValueError\n\n field_type = e2nn.FieldType(self.r2_act, reprs_out_pi)\n\n return reprs_out_pi, repr_out_pi, field_type\n\n def _init_layers(self, args, num_actions, num_orient):\n \"\"\"\n 1. define fiber repr types\n 2. decide sizes of fiber reprs\n 3. update conv layer sizes accordingly\n 4. define steerable conv layers\n \"\"\"\n\n num_in_h = num_orient + 1\n num_out_h = args.l_h # trivial repr, no division by self.group_size\n num_out_r = num_orient # reward per orientation\n num_in_q = num_out_r * 2 # concat input for q and r\n\n if args.divide_by_size:\n # > for regular repr - can choose to divide by group size, to keep intermediate embedding sizes the same\n num_out_q = args.l_q * num_orient // self.group_size\n else:\n # > or don't divide, to keep layers' parameter number the same\n num_out_q = args.l_q * num_orient\n\n # > Define repr type for steerable conv layers\n # > + Store repr size\n repr_in_h, self.size_repr_in_h = self._get_repr(args.repr_in_h)\n repr_out_h, self.size_repr_out_h = self._get_repr(args.repr_out_h)\n repr_out_r, self.size_repr_out_r = self._get_repr(args.repr_out_r)\n repr_in_q, self.size_repr_in_q = repr_out_r, self.size_repr_out_r # R out = Q in\n repr_out_q, self.size_repr_out_q = self._get_repr(args.repr_out_q)\n\n # > only generate repr for action space when\n if self.enable_e2_pi:\n reprs_out_pi, repr_out_pi, self.feat_out_pi = self._get_action_repr()\n\n self.repr_out_r = repr_out_r\n\n # > Provide output size\n self.size_out_q = num_out_q * self.group_size\n # TODO fix: no another group size for Q, replace with num_orient instead\n\n # > Define feature (Note: use trivial repr for input space)\n self.feat_in_h = e2nn.FieldType(self.r2_act, num_in_h * [repr_in_h])\n self.feat_out_h = e2nn.FieldType(self.r2_act, num_out_h * [repr_out_h])\n self.feat_out_r = e2nn.FieldType(self.r2_act, num_out_r * [repr_out_r])\n self.feat_in_q = e2nn.FieldType(self.r2_act, num_in_q * [repr_in_q])\n self.feat_out_q = e2nn.FieldType(self.r2_act, num_out_q * [repr_out_q])\n\n # > Define E(2) Conv\n self.h_conv = e2nn.R2Conv(self.feat_in_h, self.feat_out_h,\n kernel_size=3, padding=1,\n padding_mode=self.padding_mode,\n bias=True)\n self.r_conv = e2nn.R2Conv(self.feat_out_h, self.feat_out_r,\n kernel_size=1, padding=0,\n bias=False)\n self.q_conv = e2nn.R2Conv(self.feat_in_q, self.feat_out_q,\n kernel_size=self.f, padding=int((self.f - 1) // 2),\n padding_mode=self.padding_mode,\n bias=False)\n\n # > Use customized (group channel wise) max pooling\n self.max_pool = GroupReducedMaxPooling(in_type=self.feat_out_q, out_repr=repr_out_r)\n\n # > Output policy layer\n if self.enable_e2_pi:\n self.pi_r2conv = e2nn.R2Conv(self.feat_out_q, self.feat_out_pi, kernel_size=1, padding=0, bias=False)\n print(f'> Enable E2 policy! 
Feature type: {self.feat_out_pi}, fiber group: {self.feat_out_pi.fibergroup}')\n else:\n self.pi_conv2d = nn.Conv2d(self.size_out_q, num_actions,\n kernel_size=(1, 1), stride=1, padding=0, bias=False)\n\n self.sm = nn.Softmax2d() # nn.Softmax(dim=1)\n\n # > Define VI layer\n self.vi_layer = VILayer(\n args=self.args, feat_out_r=self.feat_out_r,\n q_conv=self.q_conv, max_pool=self.max_pool\n )\n\n # > Define Deep Equilibrium Model for solving fixed point - fixed-point layer\n self.deq_layer = get_deq_layer(args, self.vi_layer)\n\n def deq_raw(self, x):\n deq = self.deq_layer.double()\n z, _ = deq(x)\n return z\n\n def forward(self, map_design, goal_map, debug=False):\n batch_size = map_design.size(0)\n maze_size = map_design.size(-1)\n\n x = torch.cat([map_design, goal_map], 1)\n device = x.device\n\n x_geo = e2nn.GeometricTensor(x, self.feat_in_h)\n\n # > value iteration\n q_geo, r_geo, v_geo, jac_loss = self._value_iterate(x_geo, device)\n info = {\n 'jac_loss': jac_loss\n }\n\n # > extract action from policy\n logits, logits_geo = self._value2logits(q_geo)\n logits, probs = self._process_logits(logits, batch_size, maze_size)\n\n if not debug:\n return StandardReturnWithAuxInfo(logits, probs, info)\n else:\n return EquivariantDebugReturn(logits, probs, logits_geo, q_geo, v_geo, r_geo)\n\n def _value_iterate(self, x_geo, device):\n h_trivial = self.h_conv(x_geo)\n r_geo = self.r_conv(h_trivial)\n\n # > put unwrapped PyTorch tensor of r; v initialized inside\n r = r_geo.tensor\n # > DEQ layer\n v_out, jac_loss = self.deq_layer(r)\n\n # > wrap again\n v_geo = e2nn.GeometricTensor(v_out, self.feat_out_r)\n # > additional execution\n rv_geo = e2nn.tensor_directsum([r_geo, v_geo])\n q_geo = self.q_conv(rv_geo)\n\n return q_geo, r_geo, v_geo, jac_loss\n\n def _value2logits(self, q_geo):\n # > Use equivariant policy or not (normal 2D conv)\n if self.enable_e2_pi:\n logits_geo = self.pi_r2conv(q_geo)\n logits = logits_geo.tensor\n else:\n logits = self.pi_conv2d(q_geo.tensor)\n logits_geo = None # `e2nn.GeometricTensor(logits, self.feat_out_pi)`\n\n return logits, logits_geo\n\n def _process_logits(self, logits, batch_size, maze_size):\n logits = logits.view(batch_size, self.num_orient, self.num_actions, maze_size, maze_size)\n\n # > Reshape for probs & Normalize over actions\n logits_reshape = logits.view(-1, self.num_actions, maze_size, maze_size)\n probs = self.sm(logits_reshape)\n\n # > Note: group repr & action space need to match (be compatible group action: G x A -> A)\n # > Reshape to output dimensions\n probs = probs.view(batch_size, self.num_orient, self.num_actions, maze_size, maze_size)\n logits = torch.transpose(logits, 1, 2).contiguous()\n probs = torch.transpose(probs, 1, 2).contiguous()\n\n return StandardReturn(logits, probs)\n\n def get_equivariance_error(self, map_design, goal_map, rand_input=False, atol: float = 1e-6, rtol: float = 1e-5):\n batch_size = map_design.size(0)\n maze_size = map_design.size(-1)\n device = map_design.device\n\n if not rand_input:\n x = torch.cat([map_design, goal_map], 1)\n else:\n x = torch.randn(batch_size, 2, maze_size, maze_size)\n\n x_geo = e2nn.GeometricTensor(x, self.feat_in_h)\n\n # > forward f(x)\n q_geo, r_geo, v_geo, _ = self._value_iterate(x_geo, device)\n _, logits_geo = self._value2logits(q_geo)\n\n # > compute f(g.x) and g.f(x)\n # > Note: e2nn.GroupPooling uses .transform_fibers(e), while .transform(e) should be used here\n ee_dict = {}\n for element in self.r2_act.fibergroup.testing_elements():\n # > f(g.x)\n x_geo_gx = 
x_geo.transform(element)\n q_geo_fgx, r_geo_fgx, v_geo_fgx, _ = self._value_iterate(x_geo_gx, device)\n _, logits_geo_fgx = self._value2logits(q_geo_fgx)\n\n # > g.f(x)\n q_geo_gfx, r_geo_gfx, v_geo_gfx, logits_geo_gfx = (\n q_geo.transform(element),\n r_geo.transform(element),\n v_geo.transform(element),\n logits_geo.transform(element)\n )\n\n q_err = (q_geo_fgx.tensor - q_geo_gfx.tensor).detach().numpy()\n r_err = (r_geo_fgx.tensor - r_geo_gfx.tensor).detach().numpy()\n v_err = (v_geo_fgx.tensor - v_geo_gfx.tensor).detach().numpy()\n logits_err = (logits_geo_fgx.tensor - logits_geo_gfx.tensor).detach().numpy()\n\n q_err = np.abs(q_err).reshape(-1)\n r_err = np.abs(r_err).reshape(-1)\n v_err = np.abs(v_err).reshape(-1)\n logits_err = np.abs(logits_err).reshape(-1)\n\n print(f'EEs of element {element}:', q_err.mean(), r_err.mean(), v_err.mean(), logits_err.mean())\n\n ee_dict[element] = {\n 'q': q_err.mean(),\n 'r': r_err.mean(),\n 'v': v_err.mean(),\n 'logits': logits_err.mean()\n }\n\n assert torch.allclose(logits_geo_fgx.tensor, logits_geo_gfx.tensor, atol=atol, rtol=rtol), \\\n f'EE of element {element} is too high: {logits_err.mean()}'\n\n return ee_dict\n\n\nclass VILayer(nn.Module):\n def __init__(self, args, feat_out_r, q_conv, max_pool):\n super().__init__()\n self.args = args\n\n self.feat_out_r = feat_out_r\n self.q_conv = q_conv\n self.max_pool = max_pool\n\n def forward(self, v, r):\n \"\"\"\n Note: convert to GeometricTensor internally\n \"\"\"\n\n v_geo = GeometricTensor(v, type=self.feat_out_r)\n r_geo = GeometricTensor(r, type=self.feat_out_r)\n\n # > concat and convolve with \"transition probability\"\n rv_geo = e2nn.tensor_directsum([r_geo, v_geo])\n q_geo = self.q_conv(rv_geo)\n\n # > max over group channel\n # > Q: batch_size x (|G| * #repr) x width x height\n # > V: batch_size x (|G| * 1) x width x height\n v_geo = self.max_pool(q_geo)\n\n v = v_geo.tensor\n\n return v\n\n\nclass GeoVILayer(nn.Module):\n def __init__(self, args, repr_out_r, q_conv, max_pool):\n super().__init__()\n self.args = args\n\n self.repr_out_r = repr_out_r\n self.q_conv = q_conv\n self.max_pool = max_pool\n\n def forward(self, v_geo, r_geo):\n \"\"\"\n Note: take geometric tensors as input and output\n \"\"\"\n\n # > concat and convolve with \"transition probability\"\n rv_geo = e2nn.tensor_directsum([r_geo, v_geo])\n q_geo = self.q_conv(rv_geo)\n\n # > max over group channel\n # > Q: batch_size x (|G| * #repr) x width x height\n # > V: batch_size x (|G| * 1) x width x height\n v_geo = self.max_pool(q_geo)\n\n return v_geo\n\n","repo_name":"zhao0625/DiffPlan","sub_path":"src/models/DE-SymVIN.py","file_name":"DE-SymVIN.py","file_ext":"py","file_size_in_byte":13753,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"86"} +{"seq_id":"32764212999","text":"import discord\nfrom discord.ext import commands\nfrom core.classes import Cog_Extension\nimport random\n\nclass Main(Cog_Extension):\n @commands.command() \n async def 來抽卡(self, ctx, pickup: int, Probability: float, people: int, times: int):\n if pickup > 0 and pickup <= 3 and Probability > 0 and Probability <= 100 and people > 0 and people <= 5000 and times > 0 and times <= 1000 :\n sum = 0\n Probability = Probability/100\n pool = ['other','PU1','PU2','PU3']\n if pickup == 1:\n weights = [1-Probability,Probability,0,0]\n elif pickup == 2:\n weights = [1-Probability*2,Probability,Probability,0]\n else :\n weights = [1-Probability*3,Probability,Probability,Probability]\n\n for person in range(people):\n 
count = [0,0,0,0]\n                cards = random.choices(pool, weights, k=times)\n                for card in cards:\n                    if card == 'other':\n                        count[0] += 1\n                    elif card == 'PU1':\n                        count[1] += 1\n                    elif card == 'PU2':\n                        count[2] += 1\n                    elif card == 'PU3':\n                        count[3] += 1\n                \n                if 0 not in count[1:pickup+1]:\n                    sum = sum + 1\n\n            await ctx.send(f'With {pickup} pick-up target(s), each at a {Probability*100}% rate, and {people} players each doing {times} pulls:\n\\\n{sum} players completed the full set, i.e. {round(sum/people*100, 1)}% of everyone.')\n        else :\n            await ctx.send('Invalid parameters')\n\ndef setup(bot):\n    bot.add_cog(Main(bot))","repo_name":"kawanaga555/fox_bot","sub_path":"cmds/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"43004031617","text":"class Solution:\n    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n        def freqCount(string):\n            d = {}\n            for s in string:\n                if s in d:\n                    d[s] +=1\n                else:\n                    d[s] = 1\n            return d\n        count = []\n        for string in strs:\n            count.append((freqCount(string)))\n        temp = (count)\n        res = []\n        row = 0\n        for i in range(len(count)):\n            r = []\n            for j in range(len(temp)):\n                if count[i] == temp[j]:\n                    r.append(strs[j])\n            res.append(r)\n            row += 1\n#res = list(dict.fromkeys(res))\n        dupFree = []\n        for li in res:\n            if li not in dupFree:\n                dupFree.append(li)\n        return dupFree","repo_name":"soumyamandal007/leetcode","sub_path":"0049-group-anagrams/0049-group-anagrams.py","file_name":"0049-group-anagrams.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"27711892718","text":"from __future__ import division, unicode_literals\n\nimport os\nimport re\nimport itertools\nimport warnings\nimport logging\n\nimport six\nimport numpy as np\nfrom numpy.linalg import det\nfrom collections import OrderedDict, namedtuple\nfrom hashlib import md5\n\nfrom monty.io import zopen\nfrom monty.os.path import zpath\nfrom monty.json import MontyDecoder\n\nimport xml.etree.cElementTree as ET\nfrom enum import Enum\nfrom tabulate import tabulate\n\nimport scipy.constants as const\n\nfrom pymatgen import SETTINGS\nfrom pymatgen.core.lattice import Lattice\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.core.periodic_table import Element, get_el_sp\nfrom monty.design_patterns import cached_class\nfrom pymatgen.util.string import str_delimited\nfrom pymatgen.util.io_utils import clean_lines\nfrom monty.json import MSONable\n\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\nfrom pymatgen.symmetry.bandstructure import HighSymmKpath\n\n\"\"\"\nClasses for reading/manipulating/writing exciting input files.\n\"\"\"\n\n__author__ = \"Christian Vorwerk\"\n__copyright__ = \"Copyright 2016\"\n__version__ = \"1.0\"\n__maintainer__ = \"Christian Vorwerk\"\n__email__ = \"vorwerk@physik.hu-berlin.de\"\n__status__ = \"Development\"\n__date__ = \"Nov 28, 2016\"\n\nclass ExcitingInput(MSONable):\n    \"\"\"\n    Object for representing the data stored in the structure part of the\n    exciting input.\n\n    Args:\n        structure (Structure): Structure object.\n        title (str): Optional title for exciting input. Defaults to unit\n            cell formula of structure. Defaults to None.\n        lockxyz (Nx3 array): bool values for selective dynamics,\n            where N is number of sites. Defaults to None.\n\n    .. attribute:: structure\n\n        Associated Structure.\n\n    .. attribute:: title\n\n        Optional title string.\n\n    .. attribute:: lockxyz\n\n        Lockxyz attribute for each site if available.
 A Nx3 array of\n        booleans.\n    \"\"\"\n    def __init__(self, structure, title=None, lockxyz=None):\n        \n        if structure.is_ordered:\n            site_properties = {}\n            if lockxyz:\n                site_properties[\"selective_dynamics\"] = lockxyz\n            self.structure = structure.copy(site_properties=site_properties)\n            self.title = structure.formula if title is None else title\n        else:\n            raise ValueError(\"Structure with partial occupancies cannot be \"\n                             \"converted into exciting input!\")\n    # define conversion factor between Bohr radius and Angstrom as a class\n    # attribute, so ExcitingInput.bohr2ang below resolves correctly\n    bohr2ang=const.value('Bohr radius')/const.value('Angstrom star')\n    \n    @property\n    def lockxyz(self):\n        return self.structure.site_properties.get(\"selective_dynamics\")\n    @lockxyz.setter\n    def lockxyz(self, lockxyz):\n        self.structure.add_site_property(\"selective_dynamics\",\n                                         lockxyz)\n    @staticmethod\n    def from_string(data):\n        \"\"\"\n        Reads the exciting input from a string\n        \"\"\"\n        \n        root=ET.fromstring(data)\n        speciesnode=root.find('structure').iter('species')\n        elements = []\n        positions = []\n        vectors=[]\n        lockxyz=[]\n        # get title\n        title_in=str(root.find('title').text)\n        # Read elements and coordinates\n        for nodes in speciesnode:\n            symbol = nodes.get('speciesfile').split('.')[0]\n            if len(symbol.split('_'))==2:\n                symbol=symbol.split('_')[0]\n            if Element.is_valid_symbol(symbol):\n                # Try to recognize the element symbol\n                element = symbol\n            else:\n                raise ValueError(\"Unknown element!\")\n            natoms = nodes.iter('atom')\n            for atom in natoms:\n                x, y, z = atom.get('coord').split()\n                positions.append([float(x), float(y), float(z)])\n                elements.append(element)\n                # Obtain lockxyz for each atom\n                if atom.get('lockxyz') is not None:\n                    lxyz=[]\n                    for l in atom.get('lockxyz').split():\n                        if l=='True' or l=='true':\n                            lxyz.append(True)\n                        else:\n                            lxyz.append(False)\n                    lockxyz.append(lxyz)\n                else:\n                    lockxyz.append([False, False, False])\n        # check the atomic positions type (XML boolean attribute; default to fractional)\n        cartesian=False\n        if 'cartesian' in root.find('structure').attrib.keys():\n            if root.find('structure').attrib['cartesian'].lower()=='true':\n                cartesian=True\n                for i in range(len(positions)):\n                    for j in range(3):\n                        positions[i][j]=positions[i][j]*ExcitingInput.bohr2ang\n                print(positions)\n        # get the scale attribute\n        scale_in=root.find('structure').find('crystal').get('scale')\n        if scale_in:\n            scale=float(scale_in)*ExcitingInput.bohr2ang\n        else:\n            scale=ExcitingInput.bohr2ang\n        # get the stretch attribute (whitespace-separated values, so split first)\n        stretch_in=root.find('structure').find('crystal').get('stretch')\n        if stretch_in:\n            stretch=np.array([float(a) for a in stretch_in.split()])\n        else:\n            stretch=np.array([1.0,1.0,1.0])\n        # get basis vectors and scale them accordingly\n        basisnode=root.find('structure').find('crystal').iter('basevect')\n        for vect in basisnode:\n            x, y, z=vect.text.split()\n            vectors.append([float(x)*stretch[0]*scale,\n                            float(y)*stretch[1]*scale,\n                            float(z)*stretch[2]*scale])\n        # create lattice and structure object\n        lattice_in=Lattice(vectors)\n        structure_in=Structure(lattice_in,elements,positions,coords_are_cartesian=cartesian)\n\n        return ExcitingInput(structure_in, title_in, lockxyz)\n    @staticmethod\n    def from_file(filename):\n        with zopen(filename, 'rt') as f:\n            data=f.read().replace('\\n','')\n        return ExcitingInput.from_string(data)\n\n\n    def write_etree(self, celltype, cartesian=False, bandstr=False, symprec=0.4, angle_tolerance=5):\n        root=ET.Element('input')\n        root.set('{http://www.w3.org/2001/XMLSchema-instance}noNamespaceSchemaLocation',\n                 'http://xml.exciting-code.org/excitinginput.xsd')\n        title=ET.SubElement(root,'title')\n        title.text=self.title\n
        if cartesian:\n            structure=ET.SubElement(root,'structure',cartesian=\"true\",speciespath=\"./\")\n        else:\n            structure=ET.SubElement(root,'structure',speciespath=\"./\")\n\n        crystal=ET.SubElement(structure,'crystal')\n        # set scale such that lattice vector can be given in Angstrom\n        ang2bohr=const.value('Angstrom star')/const.value('Bohr radius')\n        crystal.set('scale',str(ang2bohr))\n        # determine which structure to use\n        finder=SpacegroupAnalyzer(self.structure,symprec=symprec, angle_tolerance=angle_tolerance)\n        if celltype=='primitive':\n            new_struct=finder.get_primitive_standard_structure(international_monoclinic=False)\n        elif celltype=='conventional':\n            new_struct=finder.get_conventional_standard_structure(international_monoclinic=False)\n        elif celltype=='unchanged':\n            new_struct=self.structure\n        else:\n            raise ValueError('Type of unit cell not recognized!')\n\n\n        # write lattice\n        basis=new_struct.lattice.matrix\n        for i in range(3):\n            basevect=ET.SubElement(crystal,'basevect')\n            basevect.text= \"%16.8f %16.8f %16.8f\" % (basis[i][0], basis[i][1],\n                                                     basis[i][2])\n        # write atomic positions for each species\n        index=0\n        for i in new_struct.types_of_specie:\n            species=ET.SubElement(structure,'species',speciesfile=i.symbol+\n                                                                  '.xml')\n            sites=new_struct.indices_from_symbol(i.symbol)\n\n            for j in sites:\n                coord=\"%16.8f %16.8f %16.8f\" % (new_struct[j].frac_coords[0],\n                                                new_struct[j].frac_coords[1],\n                                                new_struct[j].frac_coords[2])\n                # obtain cartesian coords from fractional ones if needed;\n                # cart[k] = sum over m of frac[m]*basis[m][k]\n                if cartesian:\n                    coord2=[]\n                    for k in range(3):\n                        inter=(new_struct[j].frac_coords[0]*basis[0][k]+\\\n                               new_struct[j].frac_coords[1]*basis[1][k]+\\\n                               new_struct[j].frac_coords[2]*basis[2][k])*ang2bohr\n                        coord2.append(inter)\n                    coord=\"%16.8f %16.8f %16.8f\" % (coord2[0],\n                                                    coord2[1],\n                                                    coord2[2])\n\n                # write atomic positions\n                index=index+1\n                atom=ET.SubElement(species,'atom',coord=coord)\n        # write bandstructure if needed\n        if bandstr and celltype=='primitive':\n            kpath=HighSymmKpath(new_struct, symprec=symprec, angle_tolerance=angle_tolerance)\n            prop=ET.SubElement(root,'properties')\n            bandstrct=ET.SubElement(prop,'bandstructure')\n            for i in range(len(kpath.kpath['path'])):\n                plot=ET.SubElement(bandstrct,'plot1d')\n                path=ET.SubElement(plot, 'path',steps='100')\n                for j in range(len(kpath.kpath['path'][i])):\n                    symbol=kpath.kpath['path'][i][j]\n                    coords=kpath.kpath['kpoints'][symbol]\n                    coord=\"%16.8f %16.8f %16.8f\" % (coords[0],\n                                                    coords[1],\n                                                    coords[2])\n                    if symbol=='\\\\Gamma':\n                        symbol='GAMMA'\n                    pt=ET.SubElement(path,'point',coord=coord,label=symbol)\n        elif bandstr and celltype != 'primitive':\n            raise ValueError(\"Bandstructure is only implemented for the \\\n                              standard primitive unit cell!\")\n        return root\n    def write_string(self, celltype, cartesian=False, bandstr=False, symprec=0.4, angle_tolerance=5):\n        try:\n            root=self.write_etree(celltype, cartesian, bandstr, symprec, angle_tolerance)\n            self.indent(root)\n            # output should be a string not a bytes object\n            string=ET.tostring(root).decode('UTF-8')\n        except:\n            raise ValueError('Incorrect celltype!')\n        return string\n    def write_file(self, celltype, filename, cartesian=False, bandstr=False, symprec=0.4, angle_tolerance=5):\n        try:\n            root=self.write_etree(celltype, cartesian, bandstr, symprec, angle_tolerance)\n            self.indent(root)\n            tree=ET.ElementTree(root)\n            tree.write(filename)\n        except:\n            raise ValueError('Incorrect celltype!')\n        return\n    # Missing PrettyPrint option in the current version of xml.etree.cElementTree\n    @staticmethod\n    def indent(elem,level=0):\n        i = 
\"\\n\" + level*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n ExcitingInput.indent(elem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n","repo_name":"comscope/ComDMFT","sub_path":"ComRISB/pyextern/pymatgen/pymatgen/io/exciting/inputs.py","file_name":"inputs.py","file_ext":"py","file_size_in_byte":11736,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"86"} +{"seq_id":"74987471325","text":"from colorama import Fore, Back, Style\nfrom global_var import *\nimport time\n\n\nclass static_bg:\n def __init__(self):\n self._lives = 3\n self._score = 0\n self._start = int(time.time())\n self._cn = 0\n self._grid = ([[Back.BLACK + Fore.BLACK + ' ' for col in range(cols)] for row in range(rows)])\n\n \n for val in range(cols):\n self._grid[0][val] = Fore.GREEN + '_'\n self._grid[2][val] = Fore.GREEN + '_'\n self._grid[rows - 1][val] = Fore.GREEN + '*'\n\n for val in range(1,rows-1):\n self._grid[val][0] = Fore.GREEN + '|'\n self._grid[val][cols-1] = Fore.GREEN + '|'\n \n def print_grid(x):\n info = \"SCORE: \" + str(x._score) + \"| Lives: \" + str(x._lives) + \"| Time :\" + str(int(time.time()) - x._start)\n for val in range(len(info)):\n x._grid[1][val+1] = Fore.GREEN + info[val]\n output_str = \"\"\n for row in range(rows):\n for col in range(cols):\n output_str += x._grid[row][col]\n \n output_str += \"\\n\"\n print('\\033[H' + output_str)\n print(Style.RESET_ALL)\n","repo_name":"Dineshg49/brick-game","sub_path":"static_bg.py","file_name":"static_bg.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"11068954182","text":"def solve(n, x, y, z):\n # base case\n if(n == 0):\n return 0\n\n if(n < 0):\n return -24370199\n\n ans1 = solve(n-x, x,y,z) + 1\n ans2 = solve(n-y, x,y,z) + 1\n ans3 = solve(n-z, x,y,z) + 1\n\n ans = max(ans1, max(ans2, ans3))\n return ans\n\nn = 7\nx = 5\ny = 2\nz = 2\n\n# solve function -> returns maximum number of segments\nans = solve(n,x,y,z)\n# ans -> valid && invalid\n\nif(ans < 0):\n ans = 0\nprint(\"Answer is: \", ans)\n","repo_name":"anshawasthi01/Supreme-DSA","sub_path":"7. Recursion and Backtracking/5. Week Connect [Recursion - Level 4]/2. Cut into Segments.py","file_name":"2. 
Cut into Segments.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"73329623324","text":"#Cr\n\nwith open(\"input.txt\", \"r\") as f:\n lines = f.readlines()\n\nTASK = 2\n\ndef stackup():\n sizes.append(stack.pop(-1))\n if stack:\n stack[-1] += sizes[-1]\n\nstack = []\nsizes = []\nfor line in lines:\n line = line.strip()\n if line == \"$ cd ..\":\n stackup()\n elif line.startswith(\"$ cd \"):\n stack.append(0)\n else:\n size = line.split()[0]\n if size.isdigit():\n stack[-1] += int(size)\n\nwhile stack:\n stackup()\n\nif TASK == 1:\n print(sum(i for i in sizes if i <= 100000))\nelse:\n print(min(i for i in sizes if (70000000 - max(sizes) + i) > 30000000))\n","repo_name":"koksiangng/programming-challenges","sub_path":"Advent-of-Code/2022/Day7/Day7.py","file_name":"Day7.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"22369111619","text":"#!/usr/bin/env python3.8\n#\n# WEBenum\n#\n# @desc a tool to enumerator web directories using both recursive crawling and directory guessing.\n#\n# @author Evan Palmiotti\n# @required argparse, bs4, colorama, os, re, requests, signal, sys, threading, time, warnings\n########################################################################################################\nimport os\nimport requests\nimport argparse\nfrom argparse import RawTextHelpFormatter\nfrom bs4 import BeautifulSoup\nimport re\nfrom sys import stdout\nfrom time import time\nimport colorama\nimport threading\nimport warnings\nimport signal\n\nBANNER = \"\\n\" \\\n \"\\033[34m|‾|\" + \" \" * 5 + \"/‾/ \\033[31m\" + \"_\" * 4 + \"/\\033[33m __ )\\033[34m___ \\033[32m____ \\033[34m__ \" \\\n \"__\\033[31m____ ___\\n\" \\\n \"\\033[34m| | /| / /\\033[31m __/\\033[33m / __ /\\033[34m _ \\\\\\033[32m/ __ \\\\\\033[34m/ / / / \\033[31m__ `__ \\\\\\n\"\\\n \"\\033[34m| |/ |/ / \\033[31m/___/\\033[33m /_/ /\\033[34m __/\\033[32m / / /\\033[34m /_/ / \\033[31m/\"+\" /\" * 4 + \\\n \"\\n\" \\\n \"\\033[34m|__/|__/\\033[31m\"+\"_\" * 5 + \"/\\033[33m\"+\"_\" * 5 + \"\" \\\n \"/\\033[34m\\\\___/\\033[32m_/ /_/\\033[34m\\\\__,_/\\033[31m_/ /_/ /_/\\n\" \\\n \"\" + \"=\" * 50 + \"\\n\" + \" \" * 25 + \"\\033[33m=\" * 25 + \"\\n\" + \" \" * 38 + \"\\033[32m=\" * 12 # Ascii art banner\n\nURLS = [] # found urls (updated by script)\nDOMAINS = [] # found domains (updated by script)\nWORDLIST = [] # word list read from file\nARGS: any # stored arguments (updated by script)\nORIGINAL_DOMAIN = '' # Original Domain (updated by script)\nORIGINAL_PORT = ''\nTHREAD_LOCK = threading.Lock()\nSTART = time()\n\n#\n# @desc easy access storage for urls\n# @var service - service string (http or https)\n# @var domain - domain and subdomain string\n# @var path - url path\n# @var port - port sting (ex. :443)\n# @var params - anything after '?' 
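An aside on the segment-cutting recursion in the Supreme-DSA record above: the plain recursion re-solves overlapping subproblems exponentially often. A memoized sketch of the same idea (an illustrative helper of ours, not part of the original repo; float('-inf') replaces the magic sentinel -24370199):

from functools import lru_cache

def max_segments(n, x, y, z):
    @lru_cache(maxsize=None)
    def best(m):
        # base cases mirror the original solve()
        if m == 0:
            return 0
        if m < 0:
            return float('-inf')  # unreachable length, propagates through max()
        return 1 + max(best(m - x), best(m - y), best(m - z))
    ans = best(n)
    return ans if ans > 0 else 0

print(max_segments(7, 5, 2, 2))  # 2, matching the original script's output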
in the url\n#\nclass Url:\n    def __init__(self, path):\n        reg_match = parse_url(path)\n        self.service = reg_match.group('service') or ''\n        self.domain = reg_match.group('domain') or ''\n        self.path = reg_match.group('path') or ''\n        self.port = reg_match.group('port') or ''\n        self.params = reg_match.group('params') or ''\n        self.status = None\n        self.size = None\n\n    def set_status(self, status):\n        self.status = status\n\n    def __str__(self): return self.service+self.domain+self.port+self.path+self.params\n\n    def __repr__(self): return self.service+self.domain+self.port+self.path\n\n    def __eq__(self, other): return repr(self) == repr(other)\n\n    def __lt__(self, other): return repr(self) < repr(other)\n\n    def __le__(self, other): return repr(self) <= repr(other)\n\n    def __gt__(self, other): return repr(self) > repr(other)\n\n    def __ge__(self, other): return repr(self) >= repr(other)\n\n#\n# @desc Thread used for brute forcing directories\n# @var url - url being brute forced\n# @var word - current directory guess\n# @var found_urls - list of found urls\n# @var depth - current depth (for progress updates)\n# @var index - current index of word in wordlist (for progress reporting)\n# @var end - length of wordlist (for progress reporting)\n# @var exc - exception raised by thread (to be caught when threads are joined)\n#\nclass BruteForceThread(threading.Thread):\n    def __init__(self, url, word, found_urls, depth, index, end):\n        threading.Thread.__init__(self)\n        self.url = url\n        self.word = word\n        self.found_urls = found_urls\n        self.depth = depth\n        self.index = index\n        self.end = end\n        self.exc = None\n\n    def run(self):\n        try:\n            with warnings.catch_warnings() as warn:\n                warnings.simplefilter('ignore')\n                bf_info = \"%i/%i\" % (self.index, self.end)\n                brute_force_thread(self.depth, self.url, self.word, self.found_urls, bf_info)\n        except Exception as e:\n            self.exc = e\n\n    def join(self):\n        threading.Thread.join(self)\n        if self.exc:\n            raise self.exc\n\n#\n# @desc function used by brute force threads\n# @param depth - current depth\n# @param url - current url to be guessed\n# @param word - current guess\n# @param found_urls list of found urls (shared by threads)\n# @param bf_info - string describing brute force progress\n#\ndef brute_force_thread(depth, url, word, found_urls, bf_info):\n    # figure out if a / needs to be added or taken away\n    if len(url.path) != 0 and len(word) != 0:\n        if url.path[-1] == '/' and word[0] == '/':\n            word = word[1::]\n        elif url.path[-1] != '/' and word[0] != '/':\n            word = '/' + word\n    elif len(url.path) == 0:\n        word = '/' + word\n\n    print_update(depth, url, None, bf_info)\n    test_url = url.service + url.domain + url.port + url.path + word\n    if test_url not in URLS:\n        new_url = Url(test_url)\n        result = request(new_url)\n        if result is None:\n            raise ConnectionError(\"Lost connection to server\")\n        status = result.status_code\n        size = len(result.text)\n        if checkResponse(result):\n            new_url.status = status\n            new_url.size = size\n            # synchronize threads for writing operations\n            THREAD_LOCK.acquire()\n            if new_url not in URLS:\n                URLS.append(new_url)\n                print_update(depth, url, new_url, bf_info)\n                if status not in [401, 400, 403]:\n                    found_urls.append(new_url)\n            THREAD_LOCK.release()\n\n#\n# @desc sets command line arguments and help strings.\n#       parses command line arguments using argparse and returns an object of settings.\n#\n# @return parsed_args - Namespace with arguments\n#\ndef parseargs():\n\n    examples = ['webenum.py -u http://test.com -d 4',\n                'webenum.py -u http://test.com -w wordlist.txt',\n                'webenum.py -u https://test.com -w wordlist -d 5 -b 3',\n                'webenum.py -u https://test.com -s -w wordlist -o urls.txt -Od domains.txt']\n    explanations = ['Enumerate using only crawling to depth of 4',\n                    'Enumerate using crawling and brute forcing to level 3',\n                    'Enumerate using crawling to level 5 and brute forcing to level 3',\n                    'Enumerate using both methods including subdomains and saving both '\n                    'found urls and found subdomains to files']\n    epilog = 'Examples:\\n'\n    for i in range(0, 4):\n        epilog += '\\t%-75s --%s\\n' % (examples[i], explanations[i])\n    parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,\n                                     description='Enumerate web directories using crawling and directory guessing.',\n                                     epilog=epilog)\n    parser.add_argument('-url', '-u', help='Url to crawl', required=True)\n    parser.add_argument('--quiet', '-q', help=\"Only print found urls\", action='store_true')\n    parser.add_argument('--allow-subdomains', '-s', help=\"Allow scanner to request subdomains\", action='store_true',\n                        default=False)\n    parser.add_argument('--allow-other-ports', '-p', help=\"Allow scanner to request other ports\", action='store_true',\n                        default=False)\n    parser.add_argument('--depth', '-d', help=\"Depth of directory crawling (default=3) (0=unlimited)\", type=int,\n                        default=3)\n    parser.add_argument('--brute-force-depth', '-b', help=\"Maximum crawling depth to do brute force directory guessing \"\n                        \"(default=0) (0=same as crawl depth)\", type=int, default=0)\n    parser.add_argument('--wordlist', '-w', help=\"Wordlist to use for directory guessing\")\n    parser.add_argument('--check-all-urls', '-z', help=\"Check URLs found in HTML pages for status codes\",\n                        action='store_true')\n    parser.add_argument('--timeout', help='Timeout time for requests.', default=10)\n    parser.add_argument('--out-file', '-o', help='Write results to specified file', default=None)\n    parser.add_argument('--threads', '-t', help='Number of threads to run', default=10)\n    parser.add_argument('--no-verify-ssl', '-v', help=\"Don't verify SSL\", action='store_true')\n    parser.add_argument('--out-file-domains', '-Od', help='Write domains to a file', default=None)\n    parser.add_argument('--follow-redirects', '-r', help='Follow HTTP redirects', action='store_true')\n    parser.add_argument('--basic-auth', '-a', help='Set basic authentication creds in the form user:pass', default=None)\n    parser.add_argument('--cookies', '-c', help='Set cookie on requests in the form name:value,name1:value1',\n                        default=None)\n    parser.add_argument('--fail-cond', '-f',\n                        help='Set a string within the response for failure or \"not found\" condition',\n                        type=str)\n    return parser.parse_args()\n\n#\n# @desc Prints pretty ascii art banner\n#\ndef print_banner():\n    print(BANNER)\n    print('\\n'+'='*75)\n    for arg_name, arg_value in vars(ARGS).items():\n        if arg_name != \"quiet\" and arg_value is not None:\n            print('%-20s %s' % (arg_name+':', arg_value))\n    print('='*75+'\\n\\n')\n\n#\n# @desc pad a string to the size of the terminal\n# @param string - string to be padded\n# @return padded string\n#\ndef pad(string):\n    max_width = os.get_terminal_size().columns\n    length = len(string)\n    if length < max_width:\n        padding = max_width - length\n        string = string + ' '*padding\n    return string\n\n#\n# @desc trim a string if it is longer than the terminal size\n# @param string - string to be trimmed\n# @return trimmed string\n#\ndef trim(string):\n    max_width = os.get_terminal_size().columns\n    length = len(string)\n    if length > max_width:\n        string = 
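An aside on the BruteForceThread batching in the webenum record above: the submit/join/re-raise pattern can also be written with the standard-library executor, which re-raises worker exceptions from result(). A sketch under the assumption that probe plays the role of brute_force_thread:

from concurrent.futures import ThreadPoolExecutor, as_completed

def run_wordlist(words, probe, max_workers=10):
    # one task per guess; result() surfaces any exception raised in a worker
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = [pool.submit(probe, word.strip('\n')) for word in words]
        for future in as_completed(futures):
            future.result()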
string[0:max_width-3:]+'...'\n return string\n\n#\n# @desc Prints all updates to screen while running\n# @param update - string to print\n#\ndef print_update(depth, url, new_url, bf_info):\n\n if bf_info:\n update = trim('\\r%.2f | Depth: %2i | Brute Forcing: %s | URL: %-15s' % (time() - START, depth, bf_info, url))\n else:\n update = trim('\\r%.2f | Depth: %2i | crawling: %-15s' % (time() - START, depth, url))\n\n if new_url is not None:\n if new_url.status is not None and not ARGS.quiet:\n url_string = \"\\r%-75s (status:%s) [size:%s]\" % (str(new_url), new_url.status, new_url.size)\n stdout.write(pad(url_string)+'\\n')\n stdout.flush()\n else:\n url_string = \"\\r%-75s\" % (str(new_url))\n stdout.write(pad(url_string)+'\\n')\n stdout.flush()\n if not ARGS.quiet:\n stdout.write(pad(update))\n stdout.flush()\n else:\n if not ARGS.quiet:\n stdout.write(pad(update))\n stdout.flush()\n\n#\n# @desc Overwrites final update and print various statistics\n#\ndef print_final_stats():\n print('\\r'+' '*100)\n print('='*75+'\\n')\n print('STATISTICS:')\n print('\\tURLS:', len(URLS))\n print('\\tDOMAINS:', len(DOMAINS))\n\n#\n# @desc Parses url in string form and return a regex object with named groups\n# @param str_url - url to print\n#\ndef parse_url(str_url):\n reg_url = re.compile(\"^(?Phttps?://)?(?P[\\-a-zA-Z0-9.]*)?(?P:\\d{1,4})?\"\n \"(?P[/.%_\\-a-zA-Z0-9]*)?(?P[?,#].*)?\")\n match = reg_url.match(str_url)\n return match\n\n#\n# @desc helper function for build_url_string. takes and path as a string and returns the parent path.\n# @param path - path as a string (ex. /test/testy)\n# @return new_path - parent path as a string (ex. /test)\n#\ndef move_up_path(path):\n path_array = re.split(r\"(/[^/]*)\", path)\n new_path = ''\n for directory in path_array[:-2:]:\n new_path += directory\n return new_path\n\n#\n# @desc builds url string for cases where a partial url is given and returns it.\n# @param str_url - url string a html page (ex. ../test/testy)\n# @param original_url - object of url requested to get the html page (ex. https://test.com/about)\n# @return new_url - the url object built by combining str_url and original_url (ex. https:/test.com/test/testy\n#\ndef build_url_string(str_url, original_url):\n new_url = original_url.service + original_url.domain + original_url.port\n original_path = original_url.path\n\n # if the original url is a file use the parent path (ex. /test/test.html -> /test/)\n if '.' 
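For reference, the named groups parse_url above produces on a typical input (values traced by hand from the regex, not from a live run):

m = parse_url('https://test.com:8080/a/b.html?x=1')
# m.group('service') -> 'https://'
# m.group('domain')  -> 'test.com'
# m.group('port')    -> ':8080'
# m.group('path')    -> '/a/b.html'
# m.group('params')  -> '?x=1'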
in original_path:\n        original_path = move_up_path(original_url.path)\n\n    if str_url and len(str_url) > 1:\n        if str_url[0] == '.':\n            # handle urls like ./\n            if str_url[1] == '/':\n                if original_path[-1] == '/':\n                    new_url = new_url + original_path + str_url[2::]\n                else:\n                    new_url = new_url + original_path + str_url[1::]\n                return new_url\n            # handle urls like ../\n            elif str_url[1] == '.':\n                new_url = new_url + move_up_path(original_url.path) + str_url[2::]\n\n        # handle urls like /test\n        elif str_url[0] == '/':\n            new_url = new_url + str_url\n            return new_url\n\n        # handle urls like #test\n        elif str_url[0] == '#':\n            new_url = new_url + original_path + original_url.params + str_url\n        else:\n            if ':' not in str_url:\n                if len(original_path) > 0:\n                    if original_path[-1] == '/':\n                        new_url = new_url + original_path + str_url\n                    else:\n                        new_url = new_url + original_path + '/' + str_url\n    return new_url\n\n#\n# @desc finds all links on a given html page and returns a list of Url objects to visit next\n# @param page - html string\n# @param path - the request url that produced the html page\n# @return paths - list of Url objects to visit next\n#\ndef find_links(result, depth):\n    soup = BeautifulSoup(result.text, 'html.parser')\n    links = soup.findAll('a')\n    links += soup.findAll('link')\n    links += soup.findAll('base')\n    paths = []\n    for link in links:\n        # ignore tags with no 'href' attribute\n        try:\n            url_str = link['href']\n        except KeyError:\n            continue\n        # if the href is a shortened url, build a full url\n        if '://' not in url_str:\n            new_url = Url(build_url_string(url_str, Url(result.url)))\n        else:\n            new_url = Url(url_str)\n        # Need to handle empty hrefs\n        if new_url is None:\n            continue\n\n        # check if the new url is on an acceptable domain and add it to the necessary lists\n        if ARGS.check_all_urls:\n            # use a separate variable so the page response held in 'result' is not clobbered\n            check_resp = request(new_url)\n            if check_resp is not None:\n                new_url.status = check_resp.status_code\n                new_url.size = len(check_resp.text)\n            else:\n                new_url.status = 'Timeout'\n\n        if ORIGINAL_PORT == new_url.port or ARGS.allow_other_ports:\n            if ORIGINAL_DOMAIN in new_url.domain and ARGS.allow_subdomains:\n                if new_url.domain not in DOMAINS:\n                    DOMAINS.append(new_url.domain)\n                if new_url not in URLS:\n                    paths.append(new_url)\n                    URLS.append(new_url)\n                    print_update(depth, Url(result.url), new_url, None)\n            elif ORIGINAL_DOMAIN == new_url.domain:\n                if new_url not in URLS:\n                    paths.append(new_url)\n                    URLS.append(new_url)\n                    print_update(depth, Url(result.url), new_url, None)\n    return paths\n\n#\n# @desc requests a given url object and returns response object.\n# @param url - url object to request\n# @return response object\n#\ndef request(url):\n    r = None\n    try:\n        with warnings.catch_warnings() as warn:\n            warnings.simplefilter('ignore')\n            cookies = None\n            if ARGS.cookies:\n                cs = ARGS.cookies.split(',')\n                cookies = {}\n                for c in cs:\n                    csplit = c.split(':')\n                    cookies[csplit[0]] = csplit[1]\n\n            auth = None\n            if ARGS.basic_auth:\n                creds = ARGS.basic_auth.split(':')\n                auth = (creds[0], creds[1])\n            headers = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.67 Safari/537.36\"}\n            r = requests.get(str(url).strip('\\n'), timeout=int(ARGS.timeout), verify=(not ARGS.no_verify_ssl),\n                             allow_redirects=ARGS.follow_redirects, auth=auth, cookies=cookies, headers=headers)\n            if str(url) == ARGS.url and r.status_code == 404:\n                exit_with_error('Error validating request: Received 404')\n    except Exception as e:\n        if str(url) == ARGS.url:\n            exit_with_error('Error sending request: ' + str(e))\n    return r\n\n#\n# 
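Much of the relative-reference handling in build_url_string above is what the standard library's urljoin implements (RFC 3986 resolution); a comparison sketch, not a drop-in replacement, since the tool also rewrites file paths up to their parent directory:

from urllib.parse import urljoin

base = 'https://test.com/docs/index.html'
urljoin(base, './b.html')      # 'https://test.com/docs/b.html'
urljoin(base, '../img/a.png')  # 'https://test.com/img/a.png'
urljoin(base, '/abs/path')     # 'https://test.com/abs/path'
urljoin(base, '#frag')         # 'https://test.com/docs/index.html#frag'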
@desc recursive function to crawl the web address and add found urls to global list.\n# @param url - starting url as Url object\n# @param depth - integer for maximum recursive depth\n#\ndef crawl(url, depth):\n found_urls = []\n print_update(depth, url, None, None)\n r = request(url)\n if ARGS.brute_force_depth == 0 or depth <= ARGS.brute_force_depth:\n found_urls = brute_force(url, depth)\n paths = []\n if r is not None:\n paths = find_links(r, depth)\n paths = paths + found_urls\n # exit conditions for recursion\n if depth >= ARGS.depth > 0 or len(paths) == 0:\n return\n\n for path in paths:\n crawl(path, depth+1)\n\n#\n# @desc exits program with error message\n# @param error - error message\n#\ndef exit_with_error(error):\n print(pad('\\r\\033[31m '+error))\n exit(0)\n\n#\n# @desc loads words from wordlist file\n#\ndef parse_wordlist():\n global WORDLIST, ARGS\n try:\n with open(ARGS.wordlist, 'r') as word_file:\n WORDLIST = word_file.readlines()\n except Exception as e:\n exit_with_error('Error reading wordlist file ' + str(e))\n\n\n#\n# @desc orchestrates brute force threads and returns found urls\n# @param url - url to guess from\n# @param depth - current depth in the crawling process\n# @return found_urls any urls discovered during brute forcing\n#\ndef brute_force(url, depth):\n found_urls = []\n if '.' not in url.path and (url.domain + url.path != ORIGINAL_DOMAIN+'/' or depth == 0):\n index = 0\n while index < len(WORDLIST):\n thread_number = int(ARGS.threads)\n words_left = len(WORDLIST) - index\n if words_left < thread_number:\n thread_number = words_left\n\n thread_list = []\n for i in range(0, thread_number):\n thread_list.append(BruteForceThread(url, WORDLIST[index].strip('\\n'), found_urls, depth,\n index, len(WORDLIST)))\n index += 1\n thread_list[i].start()\n\n for thread in thread_list:\n try:\n thread.join()\n except Exception as e:\n exit_with_error('Error joining threads: '+str(e))\n return found_urls\n\n#\n# @desc called to handle ctrl+c exits. 
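The crawl above recurses once per depth level, so a very deep link chain can hit Python's recursion limit; a breadth-first sketch with an explicit queue (visit is a stand-in for the request/find_links/brute_force step and is assumed to return newly discovered URLs):

from collections import deque

def crawl_bfs(start_url, visit, max_depth):
    queue = deque([(start_url, 0)])
    while queue:
        url, depth = queue.popleft()
        for found in visit(url, depth):
            # same cutoff as the recursive version: 0 means unlimited
            if max_depth <= 0 or depth + 1 < max_depth:
                queue.append((found, depth + 1))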
writes to files specified by args and prints final stats\n# @param signum needed for handling signal\n# @param frame needed for handling signal\n#\ndef exit_handler(signum, frame):\n    if not ARGS.quiet:\n        print('\\nexiting')\n    if threading.active_count() > 1:\n        if not ARGS.quiet:\n            print('Joining threads...')\n        for thread in threading.enumerate()[1::]:\n            try:\n                thread.join()\n            except Exception as e:\n                exit_with_error('Error joining threads: '+str(e))\n    output_to_file()\n    if not ARGS.quiet:\n        print_final_stats()\n    exit(0)\n\n#\n# @desc handles writing to files based on arguments\n#\ndef output_to_file():\n    if ARGS.out_file:\n        try:\n            with open(ARGS.out_file, 'w') as out_file:\n                index = 0\n                for url in URLS:\n                    index += 1\n                    if index >= len(URLS):\n                        out_file.write(str(url))\n                    else:\n                        out_file.write(str(url)+'\\n')\n        except Exception as e:\n            exit_with_error('Error writing to file ' + str(e))\n    if ARGS.out_file_domains:\n        try:\n            with open(ARGS.out_file_domains, 'w') as out_file:\n                index = 0\n                for domain in DOMAINS:\n                    index += 1\n                    if index >= len(DOMAINS):\n                        out_file.write(str(domain))\n                    else:\n                        out_file.write(str(domain) + '\\n')\n        except Exception as e:\n            exit_with_error('Error writing to file' + str(e))\n\n#\n# @checkResponse\n# @param result requests response object\n# @desc check if a response indicates not found\n# @return true if the response is a found response\n#\ndef checkResponse(result):\n    if result.status_code == 404 or (ARGS.fail_cond and ARGS.fail_cond in result.text):\n        return False\n    return True\n\n#\n# @main\n# @desc processes arguments and kicks off crawling. Once done, prints final statistics.\n#\ndef main():\n    global ARGS, ORIGINAL_DOMAIN, ORIGINAL_PORT\n    colorama.init(autoreset=True)\n    requests.urllib3.disable_warnings()\n    ARGS = parseargs()\n    signal.signal(signal.SIGINT, exit_handler)\n    if not ARGS.quiet:\n        print_banner()\n    original_url = Url(ARGS.url)\n\n    result = request(original_url)\n    if not checkResponse(result):\n        exit_with_error('URL provided results in 404')\n    else:\n        URLS.append(original_url)\n        print_update(0, original_url, original_url, None)\n    test_url = build_url_string('/cfe15ae6b841b3ac72777ace53f35ab4888', original_url)\n    result = request(test_url)\n    if checkResponse(result):\n        exit_with_error('Could not validate 404 on bad url: ' + test_url + ' Status Code: ' + str(result.status_code))\n\n    ORIGINAL_DOMAIN = original_url.domain\n    ORIGINAL_PORT = original_url.port\n    DOMAINS.append(ORIGINAL_DOMAIN)\n\n    if ARGS.wordlist:\n        parse_wordlist()\n    crawl(original_url, 0)\n    output_to_file()\n    if not ARGS.quiet:\n        print_final_stats()\n\n\nmain()\n","repo_name":"ejp3496/webenum","sub_path":"webenum.py","file_name":"webenum.py","file_ext":"py","file_size_in_byte":22394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"}
+{"seq_id":"23264166075","text":"# Copyright (c) Alibaba, Inc. 
and its affiliates.\r\nfrom typing import Dict\r\n\r\nfrom modelscope.metainfo import Models\r\nfrom modelscope.models.base import Tensor, TorchModel\r\nfrom modelscope.models.builder import MODELS\r\nfrom modelscope.outputs import OutputKeys\r\nfrom modelscope.utils.constant import Tasks\r\n\r\n__all__ = ['GPT3ForTextGeneration']\r\n\r\n\r\n@MODELS.register_module(Tasks.text_generation, module_name=Models.gpt3)\r\nclass GPT3ForTextGeneration(TorchModel):\r\n\r\n def __init__(self, model_dir: str, *args, **kwargs):\r\n \"\"\"initialize the text generation model from the `model_dir` path.\r\n\r\n Args:\r\n model_dir (str): the model path.\r\n \"\"\"\r\n super().__init__(model_dir, *args, **kwargs)\r\n\r\n from modelscope.models.nlp.gpt3 import GPT3Model\r\n from transformers import BertTokenizer\r\n\r\n self.model = GPT3Model.from_pretrained(model_dir)\r\n self.tokenizer = BertTokenizer.from_pretrained(model_dir)\r\n\r\n def forward(self, input: Dict[str, Tensor]) -> Dict[str, Tensor]:\r\n \"\"\"return the result by the model\r\n\r\n Args:\r\n input (Dict[str, Tensor]): the preprocessed data\r\n\r\n Returns:\r\n Dict[str, Tensor]: results\r\n Example:\r\n {\r\n 'logits': Tensor([[0.54, 0.32...])]), # logits\r\n }\r\n \"\"\"\r\n return self.model(**input)\r\n\r\n def generate(self, input: Dict[str, Tensor]) -> Dict[str, Tensor]:\r\n assert 'input_ids' in input, \"generate function must accept 'input_ids' key\"\r\n input_ids = input['input_ids']\r\n if 'attention_mask' in input:\r\n attention_mask = input['attention_mask']\r\n input_ids = input_ids[0][attention_mask[0].nonzero()] \\\r\n .squeeze().unsqueeze(0)\r\n # remove sep token at the end of tokenizer output\r\n input_ids = input_ids[:, :-1]\r\n\r\n gen_params = dict()\r\n gen_params['inputs'] = input_ids\r\n gen_params['do_sample'] = input.pop('do_sample', True)\r\n gen_params['max_length'] = input.pop('max_length', 128)\r\n gen_params['top_k'] = input.pop('top_k', 10)\r\n gen_params['top_p'] = input.pop('top_p', None)\r\n sample_output = self.model.generate(**gen_params)\r\n return {'sequences': sample_output[0]}\r\n","repo_name":"sdjamesliu/alldata","sub_path":"ai/modelscope-versions/modelscope-master/modelscope/models/nlp/gpt3/text_generation.py","file_name":"text_generation.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"45801914074","text":"from app.models import User, Trainer, Subscription, Course\nfrom app.forms import SearchForm\nfrom flask import request, redirect, url_for\nimport math\n\nnavs = [(\"Uczestnicy\", \"/users/0\"), (\"Trenerzy\", \"/trainers/0\"), (\"Zajęcia\", \"/courses/0\"), (\"Abonamenty\", \"/subscriptions/0\")]\n\n\ndef search():\n search_form = SearchForm()\n if request.method == \"POST\":\n name = (search_form.data[\"name\"]).lower()\n objects = [User, Trainer, Subscription, Course]\n names = []\n for object in objects:\n for item in object.query.all():\n names.append((item.name, item.id, str(item)))\n result = []\n for nam in names:\n if nam[0].lower() == name:\n result.append(nam)\n if result:\n if result[0][2] == 'user':\n return redirect(url_for(\"user_details\", user_id=result[0][1]))\n elif result[0][2] == \"trainer\":\n return redirect(url_for(\"trainer_details\", trainer_id=result[0][1]))\n elif result[0][2] == \"subscription\":\n subs = Subscription.query.all()\n subss = [(sub.id, sub.name, subs.index(sub)) for sub in subs]\n num = 0\n for s in subss:\n if s[1] == result[0][0]:\n num = 
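A hedged usage sketch for the GPT3ForTextGeneration record above; the model directory is a placeholder, and the extra keys mirror what generate() pops from its input dict:

model = GPT3ForTextGeneration('/path/to/gpt3_model_dir')  # hypothetical path
enc = model.tokenizer('hello', return_tensors='pt')
out = model.generate({'input_ids': enc['input_ids'],
                      'attention_mask': enc['attention_mask'],
                      'max_length': 64, 'top_k': 10})
print(model.tokenizer.decode(out['sequences']))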
math.floor(s[2] / 7)\n return redirect(url_for(\"subscriptions_all\", num=num))\n elif result[0][2] == \"course\":\n courses = Course.query.all()\n coursess = [(course.id, course.name, courses.index(course)) for course in courses]\n num = 0\n for c in coursess:\n if c[1] == result[0][0]:\n num = math.floor(c[2] / 7)\n return redirect(url_for(\"courses_all\", num=num))\n\n","repo_name":"agatamartyna/fitness_club","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"41407247623","text":"from faker.providers import BaseProvider\nfrom faker import Factory\nimport string\nimport random\nimport re\nimport datetime\n\n\nclass Invoice(BaseProvider):\n \"\"\"\n Provider for Eletronic Invoicing Number for Consumer (NFC-e)\n https://www.edicomgroup.com/en_US/news/5891-electronic-invoicing-in-brazil-nf-e-nfs-e-and-ct-e.html\n https://www.ibm.com/support/knowledgecenter/en/SSRJDU/einvoicing/SCN_eINV_Brazil_Definitions.htm\n\n A example of NFC-e:\n unmasked: 51080701212344000127550010000000981364112281\n masked: 51-0807-01.212.344/0001-27-55-001-000.000.098-136.411.228-1\n\n A NFC-e has an identifier with 44 digits which is compound by:\n 02 digit code - federation unit code of the emitter of NFC-e\n 04 digit code - Month and Year of emission of the invoice (YYMM format)\n 14 digit code - CNPJ of the company who emited the NFC-e\n 02 digit code - model of the NFC-e\n 03 digit code - series of the NFC-e\n 09 digit code - number of the NFC-e\n 09 digit code - code of the NFC-e generated by the government system\n 01 digit code - verification code\n\n This provider has two mainly methods:\n nfce which return a invoice identifier with mask\n nfce_code which return a invoice identifier without mask\n \"\"\"\n __provider__ = 'invoice'\n __lang__ = \"pt_BR\"\n\n fake = Factory.create(\"pt_BR\")\n # federation unit codes\n uf_cod_map = {\n 'RO': 11,\n 'AC': 12,\n 'AM': 13,\n 'RR': 14,\n 'PA': 15,\n 'AP': 16,\n 'TO': 17,\n 'MA': 21,\n 'PI': 22,\n 'CE': 23,\n 'RN': 24,\n 'PB': 25,\n 'PE': 26,\n 'AL': 27,\n 'SE': 28,\n 'BA': 29,\n 'MG': 31,\n 'ES': 32,\n 'RJ': 33,\n 'SP': 35,\n 'PR': 41,\n 'SC': 42,\n 'RS': 43,\n 'MS': 50,\n 'MT': 51,\n 'GO': 52,\n 'DF': 53\n }\n\n def parse_date(self, date: datetime.datetime):\n \"\"\"\n Parser to return a 04 digits code to compound an NFC-e code.\n \"\"\"\n year_date = str(date.year)[-2:]\n month_date = str(date.month)\n if len(month_date) == 1:\n month_date = \"0\" + month_date\n code_date = f\"{year_date}{month_date}\"\n return code_date\n\n def validate_dtype(self, variable, type, error_msg):\n \"\"\"\n Validation for data type\n \"\"\"\n if not isinstance(variable, type):\n raise TypeError(f\"{error_msg}\")\n\n def nfce(self, **kwargs) -> str:\n \"\"\"\n Return a 59 character identifier for NFC-e with mask\n\n Keyword Args:\n start_dt (datetime.datetime): if no invoice_no has been entered, a start datetime is needed to\n compute a random 04 digits code which has format YYMM. In case of missing value, an random date of the current year will be used.\n end_dt (datetime.datetime): if no invoice_no has been entered, an end datetime is needed to\n compute a random 04 digits code which has format YYMM. 
In case of missing value, a random date of the current year will be used.\n            uf_code(str): a common two-letter abbreviation for Brazilian states (http://www.brazil-help.com/brazilian_states.htm)\n        \"\"\"\n        start_dt = kwargs.get('start_dt')\n        end_dt = kwargs.get('end_dt')\n        if start_dt and end_dt:\n            [self.validate_dtype(variable, datetime.datetime, \"The date must be a datetime.datetime object.\") for variable in [start_dt, end_dt]]\n            picked_date = self.fake.date_between_dates(date_start=start_dt, date_end=end_dt)\n            dt_code = self.parse_date(picked_date)  # 4-digit code (YYMM)\n        else:\n            picked_date = self.fake.date_time_this_year()\n            dt_code = self.parse_date(picked_date)  # 4-digit code (YYMM)\n        # federation unit code\n        uf_choice = kwargs.get('uf_code')\n        if uf_choice:\n            try:\n                get_uf_cod = self.uf_cod_map[uf_choice]\n            except KeyError:\n                raise KeyError(\"Invalid abbreviation for Brazilian State.\")\n        else:\n            get_uf_cod = random.choice([cod for cod in self.uf_cod_map.values()])\n        # CNPJ\n        cnpj = str(self.fake.cnpj()).replace(\".\", \"\").replace(\"/\", \"\").replace(\"-\", \"\")\n        nfce_cod = f\"{get_uf_cod}{dt_code}{cnpj}\"\n        last_digits = \"\".join([random.choice(string.digits) for n in range(24)])\n        cod_digits = nfce_cod + last_digits\n        nfce_cod = f\"{cod_digits[:2]}-{cod_digits[2:6]}-{cod_digits[6:8]}.{cod_digits[8:11]}.{cod_digits[11:14]}/{cod_digits[14:18]}-{cod_digits[18:20]}\"\\\n                   f\"-{cod_digits[20:22]}-{cod_digits[22:25]}-{cod_digits[25:28]}.{cod_digits[28:31]}.{cod_digits[31:34]}-{cod_digits[34:37]}.{cod_digits[37:40]}.\"\\\n                   f\"{cod_digits[40:43]}-{cod_digits[43]}\"\n        return nfce_cod\n\n    def nfce_code(self, **kwargs) -> str:\n        \"\"\"\n        Return a 44 character code for NFC-e without mask\n        Keyword Args:\n            invoice_no (str): invoice number with 44 digits or its masked version with 59 character length\n            start_dt (datetime.datetime): if no invoice_no has been entered, a start datetime is needed to\n            compute a random 04-digit code which has format YYMM. In case of missing value, a random date of the current year will be used.\n            end_dt (datetime.datetime): if no invoice_no has been entered, an end datetime is needed to\n            compute a random 04-digit code which has format YYMM. In case of missing value, a random date of the current year will be used.\n            uf_code(str): a common two-letter abbreviation for Brazilian states (http://www.brazil-help.com/brazilian_states.htm)\n        \"\"\"\n        code = self.nfce(**kwargs)\n        code = code.replace(\"-\", \"\").replace(\".\", \"\").replace(\"/\", \"\")\n        return code\n","repo_name":"netoferraz/oeuanalitico-posts","sub_path":"oeuanalitico-posts/nfe/preprocessing/nfeProvider.py","file_name":"nfeProvider.py","file_ext":"py","file_size_in_byte":5873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"23566465474","text":"import ray\n\n\ndef main():\n    \"\"\"Confirms placement of a GPU actor.\"\"\"\n    gpu_actor = ray.get_actor(\"gpu_actor\")\n    actor_response = ray.get(gpu_actor.where_am_i.remote())\n    return actor_response\n\n\nif __name__ == \"__main__\":\n    ray.init(\"auto\", namespace=\"gpu-test\")\n    out = main()\n    print(out)\n","repo_name":"ray-project/ray","sub_path":"python/ray/tests/kuberay/scripts/gpu_actor_validation.py","file_name":"gpu_actor_validation.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":28715,"dataset":"github-code","pt":"86"}
+{"seq_id":"70695683164","text":"def multiplicaV():\n    total = 1\n    print('Digite os numeros que deseja mutiplicar e 1 para sair: ')\n    while True:\n        num = float(input('* '))\n        total = total * num\n        if num == 1:\n            break\n    if total%2 == 0 or total%2 == 1:\n        print('Total = {} '.format(int(total)))\n    else:\n        print('Total = {} '.format(total))\n\n#multiplica()\n\ndef sub():\n    fator=[]  # will receive the array of numbers\n    while True:  # collect the inputs for the subtraction\n        number = input('digite os valores a subtrair: ')  # asking for the terms\n        while number != \"=\" and number.isnumeric()!= True:  # if it is neither '=' nor a number, ask again\n            number = input('você precisa digitar um numero ou sinal = para continuar:')  # asking once more\n        if number.isnumeric():  # if it is numeric, add it to the list\n            fator.append(float(number))  # add to the list\n        elif number=='=':  # on '=', leave the while and move on to the subtraction step\n            if len(fator) >= 2:  # if '=' arrives before two numbers were given, keep asking\n                break  # leaving the data-capture loop\n    \n    i=1  # index for the next while -> starts at the second position of the list\n    result= fator[0]  # because the first element is already consumed here\n    while True:\n        if i==(len(fator)):  # i must not go past the last position\n            break  # otherwise we would run off the end of the list\n        \n        result = result - fator[i]  # with [1, 2, 3]: result starts as 1 and fator[i] is 2, and so on\n        i=i+1  # the result of the first operation feeds the next one, with fator[i] as its next term\n        \n    return(result)\n\nprint(sub())\n","repo_name":"Diego-rosas/Introducao-a-Python","sub_path":"Lista 03/02/testecid.py","file_name":"testecid.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"20487615323","text":"import math\nimport torch\nimport numpy as np\nq = torch.randn((1,23,512,64))\nk= torch.randn((1,23,256,64))\nv= torch.randn((1,23,256,64))\natt = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))\nprint(att)\ny = att @ v\nprint(y)\nx =torch.randn((10,64))\nseq_length = x.size(1)\nbach_size = x.size(0)\nposition_ids = torch.arange(seq_length, dtype=torch.long, device=x.device)\nposition_ids = position_ids.unsqueeze(0).expand_as(x)\nposition_ids\ntoken_type_ids = torch.ones_like(x)\nx\n\nasssw = torch.arange(64, 
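A usage sketch for the Invoice provider above, registered through Faker's standard add_provider hook (uf_code='SP' is just an example state):

from faker import Faker

fake = Faker('pt_BR')
fake.add_provider(Invoice)

print(fake.nfce(uf_code='SP'))       # masked 59-character identifier
print(fake.nfce_code(uf_code='SP'))  # bare 44-digit code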
dtype=torch.long,).unsqueeze(0)\ny_1 = torch.chunk(asssw, 8, dim=1)\nz = torch.cat(y_1, dim=0)\nasssw = z.unsqueeze(0).expand_as(torch.randn((1,512,8,8), device=x.device))\nasssw1 =asssw.view(1,512,64).transpose(1, 2)\nbbb =asssw1.numpy()\nasssw2 =asssw1.transpose(1, 2).view(1,512,8,8)\nbbb1 =asssw2.numpy()\ntkm =torch.zeros((32,32))\n\ntkm[0][0]=1.0\n\ntkm[1][12]=1.0\ntkm[2][14]=1.0\ntkm[3][18]=1.0\n\nx","repo_name":"autumn-2-net/some_inst","sub_path":"testw.py","file_name":"testw.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"8743793504","text":"import ply.lex as lex\nimport ply.yacc as yacc\n\nkeywords = [\n 'INGRESS','GROUP','PRIORITY','TRANSFER','IS',\n 'MASK','SPEC','ETH','VLAN','VXLAN','SCTP','NVGRE',\n 'PFCP','GTPU','PPPOES','IPV4','IPV6','TCP','UDP',\n 'TYPE','SRC','DST','TCI', 'S_FIELD','TNI','VNI','TEID',\n 'GTP_PSC','TOS','TTL','PROTO','ESP','SPI','AH',\n 'SESSION_ID','L2TPV3OIP','QFI','SEID','PPPOE_PROTO_ID',\n 'INT','MACADDR','IPADDR4','IPADDR6','TC',\n 'VF','ID','QUEUE','INDEX','DROP','MARK','ORIGINAL',\n 'ETHDEV_PORT_ID', 'REPRESENTED_PORT'\n ]\n\nclass Parser(object):\n def __init__(self):\n self.tokens = keywords\n self.lexer = lex.lex(object=self,debug=False)\n self.parser = yacc.yacc(module=self,debug=False)\n\n def parse(self,s):\n# print('ready to parse ',s)\n return self.parser.parse(s)\n\n def t_error(self,t):\n raise Exception('patternParser : Illegal character %c %d' % (t.value[0],ord(t.value[0])))\n\n t_ignore = r' '\n t_INGRESS = r'ingress'\n t_GROUP = r'group'\n t_PRIORITY = r'priority'\n t_TRANSFER = r'transfer'\n t_IS = r'is'\n t_MASK = r'mask'\n t_SPEC = r'spec'\n t_ETH = r'eth'\n t_VLAN = r'vlan'\n t_VXLAN = r'vxlan'\n t_SCTP = r'sctp'\n t_NVGRE = 'nvgre'\n\n t_PFCP = r'pfcp'\n t_GTPU = r'gtpu'\n t_PPPOES = r'pppoes'\n t_IPV4 = r'ipv4'\n t_IPV6 = r'ipv6'\n t_TCP = r'tcp'\n t_UDP = r'udp'\n t_TYPE = r'type'\n t_SRC = r'src'\n t_DST = r'dst'\n t_TCI = r'tci'\n t_S_FIELD = r's_field'\n t_TNI = r'tni'\n\n t_VNI = r'vni'\n t_TEID = r'teid'\n t_GTP_PSC = r'gtp_psc'\n t_TOS = r'tos'\n t_TTL = r'ttl'\n t_PROTO = r'proto'\n t_TC = r'tc'\n t_ESP = r'esp'\n t_SPI = r'spi'\n t_AH = r'ah'\n t_SESSION_ID = r'session_id'\n t_L2TPV3OIP = r'l2tpv3oip'\n t_QFI = r'qfi'\n t_SEID = r'seid'\n t_PPPOE_PROTO_ID = r'pppoe_proto_id'\n t_VF = r'vf'\n t_QUEUE = r'queue'\n t_INDEX = r'index'\n t_ID = r'id'\n t_DROP = r'drop'\n t_MARK = r'mark'\n t_ORIGINAL = r'original'\n t_ETHDEV_PORT_ID = r'ethdev_port_id'\n t_REPRESENTED_PORT = r'represented_port'\n\n t_MACADDR = r'([0-9a-fA-F]{2}\\:){5}[0-9a-fA-F]{2}'\n t_IPADDR4 = r'([0-9]{1,3}\\.){3}[0-9]{1,3}'\n t_IPADDR6 = r'([0-9a-fA-F]{4}\\:){7}[0-9a-fA-F]{4}'\n t_INT = r'0x[0-9a-fA-F]+|[0-9]+'\n\n def p_error(self,p):\n print('patternParser Error Token : ',p,p.value)\n\n def p_result(self,p):\n '''result : attrs\n | pattern\n | action\n '''\n p[0] = p[1]\n\n def p_attrs(self,p):\n '''attrs : attr attrs\n |\n '''\n if len(p) == 3:\n p[0] = p[2] + [p[1]]\n else:\n p[0] = []\n\n def p_attr(self,p):\n '''attr : INGRESS\n | TRANSFER\n\t | GROUP INT\n\t | PRIORITY INT\n '''\n if len(p) == 3:\n p[0] = attrAst(p[1],p[2])\n else:\n p[0] = attrAst(p[1])\n\n def p_pattern(self,p):\n '''pattern : layer_name layer_fields\n | PPPOE_PROTO_ID IS INT\n '''\n if len(p) == 4:\n p[0] = patternAst('pppoe_proto_id',[fieldAst(p[1],p[2],p[3])])\n else:\n p[0] = patternAst(p[1],p[2])\n\n def p_layer_fields(self,p):\n '''layer_fields : layer_field 
layer_fields\n |\n '''\n if len(p) == 3:\n if not p[2]:\n p[0] = [p[1]]\n else:\n p[0] = [p[1]] + p[2]\n else:\n p[0] = []\n\n def p_layer_name(self,p):\n '''layer_name : ETH\n | VLAN\n | VXLAN\n | IPV4\n | IPV6\n | TCP\n | UDP\n | SCTP\n | NVGRE\n | GTPU\n | PFCP\n | GTP_PSC\n | PPPOES\n | AH\n | ESP\n | L2TPV3OIP\n\t '''\n p[0] = p[1]\n\n def p_layer_field(self,p):\n '''layer_field : SRC IS value\n | DST IS value\n | SRC SPEC value\n | SRC MASK value\n | DST SPEC value\n | DST MASK value\n | TYPE IS INT\n | SPI IS INT\n | SESSION_ID IS INT\n | VNI IS INT\n | TCI IS INT\n | SEID IS INT\n | PROTO IS INT\n | TTL IS INT\n | TOS IS INT\n | QFI IS INT\n | S_FIELD IS INT\n | TNI IS INT\n | TEID MASK INT\n | TEID IS INT\n | TC IS INT\n '''\n p[0] = fieldAst(p[1],p[2],p[3])\n\n def p_value(self,p):\n '''value : MACADDR\n | IPADDR4\n | IPADDR6\n | INT\n '''\n p[0] = p[1]\n\n def p_action(self,p):\n '''action : VF ID INT\n\t | VF ORIGINAL INT\n\t | QUEUE INDEX INT\n | MARK\n | DROP\n | MARK ID INT\n | REPRESENTED_PORT ETHDEV_PORT_ID INT\n '''\n if len(p) == 4:\n p[0] = actionAst(p[1],p[2],p[3])\n else:\n p[0] = actionAst(p[1])\n\n# prep -- prepostion\nclass fieldAst(object):\n def __init__(self,field,prep,val):\n assert(isinstance(field,str))\n self.field = field\n self.prep = prep\n self.val = val\n\n def __repr__(self):\n return '%s %s %s' % (self.field,self.prep,self.val)\n\nclass actionAst(object):\n def __init__(self,action,prep=None,val=None):\n self.action = action\n self.prep = prep\n self.val = val\n\n def __repr__(self):\n s = self.action\n if self.prep:\n s += ' ' + self.prep\n if self.val:\n s += ' ' + self.val\n return s\n\nclass patternAst(object):\n def __init__(self,name,fields):\n self.name = name\n self.fields = fields\n\n def __repr__(self):\n s = ''\n if self.name:\n s += self.name\n for f in self.fields:\n s += ' ' + str(f)\n return s\n\n def __iter__(self):\n return iter(self.fields)\n\nclass attrAst(object):\n def __init__(self,key,val=None):\n self.key = key\n self.val = val\n\n def __repr__(self):\n s = self.key\n if self.val:\n s += self.val\n return s\n\n","repo_name":"yiding-zhou/uft_testsuite","sub_path":"core/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":6625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"41421023105","text":"import os\nimport sys\nimport urllib\n\nfrom flask import session, request, redirect, render_template\n\nfrom portfolio_common import render_template_with_username, path_from_sessionuser_root\nfrom portfolio_common import ALLOWED_EXTENSIONS\n\ndef unquote(s):\n if isinstance(s, unicode):\n s = s.encode('utf-8')\n return urllib.unquote(s).decode('utf-8')\n\ndef quote(s):\n if isinstance(s, unicode):\n s = s.encode('utf-8')\n return urllib.quote(s)\n\ndef allowed_file(filename):\n return '.' 
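A hedged usage sketch for the rule Parser above, with inputs assembled from tokens the grammar itself declares (untested against the suite's real rule strings):

p = Parser()

attrs   = p.parse('ingress group 1 priority 0')    # -> list of attrAst
pattern = p.parse('eth src is 00:11:22:33:44:55')  # -> patternAst
action  = p.parse('queue index 3')                 # -> actionAst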
in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\ndef list_files_and_dirs(dirpath):\n dirs_and_files = os.listdir(dirpath)\n dirlist = []\n filelist = []\n for name in dirs_and_files:\n (dirlist if os.path.isdir(os.path.join(dirpath, name)) else filelist) \\\n .append(name)\n return filelist, dirlist\n\ndef check_filename(filename):\n unpermitted_chars = '&:;\"' + \"'\"\n if any((c in filename) for c in unpermitted_chars):\n return False\n if any((ord(c) < 0x20) for c in filename): # including control chars?\n return False\n return True\n\ndef add_artifact_functions(app):\n @app.route('/artifact_upload_error')\n def artifact_upload_error():\n return render_template(\"upload_error.html\")\n \n @app.route('/artifact/', methods=['GET'])\n def artifact_dir(dirpath):\n username = session['username']\n filelist, dirlist = list_files_and_dirs(path_from_sessionuser_root(dirpath))\n return render_template_with_username(\"artifact.html\", \n ls=[(n, quote(n)) for n in filelist],\n dir=[(n, quote(n)) for n in dirlist],\n dirpath=quote(dirpath) + \"/\")\n\n @app.route('/artifact/', methods=['POST'])\n def artifact_dir_post(dirpath):\n makedir = unquote(request.form['directoryname'])\n file = request.files['file']\n if file:\n if allowed_file(file.filename) and check_filename(file.filename):\n file.save(path_from_sessionuser_root(dirpath, file.filename))\n else:\n sys.stderr.write(\"log> upload failed (unallowed name): %s\\n\" % repr(file.filename))\n elif makedir:\n os.mkdir(path_from_sessionuser_root(dirpath, makedir))\n\n filelist, dirlist = list_files_and_dirs(path_from_sessionuser_root(dirpath))\n return render_template_with_username(\"artifact.html\", \n ls=[(n, quote(n)) for n in filelist],\n dir=[(n, quote(n)) for n in dirlist],\n dirpath=quote(dirpath) + \"/\")\n\n @app.route('/artifact', methods=['GET'])\n def artifact_get():\n filelist, dirlist = list_files_and_dirs(path_from_sessionuser_root())\n return render_template_with_username(\"artifact.html\",\n ls=[(n, quote(n)) for n in filelist],\n dir=[(n, quote(n)) for n in dirlist],\n dirpath=\"\")\n\n @app.route('/artifact', methods=['POST'])\n def artifact_post():\n makedir = unquote(request.form['directoryname'])\n file = request.files['file']\n if file:\n if allowed_file(file.filename) and check_filename(file.filename):\n file.save(path_from_sessionuser_root(file.filename))\n else:\n return redirect(\"/artifact_upload_error\")\n sys.stderr.write(\"log> upload failed (unallowed name): %s\\n\" % repr(file.filename))\n elif makedir:\n os.mkdir(path_from_sessionuser_root(makedir))\n\n filelist, dirlist = list_files_and_dirs(path_from_sessionuser_root())\n return render_template_with_username(\"artifact.html\",\n ls=[(n, quote(n)) for n in filelist],\n dir=[(n, quote(n)) for n in dirlist],\n dirpath=\"\")\n\n\n","repo_name":"ICTKyouikukei2013/portbacker","sub_path":"portfolio_artifact.py","file_name":"portfolio_artifact.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"35165511497","text":"#!/usr/bin/env python3\n\nimport rclpy\nfrom rclpy.action import ActionServer, CancelResponse, GoalResponse\nfrom rclpy.action.server import ServerGoalHandle\nfrom rclpy.callback_groups import ReentrantCallbackGroup\nfrom rclpy.executors import MultiThreadedExecutor\nfrom rclpy.node import Node\nfrom rclpy.qos import qos_profile_sensor_data, qos_profile_system_default\n\nfrom riptide_msgs2.msg import PwmStamped\nfrom riptide_msgs2.action 
import ThrusterTest\nfrom std_msgs.msg import Float32MultiArray\n\nimport yaml\n\nimport time\n\n# The force value to publish to thruster_forces\n# This is added primarily for visualization, so the thruster can be viewed in rviz during testing\n# hopefully allowing diagnosing of any thruster positioning/directionality issues\nTHRUSTER_TEST_FORCE = 1.3333\nTHRUSTER_NEUTRAL_FORCE = 0.0\n\nNEUTRAL_PWM = 1500\nPOSITIVE_PWM = 1550\nNEGATIVE_PWM = 1450\n\nclass ThrusterTestActionServer(Node):\n THRUSTER_PERCENT = 0.05\n\n def __init__(self):\n super().__init__('thruster_test')\n self.declare_parameter(\"vehicle_config\", rclpy.Parameter.Type.STRING)\n\n self.pwm_pub = self.create_publisher(PwmStamped ,\"command/pwm\", qos_profile_sensor_data)\n self.thruster_forces_pub = self.create_publisher(Float32MultiArray ,\"thruster_forces\", qos_profile_system_default)\n\n # Get the mass and COM\n with open(self.get_parameter('vehicle_config').value, 'r') as stream:\n self.vehicle_file = yaml.safe_load(stream)\n self.num_thrusters = len(self.vehicle_file[\"thrusters\"])\n\n self.running = False\n\n self._action_server = ActionServer(\n self,\n ThrusterTest,\n 'thruster_test',\n self.execute_cb,\n goal_callback=self.goal_callback,\n cancel_callback=self.cancel_callback,\n callback_group=ReentrantCallbackGroup())\n\n ##############################\n # Protections \n \n def destroy(self):\n self.destroy_node()\n self._action_server.destroy()\n\n def goal_callback(self, goal_request):\n \"\"\"Accept or reject a client request to begin an action.\"\"\"\n if self.running:\n return GoalResponse.REJECT\n else:\n self.running = True\n return GoalResponse.ACCEPT\n\n def cancel_callback(self, goal):\n return CancelResponse.ACCEPT\n\n\n def publish_pwm(self, pwm):\n msg = PwmStamped()\n msg.header.stamp = self.get_clock().now().to_msg()\n msg.pwm = pwm\n self.pwm_pub.publish(msg)\n\n def publish_forces(self, forces):\n msg = Float32MultiArray()\n msg.data = forces\n self.thruster_forces_pub.publish(msg)\n\n def execute_cb(self, goal_handle: ServerGoalHandle):\n self.get_logger().info(\"Starting ThrusterTest Action\")\n pwm = [NEUTRAL_PWM] * self.num_thrusters\n thruster_forces = [THRUSTER_NEUTRAL_FORCE] * self.num_thrusters\n\n while True:\n for i in range(self.num_thrusters):\n if goal_handle.is_cancel_requested:\n self.get_logger().info('Preempted ThrusterTest Action')\n self.publish_pwm([NEUTRAL_PWM] * self.num_thrusters)\n self.publish_forces([THRUSTER_NEUTRAL_FORCE] * self.num_thrusters)\n\n self.running = False\n goal_handle.canceled()\n return ThrusterTest.Result()\n\n thruster_type = self.vehicle_file[\"thrusters\"][i][\"type\"]\n thruster_name = self.vehicle_file[\"thrusters\"][i][\"name\"]\n\n self.get_logger().info(f'Testing {thruster_name} Thruster ({i+1})')\n\n thruster_forces[i] = THRUSTER_TEST_FORCE\n if thruster_type == 0:\n pwm[i] = POSITIVE_PWM\n else:\n pwm[i] = NEGATIVE_PWM\n\n for _ in range(300):\n self.publish_pwm(pwm)\n self.publish_forces(thruster_forces)\n time.sleep(0.01)\n \n pwm[i] = NEUTRAL_PWM\n thruster_forces[i] = THRUSTER_NEUTRAL_FORCE\n\n #should never reach this point in the code\n self.get_logger().info(\"ThrustTest succeeded\")\n goal_handle.succeed()\n self.running = False\n return ThrusterTest.Result()\n\ndef main(args=None):\n rclpy.init(args=args)\n\n thruster_test_action_server = ThrusterTestActionServer()\n\n executor = MultiThreadedExecutor()\n rclpy.spin(thruster_test_action_server, executor=executor)\n\n thruster_test_action_server.destroy()\n rclpy.shutdown()\n\nif 
__name__ == '__main__':\n main()","repo_name":"osu-uwrt/riptide_control","sub_path":"riptide_controllers/riptide_controllers2/actions/thruster_test_old.py","file_name":"thruster_test_old.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"19544877926","text":"import os\nimport json\n\nJSON_FILENAME = \"recette.json\"\n\nMES_INGREDIENTS = [\"beurre\", \"beurre doux\", \"beurre doux mou\", \"beurre fondu\", \"chocolat noir\", \"citron\",\n \"confiture d’abricots\", \"crème fraîche\", \"eau tiède\", \"farine\", \"farine blanche\",\n \"farine de blé\", \"farine tout usage\", \"gingembre\", \"gros œuf\", \"huile\",\n \"huile neutre\", \"lait\", \"lait tiède\", \"lait végétal\", \"miel\", \"pomme de terre\",\n \"sel\", \"sel fin\", \"sucre\", \"sucre en grains\", \"sucre en poudre\", \"sucre fin\",\n \"tasse de sucre\", \"tasse d’huile d’olive tiède\", \"tasse farine\", \"œuf\",\n \"œuf battu\", \"œuf entier\", \"œufs\", \"œufs entiers\", \"œufs séparés\", \"beurre ramolli\",\n \"chocolat à pâtisserie noir\"]\n\n\ndef charger_fichier_json(filename):\n if os.path.exists(filename):\n f = open(filename, \"r\")\n json_data = f.read()\n f.close()\n return json.loads(json_data)\n return None\n\n\ndef filtrer_nom_ingredient(nom_ingredient):\n # \" de \"\n # \" d'\"\n # nombre + espace\n # strip / split / find(\" de \") / [:] / isdigit()\n filtre_gauche = False\n\n index_de = nom_ingredient.find(\" de \")\n if index_de != -1:\n nom_ingredient = nom_ingredient[index_de + 4:]\n filtre_gauche = True\n\n if not filtre_gauche:\n index_d_apostrophe = nom_ingredient.find(\" d'\")\n if index_d_apostrophe == -1:\n index_d_apostrophe = nom_ingredient.find(\" d’\")\n if index_d_apostrophe != -1:\n nom_ingredient = nom_ingredient[index_d_apostrophe + 3:]\n filtre_gauche = True\n\n if not filtre_gauche:\n nom_split = nom_ingredient.split(\" \")\n if nom_split[0].isdigit() and nom_split[1] == \"ou\" and nom_split[2].isdigit():\n nom_ingredient = \" \".join(nom_split[3:])\n filtre_gauche = True\n\n if not filtre_gauche:\n nom_split = nom_ingredient.split(\" \")\n if nom_split[0].isdigit() and nom_split[1] == \"g\":\n nom_ingredient = \" \".join(nom_split[2:])\n filtre_gauche = True\n\n if not filtre_gauche:\n nom_split = nom_ingredient.split(\" \")\n if nom_split[0].isdigit():\n nom_ingredient = \" \".join(nom_split[1:])\n filtre_gauche = True\n\n if not filtre_gauche:\n if nom_ingredient.startswith(\"du \"):\n nom_ingredient = nom_ingredient[3:]\n filtre_gauche = True\n\n if not filtre_gauche:\n if nom_ingredient.startswith(\"des \"):\n nom_ingredient = nom_ingredient[4:]\n filtre_gauche = True\n\n # filtre à droite\n index_parenthese = nom_ingredient.find(\"(\")\n if index_parenthese != -1:\n return nom_ingredient[:index_parenthese]\n\n index_tiret = nom_ingredient.find(\" - \")\n if index_tiret != -1:\n return nom_ingredient[:index_tiret]\n\n index_crochet = nom_ingredient.find(\" [\")\n if index_crochet != -1:\n return nom_ingredient[:index_crochet]\n\n return nom_ingredient\n\n\ndef trier_recettes_par_liste_ingredients(liste_recettes, liste_ingredients):\n # liste_recettes_sauvegardees\n # \"noms_ingredients\" = []\n for recette in liste_recettes:\n ingredients = recette[\"recette\"][\"ingredients\"]\n noms_ingredients = [filtrer_nom_ingredient(i).lower().strip() for i in ingredients]\n recette[\"noms_ingredients\"] = noms_ingredients\n recette[\"ingredients_correspondants\"] = [i for i in noms_ingredients if i 
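A minimal client sketch for the thruster-test action server above (same riptide_msgs2 interfaces assumed; goal fields, if any, are left at their defaults):

import rclpy
from rclpy.action import ActionClient
from riptide_msgs2.action import ThrusterTest

rclpy.init()
node = rclpy.create_node('thruster_test_client')
client = ActionClient(node, ThrusterTest, 'thruster_test')
client.wait_for_server()
send_future = client.send_goal_async(ThrusterTest.Goal())
rclpy.spin_until_future_complete(node, send_future)
# the server sweeps thrusters forever, so stop it explicitly when done
send_future.result().cancel_goal_async()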
in liste_ingredients]\n recette[\"ingredients_manquants\"] = [i for i in noms_ingredients if i not in liste_ingredients]\n recette[\"score_correspondance_ingredients\"] = len(recette[\"ingredients_correspondants\"]) - 4 * (\n len(recette[\"ingredients_manquants\"]))\n if len(recette[\"ingredients_correspondants\"]) == 0:\n recette[\"score_correspondance_ingredients\"] -= 100\n if len(recette[\"ingredients_manquants\"]) == 0:\n recette[\"score_correspondance_ingredients\"] += 100\n\n liste_recettes.sort(key=lambda x: x[\"score_correspondance_ingredients\"], reverse=True)\n return liste_recettes\n","repo_name":"jeremylanes/GourmetDelights","sub_path":"src/scraper/jonathant_cake_srap_script/cake_scrap_lib.py","file_name":"cake_scrap_lib.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"70877311006","text":"\n# coding: utf-8\n\n# In[125]:\n\n\nimport numpy as np \nimport pandas as pd\n\nimport sklearn \nfrom sklearn.cluster import KMeans\nfrom sklearn import datasets\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nmatplotlib.style.use('ggplot')\n\nimport math\n\n\n# In[126]:\n\n\ndataset = datasets.load_boston()\n\n\n# In[127]:\n\n\ndf = pd.DataFrame(dataset.data, columns=dataset.feature_names)\n#Shape of the dataset each column is an attribute ('feature')\nprint(dataset.feature_names) #print columns of the dataset\n\n#Print first 10 rows\nprint(dataset.data[:10])\n\n\n# In[128]:\n\n\ndf.head()\n\n\n# In[129]:\n\n\ncrime = df[\"CRIM\"].values\nzn = df[\"ZN\"].values\nindus = df[\"INDUS\"].values\nchas = df[\"CHAS\"].values\nnox = df[\"NOX\"].values\nrm = df[\"RM\"].values\nage = df[\"AGE\"].values\ndis = df[\"DIS\"].values\nrad = df[\"RAD\"].values\ntax = df[\"TAX\"].values\nptratio = df[\"PTRATIO\"].values\nb = df[\"B\"].values\nlstat = df[\"LSTAT\"].values\n\n\n# In[130]:\n\n\n# Calculate mean for each column\n\ndef estimate_mean(data):\n return sum(data)/len(data)\nprint(\"mean of crime col is :\" ,estimate_mean(crime))\nprint(\"mean of ZN col is :\" ,estimate_mean(zn))\nprint(\"mean of INDUS col is :\" ,estimate_mean(indus))\nprint(\"mean of CHAS col is :\" ,estimate_mean(chas))\nprint(\"mean of NOX col is :\" ,estimate_mean(nox))\nprint(\"mean of RM col is :\" ,estimate_mean(rm))\nprint(\"mean of AGE col is :\" ,estimate_mean(age))\nprint(\"mean of DIS col is :\" ,estimate_mean(dis))\nprint(\"mean of RAD col is :\" ,estimate_mean(rad))\nprint(\"mean of TAX col is :\" ,estimate_mean(tax))\nprint(\"mean of PTRATIO col is :\" ,estimate_mean(ptratio))\nprint(\"mean of B col is :\" ,estimate_mean(b))\nprint(\"mean of LSTAT col is :\" ,estimate_mean(lstat))\n\n\n# In[131]:\n\n\n# Calculate Variance for all columns\n\ndef estimate_variance(data, mu=None):\n if mu is None:\n mu = estimate_mean(data)\n return sum([(x - mu)**2 for x in data]) / len(data)\n\nprint(\"Variance of CRIME col is :\" ,estimate_variance(crime))\nprint(\"Variance of ZN col is :\" ,estimate_variance(zn))\nprint(\"Variance of INDUS col is :\" ,estimate_variance(indus))\nprint(\"Variance of CHAS col is :\" ,estimate_variance(chas))\nprint(\"Variance of NOX col is :\" ,estimate_variance(nox))\nprint(\"Variance of RM col is :\" ,estimate_variance(rm))\nprint(\"Variance of AGE col is :\" ,estimate_variance(age))\nprint(\"Variance of DIS col is :\" ,estimate_variance(dis))\nprint(\"Variance of RAD col is :\" ,estimate_variance(rad))\nprint(\"Variance of TAX col is 
:\" ,estimate_variance(tax))\nprint(\"Variance of PTRATIO col is :\" ,estimate_variance(ptratio))\nprint(\"Variance of B col is :\" ,estimate_variance(b))\nprint(\"Variance of LSTAT col is :\" ,estimate_variance(lstat))\n\n\n# In[132]:\n\n\n#1(c) Scatter 'NOX' vs 'CRIM' \n\nplt.scatter(nox, crime)\nplt.xlabel(\"Nitric Oxide Concentration\")\nplt.ylabel(\"Per Capita Crime\")\n\n\n# In[133]:\n\n\n#1(c) Scatter \"CRIME\" vs \"The Housing Prices\"\n\ndf[\"price\"]= dataset.target\nhousing_price = df[\"price\"]\nplt.scatter(crime, housing_price)\nplt.xlabel(\"Crime\")\nplt.ylabel(\"Housing Prices\")\n\n\n\n# In[136]:\n\n\n#1(d) Correlations between two pairs \"NOX' and \"CRIM\"\n    #Correlation lies between -1 (perfect anti-correlation) and 1 (perfect correlation); a number like 0.25 is a weak positive correlation\n    \n    \n\ndef std_dev(data): #Calculate standard deviation\n    return math.sqrt(estimate_variance(data))\n\ndef covariance(x,y): #Covariance: how two variables vary in tandem from their means\n    # de-mean both series and average the products; the population (1/n) form\n    # matches estimate_variance above so correlation() stays within [-1, 1]\n    n = len(x)\n    return np.dot(x - estimate_mean(x), y - estimate_mean(y))/n\n\ndef correlation(x,y): #divide out standard deviations of both the variables.\n    stddev_x = std_dev(x)\n    stddev_y = std_dev(y)\n    if stddev_x > 0 and stddev_y > 0 :\n        return covariance(x,y)/ stddev_x/ stddev_y\n    else:\n        return 0 #if no variation then correlation is 0\n    \nprint(\"Correlation between NOX and Crime is : \" ,correlation(nox,crime))\nprint(\"Correlation between Crime and Housing Prices is : \" ,correlation(crime,housing_price))\n\n\n","repo_name":"Githubshambhavi/Python-Applied-Data-Mining","sub_path":"Boston_housing.py","file_name":"Boston_housing.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"}
+{"seq_id":"20889385543","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Discriminator(nn.Module):\n    def __init__(self):\n        super(Discriminator, self).__init__()\n        # Conv2d(in_channel, out_channel, kernel_size)\n        self.conv1 = nn.utils.spectral_norm(nn.Conv2d(3, 32, 3, stride=1, padding=1))\n        self.conv2 = nn.utils.spectral_norm(nn.Conv2d(32, 64, 3, stride=2, padding=1))\n        self.conv3 = nn.utils.spectral_norm(nn.Conv2d(64, 128, 3, stride=1, padding=1))\n        self.norm1 = nn.InstanceNorm2d(128)\n        self.conv4 = nn.utils.spectral_norm(nn.Conv2d(128, 128, 3, stride=2, padding=1))\n        self.conv5 = nn.utils.spectral_norm(nn.Conv2d(128, 256, 3, stride=1, padding=1))\n        self.norm2 = nn.InstanceNorm2d(256)\n        self.conv6 = nn.utils.spectral_norm(nn.Conv2d(256, 256, 3, stride=1, padding=1))\n        self.norm3 = nn.InstanceNorm2d(256)\n        self.conv7 = nn.utils.spectral_norm(nn.Conv2d(256, 1, 3, stride=1, padding=1))\n\n    def forward(self, x, hidden_state=None):\n        x = self.conv1(x)\n        x = F.leaky_relu(x)\n\n        x = self.conv2(x)\n        x = F.leaky_relu(x)\n\n        x = self.conv3(x)\n        x = self.norm1(x)\n        x = F.leaky_relu(x)\n\n        x = self.conv4(x)\n        x = F.leaky_relu(x)\n\n        x = self.conv5(x)\n        x = self.norm2(x)\n        x = F.leaky_relu(x)\n\n        x = self.conv6(x)\n        x = self.norm3(x)\n        x = F.leaky_relu(x)\n\n        x = self.conv7(x)\n        return x\n    \n    def save_model(self, path):\n        torch.save(self.state_dict(), path)\n\n    def load_model(self, path):\n        self.load_state_dict(torch.load(path))\n","repo_name":"NakuraMino/AnimeGeneration","sub_path":"model/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"}
+{"seq_id":"9344492023","text":"##\n# Ice sheet simulation\n# A real-time evolving simulation of a 1-D ice sheet that models an \n# increasing profile elevation as snow accumulates over time. 
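A quick cross-check of the hand-rolled statistics above against numpy: with the population-normalized covariance, correlation() is exactly Pearson's r and should agree with np.corrcoef (a sketch with made-up data):

import numpy as np

x = np.array([1.0, 2.0, 4.0, 7.0])
y = np.array([2.0, 3.0, 5.0, 9.0])
assert np.isclose(correlation(x, y), np.corrcoef(x, y)[0, 1])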
\n# This drives a pressure gradient which causes an outward flow in both directions.\n##\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Predefined constants\nnX = 10\t\t\t# number of grid points\ndomainWidth = 1e6\t\t# meters\ntimeStep = 100\t\t\t# years\nnYears = 20000\t\t\t# years\nflowParam = 1e4\t\t\t# m horizontal / yr\nsnowFall = 1\t\t\t# m / y\nplotLimit = 4000\n\nnSteps = int(nYears / timeStep)\ndX = domainWidth / nX\n\n# Initialise elevation and flow\nelevations = np.zeros(nX+2)\nflows = np.zeros(nX+1)\n\nfig = plt.figure()\nax = fig.add_subplot(1,1,1)\n\n# Loop through time\nfor i in range(0, nYears, timeStep):\n for ix in range(0, nX+1):\n surface_gradient = ( elevations[ix] - elevations[ix+1] ) / dX\n flows[ix] = surface_gradient * flowParam * ( elevations[ix]+elevations[ix+1] ) / 2 / dX\n\n for ix in range(1, nX+1):\n elevations[ix] = elevations[ix] + ( snowFall + flows[ix-1] - flows[ix] ) * timeStep\n\n\n print (\"Years:\", i)\n ax.clear()\t\t\t# to update graph\n ax.plot(elevations)\n plt.title('1D Ice Sheet Model')\n plt.ylim(0,plotLimit)\n plt.show(block=False)\n plt.pause(0.001)\t\t\t# delay between update\n\nax.clear()\nax.plot( elevations )\nax.set_ylim([0,plotLimit])\nplt.show()","repo_name":"mel-liow/climate-modelling","sub_path":"ice_sheet_simulation.py","file_name":"ice_sheet_simulation.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"20889385543","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n # Conv2d(in_channel, out_channel, kernel_size)\n self.conv1 = nn.utils.spectral_norm(nn.Conv2d(3, 32, 3, stride=1, padding=1))\n self.conv2 = nn.utils.spectral_norm(nn.Conv2d(32, 64, 3, stride=2, padding=1))\n self.conv3 = nn.utils.spectral_norm(nn.Conv2d(64, 128, 3, stride=1, padding=1))\n self.norm1 = nn.InstanceNorm2d(128)\n self.conv4 = nn.utils.spectral_norm(nn.Conv2d(128, 128, 3, stride=2, padding=1))\n self.conv5 = nn.utils.spectral_norm(nn.Conv2d(128, 256, 3, stride=1, padding=1))\n self.norm2 = nn.InstanceNorm2d(256)\n self.conv6 = nn.utils.spectral_norm(nn.Conv2d(256, 256, 3, stride=1, padding=1))\n self.norm3 = nn.InstanceNorm2d(256)\n self.conv7 = nn.utils.spectral_norm(nn.Conv2d(256, 1, 3, stride=1, padding=1))\n\n \n def forward(self, x, hidden_state=None):\n x = self.conv1(x)\n x = F.leaky_relu(x)\n\n x = self.conv2(x)\n x = F.leaky_relu(x)\n \n x = self.conv3(x)\n x = self.norm1(x)\n x = F.leaky_relu(x)\n\n x = self.conv4(x)\n x = F.leaky_relu(x)\n\n x = self.conv5(x)\n x = self.norm2(x)\n x = F.leaky_relu(x)\n\n x = self.conv6(x)\n x = self.norm3(x)\n x = F.leaky_relu(x)\n \n x = self.conv7(x)\n return x\n \n def save_model(self, path):\n torch.save(self.state_dict(), path)\n\n def load_model(self, path):\n self.load_state_dict(torch.load(path))\n","repo_name":"NakuraMino/AnimeGeneration","sub_path":"model/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"14267463765","text":"import argparse\nimport logging\nimport os\nimport pkgutil\nfrom functools import partial\nfrom threading import Thread\n\nfrom bokeh.application.application import Application\nfrom bokeh.application.handlers import ScriptHandler\nfrom bokeh.server.server import Server\n\nfrom streamvis import 
__version__\nfrom streamvis.handler import StreamvisCheckHandler, StreamvisHandler\nfrom streamvis.receiver import Receiver, StreamAdapter\nfrom streamvis.statistics_handler import StatisticsHandler\n\nlogging.basicConfig(format=\"%(asctime)s %(message)s\", level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n \"\"\"The streamvis command line interface.\n\n This is a wrapper around bokeh server that provides an interface to launch\n applications bundled with the streamvis package.\n \"\"\"\n base_path = os.path.dirname(os.path.abspath(__file__))\n\n # Discover streamvis apps\n apps_path = os.path.join(base_path, \"apps\")\n available_apps = []\n for module_info in pkgutil.iter_modules([apps_path]):\n available_apps.append(module_info.name)\n\n # Prepare argument parser\n parser = argparse.ArgumentParser(\n prog=\"streamvis\", formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n parser.add_argument(\"-v\", \"--version\", action=\"version\", version=f\"%(prog)s {__version__}\")\n\n parser.add_argument(\"app\", type=str, choices=available_apps, help=\"streamvis application\")\n\n parser.add_argument(\n \"--port\", type=int, default=5006, help=\"a port to listen on for HTTP requests\"\n )\n\n parser.add_argument(\n \"--allow-websocket-origin\",\n metavar=\"HOST[:PORT]\",\n type=str,\n action=\"append\",\n default=None,\n help=\"a hostname that can connect to the server websocket\",\n )\n\n parser.add_argument(\n \"--page-title\", type=str, default=\"StreamVis\", help=\"browser tab title for the application\"\n )\n\n parser.add_argument(\n \"--address\",\n metavar=\"PROTOCOL://HOST:PORT\",\n type=str,\n default=\"tcp://127.0.0.1:9001\",\n help=\"an address string for zmq socket\",\n )\n\n parser.add_argument(\n \"--connection-mode\",\n type=str,\n choices=[\"connect\", \"bind\"],\n default=\"connect\",\n help=\"whether to bind a socket to an address or connect to a remote socket with an address\",\n )\n\n parser.add_argument(\n \"--io-threads\",\n type=int,\n default=1,\n help=\"the size of the zmq thread pool to handle I/O operations\",\n )\n\n parser.add_argument(\n \"--buffer-size\",\n type=int,\n default=1,\n help=\"a number of last received zmq messages to keep in memory\",\n )\n\n parser.add_argument(\n \"--max-client-connections\",\n type=int,\n default=2,\n help=\"a maximum number of concurrent client connections\",\n )\n\n parser.add_argument(\n \"--client-fps\", type=float, default=1, help=\"client update rate in frames per second\"\n )\n\n parser.add_argument(\n \"--allow-client-subnet\",\n type=str,\n action=\"append\",\n default=None,\n help=\"a subnet from which client connections are allowed\",\n )\n\n parser.add_argument(\n \"--args\",\n nargs=argparse.REMAINDER,\n default=[],\n help=\"command line arguments for the streamvis application\",\n )\n\n args = parser.parse_args()\n\n app_path = os.path.join(apps_path, args.app + \".py\")\n logger.info(app_path)\n\n # StatisticsHandler is used by Receiver to parse metadata information to be displayed in\n # 'statistics' application, all messages are being processed.\n stats = StatisticsHandler()\n\n # Receiver gets messages via zmq stream and parses statistics with StatisticsHandler\n receiver = Receiver(on_receive=stats.parse, buffer_size=args.buffer_size)\n\n # Start receiver in a separate thread\n start_receiver = partial(receiver.start, args.io_threads, args.connection_mode, args.address)\n t = Thread(target=start_receiver, daemon=True)\n t.start()\n\n # Reconstructs requested images\n jf_adapter = 
StreamAdapter()\n\n # StreamvisHandler is a custom bokeh application Handler, which sets some of the core\n # properties for new bokeh documents created by all applications.\n sv_handler = StreamvisHandler(receiver, stats, jf_adapter, args)\n sv_check_handler = StreamvisCheckHandler(\n max_sessions=args.max_client_connections, allow_client_subnet=args.allow_client_subnet\n )\n\n applications = dict() # List of bokeh applications\n\n # Main application\n bokeh_handler = ScriptHandler(filename=app_path, argv=args.args)\n applications[\"/\"] = Application(sv_handler, bokeh_handler, sv_check_handler)\n\n # Add all common applications\n common_apps_path = os.path.join(base_path, \"common_apps\")\n for module_info in pkgutil.iter_modules([common_apps_path]):\n app_name = module_info.name\n bokeh_handler = ScriptHandler(filename=os.path.join(common_apps_path, app_name + \".py\"))\n sv_check_handler = StreamvisCheckHandler(allow_client_subnet=args.allow_client_subnet)\n applications[f\"/{app_name}\"] = Application(sv_handler, bokeh_handler, sv_check_handler)\n\n server = Server(\n applications,\n port=args.port,\n allow_websocket_origin=args.allow_websocket_origin,\n unused_session_lifetime_milliseconds=1,\n check_unused_sessions_milliseconds=3000,\n )\n\n server.start()\n server.io_loop.start()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"paulscherrerinstitute/streamvis","sub_path":"streamvis/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"86"} +{"seq_id":"43686780462","text":"'''\nIn an m*n board every cell holds a gift, and each gift has a value (greater than 0). Starting from\nthe top-left corner of the board you pick up the gift in each cell you visit, moving one cell right\nor down at a time, until you reach the bottom-right corner. Given a board and the gifts on it,\ncompute the maximum total value of gifts you can collect.\n'''\nclass Solution:\n def getmaxValue(self, values, rows, cols):\n if not values or rows<=0 or cols <=0:\n return 0\n # temporary array holding the best value reachable for each column of the current row\n temp = [0] * cols\n\n for i in range(rows):\n for j in range(cols):\n left = 0\n up = 0\n\n if i > 0:\n up = temp[j]\n if j > 0:\n left = temp[j-1]\n temp[j] = max(up,left) + values[i*cols+j] # row-major index of cell (i, j)\n return temp[-1]\ns = Solution()\na = s.getmaxValue([1,10,3,8,12,2,9,6,5,7,4,11,3,7,16,5],4,4)\nprint(a)\n","repo_name":"david6666666/cwq","sub_path":"33剑指offer/69 礼物的最大价值.py","file_name":"69 礼物的最大价值.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"46139162064","text":"from os import path, mkdir\nimport shutil\nimport json\nimport warnings\nimport logging\n\n\nclass Storage(object):\n \"\"\"Image storage and db.\"\"\"\n\n db = None\n basepath = None\n dbname = None\n\n def __init__(self, basepath):\n \"\"\"Initialize storage object.\"\"\"\n self.basepath = basepath\n self.dbname = path.join(basepath, 'db.json')\n\n def __enter__(self):\n \"\"\"Open db connection.\"\"\"\n try:\n self.db = json.load(open(self.dbname))\n except IOError:\n self.db = {}\n except ValueError:\n warnings.warn(\n \"Database '%s' was corrupt or incomplete, rebuilding\"\n % self.dbname)\n self.db = {}\n\n if 'images' not in self.db:\n self.db['images'] = {}\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Close connection to db.\"\"\"\n json.dump(self.db, open(self.dbname, \"w\"))\n self.db = None\n self.images = {}\n\n def insert(self, image):\n \"\"\"Insert new image.\"\"\"\n if image.id in self.db['images']:\n logging.info(\"Skipped (duplicate)\")\n return\n logging.info(\"done.\")\n fullpath = path.join(self.basepath, 
image.new_path)\n\n # Create dest dir\n imagedir = path.join(self.basepath,\n path.split(image.new_path)[0])\n if not path.isdir(imagedir):\n mkdir(imagedir)\n\n # Copy to dest\n shutil.copy(image.old_path,\n path.join(self.basepath, image.new_path))\n\n # Store in db\n self.db['images'][image.id] = image.old_path\n\n","repo_name":"awagner83/PhotoStore","sub_path":"photostore/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"29727482765","text":"from . import version, uuid_models\n\nimport time, datetime\nfrom datetime import timedelta\nfrom datetime import date\nfrom pprint import pprint, pformat\n\nfrom .config_env import formatEnvVar\n\nAPIVIZ_SUPER_ADMINS = formatEnvVar('SUPER_ADMIN_LIST', format_type=\"list\", separator=\",\") \n\nuuid_auth_model = {\n \n \"apiviz_front_uuid\" : None,\n \"apiviz_front_name\" : None,\n \"uuid_is_authorized\" : True,\n \"date_added\" : None,\n \"added_by\" : {\n \"name\" : None,\n \"surname\" : None,\n \"email\" : None,\n },\n\n \"months_renewal\" : 1,\n\n ### apiviz free/pro options\n \"private_instance\" : False,\n \"apiviz_options\" : {\n \"datasets\" : 1,\n \"admins\" : 1,\n \"staff\" : 1,\n \"statics\" : -1,\n \"js_plugins\" : True,\n \"can_push_data\" : False,\n \"backoffice\" : True,\n \"users_management\" : True,\n \"options_management\" : False,\n },\n\n \"logs\" : {\n \"user_logs\" : [],\n \"total_logs\" : 0,\n \"view_logs\" : {\n \"static\" : [],\n \"map\" : [],\n \"list\" : [],\n \"stat\" : [],\n \"table\" : [],\n \"backoffice\" : [],\n }\n },\n\n \"auth_role_users\" : {\n \"admin_list\" : [], ### \n \"staff_list\" : [], ###\n \"guest_list\" : [], ###\n },\n\n \"users_list\" : [],\n\n \"is_default\" : False,\n}\n\n\n\ndefault_uuids_auth = [\n]\n\nfor key, val in uuid_models.items() : \n\n ### copy uuid_auth_model\n temp_auth = uuid_auth_model.copy()\n # print (\"... temp_auth : \\n\", pformat(temp_auth))\n\n ### setting main specs\n temp_auth[\"apiviz_front_name\"] = key\n temp_auth[\"apiviz_front_uuid\"] = val\n\n ### setting options\n temp_auth[\"uuid_is_authorized\"] = True\n temp_auth[\"private_instance\"] = False\n temp_auth[\"months_renewal\"] = 120\n\n temp_auth[\"apiviz_options\"][\"datasets\"] = 3\n temp_auth[\"apiviz_options\"][\"admins\"] = 1\n temp_auth[\"apiviz_options\"][\"staff\"] = 5\n temp_auth[\"apiviz_options\"][\"statics\"] = -1\n temp_auth[\"apiviz_options\"][\"js_plugins\"] = True\n temp_auth[\"apiviz_options\"][\"can_push_data\"] = True\n temp_auth[\"apiviz_options\"][\"backoffice\"] = True\n temp_auth[\"apiviz_options\"][\"users_management\"] = True\n temp_auth[\"apiviz_options\"][\"options_management\"] = True\n\n ### setting admin list\n temp_auth[\"auth_role_users\"][\"admin_list\"] = APIVIZ_SUPER_ADMINS\n\n ### define as default model\n temp_auth[\"is_default\"] = True\n \n ### setting added infos\n temp_auth[\"date_added\"] = datetime.datetime.now()\n temp_auth[\"added_by\"][\"name\"] = \"system\"\n temp_auth[\"added_by\"][\"surname\"] = \"apivviz\"\n temp_auth[\"added_by\"][\"email\"] = APIVIZ_SUPER_ADMINS[0]\n\n default_users_auth_list = [\n {\n \"name\" : \"system\",\n \"surname\" : \"apiviz\",\n \"email\" : APIVIZ_SUPER_ADMINS[0],\n \"role\" : \"admin\",\n }\n ]\n temp_auth[\"users_list\"] = default_users_auth_list\n\n # print (\"... 
temp_auth : \\n\", pformat(temp_auth))\n\n ### append to list to add / renew in mongodb\n default_uuids_auth.append(temp_auth)\n","repo_name":"co-demos/apiviz-backend","sub_path":"backend/config_app/default_uuids_auth.py","file_name":"default_uuids_auth.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"30010834021","text":"from lab07 import *\n\n# Q6\ndef reverse_other(t):\n    \"\"\"Reverse the roots of every other level of the tree using mutation.\n\n    >>> t = Tree(1, [Tree(2), Tree(3), Tree(4)])\n    >>> reverse_other(t)\n    >>> t\n    Tree(1, [Tree(4), Tree(3), Tree(2)])\n    >>> t = Tree(1, [Tree(2, [Tree(5, [Tree(7), Tree(8)]), Tree(6)]), Tree(3)])\n    >>> reverse_other(t)\n    >>> t\n    Tree(1, [Tree(3, [Tree(5, [Tree(8), Tree(7)]), Tree(6)]), Tree(2)])\n    \"\"\"\n    \"*** YOUR CODE HERE ***\"\n    def reverse_helper(t, need_reverse):\n        if t.is_leaf():\n            return\n        new_labs = [child.root for child in t.branches][::-1]\n        for i in range(len(t.branches)):\n            child = t.branches[i]\n            reverse_helper(child, not need_reverse)\n            if need_reverse:\n                child.root = new_labs[i]\n    reverse_helper(t, True)\n\n# Q7\ndef cumulative_sum(t):\n    \"\"\"Mutates t where each node's root becomes the sum of all entries in the\n    corresponding subtree rooted at t.\n\n    >>> t = Tree(1, [Tree(3, [Tree(5)]), Tree(7)])\n    >>> cumulative_sum(t)\n    >>> t\n    Tree(16, [Tree(8, [Tree(5)]), Tree(7)])\n    \"\"\"\n    \"*** YOUR CODE HERE ***\"\n    for b in t.branches:\n        cumulative_sum(b)\n    t.root += sum([b.root for b in t.branches])\n\n# Q8\ndef deep_map_mut(fn, link):\n    \"\"\"Mutates a deep link by replacing each item found with the\n    result of calling fn on the item. Does NOT create new Links (so\n    no use of Link's constructor)\n\n    Does not return the modified Link object.\n\n    >>> link1 = Link(3, Link(Link(4), Link(5, Link(6))))\n    >>> deep_map_mut(lambda x: x * x, link1)\n    >>> print_link(link1)\n    <9 <16> 25 36>\n    \"\"\"\n    \"*** YOUR CODE HERE ***\"\n    if link is Link.empty:\n        return\n    elif isinstance(link.first, Link):\n        deep_map_mut(fn, link.first)\n    else:\n        link.first = fn(link.first)\n    deep_map_mut(fn, link.rest)\n\n\n# Q9\ndef has_cycle(link):\n    \"\"\"Return whether link contains a cycle.\n\n    >>> s = Link(1, Link(2, Link(3)))\n    >>> s.rest.rest.rest = s\n    >>> has_cycle(s)\n    True\n    >>> t = Link(1, Link(2, Link(3)))\n    >>> has_cycle(t)\n    False\n    >>> u = Link(2, Link(2, Link(2)))\n    >>> has_cycle(u)\n    False\n    \"\"\"\n    \"*** YOUR CODE HERE ***\"\n    seen = []\n    while link is not Link.empty:\n        if link in seen:\n            return True\n        else:\n            seen.append(link)\n        link = link.rest\n    return False\n\ndef has_cycle_constant(link):\n    \"\"\"Return whether link contains a cycle.\n\n    >>> s = Link(1, Link(2, Link(3)))\n    >>> s.rest.rest.rest = s\n    >>> has_cycle_constant(s)\n    True\n    >>> t = Link(1, Link(2, Link(3)))\n    >>> has_cycle_constant(t)\n    False\n    \"\"\"\n    \"*** YOUR CODE HERE ***\"\n    # Floyd's tortoise-and-hare: the fast pointer advances two links per step,\n    # the slow pointer one; they can only meet again if the list has a cycle.\n    slow, fast = link, link\n    while fast is not Link.empty and fast.rest is not Link.empty:\n        slow, fast = slow.rest, fast.rest.rest\n        if slow is fast:\n            return True\n    return False\n","repo_name":"dongsubkim/CS61A","sub_path":"LAB/lab07/lab07_extra.py","file_name":"lab07_extra.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"86"} +{"seq_id":"37222255961","text":"from typing import Optional, List, Tuple\n\nimport pandas as pd\n\nfrom anomaly.isolation.split_rule import SplitRule\n\n\nclass Node:\n    def __init__(self, data: pd.DataFrame, depth: int):\n        self.data = data\n        self.depth = depth\n        self.left: Optional[Node] = None\n        self.right: Optional[Node] = None\n        self.split_rule: 
Optional[SplitRule] = None\n\n def create_split(self, split_rule: SplitRule) -> Tuple['Node', 'Node']:\n self.split_rule = split_rule\n left_data, right_data = split_rule.split(data=self.data)\n self.left = Node(data=left_data, depth=self.depth + 1)\n self.right = Node(data=right_data, depth=self.depth + 1)\n return self.left, self.right\n\n def terminal(self):\n return self.split_rule is None\n\n def diverse_columns(self) -> List[str]:\n relevant_columns = []\n for column in self.data.columns:\n if self.data[column].nunique(dropna=False) > 1:\n relevant_columns.append(column)\n return relevant_columns\n","repo_name":"landeraxe/mixed-anomaly","sub_path":"anomaly/isolation/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"71673181403","text":"#!/usr/bin/env python3\n\nimport os\nimport random\nfrom collections.abc import Callable\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom json import dumps as dump_json\nfrom math import ceil\nfrom typing import Optional, Self, Union\n\nimport pandas as pd # type: ignore[import]\n\n#############################################################################\n# Core Roster\n#############################################################################\n\nIDList = list[str]\nGroupName = str\nPath = Union[str, bytes, os.PathLike]\n\n\n@dataclass\nclass Roster:\n class Field(Enum):\n ID = 'Student ID' # only used as an index\n LAST_NAME = 'Last Name'\n PREFERRED_NAME = 'Preferred Name'\n EMAIL = 'Email'\n PLAN = 'Plan'\n\n table: pd.DataFrame\n\n def get_email_ids(self) -> list[str]:\n return self.table[Roster.Field.EMAIL.value].tolist() # type: ignore[no-any-return]\n\n def get_ids(self) -> list[str]:\n return self.table.index.values.tolist() # type: ignore[no-any-return]\n\n def group_by(self, column: str) -> pd.core.groupby.DataFrameGroupBy:\n return self.table.groupby(column)\n\n\ndef _make_last_name(full_name: str) -> str:\n return full_name[:full_name.index(',')]\n\n\ndef _email_to_username(address: str) -> str:\n if address.endswith('@sfu.ca'):\n return address[:-7]\n if address.endswith('@alumni.sfu.ca'):\n return address[:-14]\n return address\n\n\ndef from_sims_csv(sims_csv_path: Path) -> Roster:\n sims_frame = pd.read_csv(sims_csv_path)\n\n # Most of the SIMS roster is irrelevant, so select only a few\n # parts to retain. If another field/column becomes relevant,\n # it should be added to the list\n relevant_labels = [\n 'student_ID', # numerical student ID\n 'student_name', # full student name in form \"Last, First\"\n 'student_PRF_Fname', # preferred name\n 'Campus_Email', # SFU email address\n 'acad_plan', # Academic plan, e.g. 
\"SOSYMAJ/SYSONE\"\n ]\n columns_to_keep = sims_frame.columns.difference(relevant_labels)\n sims_frame.drop(columns=columns_to_keep, inplace=True)\n\n sims_frame['student_name'] = sims_frame['student_name'].map(_make_last_name)\n sims_frame['Campus_Email'] = sims_frame['Campus_Email'].map(_email_to_username)\n\n new_labels = {\n 'student_ID': Roster.Field.ID.value,\n 'student_name': Roster.Field.LAST_NAME.value,\n 'student_PRF_Fname': Roster.Field.PREFERRED_NAME.value,\n 'Campus_Email': Roster.Field.EMAIL.value,\n 'acad_plan': Roster.Field.PLAN.value,\n }\n sims_frame = sims_frame.rename(columns=new_labels)\n sims_frame.set_index(Roster.Field.ID.value, inplace=True, drop=True)\n\n return Roster(sims_frame)\n\n\ndef from_roster_csv(roster_csv_path: Path) -> Roster:\n roster_dataframe = pd.read_csv(roster_csv_path)\n assert {field.value for field in Roster.Field}.issubset(roster_dataframe.columns)\n\n roster_dataframe.set_index(Roster.Field.ID.value, inplace=True, drop=True)\n\n return Roster(roster_dataframe)\n\n\n# This generates a fake Roster of a given size. This is useful, for instance\n# when testing out different tools or workflows that you may want to use in\n# a course.\ndef from_nothing(size: int) -> Roster:\n plans = ['SOSYMAJ', 'CMPTMAJ']\n fake_dataframe = pd.DataFrame({\n Roster.Field.ID.value: [str(900000000 + i) for i in range(size)],\n Roster.Field.LAST_NAME.value: ['L' + str(i) for i in range(size)],\n Roster.Field.PREFERRED_NAME.value: ['P' + str(i) for i in range(size)],\n Roster.Field.EMAIL.value: ['user' + str(i) for i in range(size)],\n Roster.Field.PLAN.value: [random.choices(plans, k=1)[0] for i in range(size)],\n })\n fake_dataframe.set_index(Roster.Field.ID.value, inplace=True, drop=True)\n return Roster(fake_dataframe)\n\n\ndef to_roster_csv(roster: Roster, path: Path) -> None:\n roster.table.to_csv(path, index=True)\n\n\n#############################################################################\n# Group Management and Collaborations\n#############################################################################\n\ndef group_students(roster: Roster,\n label: str,\n namer: Callable[[str, str], Optional[str]]) -> None:\n def wrapper(x: pd.Series) -> Optional[str]:\n return namer(x.name, x.Email)\n\n roster.table[label] = roster.table.apply(wrapper, axis=1)\n\n\ndef group_students_randomly(\n roster: Roster,\n group_size: int,\n label: str,\n group_labels: Optional[list[GroupName]] = None) -> None:\n students = roster.get_ids()\n\n random.shuffle(students)\n num_groups = int(ceil(len(students) / group_size))\n group_list = [students[group:len(students):num_groups]\n for group in range(num_groups)]\n\n def name_mapping(group_id: int) -> str:\n if group_labels and group_id < len(group_labels):\n return group_labels[group_id]\n return str(group_id)\n\n group_map = {student: name_mapping(group_id)\n for group_id, group in enumerate(group_list)\n for student in group}\n\n group_students(roster, label, lambda id, username: group_map[id])\n\n\n@dataclass\nclass GroupMatching:\n # contains columns for:\n # *index*: Roster.Field.ID.value: The id of the student\n # group_label: The label of the main group of the student\n # assigned_label: The label of the group the student will collaborate with\n table: pd.DataFrame\n group_label: Optional[str]\n assigned_label: str\n\n def to_csv(self, path: str) -> None:\n self.table.to_csv(path)\n\n @classmethod\n def from_csv(cls, path: str) -> Self:\n table = pd.read_csv(path)\n assigned_label = 'Assigned'\n\n 
table.set_index(Roster.Field.ID.value, inplace=True, drop=True)\n\n return cls(table, None, assigned_label)\n\n\n# Given a roster and column label for groups, assign each student another\n# group to collaborate with. Students should be assigned roughly uniformly\n# across the groups.\ndef assign_across_groups(roster: Roster, group_label: str) -> GroupMatching:\n groups = set(roster.table[group_label].unique())\n assert len(groups) > 1\n target_count = len(roster.table.index) // len(groups)\n\n # The result will be a new frame with just\n # [*index*: Student ID, Original Group, Assigned Group]\n df = roster.table[[group_label]]\n df = df.assign(Assigned=lambda x: '')\n assigned_label = 'Assigned'\n\n # TODO: Do we want uniform permutations so that groups are consistently\n # \"cross pollinated\"? The elow would work fine after hitting a maximum\n # number of permutations based on the roster.\n\n # First, sample enough to ensure all groups get collaborators\n for group_name in groups:\n # For mutability, we have to construct a sample over indices with pandas.\n # The sample comes from all students not in the given group who have not\n # yet been assigned another group to collaborate with.\n sources = df.index[(df[group_label] != group_name) & (df[assigned_label] == '')]\n sample_size = min(target_count, len(sources))\n sample_indices = random.sample(sources.tolist(), sample_size)\n df.loc[sample_indices, assigned_label] = group_name\n\n # The clean up by making sure that all students are assigned collaboration.\n # As a semester progresses, this can get messier as students drop and group\n # sizes become uneven. For now, simply assign each remaining student.\n # Prefer not to reuse a group if possible.\n leftovers = df.index[df[assigned_label] == '']\n used_groups: set[str] = set()\n for student in leftovers:\n original_group = df.loc[student][group_label]\n possible_groups = groups.difference({original_group}).difference(used_groups)\n if not possible_groups:\n possible_groups = groups.difference({original_group})\n (assigned_group,) = random.sample(list(possible_groups), 1)\n df.loc[student, assigned_label] = assigned_group\n used_groups.add(assigned_group)\n\n assert df.loc[df[assigned_label] == ''].empty\n\n return GroupMatching(df, group_label, assigned_label)\n\n\ndef from_matching_assignment_csv(roster_csv_path: Path) -> GroupMatching:\n matching_dataframe = pd.read_csv(roster_csv_path)\n columns = list(matching_dataframe.columns)\n expected_column_count = 3\n assert len(columns) == expected_column_count\n assert Roster.Field.ID.value in columns\n\n matching_dataframe.set_index(Roster.Field.ID.value, inplace=True, drop=True)\n\n return GroupMatching(matching_dataframe, columns[1], columns[2])\n\n\ndef to_matching_assignment_csv(matching: GroupMatching, path: Path) -> None:\n matching.table.to_csv(path, index=True)\n\n\ndef get_group_stubs(roster: Roster,\n group_label: str) -> list[tuple[GroupName, str]]:\n groups = roster.group_by(group_label)\n\n def get_student_stub(student: pd.Series) -> dict[str, str]:\n last_name = student[Roster.Field.LAST_NAME.value]\n preferred_name = student[Roster.Field.PREFERRED_NAME.value]\n return {\n 'name': last_name + ', ' + preferred_name,\n 'email': student[Roster.Field.EMAIL.value],\n }\n\n def get_stub(group: pd.Series) -> str:\n return dump_json(group.apply(get_student_stub, axis=1).to_list())\n\n stubs = [(group_id, get_stub(group)) for group_id, group in groups]\n stubs.sort()\n return 
stubs\n","repo_name":"nsumner/course_roster","sub_path":"src/course_roster/roster.py","file_name":"roster.py","file_ext":"py","file_size_in_byte":9589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"23568821914","text":"# Some code in this file is from\n# https://github.com/ionelmc/python-remote-pdb/blob/07d563331c4ab9eb45731bb272b158816d98236e/src/remote_pdb.py\n# (BSD 2-Clause \"Simplified\" License)\n\nimport errno\nimport inspect\nimport json\nimport logging\nimport os\nimport re\nimport select\nimport socket\nimport sys\nimport time\nimport traceback\nimport uuid\nfrom pdb import Pdb\nfrom typing import Callable\n\nimport setproctitle\n\nimport ray\nfrom ray._private import ray_constants\nfrom ray.experimental.internal_kv import _internal_kv_del, _internal_kv_put\nfrom ray.util.annotations import DeveloperAPI\n\nlog = logging.getLogger(__name__)\n\n\ndef _cry(message, stderr=sys.__stderr__):\n print(message, file=stderr)\n stderr.flush()\n\n\nclass _LF2CRLF_FileWrapper(object):\n def __init__(self, connection):\n self.connection = connection\n self.stream = fh = connection.makefile(\"rw\")\n self.read = fh.read\n self.readline = fh.readline\n self.readlines = fh.readlines\n self.close = fh.close\n self.flush = fh.flush\n self.fileno = fh.fileno\n if hasattr(fh, \"encoding\"):\n self._send = lambda data: connection.sendall(data.encode(fh.encoding))\n else:\n self._send = connection.sendall\n\n @property\n def encoding(self):\n return self.stream.encoding\n\n def __iter__(self):\n return self.stream.__iter__()\n\n def write(self, data, nl_rex=re.compile(\"\\r?\\n\")):\n data = nl_rex.sub(\"\\r\\n\", data)\n self._send(data)\n\n def writelines(self, lines, nl_rex=re.compile(\"\\r?\\n\")):\n for line in lines:\n self.write(line, nl_rex)\n\n\nclass _PdbWrap(Pdb):\n \"\"\"Wrap PDB to run a custom exit hook on continue.\"\"\"\n\n def __init__(self, exit_hook: Callable[[], None]):\n self._exit_hook = exit_hook\n Pdb.__init__(self)\n\n def do_continue(self, arg):\n self._exit_hook()\n return Pdb.do_continue(self, arg)\n\n do_c = do_cont = do_continue\n\n\nclass _RemotePdb(Pdb):\n \"\"\"\n This will run pdb as a ephemeral telnet service. Once you connect no one\n else can connect. 
On construction this object will block execution till a\n client has connected.\n Based on https://github.com/tamentis/rpdb I think ...\n To use this::\n RemotePdb(host=\"0.0.0.0\", port=4444).set_trace()\n Then run: telnet 127.0.0.1 4444\n \"\"\"\n\n active_instance = None\n\n def __init__(\n self,\n breakpoint_uuid,\n host,\n port,\n ip_address,\n patch_stdstreams=False,\n quiet=False,\n ):\n self._breakpoint_uuid = breakpoint_uuid\n self._quiet = quiet\n self._patch_stdstreams = patch_stdstreams\n self._listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)\n self._listen_socket.bind((host, port))\n self._ip_address = ip_address\n\n def listen(self):\n if not self._quiet:\n _cry(\n \"RemotePdb session open at %s:%s, \"\n \"use 'ray debug' to connect...\"\n % (self._ip_address, self._listen_socket.getsockname()[1])\n )\n self._listen_socket.listen(1)\n connection, address = self._listen_socket.accept()\n if not self._quiet:\n _cry(\"RemotePdb accepted connection from %s.\" % repr(address))\n self.handle = _LF2CRLF_FileWrapper(connection)\n Pdb.__init__(\n self,\n completekey=\"tab\",\n stdin=self.handle,\n stdout=self.handle,\n skip=[\"ray.*\"],\n )\n self.backup = []\n if self._patch_stdstreams:\n for name in (\n \"stderr\",\n \"stdout\",\n \"__stderr__\",\n \"__stdout__\",\n \"stdin\",\n \"__stdin__\",\n ):\n self.backup.append((name, getattr(sys, name)))\n setattr(sys, name, self.handle)\n _RemotePdb.active_instance = self\n\n def __restore(self):\n if self.backup and not self._quiet:\n _cry(\"Restoring streams: %s ...\" % self.backup)\n for name, fh in self.backup:\n setattr(sys, name, fh)\n self.handle.close()\n _RemotePdb.active_instance = None\n\n def do_quit(self, arg):\n self.__restore()\n return Pdb.do_quit(self, arg)\n\n do_q = do_exit = do_quit\n\n def do_continue(self, arg):\n self.__restore()\n self.handle.connection.close()\n return Pdb.do_continue(self, arg)\n\n do_c = do_cont = do_continue\n\n def set_trace(self, frame=None):\n if frame is None:\n frame = sys._getframe().f_back\n try:\n Pdb.set_trace(self, frame)\n except IOError as exc:\n if exc.errno != errno.ECONNRESET:\n raise\n\n def post_mortem(self, traceback=None):\n # See https://github.com/python/cpython/blob/\n # 022bc7572f061e1d1132a4db9d085b29707701e7/Lib/pdb.py#L1617\n try:\n t = sys.exc_info()[2]\n self.reset()\n Pdb.interaction(self, None, t)\n except IOError as exc:\n if exc.errno != errno.ECONNRESET:\n raise\n\n def do_remote(self, arg):\n \"\"\"remote\n Skip into the next remote call.\n \"\"\"\n # Tell the next task to drop into the debugger.\n ray._private.worker.global_worker.debugger_breakpoint = self._breakpoint_uuid\n # Tell the debug loop to connect to the next task.\n data = json.dumps(\n {\n \"job_id\": ray.get_runtime_context().get_job_id(),\n }\n )\n _internal_kv_put(\n \"RAY_PDB_CONTINUE_{}\".format(self._breakpoint_uuid),\n data,\n namespace=ray_constants.KV_NAMESPACE_PDB,\n )\n self.__restore()\n self.handle.connection.close()\n return Pdb.do_continue(self, arg)\n\n def do_get(self, arg):\n \"\"\"get\n Skip to where the current task returns to.\n \"\"\"\n ray._private.worker.global_worker.debugger_get_breakpoint = (\n self._breakpoint_uuid\n )\n self.__restore()\n self.handle.connection.close()\n return Pdb.do_continue(self, arg)\n\n\ndef _connect_ray_pdb(\n host=None,\n port=None,\n patch_stdstreams=False,\n quiet=None,\n breakpoint_uuid=None,\n debugger_external=False,\n):\n \"\"\"\n Opens a remote 
PDB on first available port.\n \"\"\"\n if debugger_external:\n assert not host, \"Cannot specify both host and debugger_external\"\n host = \"0.0.0.0\"\n elif host is None:\n host = os.environ.get(\"REMOTE_PDB_HOST\", \"127.0.0.1\")\n if port is None:\n port = int(os.environ.get(\"REMOTE_PDB_PORT\", \"0\"))\n if quiet is None:\n quiet = bool(os.environ.get(\"REMOTE_PDB_QUIET\", \"\"))\n if not breakpoint_uuid:\n breakpoint_uuid = uuid.uuid4().hex\n if debugger_external:\n ip_address = ray._private.worker.global_worker.node_ip_address\n else:\n ip_address = \"localhost\"\n rdb = _RemotePdb(\n breakpoint_uuid=breakpoint_uuid,\n host=host,\n port=port,\n ip_address=ip_address,\n patch_stdstreams=patch_stdstreams,\n quiet=quiet,\n )\n sockname = rdb._listen_socket.getsockname()\n pdb_address = \"{}:{}\".format(ip_address, sockname[1])\n parentframeinfo = inspect.getouterframes(inspect.currentframe())[2]\n data = {\n \"proctitle\": setproctitle.getproctitle(),\n \"pdb_address\": pdb_address,\n \"filename\": parentframeinfo.filename,\n \"lineno\": parentframeinfo.lineno,\n \"traceback\": \"\\n\".join(traceback.format_exception(*sys.exc_info())),\n \"timestamp\": time.time(),\n \"job_id\": ray.get_runtime_context().get_job_id(),\n }\n _internal_kv_put(\n \"RAY_PDB_{}\".format(breakpoint_uuid),\n json.dumps(data),\n overwrite=True,\n namespace=ray_constants.KV_NAMESPACE_PDB,\n )\n rdb.listen()\n _internal_kv_del(\n \"RAY_PDB_{}\".format(breakpoint_uuid), namespace=ray_constants.KV_NAMESPACE_PDB\n )\n\n return rdb\n\n\n@DeveloperAPI\ndef set_trace(breakpoint_uuid=None):\n \"\"\"Interrupt the flow of the program and drop into the Ray debugger.\n\n Can be used within a Ray task or actor.\n \"\"\"\n # If there is an active debugger already, we do not want to\n # start another one, so \"set_trace\" is just a no-op in that case.\n if ray._private.worker.global_worker.debugger_breakpoint == b\"\":\n frame = sys._getframe().f_back\n rdb = _connect_ray_pdb(\n host=None,\n port=None,\n patch_stdstreams=False,\n quiet=None,\n breakpoint_uuid=breakpoint_uuid.decode() if breakpoint_uuid else None,\n debugger_external=ray._private.worker.global_worker.ray_debugger_external,\n )\n rdb.set_trace(frame=frame)\n\n\ndef _driver_set_trace():\n \"\"\"The breakpoint hook to use for the driver.\n\n This disables Ray driver logs temporarily so that the PDB console is not\n spammed: https://github.com/ray-project/ray/issues/18172\n \"\"\"\n print(\"*** Temporarily disabling Ray worker logs ***\")\n ray._private.worker._worker_logs_enabled = False\n\n def enable_logging():\n print(\"*** Re-enabling Ray worker logs ***\")\n ray._private.worker._worker_logs_enabled = True\n\n pdb = _PdbWrap(enable_logging)\n frame = sys._getframe().f_back\n pdb.set_trace(frame)\n\n\ndef _is_ray_debugger_enabled():\n return \"RAY_PDB\" in os.environ\n\n\ndef _post_mortem():\n rdb = _connect_ray_pdb(\n host=None,\n port=None,\n patch_stdstreams=False,\n quiet=None,\n debugger_external=ray._private.worker.global_worker.ray_debugger_external,\n )\n rdb.post_mortem()\n\n\ndef _connect_pdb_client(host, port):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n\n while True:\n # Get the list of sockets which are readable.\n read_sockets, write_sockets, error_sockets = select.select(\n [sys.stdin, s], [], []\n )\n\n for sock in read_sockets:\n if sock == s:\n # Incoming message from remote debugger.\n data = sock.recv(4096)\n if not data:\n return\n else:\n sys.stdout.write(data.decode())\n sys.stdout.flush()\n 
else:\n # User entered a message.\n msg = sys.stdin.readline()\n s.send(msg.encode())\n","repo_name":"ray-project/ray","sub_path":"python/ray/util/rpdb.py","file_name":"rpdb.py","file_ext":"py","file_size_in_byte":10579,"program_lang":"python","lang":"en","doc_type":"code","stars":28715,"dataset":"github-code","pt":"86"} +{"seq_id":"73339480283","text":"import networkx as nx\nfrom networkx.readwrite import json_graph\nimport json\n\n\ndef taxoLevels():\n taxonomy=[\"Domain\",\"Phylum\",\"Class\",\"Order\",\"Family\",\"Genus\",\"Species\"]\n tindex={-1:\"Root\",0:\"Domain\",1:\"Phylum\",2:\"Class\",3:\"Order\",4:\"Family\",5:\"Genus\",6:\"Species\",7:\"Sub-species\"}\n return[taxonomy,tindex]\n\ndef load_abundance(fi):\n abundance=[]\n for i in open(fi):\n abundance.append(i.split('\\t'))\n return abundance\n\n#g=taxonomy_tree(load_abundance('alignment.BpErAFsqEm.matches.taxonomy.abundance.rpkm'),'/home/gustavo1/tmp/a','b','c','d')\n\ndef taxonomy_tree(abundance,filename,pipeline,analysis,dbname):\n G=nx.DiGraph()\n counts,tix=taxoLevels()\n for i in abundance:\n #i=i.split()\n taxo=i[-1].replace(\"\\n\",\"\").split(\";\") #last column is the taxonomy lineage\n taxo=['R']+taxo\n \n abundance=float(i[1])/2 #first column is the number of sites (unique sites)\n matches=float(i[2])/2 #Second column is the number of matches\n rpkm=float(i[3])/2\n \n for tx in range(len(taxo)-1):\n parent=taxo[tx].replace(\"unknown\",\"unknown.\"+taxo[tx-1]).replace(\"uncultured\",\"uncultured.\"+taxo[tx-1])\n child=taxo[tx+1].replace(\"unknown\",\"unknown.\"+taxo[tx]).replace(\"uncultured\",\"uncultured.\"+taxo[tx])\n #if \"unknown\" in parent or \"unknown\" in child: break\n if not (parent,child) in G.edges():\n if not child in G.nodes():\n G.add_node(child,matches=matches,sites=abundance,rpkm=rpkm,level=tix[tx])\n else:\n G.node[child]['sites']+=abundance;G.node[child]['rpkm']+=rpkm; G.node[child]['matches'] +=matches;\n if not parent in G.nodes():\n G.add_node(parent,matches=matches,sites=abundance,rpkm=rpkm,level=tix[tx-1])\n else:\n G.node[parent]['sites'] +=abundance; G.node[parent]['rpkm']+=rpkm; G.node[parent]['matches'] +=matches;\n if not G.predecessors(child):\n G.add_edge(parent,child)\n else:\n G.node[parent]['sites']+=abundance;G.node[parent]['rpkm']+=rpkm; G.node[parent]['matches'] +=matches;\n G.node[child]['sites'] +=abundance; G.node[child]['rpkm']+=rpkm; G.node[child]['matches'] +=matches;\n try:\n G.node['R']['rpkm']=2*G.node['R']['rpkm']\n except:\n G.add_node('R')\n G.node['R']['rpkm']=1\n G.node['R']['matches']=1\n G.node['R']['sites']=1\n G.node['R']['level']='Root'\n # now take the different levels\n #print G.node['R']['level']\n #print G.edges('Bacteria')\n if pipeline==\"matches\":\n nx.write_gpickle(G,filename+\".\"+analysis+\".abundance\"+'.pk')\n tree=json_graph.tree_data(G,root='R')\n with open(filename+\".\"+analysis+\".abundance\"+'.json', 'w') as outfile:\n json.dump(tree, outfile)\n else:\n nx.write_gpickle(G,filename+\".\"+analysis+\".abundance\"+'.pk')\n tree=json_graph.tree_data(G,root='R')\n with open(filename+\".\"+analysis+\".abundance\"+'.json', 'w') as outfile:\n json.dump(tree, outfile)\n return(G)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef metaphlan_taxonomy_tree(filename):\n with open(filename) as f:\n content = f.readlines()\n G=nx.DiGraph()\n counts,tix=taxoLevels()\n #add first node bacteria!\n for i in content[2:-1]:\n i=i.split()\n taxo=['R']+i[0].split(\"|\")\n #print taxo\n matches=int(i[4])\n abundance=float(i[1])\n G.add_node(taxo[-1], sites=abundance, 
matches=matches, rpkm=abundance, level=tix[len(taxo)-2])\n for tx in range(0,len(taxo)-1):\n parent=taxo[tx]\n child=taxo[tx+1]\n G.add_edge(parent,child)\n try:\n G.node['R']['rpkm']=G.node['k__Bacteria']['rpkm']\n G.node['R']['matches']=G.node['k__Bacteria']['matches']\n G.node['R']['sites']=G.node['k__Bacteria']['sites']\n G.node['R']['level']='Root'\n except:\n G.add_node('R')\n G.node['R']['rpkm']=1\n G.node['R']['matches']=1\n G.node['R']['sites']=1\n G.node['R']['level']='Root'\n \n tree=json_graph.tree_data(G,root='R')\n with open(filename+\".taxonomy.abundance.json\", 'w') as outfile:\n json.dump(tree, outfile)\n nx.write_gpickle(G,filename+'.taxonomy.abundance.pk')\n return G\n\n\n\n\n\ndef mytaxa_taxonomy_tree(data,filename):\n G=nx.DiGraph()\n counts,tix=taxoLevels()\n for lineage in data:\n taxo=['R']+lineage.split(\";\")\n for tx in range(len(taxo)-1):\n parent=taxo[tx]\n child=taxo[tx+1]\n matches=data[lineage]['genes']\n abundance=data[lineage]['scaffold']\n\n #if \"unknown\" in parent or \"unknown\" in child: break\n if not (parent,child) in G.edges():\n if not child in G.nodes():\n G.add_node(child,matches=matches,sites=abundance,rpkm=1*float(matches),level=tix[tx])\n else:\n G.node[child]['sites']+=abundance;G.node[child]['rpkm']+=1*float(matches); G.node[child]['matches'] +=matches;\n if not parent in G.nodes():\n G.add_node(parent,matches=matches,sites=abundance,rpkm=1*float(matches),level=tix[tx-1])\n else:\n G.node[parent]['sites'] +=abundance; G.node[parent]['rpkm']+=1*float(matches); G.node[parent]['matches'] +=matches;\n if not G.predecessors(child):\n G.add_edge(parent,child)\n else:\n G.node[parent]['sites']+=abundance;G.node[parent]['rpkm']+=1*float(matches); G.node[parent]['matches'] +=matches;\n G.node[child]['sites'] +=abundance; G.node[child]['rpkm']+=1*float(matches); G.node[child]['matches'] +=matches;\n G.node['R']['rpkm']=2*G.node['R']['rpkm']\n tree=json_graph.tree_data(G,root='R')\n with open(filename+\".json\", 'w') as outfile:\n json.dump(tree, outfile)\n nx.write_gpickle(G,filename+'.pk')\n return G\n\n\n\n\n\n\n\n\n\n\n\n\n #\n","repo_name":"gaarangoa/MetaStorm","sub_path":"cluster/app/lib/run/TaxonomyProcess.py","file_name":"TaxonomyProcess.py","file_ext":"py","file_size_in_byte":6054,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"8847686681","text":"# encoding: utf-8\nimport requests\nimport json\n\nurl = 'https://oapi.dingtalk.com/robot/send?access_token=c4544097c2614d779b38fbefc0714df58e8f48901928991f845dd4aafa18eedb'\n\n\ndef send_message_dingtalk(msg,url):\n data = {\n 'msgtype': 'text',\n 'text': {\n 'content': '{}'.format(msg)\n },\n 'at': {\n 'atMobiles': []\n }\n }\n headers = {\n 'Content-Type': 'application/json',\n 'Charset': 'utf-8'\n }\n requests.post(url, headers=headers, data=json.dumps(data))\n\n\nif __name__ == '__main__':\n msg = '测试信息'\n send_message_dingtalk(msg,url)","repo_name":"zhouqiw/prom","sub_path":"gateway/monitor_script/dingtalk.py","file_name":"dingtalk.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"17026077537","text":"import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gdk\n\nimport re\nimport signal\nfrom collections import deque\n\nimport prog\nfrom boekwidget import BoekWidget\nfrom kastcodewidget import KastcodeWidget\nfrom categoriewidget import CategorieWidget\nfrom persoonwidget import PersoonWidget\nfrom 
uitleenwidget import UitleenWidget\nfrom barcodeswidget import BarcodesWidget\n\nimport sys  # needed for the sys.stderr fallback below\n\nre_isbn_char = re.compile('^[0-9]$')\nre_isbn = re.compile('^[0-9]{13}$')\nbarcode_buf = deque((), 13)\n\ndef alarm_handler(signum, stackframe):\n barcode_buf.clear()\n\nclass About(Gtk.Label):\n def __init__(self):\n super().__init__()\n self.invalidated = False\n self.set_markup(\"<big><b></b></big>\"\n \"{} versie {}\\n\"\n \"GNU Public License (GPL)\\n\"\n \"© 2019 Heiko Noordhof\\n\"\n \"<small><i></i></small>\".format(prog.name.capitalize(), prog.version))\n\nclass Stub(Gtk.Label):\n def __init__(self):\n super().__init__()\n self.set_angle(20)\n self.set_markup(\"<big><b>Nog niet gemaakt</b></big>\")\n\nclass MainWindow(Gtk.ApplicationWindow):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.set_default_size(750, 550)\n\n self.notebook = Gtk.Notebook()\n self.notebook.set_tab_pos(Gtk.PositionType.BOTTOM)\n\n self.connect(\"key-press-event\", self.on_key_press)\n signal.signal(signal.SIGALRM, alarm_handler)\n\n def add_tab(tabclass, tabtitle):\n widget = tabclass(self, self.props.application.db)\n self.notebook.append_page(widget, Gtk.Label(tabtitle))\n\n for tab in self.props.application.tabs:\n if tab == \"boek\": add_tab(BoekWidget, \"Boeken\")\n elif tab == \"kastcode\": add_tab(KastcodeWidget, \"Kastcodes\")\n elif tab == \"categorie\": add_tab(CategorieWidget, \"Categoriën\")\n elif tab == \"lener\": add_tab(PersoonWidget, \"Leners\")\n elif tab == \"uitlenen\": add_tab(UitleenWidget, \"Uitlenen\")\n elif tab == \"barcodes\": add_tab(BarcodesWidget, \"Streepjescodes\")\n else:\n print(\"Error: onbekende tab-naam. Hoort niet te gebeuren\", file=sys.stderr)\n\n self.notebook.append_page(About(), Gtk.Image.new_from_icon_name(\"help-about\", Gtk.IconSize.MENU))\n self.notebook.connect(\"switch-page\", self.on_switch_page)\n self.add(self.notebook)\n\n # activate\n #\n self.show_all()\n\n def invalidate(self):\n for tab in self.notebook:\n tab.invalidated = True\n\n def on_switch_page(self, notebook, page, page_num):\n if page.invalidated and hasattr(page, 'refresh') and callable(page.refresh):\n page.refresh()\n\n def on_key_press(self, win, event):\n if event.keyval == Gdk.KEY_Return:\n signal.alarm(0)\n isbn = \"\".join(barcode_buf)\n barcode_buf.clear()\n if re_isbn.match(isbn): # looks roughly like a valid ISBN?\n self.on_isbn_scan(isbn)\n return True\n else:\n try:\n key = chr(event.keyval)\n except ValueError: # ignore odd key combinations (volume up, and the like).\n signal.alarm(0)\n barcode_buf.clear()\n return True\n if re_isbn_char.match(key):\n if len(barcode_buf) == 0:\n signal.alarm(1)\n barcode_buf.append(key)\n else:\n signal.alarm(0)\n barcode_buf.clear()\n\n def on_isbn_scan(self, isbn):\n current_page = self.notebook.get_nth_page(self.notebook.get_current_page())\n if hasattr(current_page, 'on_isbn_scan') and callable(current_page.on_isbn_scan):\n current_page.on_isbn_scan(isbn)\n\n","repo_name":"hkoof/boekleen","sub_path":"mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"3963663444","text":"import requests\n\n\nclass DataManager:\n    # This class is responsible for talking to the Google Sheet.\n\n    def __init__(self):\n        self.GOOGLE_SHEET_KEY = \"\"\n        self.GOOGLE_PRICE_SHEET_API_ENDPOINT = \"https://api.sheety.co/9f9b62b3fef9c5050eef57ceb4e58bda/copyOfFlightDeals/prices\"\n        self.GOOGLE_USER_SHEET_API_ENDPOINT = 
\"https://api.sheety.co/9f9b62b3fef9c5050eef57ceb4e58bda/copyOfFlightDeals/users\"\n self.google_sheet_header = {\n \"Authorization\": f\"Bearer {self.GOOGLE_SHEET_KEY}\"\n }\n self.body = {}\n\n def update_excel_flight_data(self, data):\n self.body[\"price\"] = data\n\n response = requests.put(\n url=f\"{self.GOOGLE_PRICE_SHEET_API_ENDPOINT}/{data['id']}\",\n headers=self.google_sheet_header,\n json=self.body\n )\n response.raise_for_status()\n\n def add_excel_user_data(self, first_name, last_name, email):\n\n # response = requests.get(\n # url=self.GOOGLE_USER_SHEET_API_ENDPOINT,\n # headers=self.google_sheet_header\n # )\n\n body = {}\n body[\"user\"] = {}\n print(body)\n\n body[\"user\"][\"firstName\"] = first_name\n body[\"user\"][\"lastName\"] = last_name\n body[\"user\"][\"email\"] = email\n\n response = requests.post(\n url=self.GOOGLE_USER_SHEET_API_ENDPOINT,\n headers=self.google_sheet_header,\n json=body\n )\n response.raise_for_status()\n print(response.json())\n print(\"You're in the club!\")\n\n def get_excel_user_data(self):\n\n response = requests.get(\n url=self.GOOGLE_USER_SHEET_API_ENDPOINT,\n headers=self.google_sheet_header\n )\n response.raise_for_status()\n\n return response.json()[\"users\"]\n\n","repo_name":"Yoodahun/100-Days-of-code","sub_path":"Day 39/Personal Flight Club/data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"11498560684","text":"import streamlit as st\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\n#DATA\npath = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"df_NAN.csv\")\ndf=pd.read_csv(path)\npath2 = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"df_avg.csv\")\ndf_avg=pd.read_csv(path2)\npath3 = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"df_avg_cpc_cpm.csv\")\ndf_avg_cpc_cpm=pd.read_csv(path3)\nchannels_spend = [\"tt_costs\", \"fb_costs\", \"google_costs\"]\ndf[\"Total_Spend\"] = df[channels_spend].sum(axis=1)\ndf.drop(columns=['Unnamed: 0'], inplace=True, axis=1)\ndf['ROI']= df['total_sales']/df[\"Total_Spend\"]\ndf['fb_cpc']=df['fb_costs']/df['fb_clicks']\ndf['tt_cpc']=df['tt_costs']/df['tt_clicks']\ndf['google_cpc']=df['google_costs']/df['google_clicks']\ndf['fb_cpm']=(df['fb_costs']/df['fb_impressions'])*1000\ndf['tt_cpm']=(df['tt_costs']/df['tt_impressions'])*1000\ndf['google_cpm']=(df['google_costs']/df['google_impressions'])*1000\nchannels_spend_CPC = [\"tt_cpc\", \"fb_cpc\", \"google_cpc\"]\ndf[\"Total_Spend_CPC\"] = df[channels_spend_CPC].sum(axis=1)\nchannels_spend_CPM = [\"tt_cpm\", \"fb_cpm\", \"google_cpm\"]\ndf[\"Total_Spend_CPM\"] = df[channels_spend_CPM].sum(axis=1)\n# Convert 'Day' column to datetime type\ndf['Day'] = pd.to_datetime(df['Day'])\n# Set 'Day' column as index\ndf.set_index('Day', inplace=True)\n# Check if the index is a DatetimeIndex\nprint(df.index)\n# Resample data by month and aggregate using sum (you can use other aggregation functions)\ndf_monthly = df.resample('M').sum()\n\n\nst.header('General Overview', divider='green')\ntab1, tab2, tab3, tab4, tab5, tab6 = st.tabs([\"Monthly Sales/Spend/ROI\",\"Total Sales vs Impressions/Clicks\",\"Average ROI vs Weekday\",\"Media Buying\",\"Media Impressions/Clicks/Costs Corr To Sale\",\"Comparing Platforms\"])\n\n#FIRST PLOT\nfig1_1 = 
make_subplots()\nfig1_1.add_trace(go.Scatter(x=df.index,y=df['Total_Spend'],name='Total Spend',marker={'color':'#FF7F50'}))\nfig1_1.add_trace(go.Scatter(x=df.index,y=df['total_sales'], name='Total Sales',marker={'color':'#008000'}))\n# Set x-axis title\nfig1_1.update_xaxes(title_text=\"Date\")\n# Set y-axes titles\nfig1_1.update_yaxes(title_text=\"Amount ( € )\")\n\n\nfig1_2 = make_subplots()\nfig1_2.add_trace(go.Scatter(x=df.index,y=df['ROI'], name='ROI',marker={'color':'#008000'}))\n# Set x-axis title\nfig1_2.update_xaxes(title_text=\"Date\")\n# Set y-axes titles\nfig1_2.update_yaxes(title_text=\"ROI\")\nfig1_2.update_layout(xaxis_range=['2022-02-01','2023-08-24'])\nfig1_2.update_layout(yaxis_range=[0,15])\n\nwith tab1:\n st.subheader(\"Monthly Sales/Spend\")\n st.plotly_chart(fig1_1)\n st.subheader(\"Return On Investment\")\n st.plotly_chart(fig1_2)\n st.markdown(\"\"\"\n
    \n ✅ July (summer) seasonality is clearly visible.\n\n ✅ We improved ROI while increasing marketing spend which is a positive business result.\n
    \"\"\",unsafe_allow_html=True)\n\n#THIRD PLOT\nfig3 = make_subplots(rows=1,cols=2)\nfig3.add_trace(go.Scatter(x=df['tt_impressions'],y=df['total_sales'],name='TikTok',mode='markers',marker={'color':'#FF7F50'}),row=1,col=1)\nfig3.add_trace(go.Scatter(x=df['fb_impressions'],y=df['total_sales'], name='Facebook',mode='markers',marker={'color':'#069AF3'}),row=1,col=1)\nfig3.add_trace(go.Scatter(x=df['google_impressions'],y=df['total_sales'],name='Google',mode='markers',marker={'color':'#008000'}),row=1,col=1)\n# Set x-axis title\nfig3.update_xaxes(title_text=\"Impressions\",row=1,col=1)\n# Set y-axes titles\nfig3.update_yaxes(title_text=\"Total Sales\",row=1,col=1)\nfig3.add_trace(go.Scatter(x=df['tt_clicks'],y=df['total_sales'],name='TikTok',mode='markers',marker={'color':'#FF7F50'},showlegend=False),row=1,col=2)\nfig3.add_trace(go.Scatter(x=df['fb_clicks'],y=df['total_sales'], name='Facebook',mode='markers',marker={'color':'#069AF3'},showlegend=False),row=1,col=2)\nfig3.add_trace(go.Scatter(x=df['google_clicks'],y=df['total_sales'],name='Google',mode='markers',marker={'color':'#008000'},showlegend=False),row=1,col=2)\n#fig2.update_layout(showlegend=False,row=1,col=2)\n# Set x-axis title\nfig3.update_xaxes(title_text=\"Clicks\",row=1,col=2)\n#FOR HAVING FRAME(in update_xaxes):mirror=True,showline=True, linecolor = 'lightgray'\n# Set y-axes titles\nfig3.update_yaxes(title_text=\"Total Sales\",row=1,col=2)\n\nfig3.update_traces(marker_size=6,marker_line=dict(width=1, color='black'))\nwith tab2:\n st.subheader(\"Total Sales vs Impressions/Clicks\")\n st.plotly_chart(fig3)\n st.markdown(\"\"\"\n
    \n ✅ We can see there is a positive correlation to the overall marketing spend and sales.\n\n ✅ Google is showing the strongest sales based on few impressions which makes sense as it is usually an important pull channel.\n\n ✅ When looking at the “Impressions to total sales” scatter plot, TikTok seems to show better sales results based on fewer impressions than Facebook → we know it is more a view platform or perhaps we had view objectives for the campaigns active compared to Facebook campaigns.\n\n ✅ Facebook needs higher amount of impressions (or has higher frequency) in order to get higher sales.\n\n ✅ For clicks TikTok is showing a difference when comparing Impressions with Clicks to sales. The clicks generated do not seem to result in direct sales.\n
    \"\"\",unsafe_allow_html=True)\n\n#SIXTH PLOT\nx=df.groupby(df.index.dayofweek)[\"ROI\"].mean()\nfig6 = make_subplots()\nfig6.update_layout(showlegend=False)\nfig6.add_trace(go.Bar(x=x.index.map({0:'Monday',1:'Tuesday',2:'Wednesday',3:'Thursday',4:'Friday',5:'Saturday',6:'Sunday'}),y=x,marker={'color':'#008000'}))\nfig6.update_xaxes(title_text=\"Weekday\")\nfig6.update_yaxes(title_text=\"Average ROI\")\nwith tab3:\n st.subheader(\"Average ROI vs Weekday\")\n st.plotly_chart(fig6)\n st.markdown(\"\"\"\n
    \n ✅ The graphs clearly reveal that our average return on investment (ROI) is at its lowest on Fridays and Saturdays, suggesting these days witness the weakest sales performance throughout the week.\n\n ✅ To address this, we should delve into the underlying reasons behind this trend and consider tailored marketing strategies to potentially boost sales during these weekend days.\n
    \"\"\",unsafe_allow_html=True)\n\n\n#SECOND PLOT\nfig2 = make_subplots(rows=1,cols=2)\nfig2.add_trace(go.Scatter(x=df.index,y=df['Total_Spend_CPM'],marker={'color':'#008000'},showlegend=False),row=1,col=1)\nfig2.update_xaxes(title_text=\"Date\",row=1,col=1)\nfig2.update_yaxes(title_text=\"Cost Impressions\",row=1,col=1)\nfig2.add_trace(go.Scatter(x=df.index,y=df['Total_Spend_CPC'],marker={'color':'#008000'},showlegend=False),row=1,col=2)\nfig2.update_xaxes(title_text=\"Date\",row=1,col=2)\nfig2.update_yaxes(title_text=\"Cost Per Click\",row=1,col=2)\nwith tab4:\n st.subheader(\"Media Buying\")\n st.plotly_chart(fig2)\n st.markdown(\"\"\"\n
    \n ✅ We can see that during season Click Per Impressions’ are increasing → competition is rising\n\n ✅ BUT! we can see that Cost Per Clicks in 2023 dropped during high season meaning we did a good job in terms of marketing strategy/activity.\n
    \"\"\",unsafe_allow_html=True)\n\n\n\n#FORTH PLOT\ndf_corr1=df[['orders', 'total_sales',\n 'fb_impressions', 'google_impressions', 'tt_impressions', 'Total_Spend', 'ROI']].corr()\nlabels1=['Orders', 'Total_sales','Facebook_impressions', 'Google_impressions', 'Tiktok_impressions', 'Total_Spend', 'ROI']\nfig4_1=go.Figure(data=go.Heatmap(z=df_corr1, colorscale='greens',x=labels1,y=labels1))\nfig4_1.layout.height = 600\nfig4_1.layout.width = 600\n\ndf_corr2=df[['orders', 'total_sales',\n 'fb_clicks', 'google_clicks', 'tt_clicks', 'Total_Spend', 'ROI']].corr()\nlabels2=['Orders', 'Total_sales',\n 'Facebook_clicks', 'Google_clicks', 'Tiktok_clicks', 'Total_Spend', 'ROI']\nfig4_2=go.Figure(data=go.Heatmap(z=df_corr2, colorscale='oranges',x=labels2,y=labels2))\nfig4_2.layout.height = 600\nfig4_2.layout.width = 600\n\ndf_corr3=df[['orders', 'total_sales',\n 'fb_costs', 'google_costs', 'tt_costs', 'Total_Spend', 'ROI']].corr()\nlabels3=['Orders', 'Total_sales',\n 'Facebook_costs', 'Google_costs', 'Tiktok_costs', 'Total_Spend', 'ROI']\nfig4_3=go.Figure(data=go.Heatmap(z=df_corr3, colorscale='blues',x=labels3,y=labels3))\n\nfig4_3.layout.height = 600\nfig4_3.layout.width = 600\nwith tab5:\n st.subheader('Social Media Impressions Corr To Sale')\n st.plotly_chart(fig4_1)\n st.subheader(\"Social Media Clicks Corr To Sale\")\n st.plotly_chart(fig4_2)\n st.subheader(\"Social Media Costs Corr To Sale\")\n st.plotly_chart(fig4_3)\n st.markdown(\"\"\"\n
    \n ✅ We can see that there is indeed a correlation between Facebook impressions and both orders and total sales (revenue).\n\n ✅ Looking at the clicks correlation data, Google clicks correlate with orders and total sales, which we could already see in the scatter plots.\n\n ✅ The total (marketing) spend is also correlated with orders and total sales.\n\n ✅ TikTok clicks show the least correlation with orders and sales, which we also noticed in the scatter plots.\n\n ✅ Lastly, we see that Facebook costs in particular are affecting the ROI, which may tell us we should be careful about spending too much on Facebook and try to find a better sweet spot. Of course this makes sense, as it is the highest-spending channel.\n
    \"\"\",unsafe_allow_html=True)\n\n#FIFTH PLOT\nfig5 = make_subplots(rows=1,cols=2)\nfig5.add_trace(go.Bar(x=df_avg['Company'],y=df_avg['% Clicks'], name='% Clicks', marker={'color':'#008000'},orientation='v'),row=1,col=1)\nfig5.add_trace(go.Bar(x=df_avg['Company'], y=df_avg['% Spend'], name='% Spends', marker={'color':'#FF7F50'},orientation='v'),row=1,col=1)\nfig5.add_trace(go.Bar(x=df_avg['Company'], y=df_avg['% Impressions'], name='% Impressions', marker={'color':'#069AF3'},orientation='v'),row=1,col=1)\n#fig5.update_layout(legend=dict(x=0.5))\nfig5.update_xaxes(title_text=\"Company\",row=1,col=1)\nfig5.update_yaxes(title_text=\"% Value\",row=1,col=1)\nfig5.add_trace(go.Bar(x=df_avg_cpc_cpm['Company'],y=df_avg_cpc_cpm['Avg CPC'], name='Avg CPC', marker={'color':'#FF7F50'},orientation='v'),row=1,col=2)\nfig5.add_trace(go.Bar(x=df_avg_cpc_cpm['Company'], y=df_avg_cpc_cpm['Avg CPM'], name='Avg CPM', marker={'color':'#008000'},orientation='v'),row=1,col=2)\nfig5.update_xaxes(title_text=\"Company\",row=1,col=2)\nfig5.update_yaxes(title_text=\"Average Value\",row=1,col=2)\nwith tab6:\n st.subheader(\"Comparing Platforms\")\n st.plotly_chart(fig5)\n","repo_name":"data-Zee/MMM-project-lewagon","sub_path":"mmmproject/app/pages/2📕_General_Overview.py","file_name":"2📕_General_Overview.py","file_ext":"py","file_size_in_byte":10646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"4113047326","text":"#Detect a loop in a linkedList\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.next = None\n\ndef detectLoop(a):\n nodeMap = {}\n curr = a\n while curr != None:\n if curr not in nodeMap:\n nodeMap[curr] = 0\n else:\n return True\n nodeMap[curr] += 1\n curr = curr.next\n\n return False\n","repo_name":"sophiajwchoi/daily-coding-challenges","sub_path":"Aug 2018/Aug 10 - 12/detectLoopInLinkedList.py","file_name":"detectLoopInLinkedList.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"17282595885","text":"class IntList(list):\n def __init__(self, size=0):\n super().__init__()\n self.size = size\n\n def add(self, num):\n if self.full():\n self.append(num)\n\n def full(self):\n if len(self) < self.size:\n return True\n\n def __add__(self, other):\n if self.size > other.size:\n new_list = IntList(other.size)\n if self.size < other.size:\n new_list = IntList(self.size)\n new_list = [x + y for x, y in zip(self, other)]\n return new_list\n\n\n# Main program starts here\nlist1 = IntList(5) \t# Constructs an IntList that can hold 5 integers\nlist2 = IntList(12) \t# Constructs an IntList that can hold 12 integers\n\nfor i in range(10):\n list1.add(i)\n list2.add(i)\n\nprint(list1)\nprint(list2)\n\nprint(\"Length of list1 is: {}\".format(len(list1)))\nprint(\"Length of list2 is: {}\".format(len(list2)))\n\nif list1.full():\n print(\"list1 is full\")\nif list2.full():\n print(\"list2 is full\")\n\nlist3 = list1 + list2\nprint(list3)\n\nlist4 = list2 + list1\nprint(list4)","repo_name":"joningi98/SC-T-111-PROG","sub_path":"verkefni_fyrir_prof/progs/verkefni/lol_intlist.py","file_name":"lol_intlist.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"43350533146","text":"import os\nimport logging\n\nfrom telegram.ext.dispatcher import run_async\n\nfrom log import LOG_PATH\n\nlogger = logging.getLogger(__name__)\n\n\n@run_async\ndef 
log_handler(update, context):\n \"\"\"Send log\n\n Args:\n update (telegram.Update): Object represents an incoming update.\n context (telegram.ext.CallbackContext): Telegram bot context\n \"\"\"\n if os.path.isfile(LOG_PATH):\n update.effective_message.reply_document(open(LOG_PATH, \"rb\"))\n else:\n update.effective_message.reply_html(f'Log file not found\\n{LOG_PATH}')\n\n\n@run_async\ndef dump_handler(update, context):\n \"\"\"Send database dump\n\n Args:\n update (telegram.Update): Object represents an incoming update.\n context (telegram.ext.CallbackContext): Telegram bot context\n \"\"\"\n with context.bot.db.create_dump() as dump_file:\n caption = \"\\n\".join(\n f\"{tablename}: {len(data)} rows\"\n for tablename, data in dump_file.data.items()\n )\n update.effective_message.reply_document(dump_file, caption=caption, parse_mode='HTML')\n\n\n@run_async\ndef restore_handler(update, context):\n \"\"\"Restore database\n\n Args:\n update (telegram.Update): Object represents an incoming update.\n context (telegram.ext.CallbackContext): Telegram bot context\n \"\"\"\n message = update.effective_message\n if message.document.file_name == context.bot.db.DUMP_FILE_NAME:\n dump_file = message.document.get_file()\n restore_result = context.bot.db.restore_dump(dump_file.download_as_bytearray())\n\n restore_result_str = \"\\n\".join(\n f\"{tablename}: {len(data)} rows\"\n for tablename, data in restore_result.items()\n )\n message.reply_html(f'Successfully restored:\\n{restore_result_str}')\n else:\n message.reply_html(f'Bad file name: {message.document.file_name}')\n\n\n@run_async\ndef export_handler(update, context):\n \"\"\"Send exported database in csv format\n\n Args:\n update (telegram.Update): Object represents an incoming update.\n context (telegram.ext.CallbackContext): Telegram bot context\n \"\"\"\n for csv_file in context.bot.db.export_csv():\n update.effective_message.reply_document(csv_file)\n","repo_name":"aatrubilin/subredditbot","sub_path":"redditbot/bot/handlers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"38243767210","text":"import datetime\nimport glob\nimport os\nimport os.path as osp\nimport shutil\nfrom concurrent.futures import ProcessPoolExecutor\nfrom typing import Any\n\nimport cv2\nimport numpy as np\nimport tqdm\nfrom realsense_recorder.io import get_directory_reader\nfrom rich.console import Console\n\nconsole = None\n\n\ndef _read_color(path: str) -> Any:\n \"\"\"\n Read anything, from a path\n :param path:\n :return: numpy.ndarray, RGB frame\n \"\"\"\n return cv2.imread(path, cv2.IMREAD_COLOR)\n\n\ndef _compress_color_folder(input_folder, output_folder, n_prefetch):\n reader = get_directory_reader(input_folder, 'color_bmp', num_preload=n_prefetch, read_function=_read_color)\n with tqdm.tqdm(range(len(reader))) as pbar:\n while not reader.eof:\n frame, meta, _ = reader.next()\n frame_basename_without_ext = osp.splitext(osp.basename(meta['basename']))[0]\n cv2.imwrite(osp.join(output_folder, frame_basename_without_ext + \".jpeg\"), frame)\n pbar.update()\n\n\ndef _compress_depth_folder(input_folder, output_folder, n_prefetch):\n reader = get_directory_reader(input_folder, 'depth_npy', num_preload=n_prefetch)\n with tqdm.tqdm(range(len(reader))) as pbar:\n while not reader.eof:\n frame, meta, _ = reader.next()\n frame_basename_without_ext = osp.splitext(osp.basename(meta['basename']))[0]\n 
np.savez_compressed(osp.join(output_folder, frame_basename_without_ext + \".npz\"), frame)\n pbar.update()\n\n\ndef compress_record(input_recording: str, output_base_dir: str, n_prefetch=16):\n input_recording_name = osp.basename(input_recording)\n\n console.log(f\"Input recording: {input_recording}\")\n\n # Prepare output directory\n OUTPUT_RECORDING = osp.join(output_base_dir, input_recording_name)\n if osp.exists(OUTPUT_RECORDING):\n console.log(f\"Output recording: {OUTPUT_RECORDING} already exists. Delete it first.\")\n\n os.mkdir(OUTPUT_RECORDING) if not osp.exists(OUTPUT_RECORDING) else None\n os.mkdir(osp.join(OUTPUT_RECORDING, \"realsense\")) if not osp.exists(osp.join(OUTPUT_RECORDING, \"realsense\")) else None\n camera_folders = list(\n map(lambda x: os.path.basename(x),\n list(filter(lambda x: os.path.isdir(x),\n glob.glob(osp.join(input_recording, \"realsense\", \"*\"))\n )\n )\n )\n )\n # console.log(f\"Camera folders: {camera_folders}\")\n [os.mkdir(osp.join(OUTPUT_RECORDING, \"realsense\", camera_folder)) for camera_folder in camera_folders if not osp.exists(osp.join(OUTPUT_RECORDING, \"realsense\", camera_folder))]\n [os.mkdir(osp.join(OUTPUT_RECORDING, \"realsense\", camera_folder, 'color')) for camera_folder in camera_folders if not osp.exists(osp.join(OUTPUT_RECORDING, \"realsense\", camera_folder, 'color'))]\n [os.mkdir(osp.join(OUTPUT_RECORDING, \"realsense\", camera_folder, 'depth')) for camera_folder in camera_folders if not osp.exists(osp.join(OUTPUT_RECORDING, \"realsense\", camera_folder, 'depth'))]\n\n system_meta_data_files = list(filter(lambda x: os.path.isfile(x), glob.glob(osp.join(input_recording, \"realsense\", \"*\"))))\n # console.log(f\"Meta data: {system_meta_data_files}\")\n\n try:\n shutil.rmtree(osp.join(OUTPUT_RECORDING, \"imu\"))\n except:\n pass\n shutil.copytree(osp.join(input_recording, \"imu\"), osp.join(OUTPUT_RECORDING, \"imu\"))\n [shutil.copyfile(meta_data_file, osp.join(OUTPUT_RECORDING, \"realsense\", osp.basename(meta_data_file))) for meta_data_file in system_meta_data_files]\n [shutil.copyfile(osp.join(input_recording, \"realsense\", camera_folder, \"realsense_intrinsic.json\"), osp.join(OUTPUT_RECORDING, \"realsense\", camera_folder, \"realsense_intrinsic.json\")) for\n camera_folder in camera_folders]\n\n folder_compression_mapping_color = {\n osp.join(input_recording, \"realsense\", camera_folder, 'color'): osp.join(OUTPUT_RECORDING, \"realsense\", camera_folder, 'color') for camera_folder in camera_folders\n }\n folder_compression_mapping_depth = {\n osp.join(input_recording, \"realsense\", camera_folder, 'depth'): osp.join(OUTPUT_RECORDING, \"realsense\", camera_folder, 'depth') for camera_folder in camera_folders\n }\n\n console.log(\"Folder compression mapping:\")\n console.log(folder_compression_mapping_color)\n console.log(folder_compression_mapping_depth)\n\n with ProcessPoolExecutor(max_workers=4) as pool:\n ctx = []\n for _input_folder, _output_folder in folder_compression_mapping_color.items():\n console.log(f\"Compressing color {_input_folder} to {_output_folder}\")\n ctx.append(pool.submit(_compress_color_folder, _input_folder, _output_folder, n_prefetch))\n [_.result() for _ in ctx]\n\n # for _input_folder, _output_folder in folder_compression_mapping_color.items():\n # console.log(f\"Compressing color {_input_folder} to {_output_folder}\")\n # _compress_color_folder(_input_folder, _output_folder, n_prefetch)\n\n with ProcessPoolExecutor(max_workers=4) as pool:\n ctx = []\n for _input_folder, _output_folder in 
folder_compression_mapping_depth.items():\n console.log(f\"Compressing depth {_input_folder} to {_output_folder}\")\n ctx.append(pool.submit(_compress_depth_folder, _input_folder, _output_folder, n_prefetch))\n [_.result() for _ in ctx]\n\n #\n # for _input_folder, _output_folder in folder_compression_mapping_depth.items():\n # console.log(f\"Compressing depth {_input_folder} to {_output_folder}\")\n # _compress_depth_folder(_input_folder, _output_folder, n_prefetch)\n #\n # for _input_folder, _output_folder in folder_compression_mapping_color.items():\n # console.log(f\"Compressing color {_input_folder} to {_output_folder}\")\n # _compress_color_folder(_input_folder, _output_folder, n_prefetch)\n\n\ndef main():\n global console\n with open('./' + \"run_compression_\" + datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\") + \".log\", 'w') as f:\n console = Console(file=f)\n\n # FIXME: REPLACE WITH YOUR OWN PATHS\n N_PREFETCH = 8\n IMMOBILE_INPUT_RECORDING_PATTERN = r\"\\\\100.99.96.101\\articulated_recording\\pre-release\\data\\immobile\\*\"\n IMMOBILE_INPUT_RECORDING_LIST = list(filter(lambda x: osp.isdir(x), glob.glob(IMMOBILE_INPUT_RECORDING_PATTERN)))\n IMMOBILE_OUTPUT_DIR = r\"D:\\pre-release\\data\\immobile\"\n\n PORTABLE_INPUT_RECORDING_PATTERN = r\"\\\\100.99.96.101\\articulated_recording\\pre-release\\data\\portable\\*\"\n PORTABLE_INPUT_RECORDING_LIST = list(filter(lambda x: osp.isdir(x), glob.glob(PORTABLE_INPUT_RECORDING_PATTERN)))\n PORTABLE_OUTPUT_DIR = r\"D:\\pre-release\\data\\portable\"\n\n ARGUMENT_LIST = [(x, IMMOBILE_OUTPUT_DIR, N_PREFETCH) for x in IMMOBILE_INPUT_RECORDING_LIST] + [(x, PORTABLE_OUTPUT_DIR, N_PREFETCH) for x in PORTABLE_INPUT_RECORDING_LIST]\n\n console.print(ARGUMENT_LIST)\n\n # compress_record(*ARGUMENT_LIST[0])\n\n for arg in ARGUMENT_LIST:\n try:\n compress_record(*arg)\n except Exception as e:\n console.log(e)\n\n\nif __name__ == '__main__':\n # debug\n pass\n # main()\n console = Console()\n compress_record(r\"C:\\Users\\liyutong\\Downloads\\eyeglasses-049-1\", r\"D:\\pre-release\\data\\immobile\")\n","repo_name":"mvig-robotflow/rfmarkit-processing","sub_path":"deprecated/tasks/run_compression.py","file_name":"run_compression.py","file_ext":"py","file_size_in_byte":7303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"45567286947","text":"import pandas as pd\r\n\r\nimport numpy as np\r\n\r\ntrain = pd.read_csv('H:\\\\pythonchengx_u\\\\Tianchiantai\\\\dianshangtuijian\\\\Antai_AE_round1_train_20190626.csv')\r\ntest = pd.read_csv('H:\\\\pythonchengx_u\\\\Tianchiantai\\\\dianshangtuijian\\\\Antai_AE_round1_test_20190626.csv')\r\n\r\nall_data = pd.concat([train,test])\r\nall_data = all_data.sort_values(by=['buyer_admin_id', 'irank'], ascending=[True, True])\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndf_ratings_train, df_ratings_test= train_test_split(all_data,\r\n stratify=all_data['buyer_admin_id'],\r\n random_state = 15688,\r\n test_size=0.30)\r\n\r\nprint(\"Number of training data: \"+str(len(df_ratings_train)))\r\nprint(\"Number of test data: \"+str(len(df_ratings_test)))\r\n\r\n\r\n# 为每个用户生成列表 用户的每一个item看做一个词,用户所有的item 在一个句子里\r\ndef get_preprocessing(df_):\r\n df = df_.copy() \r\n df['hour'] = df['create_order_time'].apply(lambda x:int(x[11:13]))\r\n df['day'] = df['create_order_time'].apply(lambda x:int(x[8:10]))\r\n df['month'] = df['create_order_time'].apply(lambda x:int(x[5:7]))\r\n df['year'] = df['create_order_time'].apply(lambda x:int(x[0:4]))\r\n 
df['date'] = (df['month'].values - 7) * 31 + df['day'] \r\n    del df['create_order_time'] \r\n    return df\r\n\r\ndef splitter(df):\r\n    # One \"sentence\" per (user, day): the list of items that user ordered that day.\r\n    df['item_id'] = df['item_id'].astype('str')\r\n    gp_user_like = df.groupby(['buyer_admin_id','date'])\r\n    return ([gp_user_like.get_group(gp)['item_id'].tolist() for gp in gp_user_like.groups])\r\n\r\nall_data = get_preprocessing(all_data)\r\nsplitted_items = splitter(all_data)\r\n\r\nimport warnings\r\nwarnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')\r\n\r\nimport gensim\r\nassert gensim.models.word2vec.FAST_VERSION > -1\r\n\r\nimport random\r\n\r\n# for item_list in splitted_items:\r\n#     random.shuffle(item_list)\r\n\r\nfrom gensim.models import Word2Vec\r\nimport datetime\r\nstart = datetime.datetime.now()\r\n\r\nmodel = Word2Vec(sentences = splitted_items, # the pre-processed list of per-(user, day) item lists\r\n                 iter = 10, # number of training epochs\r\n                 min_count = 1, # keep every item, even those that appear only once\r\n                 size = 200, # dimensionality of the hidden layer (the item embeddings)\r\n                 workers = 4, # number of threads to be used for training\r\n                 sg = 1, # defines the training algorithm; 1 selects skip-gram\r\n                 hs = 0, # set to 0, as we are applying negative sampling\r\n                 negative = 5, # if > 0, negative sampling is used; we draw 5 noise words\r\n                 window = 10)\r\n\r\nprint(\"Time passed: \" + str(datetime.datetime.now()-start))\r\n#Word2Vec.save('item2vec_20180327')\r\n\r\nmodel.save('H:\\\\pythonchengx_u\\\\Tianchiantai\\\\dianshangtuijian\\\\item2vec_8_9_5')","repo_name":"928689035wings/Tianchi_Recommendation-algorithm","sub_path":"item2vec8_9_5_3.py","file_name":"item2vec8_9_5_3.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"}
+{"seq_id":"13954901787","text":"import numpy as np\r\n\r\n\r\nclass Individual:\r\n    \"\"\"Implementation of the NxN board.\"\"\"\r\n\r\n    def __init__(self, n=8, queens=None, cross=\"two_point\"):\r\n        self.n = n\r\n        # None instead of a mutable default argument; generate a random board if empty.\r\n        self.queens = np.random.randint(0, self.n, size=(\r\n            self.n,)) if queens is None or len(queens) == 0 else queens\r\n        self.score = self.__score()\r\n        self._cross_type = cross\r\n\r\n    @property\r\n    def n(self):\r\n        return self._n\r\n\r\n    @n.setter\r\n    def n(self, n):\r\n        if n > 0:\r\n            self._n = n\r\n        else:\r\n            raise ValueError(\"Error: n must be greater than 0.\")\r\n\r\n    @property\r\n    def score(self):\r\n        return self._score\r\n\r\n    @score.setter\r\n    def score(self, val):\r\n        self._score = val\r\n\r\n    def conflict(self, i):\r\n        # Queen i conflicts with an earlier queen on the same row or diagonal.\r\n        error = False\r\n        for k, queen in enumerate(self.queens[:i]):\r\n            if any([queen == self.queens[i],\r\n                    self.queens[k] == (self.queens[i]-(i-k)), self.queens[k] == (self.queens[i]+(i-k))]):\r\n                error = True\r\n                break\r\n        return error\r\n\r\n    def __score(self):\r\n        score = 0\r\n        for j in range(self.n):\r\n            score += 1 if not self.conflict(j) else 0\r\n        return score\r\n\r\n    def cross_point(self, parent):\r\n        point = np.random.randint(1, self.n-1)\r\n        offspring = np.concatenate(\r\n            (self.queens[:point], parent.queens[point:])).copy()\r\n        return Individual(self.n, offspring)\r\n\r\n    def cross_two_point(self, parent):\r\n        fp = np.random.randint(1, self.n-2)\r\n        sp = np.random.randint(fp+1, self.n)\r\n        offspring = np.concatenate(\r\n            (self.queens[:fp], parent.queens[fp:sp], self.queens[sp:])).copy()\r\n        return Individual(self.n, offspring)\r\n\r\n    def cross_uniform(self, parent):\r\n
        # For each gene flip a coin: 0 keeps this parent's queen, 1 takes the other parent's.\r\n        choices = np.random.randint(2, size=self.n)\r\n        offspring = np.where(choices, parent.queens, self.queens).copy()\r\n        return Individual(self.n, offspring)\r\n\r\n    @property\r\n    def cross(self):\r\n        \"\"\"Return the crossover method selected by self._cross_type.\r\n\r\n        :rtype: function\r\n        \"\"\"\r\n        return self.__getattribute__(\"cross_{}\".format(self._cross_type))\r\n\r\n    def mutate(self):\r\n        # Shift a random subset of queens by a random offset, wrapping around the board.\r\n        to_mutate_size = np.random.randint(1, self.n+1)\r\n        index = np.random.choice(self.n, to_mutate_size, False)\r\n\r\n        def permut_gen(queen):\r\n            moves = np.random.randint(self.n-1)\r\n            if np.random.randint(2):\r\n                queen = queen + \\\r\n                    moves if (queen+moves) <= (self.n -\r\n                                               1) else ((queen+moves) - self.n)\r\n            else:\r\n                queen = (queen-moves) if (queen -\r\n                                          moves) >= 0 else (self.n-1) - (abs(queen-moves))\r\n\r\n            return queen\r\n        self.queens[index] = np.vectorize(permut_gen)(self.queens[index])\r\n        self.score = self.__score()\r\n        return self\r\n\r\n    def __str__(self):\r\n        return \"Individual Score : {} , Queens :{} \".format(self.score, str(self.queens))\r\n\r\n    @property\r\n    def to_dict(self):\r\n        return {\r\n            \"queens\": self.queens,\r\n            \"score\": self.score,\r\n            \"cross_type\": self._cross_type\r\n        }\r\n\r\n    @staticmethod\r\n    def random_population(k, n, cross):\r\n        for _ in range(k):\r\n            yield Individual(n, cross=cross)\r\n","repo_name":"thalesdev/studies","sub_path":"python/evolutive/NQueens/v3/Individual.py","file_name":"Individual.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"72973046045","text":"# Task 1\n# define an empty list and\n# add the name of each person in the class to it one by one\n\nl=[]\nl.append(\"gülce\")\nl.append(\"hasan\")\nl.append(\"ece\")\nl.append(\"ada\")\nl.append(\"rüzgar\")\nl.append(\"cem\")\nl.append(\"okyanus\")\nl.append(\"emin\")\nl.append(\"arke\")\nprint(l)\n# Print the numbers from 1 to 22 using a while loop.\na=1\nwhile a<=22:\n    print(a)\n    a+=1\n# Print the numbers from 20 down to -2 (20 and -2 included), decreasing by two, using a for loop.\nprint(30*\"*\")\nfor b in range(20,-3,-2):\n    print(b)\n\n# Find the sum of the numbers from 1 up to a given number using a for loop.\n\nt=0\nr=int(input(\"enter a number:\"))\nfor s in range(1,r+1):\n    t+=s\n\nprint(f\"sum of the numbers from 1 to {r} = {t}\")\n","repo_name":"serkancam/bilsem23_24","sub_path":"O1A1_2324/temeller/hatirlatma.py","file_name":"hatirlatma.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
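A minimal usage sketch for the Individual class above — one toy generation loop of a genetic algorithm (hypothetical driver code, not part of the original file; it assumes numpy is imported as np alongside Individual):

population = list(Individual.random_population(20, 8, "uniform"))
for _ in range(100):
    # Rank by fitness (number of non-conflicting queens) and keep the best half.
    population.sort(key=lambda ind: ind.score, reverse=True)
    parents = population[:10]
    # Refill the population with mutated crossovers of the survivors.
    children = [p.cross(np.random.choice(parents)).mutate() for p in parents]
    population = parents + children
best = max(population, key=lambda ind: ind.score)

+{"seq_id":"5607490218","text":"import faiss\nimport numpy as np\nfrom openai.embeddings_utils import get_embedding, cosine_similarity\nimport pandas as pd\n\nembedding_model = \"text-embedding-ada-002\"\n\ndef load_embeddings_to_faiss(df):\n    embeddings = np.array(df['embedding'].tolist()).astype('float32')\n    index = faiss.IndexFlatL2(embeddings.shape[1])\n    index.add(embeddings)\n    return index\n\ndef search_index(index, df, query, k=5):\n    query_vector = np.array(get_embedding(query, engine=embedding_model)).reshape(1, -1).astype('float32')\n    distances, indexes = index.search(query_vector, k)\n\n    results = []\n    for i in range(len(indexes)):\n        product_names = df.iloc[indexes[i]]['product_name'].values.tolist()\n        results.append((distances[i], product_names))\n    return results\n\ndf = 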
pd.read_parquet(\"/Users/zm/aigcData/my_taobao_produtct_title.parquet\")\nindex = load_embeddings_to_faiss(df)\nproducts = search_index(index, df, \"自然淡雅背包\", k=3)\n\nfor distances, product_names in products:\n for i in range(len(distances)):\n print(product_names[i], distances[i])\n\n","repo_name":"szd1007/guava-learn","sub_path":"src/main/python/aigc/p9_3_search_product_faiss.py","file_name":"p9_3_search_product_faiss.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"4022604148","text":"import json\nimport jsonpatch\nimport traceback\n\nfrom adh_sample_library_preview import (ADHClient, Role, RoleScope, Trustee, TrusteeType, User, UserInvitation, AccessControlList,\n AccessControlEntry, AccessType, CommonAccessRightsEnum, SdsType, SdsTypeProperty, SdsTypeCode, SdsStream)\n\ncustom_role_name = 'custom role - security management sample'\n\ndef get_appsettings():\n \"\"\"Open and parse the appsettings.json file\"\"\"\n\n # Try to open the configuration file\n try:\n with open(\n 'appsettings.json',\n 'r',\n ) as f:\n appsettings = json.load(f)\n except Exception as error:\n print(f'Error: {str(error)}')\n print(f'Could not open/read appsettings.json')\n exit()\n\n return appsettings\n\n\ndef get_tenant_member_role_id(client: ADHClient):\n \"\"\"Helper function that retrieves the first role with the Tenant Member role type Id\"\"\"\n roles = client.Roles.getRoles()\n for role in roles:\n if role.RoleTypeId == client.Roles.TenantMemberRoleTypeId:\n return role.Id\n\n\ndef main(test = False):\n global custom_role_name\n\n try:\n print('Sample starting...')\n\n # Read appsettings and create a client\n appsettings = get_appsettings()\n\n tenant_id = appsettings.get('TenantId')\n namespace_id = appsettings.get('NamespaceId')\n contact_given_name = appsettings.get('ContactGivenName')\n contact_surname = appsettings.get('ContactSurname')\n contact_email = appsettings.get('ContactEmail')\n\n client = ADHClient(appsettings.get('ApiVersion'),\n appsettings.get('TenantId'),\n appsettings.get('Resource'),\n appsettings.get('ClientId'),\n appsettings.get('ClientSecret'))\n\n # Step 1 - Create a role\n print('Creating a role')\n custom_role = Role(name=custom_role_name,\n role_scope=RoleScope.Tenant, tenant_id=tenant_id)\n custom_role = client.Roles.createRole(custom_role)\n\n # Step 2 - Create a user and invite them\n print('Creating a user and invite them')\n user = User(contact_given_name=contact_given_name, contact_surname=contact_surname, contact_email=contact_email,\n identity_provider_id=client.Users.MicrosoftIdentityProviderId, role_ids=[custom_role.Id])\n\n user.RoleIds.append(get_tenant_member_role_id(client))\n user = client.Users.createUser(user)\n invitation = UserInvitation(send_invitation=True)\n client.Users.createOrUpdateInvitation(user.Id, invitation)\n\n # Step 3 - Create a type\n print('Creating a type')\n date_time_type = SdsType('DateTimeType', SdsTypeCode.DateTime)\n int_type = SdsType('IntType', SdsTypeCode.Int32)\n date_time_property = SdsTypeProperty('DateTime', True, date_time_type)\n int_property = SdsTypeProperty('Value', False, int_type)\n example_type = SdsType('example_type-security_management_sample', SdsTypeCode.Object, [\n date_time_property, int_property], 'This is a type example.')\n example_type = client.Types.getOrCreateType(namespace_id, example_type)\n\n # Step 4 - Create a stream\n print('Creating a stream')\n example_stream = SdsStream(\n 
'example_stream-security_management_sample', example_type.Id)\n example_stream = client.Streams.getOrCreateStream(\n namespace_id, example_stream)\n\n # Step 5 - Add a custom role to example type, example stream, and streams collection ACL using PUT\n print('Adding custom role to example type, example stream, and streams collection access control lists using PUT')\n trustee = Trustee(TrusteeType.Role, tenant_id, custom_role.Id)\n entry = AccessControlEntry(trustee, AccessType.Allowed,\n CommonAccessRightsEnum.Read | CommonAccessRightsEnum.Write)\n\n type_acl = client.Types.getAccessControl(\n namespace_id, example_type.Id)\n type_acl.RoleTrusteeAccessControlEntries.append(entry)\n client.Types.updateAccessControl(\n namespace_id, example_type.Id, type_acl)\n\n stream_acl = client.Streams.getAccessControl(\n namespace_id, example_stream.Id)\n stream_acl.RoleTrusteeAccessControlEntries.append(entry)\n client.Streams.updateAccessControl(\n namespace_id, example_stream.Id, stream_acl)\n \n # The access control list (ACL) of the Streams collection is modified in this step\n # The collection ACL is used as a default for all new items in a collection, so any new stream created will have this ACL\n # In addition, it governs who has access to a collection and who can make new collection items (such as new streams)\n streams_acl = client.Streams.getDefaultAccessControl(namespace_id)\n streams_acl.RoleTrusteeAccessControlEntries.append(entry)\n client.Streams.updateDefaultAccessControl(namespace_id, streams_acl)\n\n # Step 6 - Add a role from the example stream ACL using PATCH\n print('Adding a role from the example stream access control list using PATCH')\n patch = jsonpatch.JsonPatch(\n [{\n 'op': 'add', 'path': '/RoleTrusteeAccessControlEntries/-',\n 'value': {\n 'AccessRights': 0,\n 'AccessType': 'Allowed',\n 'Trustee': {'ObjectId': get_tenant_member_role_id(client), 'TenantId': tenant_id, 'Type': 'Role'}\n }\n }])\n client.Streams.patchAccessControl(\n namespace_id, example_stream.Id, patch)\n\n # Step 7 - Change owner of example stream\n print('Changing owner of example stream')\n stream_owner = client.Streams.getOwner(namespace_id, example_stream.Id)\n stream_owner.ObjectId = user.Id\n stream_owner.Type = TrusteeType.User\n client.Streams.updateOwner(\n namespace_id, example_stream.Id, stream_owner)\n\n # Step 8 - Retrieve the access rights of the example stream\n print('Retrieving the access rights of the example stream')\n access_rights = client.Streams.getAccessRights(\n namespace_id, example_stream.Id)\n for access_right in access_rights:\n print(access_right.name)\n\n except Exception as error:\n print((f'Encountered Error: {error}'))\n print()\n traceback.print_exc()\n print()\n if test:\n raise error\n \n finally:\n if test:\n return user, stream_owner, custom_role, stream_acl, streams_acl, type_acl\n \n print('Complete!')\n\n\nif __name__ == '__main__':\n\n main()\n","repo_name":"osisoft/sample-adh-security_management-python","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":6789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"30250329990","text":"from datetime import datetime, timedelta\n\nfrom django.conf import settings\nfrom django.db import models\n\nclass GameQuerySet(models.query.QuerySet):\n\t\"\"\"A lazy database lookup for a set of games\"\"\"\n\tdef joinable(self, user=None):\n\t\tqs = self.exclude(slots=0)\n\t\tif user:\n\t\t\tqs = qs.exclude(player__user=user)\n\t\treturn 
qs\n\n\tdef pending(self, user=None):\n\t\tqs = self.filter(started__isnull=True)\n\t\tif user:\n\t\t\tqs = qs.filter(player__user=user)\n\t\treturn qs\n\t\n\tdef in_progress(self, user=None):\n\t\tqs = self.filter(started__isnull=False, finished__isnull=True)\n\t\tif user:\n\t\t\tqs = qs.filter(player__user=user)\n\t\treturn qs\n\n\tdef finished(self, user=None):\n\t\tqs = self.filter(finished__isnull=False)\n\t\tif user:\n\t\t\tqs = qs.filter(score__user=user)\n\t\treturn qs\n\n\tdef private(self):\n\t\treturn self.filter(private=True)\n\n\tdef by_teams(self):\n\t\treturn self.filter(teams__gt=0)\n\t\n\tdef expired(self):\n\t\t\"\"\"Return a queryset of games that have not started and are too\n\t\told.\"\"\"\n\t\ts = getattr(settings, 'GAME_EXPIRATION', 60*60*24*30) \n\t\told_date = datetime.now() - timedelta(seconds=s)\n\t\treturn self.filter(\n\t\t\tstarted__isnull=True,\n\t\t\tfinished__isnull=True,\n\t\t\tcreated__lt=old_date\n\t\t)\n\n\tdef get_promoted(self, user=None):\n\t\t\"\"\"Return a Game that is about to start\"\"\"\n\t\ts = getattr(settings, 'GAME_PROMOTION', 60*60*24*7)\n\t\tafter = datetime.now() - timedelta(seconds=s)\n\t\tg = self.filter(\n\t\t\tslots__gt=0,\n\t\t\tprivate=False,\n\t\t\tcreated__gt=after).order_by('slots')\n\t\tif user:\n\t\t\tg = g.exclude(player__user=user)\n\t\ttry:\n\t\t\treturn g[0]\n\t\texcept IndexError:\n\t\t\treturn self.none()\n\t\t\nclass GameCommentQuerySet(models.query.QuerySet):\n\t\"\"\"A lazy database lookup for a set of game comments\"\"\"\n\tdef public(self):\n\t\treturn self.filter(is_public=True)\n\nclass PlayerQuerySet(models.query.QuerySet):\n\t\"\"\"A lazy database lookup for a set of players\"\"\"\n\tdef active(self, user=None):\n\t\t\"\"\"Return a queryset of players that are active in games\"\"\"\n\t\tp = self.filter(\n\t\t\tgame__started__isnull=False,\n\t\t\tgame__finished__isnull=True,\n\t\t\tsurrendered=False\n\t\t)\n\t\tif user:\n\t\t\tp = p.filter(user=user)\n\t\treturn p\n\t\n\tdef by_cities(self):\n
\t\tqs = self.exclude(user__isnull=True). \\\n\t\t\torder_by('team_id').extra(\n\t\t\tselect={\n\t\t\t\t'cities': 'SELECT COUNT(*) FROM machiavelli_gamearea \\\n\t\t\t\tINNER JOIN condottieri_scenarios_area \\\n\t\t\t\tON machiavelli_gamearea.board_area_id=\\\n\t\t\t\tcondottieri_scenarios_area.id \\\n\t\t\t\tAND condottieri_scenarios_area.has_city=1 \\\n\t\t\t\tWHERE machiavelli_gamearea.player_id=machiavelli_player.id'\n\t\t\t},\n\t\t\torder_by = ['-cities']\n\t\t\t)\n\t\treturn qs\n\t\n\tdef human(self):\n\t\treturn self.exclude(user__isnull=True)\n\n\tdef waited(self, user=None):\n\t\t\"\"\"Return a queryset of players that must confirm their actions\n\t\t\"\"\"\n\t\tp = self.filter(\n\t\t\tgame__started__isnull=False,\n\t\t\tdone = False,\n\t\t\tsurrendered = False\n\t\t)\n\t\tif user:\n\t\t\tp = p.filter(user=user)\n\t\treturn p\n\t\n\nclass RevolutionQuerySet(models.query.QuerySet):\n\t\"\"\"A lazy database lookup for a set of revolutions\"\"\"\n\tdef open(self):\n\t\treturn self.exclude(overthrow=True)\n\n\tdef successful(self):\n\t\treturn self.filter(overthrow=True)\n","repo_name":"jantoniomartin/machiavelli","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"86"}
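These QuerySet classes are usually exposed through a model manager so the chainable filters hang off Model.objects; a minimal sketch (the Game model and its fields are assumed from the lookups above, not shown in this file):

from django.db import models

class Game(models.Model):
    # ... fields such as slots, private, teams, started, finished, created ...
    objects = models.Manager.from_queryset(GameQuerySet)()

# e.g. all public games a user could still join:
# Game.objects.joinable(user=request.user)

+{"seq_id":"15560998633","text":"# Lab 2 assignment Python\r\n# Author: Sunny Kriplani\r\n# Student number : 119220438\r\n\r\nimport re\r\n\r\n\r\n# This function finds all the strings containing an EUR amount specified in a\r\n# specific format\r\n\r\ndef moolah(text):\r\n    regexp = re.compile(r'EUR\\s?\\d+(\\.\\d+)?')\r\n    lst = []\r\n    for m in regexp.finditer(text):\r\n        lst.append(m.group())\r\n    return lst\r\n\r\n\r\n# This function bleeps all 4-letter words and replaces them with ****\r\ndef bleep(text):\r\n    regex = re.compile(r\"\\b[a-zA-Z]{4}\\b\")\r\n    text = regex.sub('****', text)\r\n    return text\r\n\r\n\r\n# This function finds digits in a string and adds English words to them\r\ndef to_english(text):\r\n    regex = re.compile(r\"\\d+\\ ?\")\r\n    lst = [\"zero\", \"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\",\r\n           \"eight\", \"nine\"]\r\n    values = regex.findall(text)\r\n    values = [x.strip(' ') for x in values]\r\n    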
val = []\r\n for i in values:\r\n if i not in val:\r\n val.append(i)\r\n for m in val:\r\n eng = \"\"\r\n reg = re.compile(r\"\\d\")\r\n eng = m + \" \"\r\n eng += \"(\"\r\n for n in reg.finditer(str(m)):\r\n eng += lst[int(n.group())]\r\n eng += \" \"\r\n eng = eng[:-1]\r\n eng += \")\"\r\n pattern = re.compile(r'\\b%s\\b' % m)\r\n text = re.sub(pattern, eng, text)\r\n return text\r\n\r\n\r\n# This function finds all the emails address in a given string\r\ndef harvest_emails(text):\r\n regexp = re.compile(r'[\\w.-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+')\r\n lst = regexp.findall(text)\r\n emails = []\r\n for m in lst:\r\n lst = re.split(r\"\\@\", m)\r\n local = lst[0]\r\n domain = lst[1]\r\n if (local[0] != \".\" and local[-1] != \".\" and (local.find(\"..\") < 0)\r\n and local.find(\"-\") < 0):\r\n if(domain[0] != \"-\" and domain[-1] != \"-\"):\r\n emails.append(m)\r\n # To sort the emails based on the local and domain names\r\n emails = sorted(emails, key=lambda x: (x.split(\"@\", 1)[::-1], x))\r\n if len(emails) == 0:\r\n return None\r\n else:\r\n return emails\r\n","repo_name":"sunnykriplani/py_assignment","sub_path":"term 2/a2.py","file_name":"a2.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"74422492764","text":"from common.numpy_fast import interp\nfrom openpilot.selfdrive.controls.lib.lateral_planner import TRAJECTORY_SIZE\n\nLEAD_WINDOW_SIZE = 5\nLEAD_PROB = 0.6\n\nSLOW_DOWN_WINDOW_SIZE = 5\nSLOW_DOWN_PROB = 0.6\nSLOW_DOWN_BP = [0., 10., 20., 30., 40., 50., 55.]\nSLOW_DOWN_DIST = [10, 30., 50., 70., 80., 90., 120.]\n\nSLOWNESS_WINDOW_SIZE = 20\nSLOWNESS_PROB = 0.6\nSLOWNESS_CRUISE_OFFSET = 1.05\n\nDANGEROUS_TTC_WINDOW_SIZE = 5\nDANGEROUS_TTC = 2.0\n\nHIGHWAY_CRUISE_KPH = 75\n\nSTOP_AND_GO_FRAME = 60\n\nSET_MODE_TIMEOUT = 10\n\nMPC_FCW_WINDOW_SIZE = 5\nMPC_FCW_PROB = 0.6\n\n\nclass SNG_State:\n off = 0\n stopped = 1\n going = 2\n\n\nclass GenericMovingAverageCalculator:\n def __init__(self, window_size):\n self.window_size = window_size\n self.data = []\n self.total = 0\n\n def add_data(self, value):\n if len(self.data) == self.window_size:\n self.total -= self.data.pop(0)\n self.data.append(value)\n self.total += value\n\n def get_moving_average(self):\n if len(self.data) == 0:\n return None\n return self.total / len(self.data)\n\n def reset_data(self):\n self.data = []\n self.total = 0\n\n\nclass DynamicExperimentalController:\n def __init__(self):\n self._is_enabled = False\n self._mode = 'acc'\n self._mode_prev = 'acc'\n self._frame = 0\n\n self._lead_gmac = GenericMovingAverageCalculator(window_size=LEAD_WINDOW_SIZE)\n self._has_lead_filtered = False\n self._has_lead_filtered_prev = False\n\n self._slow_down_gmac = GenericMovingAverageCalculator(window_size=SLOW_DOWN_WINDOW_SIZE)\n self._has_slow_down = False\n\n self._has_blinkers = False\n\n self._slowness_gmac = GenericMovingAverageCalculator(window_size=SLOWNESS_WINDOW_SIZE)\n self._has_slowness = False\n\n self._has_nav_enabled = False\n\n self._dangerous_ttc_gmac = GenericMovingAverageCalculator(window_size=DANGEROUS_TTC_WINDOW_SIZE)\n self._has_dangerous_ttc = False\n\n self._v_ego_kph = 0.\n self._v_cruise_kph = 0.\n\n self._has_lead = False\n\n self._has_standstill = False\n self._has_standstill_prev = False\n\n self._sng_transit_frame = 0\n self._sng_state = SNG_State.off\n\n self._mpc_fcw_gmac = GenericMovingAverageCalculator(window_size=MPC_FCW_WINDOW_SIZE)\n self._has_mpc_fcw = False\n self._mpc_fcw_crash_cnt = 
0\n\n self._set_mode_timeout = 0\n pass\n\n def _update(self, car_state, lead_one, md, controls_state):\n self._v_ego_kph = car_state.vEgo * 3.6\n self._v_cruise_kph = controls_state.vCruise\n self._has_lead = lead_one.status\n self._has_standstill = car_state.standstill\n\n # fcw detection\n self._mpc_fcw_gmac.add_data(self._mpc_fcw_crash_cnt > 0)\n self._has_mpc_fcw = self._mpc_fcw_gmac.get_moving_average() >= MPC_FCW_PROB\n\n # nav enable detection\n self._has_nav_enabled = md.navEnabled\n\n # lead detection\n self._lead_gmac.add_data(lead_one.status)\n self._has_lead_filtered = self._lead_gmac.get_moving_average() >= LEAD_PROB\n\n # slow down detection\n self._slow_down_gmac.add_data(len(md.orientation.x) == len(md.position.x) == TRAJECTORY_SIZE and md.position.x[TRAJECTORY_SIZE - 1] < interp(self._v_ego_kph, SLOW_DOWN_BP, SLOW_DOWN_DIST))\n self._has_slow_down = self._slow_down_gmac.get_moving_average() >= SLOW_DOWN_PROB\n\n # blinker detection\n self._has_blinkers = car_state.leftBlinker or car_state.rightBlinker\n\n # sng detection\n if self._has_standstill:\n self._sng_state = SNG_State.stopped\n self._sng_transit_frame = 0\n else:\n if self._sng_transit_frame == 0:\n if self._sng_state == SNG_State.stopped:\n self._sng_state = SNG_State.going\n self._sng_transit_frame = STOP_AND_GO_FRAME\n elif self._sng_state == SNG_State.going:\n self._sng_state = SNG_State.off\n elif self._sng_transit_frame > 0:\n self._sng_transit_frame -= 1\n\n # slowness detection\n if not self._has_standstill:\n self._slowness_gmac.add_data(self._v_ego_kph <= (self._v_cruise_kph*SLOWNESS_CRUISE_OFFSET))\n self._has_slowness = self._slowness_gmac.get_moving_average() >= SLOWNESS_PROB\n\n # dangerous TTC detection\n if not self._has_lead_filtered and self._has_lead_filtered_prev:\n self._dangerous_ttc_gmac.reset_data()\n self._has_dangerous_ttc = False\n\n if self._has_lead and car_state.vEgo >= 0.01:\n self._dangerous_ttc_gmac.add_data(lead_one.dRel/car_state.vEgo)\n\n self._has_dangerous_ttc = self._dangerous_ttc_gmac.get_moving_average() is not None and self._dangerous_ttc_gmac.get_moving_average() <= DANGEROUS_TTC\n\n # keep prev values\n self._has_standstill_prev = self._has_standstill\n self._has_lead_filtered_prev = self._has_lead_filtered\n self._frame += 1\n\n def _blended_priority_mode(self):\n # when mpc fcw crash prob is high\n # use blended to slow down quickly\n if self._has_mpc_fcw:\n self._set_mode('blended')\n return\n\n # when blinker is on and speed is driving below highway cruise speed: blended\n # we dont want it to switch mode at higher speed, blended may trigger hard brake\n if self._has_blinkers and self._v_ego_kph < HIGHWAY_CRUISE_KPH:\n self._set_mode('blended')\n return\n\n # when at highway cruise and SNG: blended\n # ensuring blended mode is used because acc is bad at catching SNG lead car\n # especially those who accel very fast and then brake very hard.\n if self._sng_state == SNG_State.going and self._v_cruise_kph >= HIGHWAY_CRUISE_KPH:\n self._set_mode('blended')\n return\n\n # when standstill: blended\n # in case of lead car suddenly move away under traffic light, acc mode wont brake at traffic light.\n if self._has_standstill:\n self._set_mode('blended')\n return\n\n # when detecting slow down scenario: blended\n # e.g. 
traffic light, curve, stop sign etc.\n    if self._has_slow_down:\n      self._set_mode('blended')\n      return\n\n    # when detecting lead slow down: blended\n    # use blended for higher braking capability\n    if self._has_dangerous_ttc:\n      self._set_mode('blended')\n      return\n\n    # car driving at speed lower than set speed: acc\n    if self._has_slowness:\n      self._set_mode('acc')\n      return\n\n    self._set_mode('blended')\n\n  def _acc_priority_mode(self):\n    # when mpc fcw crash prob is high\n    # use blended to slow down quickly\n    if self._has_mpc_fcw:\n      self._set_mode('blended')\n      return\n\n    # If there is a filtered lead, the vehicle is not in standstill, and the lead vehicle's yRel meets the condition,\n    #if self._has_lead_filtered and not self._has_standstill:\n    #  self._set_mode('acc')\n    #  return\n\n    # when blinker is on and speed is below highway cruise speed: blended\n    # we don't want it to switch mode at higher speed, blended may trigger a hard brake\n    if self._has_blinkers and self._v_ego_kph < HIGHWAY_CRUISE_KPH:\n      self._set_mode('blended')\n      return\n\n    # when standstill: blended\n    # in case the lead car suddenly moves away at a traffic light, acc mode won't brake for the light.\n    if self._has_standstill:\n      self._set_mode('blended')\n      return\n\n    # when detecting slow down scenario: blended\n    # e.g. traffic light, curve, stop sign etc.\n    if self._has_slow_down:\n      self._set_mode('blended')\n      return\n\n    # car driving at speed lower than set speed: acc\n    if self._has_slowness:\n      self._set_mode('acc')\n      return\n\n    self._set_mode('acc')\n\n  def get_mpc_mode(self, radar_unavailable, car_state, lead_one, md, controls_state):\n    if self._is_enabled:\n      self._update(car_state, lead_one, md, controls_state)\n      if radar_unavailable:\n        self._blended_priority_mode()\n      else:\n        self._acc_priority_mode()\n\n    self._mode_prev = self._mode\n    return self._mode\n\n  def set_enabled(self, enabled):\n    self._is_enabled = enabled\n\n  def is_enabled(self):\n    return self._is_enabled\n\n  def set_mpc_fcw_crash_cnt(self, crash_cnt):\n    self._mpc_fcw_crash_cnt = crash_cnt\n\n  def _set_mode(self, mode):\n    if self._set_mode_timeout == 0:\n      self._mode = mode\n      if mode == \"blended\":\n        self._set_mode_timeout = SET_MODE_TIMEOUT\n\n    if self._set_mode_timeout > 0:\n      self._set_mode_timeout -= 1\n","repo_name":"CHaucke89/openpilot","sub_path":"selfdrive/controls/lib/dynamic_experimental_controller.py","file_name":"dynamic_experimental_controller.py","file_ext":"py","file_size_in_byte":8157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
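A quick standalone sketch of how the GenericMovingAverageCalculator used throughout this sample behaves (toy values, not part of the original file):

gmac = GenericMovingAverageCalculator(window_size=3)
for v in [1, 0, 1, 1]:
    gmac.add_data(v)
# The window keeps only the last three samples, so the average is (0 + 1 + 1) / 3.
assert abs(gmac.get_moving_average() - 2 / 3) < 1e-9

+{"seq_id":"6164624186","text":"# -*- coding: cp936 -*-\r\nimport platform\r\nimport os\r\nimport hashlib\r\n\r\nsourceid=10080\r\n\r\ndef readfile(filename):\r\n    fobject=open(filename,\"rb\")\r\n    totalfile=\"\"\r\n    while 1:\r\n        line = fobject.readline()\r\n        if not line:\r\n            fobject.close()\r\n            return totalfile\r\n        totalfile=totalfile+line\r\n\r\ndef getversion():\r\n    return platform.version()\r\n\r\ndef getvbit():\r\n    return platform.architecture()[0]\r\n\r\ndef getpid():\r\n    return os.getpid()\r\n\r\ndef getuid(chose):# specify which program to steal from\r\n    if chose==1:\r\n        return sourceid+6\r\n    if chose==2:\r\n        return sourceid+7\r\n    if chose==3:\r\n        return sourceid+8\r\n    else:\r\n        return sourceid+9\r\n\r\ndef gettoken(src):\r\n    mt=hashlib.md5()\r\n    mt.update(src)\r\n    return mt.hexdigest()\r\n\r\n\r\n# assemble the data packet header\r\ndef headmaker(uid):\r\n    version=getversion()\r\n    vbit=getvbit()\r\n    uid=getuid(uid)\r\n    pid=getpid()\r\n    token=gettoken(str(uid)+str(pid))\r\n    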
result=version+vbit[:2]+str(uid)+str(pid)+token\r\n result=result.replace(\".\",\"\")\r\n return result\r\ndef makepack(uid):\r\n header=headmaker(uid)\r\n while len(header)<50:\r\n header=header+'8'\r\n result=''\r\n j=0\r\n for i in readfile(\"key.txt\"):\r\n if j<50:\r\n result=result+str(ord(i)^ord(header[j]))\r\n else:\r\n j=0\r\n result=result+str(ord(i)^ord(header[j]))\r\n j=j+1\r\n\r\n img=readfile(\"C:\\\\Users\\\\Public\\\\Pictures\\\\Sample Pictures\\\\Desert.jpg\")\r\n data2send=''\r\n for k in result:\r\n for p in range(len(img)):\r\n if ord(k)==ord(img[p]):\r\n data2send=data2send+chr(p)\r\n break \r\n fw=open(\"data.css\",\"wb\")\r\n fw.write(data2send)\r\n fw.close()\r\n\r\n","repo_name":"yuzunzz/script-sole","sub_path":"encode.py","file_name":"encode.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"19786485653","text":"from django.shortcuts import render, redirect\nfrom .forms import *\nfrom .models import *\nfrom django.contrib import messages\nfrom django.http import JsonResponse\n\n\nfrom django.contrib.auth import authenticate, logout\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import user_passes_test, login_required\nfrom django.contrib.auth.models import Group\nfrom django.db.models import Count\nfrom .utils import cookieCart, cartData\nimport json\nfrom slb.settings import DEFAULT_FROM_EMAIL\nfrom django.core.mail import send_mail\n# Create your views here.\ndef index(request):\n\n equipment = Equipments.objects.all()\n total_quantity = sum([item.quantity for item in equipment ])\n total_eq = equipment.count()\n\n\n students = Students.objects.all()\n total_students = students.count()\n print(total_eq)\n\n uncollected_equipments = Equipments.objects.filter(borrowed=True, returned=False)\n quantity_uncollected = sum([item.quantity for item in uncollected_equipments])\n total_uncollected = uncollected_equipments.count()\n \n available_equipments = Equipments.objects.filter(borrowed=False, returned=True)\n quantity_available = sum([item.quantity for item in available_equipments])\n total_available = available_equipments.count()\n\n\n reserve= Reserve.objects.filter(returned=False)\n equipment_approved = Equipments.objects.filter(approved=False) \n reserveItem= ReserveItem.objects.all().filter(reserve__in=reserve, equipment__in=equipment_approved)\n reserved_total = sum([item.quantity for item in reserveItem])\n\n new_equipment= Equipments.objects.filter(returned=False)\n new_reserveItem= ReserveItem.objects.all().filter(equipment__in=new_equipment)\n new_reserved_total = sum([item.quantity for item in new_reserveItem])\n\n new_available = total_quantity - new_reserved_total\n\n context = {'equipment': total_eq, \n 'total_students':total_students, \n 'total_uncollected':total_uncollected, \n 'total_available':total_available, \n 'reserveItem':reserveItem,\n 'total_quantity': total_quantity,\n 'quantity_uncollected': quantity_uncollected,\n 'quantity_available':quantity_available,\n 'new_available': new_available,\n 'reserved_equipment': new_reserved_total,\n 'reserved_total': reserved_total}\n return render(request, 'slb1/admin/index.html', context)\n\ndef login(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n\n print(username)\n\n user = 
authenticate(request, username=username, password=password)\n        if user is not None and user.groups.filter(name='students'):\n            if user.students.status:\n                auth_login(request, user)\n                return redirect('student_home')\n            else:\n                messages.warning(\n                    request, 'Your application is still pending, please wait until it is approved!')\n        elif user is not None and user.is_staff:\n            auth_login(request, user)\n            return redirect('index')\n        else:\n            messages.warning(request, 'Username or password is incorrect')\n            return redirect('login')\n    context = {}\n    return render(request, 'slb1/auth/login.html')\n\n\ndef register(request):\n    form = CreateUserForm()\n    \n    if request.method == 'POST':\n        form = CreateUserForm(request.POST)\n\n        if form.is_valid():\n            user = form.save()\n            idn = 'LMS2021-00'+str(user.id) \n\n            student, created = Students.objects.get_or_create(user=user, idn=idn)\n            request.user.students = student\n\n            group, created = Group.objects.get_or_create(name='students')\n            user.groups.add(group)\n            return redirect('login')\n    \n    context = {'form': form}\n    return render(request, 'slb1/auth/register.html', context)\n\n\ndef equipment(request):\n    equipment = Equipments.objects.all()\n    total_quantity = sum([item.quantity for item in equipment ])\n    # total_eq = equipment.count()\n\n    context={'equipment':equipment, 'total_equipment':total_quantity}\n    return render(request, 'slb1/admin/equipment.html', context)\n\n\ndef new_equipment(request):\n\n    form = EquipmentForm()\n\n    if request.method == 'POST':\n        form = EquipmentForm(request.POST, request.FILES)\n\n        if form.is_valid():\n            form.save()\n            messages.warning(request, 'Equipment added successfully')\n            return redirect('equipment')\n        else:\n            messages.warning(request, 'An error has occurred')\n            return redirect('new_equipment')\n    context = {'form':form}\n    return render(request, 'slb1/admin/new_equipment.html', context)\n\n\ndef update_equipment(request, pk):\n    equipment = Equipments.objects.get(pk=pk)\n\n    form = EquipmentForm(instance=equipment)\n    \n    if request.method == 'POST':\n        form = EquipmentForm(request.POST, request.FILES, instance=equipment)\n\n        if form.is_valid():\n            form.save()\n            messages.warning(request, 'Equipment updated successfully')\n            return redirect('equipment')\n        else:\n            messages.warning(request, 'An error has occurred')\n            return redirect('new_equipment')\n    \n    context = {'form':form}\n    return render(request, 'slb1/admin/new_equipment.html', context)\n\n\ndef delete_equipment(request, pk):\n    equipment = Equipments.objects.get(pk=pk)\n    equipment.delete()\n    messages.warning(request, 'Successfully deleted the equipment')\n    return redirect('equipment')\n\n\ndef students(request):\n    students = Students.objects.all()\n\n    context = {'students':students}\n    return render(request, 'slb1/admin/students.html', context)\n\n\n\ndef student_home(request):\n    equipment = Equipments.objects.all()\n\n    data = cartData(request)\n    cartItems = data['cartItems']\n\n\n    context = {'equipment': equipment, 'cartItems':cartItems}\n    return render(request, 'slb1/student/index.html', context)\n\n\ndef logout_view(request):\n    logout(request)\n    return redirect('login')\n\n\ndef reserve_summary(request):\n    data = cartData(request)\n    cartItems = data['cartItems']\n    items = data['items']\n    order = data['order']\n    \n    context = {'items':items, 'order':order, 'cartItems':cartItems }\n    print(context)\n    return render(request, 'slb1/student/reserve_summary.html', context)\n\n\n\ndef updateItem(request):\n    data = json.loads(request.body)\n    equipmentId = data['equipmentId']\n    action = data['action']\n
\n    print('equipmentId:', equipmentId)\n    print('action:', action)\n    \n    student = request.user.students\n    \n    equipment = Equipments.objects.get(id=equipmentId)\n    reserve, created = Reserve.objects.get_or_create(student=student, returned=False)\n    \n    reserveItem, created = ReserveItem.objects.get_or_create(reserve=reserve, equipment=equipment)\n    \n    if action == 'add':\n        if reserveItem.quantity + 1 <= equipment.quantity:\n            reserveItem.quantity = (reserveItem.quantity + 1)\n        else:\n            messages.warning(request, 'Sorry! There is no more equipment of this type')\n    elif action == 'remove':\n        reserveItem.quantity = (reserveItem.quantity - 1)\n    reserveItem.save()\n    \n    \n    if reserveItem.quantity <= 0:\n        reserveItem.delete()\n    \n    \n    return JsonResponse('item was added', safe=False)\n\n\ndef reserved(request):\n\n    student = request.user.students\n\n    reserve = Reserve.objects.filter(student=student)\n    \n    equipment = Equipments.objects.all().filter(borrowed=True)\n    reserveItem = ReserveItem.objects.all().filter(reserve__in=reserve)\n\n    print(reserveItem)\n\n    context = {'reserveItem': reserveItem}\n    return render(request, 'slb1/student/reserved.html', context)\n\n\ndef approve(request, pk):\n    equipment = Equipments.objects.get(pk=pk)\n    equipment.approved = True\n    equipment.save()\n    messages.success(request, 'Successfully approved the equipment for students')\n    return redirect('index')\n\n\ndef complete_order(request, pk):\n    reserve = Reserve.objects.get(pk=pk)\n    reserve.collect = True\n    reserve.save()\n    messages.success(request, 'Successfully reserved! Please wait for approval from the lab technician')\n    return redirect('rcart')\n\n\n\n\ndef collection(request):\n    equipment = Equipments.objects.filter(approved=True)\n    reserveItem = ReserveItem.objects.all().filter(equipment__in=equipment)\n    reserved_total = sum([item.quantity for item in reserveItem])\n\n    context = {'reserveItem': reserveItem}\n    return render(request, 'slb1/admin/collection.html', context)\n\n\ndef collect(request, pk):\n    equipment = Equipments.objects.get(pk=pk)\n    equipment.returned = True\n    equipment.save()\n    messages.success(request, 'Equipment collected successfully!')\n    return redirect('collection')\n\n\ndef student_approve(request, pk):\n    student = Students.objects.get(pk=pk)\n    student.status = True\n    student.save()\n    subject = 'Account Verification'\n    message = 'Congratulations, your account has now been activated. You can log in using your credentials.'\n    recipient = str(student.user.email)\n    print(recipient)\n    send_mail(subject,\n              message, DEFAULT_FROM_EMAIL, [recipient], fail_silently=False)\n    messages.success(\n        request, 'Successfully approved the student')\n    return redirect('students')\n\n    ","repo_name":"drneyx/Laboratory-Management","sub_path":"slb/slb1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
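A sketch of how these views might be routed; the URL names mirror the redirect() calls above, while the URL paths themselves are assumptions:

from django.urls import path
from . import views

urlpatterns = [
    path('', views.index, name='index'),
    path('login/', views.login, name='login'),
    path('register/', views.register, name='register'),
    path('equipment/', views.equipment, name='equipment'),
    path('equipment/new/', views.new_equipment, name='new_equipment'),
    path('update_item/', views.updateItem, name='update_item'),
]

+{"seq_id":"43515048283","text":"__author__ = 'winzard'\nimport os\nfrom setuptools import setup\n\nwith open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:\n    README = readme.read()\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nsetup(\n    name='django_ymap',\n    version='1.1',\n    packages=['django_ymap'],\n    include_package_data=True,\n    url='https://github.com/xacce/django-simple-yandex-map/',\n    license='MIT License',\n    author='DveBukvy',\n    author_email='mail@dvebukvy.ru',\n    description='several 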
useful tools for any django-project',\n long_description=README,\n install_requires=[]\n\n)","repo_name":"xacce/django-simple-yandex-map","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"86"} +{"seq_id":"74456748443","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 -*-\n\"\"\"\n=======================================\nprospect.scripts.prospect_std_templates\n=======================================\n\nScript to produce \"standard templates\",\nused in prospect.viewer.load_std_templates\n\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nfrom astropy.table import Table\nfrom astropy.io import fits\n\nfrom desispec.interpolation import resample_flux\nfrom prospect.utilities import load_redrock_templates\n\n# Log of previous versions:\n# std_templates_v0.fits : old Redrock templates only\n\ndef main():\n \"\"\"Entry-point for command-line scripts.\n\n Returns\n -------\n :class:`int`\n An integer suitable for passing to :func:`sys.exit`.\n \"\"\"\n std_template_file = os.path.join(os.environ['HOME'], 'prospect/py/prospect/data/std_templates.fits')\n if os.path.isfile(std_template_file):\n print('Error std template file already exists')\n\n #- Templates produced from 1st component of old (pre-Aug 2022) Redrock templates:\n template_dir = os.path.join(os.environ['DESICONDA'], '../code/redrock-templates/0.7.2')\n #std_templates = {'QSO': ('QSO',''), 'GALAXY': ('GALAXY',''), 'STAR': ('STAR','F') }\n std_templates = {'GALAXY': ('GALAXY',''), 'STAR': ('STAR','F') }\n delta_lambd_templates = 3\n\n rr_templts = load_redrock_templates(template_dir=template_dir)\n for key,rr_key in std_templates.items() :\n wave_array = np.arange(rr_templts[rr_key].wave[0], rr_templts[rr_key].wave[-1], delta_lambd_templates)\n flux_array = resample_flux(wave_array, rr_templts[rr_key].wave, rr_templts[rr_key].flux[0,:])\n table_templates = Table(data=[wave_array, flux_array], names=['wave_'+key, 'flux_'+key], meta={'name':key})\n table_templates.write(std_template_file, append=True)\n\n #- Case of QSO (Summer 2022): use new template provided by A. 
Brodzeller\n qsotemplate_file = os.environ['HOME'] + '/stdtemplate-qso.fits'\n hdul = fits.open(qsotemplate_file)\n qsowave = 10**(hdul[0].header['CRVAL1']+np.arange(hdul[0].header['NAXIS1'])*hdul[0].header['CDELT1'])\n qsoflux = hdul[0].data\n # Resample as previously:\n wave_array = np.arange(qsowave[0], qsowave[-1], delta_lambd_templates)\n flux_array = resample_flux(wave_array, qsowave, qsoflux)\n table_templates = Table(data=[wave_array, flux_array], names=['wave_QSO', 'flux_QSO'], meta={'name':'QSO'})\n table_templates.write(std_template_file, append=True)\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())","repo_name":"desihub/prospect","sub_path":"py/prospect/scripts/prospect_std_templates.py","file_name":"prospect_std_templates.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"86"} +{"seq_id":"15920116256","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # imports\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.cluster import KMeans\n\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.metrics import confusion_matrix\n\nimport pickle\n\n\n# In[32]:\n\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# In[164]:\n\n\nfrom sklearn.cluster import DBSCAN\n\n\n# # data\n\n# In[2]:\n\n\nmnist = fetch_openml('mnist_784', version=1, as_frame=True)\nmnist.target = mnist.target.astype(np.uint8)\n\n\n# In[3]:\n\n\nX = mnist[\"data\"]\ny = mnist[\"target\"]\n\n\n# # KMeans\n\n# In[4]:\n\n\nn_kmeans = [8, 9, 10, 11, 12]\n\n\n# In[5]:\n\n\nkmeans_clfs = []\npreds = []\n\nfor n in range(len(n_kmeans)):\n print(f\"n = {n_kmeans[n]}\\n\")\n clf = KMeans(n_clusters=n_kmeans[n], random_state=42)\n kmeans_clfs.append(clf)\n preds.append(clf.fit_predict(X))\n\n\n# In[ ]:\n\n\n\n\n\n# ## silhoutte scores\n\n# In[6]:\n\n\ns_scores = []\n\n\n# In[7]:\n\n\nfor clf in kmeans_clfs:\n s_score = silhouette_score(X, clf.labels_)\n s_scores.append(s_score)\n\n\n# In[ ]:\n\n\ns_scores\n\n\n# In[14]:\n\n\nwith open(\"kmeans_sil.pkl\", \"wb\") as f:\n pickle.dump(s_scores, f, pickle.HIGHEST_PROTOCOL)\n\n\n# ## confusion matrix\n\n# In[10]:\n\n\nprint(kmeans_clfs[2], preds[2])\nconfusion_matrix_k10 = confusion_matrix(y, preds[2])\nprint(confusion_matrix_k10)\n\n\n# In[9]:\n\n\nindices_max = np.argmax(confusion_matrix_k10, axis=1)\nindices_max\n\n\n# In[11]:\n\n\nindices_max_set = set(indices_max)\nindices_max_set\n\n\n# In[12]:\n\n\nwith open(\"kmeans_argmax.pkl\", \"wb\") as f:\n pickle.dump(indices_max_set, f, pickle.HIGHEST_PROTOCOL)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# # DBSCAN\n\n# In[49]:\n\n\ndistances = set()\n\n\n# In[50]:\n\n\nfor i in range(300):\n for j in range(300):\n distances.add(np.linalg.norm(X.loc[i] - X.loc[j]))\n\n\n# In[70]:\n\n\ndistances_list = list(distances)\ndistances_list.sort()\n\n\n# In[72]:\n\n\ndist = []\n\nfor i in range(1, 11):\n dist.append(distances_list[i])\n\n\n# In[74]:\n\n\nwith open(\"dist.pkl\", \"wb\") as f:\n pickle.dump(dist, f, pickle.HIGHEST_PROTOCOL)\n\n\n# In[154]:\n\n\neps_list = []\n\n\n# In[162]:\n\n\ns = sum(dist[7:]) / 3\ns_top = s * 1.1\nstep = s * 0.04\n\nindex = 0\nwhile s < s_top:\n eps_list.append(s)\n s += step\n\n\n# In[169]:\n\n\ndbscans = []\n\nfor eps in eps_list:\n print(f\"eps = {eps}\")\n dbscan = DBSCAN(eps=eps)\n dbscan.fit(X)\n dbscans.append(dbscan)\n print(\"appended\")\n\n\n# 
In[177]:\n\n\ndbscan_len_list = []\n\nfor dbscan in dbscans:\n    dbscan_len_list.append(len(set(dbscan.labels_)))\n\n\n# In[178]:\n\n\nwith open(\"dbscan_len.pkl\", \"wb\") as f:\n    pickle.dump(dbscan_len_list, f, pickle.HIGHEST_PROTOCOL)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"derekktor/Uczenie-Maszynow","sub_path":"7 - Clustering/lab7.py","file_name":"lab7.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"10596455618","text":"# author: Mathieu Renzo\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or (at\n# your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see http://www.gnu.org/licenses/.\n\n\n\n# This is a library to analyze output data from COSMIC simulations\nimport numpy as np\n\ndef isNSBHWD(array):\n    # array is a kstar_* array of stellar types\n    # assumes the stellar types from Hurley 2000\n    array = np.array(array,dtype=int)\n    return ((array >=10) & (array != 15))\n\ndef bothWD(array1, array2):\n    # array* is a kstar_* array of stellar types\n    # assumes the stellar types from Hurley 2000\n    array1 = np.array(array1,dtype=int)\n    array2 = np.array(array2,dtype=int)\n    return(((array1[:] >= 10) & (array1[:] <13)) & ((array2[:] >= 10) & (array2[:] <13)))\n\ndef make2Dmap(x, y, z, x1=0, x2=1, y1=0, y2=1, res=20):\n    minx = min(min(x),x1)\n    maxx = min(max(x),x2)\n    miny = min(min(y),y1)\n    maxy = min(max(y),y2)\n\n    x_int = np.linspace(minx,maxx,res)\n    y_int = np.linspace(miny,maxy,res)\n\n    mat = np.zeros([len(x_int),len(y_int)])\n    for i in range(0,len(x_int)-1):\n        for j in range(0,len(y_int)-1):\n            mat[j,i] = np.sum(z[(x>=x_int[i])*(x<x_int[i+1])*(y>=y_int[j])*(y<y_int[j+1])])\n    return mat\n\n# [gap: the definition of the function computing Mgw from k1, k2, M1, M2, Mcore1, Mcore2 was lost when angle-bracketed text was stripped during extraction]\n    # star 1 non-MS and non-degenerate, star 2 MS or degenerate\n    star1_nondeg_star2_ms = np.array(((k1>1) & (k1<10)) & ((k2 <=1) | (k2>=10)), dtype=bool)\n    Mgw[star1_nondeg_star2_ms] = Mcore1[star1_nondeg_star2_ms]+M2[star1_nondeg_star2_ms]\n    # other non-MS and non-degenerate and other MS or degenerate\n    star2_nondeg_star1_ms = np.array(((k2>1) & (k2<10)) & ((k1 <=1)| (k1>=10)), dtype=bool)\n    Mgw[star2_nondeg_star1_ms] = Mcore2[star2_nondeg_star1_ms]+M1[star2_nondeg_star1_ms]\n    # both non-MS and non-deg\n    bothnonMS = np.array(((k1>1) & (k1<10)) & ((k2>1) & (k2<10)), dtype=bool)\n    Mgw[bothnonMS] = Mcore1[bothnonMS] + Mcore2[bothnonMS]\n    # now deal with MS and degenerate stars\n    # both MS or both compact\n    bothMS_or_deg = np.array(((k1<=1) & (k2<=1)) | ((k1>10) & (k2>10)), dtype=bool)\n    Mgw[bothMS_or_deg] = M1[bothMS_or_deg] + M2[bothMS_or_deg]\n    # one MS other degenerate\n    oneMS_other_deg = np.array((k1<=1) & (k2>10), dtype=bool)\n    Mgw[oneMS_other_deg] = M1[oneMS_other_deg] + M2[oneMS_other_deg]\n    # other MS one degenerate\n    otherMS_one_deg = np.array((k2<=1) & (k1>10), dtype=bool)\n    Mgw[otherMS_one_deg] = M1[otherMS_one_deg] + M2[otherMS_one_deg]\n    ## sanity check:\n    all = 

np.array(star1_nondeg_star2_ms,dtype=int)+np.array(star2_nondeg_star1_ms,dtype=int)+np.array(bothnonMS,dtype=int)+np.array(bothMS_or_deg,dtype=int)+np.array(oneMS_other_deg,dtype=int)+np.array(otherMS_one_deg,dtype=int)\n # print(\"1, 1?\", min(all), max(all))\n #print(min(Mgw), max(Mgw))\n return Mgw\n","repo_name":"tcallister/LISA-and-CE-Evolution","sub_path":"COSMIC_popsynth/COSMIClib.py","file_name":"COSMIClib.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"86"} +{"seq_id":"25466748783","text":"# Functions\n\n# name = input(\"what is your name?\")\n\n# def say_hello():\n# print(\"hello \" + name)\n\n# say_hello()\n\n# def say_hello2(name=\"test\", emoji=\"😑\"):\n# print(f\"Hello {name} {emoji}\")\n\n\n# say_hello2(\"jovan\", \"🤓\")\n# say_hello2()\n\n# def sum(num1, num2):\n# def another_func(num3, num4):\n# return num3 + num4\n# return another_func(num1, num2)\n\n\n# total = sum(10, 20)\n# print(total)\n\n# txt = [\"haha\", \"lol\"]\n\n\n# total = sum(10, 5)\n# print(sum(4, 5))\n# print(total)\n# print(sum(4, total))\n\n# def test(a):\n# '''Info: This is a completely useless function, testing docstrings'''\n# print(a)\n\n\n# help(test)\n\n\n# def is_even(num):\n# return num % 2 == 0\n\n\n# print(is_even(50))\n\ndef super_func(*args, **kwargs):\n total = 0\n print(args)\n for items in kwargs.values():\n total += items\n return sum(args) + total\n\n\nprint(super_func(1, 2, 3, 4, 5, num1=5, num2=10))\n","repo_name":"connorjnel/ztm-python-dev","sub_path":"03-python-basics-2/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"3558893820","text":"class Solution(object):\n def canCompleteCircuit(self, gas, cost):\n n = len(gas)\n dp = [gas[0] - cost[0]]\n for i in range(1,n):\n dp.append(dp[i-1] + gas[i] - cost[i])\n if dp[-1] < 0:\n return -1\n return (dp.index(min(dp)) + 1)%n\n\n\ngas = [3,1,1]\ncost = [1,2,2]\nprint(Solution().canCompleteCircuit(gas, cost))\n","repo_name":"chenfu2017/Algorithm","sub_path":"leetcode/134.GasStation.py","file_name":"134.GasStation.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"9724336158","text":"#!/usr/bin/env python\n# coding=utf-8\nimport os, sys\nfrom copy import copy\n\nimport collections\nfrom setuptools import setup, find_packages\n\n__version__ = open('VERSION','rU').read()\nsys.path.insert(0,'nmrbrew')\n\n# Defaults for py2app / cx_Freeze\nbuild_py2app=dict(\n argv_emulation=True,\n includes=[\n 'PyQt5',\n \"PyQt5.uic.port_v3.proxy_base\",\n\n 'nmrglue',\n 'nmrglue.fileio.fileiobase',\n\n ],\n excludes=[\n '_xmlplus',\n 'test',\n 'networkx',\n 'wx',\n 'mpl-data',\n 'Tkinter',\n \"collections.abc\",\n 'nose',\n 'PyQt4',\n 'PySide',\n 'debug',\n ], \n resources=[\n 'nmrbrew/tools',\n 'nmrbrew/icons',\n 'nmrbrew/static',\n 'nmrbrew/translations',\n 'VERSION',\n 'README.md',\n ],\n plist=dict(\n CFBundleName = \"NMRBrew\",\n CFBundleShortVersionString = __version__,\n CFBundleGetInfoString = \"NMRBrew %s\" % __version__,\n CFBundleExecutable = \"NMRBrew\",\n CFBundleIdentifier = \"org.nmrbrew.nmrbrew\",\n ), \n iconfile='nmrbrew/static/icon.icns',\n #/usr/local/Cellar/qt5/5.3.2/plugins\n qt_plugins=[\n 'platforms/libqcocoa.dylib',\n 'imageformats',\n 'printsupport/libcocoaprintersupport.dylib',\n 'accessible',\n ],\n 
)\n\nsetup(\n\n name='NMRBrew',\n version=__version__,\n author='Martin Fitzpatrick',\n packages = find_packages(),\n include_package_data = True,\n app=['NMRBrew.py'],\n options={\n 'py2app': build_py2app,\n },\n setup_requires=['py2app'],\n )\n","repo_name":"mfitzp/nmrbrew","sub_path":"setup.mac.py","file_name":"setup.mac.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"17669988217","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom matplotlib import pyplot as plt\nfrom tqdm import tqdm\n\nimport research.code.util as H\nfrom disent.nn.functional import torch_box_kernel_2d\nfrom disent.nn.functional import torch_conv2d_channel_wise_fft\nfrom disent.nn.functional import torch_gaussian_kernel_2d\n\n\n# ========================================================================= #\n# distance function #\n# ========================================================================= #\n\n\ndef spearman_rank_dist(\n pred: torch.Tensor,\n targ: torch.Tensor,\n reduction='mean',\n nan_to_num=False,\n):\n # add missing dim\n if pred.ndim == 1:\n pred, targ = pred.reshape(1, -1), targ.reshape(1, -1)\n assert pred.shape == targ.shape\n assert pred.ndim == 2\n # sort the last dimension of the 2D tensors\n pred = torch.argsort(pred).to(torch.float32)\n targ = torch.argsort(targ).to(torch.float32)\n # compute individual losses\n # TODO: this can result in nan values, what to do then?\n pred = pred - pred.mean(dim=-1, keepdim=True)\n pred = pred / pred.norm(dim=-1, keepdim=True)\n targ = targ - targ.mean(dim=-1, keepdim=True)\n targ = targ / targ.norm(dim=-1, keepdim=True)\n # replace nan values\n if nan_to_num:\n pred = torch.nan_to_num(pred, nan=0.0)\n targ = torch.nan_to_num(targ, nan=0.0)\n # compute the final loss\n loss = (pred * targ).sum(dim=-1)\n # reduce the loss\n if reduction == 'mean':\n return loss.mean()\n elif reduction == 'none':\n return loss\n else:\n raise KeyError(f'Invalid reduction mode: {repr(reduction)}')\n\n\ndef check_xy_squares_dists(kernel='box', repeats=100, samples=256, pairwise_samples=256, kernel_radius=32, show_prog=True):\n if kernel == 'box':\n kernel = torch_box_kernel_2d(radius=kernel_radius)[None, ...]\n elif kernel == 'max_box':\n crange = torch.abs(torch.arange(kernel_radius * 2 + 1) - kernel_radius)\n y, x = torch.meshgrid(crange, crange)\n d = torch.maximum(x, y) + 1\n d = d.max() - d\n kernel = (d.to(torch.float32) / d.sum())[None, None, ...]\n elif kernel == 'min_box':\n crange = torch.abs(torch.arange(kernel_radius * 2 + 1) - kernel_radius)\n y, x = torch.meshgrid(crange, crange)\n d = torch.minimum(x, y) + 1\n d = d.max() - d\n kernel = (d.to(torch.float32) / d.sum())[None, None, ...]\n elif kernel == 'manhat_box':\n crange = torch.abs(torch.arange(kernel_radius * 2 + 1) - kernel_radius)\n y, x = torch.meshgrid(crange, crange)\n d = (y + x) + 1\n d = d.max() - d\n kernel = (d.to(torch.float32) / d.sum())[None, None, ...]\n elif kernel == 'gaussian':\n kernel = torch_gaussian_kernel_2d(sigma=kernel_radius / 4.0, truncate=4.0)[None, None, ...]\n else:\n raise KeyError(f'invalid kernel mode: {repr(kernel)}')\n\n # make dataset\n dataset = H.make_dataset('xysquares')\n\n losses = []\n prog = tqdm(range(repeats), postfix={'loss': 0.0}) if show_prog else range(repeats)\n\n for i in prog:\n # get random samples\n factors = dataset.sample_factors(samples)\n batch = dataset.dataset_batch_from_factors(factors, mode='target')\n if 
torch.cuda.is_available():\n batch = batch.cuda()\n kernel = kernel.cuda()\n factors = torch.from_numpy(factors).to(dtype=torch.float32, device=batch.device)\n\n # random pairs\n ia, ib = torch.randint(0, len(batch), size=(2, pairwise_samples), device=batch.device)\n\n # compute factor distances\n f_dists = torch.abs(factors[ia] - factors[ib]).sum(dim=-1)\n\n # compute loss distances\n aug_batch = torch_conv2d_channel_wise_fft(batch, kernel)\n # TODO: aug - batch or aug - aug\n # b_dists = torch.abs(aug_batch[ia] - aug_batch[ib]).sum(dim=(-3, -2, -1))\n b_dists = F.mse_loss(aug_batch[ia], aug_batch[ib], reduction='none').sum(dim=(-3, -2, -1))\n\n # compute ranks\n # losses.append(float(torch.clamp(torch_mse_rank_loss(b_dists, f_dists), 0, 100)))\n # losses.append(float(torch.abs(torch.argsort(f_dists, descending=True) - torch.argsort(b_dists, descending=False)).to(torch.float32).mean()))\n losses.append(float(spearman_rank_dist(b_dists, f_dists)))\n\n if show_prog:\n prog.set_postfix({'loss': np.mean(losses)})\n\n return np.mean(losses), aug_batch[0]\n\n\ndef run_check_all_xy_squares_dists(show=False):\n for kernel in [\n 'box',\n 'max_box',\n 'min_box',\n 'manhat_box',\n 'gaussian',\n ]:\n rs = list(range(1, 33, 4))\n ys = []\n for r in rs:\n ave_spearman, last_img = check_xy_squares_dists(kernel=kernel, repeats=32, samples=128, pairwise_samples=1024, kernel_radius=r, show_prog=False)\n H.plt_imshow(H.to_img(last_img, scale=True), show=show)\n ys.append(abs(ave_spearman))\n print(kernel, r, ':', r*2+1, abs(ave_spearman))\n plt.plot(rs, ys, label=kernel)\n plt.legend()\n plt.show()\n\n\n# ========================================================================= #\n# MAIN #\n# ========================================================================= #\n\n\nif __name__ == '__main__':\n run_check_all_xy_squares_dists()\n","repo_name":"nmichlo/msc-research","sub_path":"research/part03_learnt_overlap/e01_learn_to_disentangle/run_02_check_aug_gt_dists.py","file_name":"run_02_check_aug_gt_dists.py","file_ext":"py","file_size_in_byte":5442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"1812806747","text":"class Solution:\n def plusOne(self, digits: List[int]) -> List[int]:\n # reverse the list\n digits = digits[::-1]\n\n # initialize carry and pointer\n # 1 because only need to increment by 1, and 0 for the starting index\n one, i = 1, 0\n\n # continue to iterate while one == 1\n while one:\n # if i is still in bound\n if i < len(digits):\n # if digits i position is at an integer 9\n if digits[i] == 9:\n # if it is 9 we're going to reset pointer back to 0\n digits[i] = 0\n # if not 9\n else:\n # increment by 1\n digits[i] += 1\n # no longer need a carry so change one back to 0\n one = 0\n # if i is not in bound we've reached the end\n else:\n digits.append(1)\n # don't have a carry so reset one to 0\n one = 0\n # increment i\n i += 1\n # return the array re-reversed\n return digits[::-1]\n","repo_name":"Introvette/LeetCode","sub_path":"#66.py","file_name":"#66.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"20097912223","text":"from django.shortcuts import redirect\nfrom django.core.mail import send_mail\nfrom django.contrib import auth, messages\nfrom contas.models import Token\nfrom django.core.urlresolvers import reverse\n\ndef envia_email_login(request):\n email = request.POST['email']\n token = 
Token.objects.create(email=email)\n\n    url = request.build_absolute_uri(\n        reverse('login') + '?token=' + str(token.uid)\n    )\n    corpo_do_email = f'Use this link to log in:\\n\\n{url}'\n\n    send_mail('Your login link for BikeUnit',\n            corpo_do_email,\n            'noreply@bikeunit.com', \n            [email]\n    )\n    messages.success(\n        request,\n        'Check your email, we sent you a link so you can sign in.'\n    )\n    return redirect('/')\n\ndef login(request):\n    user = auth.authenticate(uid=request.GET.get('token'))\n    if user:\n        auth.login(request, user)\n    return redirect('/')\n","repo_name":"natannjos/bikeunit","sub_path":"contas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"42140452852","text":"from __future__ import absolute_import\n\nimport logging\nimport subprocess\nimport os\nimport time\nimport glob\n\nclass PsqlDumperManager:\n\n    def __init__(self, conf, pg_user, dump_root, email=None, success=False):\n        self.conf = conf\n        self.pg_user = pg_user\n        self.dump_root = dump_root\n        if email:\n            self.notify = True\n            self.email = email\n            self.notify_success = success\n        else:\n            self.notify = False\n\n    def check_if_master(self):\n        try:\n            subprocess.check_call(\"/usr/bin/psql -U {} -t -P format=unaligned -c 'SELECT pg_is_in_recovery();' | grep -q 'f'\".format(self.pg_user), shell = True)\n            return True\n        except subprocess.CalledProcessError:\n            return False\n\n    def __db_exists(self, db):\n        try:\n            subprocess.check_call('/usr/bin/psql -U {} -lqt | cut -d \\| -f 1 | grep -w {} > /dev/null'.format(self.pg_user, db), shell = True)\n            return True\n        except subprocess.CalledProcessError:\n            return False\n\n    def select_databases(self):\n        for db in self.conf.keys():\n            if self.__db_exists(db):\n                now = int(time.strftime(\"%H\"))\n                freq = int(self.conf[db]['frequency'])\n                if now % freq == 0:\n                    dump_ext = 'sql' if self.conf[db]['sql_dump'] else 'dmp'\n                    dump_args = '' if self.conf[db]['sql_dump'] else ' -Fc -b'\n\n                    self.conf[db]['dump'] = True\n                    self.conf[db]['dump_cmd'] = '/usr/bin/pg_dump -U {user}{args} {db} > {root}/{db}_DATE.{ext}'.format(user=self.pg_user, args=dump_args, db=db, root=self.dump_root, ext=dump_ext)\n                else:\n                    self.conf[db]['dump'] = False\n                    self.conf[db]['details'] = 'Not time to back up yet (every {} hours)'.format(self.conf[db]['frequency'])\n            else:\n                self.conf[db]['dump'] = False\n                self.conf[db]['details'] = 'Database does not exist on this server'\n\n    def run_jobs(self):\n        for db in self.conf.keys():\n            date = time.strftime(\"%Y%m%d-%H%M%S\")\n\n            if self.conf[db]['dump']:\n                try:\n                    logging.getLogger('psqldumper').info('[{}] Running dump job'.format(db))\n\n                    cmd = '{} 2> {}/{}_{}.log'.format(self.conf[db]['dump_cmd'].replace('DATE',date), self.dump_root, db, date)\n                    subprocess.check_call(cmd, shell = True)\n                    \n                    logging.getLogger('psqldumper').info('[{}] Dump job completed'.format(db))\n                    self.conf[db]['status'] = 'Done'\n\n                    logging.getLogger('psqldumper').info('[{}] Cleaning up old dumps'.format(db))\n\n                    retention = int(self.conf[db]['retention'])\n                    now = time.time()\n                    cutoff = now - (retention * 86400)\n                    glob_match = '{}/{}_*'.format(self.dump_root, db)\n                    for dump in glob.glob(glob_match):\n                        c = os.stat(dump).st_ctime\n                        if c < cutoff:\n                            logging.getLogger('psqldumper').info('[{}] Removing old dump: {}'.format(db, dump))\n                            os.remove(dump)\n                        else:\n                            logging.getLogger('psqldumper').info('[{}] Not touching: {}'.format(db, dump))\n\n                    logging.getLogger('psqldumper').info('[{}] 

Cleanup done'.format(db))\n                except subprocess.CalledProcessError as e:\n                    logging.getLogger('psqldumper').info('[{}] Dump job FAILED : {}'.format(db,e))\n                    self.conf[db]['status'] = 'Failed'\n                    self.conf[db]['details'] = 'Log file is attached'\n                    self.conf[db]['logfile'] = '{}_{}.log'.format(db, date)\n            else:\n                logging.getLogger('psqldumper').info('[{}] Database dump ignored: {}'.format(db, self.conf[db]['details']))\n                self.conf[db]['status'] = 'Ignored'\n\n    def send_notifications(self):\n        pass\n","repo_name":"oasiswork/psqldumper","sub_path":"psqldumper/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"27342262449","text":"import copy\n\nwith open('Input5.txt') as f:\n    input = f.read().splitlines()\n\nfor i in range(len(input)):\n    if input[i] == '':\n        stacks = input[:i-1]\n        moves = input[i+1:]\n        break\n\nstackOverview = [None] * len(stacks[-1].split(' '))\nstacks.reverse()\nfor stack in stacks:\n    stackSplit = [stack[i+1] for i in range(0, len(stack), 4)]\n    for j in range(len(stackSplit)):\n        if stackOverview[j] is None:\n            stackOverview[j] = [stackSplit[j]]\n        elif stackSplit[j] != ' ':\n            stackOverview[j].append(stackSplit[j])\n        else:\n            continue\n\noriginalStacks = copy.deepcopy(stackOverview)\n\nfor move in moves:\n    moveSplit = move.split(' ')\n    amount = int(moveSplit[1])\n    source = int(moveSplit[3]) - 1\n    target = int(moveSplit[5]) - 1\n    movedStack = originalStacks[source][len(originalStacks[source])-amount:]\n    for stack in movedStack:\n        originalStacks[target].append(stack)\n        originalStacks[source].pop()\n    for crate in range(amount):\n        stackOverview[target].append(stackOverview[source][-1])\n        stackOverview[source].pop()\n\nanswer1 = ''\nanswer2 = ''\nfor stack in stackOverview:\n    answer1 += stack[-1]\nfor stack in originalStacks:\n    answer2 += stack[-1]\nprint(answer1)\nprint(answer2)","repo_name":"tbennis/AdventOfCode","sub_path":"2022/Week1/Day5.py","file_name":"Day5.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"14464543814","text":"import turtle\nimport math\nt = turtle.Turtle() \nt.speed(\"fastest\")\nwn = turtle.Screen()\nwn.title(\"Flags\") \nt.shape(\"turtle\")\n\ndef basic_flag (width, height, color):\n    for i in range(2):\n        t.color(color)\n        t.pendown()\n        t.begin_fill()\n        t.forward(width)\n        t.right(90)\n        t.forward(height)\n        t.right(90)\n        t.end_fill()\n        t.penup()\n\ndef three_color_flag (width, height, color1, color2, color3, ori):\n\t# vertical rectangles\n\tcolors = []\n\tcolors.append(color1)\n\tcolors.append(color2)\n\tcolors.append(color3)\n\tif ori == 1:\n\t\tfor color in colors:\n\t\t\tbasic_flag(width*2/3, height, color)\n\t\t\tt.forward(width*2/3)\n\telse:\n\t\tfor color in colors:\n\t\t\tbasic_flag(width, height*2/3, color)\n\t\t\tt.right(90)\n\t\t\tt.forward(height*2/3)\n\t\t\tt.left(90)\n\ndef star(size):\n\tt.color(\"yellow\")\n\tt.pendown()\n\tt.begin_fill()\n\tfor i in range (5):\n\t\tt.forward(size)\n\t\tt.right(144)\n\tt.end_fill()\n\tt.penup()\n\tt.right(18)\n\tt.forward(size/2)\n\tt.left(18)\n\n\n\ndef european_flag (width, height, size):\n\tbasic_flag(width, height, \"blue\")\n\tt.forward(width/2)\n\tt.right(90)\n\tt.forward(height/2)\n\tt.right(90)\n\tfor i in range (12):\n\t\tt.forward(height/2-25)\n\t\tstar(size)\n\t\tt.left(180)\n\t\tt.forward(height/2-25)\n\t\tt.right(30)\n\n\ndef 

belgian_flag():\n\tthree_color_flag(30,20,\"black\", \"yellow\", \"red\", 1)\n\n\n\n\n\neuropean_flag(350, 200, 20)\n\n\nwn.mainloop()\n","repo_name":"davidlefebvre19/mission_ucl","sub_path":"Mission 3/flag.py","file_name":"flag.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"35684116651","text":"from django.contrib.auth import authenticate, login as auth_login, logout\nfrom django.shortcuts import render, render_to_response\nfrom django.http import HttpResponseRedirect\nfrom django.views.decorators.http import require_http_methods\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import RequestContext\nfrom netsoft_create import settings\nfrom main_app.forms import *\nfrom main_app.models import *\nfrom django.contrib import messages\n\nimport main_app.util as util\nimport os\n\n########## STATIC PAGES ##########\n\ndef index(request):\n return render(request, 'index.html')\n\ndef committee(request):\n return render(request, 'committee.html')\n\ndef core_course(request):\n return render(request, 'core_course.html')\n\ndef faculty(request):\n return render(request, 'faculty.html')\n\ndef internship_postings(request):\n return render(request, 'internship_postings.html')\n\ndef partners(request):\n return render(request, 'partners.html')\n\ndef principles_foundations(request):\n return render(request, 'principles_foundations.html')\n\ndef program(request):\n return render(request, 'program.html')\n\ndef technology_enablers(request):\n return render(request, 'technology_enablers.html')\n\ndef under_construction(request):\n return render(request, 'under_construction.html')\n\n\n########## INTERNSHIP PORTAL STUFF ##########\n\n@login_required\ndef internship_profile(request):\n\n resumes = Resume.objects.filter(username = request.user.username)\n\n print(resumes.values())\n profile = Profile.objects.filter(username = request.user.username).values()\n print(profile)\n\n return_dict = {'UploadResumeForm': UploadResumeForm, 'resumes': resumes, 'ProfileForm': ProfileForm, 'profile': profile}\n return render(request,'internship/profile.html', return_dict)\n\n@login_required\n@require_http_methods([\"POST\"])\ndef save_profile(request):\n\n for key in request.POST:\n print(\"%s: %s\" %(key, request.POST[key]))\n\n form = ProfileForm(request.POST or None)\n if form.is_valid():\n\n tech_enablers = False\n princ_foundations = False\n\n if \"technologies_enablers\" in request.POST.getlist(\"courses\"):\n tech_enablers = True\n\n if \"principles_foundations\" in request.POST.getlist(\"courses\"):\n princ_foundations = True\n\n profile = Profile.objects.filter(username = request.user.username)\n if profile:\n\n Profile.objects.filter(username = request.user.username).update(\n first_name = form.cleaned_data.get(\"first_name\"),\n last_name = form.cleaned_data.get(\"last_name\"),\n university = form.cleaned_data.get(\"university\"),\n degree = form.cleaned_data.get(\"degree\"),\n technology_enablers = tech_enablers,\n principles_foundations = princ_foundations\n )\n else:\n\n new_profile = Profile(username = request.user.username,\n first_name = form.cleaned_data.get(\"first_name\"),\n last_name = form.cleaned_data.get(\"last_name\"),\n university = form.cleaned_data.get(\"university\"),\n degree = form.cleaned_data.get(\"degree\"),\n technology_enablers = tech_enablers,\n principles_foundations = princ_foundations\n )\n\n new_profile.save()\n return 
HttpResponseRedirect(\"/internship/profile/\")\n else:\n curr_profile = Profile.objects.filter(username = request.user.username).values()\n return render(request, 'internship/profile.html', {'ProfileForm': form, 'profile': curr_profile})\n\n\n@login_required\n@require_http_methods([\"POST\"])\ndef upload_resume(request):\n\n form = UploadResumeForm(request.POST, request.FILES)\n if form.is_valid():\n newresume = Resume(resume = request.FILES['resume'], username = request.user.username)\n newresume.save()\n\n return HttpResponseRedirect(\"/internship/profile/\")\n else:\n return render(request, 'internship/profile.html', {'form': form})\n\n\n@login_required\n@require_http_methods([\"POST\"])\ndef delete_resume(request):\n\n resume = request.POST['resume']\n\n # remove the requested resumes to be deleted\n resumes = Resume.objects.filter(resume = resume, username = request.user.username).delete()\n\n file_location = settings.MEDIA_ROOT+'/'+resume\n print(file_location)\n if os.path.exists(file_location):\n os.remove(file_location)\n\n return HttpResponseRedirect(\"/internship/profile/\")\n\n\ndef signin(request):\n\n return_dict = {'signInForm': SignInForm, 'createAccountForm': CreateAccountForm}\n\n if request.user.is_authenticated:\n return HttpResponseRedirect(\"/internship/profile/\")\n\n\n if request.method == \"GET\":\n return render(request, 'login.html', return_dict)\n else:\n form = SignInForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n auth_login(request, user)\n return HttpResponseRedirect(\"/internship/profile/\")\n else:\n return_dict['errors'] = \"Your Username and/or Password does not match! Please try again.\"\n else:\n return_dict['errors'] = \"Invalid input. Try again!\"\n\n return render(request, 'login.html', return_dict)\n\ndef signout(request):\n logout(request)\n return HttpResponseRedirect(\"/signin/\")\n\n\ndef signup(request):\n\n return_dict = {'signInForm': SignInForm, 'createAccountForm': CreateAccountForm}\n\n if request.method == \"GET\":\n return render(request, 'login.html', return_dict)\n\n if request.method == 'POST':\n\n form = CreateAccountForm(request.POST)\n if form.is_valid():\n\n recaptcha_response = request.POST.get('g-recaptcha-response')\n remote_ip = request.META['REMOTE_ADDR']\n\n result = util.verify_recaptcha(recaptcha_response, remote_ip)\n\n # verify RECAPTCHAv2\n if result[\"success\"]:\n return_dict[\"errors\"] = util.create_user(form)\n else:\n return_dict[\"errors\"] = \"RECAPTCHAv2 Failed. Please don't be a Bot ... Zuckerberg\"\n else:\n return_dict['errors'] = \"Invalid input. 
Try again!\"\n\n    return render(request, 'login.html', return_dict)\n","repo_name":"RajsimmanRavi/netsoft_create","sub_path":"main_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"43224299577","text":"#!/usr/bin/env python\n\nimport rospy\nimport cv2\nfrom std_msgs.msg import String\nfrom std_msgs.msg import UInt16MultiArray \nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nimport numpy as np\nimport sys\n\nbridge = CvBridge()\ncentroid_pub = rospy.Publisher(\"centroids\", UInt16MultiArray, queue_size=10)\n\ndef read_rgb_image(image_name, show):\n    rgb_image = cv2.imread(image_name)\n    if show:\n        cv2.imshow(\"RGB Image\", rgb_image)\n    return rgb_image\n\ndef filter_colour(rgb_image, lower_bound_colour, upper_bound_colour):\n    # convert the image into the HSV colour space\n    hsv_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2HSV)\n\n    mask = cv2.inRange(hsv_image, lower_bound_colour, upper_bound_colour)\n\n    return mask\n\ndef image_callback(ros_image):\n    global bridge\n    global centroid_pub\n    # convert ros_image into an opencv-compatible image\n    try:\n        cv_image = bridge.imgmsg_to_cv2(ros_image, \"bgr8\")\n        depth_array = np.array(cv_image, dtype=np.float32)\n        print(depth_array.shape)\n    except CvBridgeError as e:\n        print(e)\n    # from now on, you can work exactly like with opencv\n    clothUpper = (27, 255, 255)\n    clothLower = (0, 126, 128)\n    binaryImage = filter_colour(cv_image, clothLower, clothUpper)\n\n    bgrImage = cv2.cvtColor(binaryImage, cv2.COLOR_GRAY2BGR)\n\n    # Apply an erosion + dilation to get rid of small noise:\n\n    # Set kernel (structuring element) size:\n    kernelSize = 3\n\n    # Set operation iterations:\n    opIterations = 3\n\n    # Get the structuring element:\n    maxKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernelSize, kernelSize))\n\n    # Perform opening:\n    openingImage = cv2.morphologyEx(binaryImage, cv2.MORPH_OPEN, maxKernel, None, None, opIterations, cv2.BORDER_REFLECT101)\n\n    contours, _ = cv2.findContours(openingImage, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_L1)\n    centres = []\n    for i in range(len(contours)):\n        area = cv2.contourArea(contours[i]) \n        if area < 1000:\n            continue \n        imageMoments = cv2.moments(contours[i])\n        # Compute centroid\n        if imageMoments[\"m00\"] != 0:\n            cx = int(imageMoments['m10']/imageMoments['m00'])\n            cy = int(imageMoments['m01']/imageMoments['m00'])\n        else:\n            # set values as what you need in the situation\n            cx, cy = 0, 0\n\n        centres.append([cx, cy])\n        cv2.circle(bgrImage, tuple(centres[-1]), 3, (0, 255, 0), -1)\n\n    print(centres)\n    centre_msg = UInt16MultiArray()\n    # UInt16MultiArray.data must be a flat list of ints, so flatten the centroid pairs\n    centre_msg.data = [value for point in centres for value in point]\n    centroid_pub.publish(centre_msg)\n    cv2.imshow(\"centroid\", bgrImage)\n    cv2.waitKey(3)\n\n    \ndef main(args):\n    rospy.init_node('image_converter', anonymous=True)\n    # for turtlebot3 waffle\n    #image_topic=\"/camera/rgb/image_raw/compressed\"\n    # for usb cam\n    #image_topic=\"/usb_cam/image_raw\"\n    image_sub = rospy.Subscriber(\"/camera/color/image_raw\",Image, image_callback)\n    try:\n        rospy.spin()\n    except KeyboardInterrupt:\n        print(\"Shutting down\")\n    cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n    main(sys.argv)","repo_name":"Shopper-Capstone/object_detection","sub_path":"script/opencv_node.py","file_name":"opencv_node.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"5812427925","text":"import socket\nimport 

http.server\nimport socketserver\nimport threading\nimport struct\n\n\nHTTP_PORT = 8000\nFAKE_SERVER_PORT=10005\nJAK_PW=\"12345678\"\nOWN_IP=\"172.18.18.103\"\nJAK_IP = \"172.18.18.1\"\nUDP_PORT = 20000\nMESSAGE = \"CONN_NOTIFY,\"+OWN_IP+\":\"+str(FAKE_SERVER_PORT)+\",FakeDroid v4.4.4,uuid:36220724-95ff-4a61-a85e-f3859499f865\"\nUPGRADE_MESSAGE=\"http://\"+OWN_IP+\":\"+str(HTTP_PORT)+\"/upgrade.txt,\"+JAK_PW\n\nclass ThreadedHTTPServer(socketserver.ThreadingMixIn,http.server.HTTPServer):\n    pass\nhttpd = ThreadedHTTPServer(('', HTTP_PORT), http.server.SimpleHTTPRequestHandler)\nhttpd_thread = threading.Thread(target=httpd.serve_forever)\nhttpd_thread.daemon = True\nhttpd_thread.start()\n\n# our local server to answer the stick's requests\nprint(\"starting tcp server on port \"+str(FAKE_SERVER_PORT))\nour_server_address = ('0.0.0.0', FAKE_SERVER_PORT)\nserver_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_sock.bind(our_server_address)\nserver_sock.listen(1)\n\n\n# send a UDP packet to the stick to notify it of our existence\nprint(\"notifying JAK\")\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\nsock.sendto(bytes(MESSAGE, \"utf-8\"), (JAK_IP, UDP_PORT))\ni=0\nwhile True:\n    # Wait for a connection\n    connection, client_address = server_sock.accept()\n    try:\n        print ( 'connection from', client_address)\n        data = connection.recv(1024)\n        print('received \"%s\"' % data)\n        data = connection.recv(1024)\n        print('received \"%s\"' % data)\n        print('trying to set upgrade.txt as upgrade target')\n        \n        connection.sendall( struct.pack(\">BBH\"+str(len(UPGRADE_MESSAGE))+\"s1000x\",1,11,len(UPGRADE_MESSAGE),bytes(UPGRADE_MESSAGE,\"utf-8\"))) # update firmware command\n\n    finally:\n        pass\n        # Clean up the connection\n#        connection.close()\n","repo_name":"pfoet/jak_shaver","sub_path":"shaver.py","file_name":"shaver.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"21368044335","text":"# 1254732 Mauricio Corado\nleast = 18\nmost = 75\n\n\ndef get_age(): # get_age is used to error-check the age parameters\n    age = int(input())\n\n    if (least > age) or (age > most):\n        raise ValueError('Invalid age.')\n    return age\n\n\ndef fat_burning_heart_rate(age): # method used to calculate the fat-burning heart rate\n    hr = ((220 - age) * .7)\n    return hr\n\nif __name__ == \"__main__\": # main code checks for value errors\n    try:\n        age = get_age()\n        hr = fat_burning_heart_rate(age)\n        print('Fat burning heart rate for a {} year-old: {} bpm'.format(age, hr))\n    except ValueError as wrong:\n        print(wrong)\n        print('Could not calculate heart rate info.')\n        print('')","repo_name":"meece33/CIS2348Summer","sub_path":"Homework4/12.7.py","file_name":"12.7.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"70725903658","text":"from http import HTTPStatus\nimport logging\n\nfrom fastapi import HTTPException\n\nfrom app.api.exceptions import (ExtraFieldsException, FieldNotFountException,\n                                TasksListIsEmptyException)\nfrom app.core.config import TASKS\nfrom app.models.build import Build\n\n\ndef check_build_is_exists(build_objects: list, build_name: str) -> Build:\n    \"\"\"Checks that the given build exists. 

\"\"\"\n for build in build_objects:\n if build.name == build_name:\n return build\n message = f'Билда с именем {build_name} не существует'\n logging.error(message)\n raise HTTPException(HTTPStatus.BAD_REQUEST, message)\n\n\ndef _check_required_fields(\n obj: dict,\n obj_fields: list,\n file_path: str,\n) -> None:\n \"\"\"Проверяет наличие обязательных полей в файле. \"\"\"\n fields = obj.keys()\n for field in obj_fields:\n if field not in fields:\n message = f'отсутсвует поле {field} в файле {file_path}'\n logging.error(message)\n raise FieldNotFountException(message)\n\n\ndef _check_max_fields(obj: dict, obj_fields: list, file_path: str) -> None:\n \"\"\"Проверяет наличие лишних полей в файле. \"\"\"\n fields = obj.keys()\n if len(fields) > len(obj_fields):\n message = (\n f'Найдены лишние поля в файле {file_path}: {fields - obj_fields}')\n logging.error(message)\n raise ExtraFieldsException(message)\n\n\ndef _check_tasks_is_not_empty(obj) -> None:\n \"\"\"Проверяет, что в билде присутствует минимум одна задача. \"\"\"\n if TASKS in obj:\n if not obj.get('tasks'):\n message = (\n 'Проверьте, что у всех билдов указана как минимум одна задача')\n logging.error(message)\n raise TasksListIsEmptyException(message)\n\n\ndef check_yaml_file_fields(\n obj: dict,\n obj_fields: list,\n file_path: str,\n) -> None:\n \"\"\"Проверяет поля переданного yaml файла. \"\"\"\n _check_required_fields(obj, obj_fields, file_path)\n _check_max_fields(obj, obj_fields, file_path)\n _check_tasks_is_not_empty(obj)\n","repo_name":"VadimVolkovsky/saber_app","sub_path":"app/api/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"36299026452","text":"import os\n\nfrom ament_index_python.packages import get_package_share_directory\nfrom launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument\nfrom launch.actions import IncludeLaunchDescription\nfrom launch.actions import LogInfo\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\nfrom launch.substitutions import LaunchConfiguration\nfrom launch_ros.actions import Node\nfrom launch_ros.substitutions import FindPackageShare\n\ndef generate_launch_description():\n\n ld = LaunchDescription()\n\n use_sim_time = LaunchConfiguration('use_sim_time', default='false')\n\n package_name = 'robot_sim'\n urdf_name = \"robot.urdf\" \n package_path = FindPackageShare(package = package_name).find(package_name) \n urdf_path = os.path.join(package_path, 'urdf', urdf_name)\n param_path = os.path.join(package_path, 'param', 'params.yaml')\n launch_path = os.path.join(package_path, 'launch')\n\n with open(urdf_path, 'r') as urdf:\n robot_desc = urdf.read()\n\n info = LogInfo(\n msg=['Execute Robot Sim']\n )\n\n robot_sim_node = Node(\n package=package_name,\n executable='robot_sim',\n parameters=[param_path],\n output='screen'\n )\n\n robot_state_publisher_node = Node(\n package='robot_state_publisher',\n executable='robot_state_publisher',\n name='robot_state_publisher',\n output='screen',\n arguments=[urdf_path],\n parameters=[{\n 'use_sim_time': use_sim_time,\n 'robot_description': robot_desc\n }]\n )\n \n rviz_launch = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n os.path.join(launch_path, 'rviz2.launch.py')\n )\n )\n\n ld.add_action(info)\n ld.add_action(robot_sim_node)\n ld.add_action(robot_state_publisher_node)\n ld.add_action(rviz_launch)\n\n return 
ld\n","repo_name":"fan-ziqi/My_ROS_Robot","sub_path":"Software/ROS2-Robot-WS/src/robot_sim/launch/robot_sim.launch.py","file_name":"robot_sim.launch.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"90"} +{"seq_id":"1849931576","text":"sr = [0,1,1]\nsc = [-1,0,-1]\n\ndef elimTile(board, blocks):\n \n for row, col in blocks:\n board[row][col] = '1'\n \n for row in board:\n while row.count('1'):\n row.pop(row.index('1'))\n row.append('0')\n return \n\ndef isBlock(r,c,board,blocks):\n tile = board[r][c]\n \n for i in range(3):\n row,col = r+sr[i],c+sc[i]\n if board[row][col] != tile or board[row][col] == '0':\n return False\n \n blocks.add((r,c))\n for i in range(3):\n blocks.add((r+sr[i],c+sc[i]))\n \n return True\n \ndef solution(m, n, board):\n answer = 0\n board = [[board[i][j] for i in range(m-1,-1,-1)] for j in range(n)]\n\n isFound = 1\n while isFound:\n isFound = 0\n blocks = set()\n \n for j in range(m-1,0,-1):\n for i in range(n-1): \n if isBlock(i,j,board,blocks):\n isFound = 1\n \n answer += len(blocks)\n \n elimTile(board,blocks)\n \n return answer","repo_name":"GANGESHOTTEOK/yaman-algorithm","sub_path":"23_programmers_2018_kakao_blind_recruitment/AN/friends_4_block.py","file_name":"friends_4_block.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"18205271539","text":"import sys\nimport numpy as np\ninput = sys.stdin.readline\nread = sys.stdin.read\n\n\ndef log(*args):\n print(*args, file=sys.stderr)\n\n\ndef main():\n np.set_printoptions(threshold=20)\n N = int(input())\n AB = np.array(read().split(), dtype=np.int)\n A = AB[0::2]\n B = AB[1::2]\n A.sort()\n B.sort()\n if N % 2 == 1:\n n = N // 2\n print(B[n] - A[n] + 1)\n else:\n n = N // 2\n print((B[n] + B[n - 1]) - (A[n] + A[n - 1]) + 1)\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02661/s739071732.py","file_name":"s739071732.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"41466546045","text":"# pylint: disable=redefined-outer-name\nimport pathlib\nimport pytest\nimport numpy as np\nfrom pytest import MonkeyPatch\nfrom pytest_mock import MockerFixture, mocker # noqa: F401\n\nfrom libretro_finder.main import organize, main\nfrom libretro_finder.utils import hash_file\nfrom tests import TEST_SAMPLE_SIZE\nfrom tests.fixtures import setup_files # noqa: F401\n\nclass TestOrganize:\n \"\"\"Bundle of pytest asserts for main.organize\"\"\"\n\n def test_matching(\n self, setup_files, tmp_path: pathlib.Path, monkeypatch: MonkeyPatch\n ) -> None:\n \"\"\"main.organize with (only) matching files\n\n :param setup_files: A pytest fixture that generates fake BIOS files and reference dataframe\n :param tmp_path: A pytest fixture that creates a temporary directory unique to this test\n :param monkeypatch: A pytest fixture that allows us to set certain testing conditions\n \"\"\"\n\n bios_dir, bios_lut = setup_files\n assert bios_dir.exists()\n\n # checking saved files (excluding directories)\n file_paths = list(bios_dir.rglob(\"*\"))\n file_paths = [file_path for file_path in file_paths if file_path.is_file()]\n assert len(file_paths) == TEST_SAMPLE_SIZE\n\n # making output_dir\n output_dir = tmp_path / \"test_matching\"\n output_dir.mkdir()\n\n # checking if currently empty\n 
output_paths = list(output_dir.rglob(pattern=\"*\"))\n assert len(output_paths) == 0\n\n # swapping out system_df to the one generated from setup_files\n # this is needed because we can't include actual bios files for testing\n monkeypatch.setattr(\"libretro_finder.main.system_df\", bios_lut)\n organize(search_dir=bios_dir, output_dir=output_dir)\n\n # verifying correct output\n output_paths = list(output_dir.rglob(pattern=\"*\"))\n output_paths = [\n output_path for output_path in output_paths if output_path.is_file()\n ]\n output_names = [\n output_path.relative_to(output_dir).as_posix()\n for output_path in output_paths\n ]\n output_hashes = [hash_file(output_path) for output_path in output_paths]\n\n assert len(output_paths) == TEST_SAMPLE_SIZE\n assert bios_lut.shape[0] == len(output_paths)\n assert np.all(np.isin(output_hashes, bios_lut[\"md5\"].values))\n assert np.all(np.isin(bios_lut[\"name\"].values, output_names))\n\n def test_non_matching(self, setup_files, tmp_path: pathlib.Path) -> None:\n \"\"\"main.organize without matching files\n\n :param setup_files: A pytest fixture that generates fake BIOS files and reference dataframe\n :param tmp_path: A pytest fixture that creates a temporary directory unique to this test\n \"\"\"\n\n # same as 'matching' test but without monkeypatching (i.e. different hashes so no matches)\n bios_dir, _ = setup_files\n assert bios_dir.exists()\n\n # checking saved files (excluding directories)\n file_paths = list(bios_dir.rglob(\"*\"))\n file_paths = [file_path for file_path in file_paths if file_path.is_file()]\n assert len(file_paths) == TEST_SAMPLE_SIZE\n\n # making output_dir\n output_dir = tmp_path / \"test_non_matching\"\n output_dir.mkdir()\n\n # checking if currently empty\n output_paths = list(output_dir.rglob(pattern=\"*\"))\n assert len(output_paths) == 0\n\n # running organize with libretro's dataframe\n organize(search_dir=bios_dir, output_dir=output_dir)\n\n # checking if still empty\n assert len(list(output_dir.rglob(\"*\"))) == 0\n\n def test_empty(self, tmp_path: pathlib.Path) -> None:\n \"\"\"main.organize with empty search_dir\n\n :param tmp_path: A pytest fixture that creates a temporary directory unique to this test\n \"\"\"\n\n input_dir = tmp_path / \"input\"\n output_dir = tmp_path / \"output\"\n input_dir.mkdir()\n output_dir.mkdir()\n\n # checking if exists but empty\n assert input_dir.exists()\n assert len(list(input_dir.rglob(\"*\"))) == 0\n assert output_dir.exists()\n assert len(list(output_dir.rglob(\"*\"))) == 0\n\n # checking if still empty\n organize(search_dir=input_dir, output_dir=output_dir)\n assert len(list(output_dir.rglob(\"*\"))) == 0\n\n def test_same_input(self, setup_files, monkeypatch: MonkeyPatch) -> None:\n \"\"\"main.organize with shared input for search_dir and output_dir (verifies if non-additive)\n\n :param setup_files: A pytest fixture that generates fake BIOS files and reference dataframe\n :param monkeypatch: A pytest fixture that allows us to set certain testing conditions\n \"\"\"\n\n # organize but with (prepopulated) bios_dir as input and output\n bios_dir, bios_lut = setup_files\n assert bios_dir.exists()\n\n # checking saved files (excluding directories)\n file_paths = list(bios_dir.rglob(\"*\"))\n file_paths = [file_path for file_path in file_paths if file_path.is_file()]\n current_len = len(file_paths)\n assert current_len == TEST_SAMPLE_SIZE\n\n # swapping out system_df to the one generated from setup_files\n # this is needed because we can't include actual bios files for testing\n 
monkeypatch.setattr(\"libretro_finder.main.system_df\", bios_lut)\n organize(search_dir=bios_dir, output_dir=bios_dir)\n\n # verifying correct output\n output_paths = list(bios_dir.rglob(pattern=\"*\"))\n output_paths = [\n output_path for output_path in output_paths if output_path.is_file()\n ]\n output_names = [\n output_path.relative_to(bios_dir).as_posix() for output_path in output_paths\n ]\n output_hashes = [hash_file(output_path) for output_path in output_paths]\n\n assert len(output_paths) == current_len\n assert bios_lut.shape[0] == len(output_paths)\n assert np.all(np.isin(output_hashes, bios_lut[\"md5\"].values))\n assert np.all(np.isin(bios_lut[\"name\"].values, output_names))\n\n\nclass TestMain:\n \"\"\"Bundle of pytest asserts for main.main\"\"\"\n\n def test_main(self, tmp_path: pathlib.Path, mocker: MockerFixture):\n \"\"\"main.main with valid input\n\n :param tmp_path: A pytest fixture that creates a temporary directory unique to this test\n :param mocker: A pytest fixture that mocks specific objects for testing purposes\n \"\"\"\n\n # Mocking the organize function to prevent actual file operations\n mock_organize = mocker.patch(\"libretro_finder.main.organize\")\n\n search_dir = tmp_path / \"search\"\n output_dir = tmp_path / \"output\"\n search_dir.mkdir()\n output_dir.mkdir()\n\n argv = [str(search_dir), str(output_dir)]\n main(argv)\n mock_organize.assert_called_once_with(\n search_dir=search_dir, output_dir=output_dir\n )\n\n def test_main_search_directory_not_exists(self, tmp_path: pathlib.Path):\n \"\"\"main.main with non-existent search_dir\n\n :param tmp_path: A pytest fixture that creates a temporary directory unique to this test\n \"\"\"\n\n output_dir = tmp_path / \"output\"\n output_dir.mkdir()\n\n argv = [\"/path/to/nonexistent/search\", str(output_dir)]\n with pytest.raises(FileNotFoundError):\n main(argv)\n\n def test_main_search_directory_not_directory(self, tmp_path: pathlib.Path):\n \"\"\"main.main with file as search_dir\n\n :param tmp_path: A pytest fixture that creates a temporary directory unique to this test\n \"\"\"\n\n file_path = tmp_path / \"search.txt\"\n output_dir = tmp_path / \"output\"\n file_path.touch()\n output_dir.mkdir()\n\n argv = [str(file_path), str(output_dir)]\n with pytest.raises(NotADirectoryError):\n main(argv)\n","repo_name":"jaspersiebring/libretro_finder","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":7848,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"90"} +{"seq_id":"10401912255","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Module containing client functionality for the MDP implementation.\n\nFor the MDP specification see: http://rfc.zeromq.org/spec:7\n\"\"\"\n\n__license__ = \"\"\"\n This file is part of MDP.\n\n MDP is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n MDP is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with MDP. 
If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n__author__ = 'Guido Goldstein'\n__email__ = 'gst-py@a-nugget.de'\n\nfrom exceptions import UserWarning\n\nimport zmq\nfrom zmq.eventloop.zmqstream import ZMQStream\nfrom zmq.eventloop.ioloop import IOLoop, DelayedCallback\n\n###\n\nPROTO_VERSION = b'MDPC01'\n\n###\n\nclass InvalidStateError(RuntimeError):\n    \"\"\"Exception raised when the requested action is not available due to socket state.\n    \"\"\"\n    pass\n#\n\nclass RequestTimeout(UserWarning):\n    \"\"\"Exception raised when the request timed out.\n    \"\"\"\n    pass\n#\n###\n\nclass MDPClient(object):\n\n    \"\"\"Class for the MDP client side.\n\n    Thin asynchronous encapsulation of a zmq.REQ socket.\n    Provides a :func:`request` method with optional timeout.\n\n    Objects of this class are meant to be integrated into the\n    asynchronous IOLoop of pyzmq.\n\n    :param context: the ZeroMQ context to create the socket in.\n    :type context: zmq.Context\n    :param endpoint: the endpoint to connect to.\n    :type endpoint: str\n    :param service: the service the client should use\n    :type service: str\n    \"\"\"\n\n    _proto_version = b'MDPC01'\n\n    def __init__(self, context, endpoint, service):\n        \"\"\"Initialize the MDPClient.\n        \"\"\"\n        socket = context.socket(zmq.REQ)\n        ioloop = IOLoop.instance()\n        self.service = service\n        self.endpoint = endpoint\n        self.stream = ZMQStream(socket, ioloop)\n        self.stream.on_recv(self._on_message)\n        self.can_send = True\n        self._proto_prefix = [ PROTO_VERSION, service]\n        self._tmo = None\n        self.timed_out = False\n        socket.connect(endpoint)\n        return\n\n    def shutdown(self):\n        \"\"\"Method to deactivate the client connection completely.\n\n        Will delete the stream and the underlying socket.\n\n        .. warning:: The instance MUST not be used after :func:`shutdown` has been called.\n\n        :rtype: None\n        \"\"\"\n        if not self.stream:\n            return\n        self.stream.socket.setsockopt(zmq.LINGER, 0)\n        self.stream.socket.close()\n        self.stream.close()\n        self.stream = None\n        return\n\n    def request(self, msg, timeout=None):\n        \"\"\"Send the given message.\n\n        :param msg: message parts to send.\n        :type msg: list of str\n        :param timeout: time to wait in milliseconds.\n        :type timeout: int\n        \n        :rtype: None\n        \"\"\"\n        if not self.can_send:\n            raise InvalidStateError()\n        # prepare full message\n        to_send = self._proto_prefix[:]\n        to_send.extend(msg)\n        self.stream.send_multipart(to_send)\n        self.can_send = False\n        if timeout:\n            self._start_timeout(timeout)\n        return\n\n    def _on_timeout(self):\n        \"\"\"Helper called after timeout.\n        \"\"\"\n        self.timed_out = True\n        self._tmo = None\n        self.on_timeout()\n        return\n\n    def _start_timeout(self, timeout):\n        \"\"\"Helper for starting the timeout.\n\n        :param timeout: the time to wait in milliseconds.\n        :type timeout: int\n        \"\"\"\n        self._tmo = DelayedCallback(self._on_timeout, timeout)\n        self._tmo.start()\n        return\n\n    def _on_message(self, msg):\n        \"\"\"Helper method called on message receive.\n\n        :param msg: list of message parts.\n        :type msg: list of str\n        \"\"\"\n        if self._tmo:\n            # disable timeout\n            self._tmo.stop()\n            self._tmo = None\n        # setting state before invoking on_message, so we can request from there\n        self.can_send = True\n        self.on_message(msg)\n        return\n\n    def on_message(self, msg):\n        \"\"\"Public method called when a message arrived.\n\n        .. note:: Does nothing. Should be overloaded!\n        \"\"\"\n        pass\n\n    def on_timeout(self):\n        \"\"\"Public method called when a timeout occurred.\n\n        .. note:: Does nothing. 

Should be overloaded!\n \"\"\"\n pass\n#\n###\n\nfrom zmq.core.poll import select\n\ndef mdp_request(socket, service, msg, timeout=None):\n \"\"\"Synchronous MDP request.\n\n This function sends a request to the given service and\n waits for a reply.\n\n If timeout is set and no reply received in the given time\n the function will return `None`.\n\n :param socket: zmq REQ socket to use.\n :type socket: zmq.Socket\n :param service: service id to send the msg to.\n :type service: str\n :param msg: list of message parts to send.\n :type msg: list of str\n :param timeout: time to wait for answer in seconds.\n :type timeout: float\n\n :rtype list of str:\n \"\"\"\n if not timeout or timeout < 0.0:\n timeout = None\n to_send = [PROTO_VERSION, service]\n to_send.extend(msg)\n socket.send_multipart(to_send)\n ret = None\n rlist, _, _ = select([socket], [], [], timeout)\n if rlist and rlist[0] == socket:\n ret = socket.recv_multipart()\n ret.pop(0) # remove service from reply\n return ret\n#\n###\n\n### Local Variables:\n### buffer-file-coding-system: utf-8\n### mode: python\n### End:\n","repo_name":"shykes/pyzmq-mdp","sub_path":"mdp/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5891,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} +{"seq_id":"13355984251","text":"import api_flow\nimport argparse\nimport os\nimport sys\n\n\nparser = argparse.ArgumentParser(\n description='api-flow: an API chaining tool',\n prog='api_flow'\n)\nparser.add_argument(\n 'flow_name',\n metavar='flow',\n type=str,\n help='basename of the YAML file containing a flow definition'\n)\npaths = parser.add_argument_group('optional data paths', 'Customize the data file locations for api-flow')\npaths.add_argument(\n '--data-path',\n dest='data_path',\n type=str,\n metavar='DIR',\n help='a directory containing flow configuration data (default: current directory)'\n)\npaths.add_argument(\n '--flow-path',\n dest='flow_path',\n type=str,\n metavar='DIR',\n help='a directory containing flow definitions (default: /flows)'\n)\npaths.add_argument(\n '--function-path',\n dest='function_path',\n type=str,\n metavar='DIR',\n help='a python module exposing user-defined template-substitution functions (default: /functions)'\n)\npaths.add_argument(\n '--profile-path',\n dest='profile_path',\n type=str,\n metavar='DIR',\n help='a directory containing profile YAML files (default: /profiles)'\n)\npaths.add_argument(\n '--template-path',\n dest='template_path',\n type=str,\n metavar='DIR',\n help='a directory containing template files (default: /templates)'\n)\nparser.add_argument(\n '--profile',\n action='append',\n type=str,\n metavar='PROFILE',\n help='basename of a profile YAML file to include (multiple --profile flags are allowed)'\n)\n\n\nargs = parser.parse_args()\n\napi_flow.configure(\n data_path=args.data_path,\n flow_path=args.flow_path,\n function_path=args.function_path,\n profile_path=args.profile_path,\n template_path=args.template_path\n)\nprint('DATA PATHS:', file=sys.stderr)\nprint(f' Base: {api_flow.Config.data_path}', file=sys.stderr)\nprint(f' Flows: {api_flow.Config.flow_path}', file=sys.stderr)\nprint(f'Functions: {api_flow.Config.function_path}', file=sys.stderr)\nprint(f' Profiles: {api_flow.Config.profile_path}', file=sys.stderr)\nprint(f'Templates: {api_flow.Config.template_path}', file=sys.stderr)\n\nif args.profile:\n print('PROFILES:', file=sys.stderr)\n for profile in args.profile:\n print(f' - {os.path.join(api_flow.Config.profile_path, 
profile)}.yaml', file=sys.stderr)\n\nprint(f'FLOW:\\n {os.path.join(api_flow.Config.flow_path, args.flow_name)}.yaml', file=sys.stderr)\n\napi_flow.execute(args.flow_name, profiles=args.profile)\n","repo_name":"rothomas/api_flow","sub_path":"src/api_flow/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"37794037147","text":"\"\"\" The training parameters are set in accordance with the setup we used for BERT\n    Feel free to change parameters as you see fit\n    Use the parameters described in our paper to reproduce results for other models (XLM RoBERTA and DistilBERT)\"\"\"\n\nmodel_type = \"bert\"\n# model_type = \"xlmroberta\"\n# model_type = \"distilbert\"\nmodel_name = \"bert-base-multilingual-cased\"\n# model_name = \"xlm-roberta-base\"\n# model_name = \"distilbert-base-multilingual-cased\"\n\ntrain_args = {\n    \"max_seq_length\": 512,\n    \"num_train_epochs\": 15,\n    \"fp16\" : True,\n    # \"scheduler\" : \"polynomial_decay_schedule_with_warmup\",\n    # \"polynomial_decay_schedule_lr_end\": 1e-7,\n    # \"polynomial_decay_schedule_power\": 2.0,\n    \"scheduler\" : \"cosine_schedule_with_warmup\",\n    \"cosine_schedule_num_cycles\": 0.5,\n    #\"scheduler\" : \"constant_schedule\",\n    \"learning_rate\": 1e-5,\n    \"train_batch_size\": 8,\n    \"weight_decay\": 1,\n    \"reprocess_input_data\": False,\n    \"overwrite_output_dir\": True,\n    \"use_cached_eval_features\": False,\n    \"no_save\": False,\n    \"use_early_stopping\": False,\n    \"evaluate_during_training\": False,\n    # the following two parameters apply when this script is run from Google Colab with wandb connected to Colab\n    # \"wandb_project\": \"BERT_final\", #change here for each separate run\n    # \"wandb_kwargs\": {\"name\": \"final_\"+str(FoldIndex),\"entity\" :'fakenewscovid'}, #change here for each separate run\n    \"save_model_every_epoch\": False,\n    \"save_eval_checkpoints\": False,\n    \"evaluate_during_training_verbose\" : True\n    }","repo_name":"ProtikBose/Bengali-Covid-Fake-News","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"40102810265","text":"import unittest\nfrom pyunitreport import HTMLTestRunner\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\n\nclass HelloWorld(unittest.TestCase):\n\n    @classmethod \n    def setUpClass(cls):\n        service = Service('/usr/bin/chromedriver')\n        cls.driver = webdriver.Chrome(service=service)\n        driver = cls.driver\n        driver.implicitly_wait(10)\n\n    def test_hello_world(self):\n        driver = self.driver\n        driver.get('https://www.platzi.com')\n        \n    def test_visit_wikipedia(self):\n        driver = self.driver\n        driver.get('https://www.wikipedia.org')\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.driver.quit()\n\n\nif __name__ == \"__main__\":\n    unittest.main(verbosity=2, testRunner=HTMLTestRunner(output='reports', report_name='hello-world-report'))\n\n","repo_name":"malonr/python_selenium","sub_path":"hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"39424871015","text":"import sqlite3\n\n############### Configuration ####################\n# Database name\nDB_NAME = \"datosPECL\"\n\n# SQL file with the definition of 
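The model_type / model_name / train_args trio above follows Simple Transformers conventions, so a typical consumer would look roughly like this sketch; the DataFrames are toy stand-ins, not the paper's Bengali COVID fake-news data:

import pandas as pd
from simpletransformers.classification import ClassificationModel
from config import model_type, model_name, train_args  # the module above

# Toy stand-in data; real runs would load the actual dataset splits.
train_df = pd.DataFrame({"text": ["real story", "fake story"], "labels": [0, 1]})
eval_df = pd.DataFrame({"text": ["another story"], "labels": [0]})

model = ClassificationModel(model_type, model_name, num_labels=2, args=train_args)
model.train_model(train_df)
result, model_outputs, wrong_predictions = model.eval_model(eval_df)
print(result)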
the tables\nSQL_File_Name = \"CrearTablas.sql\"\n##############################################\n\n# Load the SQL file into a variable and strip the line breaks\nTableSchema=\"\"\nwith open(SQL_File_Name, 'r') as SchemaFile:\n    TableSchema=SchemaFile.read().replace('\\n', '')\n\n# Create the new database\nconn = sqlite3.connect(DB_NAME)\ncurs = conn.cursor()\n\n# Run the table-creation statements\nsqlite3.complete_statement(TableSchema)\ncurs.executescript(TableSchema)\n\n# Close the database connection\ncurs.close()\nconn.close()","repo_name":"Marcos-Barranquero/UBICUA-UAH","sub_path":"PECL2/Codigo/InicializarTablas.py","file_name":"InicializarTablas.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"22176351674","text":"from setuptools import setup\n\n__project__ = \"rpg_game\"\n__version__ = \"0.0.1\"\n__description__ = \"A text-based RPG game\"\n__packages__ = [\"rpg_game\"]\n__author__ = \"Caitlin Bailey\"\n__author_email__ = \"caitlinbailey9@gmail.com\"\n\nsetup(\n    name = __project__,\n    version = __version__,\n    description = __description__,\n    packages = __packages__,\n    author = __author__,\n    author_email = __author_email__,\n)","repo_name":"caitlingbailey/python-oop","sub_path":"source/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"15078969680","text":"import heapq\n\nclass Vertex(object):\n    \"\"\"docstring for Vertex.\"\"\"\n    def __init__(self, name):\n        super(Vertex, self).__init__()\n        self.name = name\n        self.visited = False\n        self.adjacenciesList = []\n        self.predecessor = None\n\n    def __str__(self):\n        return self.name\n\nclass Edge(object):\n    \"\"\"docstring for Edge.\"\"\"\n    def __init__(self, weight, startVertex, targetVertex):\n        super(Edge, self).__init__()\n        self.weight = weight\n        self.startVertex = startVertex\n        self.targetVertex = targetVertex\n\n    def __cmp__(self, otherEdge):\n        # Python 2 comparison hook, spelled out since self.cmp did not exist\n        return (self.weight > otherEdge.weight) - (self.weight < otherEdge.weight)\n\n    def __lt__(self, other):\n        selfPriority = self.weight\n        otherPriority = other.weight\n\n        return selfPriority < otherPriority\n\nclass PrimsJarnik(object):\n    \"\"\"docstring for PrimsJarnik.\"\"\"\n    def __init__(self, unvisitedList):\n        super(PrimsJarnik, self).__init__()\n        self.unvisitedList = unvisitedList\n        self.spanningTree = []\n        self.cost = 0\n        self.edgeHeap = []\n\n    def calculateSpanningTree(self, vertex):\n        self.unvisitedList.remove(vertex)\n\n        while self.unvisitedList:\n            for edge in vertex.adjacenciesList:\n                if edge.targetVertex in self.unvisitedList:\n                    heapq.heappush(self.edgeHeap, edge)\n\n            minEdge = heapq.heappop(self.edgeHeap)\n\n            self.spanningTree.append(minEdge)\n            print(\"Edge added to spanningTree i.e. from {} to {}\".format(minEdge.startVertex.name, minEdge.targetVertex.name))\n            self.cost = self.cost + minEdge.weight\n            vertex = minEdge.targetVertex\n            self.unvisitedList.remove(vertex)\n\n    def getSpanningTree(self):\n        return self.spanningTree\n\nnode1 = Vertex(\"A\");\nnode2 = Vertex(\"B\");\nnode3 = Vertex(\"C\");\n\nedge1 = Edge(100,node1,node2);\nedge2 = Edge(100,node2,node1);\nedge3 = Edge(1000,node1,node3);\nedge4 = Edge(1000,node3,node1);\nedge5 = Edge(0.01,node3,node2);\nedge6 = 
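The same initialisation pattern in miniature, with a hypothetical schema standing in for CrearTablas.sql; note that executescript accepts multi-line SQL as-is, and stripping the newlines as above would actually break any "--" line comments in the file:

import sqlite3

# Hypothetical schema; the table names are illustrative only.
schema = """
CREATE TABLE IF NOT EXISTS sensors (id INTEGER PRIMARY KEY, name TEXT);
CREATE TABLE IF NOT EXISTS readings (
    sensor_id INTEGER REFERENCES sensors(id),
    value REAL
);
"""

conn = sqlite3.connect(":memory:")
conn.executescript(schema)  # runs several statements in one call
print(conn.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall())
conn.close()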
Edge(0.01,node2,node3);\n\nnode1.adjacenciesList.append(edge1);\nnode1.adjacenciesList.append(edge3);\nnode2.adjacenciesList.append(edge2);\nnode2.adjacenciesList.append(edge6);\nnode3.adjacenciesList.append(edge4);\nnode3.adjacenciesList.append(edge5);\n\nunvisitedList = [];\nunvisitedList.append(node1);\nunvisitedList.append(node2);\nunvisitedList.append(node3);\n\nalgorithm = PrimsJarnik(unvisitedList);\nalgorithm.calculateSpanningTree(node2);\n","repo_name":"PeterChencha/python_data_structures","sub_path":"Graph/prims.py","file_name":"prims.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"40295616014","text":"# -*- coding: utf-8 -*-\n\n### CSF3 setup instructions\n# module load apps/anaconda3/5.2.0/bin\n# pip install --user bng_latlon\n#\n \n\n\nfrom pathlib import Path\nimport os\n#import shutil\n#import netCDF4 as nc\nimport numpy as np\nimport xarray as xr\nimport pandas as pd\n#from bng_to_latlon import OSGB36toWGS84\n\n\nop_root = '/mnt/iusers01/support/mbessdl2/Projects/Emissions_Processing/Emission_Datasets/'\n#op_root = '/Users/mbessdl2/work/manchester/EMEP/example_emissions_processing/Emission_Datasets/'\n\n## path to excel spreadsheet with point source information\nps_file = op_root+'NAEI_2016/download_data/'+'NAEIPointsSources_2016.xlsx'\n\n\n## path to netcdf files containing area emission information\nnc_origin = op_root+'NAEI_2016/emissions_netcdf/original_emep/'\nnc_work = op_root+'NAEI_2016/emissions_netcdf/'\n\nnc_head = ['ch4', 'voc', 'so2', 'co', 'nh3', 'nox', 'pmco', 'pm25']\ntailstr = '_emiss.nc'\n\n\n#nc_files = [x + tailstr for x in nc_head]\n#nc_fpath = [nc_work + x for x in nc_files]\n#print(nc_files)\n\n#%% setting the sector mapping for the emissions\n\nsector_mapping = {}\nsector_mapping['Iron & steel industries'] = 'indcom'\nsector_mapping['Waste collection, treatment & disposal'] = 'waste'\nsector_mapping['Other industries'] = 'indproc'\nsector_mapping['Paper, printing & publishing industries'] = 'indproc'\nsector_mapping['Other mineral industries'] = 'indproc'\nsector_mapping['Vehicles'] = 'roadtrans'\nsector_mapping['Oil & gas exploration and production'] = 'offshore'\nsector_mapping['Non-ferrous metal industries'] = 'indcom'\nsector_mapping['Food, drink & tobacco industry'] = 'indproc'\nsector_mapping['Textiles, clothing, leather & footwear'] = 'indproc'\nsector_mapping['Chemical industry'] = 'solvents'\nsector_mapping['Other fuel production'] = 'indproc'\nsector_mapping['Major power producers'] = 'energyprod'\nsector_mapping['Electrical engineering'] = 'indproc'\nsector_mapping['Lime'] = 'indproc'\nsector_mapping['Cement'] = 'indproc'\nsector_mapping['Public administration'] = 'domcom'\nsector_mapping['Processing & distribution of petroleum products'] = 'offshore'\nsector_mapping['Processing & distribution of natural gas'] = 'offshore'\nsector_mapping['Agriculture, forestry & fishing'] = 'agric'\nsector_mapping['Commercial'] = 'domcom'\nsector_mapping['Water & sewerage'] = 'waste'\nsector_mapping['Minor power producers'] = 'energyprod'\nsector_mapping['Mechanical engineering'] = 'indproc'\nsector_mapping['Miscellaneous'] = 'indcom'\nsector_mapping['Construction'] = 'indproc'\n\nsector_list = ['energyprod','domcom','indcom','indproc','offshore',\\\n 'solvents','roadtrans','othertrans','waste','agric','nature']\n\nchem_species = ['Ammonia','Carbon monoxide','Oxides of nitrogen','Sulpher dioxide',\\\n 'Non-methane VOC','PM10','PM2.5']\nchem_list = 
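A small continuation sketch for the script above: once calculateSpanningTree has run, the chosen edges and total cost can be read back (this relies on the Vertex/Edge/PrimsJarnik classes defined there):

total = 0
for edge in algorithm.getSpanningTree():
    print("{} -> {} (weight {})".format(edge.startVertex.name, edge.targetVertex.name, edge.weight))
    total += edge.weight
# The algorithm tracks the same running total in algorithm.cost.
assert total == algorithm.cost
print("spanning tree cost:", total)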
nc_head\nchem_mapping = {}\nchem_mapping['Non-methane VOC'] = 'voc'\nchem_mapping['Sulpher dioxide'] = 'so2'\nchem_mapping['Carbon monoxide'] = 'co'\nchem_mapping['Ammonia'] = 'nh3'\nchem_mapping['Oxides of nitrogen'] = 'nox'\nchem_mapping['PM10'] = 'pmco'\nchem_mapping['PM2.5'] = 'pm25'\n\n\n#%% housekeeping, backing up existing output files so we don't accidentally delete these \nfor my_head in nc_head:\n    \n    my_file = my_head + tailstr\n    \n    my_new_file = nc_work + my_file\n\t# check that we don't have existing copies of the netcdf files, if there are then back them up\n    if Path(my_new_file).is_file():\n        print('backing up file: '+my_file)\n        os.rename(my_new_file,my_new_file+'.bckup')\n\t\n#    my_old_file = nc_origin + my_file\n#    # copy the original netcdf files to new location (to avoid any double counting!!!)\n#    if Path(my_old_file).is_file():\n#        print('copying file: '+my_file)\n#        shutil.copy2(my_old_file,my_new_file)\n#    else:\n#        print('file does not exist: '+my_file)\n\n#%% loading the original netcdf datafiles (we will *NOT* be writing to these)\n\norig_files = {}\n\nfor nhead in nc_head:\n    npath = nc_origin + nhead + tailstr\n    orig_files[nhead] = xr.open_mfdataset(npath)\n    \n    \n\n\n#%% functions for use below\n\n# https://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array\ndef find_nearest(array, value):\n    array = np.asarray(array)\n    idx = (np.abs(array - value)).argmin()\n    return array[idx][0] # we assume that only 1 value is found, this might only be true for 1D arrays?\n\n\n\n#%% set northing, easting mapping\n\nnorthing_centre = orig_files['ch4'].north.copy(deep=True).to_dataframe()\neasting_centre = orig_files['ch4'].east.copy(deep=True).to_dataframe()\n\n#easting_edges = easting_centre - 500.0\n#temp_array = pd.DataFrame([easting_edges.east.values[-1]+1000.0],columns=['east'],index=[easting_edges.shape[0]])\n#easting_edges = easting_edges.append(temp_array)\n\n#northing_edges = northing_centre - 500.0\n#temp_array = pd.DataFrame([northing_edges.north.values[-1]+1000.0],columns=['north'],index=[northing_edges.shape[0]])\n#northing_edges = northing_edges = northing_edges.append(temp_array)\n\neasting_grid, northing_grid = np.meshgrid(easting_centre,northing_centre)\n\neasting_grid = pd.DataFrame(easting_grid)\nnorthing_grid = pd.DataFrame(northing_grid)\n\n\n#%% creating temporary chemical storage arrays\n\ntemplate_array = easting_grid * 0.0\n\nchem_data = {}\nfor chem in chem_list:\n    chem_data[chem] = {}\n    for sector in sector_list:\n        chem_data[chem][sector] = template_array.copy(deep=True)\n\n\n#%% open excel spreadsheet, and extract the data sheet\nxlspread = pd.ExcelFile(ps_file)\npoint_src = xlspread.parse('Data')\n\n\n#%% sorting, and filtering point source data\n\n# get rid of unneeded fields\npsj = point_src.drop(['Year','Operator','Region','Unit','Site','Data Type','PollutantD','SectorID'],axis=1)\n\n# assuming that any data with North/East grid points = 0 are erroneous, so get rid of them\npsj_dropped_bad_location = psj[psj.Northing == 0.0]\npsj = psj[psj.Northing != 0.0]\n\n# scan for the nearest geographic grid centre for each data point\npsj['Northing_Grid'] = psj['Northing'].apply(lambda x: find_nearest(northing_centre, x))\npsj['Easting_Grid'] = psj['Easting'].apply(lambda x: find_nearest(easting_centre, x))\n\n# setting indexes, and sorting data\npsj_ne = psj.set_index(['Northing_Grid','Easting_Grid','Sector','PlantID','Pollutant']).sort_index()\n# select only the chemical data that we want (from the list given above)\npsj_ne_chem = 
psj_ne.loc(axis=0)[:,:,:,:,chem_species]\n\n\n#%% gridding the point source data onto temporary data grids\n\nfor east_index in psj_ne_chem.index.get_level_values(1).unique():\n print(east_index)\n east_pos = easting_centre[easting_centre.east==east_index].index.values.astype(int)[0]\n \n psj_temp_east = psj_ne_chem.loc(axis=0)[:,east_index,:,:,:]\n \n for north_index in psj_temp_east.index.get_level_values(0).unique():\n print('\\t\\t',north_index)\n north_pos = northing_centre[northing_centre.north==north_index].index.values.astype(int)[0]\n \n psj_temp_ne = psj_temp_east.loc(axis=0)[north_index,:,:,:,:]\n #print(psj_ne_chem.loc(axis=0)[north_index,east_index,:])\n \n for sect_index in psj_temp_ne.index.get_level_values(2).unique():\n if(sect_index in sector_mapping):\n psj_temp_sec = psj_temp_ne.loc(axis=0)[:,:,sect_index,:,:]\n \n sector = sector_mapping[sect_index]\n \n for site_id in psj_temp_sec.index.get_level_values(3).unique():\n psj_temp_site = psj_temp_sec.loc(axis=0)[:,:,:,site_id,:]\n \n for poll_index in psj_temp_site.index.get_level_values(4).unique():\n if(poll_index in chem_mapping):\n \n chem = chem_mapping[poll_index] \n \n chem_data[chem][sector].values[north_pos,east_pos] += \\\n psj_temp_site.loc(axis=0)[:,:,:,:,poll_index].Emission.values[0]\n \n else:\n print('skipping chemical species: ', poll_index)\n\n else:\n print('skipping sector: ',sect_index)\n\n\n#%% copying emissions data from temporary data grids into xarray data arrays, and write to new files\n#\n# During this process we will recalculate the total area sources, as well as the total sources,\n# in order that these can be compared to the values in the original files to make sure\n# the above processes have not added / removed anything that is should not have.\n \nfor chem in nc_head:\n print(\"processing \",chem)\n total = orig_files[chem]['total'].load()\n totarea = orig_files[chem]['totarea'].load()\n total[:,:,:] = 0.0\n totarea[:,:,:] = 0.0\n for sector in sector_list:\n tdata = orig_files[chem][sector].load()\n totarea += tdata\n tdata[0,:,:] += chem_data[chem][sector] \n tdata[1,:,:] += chem_data[chem][sector]\n total += tdata\n \n my_new_file = nc_work + chem + tailstr\n orig_files[chem].to_netcdf(my_new_file,format='NETCDF3_CLASSIC')\n\n \n\n \n \n \n","repo_name":"douglowe/NAEI_WRF_Emission_Tools","sub_path":"Point_Source_Scripts/point_source_apportionment.py","file_name":"point_source_apportionment.py","file_ext":"py","file_size_in_byte":9582,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"90"} +{"seq_id":"36775745562","text":"import argparse\nimport os\nimport sys\n\nimport albumentations as A\nimport pandas as pd\nimport pytorch_lightning as pl\nimport torch\nfrom albumentations.pytorch import ToTensorV2\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.callbacks.early_stopping import EarlyStopping\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom sklearn.model_selection import train_test_split\nfrom torch.utils.data import DataLoader, Subset\n\nfrom models import LightningModelWrapper, MlpMixer, TransferredInception\nfrom datasets import CassavaDataset\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\n \"architecture\",\n choices=[\"mlpmixer\", \"t-inception\"],\n help=\"Architecture of neural network.\",\n)\nparser.add_argument(\n \"-d\", \"--dataset-dir\", type=str, default=\"./data\", help=\"Path to dataset directory.\"\n)\nparser.add_argument(\"-b\", \"--batch-size\", type=int, 
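The grid-snapping step above, shown standalone against a plain 1-D array of cell centres (the script's version does the same lookup against single-column DataFrames, hence the extra [0] index):

import numpy as np

def find_nearest(array, value):
    array = np.asarray(array)
    return array[(np.abs(array - value)).argmin()]

# 1 km cells whose centres sit at x500 metres, as in the NAEI grids.
easting_centres = np.arange(500.0, 10500.0, 1000.0)
print(find_nearest(easting_centres, 3720.0))  # 3500.0
print(find_nearest(easting_centres, 4100.0))  # 4500.0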
default=16, help=\"Batch size.\")\nparser.add_argument(\n \"-l\",\n \"--learning_rate\",\n type=float,\n default=0.001,\n help=\"Learning rate for optimization.\",\n)\nparser.add_argument(\n \"--weighted-loss\",\n action=\"store_true\",\n help=\"Flag for weighted loss, based on class distribution.\",\n)\nparser.add_argument(\n \"-n\",\n \"--exp-name\",\n type=str,\n default=\"default\",\n help=\"Experiment name for Tensorboard logger.\",\n)\nparser.add_argument(\n \"-w\",\n \"--num-workers\",\n type=int,\n default=0,\n help=\"Number of processes for training dataloader.\",\n)\nparams = vars(parser.parse_args())\n\nif not os.path.isdir(params[\"dataset_dir\"]):\n print(\"The specified path does not exist.\")\n sys.exit()\n\nif params[\"exp_name\"] == \"default\":\n params[\"exp_name\"] = params[\"architecture\"]\n\nEXP_NAME = params[\"exp_name\"]\nDATASET_DIR = params[\"dataset_dir\"]\nBATCH_SIZE = params[\"batch_size\"]\nWEIGHTED_LOSS = params[\"weighted_loss\"]\nLEARNING_RATE = params[\"learning_rate\"]\nNUM_WORKERS = params[\"num_workers\"]\n\nif params[\"architecture\"] == \"mlpmixer\":\n IMAGE_SIZE = 448\n model = MlpMixer(\n (3, IMAGE_SIZE, IMAGE_SIZE),\n patch_size=32,\n hidden_channels=512,\n d_s=256,\n d_c=2048,\n mixer_blocks=4,\n out_class=5,\n )\nelif params[\"architecture\"] == \"t-inception\":\n IMAGE_SIZE = 299\n model = TransferredInception()\nelse:\n raise Exception(\"Cannot find specified network architecture.\")\n\naugmentations = A.Compose(\n [\n A.Resize(IMAGE_SIZE, IMAGE_SIZE),\n A.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=15, p=0.5),\n A.RGBShift(r_shift_limit=15, g_shift_limit=15, b_shift_limit=15, p=0.5),\n A.RandomBrightnessContrast(p=0.5),\n A.Normalize(mean=(0, 0, 0), std=(1, 1, 1), always_apply=True),\n ToTensorV2(),\n ]\n)\n\ndataset_without_augs = CassavaDataset(\n os.path.join(DATASET_DIR, \"train_smaller.csv\"),\n os.path.join(DATASET_DIR, \"train_images\"),\n)\ndataset_with_augs = CassavaDataset(\n os.path.join(DATASET_DIR, \"train_smaller.csv\"),\n os.path.join(DATASET_DIR, \"train_images\"),\n augmentations,\n)\n\ntrain_indices, val_indices = train_test_split(\n list(range(len(dataset_without_augs))),\n test_size=0.2,\n stratify=dataset_without_augs.img_labels.iloc[:, 1],\n random_state=123,\n)\ntrain_dataset = Subset(dataset_with_augs, train_indices)\nval_dataset = Subset(dataset_without_augs, val_indices)\ntrain_dataloader = DataLoader(\n train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS\n)\nval_dataloader = DataLoader(\n val_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=NUM_WORKERS\n)\n\nif WEIGHTED_LOSS:\n df = pd.read_csv(os.path.join(DATASET_DIR, \"train.csv\"))\n weights = (df[\"label\"].value_counts() / df.shape[0]).sort_index()\n weights = 1.0 / weights\n weights = weights / weights.sum()\n weights = torch.Tensor(weights)\nelse:\n weights = torch.Tensor([1.0, 1.0, 1.0, 1.0, 1.0])\n\ntb_logger = TensorBoardLogger(\"tensorboard_logs\", name=EXP_NAME)\nearly_stopping = EarlyStopping(\n monitor=\"val_loss\", min_delta=0.01, patience=4, verbose=True, mode=\"min\"\n)\nmodel_checkpoint = ModelCheckpoint(\n monitor=\"val_loss\",\n mode=\"min\",\n dirpath=\"checkpoints\",\n filename=\"mlp-mixer-loss{val_loss:.2f}\",\n verbose=True,\n)\ntrainer = pl.Trainer(\n max_epochs=6,\n logger=tb_logger,\n log_every_n_steps=5,\n callbacks=[early_stopping, model_checkpoint],\n)\n\npl_mlp = LightningModelWrapper(model, LEARNING_RATE, weights)\ntrainer.fit(pl_mlp, train_dataloader, 
val_dataloader)\n","repo_name":"p-wojciechowski/cassava-classification","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"28449926636","text":"from engine.engine_template import EngineTemplate\nfrom wifuxlogger import WifuxLogger as LOG\nfrom machine import ADC, Pin, RTC\nfrom dev.pin.core import CommonCommands\nfrom dev.tool.gooz_thread import _gooz_start_function_thread\n\n\nclass Usage():\n @staticmethod\n def var_usage():\n return LOG.info('Usage -> pin var adc --name [PIN_NAME] --pin [PIN_NUMBER]')\n\n @staticmethod\n def delete_usage():\n return LOG.info('Usage -> pin adc delete [PIN_NAME]')+'\\n'+LOG.info('deletes registered adc pin named [PIN_NAME]')+'\\n'+LOG.info('To delete all registered adc pins -> pin adc delete all')\n\n @staticmethod\n def update_usage():\n return LOG.info('Usage -> pin adc update [PIN_NAME] --[VALUE_TO_CHANGE] [NEW_VALUE]')+'\\n'+LOG.info('updates the [VALUE_TO_CHANGE] value of the adc pin named [PIN_NAME] to [NEW_VALUE]')\n\n @staticmethod\n def show_usage():\n return LOG.info('Usage -> pin adc show')+'\\n'+LOG.info('shows all registered adc pins')+'\\n'+LOG.info('Usage -> pin adc show [PARAMETER]:[VALUE_TO_SEARCH_FOR]')+'\\n'+LOG.info('shows specific adc pins')\n\n @staticmethod\n def read_usage():\n return LOG.info('Usage -> pin adc read [PIN_NAME] [READ_COUNT]')+'\\n'+LOG.info('reads value [READ_COUNT] times.If not entered, reads once')\n\n @staticmethod\n def listen_usage():\n message = \"\"\n message += LOG.info('Usage -> pin adc listen [PIN_NAME]')+'\\n'\n message += LOG.info('reads value from adc pin named [PIN_NAME] every 1 second')+'\\n'\n message += LOG.info('Usage -> pin adc listen [PIN_NAME] --file [FILE_NAME] --date [DATE_BOOL] --loop [LOOP_COUNT] --delay [SLEEP_TIME] --end [END_CHARACTER]')+'\\n'\n message += LOG.info('If [FILE_NAME] entered, the readed value will be written in the given [FILE_NAME]')+'\\n'\n message += LOG.info('[DATE_BOOL] can be 1 or 0.Default value is 1.')+'\\n'\n message += LOG.info('If [LOOP_COUNT] is bigger than 0, the number of reads will be the given number.Default [LOOP_COUNT] is -1 that means, it will read until stopped manually.')\n return message\n\n\ndef help(cmds):\n message = \"\"\n command_list = ['var', 'delete', 'update', 'show', 'read', 'listen']\n if not len(cmds) > 3:\n for command in command_list:\n message += command\n message += '\\n'\n return 'Commands:\\n'+message+LOG.info('For more information about commands -> pin adc help [COMMAND]')\n try:\n return eval('Usage.{}_usage()'.format(cmds[3]))\n except:\n return LOG.error('There is no help for \"{}\"!'.format(cmds[3]))\n\n\ndef delete(cmds):\n return CommonCommands(cmds).delete() \n\n\ndef registry(cmds):\n blueprint = {\"pinType\": \"adc\", \"--name\": \"\", \"--pin\": \"\"}\n return CommonCommands(cmds).register(blueprint)\n\n\ndef update(cmds):\n return CommonCommands(cmds).update()\n\n\ndef show(cmds):\n return CommonCommands(cmds).show()\n\n\ndef run(cmds):\n if not len(cmds) > 2:\n return LOG.warning(\"Please enter command!\\n\")+help(cmds)\n if cmds[2][0] == '_':\n return \"ERROR\"\n return eval(\"{}({})\".format(cmds[2], EngineTemplate.exec_formatter_api(cmds)))\n\n\ndef read(cmds):\n message = \"\"\n pin = CommonCommands(cmds).get_pin(cmds[1], cmds[3])\n #blueprint = EngineTemplate.parameter_parser(cmds)\n try:\n runner_pin = ADC(Pin(int(pin[\"--pin\"])))\n runner_pin.atten(ADC.ATTN_11DB)\n 
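The weighted-loss block above in isolation, with toy labels: inverse class frequency, normalised to sum to one, then handed to the loss as a tensor.

import pandas as pd
import torch

df = pd.DataFrame({"label": [0, 0, 0, 1, 1, 2]})
weights = (df["label"].value_counts() / df.shape[0]).sort_index()
weights = 1.0 / weights            # rare classes get large weights
weights = weights / weights.sum()  # normalise to sum to 1
print(torch.Tensor(weights))       # tensor([0.1818, 0.2727, 0.5455])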
reading = runner_pin.read()\n del runner_pin\n if '-n' in cmds:\n return reading\n else:\n return LOG.info(reading)\n except Exception as ex:\n return LOG.error(ex)\n\n\ndef listen(cmds):\n if not len(cmds) > 3:\n return LOG.error(\"Please enter pin name!\")\n blueprint = {\"--delay\": \"1\", \"--file\": \"\", \"--end\": \"\\n\", \"--loop\": \"-1\"}# -d : date\n listen_conf = EngineTemplate.parameter_parser(cmds, blueprint)\n thread_conf = {'delay': listen_conf['--delay'], 'loop': listen_conf['--loop'], 'type': 'adc_listen', 'pin_name': cmds[3]}\n del blueprint\n pin = CommonCommands(cmds).get_pin(cmds[1], cmds[3])\n try:\n _gooz_start_function_thread(_adc_listen_thread, (pin, listen_conf), thread_conf)\n del listen_conf\n return \"The pin is listened\"\n except Exception as ex:\n del listen_conf\n return LOG.error(ex)\n\n\ndef _adc_listen_thread(pins, conf={}):\n message = \"\"\n try:\n runner_pin = ADC(Pin(int(pins[\"--pin\"])))\n runner_pin.atten(ADC.ATTN_11DB)\n if conf[\"--file\"] == \"\":\n print('Value of the ADC pin named \"{}\": '.format(pins[\"--name\"]) + str(runner_pin.read()))\n else:\n if '-d' in conf.keys():\n m_rtc = RTC()\n f = open(conf[\"--file\"], \"a\")\n f.write(\"{} {}{}\".format(runner_pin.read(), m_rtc.datetime(), conf[\"--end\"]))\n f.close()\n else:\n f = open(conf[\"--file\"], \"a\")\n f.write(\"{}{}\".format(runner_pin.read(), conf[\"--end\"]))\n f.close()\n except Exception as ex:\n return LOG.error(ex)\n","repo_name":"gooz-project/gooz-os-v1.0.0","sub_path":"dev/pin/gooz_pin_adc.py","file_name":"gooz_pin_adc.py","file_ext":"py","file_size_in_byte":5063,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"90"} +{"seq_id":"27755010791","text":"import numpy as np\nimport torch\n\nfrom itertools import product\nfrom torch import log, exp\nimport torch.nn.functional as F\n\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n# device = \"cpu\"\n\n\nclass VarTable():\n def __init__(self, dims, dtype=torch.float, device=device):\n self.dims = dims\n d1, d2, d_rest = dims[0], dims[1], dims[2:]\n\n self.vars = []\n for i in range(d1):\n self.vars.append([])\n for j in range(d2):\n var = torch.zeros(d_rest).to(dtype).to(device)\n self.vars[i].append(var)\n\n def __getitem__(self, pos):\n i, j = pos\n return self.vars[i][j]\n\n def __setitem__(self, pos, new_val):\n i, j = pos\n if self.vars[i][j].sum() != 0:\n assert False, \"This cell has already been assigned. 
There must be a bug somewhere.\"\n        else:\n            self.vars[i][j] = self.vars[i][j] + new_val\n\n    def show(self):\n        device, dtype = self[0, 0].device, self[0, 0].dtype\n        # build a dense tensor with the same shape as the table of variables\n        mat = torch.zeros(self.dims).to(dtype).to(device)\n        for dims in product(*[range(d) for d in self.dims]):\n            i, j, rest = dims[0], dims[1], dims[2:]\n            mat[dims] = self[i, j][rest]\n        return mat\n\n\ndef minGamma(inputs, gamma=1, keepdim=True):\n    \"\"\" continuous relaxation of min defined in the D3TW paper\"\"\"\n    if type(inputs) == list:\n        if inputs[0].shape[0] == 1:\n            inputs = torch.cat(inputs)\n        else:\n            inputs = torch.stack(inputs, dim=0)\n\n    if gamma == 0:\n        minG = inputs.min(dim=0, keepdim=keepdim)\n    else:\n        # log-sum-exp stabilization trick\n        zi = (-inputs / gamma)\n        max_zi = zi.max()\n        log_sum_G = max_zi + log(exp(zi - max_zi).sum(dim=0, keepdim=keepdim) + 1e-5)\n        minG = -gamma * log_sum_G\n    return minG\n\n\ndef minProb(inputs, gamma=1, keepdim=True):\n    if type(inputs) == list:\n        if inputs[0].shape[0] == 1:\n            inputs = torch.cat(inputs)\n        else:\n            inputs = torch.stack(inputs, dim=0)\n\n    if gamma == 0:\n        minP = inputs.min(dim=0, keepdim=keepdim)\n    else:\n        probs = F.softmax(-inputs / gamma, dim=0)\n        minP = (probs * inputs).sum(dim=0, keepdim=keepdim) \n    return minP\n\n\ndef traceback(D):\n    i, j = np.array(D.shape) - 2\n    p, q = [i], [j]\n    while (i > 0) or (j > 0):\n        tb = np.argmin((D[i, j], D[i, j + 1], D[i + 1, j]))\n        if tb == 0:\n            i -= 1\n            j -= 1\n        elif tb == 1:\n            i -= 1\n        else: # (tb == 2):\n            j -= 1\n        p.insert(0, i)\n        q.insert(0, j)\n    return np.array(p), np.array(q)","repo_name":"anilbatra2185/pss_soda_matching","sub_path":"misc/softdp/dp_utils.py","file_name":"dp_utils.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"17711731707","text":"#! 
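A quick sanity check of the soft-min relaxation above (assuming the module is importable as dp_utils): as gamma shrinks, minGamma approaches the hard minimum.

import torch
from dp_utils import minGamma  # the module above

x = torch.tensor([3.0, 1.0, 2.0])
for gamma in (1.0, 0.1, 0.01):
    print(gamma, minGamma(x, gamma=gamma).item())
# gamma=1.0 undershoots the true min of 1.0; by gamma=0.01 the value is ~1.0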
/usr/bin/python3\n\nimport sys, getopt\nimport random\n\noffsets={'0':[], '1':[], '2':[], '3':[], '4':[], '5':[], '6':[], '7':[], '8':[], '9':[], 'A':[], 'B':[], 'C':[], 'D':[], 'E':[], 'F':[]}\n\ndef usage():\n    print(\"\\n=================================================================\");\n    print(\"Digital Book Cipher Tool\");\n    print(\"Usage:\");\n    print(\"digibook.py -b book -p plaintext|-c ciphertext -o outfile\");\n    print(\"----------\");\n    print(\"-b | --book ### book file\");\n    print(\"-p | --plain ### plaintext file\");\n    print(\"-c | --cipher ### ciphertext file\");\n    print(\"-o | --outfile ### output file\");\n    print(\"=================================================================\\n\");\n\n# create book table\n# for each matched char in string, append char index to offset list\ndef initialize(book):\n    for i in range(len(book)):\n        offsets[book[i]].append(i);\n\n# encrypt\ndef encrypt(plaintext):\n    ciphertext=\"\";\n    for i in range(len(plaintext)):\n        ciphertext = ciphertext + str(offsets[plaintext[i]][random.randrange(0,len(offsets[plaintext[i]]))]) + \" \";\n\n    return ciphertext;\n\n# decrypt\ndef decrypt(ciphertext):\n    plaintext=\"\";\n    for c_char in ciphertext:\n        for i in offsets.keys():\n            if (c_char != \"\"):\n                if (offsets[i].count(int(c_char)) > 0 ):\n                    plaintext = plaintext + str(i);\n\n    return plaintext;\n\ndef main(argv):\n    isEncrypt = False;\n    isDecrypt = False;\n    isBook = False;\n\n    if (len(argv)<1):\n        print(\"Missing Arguments\");\n        usage();\n        sys.exit(2);\n    try:\n        opts, args = getopt.getopt(argv, 'hb:p:c:o:', [\"book=\", \"plain=\", \"cipher=\", \"outfile=\"]);\n    except getopt.GetoptError as err:\n        print(err);\n        usage();\n        sys.exit(2);\n\n    for opt, arg in opts:\n        if opt == '-h':\n            usage();\n            sys.exit();\n        elif opt in ('-b', \"--book\"):\n            isBook = True\n            b_file = arg;\n        elif (opt in ('-p', \"--plain\")) and (not isDecrypt) and isBook:\n            isEncrypt = True;\n            p_file = arg;\n        elif (opt in ('-c', \"--cipher\")) and (not isEncrypt) and isBook:\n            isDecrypt = True;\n            c_file = arg;\n        elif opt in ('-o', \"--outfile\"):\n            o_file = arg;\n        else:\n            print(\"what?, how did this happen?\");\n            usage();\n            sys.exit(2);\n    \n    # initialize\n    book = open(b_file, \"rb\").read().hex().upper();\n    initialize(book);\n    \n    # encrypt\n    if isEncrypt:\n        outfile = open(o_file, \"w\");\n        plaintext = open(p_file, \"rb\").read().hex().upper();\n        ciphertext = encrypt(plaintext);\n        outfile.write(ciphertext);\n        outfile.close()\n\n    # decrypt\n    if isDecrypt:\n        outfile = open(o_file,\"wb\");\n        ciphertext = open(c_file, \"r\").read().split(\" \");\n        plaintext = decrypt(ciphertext);\n        outfile.write(bytearray.fromhex(plaintext));\n        outfile.close();\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:]);\n    \n##END##\n","repo_name":"jabrown2031/digibook","sub_path":"digibook.py","file_name":"digibook.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"22876125518","text":"class Properties(object):\n    def __init__(self, fileName):\n        self.fileName = fileName\n        self.properties = {}\n\n    def __getDict(self, key, dict, value):\n        if key.find(\".\") > 0:\n            k = key.split(\".\")[0]\n            dict.setdefault(k, {})\n            return self.__getDict(key[len(k)+1:],dict[k],value)\n        else:\n            dict[key] = value\n            return\n\n    def getProperties(self):\n        try:\n            with open(self.fileName, 'r') as properties_file:\n                for line in properties_file.readlines():\n                    line = line.strip().replace('\\n','')\n                    if line.find(\"#\") != -1:\n                        line = 
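An in-memory round trip of the primitives above, assuming they are importable from digibook; the only requirement on the "book" is that its hex dump contains every digit 0-F.

import digibook

book = b"The quick brown fox jumps over the lazy dog 0123456789".hex().upper()
digibook.initialize(book)

secret = b"hi".hex().upper()             # plaintext bytes as uppercase hex
cipher = digibook.encrypt(secret)        # space-separated book offsets
recovered = digibook.decrypt(cipher.split(" "))
assert recovered == secret
print(cipher.strip(), "->", bytes.fromhex(recovered))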
line[0:line.find(\"#\")]\n if line.find(\"=\") > 0:\n strs = line.split(\"=\")\n strs[1] = line[len(strs[0])+1:]\n self.__getDict(strs[0].strip(), self.properties, strs[1].strip())\n except Exception as e:\n raise e\n finally:\n return self.properties\n","repo_name":"chenyulian/configurable-crawler","sub_path":"util/read_properties.py","file_name":"read_properties.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"231683993","text":"s='azcbobobobobobegghakl'\nlentgh=len(s)\nword=''\nchar2=''\nchar3=''\ncounter=0\nfor char1 in s:\n word= char1 + char2 + char3\n \n if word=='bob':\n counter+=1\n \n char3=char2\n char2=char1\n \nprint ('Number of times bob occurs is: ')\nprint (counter)\n","repo_name":"RaminMammadzada/6.00.1x-Introduction-to-Computer-Science-and-Programming-Using-Python","sub_path":"22.06.2015/pset1 problem2.py","file_name":"pset1 problem2.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"20045936320","text":"\"\"\"Clip a raster by a polygon vector.\"\"\"\nimport argparse\nimport math\nimport tempfile\nimport os\nimport shutil\n\nimport logging\nimport sys\n\nfrom ecoshard import geoprocessing\nfrom osgeo import gdal\nfrom osgeo import osr\nimport numpy\n\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format=(\n '%(asctime)s (%(relativeCreated)d) %(levelname)s %(name)s'\n ' [%(funcName)s:%(lineno)d] %(message)s'),\n stream=sys.stdout)\n\nLOGGER = logging.getLogger(__name__)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=(\n 'Clip a raster by a polygon vector. Result is a clipped version of '\n 'input in the same projection as the input raster with size equal to '\n 'the projected bounding box of the input vector and any areas outside '\n 'of the polygon masked out. 
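The sliding-window scan above counts overlapping matches, which str.count would miss; an equivalent idiom restarts str.find one character past each hit:

s = 'azcbobobobobobegghakl'
count, start = 0, 0
while True:
    idx = s.find('bob', start)
    if idx == -1:
        break
    count += 1
    start = idx + 1  # advance by one so overlapping hits are counted
print('Number of times bob occurs is:', count)  # 5, matching the loop above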
Target raster will be named '\n        '{base raster name}_clipped_by_{base vector name}.tif'))\n    parser.add_argument(\n        'input_raster', help='Path to arbitrary input raster')\n    parser.add_argument(\n        'vector_to_clip_with', help='Path to arbitrary vector to clip')\n    args = parser.parse_args()\n\n    temp_dir = tempfile.mkdtemp(dir=os.getcwd(), prefix='clip_raster_workspace')\n\n    raster_info = geoprocessing.get_raster_info(args.input_raster)\n    raster_projection_wkt = raster_info['projection_wkt']\n\n    projected_vector_path = os.path.join(\n        temp_dir, os.path.basename(args.vector_to_clip_with))\n\n    LOGGER.info(f'reproject vector to {projected_vector_path}')\n    geoprocessing.reproject_vector(\n        args.vector_to_clip_with, raster_projection_wkt, projected_vector_path,\n        driver_name='GPKG')\n\n    projected_vector_info = geoprocessing.get_vector_info(projected_vector_path)\n\n    target_bb = geoprocessing.merge_bounding_box_list(\n        [raster_info['bounding_box'], projected_vector_info['bounding_box']],\n        'intersection')\n\n    raster_basename = os.path.basename(os.path.splitext(\n        args.input_raster)[0])\n    vector_basename = os.path.basename(os.path.splitext(\n        args.vector_to_clip_with)[0])\n    clip_raster_path = os.path.join(\n        os.getcwd(), f'{raster_basename}_clipped_by_{vector_basename}.tif')\n    LOGGER.info(f'clipping to {clip_raster_path}')\n    geoprocessing.warp_raster(\n        args.input_raster, raster_info['pixel_size'], clip_raster_path,\n        'near', target_bb=target_bb,\n        vector_mask_options={'mask_vector_path': projected_vector_path},\n        working_dir=os.getcwd())\n    LOGGER.info(f'all done, raster at {clip_raster_path}')\n    shutil.rmtree(temp_dir)\n","repo_name":"springinnovate/raster_calculations","sub_path":"clip_raster_by_vector.py","file_name":"clip_raster_by_vector.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"90"} +{"seq_id":"36389235678","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy import Request\nfrom urllib.parse import urlencode\nimport json\nfrom images360.items import Images360Item\n\nclass ImagesSpider(scrapy.Spider):\n    name = 'images'\n    allowed_domains = ['images.so.com']\n    start_urls = ['http://images.so.com/']\n    \n# Debug note: at first it seemed start_requests was never being called\n# it's start_requests, not start_request; note the extra 's'.\n    def start_requests(self):\n        data = {'ch':'beauty','listtype':'new'}\n        base_url = 'http://images.so.com/zj?'\n        #num = self.settings.get('MAX_PAGE')+1\n        #print(num)\n        for page in range(1,2):#self.settings.get('MAX_PAGE')+1\n            data['sn'] = page * 30\n            params = urlencode(data)\n            url = base_url + params\n            #print(url)\n            yield Request(url=url,callback=self.parse)\n\n    def parse(self, response):\n        result = 
json.loads(response.text)\n# for image in result.get('list'):\n# item = ImageItem()\n# item['id'] = image.get('imageid')\n# item['url'] = image.get('qhimg_url')\n# item['title'] = image.get('group_title')\n# item['thumb'] = image.get('qhimg_thumb_url')\n# yield item\n# =============================================================================\n","repo_name":"systemData/pic_of_360","sub_path":"images360/spiders/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"16108811892","text":"from django.db import models\nfrom django.urls import reverse\nfrom account.models import User\nfrom taggit.managers import TaggableManager\nfrom django.utils import timezone\n# Create your models here.\nclass Post(models.Model):\n POST_TYPE_CHOICES = [\n ('TXT','Text'),\n ('Q','Quote'),\n ('IMG','Image'),\n ('VID','Video'),\n ('GIF','GIF'),\n ('AUD','Audio'),\n ]\n POST_WHEN_CHOICES = [\n ('NOW','Post Now'),\n ('QU','Add to queue'),\n ('DR','Add to draft'),\n ('PR','Post privately'),\n ('SC','Schedule'),\n ]\n post_type = models.CharField( max_length=3,choices=POST_TYPE_CHOICES,default='TXT')\n title = models.CharField(max_length=128,default=' ')\n text = models.TextField(blank=True)\n image = models.ImageField(upload_to='post/images/',blank=True,null=True)\n video = models.FileField(upload_to='post/videos/',blank=True,null=True)\n gif = models.FileField(upload_to='post/gifs/',blank=True,null=True)\n audio = models.FileField(upload_to='post/audio/',blank=True,null=True)\n op = models.ForeignKey(User,on_delete=models.CASCADE,related_name='posts')\n is_reblogged = models.BooleanField(default=False,blank=True)\n tags = TaggableManager(blank=True)#to manage tagging\n posted_on = models.DateTimeField(auto_now_add=True)\n modified_on = models.DateTimeField(auto_now=True)\n source = models.URLField(blank=True)\n quote_text = models.TextField(blank=True)\n post_when = models.CharField(max_length=3,choices=POST_WHEN_CHOICES,default='NOW')\n scheduled_date = models.DateTimeField(blank=True,null=True)\n likes = models.ManyToManyField(User, through='Like',related_name='liked_posts')\n reblogs = models.ManyToManyField(User, through='Reblog',through_fields=('reblogged_content','reblogged_from'),related_name='reblogged_posts')\n notes = models.ManyToManyField(User, through='Note',related_name='post_noted')\n\n class Meta:\n ordering = ['-modified_on']\n\n def __str__(self):\n return self.title\n def get_absolute_url(self):\n return reverse(\"post:detail\",kwargs={\"username\":self.op.username,\"pk\":self.pk})\n\n#defining the relations\nclass Note(models.Model):\n post = models.ForeignKey(Post,on_delete=models.CASCADE)\n user = models.ForeignKey(User,on_delete=models.CASCADE)\n note = models.TextField()\n\n\n\nclass Like(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n user = models.ForeignKey(User,on_delete=models.CASCADE)\n liked_on = models.DateTimeField(auto_now=True)\n class Meta:\n unique_together = [['post','user']]#so that a user cannot like a post more than once\n ordering = ['-liked_on']#ordering on liked on so it is returned in descending order\n\n\nclass Reblog(models.Model):\n reblogged_from = models.ForeignKey(User,on_delete=models.CASCADE)#parent_post.op\n parent_post = models.ForeignKey(Post,on_delete=models.CASCADE,related_name='parent_of')\n reblogged_content = models.ForeignKey(Post,on_delete=models.CASCADE,related_name='reblogged_content')\n 
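The pagination scheme used by start_requests above, in isolation: each page advances the sn offset by 30 before the query string is rebuilt.

from urllib.parse import urlencode

base_url = 'http://images.so.com/zj?'
data = {'ch': 'beauty', 'listtype': 'new'}
for page in range(1, 4):
    data['sn'] = page * 30
    print(base_url + urlencode(data))
# http://images.so.com/zj?ch=beauty&listtype=new&sn=30 (then sn=60, sn=90)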
def __str__(self):\n        return 'reblog | '+str(self.id)\n\nfrom django.db.models.signals import post_save\ndef add_reblogged_from(sender, instance, **kwargs):\n    instance.reblogged_from = instance.parent_post.op\n    #instance.save()\npost_save.connect(add_reblogged_from, sender=Reblog)\n","repo_name":"seraph-wing/tumblr-clone","sub_path":"tumblr_clone/post/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"31649571274","text":"################################################################\n# HW 5 - ENGI1006\n# Nupur Dave\n# ncd2123\n# NNClassifier - returns the fraction of labels matched between\n# the testing data and training data\n# knn - calculates the nearest numbers (per the argument given)\n# and finds the mode from the data.\n###############################################################\n\nimport numpy as np\nfrom math import sqrt\nfrom statistics import mode\n\n\ndef NNClassifier(training, testing, training_labels, testing_labels, k):\n    '''Runs the Nearest Neighbor classifier:\n\n    Args:\n        training: the subset of data corresponding to the training data as a numpy matrix\n        testing: the subset of data corresponding to the testing data as a numpy matrix\n        training_labels: the labels for the training data as a numpy array\n        testing_labels: the labels for the testing data as a numpy array\n        k: the number of nearest neighbors to use\n\n    This function should do the following:\n\n    - preallocate an array `labels` for the predicted labels of the testing data\n    - for each row in the testing data, use knn to predict the label\n    - at the end, return what fraction of labels matched, i.e. how many labels in `labels` matched the label in `testing_labels`\n    '''\n    # preallocate labels\n    labels = np.empty(testing_labels.shape, dtype=str)\n    \n    # for each point\n    # run knn on each point and assign its label into labels\n    correct = 0\n    for i in range(np.size(testing, 0)):\n        predicted_label = knn(training, training_labels, testing[i,:], k)\n        labels[i] = predicted_label\n        real_label = testing_labels[i]\n        \n        if real_label == labels[i]:\n            correct += 1\n    # return the fraction of predictions that matched the actual labels\n    return correct/len(testing_labels)\n\n    '''\n    correct = 0\n    # preallocate labels\n    labels = np.zeros(len(testing_labels))\n\n    # for each point\n    # run knn on each point and assign its label into labels\n    for i in range(len(labels)):\n        predicted = knn(training, training_labels, testing[i], k)\n        real = testing_labels[i]\n        \n        if real == predicted:\n            correct = correct + 1\n\n    # return % where prediction matched actual\n    return int(correct/len(testing_labels) * 100)\n'''\n\n\ndef knn(data, data_labels, vector, k):\n    '''knn should calculate the nearest neighbor\n\n    data: the numpy array of training data\n    data_labels: the numpy array of labels for the training data\n    vector: a row from the testing data to calculate nearest neighbors\n    k: how many nearest neighbors to find\n\n\n    This function should find the `k` nearest rows in `data` relative to\n    `vector`, and take a vote amongst their labels. 
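One caveat with the post_save receiver above: an attribute set after the row is written is never persisted (the instance.save() call is commented out, presumably to avoid signal recursion). A hedged alternative is to fill the field in pre_save, before the INSERT happens:

from django.db.models.signals import pre_save
from django.dispatch import receiver

@receiver(pre_save, sender=Reblog)
def set_reblogged_from(sender, instance, **kwargs):
    # Populate the denormalised FK before the row hits the database.
    if instance.reblogged_from_id is None and instance.parent_post_id:
        instance.reblogged_from = instance.parent_post.op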
Whichever has more (b or m), return\n    that value'''\n    # preallocate distance array\n    distances = np.zeros(len(data_labels))\n    \n    # for each point in data\n    # calculate the distance to vector, store in distance array\n    for i in range(len(distances)):\n        distances[i] = sqrt(((data[i].astype(float) - vector.astype(float)) **2).sum())\n\n    # sort distances, and get indexes to use in data_labels (look at np.argsort)\n    indexes = np.argsort(distances)\n\n    # take a vote amongst the top k labels\n    to_vote = data_labels[indexes]\n    return mode(to_vote[:k])\n","repo_name":"nupurd89/machinelearning","sub_path":"engi1006/models/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"36609339478","text":"import keras\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nimport numpy as np\n\n# Specify the categories\ncategories = [\"chair\",\"camera\",\"butterfly\",\"elephant\",\"flamingo\"]\nnb_classes = len(categories)\n# Specify the image size\nimage_w = 64 \nimage_h = 64\n\n# Load the data --- (※1)\nX_train, X_test, y_train, y_test = np.load(\"11.deep/d0726/image/5obj.npy\")\n# Normalize the data\nX_train = X_train.astype(\"float\") / 255\nX_test = X_test.astype(\"float\") / 255\nprint('X_train shape:', X_train.shape)\n\n# Declare the deep learning model\n# Declare the convolutional neural network\nmodel = keras.Sequential()\n\n# CNN\nmodel.add(keras.layers.Conv2D(32,kernel_size=3,activation='relu',padding='same',input_shape=(image_w,image_h,3)))  # match the 64x64 colour images loaded above (was (28,28,1))\n# Max pooling\nmodel.add(keras.layers.MaxPooling2D(2))\n\n# CNN - repeated once more\nmodel.add(keras.layers.Conv2D(64,kernel_size=3,activation='relu',padding='same'))\n# Max pooling\nmodel.add(keras.layers.MaxPooling2D(2))\n# Train the model\n\n# Evaluate the model\n\n\n\n\n","repo_name":"onulee/https---github.com-onulee-kdigital1","sub_path":"11.deep/d0726/05.02이미지변환_딥러닝적용.py","file_name":"05.02이미지변환_딥러닝적용.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18517666619","text":"import sys\nimport numpy as np\n\nstdin = sys.stdin\n\nri = lambda: int(rs())\nrl = lambda: list(map(int, stdin.readline().split())) # applies to numbers only\nrs = lambda: stdin.readline().rstrip() # ignore trailing spaces\n\nN, M, D = rl()\nif D == 0:\n    print((M-1) / N)\nelse:\n    answer = 2 * (N-D) * (M-1) / N**2\n    print(answer)\n# 49","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03304/s883653622.py","file_name":"s883653622.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"72462936936","text":"\nimport json\nimport rook\nfrom os import environ\nfrom requests import post\nfrom http import HTTPStatus\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nfrom requestadapter import RequestAdapter\nfrom responseadapter import ResponseAdapter\n\nDOCS_OPPWA_URL = \"https://docs.oppwa.com/api-request\"\n\n\nclass HttpHandler(BaseHTTPRequestHandler):\n    def do_POST(self):\n        charset = self.headers.get_charset()\n        if charset is None:\n            charset = \"utf-8\"\n        try:\n            content_length = int(self.headers.get(\"content-length\"))\n        except (TypeError, ValueError):\n            content_length = 0\n\n        try:\n            request_obj = json.loads(str(self.rfile.read(content_length), charset), encoding=charset)\n        except json.JSONDecodeError as e:\n            self.report_error(HTTPStatus.BAD_REQUEST, \"Exception while parsing request\", e)\n            
return\n\n response_obj = {\"messageType\": \"pArs\", \"p_messageVersion\": \"1.0.5\", \"messageVersion\": \"2.1.0\"}\n\n try:\n oppwa_request = RequestAdapter(request_obj).adapt_request()\n except Exception as e:\n self.report_error(HTTPStatus.BAD_REQUEST, \"Exception while adapting request\", e)\n return\n\n try:\n oppwa_response = post(url=DOCS_OPPWA_URL,\n data={\"xmlData\": oppwa_request, \"params\": {}, \"ptype\": \"paypipe\",\n \"sendtolive\": \"test\", \"basicUser\": \"uuid\", \"basicPass\": \"payon\"}).text\n except Exception as e:\n self.report_error(HTTPStatus.INTERNAL_SERVER_ERROR, \"Exception while contacting OPPWA\", e)\n return\n\n try:\n ResponseAdapter(oppwa_response, response_obj).adapt_response()\n except Exception as e:\n self.report_error(HTTPStatus.BAD_REQUEST, \"Exception while adapting response\", e)\n return\n\n content = json.dumps(response_obj).encode(\"utf-8\")\n code = HTTPStatus.OK\n\n self.send_response(code)\n self.send_header(\"Content-Type\", \"application/json; charset=utf-8\")\n self.send_header(\"Content-Length\", len(content))\n self.end_headers()\n self.wfile.write(content)\n\n def report_error(self, code, msg, e):\n self.log_error(msg + \": %s\", format(e))\n self.send_error(code, msg, format(e))\n\nif __name__ == \"__main__\":\n rook.start(token='e2fe21957bb07aba7b222bd7ec9ef1ce2ba6b2c051e3d7120afaebdd2572d6fc')\n\nport = int(environ[\"PORT\"])\nhttpd = HTTPServer((\"\", port), HttpHandler)\nprint(\"Listening on port \" + str(port))\nhttpd.serve_forever()\n","repo_name":"alisa-efremova/master-cert","sub_path":"master-cert.py","file_name":"master-cert.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"31002604755","text":"# General\nimport os\nimport sys\nimport re\nimport itertools\nimport ntpath\nimport datetime\nimport pytz\nimport logging\nimport shlex\nimport base64\nimport traceback\nimport yaml\nimport stix2\nlog = logging.getLogger(__name__)\n\n# Cuckoo\nfrom cuckoo.common.abstracts import Report\nfrom cuckoo.common.exceptions import CuckooReportError, CuckooDependencyError\nfrom cuckoo.misc import cwd\n\nnow = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n\nclass stix2reporter(Report):\n \"\"\" Save analysis in STIX 2.0 \"\"\"\n\n def run(self, results):\n # Save Cuckoo Sandbox results dictionary for processing\n self.results = results\n self.observables = []\n self.processes = []\n self.files = []\n self.directories = []\n self.deleted_files = []\n self.deleted_directories = []\n self.mutexes = []\n self.reg_keys = []\n self.reg_values = {}\n self.deleted_reg_keys = []\n self.deleted_reg_values = {}\n self.connections = []\n self.http_connections = []\n self.domains = []\n self.terminated_pids = []\n\n self.load_blacklist()\n try:\n self.target_name = self.results[\"target\"][\"file\"]['name']\n except KeyError:\n self.target_name = None\n\n # add dropped files\n if \"dropped\" in self.results and len(self.results[\"dropped\"]) > 0:\n for file in self.results[\"dropped\"]:\n if 'filepath' not in file:\n continue\n if not file['pids']:\n continue\n obj = file\n obj['label'] = 'Dropped File'\n self.files.append(obj)\n\n # get terminated processes\n if self.options.get('discard_terminated_processes'):\n self.get_terminated_pids(self.results['debug'])\n\n # analyse and create observables\n self.behaviour(self.results['behavior'])\n self.network(self.results['network'])\n self.remove_observables()\n\n self.write_output()\n\n 
def write_output(self):\n # build STIX objects\n for file in self.files:\n obs = self.create_file_obs(file, file['label'])\n self.observables.append(obs) if obs else None\n for process in self.processes:\n obs = self.create_process_obs(process)\n self.observables.append(obs) if obs else None\n for directory in self.directories:\n obs = self.create_directory_obs(directory)\n self.observables.append(obs) if obs else None\n for mutex in self.mutexes:\n obs = self.create_mutex_obs(mutex)\n self.observables.append(obs) if obs else None\n for regkey in self.reg_keys:\n obs = self.create_regkey_obs(regkey, self.reg_values.get(regkey['key'], None))\n self.observables.append(obs) if obs else None\n for connection in self.connections:\n obs = self.create_connection_obs(connection)\n self.observables.append(obs) if obs else None\n for http in self.http_connections:\n obs = self.create_http_connection_obs(http)\n self.observables.append(obs) if obs else None\n for domain in self.domains:\n obs = self.create_domain_obs(domain)\n self.observables.append(obs) if obs else None\n\n if not self.observables:\n raise CuckooReportError(\"Failed to generate STIX2 report: No Observables to report.\")\n\n # write report\n try:\n bundle = stix2.Bundle(objects=self.observables).serialize(sort_keys=False, indent=4)\n try:\n os.makedirs(self.reports_path)\n except OSError:\n pass\n with open(os.path.join(self.reports_path, 'stix2.json'), 'w') as report:\n report.write(bundle)\n except Exception as e:\n raise CuckooReportError(\"Failed to generate STIX2 report: %s\" % e)\n\n # network\n def network(self, data):\n for connection in data.get('tcp', []):\n con = self.connection(connection, 'tcp')\n self.connections.append(con) if con else None\n for connection in data.get('udp', []):\n con = self.connection(connection, 'udp')\n self.connections.append(con) if con else None\n for http_con in data.get('http_ex', []):\n con = self.http_connection(http_con, data['http'])\n self.http_connections.append(con) if con else None\n for dns_req in data.get('dns', []):\n query = self.dns_query(dns_req)\n self.domains.append(query) if query else None\n\n def http_connection(self, http_con, http_legacy):\n dest_addr = {'value': http_con['dst']}\n if is_valid_ipv6(http_con['dst']):\n dest_addr['type'] = 'ipv6-addr'\n elif is_valid_ipv4(http_con['dst']):\n dest_addr['type'] = 'ipv4-addr'\n else:\n log.error(\"Type of network address ({0}) not known!\", http_con['dst'])\n return None\n\n header = {}\n for http_legacy in http_legacy:\n if http_con['uri'] == http_legacy['path'] and http_con['host'] == http_legacy['host'] and http_con[\n 'method'] == http_legacy['method'] and http_con['dport'] == http_legacy['port']:\n if 'user-agent' in http_legacy:\n header['user-agent'] = http_legacy['user-agent']\n if 'version' in http_legacy:\n header['version'] = http_legacy['version']\n http = {\n 'request_method': http_con['method'],\n 'request_value': http_con['uri'],\n 'request_version': header.get('version', None),\n 'request_header': {\n 'User-Agent': header.get('user-agent', None),\n 'Host': http_con['host']\n }\n }\n\n # Future ToDo -- not yet in STIX 2.0:\n # http_con['status']\n # http_con['response']\n\n obj = {\n 'type': 'network-traffic',\n 'dst_ref': '1',\n 'dst_port': http_con['dport'],\n 'protocols': ['tcp', 'http'],\n 'extensions': {\n 'http-request-ext': http\n }\n }\n observable = {'0': obj, '1': dest_addr}\n return observable\n\n def connection(self, data, protocol):\n src_addr = None\n if self.options.get(\"include_src_addr\"):\n 
src_addr = {'value': data['src']}\n if is_valid_ipv6(data['src']):\n src_addr['type'] = 'ipv6-addr'\n elif is_valid_ipv4(data['src']):\n src_addr['type'] = 'ipv4-addr'\n else:\n log.error(\"Type of network address ({0}) not known!\", data['src'])\n return None\n\n dest_addr = {'value': data['dst']}\n if is_valid_ipv6(data['dst']):\n dest_addr['type'] = 'ipv6-addr'\n elif is_valid_ipv4(data['dst']):\n dest_addr['type'] = 'ipv4-addr'\n else:\n log.error(\"Type of network address ({0}) not known!\", data['dst'])\n return None\n\n obj = {'type': 'network-traffic', 'dst_port': data['dport'], 'protocols': [protocol]}\n\n observable = {}\n counter = 0\n observable[str(counter)] = obj\n counter += 1\n observable[str(counter)] = dest_addr\n obj['dst_ref'] = str(counter)\n counter += 1\n if src_addr:\n obj['dst_port'] = data['sport']\n observable[str(counter)] = src_addr\n obj['src_ref'] = str(counter)\n return observable\n\n def dns_query(self, data):\n domain = {'type': 'domain-name', 'value': data['request']}\n answers = []\n\n for answer in data['answers']:\n ip = answer['data']\n if is_valid_ipv4(ip):\n ans = {'type': 'ipv4-addr', 'value': ip}\n answers.append(ans)\n elif is_valid_ipv6(ip):\n ans = {'type': 'ipv6-addr', 'value': ip}\n answers.append(ans)\n\n obj = {'0': domain}\n counter = 1\n for answer in answers:\n obj[str(counter)] = answer\n counter += 1\n\n if counter > 1:\n obj['0']['resolves_to_refs'] = range(1, counter)\n\n return obj\n\n # host\n def behaviour(self, data):\n summary = data.get('summary', {})\n files_created = summary.get('file_created', [])\n files_deleted = summary.get('file_deleted', [])\n directories_created = summary.get('directory_created', [])\n # directories_deleted = summary.get('directory_removed', [])\n # registry_written = summary.get('regkey_written', [])\n # registry_deleted = summary.get('regkey_deleted', [])\n\n # analyse process calls\n for process in data['processes']:\n for call in process['calls']:\n self.process_call(call)\n\n self.process_processes(data['processes'])\n self.process_mutexes(data)\n self.process_files(files_created, files_deleted, directories_created)\n\n def process_processes(self, data):\n for process in data:\n obj = {\n 'type': 'process',\n 'time': self.get_time(process['first_seen']),\n 'name': process['process_name'],\n 'process_path': process['process_path']\n }\n\n if 'pid' in process:\n obj['pid'] = process['pid']\n\n if (process['process_path'].lower() != process['command_line'].lower()\n and not (process['command_line'][0] == '\"' and\n (process['command_line'][-1] == '\"' or process['command_line'][-2:] == '\" ')\n and process['command_line'][1:].index('\"') >= len(process['command_line']) - 3)):\n # process has arguments; the above if-clause checks for things like:\n # [c:\\bad.exe], [\"c:\\bad.exe\"] and [\"c:\\bad.exe\" ](<--mind the trailing witespace!),\n # which all do not feature any arguments but look different!\n # All those examples have been observed running real world malware samples.\n obj['command_line'] = process['command_line']\n try:\n arguments = shlex.split(obj['command_line'], posix=False)[1:]\n if arguments:\n obj['arguments'] = arguments\n except Exception as e:\n if str(e) == 'No closing quotation':\n try:\n arguments = shlex.split(obj['command_line'] + '\"', posix=False)[1:]\n if arguments:\n obj['arguments'] = arguments\n except:\n pass\n self.processes.append(obj)\n\n def process_files(self, files_created, files_deleted, directories_created):\n # files\n known_files = []\n for known_file in 
self.files:\n if 'filepath' in known_file:\n known_files.append(known_file['filepath'])\n deleted_files = [] + files_deleted\n for deleted_file in self.deleted_files:\n deleted_files.append(deleted_file['filepath'])\n deleted_directories = []\n for deleted_directory in self.deleted_directories:\n deleted_directories.append(deleted_directory['dirpath'])\n\n for file in files_created:\n if file not in known_files and file not in deleted_files and ntpath.dirname(\n file) + r'\\\\' not in deleted_directories and ntpath.dirname(file) not in directories_created:\n obj = {'label': 'File Created', 'type': 'file', 'name': ntpath.basename(file), 'filepath': file}\n self.files.append(obj)\n\n def get_mutexes(self, behaviour):\n mutexes = []\n for process in behaviour.get('generic'):\n pid = process.get('pid')\n if self.options.get('discard_terminated_processes') and pid in self.terminated_pids:\n continue\n if 'summary' in process and 'mutex' in process['summary']:\n mutexes.extend(process['summary']['mutex'])\n return mutexes\n\n def process_mutexes(self, data):\n mutexes = self.get_mutexes(data)\n for mutex in mutexes:\n known = False\n for known_mutex in self.mutexes:\n if known_mutex['name'] == mutex:\n known = True\n break\n if known:\n continue\n obj = {'type': 'mutex', 'name': mutex}\n self.mutexes.append(obj)\n\n def process_call(self, call):\n # api calls regarding files\n if call['category'] == 'file':\n switcher = {\n 'CreateFile2': lambda: self.api_create_file(call),\n 'CreateFileA': lambda: self.api_create_file(call),\n 'CreateFileW': lambda: self.api_create_file(call),\n 'NtCreateFile': lambda: self.api_create_file(call),\n 'WriteFile': lambda: self.api_write_file(call),\n 'WriteFileEx': lambda: self.api_write_file(call),\n 'NtWriteFile': lambda: self.api_write_file(call),\n 'DeleteFile': lambda: self.api_delete_file(call),\n 'DeleteFileA': lambda: self.api_delete_file(call),\n 'DeleteFileW': lambda: self.api_delete_file(call),\n 'ZwDeleteFile': lambda: self.api_delete_file(call),\n 'RemoveDirectoryA': lambda: self.api_delete_directory(call),\n 'RemoveDirectoryW': lambda: self.api_delete_directory(call),\n }\n observable = switcher.get(call['api'], lambda: None)()\n if not observable:\n return None\n\n known = False\n for index, file in enumerate(self.files):\n if 'filepath' in file and 'filepath' in observable and file['filepath'] == observable['filepath']:\n # file already known; do not include it again, but update data\n known = True\n\n # update timestamps\n obs = self.update_timestamps(file, observable)\n if obs:\n self.files[index] = obs\n\n # update artifacts\n obs = self.files[index]\n if 'artifacts' in observable:\n if 'artifacts' in file:\n obs['artifacts'] += observable['artifacts']\n else:\n obs['artifacts'] = observable['artifacts']\n self.files[index] = obs\n break\n if not known:\n observable['first_observed'] = observable['time']\n observable['last_observed'] = observable['time']\n observable.pop('time', None)\n self.files.append(observable)\n\n # api calls regarding registry\n if call['category'] == 'registry':\n switcher = {\n 'RegSetValueExA': lambda: self.api_set_regkey(call),\n 'RegSetValueExW': lambda: self.api_set_regkey(call),\n #'RegCreateKeyExA': lambda: self.api_create_regkey(call),\n #'RegCreateKeyExW': lambda: self.api_create_regkey(call),\n 'RegDeleteValueA': lambda: self.api_delete_regvalue(call),\n 'RegDeleteValueW': lambda: self.api_delete_regvalue(call),\n 'RegDeleteKeyA': lambda: self.api_delete_regkey(call),\n 'RegDeleteKeyW': lambda: 
self.api_delete_regkey(call),\n 'RegDeleteKeyExA': lambda: self.api_delete_regkey(call),\n 'RegDeleteKeyExW': lambda: self.api_delete_regkey(call),\n }\n observable = switcher.get(call['api'], lambda: None)()\n if not observable:\n return None\n\n if 'value' in observable:\n value = {'data': observable['data'], 'datatype': observable['datatype'], 'time': observable['time']}\n values = self.reg_values.get(observable['key'], {})\n if observable['value'] in values:\n for index, data in enumerate(values[observable['value']]):\n if observable['data'] != data['data']:\n if datetime.datetime.strptime(data['time'],\n '%Y-%m-%dT%H:%M:%S.%fZ') < datetime.datetime.strptime(\n observable['time'], '%Y-%m-%dT%H:%M:%S.%fZ'):\n data['data'] = observable['data']\n data['datatype'] = observable['datatype']\n data['time'] = observable['time']\n values[observable['value']][index] = data\n else:\n data['time'] = observable['time']\n values[observable['value']][index] = data\n else:\n values[observable['value']] = [value]\n self.reg_values[observable['key']] = values\n observable.pop('value', None)\n observable.pop('data', None)\n observable.pop('datatype', None)\n\n known = False\n for index, regkey in enumerate(self.reg_keys):\n if regkey['key'] == observable['key']:\n # regkey already known; do not include it again, but update data\n known = True\n\n # update timestamps\n obs = self.update_timestamps(regkey, observable)\n if obs:\n self.reg_keys[index] = obs\n if not known:\n observable['first_observed'] = observable['time']\n observable['last_observed'] = observable['time']\n observable.pop('time', None)\n self.reg_keys.append(observable)\n\n def api_create_mutant(self, data):\n # [TODO] This may be only for NtCreateMutant!\n if data['return_value'] != 0:\n return None\n obj = {'label': 'Mutex Created', 'name': data['arguments']['mutant_name']}\n time = self.get_time(data['time'])\n if time:\n obj['time'] = time\n return obj\n\n def api_create_file(self, data):\n # [TODO] This may be only for NtCreateFile!\n if data['flags']['status_info'] != 'FILE_CREATED':\n return None\n obj = {\n 'label': 'File Created',\n 'name': ntpath.basename(data['arguments']['filepath']),\n 'filepath': data['arguments']['filepath']\n }\n time = self.get_time(data['time'])\n if time:\n obj['time'] = time\n return obj\n\n def api_write_file(self, data):\n if data['return_value'] != 0:\n return None\n # [TODO] This may be only for NtWriteFile!\n obj = {\n 'label': 'File Written',\n 'name': ntpath.basename(data['arguments']['filepath']),\n 'filepath': data['arguments']['filepath'],\n 'artifacts': [data['arguments']['buffer']]\n }\n time = self.get_time(data['time'])\n if time:\n obj['time'] = time\n return obj\n\n def api_delete_file(self, data):\n # not an observable object, but remove observables affected by this\n if data['return_value'] == 0:\n # yes, zero is fail, non-zero success: https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-deletefilea\n return None\n obj = {'label': 'File Deleted', 'filepath': data['arguments']['filepath']}\n time = self.get_time(data['time'])\n if time:\n obj['time'] = time\n self.deleted_files.append(obj)\n return None\n\n def api_delete_regkey(self, data):\n # not an observable object, but remove observables affected by this\n if data['return_value'] != 0:\n return None\n obj = {'label': 'Registry Key Deleted', 'key': data['arguments']['regkey']}\n time = self.get_time(data['time'])\n if time:\n obj['time'] = time\n self.deleted_reg_keys.append(obj)\n return None\n\n def 
api_delete_regvalue(self, data):\n # not an observable object, but remove observables affected by this\n if data['return_value'] != 0:\n return None\n key = ntpath.dirname(data['arguments']['regkey'])\n obj = {'label': 'Registry Value Deleted', 'value': ntpath.basename(data['arguments']['regkey'])}\n time = self.get_time(data['time'])\n if time:\n obj['time'] = time\n deleted = self.deleted_reg_values.get(key, [])\n deleted.append(obj)\n self.deleted_reg_values[key] = deleted\n return None\n\n def api_set_regkey(self, data):\n if data['return_value'] != 0:\n return None\n # [TODO] This may be only for RegSetValueExW!\n key = ntpath.dirname(data['arguments']['regkey'])\n value = ntpath.basename(data['arguments']['regkey'])\n obj = {\n 'label': 'Registry Key Written',\n 'key': key,\n 'value': value,\n 'data': data['arguments']['value'],\n 'datatype': data['flags']['reg_type']\n }\n\n time = self.get_time(data['time'])\n if time:\n obj['time'] = time\n return obj\n\n def api_create_regkey(self, data):\n if data['return_value'] != 0:\n return None\n # [TODO] This may be only for RegCreateKeyExW!\n key = ntpath.dirname(data['arguments']['regkey'])\n obj = {'label': 'Registry Key Created or Read', 'key': key}\n\n time = self.get_time(data['time'])\n if time:\n obj['time'] = time\n return obj\n\n def api_delete_directory(self, data):\n # not an observable object, but remove observables affected by this\n if data['return_value'] == 0:\n return None\n obj = {'label': 'Directory Deleted', 'dirpath': data['arguments']['dirpath']}\n time = self.get_time(data['time'])\n if time:\n obj['time'] = time\n self.deleted_directories.append(obj)\n return None\n\n ###### Create STIX2 Observables ######\n def create_process_obs(self, data):\n\n if self.options.get('discard_terminated_processes') and data['pid'] in self.terminated_pids:\n return {}\n\n first_observed = data['time']\n last_observed = data['time']\n del data['time']\n\n obj = {'0': data}\n del data['process_path']\n\n # do not include the process of the target file itself!\n # account for cuckoo running .dll files\n if 'command_line' in data and re.match(\n r'\\\"C:\\\\Windows\\\\System32\\\\rundll32\\.exe\\\" C:\\\\Users\\\\.*\\\\AppData\\\\Local\\\\Temp\\\\' + self.target_name +\n r'\\.dll,DllMain', data['command_line']):\n return {}\n # account for cuckoo appending the .exe file extension, if it does not exists already\n if ('command_line' in data and 'C:\\\\Users\\\\' + self.options.get(\"username\") + '\\\\AppData\\\\Local\\\\Temp\\\\' +\n self.target_name == data['command_line'] or len(data) == 2 and 'name' in data and\n (self.target_name == data['name'] or self.target_name == '.exe'.join(data['name'].split('.exe')[:-1]))):\n return {}\n\n if self.is_blacklisted(obj):\n return {}\n\n if self.options.get('use_env_variables'):\n if 'command_line' in obj['0']:\n obj['0']['command_line'] = self.replace_env_variables(obj['0']['command_line'])\n if 'arguments' in obj['0']:\n for i, arg in enumerate(obj['0']['arguments']):\n obj['0']['arguments'][i] = self.replace_env_variables(obj['0']['arguments'][i])\n\n try:\n observed_data = stix2.ObservedData(first_observed=first_observed,\n last_observed=last_observed,\n number_observed=1,\n objects=obj,\n labels='Process Created')\n return observed_data\n except Exception as e:\n if hasattr(e, 'message'):\n log.error(\"Unable to create Process Observable: {0} {1}\".format(e, e.message))\n traceback.print_exc(file=sys.stdout)\n else:\n log.error(\"Unable to create Process Observable: {0}\".format(e))\n 
traceback.print_exc(file=sys.stdout)\n return {}\n\n def create_file_obs(self, data, labels=None):\n if 'time' in data:\n first_observed = data['time']\n last_observed = data['time']\n else:\n first_observed = now\n last_observed = now\n if 'first_observed' in data:\n first_observed = data['first_observed']\n if 'last_observed' in data:\n last_observed = data['last_observed']\n\n counter = 1\n obj = {}\n file = {'type': 'file'}\n\n # Add Hashes\n hashes = self.get_hashes(data)\n if hashes:\n file['hashes'] = hashes\n\n # Add other fields\n if 'size' in data and data['size']:\n file['size'] = data['size']\n if 'name' in data and data['name']:\n file['name'] = data['name']\n if 'filepath' in data and data['filepath']:\n directory = {'type': 'directory'}\n directory['path'] = ntpath.dirname(data['filepath'])\n obj[str(counter)] = directory\n file['parent_directory_ref'] = str(counter)\n file['name'] = ntpath.basename(data['filepath'])\n counter += 1\n\n obj['0'] = file\n\n if 'artifacts' in data and data['artifacts']:\n if self.options.get(\"include_all_artifacts\"):\n for artifact in data['artifacts']:\n art = {'type': 'artifact', 'payload_bin': base64.b64encode(artifact.encode('utf-8'))}\n obj[str(counter)] = art\n contains_refs = obj['0'].get('contains_refs', [])\n contains_refs.append(str(counter))\n obj['0']['contains_refs'] = contains_refs\n counter += 1\n elif len(data['artifacts']) == 1:\n artifact = data['artifacts'][0]\n art = {'type': 'artifact', 'payload_bin': base64.b64encode(artifact.encode('utf-8'))}\n obj[str(counter)] = art\n contains_refs = obj['0'].get('contains_refs', [])\n contains_refs.append(str(counter))\n obj['0']['contains_refs'] = contains_refs\n counter += 1\n\n # run blacklist with original filepaths\n if self.is_blacklisted(obj):\n return {}\n\n # run blacklist with modified filepaths\n if not self.options.get('use_env_variables'):\n name_bak = obj['0']['name']\n path_bak = obj['1']['path']\n if 'name' in obj['0']:\n obj['0']['name'] = self.replace_env_variables(obj['0']['name'])\n if '1' in obj:\n obj['1']['path'] = self.replace_env_variables(obj['1']['path'])\n if self.is_blacklisted(obj):\n return {}\n if not self.options.get('use_env_variables'):\n obj['0']['name'] = name_bak\n obj['1']['path'] = path_bak\n\n try:\n observed_data = stix2.ObservedData(first_observed=first_observed,\n last_observed=last_observed,\n number_observed=1,\n objects=obj,\n labels=labels)\n return observed_data\n except Exception as e:\n if hasattr(e, 'message'):\n log.error(\"Unable to create File Observable: {0} {1}\".format(e, e.message))\n traceback.print_exc(file=sys.stdout)\n else:\n log.error(\"Unable to create File Observable: {0}\".format(e))\n traceback.print_exc(file=sys.stdout)\n return {}\n\n def create_directory_obs(self, data):\n obj = {}\n obj['0'] = data\n obj['0']['path'] = obj['0']['path']\n if self.is_blacklisted(obj):\n return {}\n obj['0']['path'] = self.replace_env_variables(obj['0']['path'])\n if self.is_blacklisted(obj):\n return {}\n try:\n observed_data = stix2.ObservedData(first_observed=now,\n last_observed=now,\n number_observed=1,\n objects=obj,\n labels='Directory Created or Read')\n return observed_data\n except Exception as e:\n if hasattr(e, 'message'):\n log.error(\"Unable to create Directory Observable: {0} {1}\".format(e, e.message))\n traceback.print_exc(file=sys.stdout)\n else:\n log.error(\"Unable to create Directory Observable: {0}\".format(e))\n traceback.print_exc(file=sys.stdout)\n return {}\n\n def create_mutex_obs(self, data):\n if not 
data['name']:\n return {}\n if 'time' in data:\n first_observed = data['time']\n last_observed = data['time']\n else:\n first_observed = now\n last_observed = now\n if 'first_observed' in data:\n first_observed = data['first_observed']\n if 'last_observed' in data:\n last_observed = data['last_observed']\n obj = {0: {'type': 'mutex', 'name': data['name']}}\n if self.is_blacklisted(obj[0]):\n return {}\n try:\n observed_data = stix2.ObservedData(first_observed=first_observed,\n last_observed=last_observed,\n number_observed=1,\n objects=obj,\n labels='Mutex Created')\n return observed_data\n except Exception as e:\n if hasattr(e, 'message'):\n log.error(\"Unable to create Mutex Observable: {0} {1}\".format(e, e.message))\n traceback.print_exc(file=sys.stdout)\n else:\n log.error(\"Unable to create Mutex Observable: {0}\".format(e))\n traceback.print_exc(file=sys.stdout)\n return {}\n\n def create_regkey_obs(self, regkey, values):\n if 'time' in regkey:\n first_observed = regkey['time']\n last_observed = regkey['time']\n else:\n first_observed = now\n last_observed = now\n if 'first_observed' in regkey:\n first_observed = regkey['first_observed']\n if 'last_observed' in regkey:\n last_observed = regkey['last_observed']\n\n values = []\n obj = {\n 'type': 'windows-registry-key',\n 'key': regkey['key'],\n }\n\n values = []\n for key, value in self.reg_values.items():\n if key == regkey['key']:\n for val, data in value.items():\n for d in data:\n value_data = base64.b64encode(\n d['data'].encode('utf-8')) if d['datatype'] == 'REG_BINARY' else d['data']\n v = {'name': val, 'data': value_data, 'data_type': d['datatype']}\n values.append(v)\n if values:\n obj['values'] = values\n\n if self.is_blacklisted(obj):\n return {}\n\n try:\n observed_data = stix2.ObservedData(first_observed=first_observed,\n last_observed=last_observed,\n number_observed=1,\n objects={0: obj},\n labels=regkey['label'])\n return observed_data\n except Exception as e:\n if hasattr(e, 'message'):\n log.error(\"Unable to create RegistryKey Observable: {0} {1}\".format(e, e.message))\n traceback.print_exc(file=sys.stdout)\n else:\n log.error(\"Unable to create RegistryKey Observable: {0}\".format(e))\n traceback.print_exc(file=sys.stdout)\n return {}\n\n def create_connection_obs(self, data):\n if self.is_blacklisted(data):\n return {}\n if 'time' in data:\n first_observed = data['time']\n last_observed = data['time']\n else:\n first_observed = now\n last_observed = now\n if 'first_observed' in data:\n first_observed = data['first_observed']\n if 'last_observed' in data:\n last_observed = data['last_observed']\n try:\n observed_data = stix2.ObservedData(first_observed=first_observed,\n last_observed=last_observed,\n number_observed=1,\n objects=data,\n labels='Network Connection')\n return observed_data\n except Exception as e:\n if hasattr(e, 'message'):\n log.error(\"Unable to create Network Connection Observable: {0} {1}\".format(e, e.message))\n traceback.print_exc(file=sys.stdout)\n else:\n log.error(\"Unable to create Network Connection Observable: {0}\".format(e))\n traceback.print_exc(file=sys.stdout)\n return {}\n\n def create_http_connection_obs(self, data):\n if self.is_blacklisted(data):\n return {}\n if 'time' in data:\n first_observed = data['time']\n last_observed = data['time']\n else:\n first_observed = now\n last_observed = now\n if 'first_observed' in data:\n first_observed = data['first_observed']\n if 'last_observed' in data:\n last_observed = data['last_observed']\n try:\n observed_data = 
stix2.ObservedData(first_observed=first_observed,\n last_observed=last_observed,\n number_observed=1,\n objects=data,\n labels='HTTP Connection')\n return observed_data\n except Exception as e:\n if hasattr(e, 'message'):\n log.error(\"Unable to create HTTP Connection Observable: {0} {1}\".format(e, e.message))\n traceback.print_exc(file=sys.stdout)\n else:\n log.error(\"Unable to create HTTP Connection Observable: {0}\".format(e))\n traceback.print_exc(file=sys.stdout)\n return {}\n\n def create_domain_obs(self, data):\n if self.is_blacklisted(data['0']):\n return {}\n try:\n observed_data = stix2.ObservedData(first_observed=now,\n last_observed=now,\n number_observed=1,\n objects=data,\n labels='Domain')\n return observed_data\n except Exception as e:\n if hasattr(e, 'message'):\n log.error(\"Unable to create Domain Observable: {0} {1}\".format(e, e.message))\n traceback.print_exc(file=sys.stdout)\n else:\n log.error(\"Unable to create Domain Observable: {0}\".format(e))\n traceback.print_exc(file=sys.stdout)\n return {}\n\n ###### helper functions #######\n def get_time(self, time):\n if not time:\n return None\n try:\n return time.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n except AttributeError:\n None\n try:\n return datetime.datetime.fromtimestamp(time, tz=pytz.utc).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n except TypeError:\n None\n return None\n\n def update_timestamps(self, obj, obs):\n if not 'first_observed' in obj:\n obj['first_observed'] = obs['time']\n obj['last_observed'] = obs['time']\n return obj\n elif datetime.datetime.strptime(obj['first_observed'], '%Y-%m-%dT%H:%M:%S.%fZ') > datetime.datetime.strptime(\n obs['time'], '%Y-%m-%dT%H:%M:%S.%fZ'):\n obj['last_observed'] = obj['first_observed']\n obj['first_observed'] = obs['time']\n return obj\n elif datetime.datetime.strptime(obj['last_observed'], '%Y-%m-%dT%H:%M:%S.%fZ') < datetime.datetime.strptime(\n obs['time'], '%Y-%m-%dT%H:%M:%S.%fZ'):\n obj['last_observed'] = obs['time']\n return obj\n return None\n\n def get_hashes(self, data):\n hashes = {}\n if 'md5' in data and data['md5']:\n hashes['MD5'] = data['md5']\n if 'sha1' in data and data['sha1']:\n hashes['SHA-1'] = data['sha1']\n if 'sha256' in data and data['sha256']:\n hashes['SHA-256'] = data['sha256']\n if 'sha512' in data and data['sha512']:\n hashes['SHA-512'] = data['sha512']\n if 'ssdeep' in data and data['ssdeep'] is not None:\n ssdeephash = Hash()\n ssdeephash.fuzzy_hash_value = data['ssdeep']\n ssdeephash.type_ = 'SSDEEP'\n hashes['SSDEEP'] = ssdeephash\n return hashes\n\n def replace_env_variables(self, path):\n if not self.options.get(\"use_env_variables\"):\n return path\n\n username = self.options.get(\"username\")\n new_path = path\n if re.search(r'C:\\\\Users\\\\' + username + r'\\\\AppData\\\\Roaming\\\\', new_path, flags=re.IGNORECASE):\n new_path = re.sub(r'C:\\\\Users\\\\' + username + r'\\\\AppData\\\\Roaming\\\\',\n '%APPDATA%\\\\\\\\',\n new_path,\n flags=re.IGNORECASE)\n if re.search(r'C:\\\\Users\\\\' + username + r'\\\\AppData\\\\Roaming$', new_path, flags=re.IGNORECASE):\n new_path = re.sub(r'C:\\\\Users\\\\' + username + r'\\\\AppData\\\\Roaming',\n '%APPDATA%',\n new_path,\n flags=re.IGNORECASE)\n if re.search(r'C:\\\\Users\\\\' + username + r'\\\\AppData\\\\Local\\\\', new_path, flags=re.IGNORECASE):\n new_path = re.sub(r'C:\\\\Users\\\\' + username + r'\\\\AppData\\\\Local\\\\',\n '%LOCALAPPDATA%\\\\\\\\',\n new_path,\n flags=re.IGNORECASE)\n if re.search(r'C:\\\\Users\\\\' + username + r'\\\\AppData\\\\Local$', new_path, 
flags=re.IGNORECASE):\n new_path = re.sub(r'C:\\\\Users\\\\' + username + r'\\\\AppData\\\\Local',\n '%LOCALAPPDATA%',\n new_path,\n flags=re.IGNORECASE)\n if re.search(r'C:\\\\Users\\\\' + username + r'\\\\AppData\\\\Temp\\\\', new_path, flags=re.IGNORECASE):\n new_path = re.sub(r'C:\\\\Users\\\\' + username + r'\\\\AppData\\\\Temp\\\\',\n '%TEMP%\\\\\\\\',\n new_path,\n flags=re.IGNORECASE)\n if re.search(r'C:\\\\Users\\\\' + username + r'\\\\AppData\\\\Temp$', new_path, flags=re.IGNORECASE):\n new_path = re.sub(r'C:\\\\Users\\\\' + username + r'\\\\AppData\\\\Temp', '%TEMP%', new_path, flags=re.IGNORECASE)\n if re.search(r'C:\\\\Users\\\\' + username, new_path, flags=re.IGNORECASE):\n new_path = re.sub(r'C:\\\\Users\\\\' + username, '%USERPROFILE%', new_path, flags=re.IGNORECASE)\n if re.search(username, new_path, flags=re.IGNORECASE):\n new_path = re.sub(username, '%USERNAME%', new_path, flags=re.IGNORECASE)\n if re.search(r'C:\\\\Program Files\\\\', new_path, flags=re.IGNORECASE):\n new_path = re.sub(r'C:\\\\Program Files\\\\', '%PROGRAMFILES%\\\\\\\\', new_path, flags=re.IGNORECASE)\n if re.search(r'C:\\\\Program Files$', new_path, flags=re.IGNORECASE):\n new_path = re.sub(r'C:\\\\Program Files$', '%PROGRAMFILES%', new_path, flags=re.IGNORECASE)\n if re.search(r'C:\\\\Program Files \\(x86\\)\\\\', new_path, flags=re.IGNORECASE):\n new_path = re.sub(r'C:\\\\Program Files \\(x86\\)\\\\', '%PROGRAMFILES(X86)%\\\\\\\\', new_path, flags=re.IGNORECASE)\n if re.search(r'C:\\\\Program Files \\(x86\\)$', new_path, flags=re.IGNORECASE):\n new_path = re.sub(r'C:\\\\Program Files \\(x86\\)$', '%PROGRAMFILES(X86)%', new_path, flags=re.IGNORECASE)\n if re.search(username.upper()[:6] + r'~\\d', new_path):\n new_path = re.sub(username.upper()[:6] + r'~\\d', username, new_path)\n new_path = self.replace_env_variables(new_path)\n if re.search(re.escape(self.target_name), new_path):\n new_path = re.sub(self.target_name, '%TARGETFILE%', new_path)\n return new_path\n\n def remove_observables(self):\n ###### files\n # remove directories marked as file observables\n for files in itertools.combinations(self.files, 2):\n try:\n if 'filepath' in files[0] and 'filepath' in files[1] and re.match(re.escape(\n files[0]['filepath']), files[1]['filepath']) and files[0] in self.files:\n self.files.remove(files[0])\n #print(\"Removed folder:\", files[0]['filepath'])\n elif 'filepath' in files[0] and 'filepath' in files[1] and re.match(re.escape(\n files[1]['filepath']), files[0]['filepath']) and files[1] in self.files:\n self.files.remove(files[1])\n #print(\"Removed folder:\", files[1]['filepath'])\n except ValueError:\n None\n\n marked_for_removal = []\n for file in self.files:\n for deleted in self.deleted_files:\n if 'filepath' in file and file['filepath'] == deleted['filepath']:\n if 'last_observed' in file and datetime.datetime.strptime(\n file['last_observed'], '%Y-%m-%dT%H:%M:%S.%fZ') < datetime.datetime.strptime(\n deleted['time'], '%Y-%m-%dT%H:%M:%S.%fZ'):\n marked_for_removal.append(\n (file, datetime.datetime.strptime(deleted['time'], '%Y-%m-%dT%H:%M:%S.%fZ')))\n elif 'time' in file and datetime.datetime.strptime(\n file['time'], '%Y-%m-%dT%H:%M:%S.%fZ') < datetime.datetime.strptime(\n deleted['time'], '%Y-%m-%dT%H:%M:%S.%fZ'):\n marked_for_removal.append(\n (file, datetime.datetime.strptime(deleted['time'], '%Y-%m-%dT%H:%M:%S.%fZ')))\n elif 'time' not in file and 'last_observed' not in file:\n # TODO: unsure if file should be removed; no timestamps to compare - make it switchable with parameter?\n 
marked_for_removal.append(\n (file, datetime.datetime.strptime(deleted['time'], '%Y-%m-%dT%H:%M:%S.%fZ')))\n for deleted in self.deleted_directories:\n if 'filepath' in file and ntpath.dirname(file['filepath']) == ntpath.dirname(deleted['dirpath']):\n if 'last_observed' in file and datetime.datetime.strptime(\n file['last_observed'], '%Y-%m-%dT%H:%M:%S.%fZ') < datetime.datetime.strptime(\n deleted['time'], '%Y-%m-%dT%H:%M:%S.%fZ'):\n marked_for_removal.append(\n (file, datetime.datetime.strptime(deleted['time'], '%Y-%m-%dT%H:%M:%S.%fZ')))\n elif 'time' in file and datetime.datetime.strptime(\n file['time'], '%Y-%m-%dT%H:%M:%S.%fZ') < datetime.datetime.strptime(\n deleted['time'], '%Y-%m-%dT%H:%M:%S.%fZ'):\n marked_for_removal.append(\n (file, datetime.datetime.strptime(deleted['time'], '%Y-%m-%dT%H:%M:%S.%fZ')))\n elif 'time' not in file and 'last_observed' not in file:\n # TODO: unsure if file should be removed; no timestamps to compare - make it switchable with parameter?\n marked_for_removal.append(\n (file, datetime.datetime.strptime(deleted['time'], '%Y-%m-%dT%H:%M:%S.%fZ')))\n\n # check if marked files are seen again after their removal; if they are, keep the observable!\n for index, marked in enumerate(marked_for_removal):\n #print(\"Marked for removal:\", marked[0]['filepath'])\n for file in self.files:\n if 'filepath' in file and 'filepath' in marked[0] and marked[0]['filepath'] == file['filepath']:\n if 'last_observed' in file and datetime.datetime.strptime(\n file['last_observed'],\n '%Y-%m-%dT%H:%M:%S.%fZ') > marked[1] and marked in marked_for_removal:\n marked_for_removal.remove(marked)\n #print(\"NOT removed:\", file['filepath'])\n elif 'time' in file and datetime.datetime.strptime(\n file['time'], '%Y-%m-%dT%H:%M:%S.%fZ') > marked[1] and marked in marked_for_removal:\n marked_for_removal.remove(marked)\n #print(\"NOT removed:\", file['filepath'])\n\n # remove marked observables\n for file in self.files[:]:\n for marked in marked_for_removal:\n if file in self.files and 'filepath' in file and 'filepath' in marked[0] and marked[0][\n 'filepath'] == file['filepath']:\n self.files.remove(file)\n #print(\"Removed:\", file['filepath'])\n\n ###### registry\n marked_for_removal = []\n for obj in self.reg_keys:\n for deleted in self.deleted_reg_keys:\n if deleted['key'] == obj['key']:\n if 'last_observed' in obj and datetime.datetime.strptime(\n obj['last_observed'], '%Y-%m-%dT%H:%M:%S.%fZ') < datetime.datetime.strptime(\n deleted['time'], '%Y-%m-%dT%H:%M:%S.%fZ'):\n marked_for_removal.append(\n (obj['key'], datetime.datetime.strptime(deleted['time'], '%Y-%m-%dT%H:%M:%S.%fZ')))\n elif 'time' in obj and datetime.datetime.strptime(\n obj['time'], '%Y-%m-%dT%H:%M:%S.%fZ') < datetime.datetime.strptime(\n deleted['time'], '%Y-%m-%dT%H:%M:%S.%fZ'):\n marked_for_removal.append(\n (obj['key'], datetime.datetime.strptime(deleted['time'], '%Y-%m-%dT%H:%M:%S.%fZ')))\n elif 'time' not in obj and 'last_observed' not in obj:\n # TODO: unsure if obj should be removed; no timestamps to compare - make it switchable with parameter?\n marked_for_removal.append(\n (obj['key'], datetime.datetime.strptime(deleted['time'], '%Y-%m-%dT%H:%M:%S.%fZ')))\n # check timestamps; if object is still seen after its removal event, keep the observable!\n for index, marked in enumerate(marked_for_removal):\n #print(\"Marked for removal:\", marked[0])\n for obj in self.reg_keys:\n if marked[0] == obj['key']:\n if 'last_observed' in obj and datetime.datetime.strptime(\n obj['last_observed'], '%Y-%m-%dT%H:%M:%S.%fZ') > 
marked[1] and marked in marked_for_removal:\n marked_for_removal.remove(marked)\n #print(\"NOT removed:\", obj['key'])\n elif 'time' in obj and datetime.datetime.strptime(\n obj['time'], '%Y-%m-%dT%H:%M:%S.%fZ') > marked[1] and marked in marked_for_removal:\n marked_for_removal.remove(marked)\n #print(\"NOT removed:\", obj['key'])\n # remove marked observables\n for obj in self.reg_keys[:]:\n for marked in marked_for_removal:\n if marked[0] == obj['key'] and obj in self.reg_keys:\n self.reg_keys.remove(obj)\n #print(\"Removed:\", obj['key'])\n #pprint(self.deleted_reg_values)\n\n # # same procedure with reg values\n # marked_for_removal = []\n # for k1, obj in self.reg_values.items():\n # for k2, del_obj in self.deleted_reg_values.items():\n # if k1 == k2:\n # pprint(del_obj)\n # # TODO!!\n\n def load_blacklist(self):\n try:\n blacklist_file = open(self.options.get(\"blacklist\"), 'r')\n self.blacklist = yaml.load(blacklist_file)\n blacklist_file.close()\n log.info(\"blacklist loaded: {0}\".format(self.options.get(\"blacklist\")))\n except Exception as e:\n self.blacklist = {}\n log.warning(\"Invalid or no blacklist found at {0}: {1}\".format(self.options.get(\"blacklist\"), e))\n\n # checks if a domain is known for given ip, then checks if the domain is blacklisted\n def check_ip_domain(self, ip):\n for domain in self.domains:\n for bl_domain in self.blacklist.get('Domain', []):\n for i in range(1, len(domain.keys())):\n if (re.match(bl_domain, domain['0']['value']) and ip == domain[str(i)]['value']):\n return True\n return False\n\n # checks if a given observable is blacklisted\n def is_blacklisted(self, obj):\n if 'type' in obj and obj['type'] == 'domain-name':\n for domain in self.blacklist.get('Domain', []):\n if re.match(domain, obj['value'], flags=re.IGNORECASE):\n return True\n #print('- ' + re.escape(obj['value']))\n if '0' in obj and obj['0']['type'] == 'network-traffic':\n if '1' in obj:\n if self.check_ip_domain(obj['1']['value']):\n return True\n for bl_obj in self.blacklist.get('NetworkTraffic', []):\n if 'dst_addr' in bl_obj and '1' in obj and re.match(bl_obj['dst_addr'], obj['1']['value']):\n if 'dst_port' in bl_obj:\n if re.match(str(bl_obj['dst_port']), str(obj['0']['dst_port'])):\n return True\n else:\n continue\n else:\n return True\n #print('- dst_addr: ' + re.escape(obj['1']['value']) + ' dst_port: ' + str(obj['0']['dst_port']))\n if 'type' in obj and obj['type'] == 'windows-registry-key':\n for bl_obj in self.blacklist.get('RegistryKey', []):\n if self.traverse_bl(obj, bl_obj):\n return True\n #print('- key: ' + re.escape(obj['key']))\n if 'type' in obj and obj['type'] == 'mutex':\n for bl_obj in self.blacklist.get('Mutex', []):\n if re.match(bl_obj, obj['name'], flags=re.IGNORECASE):\n return True\n #print('- ' + re.escape(obj['name']))\n if '0' in obj and obj['0']['type'] == 'file':\n for bl_obj in self.blacklist.get('File', []):\n if 'path' in bl_obj:\n if not '1' in obj or not re.match(bl_obj['path'], obj['1']['path'], flags=re.IGNORECASE):\n continue\n if 'name' in bl_obj:\n if not '0' in obj or not re.match(bl_obj['name'], obj['0']['name'], flags=re.IGNORECASE):\n continue\n return True\n #if '1' in obj:\n # print('- path: ' + re.escape(obj['1']['path']) + '\\n name: ' + re.escape(obj['0']['name']))\n #else:\n # print('- name: ' + re.escape(obj['0']['name']))\n if '0' in obj and obj['0']['type'] == 'directory':\n for bl_obj in self.blacklist.get('Directory', []):\n if re.match(bl_obj, obj['0']['path'], flags=re.IGNORECASE):\n return True\n for bl_obj in 
self.blacklist.get('File', []):\n if 'path' in bl_obj and re.match(bl_obj['path'], obj['0']['path'], flags=re.IGNORECASE):\n return True\n #print('- ' + re.escape(obj['0']['path']) + '$')\n if '0' in obj and obj['0']['type'] == 'process':\n for bl_obj in self.blacklist.get('Process', []):\n if self.traverse_bl(obj['0'], bl_obj):\n return True\n #print(obj)\n return False\n\n def traverse_bl(self, obj, bl, at_least_one_key_in_bl=False):\n \"\"\"\n implements a recursive blacklist matching;\n in the blacklist a dict can be defined, which has to match structure of the observable\n \"\"\"\n # create a dict used for checking if every property on BL-item is matched\n bl_matched = {}\n if type(bl) is dict:\n for key in bl:\n bl_matched[key] = False\n\n if type(obj) is dict:\n for key in obj.keys():\n if key in bl:\n at_least_one_key_in_bl = True\n if type(bl[key]) == dict:\n result = self.traverse_bl(obj[key], bl[key], True)\n if not result:\n return False\n bl_matched[key] = True\n elif type(bl[key]) is str:\n try:\n result = re.match(bl[key], obj[key], flags=re.IGNORECASE)\n if not result:\n return False\n bl_matched[key] = True\n except:\n return False\n elif type(bl[key]) is int:\n try:\n result = bl[key] == obj[key]\n if not result:\n return False\n bl_matched[key] = True\n except:\n return False\n elif type(bl[key]) is list:\n for ele1 in bl[key]:\n matched = False\n for ele2 in obj[key]:\n result = self.traverse_bl(ele2, ele1, True)\n if result:\n matched = True\n if not matched:\n return False\n bl_matched[key] = True\n if not at_least_one_key_in_bl:\n return False\n # if not every property on BL-item is matched, return false\n for key, matched in bl_matched.items():\n if matched is False:\n return False\n elif type(obj) is str:\n try:\n result = re.match(bl, obj, flags=re.IGNORECASE)\n if not result:\n return False\n except:\n return False\n return True\n\n def get_terminated_pids(self, debug):\n \"\"\"\n Parses the debug object in order to determine which pids have terminated\n \"\"\"\n for line in debug['log']:\n pids = re.findall(r\"Process with pid (?P\\d+) has terminated\", line)\n for pid in pids:\n self.terminated_pids.append(pid)\n self.terminated_pids = [int(i) for i in self.terminated_pids]\n\n\n# following two functions taken from: https://stackoverflow.com/a/319293\ndef is_valid_ipv4(ip):\n \"\"\"\n Validates IPv4 addresses.\n \"\"\"\n pattern = re.compile(\n r\"\"\"\n ^\n (?:\n # Dotted variants:\n (?:\n # Decimal 1-255 (no leading 0's)\n [3-9]\\d?|2(?:5[0-5]|[0-4]?\\d)?|1\\d{0,2}\n |\n 0x0*[0-9a-f]{1,2} # Hexadecimal 0x0 - 0xFF (possible leading 0's)\n |\n 0+[1-3]?[0-7]{0,2} # Octal 0 - 0377 (possible leading 0's)\n )\n (?: # Repeat 0-3 times, separated by a dot\n \\.\n (?:\n [3-9]\\d?|2(?:5[0-5]|[0-4]?\\d)?|1\\d{0,2}\n |\n 0x0*[0-9a-f]{1,2}\n |\n 0+[1-3]?[0-7]{0,2}\n )\n ){0,3}\n |\n 0x0*[0-9a-f]{1,8} # Hexadecimal notation, 0x0 - 0xffffffff\n |\n 0+[0-3]?[0-7]{0,10} # Octal notation, 0 - 037777777777\n |\n # Decimal notation, 1-4294967295:\n 429496729[0-5]|42949672[0-8]\\d|4294967[01]\\d\\d|429496[0-6]\\d{3}|\n 42949[0-5]\\d{4}|4294[0-8]\\d{5}|429[0-3]\\d{6}|42[0-8]\\d{7}|\n 4[01]\\d{8}|[1-3]\\d{0,9}|[4-9]\\d{0,8}\n )\n $\n \"\"\", re.VERBOSE | re.IGNORECASE)\n return pattern.match(ip) is not None\n\n\ndef is_valid_ipv6(ip):\n \"\"\"\n Validates IPv6 addresses.\n \"\"\"\n pattern = re.compile(\n r\"\"\"\n ^\n \\s* # Leading whitespace\n (?!.*::.*::) # Only a single whildcard allowed\n (?:(?!:)|:(?=:)) # Colon iff it would be part of a wildcard\n (?: # Repeat 6 times:\n 
[0-9a-f]{0,4} # A group of at most four hexadecimal digits\n (?:(?<=::)|(? 0:\n image_diff = x[i*n:(i+1)*n] - x[:n]\n image_diff /= np.linalg.norm(image_diff, axis=-1, keepdims=True)\n similarity = (image_diff @ data[\"embedding\"].T)[0]\n\n max_indices = np.argsort(similarity)[::-1][:10]\n print(\"\\n---\", merge_ratios[i], \"---\")\n print(np.array(data[\"word\"])[max_indices].tolist())\n print((similarity[max_indices]*100).astype(np.int32).tolist())\n\n fig.legend()\n \n output_filename = f\"{CACHE_DIR}/output/01_merge_embedding.png\"\n fig.savefig(output_filename)\n\nif __name__ == \"__main__\":\n with stub.run():\n main.call()\n os.makedirs(\"output\", exist_ok=True)\n subprocess.run(\n f'modal nfs get model-cache-vol output/01_merge_* .', shell=True)\n","repo_name":"koshian2/ai-art-book","sub_path":"modal_codes/cp05/01_merge_embedding.py","file_name":"01_merge_embedding.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"90"} +{"seq_id":"4887081454","text":"from sklearn.base import BaseEstimator, TransformerMixin\nfrom imblearn.over_sampling import SMOTE\nimport pandas as pd\n\nclass TimeTransformer(BaseEstimator, TransformerMixin):\n # transforms Date column into date components to be used in the model\n \n def __init__(self):\n \n return\n \n def fit(self, X, y=None):\n\n \n # always copy!\n X = X.copy()\n \n return self\n\n def transform(self, X, y=None):\n\n # always copy!\n X_ = X.copy()\n\n # convert date to datetime\n X_[\"Date\"] = pd.to_datetime(X_[\"Date\"], infer_datetime_format=True, dayfirst=False)\n \n # creates new dataframe to store dates\n new = pd.DataFrame()\n\n new['day'] = X_['Date'].dt.day\n new['month'] = X_['Date'].dt.month\n new['year'] = X_['Date'].dt.year\n new['hour'] = X_['Date'].dt.hour\n new['day of the week'] = X_['Date'].dt.weekday\n \n return new\n \nclass TimeTransformer2(BaseEstimator, TransformerMixin):\n # transforms Date column into date components to be used in the model v2.0\n \n def __init__(self):\n \n return\n \n def fit(self, X, y=None):\n\n \n # always copy!\n X = X.copy()\n \n return self\n\n def transform(self, X, y=None):\n\n # always copy!\n X_ = X.copy()\n\n # convert date to datetime\n X_[\"Date\"] = pd.to_datetime(X_[\"Date\"], infer_datetime_format=True, dayfirst=False)\n \n # creates new dataframe to store dates\n new = pd.DataFrame()\n\n new['quarter'] = X_['Date'].dt.quarter\n new['hour'] = X_['Date'].dt.hour\n new['day of the week'] = X_['Date'].dt.weekday\n\n # below are some of the features that we stopped considering after testing \n\n #new['day'] = X_['Date'].dt.day\n #new['month'] = X_['Date'].dt.month\n #new['year'] = X_['Date'].dt.year\n \n return new\n \n\n\nclass BoolTransformer(BaseEstimator, TransformerMixin):\n # Fills missing values with False and converts boolean values to numeric\n \n def __init__(self):\n \n return\n \n def fit(self, X, y=None):\n\n \n # always copy!\n X = X.copy()\n \n return self\n\n def transform(self, X, y=None):\n\n # always copy!\n X_ = X.copy()\n\n X_['Part of a policing operation'] = X_['Part of a policing operation'].fillna(value=False)\n X_['Part of a policing operation'] = X_['Part of a policing operation'] * 1.0\n \n return X_\n \nclass lat_lon_imputer(BaseEstimator, TransformerMixin):\n # Imputes missing values in the Latitude and Longitude Columns\n \n def __init__(self):\n \n return\n \n def fit(self, X, y=None):\n # The fit function creates a dictionary that has the avg latitude and longitude 
for each station\n\n # always copy!\n df_impute = X[[\"Latitude\", \"Longitude\", \"station\"]].copy()\n\n station_list = list(df_impute[\"station\"].unique())\n\n self.station_dict = {}\n for station in station_list:\n\n if (station != 'south-yorkshire') & (station != 'nottinghamshire') :\n avg_lat = df_impute.loc[df_impute[\"station\"]==station, \"Latitude\"].mean()\n avg_lon = df_impute.loc[df_impute[\"station\"]==station, \"Longitude\"].mean()\n elif station == 'south-yorkshire':\n # since south-yorkshire has no latitude or longitude values in the data we are using the closest county (west-yorkshire)\n avg_lat = df_impute.loc[df_impute[\"station\"]=='west-yorkshire', \"Latitude\"].mean()\n avg_lon = df_impute.loc[df_impute[\"station\"]=='west-yorkshire', \"Longitude\"].mean()\n elif station == 'nottinghamshire':\n # since nottinghamshire has no latitude or longitude values in the data we are using the closest county (derbyshire)\n avg_lat = df_impute.loc[df_impute[\"station\"]=='derbyshire', \"Latitude\"].mean()\n avg_lon = df_impute.loc[df_impute[\"station\"]=='derbyshire', \"Longitude\"].mean()\n\n self.station_dict[station] = {'lat': avg_lat, 'lon': avg_lon}\n\n return self\n\n def transform(self, X, y=None):\n # The transform function uses the avg_lat and avg_lon dicts created while fitting to fill missing values\n\n\n # always copy!\n df_impute = X[[\"Latitude\", \"Longitude\", \"station\"]].copy()\n\n for station in self.station_dict:\n df_impute.loc[df_impute[\"station\"]==station, \"Latitude\"] = df_impute.loc[df_impute[\"station\"]==station, \"Latitude\"].fillna(value=self.station_dict[station]['lat'])\n df_impute.loc[df_impute[\"station\"]==station, \"Longitude\"] = df_impute.loc[df_impute[\"station\"]==station, \"Longitude\"].fillna(value=self.station_dict[station]['lon'])\n\n return df_impute[[\"Latitude\", \"Longitude\"]].copy()\n \n\nclass Group_Age_Range(BaseEstimator, TransformerMixin):\n # Groups Age Ranges 'under 10' and '10-17' into the new category 'under 17'\n \n def __init__(self):\n \n return\n \n def fit(self, X, y=None):\n\n # always copy!\n X = X.copy()\n \n return self\n\n def transform(self, X, y=None):\n\n # always copy!\n \n X_ = X.copy()\n\n X_['Age range'] = X_['Age range'].astype('object')\n X_.loc[(X_['Age range'] == 'under 10') | (X_['Age range'] == '10-17'), 'Age range'] = 'under 17'\n X_['Age range'] = X_['Age range'].astype('category').cat.as_ordered().cat.reorder_categories(['under 17', '18-24', '25-34', 'over 34'], ordered=True)\n\n return X_.copy()\n\n\nclass Group_Ethnicity(BaseEstimator, TransformerMixin):\n # Groups Mixed ethnicity into Other\n \n def __init__(self):\n \n return\n \n def fit(self, X, y=None):\n\n # always copy!\n X = X.copy()\n \n return self\n\n def transform(self, X, y=None):\n\n # always copy!\n X_ = X.copy()\n\n X_.loc[(X_['Officer-defined ethnicity'] == 'Mixed'), 'Officer-defined ethnicity'] = 'Other'\n\n return X_.copy()\n","repo_name":"joaobssa/ldsa_capstone","sub_path":"transformers.py","file_name":"transformers.py","file_ext":"py","file_size_in_byte":6134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"71690687658","text":"import os\nimport shutil\nfrom mutagen.mp3 import MP3\n\n# audio = MP3(\"A Kabaria (Title Song).mp3\")\n# # print (\"Track: \" + audio.get(\"TIT2\").text[0])\n# # print (\"Encoded By: \" + audio.get(\"TENC\").text[0])\n# print(audio.get(\"TALB\").text[0])\n\nfor i in os.listdir(os.getcwd()):\n if i.endswith(\".mp3\"):\n print(i)\n audio 
= MP3(i)\n print(audio.get(\"TALB\").text[0])\n directory = audio.get(\"TALB\").text[0]\n if not os.path.exists(directory):\n os.makedirs(directory)\n target = directory+\"/\"+i\n shutil.move(i, target)\n continue\n else:\n continue","repo_name":"lipun12ka4/SongsPKRipper","sub_path":"source/Move_Mp3_To_Album_Folder.py","file_name":"Move_Mp3_To_Album_Folder.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18427282619","text":"S = input()\nL = len(S)\nACGT = ['A', 'C', 'G', 'T']\nans = 0\n\nfor i in range(L):\n for j in range(i, L):\n sub_S = S[i:j+1]\n if all([s in ACGT for s in sub_S]):\n if len(sub_S) > ans:\n ans = len(sub_S)\n\nprint(ans)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03086/s826591190.py","file_name":"s826591190.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"38671490940","text":"#!/usr/bin/env python3\n\n\"\"\"Calculate deposit percent yield based on time period.\n\nImagine your friend wants to put money on a deposit.\nHe has got many offers from different banks:\n- First bank declares +A% each day;\n- Second bank promises +B% each month;\n- Third bank offers +C% by the end of the year;\n- The 4th bank promotes +D% in a 10-year term;\n- ... and so on ...\n\nYour friend gets a terrible headache calculating all this stuff,\nand asks you to help checking everything. You quickly realize\nit is a common task and having a simple script is a great idea.\n\nLet's implement this.\n\nA simplified task:\nGiven the SUM amount of money, and PERCENT yield promised in a\nFIXED_PERIOD of time, calculate the TOTAL equivalent of money\nin a SET_PERIOD of time.\n\nMath formula:\np = PERCENT / 100\nTOTAL = SUM * ((1 + p) ** (SET_PERIOD / FIXED_PERIOD))\n\"\"\"\n\n\n# TODO: add lines to calculate yields for some common periods\n# of time (e.g. 1 month, 1 year, 5 years, 10 years)\n# TODO: change the script to output the 1-year percent yield\n# as well\n# TODO: (extra) Output only percents if the initial SUM is\n# not known at the moment the script is run\n\n\nUSAGE = \"\"\"USAGE: basic mode: {script} initial_sum percent fixed_period set_period\n\\tUSAGE: common_period mode: {script} initial_sum percent fixed_period -cnpd\n\\tUSAGE: unknown_init_sum mode: {script} percent fixed_period set_period\n\n\\tCalculate deposit yield. 
See script source for more details.\n\"\"\"\nUSAGE = USAGE.strip()\n\n\ndef deposit(initial_sum, percent, fixed_period, set_period):\n    \"\"\"Calculate deposit yield.\"\"\"\n    per = percent / 100\n    growth = (1 + per) ** (set_period / fixed_period)\n\n    if(initial_sum != -1):\n        return initial_sum * growth\n    else:\n        return growth\n\n\ndef main(args):\n    \"\"\"Gets called when run as a script.\"\"\"\n\n    script_mode = \"basic\"\n\n    if len(args) == 4 + 1 and args[4] == \"-cnpd\":\n        # Common periods of time\n        # USAGE: {script} initial_sum percent\n        script_mode = \"common_period\"\n    elif len(args) == 3 + 1:\n        # unknown initial sum\n        # USAGE: {script} percent fixed_period set_period\n        script_mode = \"unknown_init_sum\"\n    elif len(args) != 4 + 1:\n        script_mode = \"error\"\n\n    # keep the script name for the usage message, then drop it from the argument list\n    script_name = args[0]\n    args = args[1:]\n\n    if(script_mode == \"basic\"):\n        print(\"-------Selected basic mode-------\\n\")\n        initial_sum, percent, fixed_period, set_period = map(float, args)\n        res = deposit(initial_sum, percent, fixed_period, set_period)\n        print(res)\n\n        print(\"1 Year:\")\n        res = deposit(initial_sum, percent, fixed_period, 365)\n        print(res)\n\n    elif(script_mode == \"common_period\"):\n        print(\"-------Selected common period mode-------\\n\")\n        # ignore the trailing -cnpd flag when converting the parameters\n        initial_sum, percent, fixed_period = map(float, args[:3])\n\n        print(\"1 Month:\")\n        res = deposit(initial_sum, percent, fixed_period, 31)\n        print(res)\n\n        print(\"1 Year:\")\n        res = deposit(initial_sum, percent, fixed_period, 365)\n        print(res)\n\n        print(\"5 Year:\")\n        res = deposit(initial_sum, percent, fixed_period, 1825)\n        print(res)\n\n        print(\"10 Year:\")\n        res = deposit(initial_sum, percent, fixed_period, 3650)\n        print(res)\n\n    elif(script_mode == \"unknown_init_sum\"):\n        print(\"-------Selected unknown initial sum mode-------\\n\")\n        percent, fixed_period, set_period = map(float, args)\n        res = deposit(-1, percent, fixed_period, set_period)\n        print(res)\n\n    if(script_mode == \"error\"):\n        exit(USAGE.format(script=script_name))\n\n\n    # same as\n    # initial_sum = float(args[0])\n    # percent = float(args[1])\n    # ...\n\n\n\n\nif __name__ == '__main__':\n    import sys\n\n    main(sys.argv)\n","repo_name":"ThisNicknameIsTaken/git_basiscs","sub_path":"project/Lab2_examples/02ex01-deposits.py","file_name":"02ex01-deposits.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"19454222395","text":"def baseline_prediction(data_x, svg_y, result_y):\n    import numpy as np\n    from scipy.ndimage import interpolation\n    import matplotlib.pyplot as plt\n\n    y = np.copy(svg_y)\n    x = np.copy(data_x)\n\n    p = 0\n    element_del = []\n    while p < (len(result_y) - 1):\n        temp = np.arange(result_y[p] - 20, result_y[p + 1] + 20)\n        element_del = np.append(element_del, temp)\n        p += 2\n    x = np.delete(x, element_del)\n    y = np.delete(y, element_del)\n\n\n    # Importing Linear Regression\n    from sklearn.linear_model import LinearRegression\n    # importing libraries for polynomial transform\n    from sklearn.preprocessing import PolynomialFeatures\n    # for creating pipeline\n    from sklearn.pipeline import Pipeline\n    # creating pipeline and fitting it on data\n    Input = [('polynomial', PolynomialFeatures(degree=4)), ('modal', LinearRegression())]\n    pipe = Pipeline(Input)\n    pipe.fit(x.reshape(-1, 1), y.reshape(-1, 1))\n\n    poly_pred = pipe.predict(data_x.reshape(-1, 1))\n    # sorting predicted values with respect to predictor\n    sorted_zip = sorted(zip(data_x, poly_pred))\n    x_poly, poly_pred = zip(*sorted_zip)\n\n    x_poly = np.asarray(x_poly, dtype=int)\n
poly_pred = np.asarray(poly_pred, dtype=float)\n    # reshape multi dimensional array to 1D array\n    poly_pred = np.reshape(poly_pred, (np.product(poly_pred.shape),))\n\n    plt.plot(data_x, svg_y)\n    plt.plot(data_x, poly_pred, color='orange',label='Baseline Prediction Result')\n    plt.scatter(x, y, s=1, color=\"red\")\n    plt.xlabel('Time', fontsize=10)\n    plt.ylabel('R.U', fontsize=10)\n    plt.legend()\n    plt.pause(3)\n    plt.close()\n    return poly_pred\n","repo_name":"jejessika/webappspr","sub_path":"baseline_predict.py","file_name":"baseline_predict.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"35661063515","text":"import numpy as np\nimport joblib\nfrom flask import Flask, request, jsonify, render_template\nfrom pandas import array\nimport os\n\n#create flask app\napp = Flask(__name__)\n\n# load the model from joblib\nchurn_model = joblib.load('pipe_model.joblib', 'r+')\n\n@app.route('/')\ndef home():\n    return render_template('index.html')\n\n@app.route('/predict', methods = [\"POST\"])\ndef predict():\n\n    Months_Inactive_12_mon = int(request.form['Months_Inactive_12_mon'])\n    Credit_Limit = float(request.form['Credit_Limit'])\n    Total_Revolving_Bal = float(request.form['Total_Revolving_Bal'])\n    Total_Trans_Amt = float(request.form['Total_Trans_Amt'])\n    Avg_Utilization_Ratio = float(request.form['Avg_Utilization_Ratio'])\n\n    array = np.array([[Months_Inactive_12_mon, Credit_Limit, Total_Revolving_Bal,\n                       Total_Trans_Amt, Avg_Utilization_Ratio]])\n\n    prediction = churn_model.predict(array)\n\n    return render_template('result.html', prediction_text_ = \"This customer belongs to cluster : {}\".format(prediction[0]))\n\n\nif __name__ == \"__main__\":\n    port = int(os.environ.get('PORT', 5000))\n    app.run(host=\"0.0.0.0\", threaded=True, port=port)","repo_name":"Himanshu-pardhi/Churn_prediction_ML","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"34942437934","text":"import DAN\nimport socket\nimport time\n# ServerURL = 'http://demo.iottalk.tw' #with no secure connection\n# mind which IoTtalk server URL or IP you use # https://goo.gl/6jtP41\nServerURL = 'https://5.iottalk.tw' # with SSL secure connection\n# ServerURL = 'https://Your_DomainName' #with SSL connection (IP can not be used with https)\nReg_addr = None #if None, Reg_addr = MAC address #(this is what DAN.py originally does :-)\n# Note that Reg_addr gets replaced by the following three lines!
# the mac_addr in DAN.py is NOT used\nmac_addr = 'location.IN' #+ str( random.randint(100,999 ) ) # put here for easy to modify :-)\n# To have every run of this program be recognized as the same Dummy_Device, hard-code mac_addr above instead of using a random number.\nReg_addr = mac_addr # Note that the mac_addr generated in DAN.py is always the same because it uses UUID!\nDAN.profile['dm_name'] = 'user_location' # you can change this but should also add the DM in server\nDAN.profile['df_list'] = ['location-IDF', 'final-result-ODF'] # Check IoTtalk to see what IDF/ODF the DM has\nDAN.profile['d_name'] = \"location.IN\" #+ str( random.randint(100,999 ) ) +\"_\"+ DAN.profile['dm_name'] # None\nDAN.device_registration_with_retry(ServerURL, Reg_addr)\nprint(\"dm_name is \", DAN.profile['dm_name'])\nprint(\"Server is \", ServerURL)\n# global gotInput, theInput, allDead ## the main program need not declare these global, but writing it is OK too\n\nsock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\nsock.bind((\"127.0.0.1\", 8195))\n\nwhile True:\n\ttry:\n\t\tdata, info = sock.recvfrom(1024)\n\t\tif data.decode(\"utf-8\") == \"exit\":\n\t\t\tprint(\"\\n\\nDeregister \" + DAN.profile['d_name'] + \" !!!\\n\", flush=True)\n\t\t\tDAN.deregister()\n\t\t\tsock.close()\n\t\t\tbreak\n\t\tvalue = data.decode(\"utf-8\")\n\t\tlatlon = value.split(\" \")\n\t\tDAN.push('location-IDF', latlon[0], latlon[1])\n\t\ttime.sleep(1)\n\t\tpull = DAN.pull('final-result-ODF')\n\t\tif pull != None:\n\t\t\tprint(pull[0])\n\texcept KeyboardInterrupt:\n\t\tprint(\"\\n\\nDeregister \" + DAN.profile['d_name'] + \" !!!\\n\", flush=True)\n\t\tDAN.deregister()\n\t\tsock.close()\n\t\tbreak","repo_name":"YungPingXu/IoT_finalproject","sub_path":"location-IDF.py","file_name":"location-IDF.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"73468055976","text":"#!/usr/bin/env python3\nimport requests\nimport sqlite3\nfrom bs4 import BeautifulSoup\nfrom io import BytesIO\nfrom zipfile import ZipFile\nfrom datetime import datetime\n\nzip_file = 'http://www1.caixa.gov.br/loterias/_arquivos/loterias/D_megase.zip'\nhtml_file = 'D_MEGA.HTM'\n\nwith requests.get(zip_file) as request:\n    if request.ok:\n        with ZipFile(BytesIO(request.content)).open(html_file) as html:\n            db = sqlite3.connect('megasena.db')\n            db.execute('''\n                DROP TABLE IF EXISTS polls;\n            ''')\n            db.execute('''\n                CREATE TABLE polls (\n                    id INTEGER NOT NULL PRIMARY KEY,\n                    date DATE NOT NULL,\n                    n1 INTEGER NOT NULL,\n                    n2 INTEGER NOT NULL,\n                    n3 INTEGER NOT NULL,\n                    n4 INTEGER NOT NULL,\n                    n5 INTEGER NOT NULL,\n                    n6 INTEGER NOT NULL\n                );''')\n            soup = BeautifulSoup(html, 'html.parser')\n            for tr in soup('tr'):\n                td = [tag.get_text() for tag in tr('td', limit=8, rowspan=True)]\n                if td and td[0].isdigit():\n                    td[1] = datetime.strptime(td[1], '%d/%m/%Y')\n                    db.execute('INSERT INTO polls VALUES (?, ?, ?, ?, ?, ?, ?, ?);', td)\n            db.commit()\n            db.close()\n","repo_name":"accdias/loterias","sub_path":"megasena.py","file_name":"megasena.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"5908590129","text":"# Problem description:\r\n# https://github.com/HackBulgaria/Programming0-1/tree/master/exam/2-Second-Largest\r\n\r\ndef second_largest(numbers):\r\n    numbers.sort()\r\n    largest_num = numbers[-1]\r\n    same_number = False\r\n\r\n    if len(numbers) < 2:\r\n        return False\r\n    \r\n    # True only when every element equals the largest, i.e. there is no second largest\r\n    same_number = all(num == largest_num for num in numbers)\r\n    \r\n    if same_number:\r\n        return
False\r\n\r\n    for num in range(len(numbers), 0, -1):\r\n        if numbers[num - 1] != largest_num:\r\n            return numbers[num - 1]\r\n\r\nprint(second_largest([5, 5]))\r\n","repo_name":"keremidarski/python_playground","sub_path":"Programming 0/week 8 - Exam/02_second.py","file_name":"02_second.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"69872136937","text":"# bisect module\n# Adds elements while keeping the list in sorted order (a heap builds the whole list first, then sorts)\n# For fairly long lists, bisect keeps things sorted faster than a heap\n\nimport random\nimport bisect\n\nlist_sample = []\nfor i in range(1, 15):\n    num = random.randint(1,100)\n    # The first argument is the iterable (list) to insert into,\n    # the second argument is the value to insert\n    # bisect.bisect returns the position num would take in the sorted list_sample\n    pos = bisect.bisect(list_sample, num)\n    # Insert the item at the position that keeps the list in sorted order.\n    bisect.insort(list_sample, num)\n    print('%3d %3d'%(num, pos), list_sample)\n\nlist_sample_2 = [i for i in range(-5, 10, 2)]\n\n\ndef counts_in_range(start, end, sample_list):\n    start = bisect.bisect_left(sample_list, start)\n    end = bisect.bisect_right(sample_list, end)\n    return end-start\n\n\nprint(list_sample_2)\nprint(counts_in_range(0, 9, list_sample_2))\n","repo_name":"EKYoonD/PythonPractice","sub_path":"Collections/practice_bisect.py","file_name":"practice_bisect.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"37588951392","text":"# Written by Eric Martin for COMP9021\n\n\nfrom math import gcd\n\n\ndef bottom_up_mediants_to(p, q):\n    if p == 0:\n        print('0/1')\n        return\n    p, q = normalise(p, q)\n    if q == 1:\n        print(f'{p}/1')\n        return\n    left = p // q, 1\n    right = left[0] + 1, 1\n    target = p, q\n    mediant = left[0] + right[0], 2\n    dichotomies = []\n    w = max(len(str(p)), len(str(q)))\n    while mediant != target:\n        on_right_side = mediant[0] * target[1] < target[0] * mediant[1]\n        print_nonfinal_dichotomy(left, mediant, right, on_right_side, w)\n        if on_right_side:\n            left = mediant\n        else:\n            right = mediant\n        mediant = left[0] + right[0], left[1] + right[1]\n    print_dichotomy(left, ' ', mediant, ' ', right, w)\n\ndef normalise(p, q):\n    the_gcd = gcd(p, q)\n    p //= the_gcd\n    q //= the_gcd\n    if q < 0:\n        p = -p\n        q = -q\n    return p, q\n\ndef print_nonfinal_dichotomy(left, mediant, right, on_right_side, w):\n    marks = {True: '*', False: ' '}\n    print_dichotomy(left, marks[not on_right_side], mediant,\n                    marks[on_right_side], right, w\n                   )\n\ndef print_dichotomy(left, left_mark, mediant, right_mark, right, w):\n    marks = {True: '*', False: ' '}\n    print(f'{left[0]:{w}}/{left[1]:<{w}} {left_mark} '\n          f'{mediant[0]:{w}}/{mediant[1]:<{w}} {right_mark} '\n          f'{right[0]:{w}}/{right[1]:<{w}}'\n         )\n\ndef top_down_mediants_from(p, q):\n    return [f'{x}/{y}' if y != 1 else str(x)\n            for (x, y) in sorted(mediants_from(p, q),\n                                 key=lambda f: f[0] / f[1]\n                                )\n           ]\n\ndef mediants_from(p, q):\n    if p == 0:\n        return [(0, 1)]\n    p, q = normalise(p, q)\n    fractions_to_process = [(p, q)]\n    mediants = set()\n    while fractions_to_process:\n        p, q = fractions_to_process.pop()\n        mediants.add((p, q))\n        if q == 1:\n            continue\n        x, y = extended_euclid(p, -q)\n        n = -x // q + 1\n        p_prime = y + p * n\n        q_prime = x + q * n\n        fractions_to_process.extend(((p_prime, q_prime),\n                                     (p - p_prime, q - q_prime)\n                                    )\n                                   )\n    return mediants\n\ndef extended_euclid(p, q):\n    if q == 0:\n        return 1, 0\n    x, y = extended_euclid(q, p % q)\n    return y, x - (p // q) *
y\n","repo_name":"marey/UNSW_COMP9021","sub_path":"03.exercies/08.Mediants/mediants.py","file_name":"mediants.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"90"} +{"seq_id":"22868456322","text":"def solution(A, B):\n L = max(A)\n P_max = max(B)\n \n fib = [0] * (L+2)\n fib[1] = 1\n for i in xrange(2, L + 2):\n fib[i] = (fib[i-1] + fib[i-2]) & ((1 << P_max) - 1)\n \n return_arr = [0] * len(A)\n \n for idx in xrange(len(A)):\n return_arr[idx] = fib[A[idx]+1] & ((1 << B[idx]) - 1)\n \n return return_arr\n","repo_name":"kiosklabs/codility-lessons-challenges","sub_path":"lesson-13-fibonacci-numbers/ladder.py","file_name":"ladder.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"36541446563","text":"__author__ = 'thanakorn'\n\n\nimport os\nfrom ordinal import Ordinal\n\n\nclass KeywordProcessing(object):\n\n keywords = {}\n filename = os.path.dirname(os.getcwd()) + '/conf/keyword/keywords.txt'\n file = open(filename, 'r')\n lines = file.readlines()\n for line in lines:\n key = line.strip().split('=')[0].strip()\n words = line.split('=')[1].strip().split(',')\n keywords.update({key: words})\n file.close()\n\n @classmethod\n def contains_keyword(cls, key, sentence):\n for keyword in cls.keywords[key]:\n if keyword in sentence:\n return True\n return False\n\n @classmethod\n def get_index(cls, sentence):\n for ordinal in cls.keywords['ordinal']:\n if ordinal in sentence:\n return Ordinal.ordinal[ordinal]\n return None\n\n @classmethod\n def get_os(cls, sentence):\n for os in cls.keywords['phone_os']:\n if os in sentence:\n return os\n return None\n","repo_name":"thanakorn/speech_recognition_callcenter","sub_path":"src/keyword_processing.py","file_name":"keyword_processing.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"27014669069","text":"import requests\nfrom bs4 import BeautifulSoup\nimport tweepy\nimport random\nimport schedule\nimport time\n\n# List of websites to scrape jokes from\njoke_sites = [\n \"https://www.jokes4us.com/cleanjokes.html\",\n \"https://www.laughfactory.com/jokes/clean-jokes\",\n \"https://www.rd.com/joke/clean-jokes/\",\n \"https://www.funnycleanjokes.net/\",\n \"https://www.jokesoftheday.net/clean-jokes\",\n \"https://www.short-funny.com/clean-jokes.php\"\n]\n\n\ndef scrape_jokes():\n jokes = []\n for site in joke_sites:\n page = requests.get(site)\n soup = BeautifulSoup(page.content, \"html.parser\")\n # find all the jokes on the website\n site_jokes = soup.find_all(\"div\", class_=\"content\")\n jokes.extend(site_jokes)\n return jokes\n\n\ndef post_joke(joke):\n # Insert your Twitter API keys here\n consumer_key = \"\"\n consumer_secret = \"\"\n access_token = \"\"\n access_token_secret = \"\"\n\n # Authenticate with Twitter\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth)\n\n # Post the joke to Twitter\n api.update_status(joke)\n\n\ndef job():\n jokes = scrape_jokes()\n post_joke(random.choice(jokes))\n\n\nschedule.every().day.at(\"12:00\").do(job)\nschedule.every().day.at(\"18:00\").do(job)\n\nwhile True:\n schedule.run_pending()\n 
time.sleep(1)\n","repo_name":"guilhermeadams/twitterjokebot","sub_path":"jokes.py","file_name":"jokes.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"72912544604","text":"import sys\nimport os\nfrom random import shuffle\n\nfile_name = os.readlink('/proc/self/fd/0')\n#print(file_name)\n\ndata = {} \nfor line in sys.stdin:\n x = line.split()\n node = int(x[0])\n out = []\n for i in range(len(x)-1):\n out.append(int(x[i+1]))\n data[node] = {\"links\": out, \n \"cluster\": None}\n\nclusters = {}\ndef make_cluster(node, num_clusters):\n data[node][\"cluster\"] = num_clusters\n try:\n #check_initialized = clusters[num_clusters][\"area\"]\n clusters[num_clusters][\"nodes\"].append(node)\n shuffle(data[node][\"links\"]) #add some much needed randomness\n for i in data[node][\"links\"]:\n try: \n if(data[i][\"cluster\"] == num_clusters):\n clusters[num_clusters][\"area\"] += 1\n else:\n clusters[num_clusters][\"perimeter\"].append(i)\n except KeyError:\n clusters[num_clusters][\"perimeter\"].append(i)\n while True:\n try:\n clusters[num_clusters][\"perimeter\"].remove(node)\n except ValueError:\n break\n\n except KeyError:\n #print(\"initializing a new cluster\")\n clusters[num_clusters] = {\"area\": 0, \"nodes\": [node],\n \"perimeter\": [i for i in data[node]['links']]}\n\n for link in data[node]['links']:\n try:\n if(data[link][\"cluster\"] != None):\n continue\n \n old_area = clusters[num_clusters][\"area\"]\n old_perimeter = len(clusters[num_clusters][\"perimeter\"])\n\n num_point_to_this_link = sum([True for i in clusters[num_clusters][\"perimeter\"] if i == link])\n\n num_links_in_new_link = len(data[link]['links'])\n new_perimeter = old_perimeter + num_links_in_new_link - 2*num_point_to_this_link\n new_area = old_area + num_point_to_this_link\n if(new_area/(new_perimeter + 0.0001) > old_area/(old_perimeter + 0.0001)):\n make_cluster(link, num_clusters)\n \n except KeyError:\n #print(\"keyError on :\", link)\n pass\n \n \n\n\n\nnum_clusters = 0\nfor node in data:\n if(data[node][\"cluster\"] != None):\n continue\n\n num_clusters += 1\n if(num_clusters<10):\n make_cluster(node, num_clusters)\n else:\n print(\"Too many clusters\")\n sys.exit(1)\n\n#print(file_name, \"num_clusters\", num_clusters)\n#print(data) \nfor c in clusters:\n print(file_name,\"area\", clusters[c]['area'],\"nodes\",clusters[c][\"nodes\"], \"perimeter\", clusters[c][\"perimeter\"])\n\n\n\n","repo_name":"Chuphay/hadoop","sub_path":"python/cluster/map_cluster.py","file_name":"map_cluster.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"35383751197","text":"\"\"\"\n Write a program that takes 5 fruits as input from the user as a list, and convert the list into a tuple.\n\"\"\"\nif __name__ == \"__main__\":\n fruits = list()\n for i in range(5):\n x = input(f\"Enter the fruit number { i + 1 }: \")\n fruits.append(x)\n\n fruits = (*fruits, )\n print(\"Transformed list to tuple is: \", fruits)\n","repo_name":"neerajp99/intro_to_cs_CS-101","sub_path":"cs1101/Neeraj_Pandey_cs1101_practice4/q17.py","file_name":"q17.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"12015452138","text":"\nsum = 0 # Initiate sum variable\n\nwhile True: # Create loop\n num = int(input()) # Initiate num var\n sum += num # Sum is equal to 
sum + num\n    if num == 0: # stop when the user enters 0\n        break\n\nprint(sum)","repo_name":"jmstudyacc/python_practice","sub_path":"POP1-Sequences_Series_Sets/sequence.sum.py","file_name":"sequence.sum.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"}{"seq_id":"31464749378","text":"# 62280912\n\nfrom typing import List, Union\n\nNumber = Union[int, float]\n\nt9 = {\n    2: 'abc',\n    3: 'def',\n    4: 'ghi',\n    5: 'jkl',\n    6: 'mno',\n    7: 'pqrs',\n    8: 'tuv',\n    9: 'wxyz'\n}\n\n\ndef in_put(string: str = None) -> List:\n    if string:\n        return [int(char) for char in string]\n    return [int(char) for char in input()]\n\n\ndef t9_keypad(input, combinations, index, result=''):\n    if index == -1:\n        combinations.append(result)\n        return result\n\n    digit = input[index]\n\n    for i in range(len(t9[digit])):\n        t9_keypad(input, combinations, index - 1, t9[digit][i] + result)\n\n\ndef t9_keypad_it(input, index):\n    def run(input, index, result=''):\n        if index == -1:\n            # yield from result\n            return result\n        if index > 0:\n            digit = input[index]\n            for i in range(len(t9[digit])):\n                yield from run(input, index - 1, t9[digit][i] + result)\n        yield from run(input, index)\n\n\ndef main(input: str = None):\n    if not input:\n        input = in_put('3456344535')\n    combinations = []\n    t9_keypad(input, combinations, len(input)-1)\n\n    # # iterator try\n    # result = t9_keypad_it(input, len(input)-1)\n    # for res in result:\n    #     print(res)\n\n    return ' '.join(sorted(combinations))\n\n\nif __name__ == '__main__':\n    input = in_put()\n    print(main(input))\n","repo_name":"mbrav/practicum_algorithms","sub_path":"sp13/theory/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}{"seq_id":"73555716124","text":"import json\nimport re\nfrom RePoE import mods\n\nMAPS_JSON = None\n\n# items json taken from https://www.pathofexile.com/api/trade/data/items\nwith open('items.json') as items:\n\tITEM_JSON = json.load(items)\n\tfor result in ITEM_JSON['result']:\n\t\tif result['id'] == 'maps':\n\t\t\tMAPS_JSON = result['entries']\n\t\t\tbreak\n\nMAP_LIST = []\nUNIQUE_MAP_LIST = {}\nPREFIX_LIST = []\n\n# generate list of map names (including unique maps)\nfor map_ in MAPS_JSON:\n\tif re.search('Map', map_['type']):\n\t\tif 'flags' in map_ and map_['flags']['unique']:\n\t\t\t# encode/decode just to handle Maelström of Chaos (evil)\n\t\t\tmap_name = map_['name'].encode('utf-8').decode('cp1252')\n\t\telse:\n\t\t\tmap_name = map_['type'].split(' Map')[0]\n\t\t\tmap_name = map_name.replace('Shaped ', '')\n\t\tif map_name not in MAP_LIST:\n\t\t\tMAP_LIST.append(map_name)\nMAP_LIST.append('General Notes')\nMAP_LIST.sort()\n\n# generate dictionary of unique map names vs map types for lookup of unidentified unique maps\nfor map_ in MAPS_JSON:\n\tif 'flags' in map_ and map_['flags']['unique']:\n\t\tif not re.search('Replica', map_['name']):\n\t\t\tmap_base = map_['type'].split(' Map')[0]\n\t\t\tUNIQUE_MAP_LIST[map_base] = map_['name'].encode('utf-8').decode('cp1252')\n\n# generate list of map prefixes\nfor mod in mods.values():\n\tif mod['domain'] == 'area' and mod['generation_type'] == 'prefix' and mod['name'] not in PREFIX_LIST:\n\t\tPREFIX_LIST.append(mod['name'])\nPREFIX_LIST.sort()\n\nwith open('map_data.py', 'w') as data_file:\n\tdata_file.write(\"MAP_LIST = {}\\n\".format(MAP_LIST))\n\tdata_file.write(\"UNIQUE_MAP_LIST = 
{}\\n\".format(UNIQUE_MAP_LIST))\n\tdata_file.write(\"PREFIX_LIST = {}\\n\".format(PREFIX_LIST))\n","repo_name":"jwfiredragon/PoEMapNotes","sub_path":"gen_data_dev.py","file_name":"gen_data_dev.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"86"}{"seq_id":"36805852032","text":"import gradio as gr\nimport numpy as np\nfrom mindspore import Tensor\nfrom mindspore.nn import Softmax\nimport cv2\nfrom typing import Type, Union, List, Optional\nfrom mindvision.classification.models.blocks import ConvNormActivation\nfrom mindspore import nn\nfrom mindvision.classification.models.classifiers import BaseClassifier\nfrom mindvision.classification.models.head import DenseHead\nfrom mindvision.classification.models.neck import GlobalAvgPooling\nfrom mindvision.classification.utils.model_urls import model_urls\nfrom mindvision.utils.load_pretrained_model import LoadPretrainedModel\nfrom mindspore import load_checkpoint, load_param_into_net\nfrom mindspore.train import Model\n\nimport mindspore\nprint(mindspore.__version__)\n\n\nNUM_CLASS = 10\nclass_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\n\nclass ResidualBlockBase(nn.Cell):\n    expansion: int = 1 # the last conv layer has the same number of kernels as the first\n\n    def __init__(self, in_channel: int, out_channel: int,\n                 stride: int = 1, norm: Optional[nn.Cell] = None,\n                 down_sample: Optional[nn.Cell] = None) -> None:\n        super(ResidualBlockBase, self).__init__()\n        if not norm:\n            norm = nn.BatchNorm2d\n\n        self.conv1 = ConvNormActivation(in_channel, out_channel,\n                                        kernel_size=3, stride=stride, norm=norm)\n        self.conv2 = ConvNormActivation(out_channel, out_channel,\n                                        kernel_size=3, norm=norm, activation=None)\n        self.relu = nn.ReLU()\n        self.down_sample = down_sample\n\n    def construct(self, x):\n        \"\"\"ResidualBlockBase construct.\"\"\"\n        identity = x # shortcut branch\n\n        out = self.conv1(x) # main branch, first layer: 3x3 convolution\n        out = self.conv2(out) # main branch, second layer: 3x3 convolution\n\n        if self.down_sample:\n            identity = self.down_sample(x)\n        out += identity # the output is the sum of the main branch and the shortcut\n        out = self.relu(out)\n\n        return out\n\n\n# --------------------------- Bottleneck --------------------------------------\nclass ResidualBlock(nn.Cell):\n    expansion = 4 # the last conv layer has 4 times as many kernels as the first\n\n    def __init__(self, in_channel: int, out_channel: int,\n                 stride: int = 1, norm: Optional[nn.Cell] = None,\n                 down_sample: Optional[nn.Cell] = None) -> None:\n        super(ResidualBlock, self).__init__()\n        if not norm:\n            norm = nn.BatchNorm2d\n\n        self.conv1 = ConvNormActivation(in_channel, out_channel,\n                                        kernel_size=1, norm=norm)\n        self.conv2 = ConvNormActivation(out_channel, out_channel,\n                                        kernel_size=3, stride=stride, norm=norm)\n        self.conv3 = ConvNormActivation(out_channel, out_channel * self.expansion,\n                                        kernel_size=1, norm=norm, activation=None)\n        self.relu = nn.ReLU()\n        self.down_sample = down_sample\n\n    def construct(self, x):\n        identity = x # shortcut branch\n\n        out = self.conv1(x) # main branch, first layer: 1x1 convolution\n        out = self.conv2(out) # main branch, second layer: 3x3 convolution\n        out = self.conv3(out) # main branch, third layer: 1x1 convolution\n\n        if self.down_sample:\n            identity = self.down_sample(x)\n\n        out += identity # the output is the sum of the main branch and the shortcut\n        out = self.relu(out)\n\n        return out\n\n\ndef make_layer(last_out_channel, block: Type[Union[ResidualBlockBase, ResidualBlock]],\n               channel: int, block_nums: int, stride: int = 1):\n    down_sample = None # shortcut branch\n\n    if stride != 1 or last_out_channel != channel * block.expansion:\n        down_sample = ConvNormActivation(last_out_channel, channel * block.expansion,\n                                         kernel_size=1, stride=stride, norm=nn.BatchNorm2d, activation=None)\n\n    layers = []\n    layers.append(block(last_out_channel, channel, stride=stride, down_sample=down_sample, norm=nn.BatchNorm2d))\n\n    in_channel = channel * block.expansion\n    # stack the residual blocks\n    for _ in range(1, block_nums):\n        layers.append(block(in_channel, channel, norm=nn.BatchNorm2d))\n\n    return nn.SequentialCell(layers)\n\n\nclass ResNet(nn.Cell):\n    def __init__(self, block: Type[Union[ResidualBlockBase, ResidualBlock]],\n                 layer_nums: List[int], norm: Optional[nn.Cell] = None) -> None:\n        super(ResNet, self).__init__()\n        if not norm:\n            norm = nn.BatchNorm2d\n        # first convolution layer: 3 input channels (colour image), 64 output channels\n        self.conv1 = ConvNormActivation(3, 64, kernel_size=7, stride=2, norm=norm)\n        # max-pooling layer to shrink the feature map\n        self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')\n        # definitions of the residual stages\n        self.layer1 = make_layer(64, block, 64, layer_nums[0])\n        self.layer2 = make_layer(64 * block.expansion, block, 128, layer_nums[1], stride=2)\n        self.layer3 = make_layer(128 * block.expansion, block, 256, layer_nums[2], stride=2)\n        self.layer4 = make_layer(256 * block.expansion, block, 512, layer_nums[3], stride=2)\n\n    def construct(self, x):\n        x = self.conv1(x)\n        x = self.max_pool(x)\n\n        x = self.layer1(x)\n        x = self.layer2(x)\n        x = self.layer3(x)\n        x = self.layer4(x)\n\n        return x\n\n\ndef _resnet(arch: str, block: Type[Union[ResidualBlockBase, ResidualBlock]],\n            layers: List[int], num_classes: int, pretrained: bool, input_channel: int):\n    backbone = ResNet(block, layers)\n    neck = GlobalAvgPooling() # global average pooling layer\n    head = DenseHead(input_channel=input_channel, num_classes=num_classes) # fully connected layer\n    model = BaseClassifier(backbone, neck, head) # chain the backbone, neck and head together\n\n    if pretrained:\n        # download and load the pretrained model\n        LoadPretrainedModel(model, model_urls[arch]).run()\n\n    return model\n\n\ndef resnet50(num_classes: int = 1000, pretrained: bool = False):\n    \"ResNet50 model\"\n    return _resnet(\"resnet50\", ResidualBlock, [3, 4, 6, 3], num_classes, pretrained, 2048)\n\n\nparam_dict = load_checkpoint(\"./best.ckpt\")\nnetwork = resnet50(num_classes=NUM_CLASS, pretrained=False)\n\nload_param_into_net(network, param_dict)\nmodel = Model(network)\n\n\ndef predict_image(img):\n    img = cv2.resize(img, (32, 32))\n    print(img.shape)\n    img = img.astype(np.float32)\n    img = img / 255\n    mean = np.array([0.485, 0.456, 0.406])\n    std = np.array([0.2023, 0.1994, 0.2010])\n    img = (img - mean) / std\n    img = img.astype(np.float32)\n    img = img.transpose(2, 0, 1)\n    print(img.shape)\n    img = img.reshape(1, 3, 32, 32)\n\n    predict_score = model.predict(Tensor(img)).reshape(-1)\n    predict_probability = Softmax()(predict_score)\n\n    predict_probability = predict_probability.asnumpy()\n    print(predict_probability)\n    print({class_names[0]: float(predict_probability[0])})\n\n    return {class_names[i]: float(predict_probability[i]) for i in range(NUM_CLASS)}\n\n\nimage = gr.inputs.Image(shape=(224, 224))\nlabel = gr.outputs.Label(num_top_classes=NUM_CLASS)\n\ngr.Interface(css=\".footer {display:none !important}\",\n             fn=predict_image,\n             inputs=image,\n             live=False,\n             description=\"Please upload an image in JPG, JPEG or PNG.\",\n             title='Image Classification by ResNet50',\n             outputs=gr.outputs.Label(num_top_classes=NUM_CLASS, label=\"Predicted class\"),\n             examples=['./example_img/airplane.jpg','./example_img/automobile.jpg', './example_img/bird.jpg','./example_img/cat.jpg','./example_img/deer.jpg','./example_img/dog.jpg',\n                       './example_img/frog.jpg','./example_img/horse.JPG','./example_img/ship.jpg','./example_img/truck.jpg']\n             ).launch(share=True)\n","repo_name":"drizzlezyk/xihe_tutorial","sub_path":"ResNet50/inference/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}{"seq_id":"17349802899","text":"from flask import Flask, request, jsonify\r\nimport sqlite3\r\nfrom datetime import datetime\r\nfrom flask_cors import CORS\r\n\r\n\r\napp = Flask(__name__)\r\nCORS(app)\r\n\r\n@app.route(\"/\", methods=['GET'])\r\ndef ping():\r\n\r\n    status = 200\r\n    if 1:\r\n        status = 200\r\n    else:\r\n        status = 400\r\n    response = {\"status_code\": status,\r\n                \"content\": \"We are Up & running!\"}\r\n    return jsonify(response)\r\n\r\n\r\n# Route for writing data to the 
database\r\n@app.route('/write', methods=['POST'])\r\ndef write_data():\r\n data = request.get_json()\r\n mood = data.get('mood')\r\n feel = data.get('feeling')\r\n timestamp = datetime.now()\r\n conn = sqlite3.connect('data.db')\r\n c = conn.cursor()\r\n # Insert the data into the database\r\n c.execute(\"INSERT INTO minddata (mood, feeling, timestamp) VALUES (?, ?, ?)\", (mood, feel, timestamp))\r\n conn.commit()\r\n conn.close()\r\n return 'Data written to database!'\r\n\r\n\r\n# Route for nuking whole table\r\n@app.route('/nukemofo', methods=['GET'])\r\ndef nuke_all():\r\n conn = sqlite3.connect('data.db')\r\n c = conn.cursor()\r\n # Delete all from minddata\r\n c.execute(\"DELETE from minddata\")\r\n conn.commit()\r\n conn.close()\r\n return 'They all dead'\r\n\r\n\r\n# Route for writing batch data to the database\r\n@app.route('/batch_write', methods=['POST'])\r\ndef batch_write_data():\r\n data = request.get_json()\r\n moods = data.get('moods')\r\n feels = data.get('feelings')\r\n timestamp = datetime.now()\r\n conn = sqlite3.connect('data.db')\r\n c = conn.cursor()\r\n for m, f in zip(moods, feels):\r\n # Insert the data into the database\r\n c.execute(\"INSERT INTO minddata (mood, feeling, timestamp) VALUES (?, ?, ?)\", (m, f, timestamp))\r\n conn.commit()\r\n conn.close()\r\n return 'Batch written to database!'\r\n\r\n# Route for getting the mean value of all moods in the database\r\n@app.route('/mood', methods=['GET'])\r\ndef get_mood():\r\n conn = sqlite3.connect('data.db')\r\n c = conn.cursor()\r\n c.execute(\"SELECT mood FROM minddata\")\r\n numbers = c.fetchall()\r\n if len(numbers) > 0:\r\n mean = sum([x[0] for x in numbers]) / len(numbers)\r\n else:\r\n mean = 0\r\n conn.close()\r\n return jsonify({'mean': mean})\r\n\r\n# Route for getting all moods & feels in the database\r\n@app.route('/get_all', methods=['GET'])\r\ndef get_all():\r\n conn = sqlite3.connect('data.db')\r\n c = conn.cursor()\r\n c.execute('select mood, feeling from minddata')\r\n alldata = c.fetchall()\r\n if len(alldata) > 0:\r\n return jsonify(alldata)\r\n else:\r\n print(\"whatever\")\r\n conn.close()\r\n return jsonify(alldata)\r\n\r\n\r\n# Route to return last vote score\r\n@app.route('/lastvote', methods=['GET'])\r\ndef get_last():\r\n conn = sqlite3.connect('data.db')\r\n c = conn.cursor()\r\n c.execute(\"SELECT mood FROM minddata ORDER BY timestamp DESC LIMIT 1\")\r\n lastnumber = c.fetchone()\r\n c.execute(\"SELECT feeling FROM minddata ORDER BY timestamp DESC LIMIT 1\")\r\n lastfeeling = c.fetchone()\r\n if len(lastnumber) > 0:\r\n response = {'latest': lastnumber,\r\n 'lastfeeling': lastfeeling}\r\n else:\r\n return jsonify({'response': \"no data available\"})\r\n conn.close()\r\n return jsonify(response)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n","repo_name":"firaun2020/mind-main-api","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"25470210866","text":"# Standard library imports ...\nimport argparse\n\n# Third party library imports ...\nimport yaml\n\n# Local imports ...\nfrom . 
import RestToIso, NowCoastRestToIso\nfrom .to_html import ISO191152_to_HTML\nfrom .update_iso import NowCoastUpdateIso, UpdateIso\n\n\ndef update_iso():\n \"\"\"\n Update an existing ISO record against REST metadata and a configuration\n file.\n \"\"\"\n description = 'Update ISO 19115-2 metadata'\n kwargs = {\n 'description': description,\n 'formatter_class': argparse.RawDescriptionHelpFormatter,\n }\n parser = argparse.ArgumentParser(**kwargs)\n\n help = 'YAML configuration file'\n parser.add_argument('config', type=str, help=help)\n\n help = 'Root directory of existing XML records'\n parser.add_argument('input', type=str, help=help)\n\n help = 'Output directory of updated XML records'\n parser.add_argument('output', type=str, help=help)\n\n args = parser.parse_args()\n\n # Get the project from the configuration file.\n with open(args.config, 'rt') as f:\n config = yaml.load(f.read())\n\n if config['project'].lower() == 'nowcoast':\n obj = NowCoastUpdateIso(args.config, args.input, args.output)\n else:\n obj = UpdateIso(args.config, args.input, args.output)\n\n obj.run()\n\ndef rest2iso():\n\n description = 'Build ISO 19115-2 metadata from ArcGIS REST directory.'\n kwargs = {\n 'description': description,\n 'formatter_class': argparse.RawDescriptionHelpFormatter,\n }\n parser = argparse.ArgumentParser(**kwargs)\n\n help = 'YAML configuration file'\n parser.add_argument('config', type=str, help=help)\n\n help = 'Log level'\n choices = ['debug', 'info', 'warning', 'error', 'critical']\n parser.add_argument('--verbose', help=help, default='info',\n choices=choices)\n\n args = parser.parse_args()\n\n # Get the project from the configuration file.\n with open(args.config, 'rt') as f:\n config = yaml.load(f.read())\n project = config['project']\n\n if project.lower() == 'nowcoast':\n obj = NowCoastRestToIso(args.config, verbose=args.verbose)\n else:\n obj = RestToIso(args.config, verbose=args.verbose)\n\n obj.run()\n\n\ndef iso191152_to_html():\n \"\"\"\n Entry point for converting ISO19115-2 documents to HTML.\n \"\"\"\n kwargs = {\n 'description': 'Convert ISO 19115-2 XML documents into HTML.',\n 'formatter_class': argparse.RawDescriptionHelpFormatter,\n }\n parser = argparse.ArgumentParser(**kwargs)\n\n parser.add_argument('input', type=str, help='Input root directory')\n parser.add_argument('output', type=str, help='Output root directory')\n\n args = parser.parse_args()\n\n o = ISO191152_to_HTML(args.input, args.output)\n o.run()\n","repo_name":"quintusdias/gis-monitoring","sub_path":"ags_metadata/ags_metadata/command_line.py","file_name":"command_line.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"43164869346","text":"import os\nimport socket\nimport tqdm\nimport json\nimport time\nimport threading\nimport signal\nimport hashlib\nimport shutil\nimport sys\n\nclass Node():\n\n def __init__(self):\n\n # Global flag\n self.GLOBAL_FLAG = False\n\n # PORTS\n self.COMM_IN = 9822\n self.FILE_IN = 9823\n \n # IP range to scan (last octet)\n self.IP_RANGE_START = 190\n self.IP_RANGE_END = 220\n\n self.IP_CHAIN = {socket.gethostbyname_ex(socket.gethostname())[-1][-1]}\n\n # Exit flag\n self.exit_event = threading.Event()\n\n # Socket constants\n self.BUFFER_SIZE = 8192\n self.SEPARATOR = \"\"\n\n self.ROOT_DIR = '\\\\files'\n self.ROOT = os.getcwd() + self.ROOT_DIR\n\n self.meta_time = 0\n\n # If a file system update was logged when the program was previously running, update self.meta_time\n 
if os.path.isfile(os.getcwd() + '\\\\metadata.txt'):\n with open(os.getcwd() + '\\\\metadata.txt', 'r') as f:\n self.meta_time = int(f.read())\n \n \"\"\"\n Steps to be performed on start up\n 1. self.connect_to_network() in order to update self.IP_CHAIN\n 2. Run self.comm_listener() on a thread to give participate in IP_CHAIN updates, and receive file metadata updates\n 3. Run self.generate_dir_json() on a thread, if generate_dir_json() returns True, an update has taken place.\n This update should broadcast the time of update (metadata.txt) and update details (dir.json) to all IPs\n on the self.IP_CHAIN.\n \"\"\"\n # Ping all local IPs 3 times in order to \n self.connect_to_network(1)\n # self.ping()\n\n # Thread to participate in IP_CHAIN updates\n self.t_in = threading.Thread(target=self.comm_listener)\n self.t_in.start()\n\n # Thread to broadcast file system metadata updates and request files\n self.t_out = threading.Thread(target=self.client_logic)\n self.t_out.start()\n\n # Thread to listen for incoming files\n self.f_out = threading.Thread(target=self.file_listener)\n self.f_out.start()\n\n signal.signal(signal.SIGINT, self.signal_handler)\n\n # Listen for an exit event on the main thread\n while not self.exit_event.is_set():\n time.sleep(1)\n\n\n # Generates dir.json on file update and sends out file system metadata to devices on IP_CHAIN\n def client_logic(self):\n while not self.exit_event.is_set():\n # Global flag will be set to True if scheduled file modifications are taking place\n if self.GLOBAL_FLAG:\n print(f'GLOBAL_FLAG lock - client')\n return None\n update = self.generate_dir_json()\n print('Updated' if update else 'Already up to date')\n # Broadcast data from metadata.txt and dir.json to all devices on the IP_CHAIN\n if update:\n print('Broadcasting file system update')\n s = socket.socket()\n for address in self.IP_CHAIN:\n if address != socket.gethostbyname_ex(socket.gethostname())[-1][-1]:\n try:\n print(f'Sending file updates to {address}:{self.FILE_IN}')\n s.connect((address, self.FILE_IN))\n #'1{time.time_ns()}{JSON_OBJECT_DIR}'\n with open(os.getcwd() + \"\\\\dir.json\", 'r') as f:\n json_payload = f.read()\n s.send((f'1{self.SEPARATOR}{str(time.time_ns())}{self.SEPARATOR}{json_payload}').encode())\n except Exception as e:\n print(f'{address}:{self.FILE_IN} did not accept file metadata update !! 
{e}')\n\n # Wait 5 seconds in between checks\n time.sleep(5)\n\n\n\n # Kills threads on exit signal (CTRL + C) \n def signal_handler(self, signum, frame):\n self.exit_event.set()\n # Send a kill message to the comm_listener() @port COMM_IN\n s = socket.socket()\n s.connect((socket.gethostbyname_ex(socket.gethostname())[-1][-1], self.COMM_IN))\n s.send('-1'.encode()) # Send exit signal to self.COMM_IN\n\n s = socket.socket()\n s.connect((socket.gethostbyname_ex(socket.gethostname())[-1][-1], self.FILE_IN))\n s.send('-1'.encode()) # Send exit signal to self.FILE_OUT\n \n self.t_in.join() # Exits comm in thread\n self.t_out.join() # Exits comm out thread\n self.f_out.join() # Exits file in thread\n print('Listeners and threads killed')\n\n\n # Read a nested json value -- dev function\n # pylint: disable=unused-argument\n def read_layer(self, json_dict, indices):\n access = json_dict\n for index in indices:\n access = access[index]\n return access\n\n # Return data stored in dir.json, if the file does not\n # exist, return an empty json\n def read_json(self, path):\n if not os.path.isfile(path):\n return {}\n with open(path, 'r') as json_file:\n return json.load(json_file)\n\n # Recursively goes through directory, gathering file names, their associated path, and the last time\n # that their content was modified. Create a json to be saved as 'dir.json'\n def find_endpoints(self, path, json_dict, layer):\n # json_dict = {}\n for file_name in os.listdir(path):\n # If there is no file extension, it is a subdirectory \n if len(file_name.split('.')) == 1:\n # Add the subdirectory to json_dict\n split_layers = path.replace(os.getcwd() + \"\\\\\", \"\").split(\"\\\\\")\n if type(split_layers) != list:\n split_layers = [split_layers]\n split_layers.append(file_name)\n \n # json_dict = append_to_dict(json_dict, split_layers, {})\n if str(split_layers) not in json_dict:\n json_dict[str(split_layers)] = {}\n # Call function and iterate through the subdirectory\n self.find_endpoints(os.path.join(path, file_name), json_dict, layer=layer+1)\n else:\n # Get the directories in between the file and the working directory\n split_layers = path.replace(os.getcwd() + \"\\\\\", \"\").split(\"\\\\\")\n if type(split_layers) != list:\n split_layers = [split_layers]\n # # Append the file and metadata to the json_dict\n # data = {file_name: [os.stat(os.path.join(path, file_name)).st_mtime]}\n\n # Hash file contents for a unique file signature\n md5 = hashlib.md5()\n with open(os.path.join(path, file_name), 'rb') as f:\n while True:\n chunk = f.read(self.BUFFER_SIZE)\n if not chunk:\n break\n md5.update(chunk)\n\n # Append the file and metadata to the json_dict\n data = {file_name: [md5.hexdigest()]}\n \n # result.append((split_layers, data))\n # print(split_layers)\n if str(split_layers) not in json_dict:\n json_dict[str(split_layers)] = {}\n # print(result[str(split_layers)])\n json_dict[str(split_layers)] = json_dict[str(split_layers)] | data\n\n return json_dict \n\n # If dir.json does not exist, generate a blank dir.json. 
Otherwise\n # write the passed in data to dir.json\n def write_json(self, json_dict={}, path=os.getcwd()):\n with open(path + '\\\\dir.json', 'w') as json_file:\n json.dump(json_dict, json_file, sort_keys=True)\n\n # Iterate through the layers in the dict and append the data to the desired location\n def append_to_dict(self, json_dict, indices, data):\n access = json_dict\n\n for index in indices:\n # If the directory does not exist, create it\n if index not in access:\n access[index] = {}\n if index == indices[-1]:\n access[index] = data\n return json_dict\n # Go deeper into the json\n access = access[index]\n else:\n # If the directory exists, merge the existing contents\n # with the data that needs to be added\n if index == indices[-1]:\n access[index] = access[index] | data\n return json_dict\n # Go deeper into the json\n # print(index)\n access = access[index]\n\n \"\"\"\n If the dir.json is not identical to the current hierarchical structure of the \n root directory (along with identical metadata), then dir.json will be overwritten\n to correspond with the current directory.\n A metadata file will be generated at completion of the operation to store time of\n the original-last dir.json generation\n \"\"\"\n def generate_dir_json(self, root=None):\n\n if root == None:\n root = self.ROOT\n\n generated = self.find_endpoints(root, {}, 0)\n # print(f'generated: {generated}')\n if generated != self.read_json(os.getcwd() + '\\\\dir.json'):\n self.write_json(generated)\n # Generate a time stamp to keep track of when this dir.json was generated\n with open('metadata.txt', 'w') as f:\n self.meta_time = time.time_ns()\n f.write(str(self.meta_time))\n return True \n else:\n return False\n \n\n # Compare the two directories, output a list of modifications.\n # If the two directories are identical, output an empty list.\n # By default this function will not modify any files unless\n # sandbox is set to False.\n # Requests files from IP associated with other_dir\n def dir_json_compare(self, self_dir, other_dir, other_ip, sandbox=True):\n\n request = []\n \n json_dict_1 = self.read_json(self_dir[0])\n # json_dict_2 = self.read_json(dir[0])\n json_dict_2 = json.loads(other_dir[0])\n print(f'type(json_dict_2): {type(json_dict_2)}')\n\n self_dir_time = 0\n other_dir_time = other_dir[1]\n with open(self_dir[1], 'r') as f:\n self_dir_time = int(f.read())\n\n print(self_dir_time)\n print(other_dir_time)\n\n # This assumes that if two directories were created at the exact same time in ns\n # then self_dir is arbitrarily chosen to take precedence\n if json_dict_1 == json_dict_2 or self_dir_time > other_dir_time:\n return request\n else:\n \"\"\"\n If other_dir is more current than self_dir\n \"\"\"\n print('self_dir < other_dir')\n for subdirectory in json_dict_1:\n # If a subdirectory is not present in other_dir that is present in self_dir\n if subdirectory not in json_dict_2:\n if not sandbox:\n # Delete subdirectory and contents\n request.append(['-', subdirectory, {}])\n\n # print('-', subdirectory)\n # print('-', subdirectory, json_dict_1[subdirectory])\n for subdirectory in json_dict_2:\n # If a subdirectory is not present in self_dir that is present in other_dir\n if subdirectory not in json_dict_1:\n if len(json_dict_2[subdirectory]) == 0:\n request.append(['+', subdirectory, {}])\n for file in json_dict_2[subdirectory]:\n if not sandbox: \n request.append(['+', subdirectory, file]) # Request file(s)\n\n else:\n if json_dict_1[subdirectory] != json_dict_2[subdirectory]:\n for file in 
json_dict_1[subdirectory]:\n if file not in json_dict_2[subdirectory]:\n if not sandbox:\n request.append(['-', subdirectory, file])\n pass # Delete file\n # print(\"- %s {'%s': %s}\" % (subdirectory, file, json_dict_1[subdirectory][file])) \n for file in json_dict_2[subdirectory]:\n # Request file\n if not sandbox: \n request.append(['+', subdirectory, file])\n pass\n return request\n \n\n # Send a specific file to a specific target machine on LAN\n def send(self, target_ip='192.168.254.199', file_name='data3.jpg', root=None):\n\n if root == None:\n root = self.ROOT\n\n file_path = root + file_name\n\n filesize = os.path.getsize(file_path)\n\n s = socket.socket()\n print(f\"Connecting to {target_ip}:{self.FILE_IN}\")\n s.connect((target_ip, self.FILE_IN))\n print(\"Connected.\")\n\n s.send(f\"{file_path}{self.SEPARATOR}{filesize}\".encode())\n\n # progress = tqdm.tqdm(range(filesize), f\"Sending {file_path}\", unit='B', unit_scale=True, unit_divisor=1024)\n with open(file_path, 'rb') as f:\n while True:\n bytes_read = f.read(self.BUFFER_SIZE)\n if not bytes_read:\n break\n s.sendall(bytes_read)\n # progress.update(len(bytes_read))\n s.close()\n\n \n\n\n\n\n\n \"\"\"\n Used to send a request to a target ip, to request to join the IP_CHAIN\n \n \"\"\"\n def ping(self, ip, port=None):\n\n if port == None:\n port = self.COMM_IN\n\n s = socket.socket()\n # Tested to have a 92% success rate on LAN\n s.settimeout(0.1)\n try:\n # Send self.IP_CHAIN\n s.connect((ip, port))\n s.send((f'0{self.SEPARATOR}{self.IP_CHAIN}').encode())\n # Get updated IP_CHAIN in response\n response = s.recv(self.BUFFER_SIZE).decode()\n data = response.replace('{', '').replace('}', '').replace(\"'\", '').replace(' ', '').split(',')\n for address in data:\n self.IP_CHAIN.add(address)\n print(f'Updated IP_CHAIN: {self.IP_CHAIN}')\n s.close()\n return True\n except Exception as e:\n print(f'{ip}:{port} did not respond !! {e}')\n # print(f'Error: {e}')\n # return False\n \n\n # Scan the desired IP range by pinging all targets at the communication port\n def connect_to_network(self, attempts=1, port=None):\n\n # If 0 attempts is sent, do not attempt to connect to the network\n if attempts == 0:\n return None\n\n if port == None:\n port = self.COMM_IN\n\n # Ping all ips 'attempts' times in order to prevent from timeouts\n # resulting in invalid data\n for i in range(attempts):\n for i in range(self.IP_RANGE_START, self.IP_RANGE_END):\n ip = '192.168.254.%s' % i\n if self.ping(ip, port):\n return None\n\n\n\n \"\"\"\n FILE LISTENER\n File listener will listen for \n 1. File system updates from other machines\n 2. Compare file system data\n 3. 
Request/ send files (lock client thread to prevent sending updates in the middle of updating own file system)\n \"\"\"\n def file_listener(self, root=None):\n # Loop as long as the exit event (ctrl + c) is not sent\n while not self.exit_event.is_set(): \n \n if root == None:\n root = self.ROOT\n\n s = socket.socket()\n s.bind(('0.0.0.0', self.FILE_IN))\n s.listen(5)\n print(\"Listening as %s:%d for files\" % (socket.gethostbyname_ex(socket.gethostname())[-1][-1], self.FILE_IN))\n \n try:\n client_socket = s.accept()\n\n # '0{[filepath, filepath2, filepath3]}filename.extfilesize'\n received = client_socket[0].recv(self.BUFFER_SIZE).decode().split(self.SEPARATOR)\n other_ip = client_socket[1][0]\n # -1 Exit call\n if received[0] == '-1':\n client_socket[0].close()\n s.close()\n return None\n if received[0] == 'FILE-REQ':\n # Split the request into individual files\n split_data = received[1][2:-1].replace('}', '').split(', {')\n self.send_files(client_socket, split_data, other_ip)\n # 1: Update file system (request comes from self.client_logic())\n # Request format: '1{time.time_ns()}{JSON_OBJECT_DIR}'\n if received[0] == '1':\n request = self.dir_json_compare((os.getcwd() + '\\\\dir.json', os.getcwd() + '\\\\metadata.txt'), (received[2], int(received[1])), other_ip, False)\n\n # If the request is not empty, lock threads -> delete and request files\n if request != []:\n # Lock threads\n self.GLOBAL_FLAG = True\n\n files_needed = []\n # Iterate through and decipher each requested operation \n # to attain matching file systems\n for operation in request:\n op_path = os.getcwd()\n path_list = operation[1].replace('[', '').replace(']', '').replace(\"'\", '').replace(' ', '').split(',')\n for path_fragment in path_list: \n op_path = os.path.join(op_path, path_fragment)\n\n # Delete from local directory\n if operation[0] == '-':\n if operation[2] == {}: # Delete subdirectory and all contents\n shutil.rmtree(op_path)\n else: # Delete file\n os.remove(os.path.join(op_path, operation[2]))\n # Request files to add to local directory\n if operation[0] == '+':\n # If not an empty subdirectory, add the file to the request\n if operation[2] != {}: \n files_needed.append({operation[1]: operation[2]})\n # Otherwise generate the empty subdirectory\n else: \n os.mkdir(op_path)\n\n # Close previous socket connection\n s.close()\n # Request files if needed\n if files_needed != []:\n print(f'file request to be sent: {files_needed}')\n self.request_files(files_needed, other_ip)\n \n except Exception as e:\n print(f'File listener exception: {e}')\n finally:\n client_socket[0].close()\n s.close()\n \n # Unlock the client thread to allow file system checks/ updates to continue\n self.GLOBAL_FLAG = False\n \n\n\n \"\"\"\n 1. REQUEST FILE(S)\n 2. 
RECIEVE FILE(S) \n :param files_needed: list of files needed along with their relative path and file name\n :param target_ip: target_ip is the ip of the device that has the files \n \"\"\"\n def request_files(self, files_needed, target_ip):\n try:\n self.GLOBAL_FLAG = True\n s = socket.socket()\n print(f'FILE-REQ{self.SEPARATOR}{files_needed}')\n s.connect((target_ip, self.FILE_IN))\n\n # Send a request for the files that are needed in order to have matching files\n s.send(f'FILE-REQ{self.SEPARATOR}{files_needed}'.encode())\n s.close()\n\n # Listen for incoming files\n try:\n s = socket.socket()\n s.bind(('0.0.0.0', self.FILE_IN))\n s.listen(5)\n print('LISTEN FOR INCOMING FILES')\n while True:\n client_socket = s.accept()\n \n chunk = client_socket[0].recv(self.BUFFER_SIZE)\n # print(f'chunk not decoded: {chunk}')\n chunk = chunk.decode().split(self.SEPARATOR)\n # If files are done sending break out of loop and match last update time\n if chunk[0] == '':\n # Make the metadata.txt time match with the device that sent the update\n with open(os.getcwd() + '\\\\metadata.txt', 'w') as f:\n f.write(chunk[1])\n client_socket[0].close()\n s.close()\n break\n \n # Store data into local variables\n relative_path = chunk[1].split('\\\\')[0:-1]\n file_size = int(chunk[2])\n file_name = chunk[1].split('\\\\')[-1]\n \n # If the directory does not exist, make it\n path_accumulator = os.getcwd()\n for path_fragment in relative_path:\n path_accumulator = os.path.join(path_accumulator, path_fragment)\n if not os.path.exists(path_accumulator):\n os.mkdir(path_accumulator)\n \n # Write file bytes to their designated path as they are sent\n with open(os.path.join(os.getcwd(), chunk[1]), 'wb') as f:\n while True:\n bytes_read = client_socket[0].recv(self.BUFFER_SIZE)\n if not bytes_read:\n break\n f.write(bytes_read)\n \n except Exception as e:\n s.close()\n print(f'File recv exception {e}')\n except Exception as e:\n s.close()\n print(f'File request exception {e}')\n finally:\n s.close()\n\n self.GLOBAL_FLAG = False\n\n\n \"\"\"\n 1. LISTEN FOR FILE(S) REQUEST\n 2. 
SENDS FILE(S) TO REQUESTEE\n send_files() will send any amount of files to the device that requested them\n \"\"\"\n def send_files(self, client_socket, request, other_ip):\n \n try:\n \n # Create new socket for sending files\n s = socket.socket()\n\n\n # Further format the request data to generate the local file path\n for item in request:\n path_raw, file_name_raw = item.split(':')\n # Further format request string\n path = path_raw.replace('\"', '').replace('[', '').replace(']', '').replace(\"'\", '').replace(' ', '').split(',')\n file_name = file_name_raw.replace(\"'\", '').replace(' ', '')\n\n # Generate local, absolute path\n local_path = os.getcwd()\n # Generate relative path\n rel_path = ''\n for path_fragment in path:\n local_path = os.path.join(local_path, path_fragment)\n rel_path = os.path.join(rel_path, path_fragment)\n\n # EMPTY DIRECTORY -- GENERATE DIR BEFORE SENDING FILE REQUEST\n if file_name == '{':\n os.mkdir(rel_path)\n s.close() \n continue\n \n s.close() \n\n\n # SEND FILE\n file_location = os.path.join(local_path, file_name)\n if os.path.isfile(file_location):\n s = socket.socket()\n s.connect((other_ip, self.FILE_IN))\n filesize = os.path.getsize(file_location)\n # Send file meta data\n payload = f'9{self.SEPARATOR}{os.path.join(rel_path, file_name)}{self.SEPARATOR}{filesize}{self.SEPARATOR}'\n for i in range(self.BUFFER_SIZE - len(payload)):\n payload += '0'\n s.send(payload.encode())\n \n # Send file bytes\n with open(file_location, 'rb') as f:\n while True:\n bytes_read = f.read(self.BUFFER_SIZE)\n if not bytes_read:\n break\n s.sendall(bytes_read)\n s.close()\n \n \n # Send file transfer terminate signal\n s = socket.socket()\n s.connect((other_ip, self.FILE_IN))\n\n with open('metadata.txt', 'r') as f:\n timestamp = f.read()\n s.send(f'{self.SEPARATOR}{timestamp}'.encode())\n s.close()\n\n \n\n\n except Exception as e:\n print(f'Exception: {e}')\n client_socket[0].close()\n s.close()\n finally:\n client_socket[0].close()\n s.close()\n print('Connection closed')\n\n \"\"\"\n COMM LISTENER\n Comm listener will\n Listen for devices wanting to join the network and add them to the IP_CHAIN\n \"\"\"\n def comm_listener(self):\n\n # Continuously run this listener\n while not self.exit_event.is_set():\n s = socket.socket()\n s.bind(('0.0.0.0', self.COMM_IN))\n s.listen(5)\n print(\"Listening as %s:%d for communication\" % (socket.gethostbyname_ex(socket.gethostname())[-1][-1], self.COMM_IN))\n\n try:\n client_socket = s.accept()\n\n # Format of requests will be '0['ip0', 'ip2', 'ip3'] \n # or '1{time.time_ns()}{JSON_OBJECT_DIR}'\n received = client_socket[0].recv(self.BUFFER_SIZE).decode().split(self.SEPARATOR)\n other_ip = client_socket[1][0]\n # 0: Update IP_CHAIN (devices on the file sharing network)\n if received[0] == '0':\n \n print(f'Add {other_ip} to the network')\n data = received[1].replace('{', '').replace('}', '').replace(\"'\", '').replace(' ', '').split(',')\n if len(data) == 1:\n self.IP_CHAIN.add(data[0])\n elif len(data) > 1:\n for ip in data:\n self.IP_CHAIN.add(ip)\n client_socket[0].send(str(self.IP_CHAIN).encode())\n print(self.IP_CHAIN)\n \n # -1 Exit call\n if received[0] == '-1':\n client_socket[0].close()\n s.close()\n return None\n except Exception as e:\n print(f'!! 
Comm listener error: {e}')\n            finally:\n                s.close()\n                client_socket[0].close()\n\n        \n\nif __name__ == '__main__':\n    Node()\n","repo_name":"kevvvinreed/327-P2P","sub_path":"FileSync.py","file_name":"FileSync.py","file_ext":"py","file_size_in_byte":27545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}{"seq_id":"31682434881","text":"import unittest\nimport hypothesis\nfrom hypothesis import given, assume, settings\nimport hypothesis.strategies as st\n\ndef sort_nums(incoming):\n    \"\"\"side effect: deletes incoming as it processes it. If we wanted to avoid that,\n    we could copy incoming to a local variable.\"\"\"\n    sought = 1\n    store_early = {}\n    while len(incoming) > 0:\n        key, value = incoming.popitem()\n        if key == sought:\n            yield value\n            sought += 1\n            for a, b in check_store(sought, store_early):\n                yield a\n                sought = b\n\n        else:\n            store_early[key] = value\n\n\n\n\ndef check_store(seek, store):\n    while seek in store:\n        found = store[seek]\n        del store[seek]\n        seek += 1\n        yield found, seek\n\n\n\nclass TestOrders(unittest.TestCase):\n    @settings(deadline=None, timeout=hypothesis.unlimited, suppress_health_check = hypothesis.HealthCheck.all())\n    @given(test_dict = st.dictionaries(st.integers(min_value = 1),\n                                       st.characters()))\n    def test_sorting(self, test_dict):\n        for i in range(len(test_dict) + 1):\n            assume(i == 0 or i in test_dict)\n\n        copy_of_test_dict = test_dict.copy()\n        ordered_values = [i for i in sort_nums(test_dict)]\n        if len(copy_of_test_dict) > 0:\n            comparison_list = [copy_of_test_dict[i+1] for i in range(len(copy_of_test_dict))]\n            self.assertEqual(ordered_values, comparison_list)\n        else:\n            self.assertEqual(ordered_values, [])\n\n\n\n\n\nif __name__ == '__main__':\n    unittest.main()","repo_name":"BenDBush/pynumord","sub_path":"pynumord.py","file_name":"pynumord.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}{"seq_id":"24769256049","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\n\ndef query_class(HSCode):\n    # Send an HTTP request to fetch the page content\n    url = f'https://hsciq.com/HSCN/Code/{HSCode}'\n    response = requests.get(url)\n    html_code = response.content\n\n    # Parse the HTML\n    soup = BeautifulSoup(html_code, 'html.parser')\n\n    # Extract the code/description pairs\n    rows = soup.find_all('tr')\n    result = []\n    for row in rows:\n        tds = row.find_all('td')\n        if len(tds) == 2:\n            code = tds[0].text.strip()\n            description = tds[1].text.strip()\n            result.append((code, description))\n\n    # Build the progressive (chapter -> heading -> subheading) description\n    progressive_info = '->'.join([f'{code} {description}' for code, description in result])\n    # print(progressive_info)\n    filtered_info = re.findall(r'(第\\d+章[\\s\\S]*)', progressive_info)[0]\n\n    return filtered_info\n\n","repo_name":"noobwei/HS-Code-Tool","sub_path":"hs2class.py","file_name":"hs2class.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}{"seq_id":"14606124006","text":"from setuptools import setup, find_packages\nfrom pybind11.setup_helpers import Pybind11Extension,build_ext\nfrom setuptools.command.install import install\nfrom pathlib import Path\ndef get_project_root() -> Path:\n    print(Path(__file__).absolute())\n    return Path(__file__).parent.absolute()\n\nprojectPath = get_project_root()\n\nprint(projectPath)\n\nmodule2 = [\n    Pybind11Extension(\n        \"cgcCALC\",\n        [\"%s/%s\"%(projectPath,'src/getCGC.cpp'),\n        \"%s/%s\"%(projectPath,'src/cgc.cpp'),\n        \"%s/%s\"%(projectPath,'src/latex.cpp'),\n     
#\"%s/%s\"%(projectPath,'src/layout.cpp'),\n \"%s/%s\"%(projectPath,'src/utilities.cpp'),], # Sort source files for reproducibility\n include_dirs = [\"%s/%s\"%(projectPath,'include')],\n #\n ),\n]\n\nsetup(name='SymbolicCI',\n version='1.0',\n description='Python Distribution Utilities',\n author='Anurag Singh',\n author_email='anuragsingh291293@gmail.com',\n url='https://github.com/darkcoordinate',\n install_requires=[\n 'numpy',\n 'sympy',\n 'torch',\n 'pybind11',\n 'npyscreen',\n 'pathos',\n ],\n\n cmdclass={\"build_ext\": build_ext},\n #packages=[\"cgcCALC\"],\n scripts=[\"%s/%s\"%(projectPath,\"src/SymbolicCI-Coupling.py\"),\n \"%s/%s\"%(projectPath,\"SymbolicCI-TrimerCoupling.py\"),\n \"%s/%s\"%(projectPath,\"SymbolicCI-TrimerPlot.py\"),\n \"%s/%s\"%(projectPath,\"TrimerCalculate.sh\"),],\n ext_modules = module2\n )\n","repo_name":"roehr-lab/SymbolicCI","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"8690472375","text":"# Use the file name mbox-short.txt as the file name\n\n#Write a program that prompts for a file name, then opens that file and reads through the file, looking for lines of the form:\n#X-DSPAM-Confidence: 0.8475\n\nfname = input(\"Enter file name: \")\n#fname = 'mbox-short.txt'\nfh = open(fname)\n\nnums = []\n\nfor line in fh:\n if not line.startswith(\"X-DSPAM-Confidence:\") : continue\n x = line.find(\":\")\n num = float(line[x+1:len(line)])\n\n nums.append(num)\n\n#Count these lines and extract the floating point values from each of the lines and compute the average of those values and produce an output as shown below. Do not use the sum() function or a variable named sum in your solution.\n\nif len(nums) > 0 :\n total = 0\n lineNum = len(nums)\n\n for n in nums :\n total = total + n\n\n average = total / lineNum\n\n print('Average spam confidence:',average)\n\n\n\n","repo_name":"threadkind/py4e","sub_path":"ex_07_02.py","file_name":"ex_07_02.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"24966901049","text":"\"\"\"\nA direct translation of the webvtt file parsing algorithm.\n\nSee https://w3c.github.io/webvtt/#file-parsing for documentation\n\"\"\"\nimport re\nimport string\n\nSPACE_CHARACTERS = [' ', '\\t', '\\n', '\\f', '\\r']\nSPACE_SPLIT_PATTERN = r\"[{}]*\".format(''.join(SPACE_CHARACTERS))\nDIGITS = string.digits\n\nclass DictInit:\n def __init__(self, **dict):\n self.__dict__.update(dict)\n\nclass VTTCue(DictInit): pass\nclass VTTRegion(DictInit): pass\nclass Stylesheet(DictInit): pass\n\nclass W3CParser:\n input = None\n position = None\n\n def collect_characters(self, condition):\n result = \"\"\n while self.position < len(self.input) and condition(self.input[self.position]):\n result += self.input[self.position]\n self.position += 1\n return result\n\n def skip_whitespace(self):\n self.collect_characters(lambda c: c in SPACE_CHARACTERS)\n\n def parse_percentage_string(self, input):\n 'parse a percentage string'\n\n # 1.\n input = input\n\n # 2.\n if not re.match(r'^\\d+(\\.\\d+)?%$', input):\n return None\n\n # 3.\n percentage = float(input[:-1])\n\n # 4.\n if percentage < 0 or percentage > 100:\n return None\n\n # 5.\n return percentage\n\nclass VTTParser(W3CParser):\n def __init__(self, input):\n self.input = input\n self.position = 0\n self.seen_cue = False\n\n self.text_tracks = []\n 
self.stylesheets = []\n self.regions = []\n self.errors = []\n\n def parse(self):\n 'WebVTT parser algorithm'\n\n # 1.\n self.input = self.input.replace('\\0', '\\ufffd').replace('\\r\\n', '\\n').replace('\\r', '\\n')\n\n # 2.\n self.position = 0\n\n # 3.\n self.seen_cue = False\n\n # 4.\n if len(self.input) < 6:\n self.errors.append('input too small for webvtt')\n return\n\n # 5.\n if len(self.input) == 6 and self.input != 'WEBVTT':\n self.errors.append('invalid webvtt header')\n return\n\n # 6.\n if len(self.input) > 6:\n if not (self.input[0:6] == 'WEBVTT' and self.input[6] in ['\\u0020', '\\u0009', '\\u000A']):\n self.errors.append('invalid webvtt header')\n return\n\n # 7.\n self.collect_characters(lambda c: c != '\\n')\n\n # 8.\n if self.position >= len(self.input):\n return\n\n # 9.\n if self.input[self.position] == '\\n':\n self.position += 1\n\n # 10.\n if self.position >= len(self.input):\n return\n\n # 11.\n if self.input[self.position] != '\\n':\n self.collect_block(in_header = True)\n else:\n self.position += 1\n\n # 12.\n self.collect_characters(lambda c: c == '\\n')\n\n # 13.\n self.regions = []\n\n # 14.\n while self.position < len(self.input):\n # 1.\n block = self.collect_block()\n\n # 2.\n if isinstance(block, VTTCue):\n self.text_tracks.append(block)\n\n # 3.\n elif isinstance(block, Stylesheet):\n self.stylesheets.append(block)\n\n # 4.\n elif isinstance(block, VTTRegion):\n self.regions.append(block)\n\n # 5.\n self.collect_characters(lambda c: c == '\\n')\n\n # 15.\n return\n\n def collect_block(self, in_header = False):\n 'collect a WebVTT block'\n\n # 1. (done by class)\n\n line_count = 0 # 2.\n previous_position = self.position # 3.\n line = \"\" # 4.\n buffer = \"\" # 5.\n seen_eof = False # 6.\n seen_arrow = False # 7.\n cue = None # 8.\n stylesheet = None # 9.\n region = None # 10.\n\n # 11.\n while True:\n # 1.\n line = self.collect_characters(lambda c: c != '\\n')\n\n # 2.\n line_count += 1\n\n # 3.\n if self.position >= len(self.input):\n seen_eof = True\n else:\n self.position += 1\n\n # 4.\n if '-->' in line:\n # 1.\n if not in_header and (line_count == 1 or line_count == 2 and not seen_arrow):\n # 1.\n seen_arrow = True\n\n # 2.\n previous_position = self.position\n\n # 3.\n cue = VTTCue(\n id = buffer,\n pause_on_exit = False,\n region = None,\n writing_direction = 'horizontal',\n snap_to_lines = True,\n line = 'auto',\n line_alignment = 'start alignment',\n position = 'auto',\n position_alignment = 'auto',\n cue_size = 100,\n text_alignment = 'center',\n text = '',\n )\n\n # 4.\n if not VTTCueParser(self, line, cue).collect_cue_timings_and_settings():\n cue = None\n else:\n buffer = ''\n self.seen_cue = True # DIFFERENCE\n\n else:\n self.errors.append('invalid webvtt cue block')\n self.position = previous_position\n break\n\n # 5.\n elif line == '':\n break\n\n # 6.\n else:\n # 1.\n if not in_header and line_count == 2:\n # 1.\n if not self.seen_cue and re.match(r'^STYLE\\s*$', buffer):\n stylesheet = Stylesheet(\n location = None,\n parent = None,\n owner_node = None,\n owner_rule = None,\n media = None,\n title = None,\n alternate = False,\n origin_clean = True,\n source = None,\n )\n buffer = ''\n # 2.\n elif not self.seen_cue and re.match(r'^REGION\\s*$', buffer):\n region = VTTRegion(\n id = '',\n width = 100,\n lines = 3,\n anchor_point = (0, 100),\n viewport_anchor_point = (0, 100),\n scroll_value = None,\n )\n buffer = ''\n\n # 2.\n if buffer != '':\n buffer += '\\n'\n\n # 3.\n buffer += line\n\n # 4.\n previous_position = self.position\n\n # 
7.\n if seen_eof:\n break\n\n # 12.\n if cue is not None:\n cue.text = buffer\n return cue\n\n # 13.\n elif stylesheet is not None:\n stylesheet.source = buffer\n return stylesheet\n\n # 14.\n elif region is not None:\n self.collect_region_settings(region, buffer)\n return region\n\n # 15.\n return None\n\n def collect_region_settings(self, region, input):\n 'collect WebVTT region settings'\n\n # 1.\n settings = re.split(SPACE_SPLIT_PATTERN, input)\n\n # 2.\n for setting in settings:\n # 1.\n if ':' not in setting:\n continue\n\n index = setting.index(':')\n if index in [0, len(setting) - 1]:\n continue\n\n # 2.\n name = setting[:index]\n\n # 3.\n value = setting[index + 1:]\n\n # 4.\n if name == \"id\":\n region.id = value\n\n elif name == \"width\":\n percentage = self.parse_percentage_string(value)\n if percentage is not None:\n region.width = percentage\n\n elif name == \"lines\":\n # 1.\n if not re.match(r'^\\d+$', value):\n continue\n\n # 2.\n number = int(value)\n\n # 3.\n region.lines = number\n\n elif name == \"regionanchor\":\n # 1.\n if ',' not in value:\n continue\n\n #. 2.\n index = value.index(',')\n anchorX = value[:index]\n\n # 3.\n anchorY = value[index + 1:]\n\n # 4.\n percentageX = self.parse_percentage_string(anchorX)\n percentageY = self.parse_percentage_string(anchorY)\n if None in [percentageX, percentageY]:\n continue\n\n # 5.\n region.anchor_point = (percentageX, percentageY)\n\n elif name == \"viewportanchor\":\n # 1.\n if ',' not in value:\n continue\n\n #. 2.\n index = value.index(',')\n viewportanchorX = value[:index]\n\n # 3.\n viewportanchorY = value[index + 1:]\n\n # 4.\n percentageX = self.parse_percentage_string(viewportanchorX)\n percentageY = self.parse_percentage_string(viewportanchorY)\n if None in [percentageX, percentageY]:\n continue\n\n # 5.\n region.viewport_anchor_point = (percentageX, percentageY)\n\n elif name == \"scroll\":\n # 1.\n if value == \"up\":\n region.scroll_value = \"up\"\n\n # 5.\n continue\n\n\nclass VTTCueParser(W3CParser):\n def __init__(self, parent, input, cue):\n self.parent = parent\n self.errors = self.parent.errors\n self.input = input\n self.position = 0\n self.cue = cue\n\n def collect_cue_timings_and_settings(self):\n 'collect WebVTT cue timings and settings'\n\n # 1. 
(handled by class)\n\n # 2.\n self.position = 0\n\n # 3.\n self.skip_whitespace()\n\n # 4.\n timestamp = self.collect_timestamp()\n if timestamp is None:\n self.errors.append('invalid start time for VTTCue')\n return False\n self.cue.start_time = timestamp\n\n # 5.\n self.skip_whitespace()\n\n # 6.\n if self.input[self.position] != '-':\n return False\n self.position += 1\n\n # 7.\n if self.input[self.position] != '-':\n return False\n self.position += 1\n\n # 8.\n if self.input[self.position] != '>':\n return False\n self.position += 1\n\n # 9.\n self.skip_whitespace()\n\n # 10.\n timestamp = self.collect_timestamp()\n if timestamp is None:\n self.errors.append('invalid end time for VTTCue')\n return False\n self.cue.end_time = timestamp\n\n # 11.\n remainder = self.input[self.position:]\n\n # 12.\n self.parse_settings(remainder)\n\n # Extra\n return True\n\n def parse_settings(self, input):\n 'parse the WebVTT cue settings'\n\n # 1.\n\n settings = re.split(SPACE_SPLIT_PATTERN, input)\n\n # 2.\n for setting in settings:\n # 1.\n if ':' not in setting:\n continue\n\n index = setting.index(':')\n if index in [0, len(setting) - 1]:\n continue\n\n # 2.\n name = setting[:index]\n\n # 3.\n value = setting[index + 1:]\n\n # 4.\n if name == 'region':\n # 1.\n last_regions = (region for region in reversed(self.parent.regions) if region.id == value)\n self.cue.region = next(last_regions, None)\n\n elif name == 'vertical':\n # 1. and 2.\n if value in ['rl', 'lr']:\n self.cue.writing_direction = value\n\n elif name == 'line':\n # 1.\n if ',' in value:\n index = value.index(',')\n linepos = value[:index]\n linealign = value[index + 1:]\n\n # 2.\n else:\n linepos = value\n linealign = None\n\n # 3.\n if not re.search(r'\\d', linepos):\n continue\n\n # 4.\n if linepos[-1] == '%':\n number = self.parse_percentage_string(linepos)\n if number is None:\n continue\n else:\n # 1.\n if not re.match(r'^[-\\.\\d]*$', linepos):\n continue\n\n # 2.\n if '-' in linepos[1:]:\n continue\n\n # 3.\n if linepos.count('.') > 1:\n continue\n\n # 4.\n if '.' 
in linepos:\n if not re.search(r'\\d\\.\\d', linepos):\n continue\n\n # 5.\n number = float(linepos)\n\n # 5.\n if linealign == \"start\":\n self.cue.line_alignment = 'start'\n\n # 6.\n elif linealign == \"center\":\n self.cue.line_alignment = 'center'\n\n # 7.\n elif linealign == \"end\":\n self.cue.line_alignment = 'end'\n\n # 8.\n elif linealign != None:\n continue\n\n # 9.\n self.cue.line = number\n\n # 10.\n if linepos[-1] == '%':\n self.cue.snap_to_lines = False\n else:\n self.cue.snap_to_lines = True\n\n elif name == 'position':\n # 1.\n if ',' in value:\n index = value.index(',')\n colpos = value[:index]\n colalign = value[index + 1:]\n\n # 2.\n else:\n colpos = value\n colalign = None\n\n # 3.\n number = self.parse_percentage_string(colpos)\n if number is None:\n continue\n\n # 4.\n if colalign == \"line-left\":\n self.cue.line_alignment = 'line-left'\n\n # 5.\n elif colalign == \"center\":\n self.cue.line_alignment = 'center'\n\n # 6.\n elif colalign == \"line-right\":\n self.cue.line_alignment = 'line-right'\n\n # 7.\n elif colalign != None:\n continue\n\n # 8.\n self.cue.position = number\n\n elif name == 'size':\n # 1.\n number = self.parse_percentage_string(value)\n if number is None:\n continue\n\n # 2.\n self.cue.cue_size = number\n\n elif name == 'align':\n # 1.\n if value == 'start':\n self.cue.text_alignment = 'start'\n\n # 2.\n if value == 'center':\n self.cue.text_alignment = 'center'\n\n # 3.\n if value == 'end':\n self.cue.text_alignment = 'end'\n\n # 4.\n if value == 'left':\n self.cue.text_alignment = 'left'\n\n # 5.\n if value == 'right':\n self.cue.text_alignment = 'right'\n\n # 5.\n continue\n\n def collect_timestamp(self):\n 'collect a WebVTT timestamp'\n\n # 1. (handled by class)\n\n # 2.\n most_significant_units = 'minutes'\n\n # 3.\n if self.position >= len(self.input):\n return None\n\n # 4.\n if self.input[self.position] not in DIGITS:\n return None\n\n # 5.\n string = self.collect_characters(lambda c: c in DIGITS)\n\n # 6.\n value_1 = int(string)\n\n # 7.\n if len(string) != 2 or value_1 > 59:\n most_significant_units = 'hours'\n\n # 8.\n if self.position >= len(self.input) or self.input[self.position] != ':':\n return None\n self.position += 1\n\n # 9.\n string = self.collect_characters(lambda c: c in DIGITS)\n\n # 10.\n if len(string) != 2:\n return None\n\n # 11.\n value_2 = int(string)\n\n # 12.\n if most_significant_units == 'hours' or self.position < len(self.input) and self.input[self.position] == ':':\n # 1.\n if self.position >= len(self.input) or self.input[self.position] != ':':\n return None\n self.position += 1\n\n # 2.\n string = self.collect_characters(lambda c: c in DIGITS)\n\n # 3.\n if len(string) != 2:\n return None\n\n # 4.\n value_3 = int(string)\n else:\n value_3 = value_2\n value_2 = value_1\n value_1 = 0\n\n # 13.\n if self.position >= len(self.input) or self.input[self.position] != '.':\n return None\n self.position += 1\n\n # 14.\n string = self.collect_characters(lambda c: c in DIGITS)\n\n # 15.\n if len(string) != 3:\n return None\n\n # 16.\n value_4 = int(string)\n\n # 17.\n if value_2 >= 59 or value_3 >= 59:\n return None\n\n # 18.\n result = value_1 * 60 * 60 + value_2 * 60 + value_3 + value_4 / 1000\n\n # 19.\n return result\n\n\ndef main(argv):\n files = [open(path, 'r') for path in argv[1:]]\n\n try:\n for file in files:\n parser = VTTParser(file.read())\n parser.parse()\n\n print(\"Results: {}\".format(file))\n print(\" Cues: {}\".format(parser.text_tracks))\n print(\" StyleSheets: {}\".format(parser.stylesheets))\n 
print(\" Regions: {}\".format(parser.regions))\n print(\" Errors: {}\".format(parser.errors))\n finally:\n for file in files:\n file.close()\n\nif __name__ == '__main__':\n import sys\n main(sys.argv);\n","repo_name":"servo/servo","sub_path":"tests/wpt/tests/webvtt/parsing/file-parsing/tools/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":18918,"program_lang":"python","lang":"en","doc_type":"code","stars":24247,"dataset":"github-code","pt":"86"} +{"seq_id":"7384767777","text":"from django.db import models\nfrom django.db.models.deletion import CASCADE\nfrom django.db.models import DateTimeField\nfrom django.core.validators import MinValueValidator\n\nfrom rest_framework.exceptions import ValidationError\nfrom colorfield.fields import ColorField\n\nfrom users.models import CustomUser\nfrom foodgram.constants import (NUMBER_SYMBOLS,\n NUMBER_SYMBOL_FOR_COLORS,\n COOKING_TIME_MIN_VALUE,\n INGREDIENT_MIN_AMOUNT)\n\n\nclass Recipe(models.Model):\n \"\"\"Модель рецепта.\"\"\"\n\n author = models.ForeignKey(\n CustomUser, on_delete=CASCADE,\n related_name='recipes',\n verbose_name='Автор'\n )\n name = models.CharField(\n max_length=NUMBER_SYMBOLS,\n unique=True,\n verbose_name='Название рецепта',\n help_text='Введите название рецепта',\n )\n image = models.ImageField(\n upload_to='recipe_images/',\n blank=True,\n verbose_name='Фото',\n help_text='Загрузите фото',\n )\n text = models.TextField(\n verbose_name='Описание',\n help_text='Введите описание рецепта',\n )\n ingredients = models.ManyToManyField(\n 'Ingredient',\n through='IngredientRecipe',\n verbose_name='Ингридиенты',\n help_text='Перечислите ингредиенты',\n )\n tags = models.ManyToManyField(\n 'Tag', related_name='recipes',\n verbose_name='Теги'\n )\n cooking_time = models.PositiveSmallIntegerField(\n validators=[MinValueValidator(\n COOKING_TIME_MIN_VALUE, 'Количество должно превышать 0',)],\n verbose_name='Время приготовления',\n help_text='Укажите время приготовления',\n )\n pub_date = DateTimeField(\n verbose_name='Дата публикации',\n auto_now_add=True,\n )\n\n class Meta:\n ordering = ['-pub_date']\n verbose_name = 'Рецепт'\n verbose_name_plural = 'Рецепты'\n\n def __str__(self):\n return self.name\n\n\nclass Tag(models.Model):\n \"\"\"Модель тега.\"\"\"\n\n name = models.CharField(\n max_length=NUMBER_SYMBOLS,\n unique=True,\n verbose_name='Тег'\n )\n color = ColorField(\n max_length=NUMBER_SYMBOL_FOR_COLORS,\n unique=True,\n verbose_name='Цвет тега',\n )\n slug = models.SlugField(\n max_length=NUMBER_SYMBOLS,\n unique=True,\n verbose_name='Слаг тега'\n )\n\n def __str__(self):\n return f'{self.name}'\n\n\nclass Ingredient(models.Model):\n \"\"\"Модель ингридиентов.\"\"\"\n\n name = models.CharField(\n max_length=NUMBER_SYMBOLS,\n verbose_name='Ингридиент',\n help_text='Введите ингредиент',\n )\n measurement_unit = models.CharField(\n max_length=NUMBER_SYMBOLS,\n verbose_name='Единица измерения',\n help_text='Введите единицу измерения',\n )\n\n class Meta:\n verbose_name = 'Ингредиент'\n verbose_name_plural = 'Ингредиенты'\n ordering = ('name',)\n constraints = (\n models.UniqueConstraint(\n fields=('name', 'measurement_unit',),\n name='unique_name_measurement_unit',\n ),\n )\n\n def __str__(self):\n return self.name\n\n\nclass IngredientRecipe(models.Model):\n ingredient = models.ForeignKey(\n Ingredient,\n on_delete=models.CASCADE,\n related_name='ingredient_recipe',\n verbose_name='Ингредиент'\n )\n amount = models.PositiveSmallIntegerField(\n 'Количество', validators=[MinValueValidator(\n 
INGREDIENT_MIN_AMOUNT, message='Минимальное количество 1!')])\n recipe = models.ForeignKey(\n Recipe,\n on_delete=models.CASCADE,\n related_name='ingredient_recipe',\n verbose_name='Рецепт'\n )\n\n class Meta:\n verbose_name = 'Содержание ингредиента'\n verbose_name_plural = 'Содержание ингредиентов'\n constraints = (\n models.UniqueConstraint(\n fields=('ingredient', 'amount',),\n name='unique_ingredient_amount',\n ),\n )\n\n def __str__(self):\n return (\n f'{self.ingredient.name} ({self.ingredient.measurement_unit})'\n f' - {self.amount} '\n )\n\n\nclass Follow(models.Model):\n \"\"\"\"Модель подписки.\"\"\"\n user = models.ForeignKey(CustomUser,\n on_delete=CASCADE,\n related_name='follower',\n verbose_name='Подписчик',\n help_text='Подписчик на автора рецепта')\n author = models.ForeignKey(CustomUser, on_delete=CASCADE,\n related_name='followed',\n verbose_name='Автор',\n help_text='Автор рецепта')\n\n class Meta:\n constraints = [models.UniqueConstraint(\n fields=['author', 'user'],\n name='unique_object'\n )]\n\n def __str__(self):\n return f'{self.user} подписался на {self.author}'\n\n def save(self, **kwargs):\n if self.user == self.author:\n raise ValidationError(\"Невозможно подписаться на себя\")\n super().save()\n\n\nclass Favorite(models.Model):\n \"\"\"Модель избранное.\"\"\"\n\n user = models.ForeignKey(\n CustomUser,\n on_delete=CASCADE,\n related_name='favorites',\n verbose_name='Пользователь'\n )\n recipe = models.ForeignKey(\n Recipe,\n on_delete=CASCADE,\n related_name='favorites',\n verbose_name='Рецепт'\n )\n\n class Meta:\n verbose_name = 'Избранное'\n verbose_name_plural = 'Избранное'\n constraints = [models.UniqueConstraint(\n fields=['user', 'recipe'], name='unique_favourite')\n ]\n\n def __str__(self):\n return f'{self.user} добавил \"{self.recipe}\" в Избранное'\n\n\nclass ShoppingCart(models.Model):\n \"\"\"Модель корзины.\"\"\"\n\n user = models.ForeignKey(\n CustomUser,\n on_delete=models.CASCADE,\n verbose_name='Пользователь',\n related_name='shopping_cart',\n )\n recipe = models.ForeignKey(\n Recipe,\n on_delete=models.CASCADE,\n verbose_name='Рецепт',\n related_name='shopping_cart',\n )\n\n class Meta:\n verbose_name = 'Покупка'\n verbose_name_plural = 'Покупки'\n ordering = ('-id',)\n\n def __str__(self):\n return f'{self.user} добавил \"{self.recipe}\" в Корзину'\n","repo_name":"Alex09k/foodgram-project-react","sub_path":"backend/recipes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"10351162705","text":"import csv\n\nfrom logchimera.heterogeneity import estimate_heterogeneity_csv_file, estimate_heterogeneity_generic_file_using_log_parsing\nfrom logchimera.mixing import mixing_labeled_data, mixing_unlabeled_data\nfrom logchimera.fuzzing import fuzz_data\n\ndef estimate_heterogeneity(file_path, csv_file=False):\n \"\"\"\n Estimate heterogeneity for a log file.\n\n Parameters:\n -----------\n file_path : str\n The path to the log file to be analyzed.\n\n csv_file : bool, optional (default=False)\n Specifies whether the input file is in CSV format or not. If set to True, the function expects\n the log file to contain the following three columns: Content, EventTemplate, Variables. 
If set\n to False, a generic log file format will be assumed, and the function will attempt to estimate\n heterogeneity based on the file's content (each log on a new line).\n\n Returns:\n --------\n h_level : float\n The estimated level of heterogeneity in the log file, with higher values indicating greater\n heterogeneity (from 0 to 1).\n\n Notes:\n ------\n - Heterogeneity is estimated based on the log file's content and structure.\n - When `csv_file` is set to True, the function assumes a specific CSV format with predefined columns (three columns named: Content, EventTemplate, Variables).\n - When `csv_file` is set to False, the function attempts to estimate heterogeneity from the generic log\n file format, with each log entry separated by a new line character.\n\n Example Usage:\n -------------\n To estimate heterogeneity for a generic log file:\n >>> h_level = estimate_heterogeneity(\"generic_log.txt\")\n\n To estimate heterogeneity for a CSV-formatted log file:\n >>> h_level = estimate_heterogeneity(\"csv_log.csv\", csv_file=True)\n \"\"\"\n h_level = 0\n\n if csv_file:\n h_level = estimate_heterogeneity_csv_file(file_path)\n else:\n h_level = estimate_heterogeneity_generic_file_using_log_parsing(file_path)\n \n return h_level\n\ndef mixing(percentage, file_path, labels=False, dataset_name=\"Apache\"):\n \"\"\"\n Increase log heterogeneity through mixing.\n\n This function takes a file path and a percentage value as input.\n \n Parameters:\n file_path (str): The path to the file to be changed.\n percentage (float): The amount of logs to be replaced, ranging from 1 to 25.\n\n Returns:\n float: The new heterogeneity level after mixing the logs.\n \"\"\"\n print(\"Computing initial heterogeneity...\")\n estimate_heterogeneity(file_path)\n\n perc = 0\n if not labels:\n print(\"No labels functionality not available\")\n return \"No labels functionality not available\"\n else:\n print(\"\\nMixing...\")\n mixed_file_save_path = mixing_labeled_data(percentage, file_path)\n\n estimate_heterogeneity(mixed_file_save_path)\n\ndef fuzzing(file_path):\n \"\"\"\n Increase log heterogeneity through fuzzing.\n\n This function takes a file path as input.\n \n Parameters:\n file_path (str): The path to the file to be fuzzed.\n\n Returns:\n float: The new heterogeneity level after fuzzing the file.\n \"\"\"\n fuzzed_file_path = fuzz_data(file_path)\n return fuzzed_file_path\n\ndef function_test(test_string):\n '''\n '''\n return \"\"","repo_name":"spetrescu/logchimera","sub_path":"src/logchimera/logchimera.py","file_name":"logchimera.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"34511803839","text":"import os\n# import pyodbc \nimport pandas as pd\nimport numpy as np\n\n__path__ = os.path.dirname(os.path.realpath(__file__))\nos.chdir(__path__)\n\ndata = pd.read_csv(\n 'input/WED_setup_data.csv', sep=';', header=0)\n\nschema = pd.read_csv('input/WED_setup_data_headers.csv', \n sep=';', \n header=0,\n encoding = \"ISO-8859-1\", \n engine='python')\n\ndef set_dummies(data, dummies):\n for dummy in dummies:\n df_dummies = pd.get_dummies(data[dummy], prefix=dummy)\n \n return pd.concat([data, df_dummies], axis=1)\n\ndrop = schema.loc[schema['KEEP/DROP']=='drop']['ID'].to_list() \ncontinuous = schema.loc[(schema['DATA_TYPE']=='continuous') & (schema['KEEP/DROP']!='drop')]['ID'].to_list() \nid = ['id']\ncategorical = schema.loc[(schema['DATA_TYPE']=='categorical') & 
(schema['KEEP/DROP']!='drop')]['ID'].to_list() \nbinary = schema.loc[(schema['DATA_TYPE']=='binary') & (schema['KEEP/DROP']!='drop')]['ID'].to_list() \n\n\ndata[continuous] = data[continuous].astype(np.float64)\ndata[categorical] = data[categorical].astype(np.object)\ndata[binary] = data[binary].astype(np.uint8)\ndata[id] = data[id].astype(np.object)\n\n\n\n\n\n# data = set_dummies(data, categorical)\n# data = data.drop(drop+categorical, axis=1)\n\n# data.to_csv('../output/mungy/data1.csv')\n\n\n# data[continuous] = data[continuous].apply(lambda x: (x - np.mean(x)) / (np.max(x) - np.min(x)))\n\n# data.to_csv('../output/mungy/data2.csv', index = False)","repo_name":"bankauskas/optimisation_shortest_path","sub_path":"scripts/mungy.py","file_name":"mungy.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"45807269864","text":"from pwn import *\n\nr=process('./split_armv5')\n\nr.recvuntil('> ')\nflag= 0x0002103c\npop_r3_pc=0x000103a4\nmov_r0_r3_pop_fp_pc=0x00010558\nsystem=0x000105e0\npayload='a'*36\npayload+=p32(pop_r3_pc)\npayload+=p32(flag)\npayload+=p32(mov_r0_r3_pop_fp_pc)\npayload+=p32(0)\npayload+=p32(system)\n\n#x=open('payload','w')\n#x.write(payload)\n#x.close()\nr.sendline(payload)\n\nr.interactive()\n","repo_name":"lunashci/ropemporium","sub_path":"split/armv5/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"72973108445","text":"import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(1)\n\nwhile True:\n durum,cerceve = cap.read()\n gri=cv2.cvtColor(cerceve,cv2.COLOR_BGR2GRAY)\n t,sb=cv2.threshold(gri,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n cv2.imshow(\"kamera\",sb)\n if cv2.waitKey(50)==27:\n break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"serkancam/bilsem23_24","sub_path":"O2A2_2324/goruntu_isleme/kamera_goruntu_alma.py","file_name":"kamera_goruntu_alma.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"19331958989","text":"import torch\r\nimport numpy as np\r\nfrom torch_geometric.data import Data\r\nfrom pymatgen.core import Structure\r\nfrom pymatgen.core.periodic_table import DummySpecies\r\nfrom pymatgen.optimization.neighbors import find_points_in_spheres\r\n\r\n\r\nclass MyTensor(torch.Tensor):\r\n \"\"\"\r\n this class is needed to work with graphs without edges\r\n \"\"\"\r\n def max(self, *args, **kwargs):\r\n if torch.numel(self) == 0:\r\n return 0\r\n else:\r\n return torch.max(self, *args, **kwargs)\r\n\r\n\r\nclass SimpleCrystalConverter:\r\n def __init__(\r\n self,\r\n target_name,\r\n atom_converter=None,\r\n bond_converter=None,\r\n add_z_bond_coord=False,\r\n cutoff=5.0\r\n ):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n atom_converter: converter that converts pymatgen structure to node features\r\n bond_converter: converter that converts distances to edge features\r\n add_z_bond_coord: use z-coordinate feature or no\r\n cutoff: cutoff radius\r\n \"\"\"\r\n self.target_name = target_name\r\n self.cutoff = cutoff\r\n self.atom_converter = atom_converter if atom_converter else DummyConverter()\r\n self.bond_converter = bond_converter if bond_converter else DummyConverter()\r\n self.add_z_bond_coord = add_z_bond_coord\r\n\r\n def convert(self, d):\r\n lattice_matrix = 
np.ascontiguousarray(np.array(d.lattice.matrix), dtype=float)\r\n pbc = np.array([1, 1, 1], dtype=int)\r\n cart_coords = np.ascontiguousarray(np.array(d.cart_coords), dtype=float)\r\n\r\n center_indices, neighbor_indices, _, distances = find_points_in_spheres(\r\n cart_coords, cart_coords, r=self.cutoff, pbc=pbc, lattice=lattice_matrix, tol=1e-8\r\n )\r\n\r\n exclude_self = (center_indices != neighbor_indices)\r\n\r\n edge_index = torch.Tensor(np.stack((center_indices[exclude_self], neighbor_indices[exclude_self]))).long()\r\n\r\n x = torch.Tensor(self.atom_converter.convert(d)).long()\r\n\r\n distances_preprocessed = distances[exclude_self]\r\n if self.add_z_bond_coord:\r\n z_coord_diff = np.abs(cart_coords[edge_index[0], 2] - cart_coords[edge_index[1], 2])\r\n distances_preprocessed = np.stack(\r\n (distances_preprocessed, z_coord_diff), axis=0\r\n )\r\n\r\n edge_attr = torch.Tensor(self.bond_converter.convert(distances_preprocessed))\r\n state = getattr(d, \"state\", None) or [[0.0, 0.0]]\r\n y = getattr(d, self.target_name) if hasattr(d, self.target_name) else 0\r\n bond_batch = MyTensor(np.zeros(edge_index.shape[1])).long()\r\n\r\n return Data(\r\n x=x, edge_index=edge_index, edge_attr=edge_attr, state=torch.Tensor(state), y=y, bond_batch=bond_batch\r\n )\r\n\r\n def __call__(self, d):\r\n return self.convert(d)\r\n\r\n\r\nclass DummyConverter:\r\n def convert(self, d):\r\n return d.reshape((-1, 1))\r\n\r\n\r\nclass GaussianDistanceConverter:\r\n def __init__(self, centers=np.linspace(0, 5, 100), sigma=0.5):\r\n self.centers = centers\r\n self.sigma = sigma\r\n\r\n def convert(self, d):\r\n return np.exp(\r\n -((d.reshape((-1, 1)) - self.centers.reshape((1, -1))) / self.sigma) ** 2\r\n )\r\n\r\n def get_shape(self):\r\n return len(self.centers)\r\n\r\n\r\nclass FlattenGaussianDistanceConverter(GaussianDistanceConverter):\r\n def __init__(self, centers=np.linspace(0, 5, 100), sigma=0.5):\r\n super().__init__(centers, sigma)\r\n\r\n def convert(self, d):\r\n res = []\r\n for arr in d:\r\n res.append(super().convert(arr))\r\n return np.hstack(res)\r\n\r\n def get_shape(self):\r\n return 2 * len(self.centers)\r\n\r\n\r\nclass AtomFeaturesExtractor:\r\n def __init__(self, atom_features):\r\n self.atom_features = atom_features\r\n\r\n def convert(self, structure: Structure):\r\n if self.atom_features == \"Z\":\r\n return np.array(\r\n [0 if isinstance(i, DummySpecies) else i.Z for i in structure.species]\r\n ).reshape(-1, 1)\r\n elif self.atom_features == 'werespecies':\r\n return np.array([\r\n [\r\n 0 if isinstance(i, DummySpecies) else i.Z,\r\n i.properties[\"was\"],\r\n ] for i in structure.species\r\n ])\r\n else:\r\n return np.array(\r\n [0 if isinstance(i, DummySpecies) else i.Z for i in structure.species]\r\n ).reshape(-1, 1)\r\n\r\n def get_shape(self):\r\n if self.atom_features == \"Z\":\r\n return 1\r\n elif self.atom_features == 'werespecies':\r\n return 2\r\n else:\r\n return None\r\n","repo_name":"RomanovIgnat/MEGNet_PyTorch","sub_path":"MEGNet/utils/Struct2Graph.py","file_name":"Struct2Graph.py","file_ext":"py","file_size_in_byte":4718,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"86"} +{"seq_id":"3386924809","text":"# streamlit\nimport streamlit as st\nfrom streamlit_multipage import MultiPage\n\nimport random\nimport pandas as pd\n\nimport loadWebsite, deriveResult\n\nquestionNumber = 5\n\nquestions = pd.read_csv('면접 답안.csv')\n\ndef home(st, **state):\n loadWebsite.resetQuestion()\n\n randNumList = []\n ran_num = 
random.randint(0, len(questions)-1)\n\n for i in range(questionNumber):\n while ran_num in randNumList:\n ran_num = random.randint(0, len(questions)-1)\n randNumList.append(ran_num)\n\n for i in randNumList:\n loadWebsite.addQuestion(i, questions['질문 문항'][i])\n\n loadWebsite.loadHomePage()\n\ndef test(st, **state):\n loadWebsite.loadTestPage()\n\ndef about(st, **state):\n loadWebsite.loadAboutPage()\n\ndef result(st, **state):\n loadWebsite.resetScoreList()\n\n for i in range(questionNumber):\n replyScore = deriveResult.score(loadWebsite.infoReply(i+1))\n replyIdx = loadWebsite.infoQuestion(i, 'idx')\n\n loadWebsite.addScore({\n 'originQuestion': [replyIdx, loadWebsite.infoQuestion(i, 'str')],\n 'replyAnswer': deriveResult.drawBlueColor(loadWebsite.infoReply(i+1)),\n \n 'gradingResult': {\n 'replyScore': replyScore,\n '자기 표현력': questions['자기 표현력'][replyIdx],\n '리더십 역량': questions['리더십 역량'][replyIdx],\n '직무 역량': questions['직무 역량'][replyIdx],\n '태도 역량': questions['태도 역량'][replyIdx],\n '인간 관계': questions['인간 관계'][replyIdx]\n }\n })\n\n loadWebsite.loadResultPage()\n\napp = MultiPage()\napp.st = st\n\napp.add_app(\"Home\", home)\napp.add_app(\"Test\", test)\napp.add_app(\"About\", about)\napp.add_app(\"Result\", result)\n\napp.run()\n","repo_name":"Checking-pks/Korean-Interview-Practice-Service","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"42800387864","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom models import db\n\nGREY_BACKGROUND = \"#DCDCDC\"\n\n\nclass ElevatorFloors(ttk.Frame):\n def __init__(self, parent, controller, show_frame):\n super().__init__(parent)\n\n self.controller = controller\n self.curr_floor = tk.StringVar(value=\"WAITING\")\n self.dest_floor = tk.StringVar()\n self.curr_action = tk.StringVar(value=f\"STOPPED\")\n self.user = tk.StringVar()\n self._elevator_job = None\n\n # input to ride as a user\n prompt_container = tk.Frame(self)\n prompt_container.grid(row=0, column=0, columnspan=2, sticky=\"\")\n prompt_container[\"background\"] = GREY_BACKGROUND\n prompt_label = ttk.Label(prompt_container, text=\"RIDE AS: \", font=(\"Helvetica\", 10))\n prompt_entry = ttk.Entry(prompt_container, width=5, textvariable=self.user, font=(\"Segoe UI\", 15))\n prompt_label.grid(row=0, column=0, sticky=\"nsew\")\n prompt_entry.grid(row=0, column=1, padx=(20,20), sticky=\"nsew\")\n prompt_submit = tk.Button(\n prompt_container,\n text=\"GO\",\n command=self.ride,\n cursor=\"hand2\",\n borderwidth=4\n )\n prompt_submit.grid(row=0, column=2, sticky=\"nsew\")\n # input for destination floor\n dest_floor_label = ttk.Label(self, text=\"Destination Floor: \")\n dest_floor_input = ttk.Entry(self, width=3, textvariable=self.dest_floor, font=(\"Segoe UI\", 15))\n dest_floor_label.grid(row=4, column=0, sticky=\"e\")\n dest_floor_input.grid(row=4, column=1, sticky=\"ew\")\n # buttons\n button_container = ttk.Frame(self, padding=15)\n button_container.grid(row=5, column=0, columnspan=2, sticky=\"ew\")\n button_container.columnconfigure((0, 1, 2), weight=1)\n self.start_button = tk.Button(\n button_container,\n text=\"START\",\n command=self.move,\n cursor=\"hand2\",\n borderwidth=4\n\n )\n self.start_button.grid(row=0, column=0, padx=5, pady=5, sticky=\"ew\")\n self.stop_button = tk.Button(\n button_container,\n text=\"STOP\",\n command=self.stop,\n cursor=\"hand2\",\n borderwidth=4\n )\n self.stop_button.grid(row=0, column=1, padx=5, 
pady=5, sticky=\"ew\")\n self.back_button = tk.Button(\n button_container,\n text=\"-> Back\",\n cursor=\"hand2\",\n borderwidth=4,\n command=show_frame\n )\n self.back_button.grid(row=0, column=2, padx=5, pady=5, sticky=\"ew\")\n # showing the state of the elevator (going up, down, etc)\n elevator_description = ttk.Label(\n self,\n textvariable=self.curr_action,\n style=\"ElevatorAction.TLabel\",\n )\n elevator_description.grid(row=1, column=0, columnspan=2)\n\n # showing the curr floor in real time as it goes up/down\n floor_frame = ttk.Frame(self, height=150)\n floor_frame.grid(row=2, column=0, pady=(10, 0), padx=(30, 0), columnspan=2, sticky=\"nsew\")\n floor_display = ttk.Label(\n floor_frame,\n textvariable=self.curr_floor,\n style=\"ElevatorFloors.TLabel\"\n )\n floor_display.place(relx=0.5, rely=0.5, anchor=\"center\")\n\n for child in self.winfo_children():\n child.grid_configure(padx=70, pady=50)\n\n def move(self):\n try:\n user = db.get_user(self.user.get())\n except IndexError:\n self.curr_action.set(\"User not found\")\n return\n\n new_floor = 0\n current_floor = int(self.curr_floor.get().split(\" \")[1])\n if current_floor == int(self.dest_floor.get()):\n self.curr_action.set(f\"{user.name.upper()} IS STOPPED\")\n else:\n if current_floor > int(self.dest_floor.get()):\n self.curr_action.set(f\"{user.name.upper()} IS GOING DOWN\")\n new_floor = current_floor - 1\n elif current_floor < int(self.dest_floor.get()):\n self.curr_action.set(f\"{user.name.upper()} IS GOING UP\")\n new_floor = current_floor + 1\n\n self.curr_floor.set(f\"FLOOR {new_floor}\")\n user.wait_floor = new_floor\n self._elevator_job = self.after(1000, self.move)\n\n def stop(self):\n if self._elevator_job:\n self.after_cancel(self._elevator_job)\n self.curr_action.set(\"STOPPED\")\n self._elevator_job = None\n\n def ride(self):\n self.curr_action.set(\"STOPPED\")\n try:\n user = db.get_user(self.user.get())\n except IndexError:\n self.curr_action.set(\"User not found\")\n return\n self.curr_floor.set(f\"FLOOR {user.wait_floor}\")\n self.dest_floor.set(user.dest_floor)","repo_name":"willsmckenna/elevator-gui","sub_path":"frames/ElevatorFloors.py","file_name":"ElevatorFloors.py","file_ext":"py","file_size_in_byte":4843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"9900848638","text":"from ansiblemetrics.ansible_modules import FACT_MODULES_LIST\nfrom ansiblemetrics.ansible_metric import AnsibleMetric\n\n\nclass NumFactModules(AnsibleMetric):\n \"\"\" This class measures the number of fact modules in a playbook.\n\n Fact modules are modules that do not alter state but rather return data.\n Knowing the number of fact modules in a playbook could represent a measure of the responsibility of the playbook.\n The assumption is that the lower the fact modules wrt the total number of modules in the script,\n the more unstable is the class behaviour, as the other modules alter its state.\n \"\"\"\n\n def count(self):\n \"\"\"Return the number of external modules.\n\n Example\n -------\n .. highlight:: python\n .. 
code-block:: python\n\n from ansiblemetrics.general.num_fact_modules import NumFactModules\n\n playbook = '''\n - name: Find all instances in the specified region\n ali_instance_facts: # Fact module\n alicloud_access_key: \"{{ alicloud_access_key }}\"\n alicloud_secret_key: \"{{ alicloud_secret_key }}\"\n alicloud_region: '{{ alicloud_region }}'\n register: all_instances\n\n - name: Print data to terminal window\n debug: # not fact module\n msg: 'End of tasks'\n '''\n\n NumFactModules(playbook).count()\n\n >> 1\n\n Returns\n -------\n int\n number of external modules\n\n \"\"\"\n facts_modules = 0\n\n for task in self.tasks:\n if not task:\n continue\n\n if any(k in FACT_MODULES_LIST for k in task):\n facts_modules += 1\n\n return facts_modules\n","repo_name":"radon-h2020/radon-ansible-metrics","sub_path":"ansiblemetrics/playbook/num_fact_modules.py","file_name":"num_fact_modules.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"86"} +{"seq_id":"25750543781","text":"import pygame\n\nfrom src.events.event_handler import EventHandler\n\n\nclass Game:\n\n def __init__(self):\n pygame.init()\n pygame.display.set_caption('RPG')\n self.event_handler = EventHandler()\n self.event_loop()\n\n def event_loop(self):\n clock = pygame.time.Clock()\n while not self.event_handler.crashed:\n events = pygame.event.get()\n for event in events:\n self.event_handler.handle_event(event)\n clock.tick(60)\n","repo_name":"Naatoo/pygame-RPG2d","sub_path":"src/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"71528195163","text":"challenge_input = [\n \"The daily diary of the American dream\",\n \"For the sky and the sea, and the sea and the sky\",\n \"Three grey geese in a green field grazing, Grey were the geese and green was the grazing.\",\n \"But a better butter makes a batter better.\",\n \"His soul swooned slowly as he heard the snow falling faintly through the universe and faintly falling, like the descent of their last end, upon all the living and the dead.\",\n \"Whisper words of wisdom, let it be.\",\n \"They paved paradise and put up a parking lot.\",\n \"So what we gonna have, dessert or disaster?\",\n]\n\n\nstop_words = [\n \"i\",\n \"a\",\n \"about\",\n \"an\",\n \"and\",\n \"are\",\n \"as\",\n \"at\",\n \"be\",\n \"by\",\n \"com\",\n \"for\",\n \"from\",\n \"how\",\n \"in\",\n \"is\",\n \"it\",\n \"of\",\n \"on\",\n \"or\",\n \"that\",\n \"the\",\n \"this\",\n \"to\",\n \"was\",\n \"what\",\n \"when\",\n \"where\",\n \"who\",\n \"will\",\n \"with\",\n]\n\n\ndef alit(sentence):\n alit = []\n words = sentence.split()\n words = [w.lower() for w in words if w.lower() not in stop_words]\n for i, word in enumerate(words[:-1], start=1):\n if word[0] == words[i][0]:\n if word not in alit:\n alit.append(word)\n if words[i] not in alit:\n alit.append(words[i])\n return alit\n\n\nfor sentence in challenge_input:\n print(alit(sentence))\n","repo_name":"avidit/r_dailyprogramer","sub_path":"288/easy/288_easy_detecting_alliteration.py","file_name":"288_easy_detecting_alliteration.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"16003621933","text":"def binary_search(target, start, end, array):\n while start <= end:\n mid = (start + end) // 2\n if array[mid] == target:\n return mid\n if 
array[mid] < target:\n            start = mid + 1\n        else:\n            end = mid - 1\n    return None\n\ndef solution(customer, store):\n    answer = []\n    for custom in customer:\n        if binary_search(custom, 0, len(store) - 1, store) is not None:\n            answer.append('yes')\n        else:\n            answer.append('no')\n\n    return ' '.join(answer)\n\nif __name__ == '__main__':\n    TESTCASES = [\n        [[5, 7, 9], [8, 3, 7, 9, 2]]\n    ]\n    for idx, (customer, store) in enumerate(TESTCASES):\n        if solution(customer, store) == 'no yes yes':\n            print(f'TESTCASE {idx+1} PASSED!')","repo_name":"JoSihun/ThisIsCodingTest","sub_path":"Part2_Chapter07/p197_7-2.py","file_name":"p197_7-2.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"70681685083","text":"# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\n### The odd thing about this code: why is l1 modified after execution finishes?\nimport copy\nclass Solution:\n    def mergeTwoLists(self, l1: ListNode, l2: ListNode):\n        p1,p2 = l1,l2\n        # if not p1 and not p2:\n        #     return\n        # if not p1:\n        #     return p2\n        # if not p2:\n        #     return p1\n        dHead = ListNode(-5)\n        pd = dHead\n        while(p1 and p2):\n    .. math:: \\\frac{1}{2}\\\langle Hx, x\\\rangle + \\\langle f, x\\\rangle + c \\\rightarrow \\\min,\n    .. math:: lb \\\leqslant Ax \\\leqslant ub,\n    .. math:: Bx = b\n\n    \"\"\"\n    \n    H = __transform_matrix(H)\n    A = __transform_matrix(A)\n    B = __transform_matrix(B)\n    \n    fun = lambda x: 0.5 * np.dot(x, np.dot(H,x)) + np.dot(f, x) + c\n    \n    jac = lambda x: 0.5 * np.dot(H + H.T, x) + f\n    \n    hess = lambda x: 0.5 * (H + H.T)\n    \n    constr_ineq = LinearConstraint(A, lb, ub)\n    \n    constr_eq = LinearConstraint(B, b-1e-12, b+1e-12)\n\n#    iter = 0\n#    def callback_F(x, res):\n#        global iter\n#        iter += 1\n#        print('x = ', x)\n#        print('jac = ', jac(x))\n#        print('hess = ', hess(x))\n    \n    return minimize(fun, x0, method='trust-constr', jac=jac, hess=hess, constraints=[constr_ineq, constr_eq], **kwargs)\n    ","repo_name":"andreevnick/robust-financial-portfolio-management-framework","sub_path":"robustfpm/cxhull/quadprog.py","file_name":"quadprog.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"}
+{"seq_id":"5065401357","text":"import numpy as np\nimport pytest\n\nfrom src.app.core.api import Features\nfrom src.app.core.calculator import Calculator\n\n\nclass TestCalculator(object):\n    \"\"\"Calculator test.\"\"\"\n\n    @pytest.mark.parametrize(\n        'proba, inc_per_adult, home_feature, annuity_to_income_ratio, years_old, expected_value',\n        [\n            (0, 20000, 1, 0.1, 25, 20000 * 8 * 1.2 * 1.2 * 1.2),\n            (0.1, 20000, 1, 0.1, 25, 20000 * 5 * 1.2 * 1.2 * 1.2),\n            (0.2, 20000, 1, 0.1, 25, 20000 * 3 * 1.2 * 1.2 * 1.2),\n            (1, 20000, 1, 0.1, 25, 20000 * 3 * 1.2 * 1.2 * 1.2),\n            (1, np.nan, 1, 0.1, 25, 10000 * 1.2 * 1.2 * 1.2),\n            (1, np.nan, 0, 0.1, 25, 10000 * 0.8 * 1.2 * 1.2),\n            (1, np.nan, 0, 0.1, 24, 10000 * 0.8 * 1.2 * 0.8),\n            (1, np.nan, 0, 0.1, 50, 10000 * 0.8 * 1.2 * 0.8),\n            (1, np.nan, 0, 0.2, 50, 10000 * 0.8 * 0.8 * 0.8),\n        ],\n    )\n    def test_calc_amount(\n        self,\n        proba,\n        inc_per_adult,\n        home_feature,\n        annuity_to_income_ratio,\n        years_old,\n        expected_value,\n    ):\n        \"\"\"\n        For testing the calculator.\n\n        :param proba: boundary probability\n        :param inc_per_adult: income per adult\n        :param home_feature: whether a home is owned\n        :param annuity_to_income_ratio: ratio of payments to income\n        :param years_old: age in years\n        :param expected_value: how much money is requested\n        \"\"\"\n        features = Features(\n            INC_PER_ADULT=inc_per_adult,\n            
HOME_FEATURE=home_feature,\n            ANNUITY_TO_INCOME_RATIO=annuity_to_income_ratio,\n            YEARS_OLD=years_old,\n        )\n        calculator = Calculator()  # so that merge requests could be made\n        assert calculator.calc_amount(\n            proba,\n            features,\n        ) == expected_value\n","repo_name":"TarelkinEvgenii/Curriculum-vitae","sub_path":"credit_scoring/tests/src/app/core/test_calculator.py","file_name":"test_calculator.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"75048232923","text":"class Solution:\n    def sortColors(self, arr: List[int]) -> None:\n        \"\"\"\n        Do not return anything, modify nums in-place instead.\n        \"\"\"\n        \n        low,mid,high = 0,0,len(arr)-1\n        while(mid <= high):\n            if arr[mid] == 2:\n                arr[mid],arr[high] = arr[high],arr[mid]\n                high -= 1\n            elif arr[mid] == 0:\n                arr[mid],arr[low] = arr[low], arr[mid]\n                low += 1\n                mid += 1\n            else:\n                mid += 1\n","repo_name":"amitrajitbose/Competitive_Programming","sub_path":"Searching_Sorting/SortColors.py","file_name":"SortColors.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"86"}
+{"seq_id":"40145833932","text":"'''\nAuthor: Thyssen Wen\nDate: 2022-04-27 15:27:42\nLastEditors : Thyssen Wen\nLastEditTime : 2022-11-09 13:22:09\nDescription: dataset builder\nFilePath : /SVTAS/svtas/loader/builder.py\n'''\nfrom ..utils.build import Registry\nfrom ..utils.build import build\n\nDATASET = Registry('dataset')\nPIPLINE = Registry('pipline')\nDECODE = Registry('decode')\nCONTAINER = Registry('container')\nSAMPLER = Registry('sampler')\nTRANSFORM = Registry('transform')\n\ndef build_dataset(cfg):\n    \"\"\"Build dataset.\"\"\"\n    args = cfg.copy()\n    obj_type = args.get('name')\n    if obj_type in DATASET:\n        return build(cfg, DATASET)\n    raise ValueError(f'{obj_type} is not registered in '\n                     'DATASET')\n\ndef build_pipline(cfg):\n    \"\"\"Build pipline.\"\"\"\n    args = cfg.copy()\n    obj_type = args.get('name')\n    if obj_type in PIPLINE:\n        return build(cfg, PIPLINE)\n    raise ValueError(f'{obj_type} is not registered in '\n                     'PIPLINE')\n\ndef build_decode(cfg):\n    \"\"\"Build decode.\"\"\"\n    args = cfg.copy()\n    obj_type = args.get('name')\n    if obj_type in DECODE:\n        return build(cfg, DECODE)\n    raise ValueError(f'{obj_type} is not registered in '\n                     'DECODE')\n\ndef build_container(cfg):\n    \"\"\"Build container.\"\"\"\n    args = cfg.copy()\n    obj_type = args.get('name')\n    if obj_type in CONTAINER:\n        return build(cfg, CONTAINER)\n    raise ValueError(f'{obj_type} is not registered in '\n                     'CONTAINER')\n\ndef build_sampler(cfg):\n    \"\"\"Build sampler.\"\"\"\n    args = cfg.copy()\n    obj_type = args.get('name')\n    if obj_type in SAMPLER:\n        return build(cfg, SAMPLER)\n    raise ValueError(f'{obj_type} is not registered in '\n                     'SAMPLER')\n\ndef build_transform(cfg):\n    \"\"\"Build transform.\"\"\"\n    args = cfg.copy()\n    obj_type = args.get('name')\n    if obj_type in TRANSFORM:\n        return build(cfg, TRANSFORM)\n    raise ValueError(f'{obj_type} is not registered in '\n                     'TRANSFORM')","repo_name":"Thinksky5124/SVTAS","sub_path":"svtas/loader/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"86"}
+{"seq_id":"37068619302","text":"#!/usr/bin/env python\n\nimport re\nfrom math import sqrt\nfrom itertools import count, islice\n\n##\n #function isPrime\n #takes number input\n #returns true if number is prime\n##\n\ndef isPrime(num):\n    
if num < 2:\n        return False\n    elif num == 2:\n        return True\n    for number in range(2,num):\n        if num%number == 0:\n            return False\n    return True\n\n#class PrimeDates\n#\nclass PrimeDates:\n\n    #dictionary that contains the month and total days from Jan 1st to the first day in the month\n    #keys are lowercase\n    #\n    months = {'january': 0,'february': 31,'march':59,'april':90,'may':120,'june':151,'july':181,'august':212,'september':244,'october':273,'november':304,'december':334 }\n\n    #dictionary containing the max number of days in each month\n    #for error checking\n    #keys are lowercase\n    #\n    days = {'january': 31,'february': 28,'march':31,'april':30,'may':31,'june':30,'july':31,'august':31,'september':30,'october':31,'november':30,'december':31 }\n\n\n    #Constructor\n    #initialize values to -1\n    #declare empty lists\n    #\n    def __init__(self):\n        self.data = []\n        self.splitData = []\n        self.num = -1\n\n    #read method\n    #\n    def read(self,fp):\n        #open file\n        #\n        with open(fp, \"r\") as f:\n            #loop over file\n            #\n            for line in f:\n                #grab line and make it lowercase\n                #\n                line = line.lower()\n                #remove all punctuation\n                #\n                line = re.sub(\"[^a-z0-9]+\" ,' ',line)\n                #append date to list for printing purposes later\n                #\n                self.data.append(line)\n\n    #convert method\n    #\n    def convert(self):\n        #loop over list of dates\n        #\n        for line2 in self.data:\n            #split string into string array\n            #\n            line2 = line2.split()\n            #check if the month is valid\n            #\n            if line2[0] in self.months:\n                #check if date is valid\n                #\n                if int(line2[1]) <= self.days[line2[0]]:\n                    #Convert date to integer\n                    #\n                    self.num = self.months[line2[0]] + int(line2[1])\n                #print error message\n                #\n                else:\n                    print(\"Date out of range. Enter in format month day year\")\n                    break\n            #print error message\n            #\n            else:\n                print(\"Incorrect date. Enter in format month day year (check your spelling)\")\n                break\n\n            #check if number is prime\n            #\n            if isPrime(self.num):\n                print(\"%s %s, %s corresponds to the integer %d, and is a prime number\" %(line2[0].title(),line2[1], line2[2], self.num))\n            else:\n                print(\"%s %s, %s corresponds to the integer %d, and is not a prime number\" %(line2[0].title(),line2[1],line2[2], self.num))\n\n\n\n\n","repo_name":"SnazyMan/School","sub_path":"olivieri_tyler/qu_10/foo_00.py","file_name":"foo_00.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"22999088036","text":"#!/usr/bin/env python3\n'''\nCreated on Oct 10, 2016\n\n@author: huaminli\n'''\nfrom keras.layers import Input, Dense\nfrom keras.models import Model\nimport keras.optimizers\nfrom keras.regularizers import l2\nfrom keras import callbacks as cb\nfrom keras.callbacks import LearningRateScheduler\nimport numpy as np\nfrom Util import Monitoring as mn\nimport sklearn.metrics\nimport matplotlib\nimport pylab as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import pyplot as plt1\nimport math\nimport random\nimport tensorflow as tf\nfrom keras.metrics import sparse_categorical_crossentropy\nfrom keras.metrics import sparse_categorical_accuracy\nimport keras.backend as K\nimport os.path\nfrom Util import FileIO as io\n\n\ndef step_decay(epoch):\n    '''\n    Learning rate schedule.\n    '''\n    initial_lrate = 1e-3\n    drop = 0.5\n    epochs_drop = 50.0\n    lrate = initial_lrate * math.pow(drop,math.floor((1+epoch)/epochs_drop))\n    return lrate\n\ndef f1score(confusionMatrix):\n    '''\n    Calculate the F1 score of a given confusion matrix.\n    '''\n#    col1 = confusionMatrix[1:,:1]\n#    
confusionMatrix = confusionMatrix[1:,1:]\n# temp = np.zeros(confusionMatrix.shape)\n# \n# for i in range(0, confusionMatrix.shape[0]):\n# for j in range(0, confusionMatrix.shape[1]):\n# if col1[i,0] > 0:\n# temp[i,j] = random.randint(0, col1[i,0])\n# col1[i,0] = col1[i,0] - temp[i,j]\n \n# confusionMatrix = confusionMatrix + temp\n# confusionMatrix = confusionMatrix.astype(int)\n \n sum_C = np.sum(confusionMatrix, axis = 1) # sum of each row\n sum_K = np.sum(confusionMatrix, axis = 0) # sum of each column\n \n Pr = np.divide(confusionMatrix, np.matlib.repmat(np.array([sum_C]).T, 1, \n confusionMatrix.shape[0]))\n Re = np.divide(confusionMatrix, \n np.matlib.repmat(sum_K, confusionMatrix.shape[1], 1))\n\n F = np.divide(2 * np.multiply(Pr, Re), Pr + Re)\n\n for i in range(0, F.shape[0]):\n for j in range(0, F.shape[1]):\n if np.isnan(F[i,j]):\n F[i,j] = 0\n \n F = np.max(F, axis = 1)\n return np.dot(sum_C, F)/np.sum(sum_C)\n\ndef trainClassifier(trainSample, mode = 'None', i = 0,\n hiddenLayersSizes = [12, 6, 3],\n activation = 'softplus', l2_penalty = 1e-4,\n path = 'None'):\n # Remove unlabeled cells for training.\n x_train = trainSample.X[trainSample.y != 0]\n y_train = trainSample.y[trainSample.y != 0]\n \n # Labels start from 0.\n y_train = np.int_(y_train) - 1\n\n \n # Special case in GvHD: label in those files are 0,1,3,4 with no 2.\n if mode == 'GvHD' and (i == 5 or i == 9 or \n i == 10 or i == 11):\n y_train[y_train != 0] = y_train[y_train != 0] - 1\n\n # Expand labels, to work with sparse categorical cross entropy.\n y_train = np.expand_dims(y_train, -1)\n \n # Construct a feed-forward neural network.\n inputLayer = Input(shape = (x_train.shape[1],))\n hidden1 = Dense(hiddenLayersSizes[0], activation = activation,\n kernel_regularizer = l2(l2_penalty))(inputLayer)\n hidden2 = Dense(hiddenLayersSizes[1], activation = activation,\n kernel_regularizer = l2(l2_penalty))(hidden1)\n hidden3 = Dense(hiddenLayersSizes[2], activation = activation,\n kernel_regularizer = l2(l2_penalty))(hidden2)\n# numClasses = len(np.unique(trainSample.y)) - 1 # with 0 class\n numClasses = len(np.unique(trainSample.y)) # without 0 class\n# numClasses = 57 # for HMIS-2\n outputLayer = Dense(numClasses, activation = 'softmax')(hidden3)\n \n encoder = Model(inputs = inputLayer, outputs = outputLayer)\n net = Model(inputs = inputLayer, outputs = outputLayer)\n lrate = LearningRateScheduler(step_decay)\n optimizer = keras.optimizers.rmsprop(lr = 0.0)\n\n net.compile(optimizer = optimizer, \n loss = 'sparse_categorical_crossentropy')\n net.fit(x_train, y_train, epochs = 80, batch_size = 128, shuffle = True,\n validation_split = 0.1, verbose = 0, \n callbacks=[lrate, mn.monitor(),\n cb.EarlyStopping(monitor = 'val_loss',\n patience = 25, mode = 'auto')])\n try:\n net.save(os.path.join(io.DeepLearningRoot(),\n 'savemodels/' + path + '/cellClassifier.h5'))\n except OSError:\n pass\n #plt.close('all')\n \n return net\n\ndef prediction(testSample, mode, i, net):\n # Labels start from 0.\n y_test = np.int_(testSample.y)\n \n # Special case in GvHD: label in those files are 0,1,3,4 with no 2.\n if mode == 'GvHD' and (i == 5 or i == 9 or \n i == 10 or i == 11):\n y_test[y_test > 1] = y_test[y_test > 1] - 1\n \n # Expand labels, to work with sparse categorical cross entropy.\n y_test = np.expand_dims(y_test, -1)\n \n y_test_pred_prob = net.predict(testSample.X, verbose = 0)\n y_test_pred = np.argmax(y_test_pred_prob, axis = 1) + 1\n y_test_pred[np.max(y_test_pred_prob, axis = 1) < .4] = 0\n y_test = np.squeeze(y_test)\n 
\n # Calculate accuracy.\n acc = np.mean(y_test[y_test!=0] == y_test_pred[y_test!=0])\n# acc = np.mean(y_test == y_test_pred)\n confusionMatrix = sklearn.metrics.confusion_matrix(y_test, y_test_pred,\n labels=None)\n# confusionMatrix = sklearn.metrics.confusion_matrix(y_test[y_test!=0], \n# y_test_pred[y_test!=0],\n# labels=None) # for HMIS-2\n F1 = f1score(confusionMatrix)\n \n y_true = y_test[y_test!=0]\n y_true = np.int_(y_true) - 1\n \n print('sample ', i+1)\n print('accuracy: ',np.round(acc*100, 2), '%')\n\n print('F-measure: ',np.round(F1*100, 2))\n print('confusion matrix:\\n', confusionMatrix)\n \n return acc, F1, y_test_pred\n\ndef plotHidden(trainSample, testSample, mode = 'None', i = 0,\n hiddenLayersSizes = [12, 6, 3],\n activation = 'softplus', l2_penalty = 1e-4,\n path = 'None'):\n # Remove unlabeled cells for training.\n x_train = trainSample.X[trainSample.y != 0]\n y_train = trainSample.y[trainSample.y != 0]\n x_test = testSample.X[testSample.y != 0]\n y_test = testSample.y[testSample.y != 0]\n \n # Labels start from 0.\n y_train = np.int_(y_train) - 1\n y_test = np.int_(y_test) - 1\n\n \n # Special case in GvHD: label in those files are 0,1,3,4 with no 2.\n if mode == 'GvHD' and (i == 5 or i == 9 or \n i == 10 or i == 11):\n y_train[y_train != 0] = y_train[y_train != 0] - 1\n\n # Expand labels, to work with sparse categorical cross entropy.\n y_train = np.expand_dims(y_train, -1)\n y_test = np.expand_dims(y_test, -1)\n \n # Construct a feed-forward neural network.\n inputLayer = Input(shape = (x_train.shape[1],))\n hidden1 = Dense(hiddenLayersSizes[0], activation = activation,\n W_regularizer = l2(l2_penalty))(inputLayer)\n hidden2 = Dense(hiddenLayersSizes[1], activation = activation,\n W_regularizer = l2(l2_penalty))(hidden1)\n hidden3 = Dense(hiddenLayersSizes[2], activation = activation,\n W_regularizer = l2(l2_penalty))(hidden2)\n numClasses = len(np.unique(trainSample.y)) - 1\n outputLayer = Dense(numClasses, activation = 'softmax')(hidden3)\n \n encoder = Model(input = inputLayer, output = hidden3)\n # plot data in the 3rd hidden layer\n h3_data = encoder.predict(x_test, verbose = 0)\n #fig, (ax1) = plt1.subplots(1,1, subplot_kw={'projection':'3d'})\n #ax1.scatter(h3_data[:,0], h3_data[:,1], h3_data[:,2], s = 20, c = np.squeeze(y_test))\n \n fig = plt1.figure()\n ax = fig.add_subplot(111, projection = '3d')\n ax.scatter(h3_data[:,0], h3_data[:,1], h3_data[:,2], s = 20, c = np.squeeze(y_test))\n #ax1.set_title('data in 3rd hidden layer')\n plt1.show()\n \n net = Model(input = inputLayer, output = outputLayer)\n lrate = LearningRateScheduler(step_decay)\n optimizer = keras.optimizers.rmsprop(lr = 0.0)\n\n net.compile(optimizer = optimizer, \n loss = 'sparse_categorical_crossentropy')\n net.fit(x_train, y_train, nb_epoch = 80, batch_size = 128, shuffle = True,\n validation_split = 0.1, verbose = 0, \n callbacks=[lrate, mn.monitor(),\n cb.EarlyStopping(monitor = 'val_loss',\n patience = 25, mode = 'auto')])\n try:\n net.save(os.path.join(io.DeepLearningRoot(),\n 'savemodels/' + path + '/cellClassifier.h5'))\n except OSError:\n pass\n #plt.close('all')\n","repo_name":"tabdelaal/CyTOF-Linear-Classifier","sub_path":"DeepCyTOF_on_HMIS/Util/feedforwadClassifier.py","file_name":"feedforwadClassifier.py","file_ext":"py","file_size_in_byte":8877,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"86"} +{"seq_id":"40591956976","text":"import matplotlib.pylab as pylab\nimport math\nimport numpy as np\nfrom numpy.linalg import inv\nimport 
matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d\ndef xval(x, y):\n    resx = ((1 / (1 - x - y)) - 1 / x)\n    return resx\n\ndef yval(x, y):\n    resy = ((1 / (1 - x - y)) - 1 / y)\n    return resy\n\ndef hmatrix(x, y):\n    htemp = np.matrix([[math.pow((1 / (1 - x - y)), 2) + math.pow((1 / x), 2), math.pow((1 / (1 - x - y)), 2)],\n                       [math.pow((1 / (1 - x - y)), 2), math.pow((1 / (1 - x - y)), 2) + math.pow((1 / y), 2)]])\n    hinverse = inv(np.matrix(htemp))\n    return hinverse\n\nxinit = 0.75\nyinit = 0.20\n\nxlist = []\nylist = []\nevalues = []\nhmat = []\nhinverse = []\nenergy = []\n\ndef calculateEnergy(x, y):\n    res = (-(np.log10(1-x-y)))-np.log10(x)-np.log10(y)\n    return res\n\nfor i in range(50):\n    deltafx = [[xval(xinit, yinit)], [yval(xinit, yinit)]]\n    hessian_gradient = np.dot(hmatrix(xinit, yinit), deltafx)\n    xtemp = xinit - hessian_gradient.item(0)\n    ytemp = yinit - hessian_gradient.item(1)\n    energy.append(calculateEnergy(xtemp, ytemp))\n    xlist.append(xtemp)\n    ylist.append(ytemp)\n\n    xinit = xtemp\n    yinit = ytemp\n\npylab.plot(energy)\npylab.show()\n# pylab.plot(xlist, ylist)\n# pylab.show()\ndef plotTrajectory():\n    z = []\n    for i in range(0, 50):\n        i += 1\n        z.append(i)\n\n    pylab.style.use('fivethirtyeight')\n    fig = plt.figure()\n    ax1 = fig.add_subplot(111, projection='3d')\n    ax1.plot_wireframe(xlist,ylist,z)\n\n    ax1.set_xlabel('x ->')\n    ax1.set_ylabel('y ->')\n    ax1.set_zlabel('z ->')\n\n    plt.show()\nplotTrajectory()\nprint(xlist)\nprint(ylist)","repo_name":"ritndgd/Neural-Network","sub_path":"GradientDescent and NewtonsMethod/Hessian.py","file_name":"Hessian.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"17505763777","text":"from statistics import mode\nfrom attr import field\nfrom django.forms import ModelForm\n\nfrom .models import Movie\n\n\nclass MovieForm(ModelForm):\n    class Meta:\n        model = Movie\n        fields = [\n            \"title\",\n            \"year\",\n            \"rotten_tomatoes\",\n            \"metacritic\",\n            \"imdb\",\n            \"fandango\",\n            \"flyer\",\n        ]","repo_name":"thash-jkr/MovieRating-Website-Django","sub_path":"movies/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"1324932267","text":"import matplotlib.pyplot as plt\n\nx = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\ny = [5.30, 5.80, 5.60, 5.40, 5.20, 4.80, 4.80, 5.10, 4.70, 4.60, 4.80, 4.50, 4.10, 4.10]\n\ndef ma(y, window_size, predict_window):\n    \"\"\"Moving average function\"\"\"\n    output_arr = []\n    tmp = 0\n    pw_tmp = predict_window\n    yy = y[-predict_window:]\n    for i in range(window_size - 1, len(y)):\n        for j in range(window_size):\n            tmp += y[i - j]\n        tmp = tmp / window_size\n        output_arr.append(tmp)\n        tmp = 0\n        if i == len(y) - 1:\n            predict_window = predict_window - 1\n            yy.append(output_arr[-1])\n    for i in range(predict_window):\n        for j in range(window_size):\n            tmp += yy[len(yy) - 1 - j]\n        tmp = tmp / window_size\n        yy.append(tmp)\n        output_arr.append(tmp)\n        tmp = 0\n    xx = [i for i in range(window_size, len(output_arr) + window_size)]\n    plt.plot(x, y)\n    plt.plot(xx, output_arr)\n    plt.scatter(x, y)\n    plt.scatter(xx, output_arr)\n    plt.legend(['original', 'MA'])\n    plt.xlabel('x')\n    plt.ylabel('y')\n    plt.grid()\n    plt.show()\n    for i in range(pw_tmp,0,-1):\n        print(output_arr[-i])\n\n    f = open(ma.__name__ + \".txt\", 'w')\n    for i in output_arr:\n        f.write((str(i) + '\\n'))\n    f.close()\n\n\ndef trend(x, y, 
predict_window):\n    \"\"\"Linear trend function\"\"\"\n    x_sum = sum(x)\n    y_sum = sum(y)\n    xy = [i[0] * i[1] for i in zip(x, y)]\n    xx = [i ** 2 for i in x]\n    xx_sum = sum(xx)\n    xy_sum = sum(xy)\n    a0 = (y_sum * xx_sum - x_sum * xy_sum) / (len(x) * xx_sum - x_sum ** 2)\n    a1 = (len(x) * xy_sum - x_sum * y_sum) / (len(x) * xx_sum - x_sum ** 2)\n    print(\"a0 = \", a0)\n    print(\"a1 = \", a1)\n    yy = [a0 + i * a1 for i in range(1, len(x) + 1 + predict_window)]\n    xx = [i for i in range(len(yy))]\n    # for i in range(len(x)):\n    #     print(x[i], yy[i])\n\n    \"\"\"Print the predicted values to the console\"\"\"\n    for i in range(predict_window, 0, -1):\n        print(yy.index(yy[-i]), \"= \", yy[-i])\n\n    plt.plot(x, y)\n    plt.plot(xx, yy)\n    plt.scatter(x, y)\n    plt.scatter(xx, yy)\n    plt.legend(['Original', trend.__name__])\n    plt.xlabel('x')\n    plt.ylabel('y')\n    plt.grid()\n    plt.show()\n    f = open(trend.__name__ + \".txt\", 'w')\n    for i in yy:\n        f.write((str(i) + '\\n'))\n    f.close()\n\n\ndef autoregression(x, y, predict_window):\n    \"\"\"First-order autoregression function\"\"\"\n    tmp_x = y[:-1]\n    tmp_y = y[1:]\n    x_sum = sum(tmp_x)\n    y_sum = sum(tmp_y)\n    xy = [i[0] * i[1] for i in zip(tmp_x, tmp_y)]\n    xx = [i ** 2 for i in tmp_x]\n    xx_sum = sum(xx)\n    xy_sum = sum(xy)\n    a0 = (y_sum * xx_sum - x_sum * xy_sum) / (len(tmp_x) * xx_sum - x_sum ** 2)\n    a1 = (len(tmp_x) * xy_sum - x_sum * y_sum) / (len(tmp_x) * xx_sum - x_sum ** 2)\n    print(\"a0 = \", a0)\n    print(\"a1 = \", a1)\n\n    yy = [a0 + a1 * i for i in y]  # Calculated new array of y\n    \"\"\"Fill in the predicted values\"\"\"\n    for i in range(1, predict_window):\n        yy.append(a0 + a1 * yy[-1])\n    xx = [i for i in range(1, len(yy) + 1)]\n\n    # print(\"x,y,y*\")\n    # for i in range(len(x)):\n    #     print(x[i], y[i], yy[i])\n    \"\"\"Print the predicted values to the console\"\"\"\n    for i in range(predict_window, 0, -1):\n        print(yy[-i])\n    plt.plot(x, y)\n    plt.plot(xx, yy)\n    plt.scatter(x, y)\n    plt.scatter(xx, yy)\n    plt.legend(['Original', autoregression.__name__])\n    plt.xlabel('x')\n    plt.ylabel('y')\n    plt.grid()\n    plt.show()\n    f = open(autoregression.__name__ + \".txt\", 'w')\n    for i in yy:\n        f.write((str(i) + '\\n'))\n    f.close()\n\n\n\n","repo_name":"ISAchainSAW/System_analysis","sub_path":"Sa_3/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"72091289244","text":"from django.http import JsonResponse\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom logistic import settings\nimport jwt\n\n\nclass AuthMiddleware:\n    def __init__(self, get_response):\n        self.get_response = get_response\n    \n    def __call__(self, request):\n        # print('before view')\n        if '/login' in request.path or '/register' in request.path or '/api-docs' in request.path:\n            response = self.get_response(request)\n            # print('after view')\n            return response \n        \n        token = request.headers.get('Authorization')\n        # token = request.COOKIE.get('token')\n        try:\n            payload = jwt.decode(\n                jwt=token,\n                algorithms=['HS256'],\n                key=settings.JWT_KEY\n            )\n        except jwt.DecodeError:\n            return JsonResponse({'ok':'False', 'error': 'Not authorized'}, safe=False, status=401)\n            # return Response({\n            #     'ok' : False,\n            #     'error' : 'you are not authorized'\n            # }, status=status.HTTP_401_UNAUTHORIZED)\n        request.username = payload['username']\n        request.com_id = payload['com_id']\n        response = self.get_response(request)\n        # print('after view')\n        return 
{"seq_id":"72091289244","text":"from django.http import JsonResponse\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom logistic import settings\nimport jwt\n\n\nclass AuthMiddleware:\n    def __init__(self, get_response):\n        self.get_response = get_response\n\n    def __call__(self, request):\n        # print('before view')\n        if '/login' in request.path or '/register' in request.path or '/api-docs' in request.path:\n            response = self.get_response(request)\n            # print('after view')\n            return response\n\n        token = request.headers.get('Authorization')\n        # token = request.COOKIE.get('token')\n        try:\n            payload = jwt.decode(\n                jwt=token,\n                algorithms=['HS256'],\n                key=settings.JWT_KEY\n            )\n        except jwt.DecodeError:\n            return JsonResponse({'ok': False, 'error': 'Not authorized'}, safe=False, status=401)\n            # return Response({\n            #     'ok' : False,\n            #     'error' : 'you are not authorized'\n            # }, status=status.HTTP_401_UNAUTHORIZED)\n        request.username = payload['username']\n        request.com_id = payload['com_id']\n        response = self.get_response(request)\n        # print('after view')\n        return response","repo_name":"saeedseyedhossein/logistic_management","sub_path":"middlewares/auth_middleware.py","file_name":"auth_middleware.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"32656502715","text":"import os\n# These have to be set before importing any mixcoatl modules\nos.environ['ES_ACCESS_KEY'] = 'abcdefg'\nos.environ['ES_SECRET_KEY'] = 'gfedcba'\n\nimport unittest\nfrom mock import patch\nfrom mock import Mock\nimport mixcoatl.geography.region as region\nimport tests.data.region as region_data\n\n@patch('mixcoatl.resource.Resource.get')\nclass TestRegions(unittest.TestCase):\n    def setUp(self):\n        self.last_error = 'job terminated unexpectedly'\n\n    def test_has_all_regions_and_is_Region(self, mock_data):\n        '''test all() returns a list of Region'''\n        mock_data.return_value = region_data.all_regions\n        d = region.Region.all()\n\n    def test_has_a_region(self, mock_data):\n        mock_data.return_value = region_data.one_region\n\n        d = region.Region(19341)\n        # test primary_key\n        assert d.region_id == 19341\n        # test lazy property\n        assert d.status == 'ACTIVE'\n        # test uncameled property\n        assert d.provider_id == 'ap-northeast-1'\n        # test nested uncameled key\n        assert d.customer['customer_id'] == 11111","repo_name":"QSFT/mixcoatl","sub_path":"tests/unit/geography/test_region.py","file_name":"test_region.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"}
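auth_middleware.py above delegates all token checking to PyJWT: a bad signature surfaces as jwt.DecodeError and the middleware answers 401. A minimal round-trip showing the calls it relies on (the key and claims are placeholders; the record reads its key from settings.JWT_KEY, and PyJWT 2.x is assumed, where jwt.encode returns a str):

import jwt  # PyJWT

key = "change-me"  # placeholder secret
token = jwt.encode({"username": "alice", "com_id": 7}, key, algorithm="HS256")

claims = jwt.decode(token, key=key, algorithms=["HS256"])
assert claims["username"] == "alice"

try:
    jwt.decode(token + "x", key=key, algorithms=["HS256"])  # corrupt the signature
except jwt.DecodeError:
    print("tampered token rejected")  # the middleware returns 401 at this point

One caveat: jwt.ExpiredSignatureError is not a DecodeError subclass, so if these tokens ever carried an exp claim, expired tokens would escape the except clause above.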
{"seq_id":"8718214227","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nclass Aadhar:\n    org=\"UIDI\"\n\n#CONSTRUCTOR HELPS YOU TO INITIALIZE VALUES TO YOUR OBJECTS\n#__init__ \n    def __init__(self,ano,name,address):\n        self.ano=ano\n        self.name=name\n        self.address=address \na1=Aadhar(123,\"Sagar\",\"Noida\")\n#self--reference word not a keyword--so you can change it\nprint(a1.ano)\nprint(a1.name)\nprint(a1.address)\nprint(a1.org)\na2=Aadhar(113,\"Preeti\",\"Noida\") \nprint(a2.ano)\nprint(a2.name)\nprint(a2.address)\nprint(a2.org)\nprint(a2)\n\n\n# In[10]:\n\n\nclass EdYoda:\n    cName=\"MLDS\"\n\n    def __init__(self,sName,sID,sMark):\n        self.sName=sName\n        self.sID=sID\n        self.sMark=sMark\n\n    def __init__(self):\n        print(\"Def constr called\")\n\n    def pme(self):\n        print(self.cName)\n\n#default constr\n\nS1=EdYoda()\nprint(S1.cName)\nS1.pme()\n#S2=EdYoda(\"Shreya\",123,90)\n#print(S2.sName) \n\n\n# In[15]:\n\n\n#inheritance\n#inheriting properties and behaviour from parent\n\n#single\n\n# Base class\nclass Parent:\n\tdef func1(self):\n\t\tprint(\"This function is in parent class.\")\n\n# Derived class\nclass Child(Parent):\n\tdef func2(self):\n\t\tprint(\"This function is in child class.\")\n\nO=Child()\nO.func1()\nO.func2()\nT=Parent()\n#T.func2()\nT.func1()\n\n\n# In[21]:\n\n\n#Multiple Inheritance\n\n# Python program to demonstrate\n# multiple inheritance\n# Base class1\nclass Mother:\n\tmothername = \"Mother\"\n\tdef mother(self):\n\t\tprint(self.mothername)\n# Base class2\nclass Father:\n\tfathername = \"Father\"\n\tdef father(self):\n\t\tprint(self.fathername)\n# Derived class\nclass Son(Mother, Father):\n\tdef parents(self):\n\t\tprint(\"Father :\", self.fathername)\n\t\tprint(\"Mother :\", self.mothername)\nP=Son()\nP.parents()\nQ=Father()\n#Q.parents()\nQ.father()\nR=Mother()\n#R.parents()\nR.mother()\n\n\n# In[30]:\n\n\n# Base class\nclass Grandfather:\n    grandfathername=\"OOOO\"\n\n# Intermediate class\nclass Father(Grandfather):\n    fathername=\"PPPPPPP\"\n\n\n# Derived class\nclass Son(Father):\n\tdef __init__(self,sonname):\n\t\tself.sonname = sonname\n\n\tdef print_name(self):\n\t\tprint('Grandfather name :', self.grandfathername)\n\t\tprint(\"Father name :\", self.fathername)\n\t\tprint(\"Son name :\", self.sonname)\n\n\nP=Father()\n\nR=Son(\"RRRRR\")\nR.sonname\nR.print_name()\n\n\n# In[34]:\n\n\n# Base class\nclass Parent:\n\tdef func1(self):\n\t\tprint(\"This function is in parent class.\")\n\n# Derived class1\nclass Child1(Parent):\n\tdef func2(self):\n\t\tprint(\"This function is in child 1.\")\n\n# Derived class2\nclass Child2(Parent):\n\tdef func3(self):\n\t\tprint(\"This function is in child 2.\")\nP=Child2()\n#P.func2()\nP.func1()\nQ=Child1()\nQ.func2()\nQ.func1()\n\n\n\n# In[35]:\n\n\n#Polymorphism\n\n#Polymorphism and Method Overriding---comes from Multiple Inheritance\n\n#polymorphism --one method diff behaviours\n\n#len()\n\nl=[3,4,5,6]\nt=(3,4)\ns=\"7879\"\nprint(len(t))\nprint(len(l))\nprint(len(s))\n\n\n# In[45]:\n\n\n#Method Overriding \n\n#when you try to redefine a method with same name but diff implementation\n\n# Defining parent class\nclass Parent:\n\t\n\t# Constructor\n\tdef __init__(self):\n\t\tself.value = \"Inside Parent\"\t\t\n\t# Parent's show method\n\tdef show(self):\n\t\tprint(\"Parent class implementation\")\t\t\n# Defining child class\nclass Child(Parent):\t\n\t# Constructor\n\tdef __init__(self):\n\t\tself.value = \"Inside Child\"\t\t\n\t# Child's show method\n\tdef show(self):\n\t\tprint(\"Child class implementation\") \nP=Child()\nP.show()\nQ=Parent()\nQ.show()\n\n\n\n\n# In[46]:\n\n\n# Defining parent class 1\nclass Parent1():\n\t\t\n\t# Parent's show method\n\tdef show(self):\n\t\tprint(\"Inside Parent1\")\n\t\t\n# Defining Parent class 2\nclass Parent2():\n\t\t\n\t# Parent's show method\n\tdef display(self):\n\t\tprint(\"Inside Parent2\")\n\t\t\n\t\t\n# Defining child class\nclass Child(Parent1, Parent2):\n\t\t\n\t# Child's show method\n\tdef show(self):\n\t\tprint(\"Inside Child\")\n\t\n\t\t\n# Driver's code\nobj = Child()\n\nobj.show()\nobj.display()\n\n\n# In[51]:\n\n\nclass Parent():\n\t\n\tdef show(self):\n\t\tprint(\"Inside Parent\")\n\t\t\nclass Child(Parent):\n\t\n\tdef show(self):\n\t\t\n\t\t# Calling the parent's class\n\t\t# method\n\t\tParent.show(self)\n\t\tprint(\"Inside Child\")\n\t\t\n# Driver's code\nobj = Child()\nobj.show()\nP=Parent()\nP.show()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Shakhthi/Python","sub_path":"Edyoda/Day28_Python_10May/Day28_Python_10May.py","file_name":"Day28_Python_10May.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
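The Day28 notebook above overrides show() and calls the parent version explicitly as Parent.show(self); the idiomatic spelling is super(), and the method resolution order (MRO) decides which base wins under multiple inheritance. A small sketch (not from the record):

class Parent:
    def show(self):
        print("Inside Parent")

class Child(Parent):
    def show(self):
        super().show()  # same effect as Parent.show(self), but follows the MRO
        print("Inside Child")

class A:
    def who(self):
        return "A"

class B:
    def who(self):
        return "B"

class AB(A, B):
    pass

Child().show()
print(AB().who())                        # "A": the leftmost base wins
print([c.__name__ for c in AB.__mro__])  # ['AB', 'A', 'B', 'object']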
If not, see <http://www.gnu.org/licenses/>.\n#---------------------------------------#\n# author:\n#\ttllake \n# email:\n#\t\n#\t\n# date:\n#\t2011.08.30\n# file:\n#\tsrautoencoder.py\n# description:\n#\tSimple Recursive Autoencoder Class\n#---------------------------------------#\n\nimport numpy as np\nfrom .. units import unittypes\n\nclass SimpleRecursiveAutoencoder(object):\n\t\"\"\"autoencoder class\n\n\t:param nin: number of input units\n\t:param nhid: number of hidden units\n\t:param htype: hidden unit type, see units.py for available types\n\t:param otype: output unit type, see units.py for available types\n\n\t:type nin: int\n\t:type nhid: int\n\t:type htype: string\n\t:type otype: string\n\t\"\"\"\n\tdef __init__(self, nin, nhid, htype = 'tanh', otype = 'sigmoid'):\n\t\tself.nin = nin\n\t\tself.nhid = nhid\n\t\tself.i = np.zeros(nin)\n\t\tself.c = np.zeros(nhid)\n\t\tself.h = np.zeros(nhid)\n\t\tself.oi = np.zeros(nin)\n\t\tself.oc = np.zeros(nhid)\n\n\t\tself.hb = np.zeros(nhid)\n\t\tself.oib = np.zeros(nin)\n\t\tself.ocb = np.zeros(nhid)\n\n\t\tself.whi = np.random.normal(0., 0.2, (nhid, nin))\n\t\tself.whc = np.random.normal(0., 0.2, (nhid, nhid))\n\t\tself.woih = np.random.normal(0., 0.2, (nin, nhid))\n\t\tself.woch = np.random.normal(0, 0.2, (nhid, nhid))\n\n\t\tself.dwhi = np.zeros((nhid, nin))\n\t\tself.dwhc = np.zeros((nhid, nhid))\n\t\tself.dwoih = np.zeros((nin, nhid))\n\t\tself.dwoch = np.zeros((nhid, nhid))\n\t\t\n\t\tself.dhb = np.zeros(nhid)\n\t\tself.doib = np.zeros(nin)\n\t\tself.docb = np.zeros(nhid)\n\n\t\tself.htype = htype\n\t\tself.otype = otype\n\n\t\tself.hact = unittypes[htype]\n\t\tself.oact = unittypes[otype]\n\n\tdef ff(self, x, c):\n\t\t\"\"\"get reconstruction of x\n\n\t\t:param x: input\n\t\t:param c: context\n\t\t:type x: numpy.array\n\t\t:type c: numpy.array\n\t\t:returns: reconstruction of x and c\n\t\t:rtype: (numpy.array, numpy.array)\n\t\t\"\"\"\n\t\tself.h = self.hact(np.dot(self.whi, x) + np.dot(self.whc, c) + self.hb)\n\t\tself.oi = self.oact(np.dot(self.woih, self.h) + self.oib)\n\t\tself.oc = self.hact(np.dot(self.woch, self.h) + self.ocb)\n\t\treturn self.oi, self.oc\n\n\tdef push(self, x):\n\t\t\"\"\"push an input x\n\n\t\t:param x: input\n\t\t:type x: numpy.array\n\t\t:returns: encoding of the current context given x\n\t\t:rtype: numpy.array\n\t\t\"\"\"\n\t\tself.h = self.hact(np.dot(self.whi, x) + np.dot(self.whc, self.h) + self.hb)\n\t\treturn self.h\n\n\tdef pop(self):\n\t\t\"\"\"pop an input vector and return the system to the previous context\n\n\t\t:returns: decoding of the most recent input\n\t\t:rtype: numpy.array\n\t\t\"\"\"\n\t\tself.oi = self.oact(np.dot(self.woih, self.h) + self.oib)\n\t\tself.h = self.hact(np.dot(self.woch, self.h) + self.ocb)\n\t\treturn self.oi\n\n\tdef reset(self):\n\t\t\"\"\"reset the network's stateful hidden units to 0\n\t\t\n\t\t:rtype: None\n\t\t\"\"\"\n\t\tself.h = np.zeros(self.nhid)\n\n\tdef __getstate__(self):\n\t\td = {\n\t\t\t'nin':\t\tself.nin,\n\t\t\t'nhid':\t\tself.nhid,\n\t\t\t'htype':\tself.htype,\n\t\t\t'otype':\tself.otype,\n\t\t\t'whi':\t\tself.whi.copy(),\n\t\t\t'whc':\t\tself.whc.copy(),\n\t\t\t'woih':\t\tself.woih.copy(),\n\t\t\t'woch':\t\tself.woch.copy(),\n\t\t\t'hb':\t\tself.hb.copy(),\n\t\t\t'oib':\t\tself.oib.copy(),\n\t\t\t'ocb':\t\tself.ocb.copy(),\n\t\t\t'dwhi':\t\tself.dwhi.copy(),\n\t\t\t'dwhc':\t\tself.dwhc.copy(),\n\t\t\t'dwoih':\tself.dwoih.copy(),\n\t\t\t'dwoch':\tself.dwoch.copy(),\n\t\t\t'dhb':\t\tself.dhb.copy(),\n\t\t\t'doib':\t\tself.doib.copy(),\n\t\t\t'docb':\t\tself.docb.copy(),\n\t\t\t'hstate':\tself.h.copy()}\n\t\treturn d\n\n\tdef __setstate__(self, d):\n\t\tself.nin =\t\td['nin']\n\t\tself.nhid =\t\td['nhid']\n\t\tself.htype =\td['htype']\n\t\tself.otype =\td['otype']\n\t\tself.whi =\t\td['whi']\n\t\tself.whc =\t\td['whc']\n\t\tself.woih =\t\td['woih']\n\t\tself.woch =\t\td['woch']\n\t\tself.hb =\t\td['hb']\n\t\tself.oib =\t\td['oib']\n\t\tself.ocb =\t\td['ocb']\n\t\tself.dwhi =\t\td['dwhi']\n\t\tself.dwhc =\t\td['dwhc']\n\t\tself.dwoih =\td['dwoih']\n\t\tself.dwoch =\td['dwoch']\n\t\tself.dhb =\t\td['dhb']\n\t\tself.doib =\t\td['doib']\n\t\tself.docb =\t\td['docb']\n\t\t\n\t\tself.hact = unittypes[self.htype]\n\t\tself.oact = unittypes[self.otype]\n\n\t\tself.i = np.zeros(self.nin)\n\t\tself.c = np.zeros(self.nhid)\n\t\tself.h = d['hstate']\n\t\tself.oi = np.zeros(self.nin)\n\t\tself.oc = np.zeros(self.nhid)\n\n\n","repo_name":"thomlake/EbmLib","sub_path":"ebmlib/srautoencoder/srautoencoder.py","file_name":"srautoencoder.py","file_ext":"py","file_size_in_byte":4616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"}
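push() and pop() above make the autoencoder act as a differentiable stack: push folds an input into the hidden state, h = tanh(Whi.x + Whc.h + hb), while pop decodes the most recent input and rewinds h. A short usage sketch, assuming the class and its unittypes activation table are importable from the package path shown in the record (weights are random here, so reconstructions are only meaningful after training):

import numpy as np
from ebmlib.srautoencoder import SimpleRecursiveAutoencoder  # assumed import path

net = SimpleRecursiveAutoencoder(nin=4, nhid=8)

a = np.random.rand(4)
b = np.random.rand(4)
net.push(a)        # h now encodes [a]
net.push(b)        # h now encodes [a, b]
b_hat = net.pop()  # decode the most recent input, rewind the state
a_hat = net.pop()
net.reset()        # zero the hidden state before the next sequence
print(b_hat.shape, a_hat.shape)  # (4,) (4,)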
{"seq_id":"5674748229","text":"import requests\nfrom news_apis.reddit import RedditNews\nfrom news_apis.newsapi import NewsapiNews\nimport config\nimport functools\n\nSIZE_LRU_CACHES = 128\n\nclass NewsAggregator:\n    def __init__(self):\n        self.apis = []\n        self.apis.append(RedditNews(config.reddit_client_id, config.reddit_client_secret, config.reddit_user_agent, 5))\n        self.apis.append(NewsapiNews(config.news_api_key, 5))\n\n    @functools.lru_cache(maxsize=SIZE_LRU_CACHES)\n    def listing(self):\n        news = []\n        for api in self.apis:\n            news.extend(api.listing())\n        return news\n\n    @functools.lru_cache(maxsize=SIZE_LRU_CACHES)\n    def search(self, query):\n        news = []\n        for api in self.apis:\n            news.extend(api.search(query))\n        return news","repo_name":"wizarniak/newsaggregator","sub_path":"aggregator.py","file_name":"aggregator.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
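aggregator.py above memoizes listing() and search(query) with functools.lru_cache, so repeated identical queries skip the upstream API calls. The mechanics in isolation, with a toy function standing in for the network fetch:

import functools

@functools.lru_cache(maxsize=128)
def fetch(query):
    print("miss:", query)  # runs only on a cache miss
    return query.upper()

fetch("python")
fetch("python")            # served from the cache, no "miss" printed
print(fetch.cache_info())  # CacheInfo(hits=1, misses=1, maxsize=128, currsize=1)
fetch.cache_clear()        # drop entries to force fresh results

A side effect of decorating instance methods this way, as the record does, is that the cache keys include self and hold references to each NewsAggregator instance; harmless for a long-lived singleton, but results never refresh until the process restarts or the cache is cleared.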
{"seq_id":"4449380719","text":"from django.urls import reverse\n\nfrom rest_framework import status\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom api.views.save_request_mixin import SaveRequestMixin\n\nfrom remotes.client.actions import (ACTION_COMMAND_GET,\n                                    ACTION_COMMAND_POST,\n                                    ACTION_COMMANDS_LIST,\n                                    ACTION_HOST_REGISTER,\n                                    ACTION_HOST_STATUS,\n                                    ACTION_HOST_VERIFY)\nfrom remotes.constants import ENDPOINTS_FIELD, STATUS_FIELD, STATUS_OK\n\n\nclass DiscoverView(APIView, SaveRequestMixin):\n    permission_classes = (AllowAny, )\n\n    # noinspection PyMethodMayBeStatic\n    def get(self, request, *args, **kwargs):\n        # Save request\n        self.save_request(request, args, kwargs)\n        endpoints = {\n            ACTION_COMMAND_GET: reverse('api.v1.command.get.generic'),\n            ACTION_COMMAND_POST: reverse('api.v1.command.post.generic'),\n            ACTION_COMMANDS_LIST: reverse('api.v1.commands.list'),\n            ACTION_HOST_REGISTER: reverse('api.v1.host.register'),\n            ACTION_HOST_STATUS: reverse('api.v1.host.status'),\n            ACTION_HOST_VERIFY: reverse('api.v1.host.verify')\n        }\n        return Response(\n            data={STATUS_FIELD: STATUS_OK,\n                  ENDPOINTS_FIELD: endpoints},\n            status=status.HTTP_200_OK)\n","repo_name":"muflone/django-remotes","sub_path":"api/views/v1/discover.py","file_name":"discover.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"3809881633","text":"# Problem:\n# https://i.gyazo.com/f7791af0e0b2ef410526204323f2df75.png\n\n# Solution:\nn, m = map(int, input().split())\nli = []\nmax_try, row, col = 0, 0, 0\nfor i in range(n):\n    li.append([int(i) for i in input().split()])\nfor j in range(n):\n    for k in range(m):\n        if max_try < li[j][k]:\n            max_try = li[j][k]\n            row = j\n            col = k\nprint(max_try)\nprint(row, col)\n\n# Alternative (not mine)\n# n, m = map(int, input().split())\n# a = [list(map(int, input().split())) for i in range(n)]\n#\n# b = [max(i) for i in a]\n# print(max(b))\n# print(b.index(max(b)), a[b.index(max(b))].index(max(b)))","repo_name":"raskote/Python","sub_path":"Инди-программирование на Python (stepik)/5.7 (Вложенные списки)/5.7 (10 of 15).py","file_name":"5.7 (10 of 15).py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"19472810083","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n        head = ListNode(0)\n        cur = head\n        carry = 0\n        while l1 or l2:\n            if not l1:\n                sum = l2.val + carry\n            elif not l2:\n                sum = l1.val + carry\n            else:\n                sum = l1.val + l2.val + carry\n            if sum >= 10:\n                carry = 1\n                sum -= 10\n            else:\n                carry = 0\n            cur.next = ListNode(sum)\n            cur = cur.next\n            if l1:\n                l1 = l1.next\n            if l2:\n                l2 = l2.next\n        if carry == 1:\n            cur.next = ListNode(1)\n        return head.next\n    ","repo_name":"ArsalaanAli/Leetcode","sub_path":"AddTwoNumbers.py","file_name":"AddTwoNumbers.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"10287780812","text":"from datetime import datetime\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom django.views.decorators.csrf import csrf_exempt\nimport absoluteuri\nfrom django.http import JsonResponse\n\n# Decorators\nfrom django.contrib.auth.decorators import login_required\nfrom authentication.decorators import allowed_users\n\n# Models\nfrom .models import (\n    RoleApplication,\n    Coach,\n    Training,\n    RegistrationTraining,\n    JoinedTraining,\n    ORGANIZATION_TYPE_CHOICES,\n    Feedback, TrainingType,\n)\n\nfrom users.models import (\n    CustomUser, Trainer,\n    Assessor,\n    AcademicQualification, \n    WorkExperience,\n)\n\nfrom billings.models import Payment\n\n# Forms\nfrom .forms import (\n    AttendanceSheetUploadForm,\n    CoachCreateForm,\n    TrainingCreateForm,\n    RegistrationTrainingCreateForm,\n    RegistrationTrainingReviewForm,\n    FeedbackCreateForm,\n    RoleApplicationInterviewForm,\n    EditRoleApplicationForm,\n)\n\n# Helpers\nfrom .helpers import (\n    
check_available_seat,\n get_trainer_application_status,\n get_assessor_application_status,\n get_trainer_application,\n get_assessor_application,\n get_role_application_supporting_documents,\n save_role_application_supporting_documents,\n # generate_role_application_number,\n)\nfrom billings.helpers import payment_response_process, get_payment_history_url\n\nimport absoluteuri\n\nfrom core.helpers import translate_malay_date, standard_date, send_email_default, send_email_with_attachment, generate_and_save_qr, get_domain\nfrom app.helpers.letter_templates import generate_document, generate_document_file, generate_training_document_file\nfrom api.soap.create_transaction import create_transaction, cancel_proforma, create_training_transaction, get_receipt_url, payment_gateway_url\n\n# Create your views here.\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'trainer', 'assessor', 'trainee', 'applicant'])\ndef dashboard_training_application_dashboard(request):\n if not request.user.is_assessor and not request.user.is_trainer and request.user.role != 'superadmin':\n messages.warning(request, 'You must be either QLASSIC Industry Assessor (QIA), QLASSIC CIDB Assessor (QCA) or Trainer in order to apply the role(s) below.')\n # application, applicable = get_trainer_application(request, request.user, 'trainer')\n application_trainer, applicable_trainer = get_trainer_application_status(request.user)\n application_assessor, applicable_assessor = get_assessor_application_status(request.user)\n assessor = Assessor.objects.all().filter(user=request.user).first()\n trainer = Trainer.objects.all().filter(user=request.user).first()\n context = {\n 'assessor':assessor,\n 'trainer':trainer,\n 'application_trainer':application_trainer,\n 'applicable_trainer':applicable_trainer,\n 'application_assessor':application_assessor,\n 'applicable_assessor':applicable_assessor,\n }\n return render(request, \"dashboard/training/role_application_dashboard.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'cidb_reviewer'])\ndef dashboard_training_role_application_list(request):\n applications = RoleApplication.objects.all().exclude(application_status='').order_by('-modified_date')\n context = {\n 'applications':applications,\n }\n return render(request, \"dashboard/training/role_application_list.html\", context)\n\n@login_required\ndef edit_role_application(request, id):\n obj= get_object_or_404(RoleApplication, id=id)\n \n form = EditRoleApplicationForm(request.POST or None, instance= obj)\n context= {'form': form}\n\n if form.is_valid():\n obj= form.save(commit= False)\n\n obj.save()\n\n messages.success(request, \"You successfully updated the data\")\n return redirect('dashboard_training_role_application_list')\n else:\n context= {'form': form,\n 'error': 'The form was not updated successfully. 
Please enter in a title and content'}\n return render(request,\"dashboard/training/edit_role_application_list.html\", context)\n\n# @allowed_users(allowed_roles=['superadmin', 'trainer'])\n# @login_required(login_url=\"/login/\")\n# def dashboard_training_application_new(request):\n# context = {\n# 'mode': 'step_1',\n# }\n# # if request.method == 'POST':\n# # application_type = \n# # application, applicable = get_trainer_application(request, request.user, 'trainer')\n# return render(request, \"dashboard/training/application_form.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'trainer','assessor'])\ndef dashboard_training_application_new(request, application_type, step):\n context = {\n 'application_type': application_type,\n 'mode': step,\n }\n\n applicable = False\n application = None\n if application_type == 'qca':\n application, applicable = get_assessor_application(request, request.user)\n context['application'] = application\n if application_type == 'trainer':\n application, applicable = get_trainer_application(request, request.user)\n context['application'] = application\n if applicable:\n pass\n else:\n messages.warning(request, 'Unable to apply the role.')\n return redirect('dashboard_training_application_dashboard')\n \n if step == 'step-2':\n academic_qualifications = AcademicQualification.objects.all().filter(user=request.user)\n context['academic_qualifications'] = academic_qualifications\n \n if step == 'step-3':\n work_experiences = WorkExperience.objects.all().filter(user=request.user)\n context['work_experiences'] = work_experiences\n \n if step == 'step-4':\n registration_trainings = RegistrationTraining.objects.all().filter(user=request.user, status='accepted', attendance_full=True)\n joined_trainings = JoinedTraining.objects.all().filter(user=request.user)\n context['registration_trainings'] = registration_trainings\n context['joined_trainings'] = joined_trainings\n if request.method == 'POST':\n if 'add' in request.POST:\n jt_year = request.POST['year']\n jt_course = request.POST['course']\n jt_place = request.POST['place']\n jt = JoinedTraining.objects.create(user=request.user, year=jt_year, course=jt_course, place=jt_place)\n messages.info(request, 'Added the joined training info.')\n if 'delete' in request.POST:\n jt_id = request.POST['id']\n jt = JoinedTraining.objects.get(id=jt_id)\n jt.delete()\n messages.info(request, 'Deleted the joined training.')\n return redirect('dashboard_training_application_new', application_type, step)\n\n if step == 'step-5':\n work_experiences = WorkExperience.objects.all().filter(user=request.user)\n context['supporting_documents'] = get_role_application_supporting_documents(application)\n if request.method == 'POST':\n save_role_application_supporting_documents(request, application)\n if 'save' in request.POST:\n return redirect('dashboard_training_application_new', application_type, step)\n\n if 'submit' in request.POST:\n application.application_status = 'pending'\n # generate_role_application_number(application)\n application.save()\n\n # Email\n to = []\n reviewers = CustomUser.objects.all().filter(\n Q(role='superadmin')|\n Q(role='cidb_reviewer')\n )\n for reviewer in reviewers:\n to.append(reviewer.email)\n subject = \"Role Application Submission - \" + application.application_number\n ctx_email = {\n 'application':application,\n }\n messages.info(request, 'Successfully send the role application.')\n send_email_default(subject, to, ctx_email, 
'email/role-application-submission.html')\n\n return redirect('dashboard_training_application_dashboard')\n\n return render(request, \"dashboard/training/role_application_form.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'cidb_reviewer'])\ndef dashboard_training_role_application_review(request, id, step):\n application = get_object_or_404(RoleApplication, id=id)\n interview_form = RoleApplicationInterviewForm()\n context = {\n 'review': True,\n 'id': id,\n 'application': application,\n 'mode': step,\n 'interview_form':interview_form,\n }\n\n if step == 'step-2':\n academic_qualifications = AcademicQualification.objects.all().filter(user=request.user)\n context['academic_qualifications'] = academic_qualifications\n \n if step == 'step-3':\n work_experiences = WorkExperience.objects.all().filter(user=request.user)\n context['work_experiences'] = work_experiences\n \n if step == 'step-4':\n registration_trainings = RegistrationTraining.objects.all().filter(user=request.user, status='accepted', attendance_full=True)\n joined_trainings = JoinedTraining.objects.all().filter(user=request.user)\n context['registration_trainings'] = registration_trainings\n context['joined_trainings'] = joined_trainings\n\n if step == 'step-5':\n work_experiences = WorkExperience.objects.all().filter(user=request.user)\n context['supporting_documents'] = get_role_application_supporting_documents(application)\n\n if request.method == \"POST\":\n if 'interview' in request.POST:\n # Save Data\n interview_form = RoleApplicationInterviewForm(request.POST, instance=application)\n if interview_form.is_valid():\n application = interview_form.save()\n application.application_status = 'interview_invitation'\n application.reviewed_by = request.user.name\n application.save()\n\n # Get Session\n session = \"\"\n if application.interview_time_from.hour < 12:\n session = \"SESI PAGI\"\n elif application.interview_time_from.hour < 2:\n session = \"SESI TENGAH HARI\"\n else:\n session = \"SESI PETANG\"\n\n # Interview Letter\n template_ctx = {\n 'name': application.user.name,\n 'company': application.user.organization,\n 'address1': application.user.address1,\n 'address2': application.user.address2,\n 'postcode': application.user.postcode,\n 'city': application.user.city,\n 'state': application.user.state,\n 'hp_no': application.user.hp_no,\n 'fax_no': application.user.fax_no,\n 'date_now': translate_malay_date(standard_date(datetime.now())),\n 'date': translate_malay_date(standard_date(application.interview_date)),\n 'time_from': application.interview_time_from,\n 'time_to': application.interview_time_to,\n 'location': application.interview_location,\n 'session': session,\n }\n if application.application_type == 'trainer':\n response = generate_document_file(request, 'trainer_interview_letter', template_ctx, None)\n application.interview_letter_file.save('pdf', response)\n if application.application_type == 'qca':\n response = generate_document_file(request, 'qca_interview_letter', template_ctx, None)\n application.interview_letter_file.save('pdf', response)\n\n # Email\n to = [application.user.email]\n subject = \"Interview Invitation\"\n attachments = [application.interview_letter_file]\n email_ctx = {\n 'application': application,\n }\n send_email_with_attachment(subject, to, email_ctx, 'email/role-application-interview.html', attachments)\n \n messages.info(request, 'Successfully invite the applicant for interview session via email.')\n if 'reject' in request.POST:\n 
application.application_status = 'reject'\n application.reviewed_by = request.user.name\n application.save()\n\n # Interview Letter\n template_ctx = {\n 'name': application.user.name,\n 'company': application.user.organization,\n 'address1': application.user.address1,\n 'address2': application.user.address2,\n 'postcode': application.user.postcode,\n 'city': application.user.city,\n 'state': application.user.state,\n 'hp_no': application.user.hp_no,\n 'fax_no': application.user.fax_no,\n 'date_now': translate_malay_date(standard_date(datetime.now())),\n 'date': translate_malay_date(standard_date(application.interview_date)),\n 'time_from': application.interview_time_from,\n 'time_to': application.interview_time_to,\n 'location': application.interview_location,\n }\n if application.application_type == 'trainer':\n response = generate_document_file(request, 'trainer_reject_letter', template_ctx, None)\n application.reject_letter_file.save('pdf', response)\n if application.application_type == 'qca':\n response = generate_document_file(request, 'qca_reject_letter', template_ctx, None)\n application.reject_letter_file.save('pdf', response)\n\n # Email\n to = [application.user.email]\n subject = \"Role Application Result - \" + application.get_application_type_display()\n attachments = [application.reject_letter_file]\n email_ctx = {\n 'application': application,\n }\n send_email_with_attachment(subject, to, email_ctx, 'email/role-application-reject.html', attachments)\n messages.info(request, 'Successfully sent the rejection letter to applicant via email.')\n \n if 'accreditation' in request.POST:\n assessor_number = \"\"\n \n if application.application_type == 'trainer':\n trainer, created = Trainer.objects.get_or_create(user=application.user)\n user = application.user\n user.role = 'trainer'\n user.save()\n if application.application_type == 'qca':\n assessor, created = Assessor.objects.get_or_create(user=application.user)\n assessor.assessor_type = 'QCA'\n assessor.save()\n assessor_number = assessor.qca_id\n user = application.user\n user.role = 'assessor'\n user.save()\n\n application.application_status = 'approved'\n if application.application_type == 'trainer':\n application.accreditation_duration_year = request.POST[\"accreditation_duration_year\"]\n application.accreditation_duration_month = request.POST[\"accreditation_duration_month\"]\n application.save()\n template_ctx = {\n 'name': application.user.name,\n 'ic': application.user.icno,\n 'assessor_number': assessor_number,\n 'company': application.user.organization,\n 'address1': application.user.address1,\n 'address2': application.user.address2,\n 'postcode': application.user.postcode,\n 'city': application.user.city,\n 'state': application.user.state,\n 'duration_year': application.accreditation_duration_year,\n 'duration_month': application.accreditation_duration_month,\n 'hp_no': application.user.hp_no,\n 'fax_no': application.user.fax_no,\n 'date_now': translate_malay_date(standard_date(datetime.now())),\n 'date_accreditation': translate_malay_date(standard_date(datetime.now())),\n }\n if application.application_type == 'trainer':\n # Generate Report\n response_letter = generate_document_file(request, 'trainer_accreditation_letter', template_ctx, None)\n application.accreditation_letter_file.save('pdf', response_letter)\n if application.application_type == 'qca':\n # Generate QR\n qr_path = absoluteuri.build_absolute_uri('/certificate/role-application/'+str(application.id)+'/')\n generate_and_save_qr(qr_path, 
application.certificate_qr_file)\n # Generate Report\n response_letter = generate_document_file(request, 'qca_accreditation_letter', template_ctx, None)\n response_certificate = generate_document_file(request, 'qca_accreditation_certificate', template_ctx, application.certificate_qr_file)\n application.accreditation_letter_file.save('pdf', response_letter)\n application.accreditation_certificate_file.save('pdf', response_certificate)\n\n # Email\n to = [application.user.email]\n subject = \"Role Application Result - \" + application.get_application_type_display()\n attachments = []\n if application.application_type == 'trainer':\n attachments = [application.accreditation_letter_file]\n if application.application_type == 'qca':\n attachments = [application.accreditation_letter_file,application.accreditation_certificate_file]\n email_ctx = {\n 'application': application,\n }\n send_email_with_attachment(subject, to, email_ctx, 'email/role-application-accreditation.html', attachments)\n\n messages.info(request, 'Successfully approved the role application.')\n\n return render(request, \"dashboard/training/role_application_form.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'trainer', 'cidb_reviewer'])\ndef dashboard_training_list(request):\n mode = 'list'\n trainings = Training.objects.all()\n if request.user.role != 'superadmin' and request.user.role != 'cidb_reviewer':\n trainings = trainings.filter(trainer=request.user)\n\n context = {\n 'title': 'Manage Training',\n 'mode': mode,\n 'trainings': trainings,\n }\n return render(request, \"dashboard/training/manage_training.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'trainer'])\ndef dashboard_training_new(request):\n mode = 'create'\n training_form = TrainingCreateForm()\n context = {\n 'title': 'Add New Training',\n 'mode': mode,\n 'training_form': training_form,\n }\n if request.method == 'POST':\n training_form = TrainingCreateForm(request.POST)\n if training_form.is_valid():\n training = training_form.save()\n training.trainer = request.user\n training.created_by = request.user.name\n training.save()\n messages.info(request, 'Created successfully')\n return redirect('dashboard_training_update', training.id)\n else:\n messages.warning(request, 'Unable to create new training')\n return render(request, \"dashboard/training/manage_training.html\", context)\n\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'trainer'])\ndef dashboard_training_update(request, id):\n mode = 'update'\n training = get_object_or_404(Training, id=id)\n training_form = TrainingCreateForm(instance=training)\n coaches = Coach.objects.all().filter(training=training)\n coach_form = CoachCreateForm()\n context = {\n 'title': 'Update Training',\n 'mode': mode,\n 'training_form': training_form,\n 'coach_form': coach_form,\n 'training': training,\n 'coaches':coaches,\n }\n\n if request.method == 'POST':\n if 'update_training' in request.POST:\n training_form = TrainingCreateForm(request.POST, instance=training)\n if training_form.is_valid():\n form = training_form.save()\n form.modified_by = request.user.name\n form.save()\n messages.info(request, 'Updated successfully')\n else:\n messages.warning(request, 'Unable to update training')\n if 'create_coach' in request.POST:\n coach_form = CoachCreateForm(request.POST)\n if coach_form.is_valid():\n form = coach_form.save()\n form.training = training\n form.created_by = 
request.user.name\n form.save()\n messages.info(request, 'Added successfully')\n else:\n messages.warning(request, 'Unable to add new coach:'+coach_form.errors.as_text())\n return redirect('dashboard_training_update', training.id)\n return render(request, \"dashboard/training/manage_training.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'cidb_reviewer'])\ndef dashboard_training_review(request, id):\n mode = 'review'\n training = get_object_or_404(Training, id=id)\n training_form = TrainingCreateForm(instance=training)\n coaches = Coach.objects.all().filter(training=training)\n coach_form = CoachCreateForm()\n context = {\n 'title': 'Review Training',\n 'mode': mode,\n 'training_form': training_form,\n 'coach_form': coach_form,\n 'training': training,\n 'coaches': coaches,\n }\n\n if request.method == 'POST':\n if 'reject' in request.POST:\n training.review_status = 'rejected'\n training.save()\n messages.info(request, 'Rejected successfully')\n if 'accept' in request.POST:\n training.review_status = 'accepted'\n training.save()\n messages.info(request, 'Accept successfully')\n return redirect('dashboard_training_list')\n return render(request, \"dashboard/training/manage_training.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'trainer'])\ndef dashboard_training_coach_update(request, id):\n coach = get_object_or_404(Coach, id=id)\n training = coach.training\n coach_form = CoachCreateForm(instance=coach)\n context = {\n 'title': 'Manage Coach',\n 'training': training,\n 'coach_form': coach_form,\n }\n\n if request.method == 'POST':\n if 'update' in request.POST:\n coach_form = CoachCreateForm(request.POST, instance=coach)\n if coach_form.is_valid():\n form = coach_form.save()\n form.modified_by = request.user.name\n form.save()\n messages.info(request, 'Updated successfully')\n else:\n messages.warning(request, 'Unable to update coach information')\n if 'delete' in request.POST:\n training_id = coach.training.id\n coach.delete()\n messages.info(request, 'Delete successfully')\n return redirect('dashboard_training_update', training_id)\n return redirect('dashboard_training_coach_update', coach.id)\n return render(request, \"dashboard/training/coach_form.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'trainer', 'cidb_reviewer', 'applicant', 'trainee'])\ndef dashboard_available_training_list(request):\n mode = 'all'\n trainings = Training.objects.all().filter(review_status='accepted')\n registration_trainings = RegistrationTraining.objects.all().filter(user=request.user).order_by('-created_date')\n filtered_trainings = []\n for tr in trainings:\n tr_dict = {\n 'id': tr.id,\n 'training_name': tr.training_name,\n 'training_type': tr.training_type,\n 'fee': tr.fee,\n 'from_date': tr.from_date,\n 'to_date': tr.to_date,\n 'current_pax': tr.current_pax(),\n 'size': tr.size,\n 'review_status': tr.get_attendance_review_status_display,\n 'is_available': True,\n 'is_registered': False,\n 'is_end': False,\n }\n for rt in registration_trainings:\n if rt.training == tr:\n if rt.status==\"accepted\" or rt.status=='need_payment':\n tr_dict['is_available'] = False\n tr_dict['is_registered'] = True\n break \n if tr.attendance_review_status == 'approved':\n tr_dict['is_end'] = True\n else:\n if tr_dict['is_available'] == True:\n if tr.is_available() == False:\n tr_dict['is_available'] = False\n filtered_trainings.append(tr_dict)\n \n\n context 
= {\n 'title': 'Available Training List',\n 'mode': mode,\n 'trainings': filtered_trainings,\n }\n return render(request, \"dashboard/training/enroll_training.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'trainer', 'applicant', 'trainee'])\ndef dashboard_joined_training_list(request):\n mode = 'joined_training'\n trainings = RegistrationTraining.objects.all().filter(user=request.user)\n context = {\n 'title': 'Joined Training',\n 'mode': mode,\n 'trainings': trainings,\n 'payment_history_url':get_payment_history_url(request),\n }\n return render(request, \"dashboard/training/enroll_training.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'applicant', 'trainee'])\ndef dashboard_joined_training_pay(request, id):\n mode = 'payment'\n rt = get_object_or_404(RegistrationTraining, id=id)\n training = rt.training\n response = create_training_transaction(request, training.fee, 'YKSHEQ', 'YURAN KURSUS', rt.code_id, request.user)\n proforma = response.Code\n print(str(response))\n \n response_url = get_domain(request) + '/dashboard/training/joined/payment/'+id+'/response/'\n\n\n # Create Payment\n payment, created = Payment.objects.get_or_create(order_id=proforma)\n payment.user = request.user\n payment.customer_name = request.user.name\n payment.customer_email = request.user.email\n payment.rt = rt\n payment.currency = 'MYR'\n payment.payment_amount = response.Amount\n payment.save()\n\n context = {\n 'title': 'Payment - Joined Training',\n 'mode': mode,\n 'training': rt,\n 'amount': payment.payment_amount,\n 'proforma': proforma,\n 'response': response,\n 'url': payment_gateway_url,\n 'response_url': response_url,\n }\n return render(request, \"dashboard/training/enroll_training.html\", context)\n\n@csrf_exempt\ndef dashboard_joined_training_pay_response(request, id):\n mode = 'payment_response'\n payment = None\n rt = get_object_or_404(RegistrationTraining, id=id)\n if request.method == 'POST':\n payment = payment_response_process(request)\n if payment != None:\n rt.payment_status = payment.payment_status\n rt.save()\n if payment.payment_status == 1:\n rt.status = 'accepted'\n rt.save()\n elif payment.payment_status == 2:\n messages.info(request, payment.status_description)\n rt.status = 'pending'\n rt.save()\n else:\n messages.warning(request, payment.status_description)\n else:\n messages.warning(request, 'Problem with processing the transaction. 
Please contact with our staff to verify the transaction.')\n \n receipt_url = None\n if payment != None:\n receipt_url = get_receipt_url + payment.order_id\n\n context = {\n 'title': 'Payment Response - Joined Training',\n 'mode': mode,\n 'training': rt,\n 'receipt_url': receipt_url,\n 'payment': payment,\n }\n return render(request, \"dashboard/training/enroll_training.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'cidb_reviewer', 'trainer', 'applicant', 'trainee'])\ndef dashboard_training_participant(request, id):\n mode = 'participant'\n training = get_object_or_404(Training, id=id)\n available_seat, is_available = check_available_seat(request, training)\n participants = RegistrationTraining.objects.all().filter(training__id = id)\n context = {\n 'title': 'List of Participant',\n 'mode': mode,\n 'participants': participants,\n 'training': training,\n 'available_seat': available_seat,\n 'is_available': is_available,\n }\n return render(request, \"dashboard/training/enroll_training.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'cidb_reviewer'])\ndef dashboard_training_participant_review(request, id):\n mode = 'participant_review'\n rt = get_object_or_404(RegistrationTraining, id=id)\n training = rt.training\n form_review = RegistrationTrainingReviewForm()\n available_seat, is_available = check_available_seat(request, training)\n context = {\n 'title': 'Participant Review',\n 'mode': mode,\n 'rt': rt,\n 'training': training,\n 'available_seat': available_seat,\n 'is_available': is_available,\n 'form_review': form_review,\n }\n if request.method == 'POST':\n if 'accept' in request.POST:\n form_review = RegistrationTrainingReviewForm(request.POST, instance=rt)\n if form_review.is_valid():\n form = form_review.save()\n form.reviewed_by = request.user.name\n \n if form.payment_mode == 'on':\n form.status = 'need_payment'\n else:\n form.status = 'accepted'\n form.payment_status = -2\n form.save()\n messages.info(request, 'Accepted the participant successfully')\n\n # Email\n to = []\n to.append(rt.user.email)\n subject = \"Request to Join Training - \" + rt.code_id + \" (\" + training.training_name + \")\"\n ctx_email = {\n 'training':training,\n 'rt': rt,\n }\n send_email_default(subject, to, ctx_email, 'email/training-join-response.html')\n\n else:\n messages.warning(request, 'Unable to review the participant')\n if 'reject' in request.POST:\n form_review = RegistrationTrainingReviewForm(request.POST, instance=rt)\n if form_review.is_valid():\n form = form_review.save()\n form.reviewed_by = request.user.name\n form.status = 'rejected'\n form.save()\n messages.info(request, 'Rejected the participant successfully')\n\n # Email\n to = []\n to.append(rt.user.email)\n subject = \"Request to Join Training - \" + rt.code_id + \" (\" + training.training_name + \")\"\n ctx_email = {\n 'training':training,\n 'user': rt.user,\n }\n send_email_default(subject, to, ctx_email, 'email/training-join-response.html')\n else:\n messages.warning(request, 'Unable to review the participant')\n return redirect('dashboard_training_participant', training.id)\n return render(request, \"dashboard/training/enroll_training.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'trainee', 'applicant'])\ndef dashboard_training_join(request, id):\n training = get_object_or_404(Training, id=id)\n existing = 
RegistrationTraining.objects.all().filter(training=training, user=request.user).order_by('-created_date')\n    if len(existing) > 0:\n        current = existing[0]\n        if current.status == 'pending':\n            messages.warning(request, 'Unable to register the training. You already applied this training. Please wait for the approval.')\n            return redirect('dashboard_available_training_list')\n        elif current.status == 'accepted':\n            messages.warning(request, 'Unable to register the training. You have been accepted to join this training.')\n            return redirect('dashboard_available_training_list')\n        elif current.status == 'need_payment':\n            messages.warning(request, 'Unable to register the training. You already applied this training. Please proceed with the payment.')\n            return redirect('dashboard_available_training_list')\n        else:\n            pass\n\n    available_seat, is_available = check_available_seat(request, training)\n    mode = 'register'\n    # print(type(ORGANIZATION_TYPE_CHOICES))\n    context = {\n        'title': 'Join Training',\n        'mode': mode,\n        'organization_type_choices': ORGANIZATION_TYPE_CHOICES,\n        'training': training,\n        'available_seat': available_seat,\n        'is_available': is_available,\n    }\n\n    if request.method == 'POST':\n        rt = RegistrationTraining.objects.create(training=training, user=request.user)\n        rt.participant_name = request.user.name\n        rt.participant_icno = request.user.icno\n        rt.participant_email = request.user.email\n        rt.participant_hpno = request.user.hp_no\n        rt.participant_organization = request.user.organization\n        rt.participant_organization_type = request.POST['participant_organization_type']\n        rt.participant_designation = request.POST['participant_designation']\n        rt.save()\n\n        # Email\n        cidb_reviewers = CustomUser.objects.all().filter(\n            Q(role='cidb_reviewer')|\n            Q(role='superadmin')\n        )\n        to = []\n        for reviewer in cidb_reviewers:\n            to.append(reviewer.email)\n        subject = \"Request to Join Training - \" + rt.code_id + \" (\" + training.training_name + \")\"\n        ctx_email = {\n            'training':training,\n        }\n        send_email_default(subject, to, ctx_email, 'email/training-join-request.html')\n\n        messages.info(request, 'Successfully requested to join the training. Please wait for approval from a reviewer before proceeding with payment.')\n        return redirect('dashboard_joined_training_list')\n\n    return render(request, \"dashboard/training/enroll_training.html\", context)\n\n
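dashboard_training_join above ends with the notification pattern used throughout these views: query the reviewer accounts, collect their addresses, send one templated message. With stock Django mail it reduces to roughly the following; send_email_default in the record is assumed to wrap something similar, and the sender address here is a placeholder:

from django.contrib.auth import get_user_model
from django.core.mail import send_mail
from django.db.models import Q

def notify_reviewers(subject, body):
    User = get_user_model()  # the record's CustomUser, with its 'role' field
    to = list(
        User.objects.filter(Q(role='cidb_reviewer') | Q(role='superadmin'))
                    .values_list('email', flat=True)
    )
    # fail_silently=False surfaces SMTP errors instead of dropping them
    send_mail(subject, body, 'noreply@example.com', to, fail_silently=False)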
@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'trainer'])\ndef dashboard_training_attendance_trainer(request, id):\n    training = get_object_or_404(Training, id=id)\n    attendances = RegistrationTraining.objects.all().filter(training=training, status='accepted')\n    attendance_sheet_form = AttendanceSheetUploadForm(instance=training)\n    mode = 'trainer'\n    context = {\n        'attendance_sheet_form': attendance_sheet_form,\n        'title': 'Mark Attendance List - ' + training.training_name,\n        'mode': mode,\n        'training': training,\n        'attendances': attendances,\n    }\n    if request.method == 'POST':\n        if 'upload_attendance_sheet' in request.POST:\n            attendance_sheet_form = AttendanceSheetUploadForm(request.POST, request.FILES, instance=training)\n            attendance_sheet_form.save()\n            messages.info(request, 'Successfully uploaded the attendance sheet.')\n        elif 'approval' in request.POST:\n            if training.attendance_sheet_file != '':\n                training.attendance_review_status = 'need_approval'\n                training.save()\n                messages.info(request, 'Successfully sent the attendance for approval.')\n            else:\n                messages.warning(request, 'You must upload the attendance sheet file before sending the attendance for approval.')\n        elif 'generate_template' in request.POST:\n            trainers = Coach.objects.all().filter(training=training)\n            venue = training.address1 + ', ' + training.address2 + ', ' + training.postcode + ' ' + training.city + ', ' + training.state\n            tmpl_ctx = {\n                'training_name': training.training_name,\n                'trainers': trainers,\n                'venue': venue,\n                'days': range(training.number_of_days()),\n                'from_date': translate_malay_date(standard_date(training.from_date)),\n                'to_date': translate_malay_date(standard_date(training.to_date)),\n                'from_time': training.from_time,\n                'to_time': training.to_time,\n                'participants': attendances,\n            }\n            response = generate_document(request, 'attendance_sheet', tmpl_ctx)\n            return response\n        else:\n            participant_id = request.POST['id']\n            rt = RegistrationTraining.objects.get(id=participant_id)\n            if 'attend_full' in request.POST:\n                rt.attendance_full = True\n                rt.save()\n                messages.info(request, 'Changed attendance as FULL')\n            if 'attend_not_full' in request.POST:\n                rt.attendance_full = False\n                rt.save()\n                messages.info(request, 'Changed attendance as NOT FULL')\n            if 'mark' in request.POST:\n                rt.marks = request.POST['marks']\n                rt.save()\n                messages.info(request, 'Changed marks')\n        return redirect('dashboard_training_attendance_trainer', training.id)\n\n    return render(request, \"dashboard/training/attendance.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'cidb_reviewer'])\ndef dashboard_training_attendance_review(request, id):\n    training = get_object_or_404(Training, id=id)\n    attendances = RegistrationTraining.objects.all().filter(training=training, status='accepted')\n    mode = 'reviewer'\n    context = {\n        'title': 'Review Attendance List - ' + training.training_name,\n        'mode': mode,\n        'training': training,\n        'attendances': attendances,\n    }\n\n    if request.method == 'POST':\n        if 'generate' in request.POST:\n            for attendance in attendances:\n                if attendance.attendance_full == True:\n                    if training.cert_type == 'pass':\n                        if attendance.marks >= training.passing_mark:\n                            attendance.pass_status = True\n                        else:\n                            attendance.pass_status = False\n                    
else:\n attendance.pass_status = True\n else:\n attendance.pass_status = False\n \n generate_pdf = False\n\n if attendance.pass_status == True:\n generate_pdf = True\n else:\n if training.cert_type == 'pass':\n generate_pdf = True\n \n if generate_pdf == True:\n pass_status = \"\"\n training_location = training.address1 + ' ' + training.address2 + ', ' + training.postcode + ' ' + training.city + ', ' + training.state\n # Check if PASS or FAIL\n if attendance.pass_status == True:\n pass_status = 'LULUS'\n else:\n pass_status = 'GAGAL'\n \n # Set training date display\n training_date = None\n if training.from_date == training.to_date:\n training_date = translate_malay_date(standard_date(training.from_date))\n else:\n training_date = translate_malay_date(standard_date(training.from_date)) + ' - ' + translate_malay_date(standard_date(training.to_date))\n \n template_ctx = {\n 'pass': pass_status,\n 'name': attendance.user.name,\n 'hp_no': attendance.user.hp_no,\n 'fax_no': attendance.user.fax_no,\n 'ic': attendance.user.icno,\n 'address1': attendance.user.address1,\n 'address2': attendance.user.address2,\n 'postcode': attendance.user.postcode,\n 'city': attendance.user.city,\n 'state': attendance.user.state,\n 'company': attendance.user.organization,\n 'location': training_location,\n 'date_now': translate_malay_date(standard_date(datetime.now())),\n 'date': training_date,\n }\n response = generate_training_document_file(request, training.training_type, template_ctx, None)\n attendance.certificate_file.save('pdf', response)\n\n attendance.save()\n messages.info(request, 'Successfully generated the document for participants.') \n if 'approve' in request.POST:\n training.attendance_review_status = 'approved'\n training.save()\n\n training_types = TrainingType.objects.all().filter(required_for_assessor=True)\n\n # Email\n for attendance in attendances:\n \n # Check Eligible for QIA\n qia_eligible = True\n all_trainings = RegistrationTraining.objects.all().filter(user=attendance.user)\n if len(all_trainings) > 1:\n for training_type in training_types:\n found = False\n for all_training in all_trainings:\n if training_type == all_training.training.training_type:\n found = True\n break\n if found == False:\n qia_eligible = False\n break\n else:\n qia_eligible = False\n\n if qia_eligible == True:\n user = attendance.user\n user.qia_status = 'need_review'\n user.save() \n\n # Email\n cidb_reviewers = CustomUser.objects.all().filter(\n Q(role='superadmin')|\n Q(role='cidb_reviewer')\n )\n to = []\n for cidb_reviewer in cidb_reviewers:\n to.append(cidb_reviewer.email)\n\n subject = \"Request for QIA Certificate - \" + attendance.code_id\n email_ctx = {\n 'attendance': attendance,\n }\n send_email_default(subject, to, email_ctx, 'email/training-qia-review.html')\n \n # Email\n to = [attendance.user.email]\n subject = \"Training Certificate - \" + training.training_name\n attachments = [attendance.certificate_file]\n email_ctx = {\n 'training': training,\n 'attendance': attendance,\n }\n send_email_with_attachment(subject, to, email_ctx, 'email/training-certificate.html', attachments)\n\n\n\n messages.info(request, 'Successfully approved the attendance.')\n return redirect('dashboard_training_attendance_review', training.id)\n\n return render(request, \"dashboard/training/attendance.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['trainee', 'applicant'])\ndef dashboard_training_feedback_list_trainee(request):\n feedbacks = 
Feedback.objects.all().filter(user=request.user)\n\n context = {\n 'role': 'trainee',\n 'feedbacks':feedbacks,\n }\n\n return render(request, \"dashboard/training/feedback_list.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'cidb_reviewer'])\ndef dashboard_training_feedback_list_staff(request):\n feedbacks = Feedback.objects.all()\n\n context = {\n 'role': 'staff',\n 'feedbacks':feedbacks,\n }\n\n return render(request, \"dashboard/training/feedback_list.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['trainee', 'applicant'])\ndef dashboard_training_feedback_application(request, id):\n training = get_object_or_404(Training, id=id)\n context = {\n 'training':training,\n }\n\n if request.method == 'POST':\n form = FeedbackCreateForm(request.POST)\n if form.is_valid():\n data = form.save()\n data.user = request.user\n data.training = training\n data.save()\n messages.info(request, 'Successfully send the feedback.')\n return redirect('dashboard_joined_training_list')\n else:\n messages.warning(request, 'Problem with sending the feedback.')\n return redirect('dashboard_training_feedback_application', training.id)\n\n return render(request, \"dashboard/training/feedback_application.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'cidb_reviewer'])\ndef dashboard_training_feedback_review(request, id):\n feedback = get_object_or_404(Feedback, id=id)\n trainers = Coach.objects.all().filter(training=feedback.training)\n print(len(trainers))\n mode = 'review'\n context = {\n 'mode': mode,\n 'feedback':feedback,\n 'trainers':trainers,\n }\n\n if request.method == 'POST':\n feedback.warning = request.POST['warning']\n feedback.warning_delivered = True\n feedback.warning_delivered_date = datetime.now()\n feedback.reviewer = request.user\n feedback.save()\n\n # Email\n to = []\n for trainer in trainers:\n to.append(trainer.email)\n subject = \"Complaint From Trainee - \" + feedback.training.training_name\n ctx_email = {\n 'feedback':feedback,\n }\n messages.info(request, 'Successfully delivered an email to trainer(s).')\n send_email_default(subject, to, ctx_email, 'email/training-complaint.html')\n \n return redirect('dashboard_training_feedback_review', feedback.id)\n\n return render(request, \"dashboard/training/feedback_application.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'cidb_reviewer', 'trainee', 'applicant', 'trainer', 'assessor'])\ndef dashboard_joined_training_certificate(request):\n trainings = RegistrationTraining.objects.all().filter(user=request.user, pass_status=True)\n assessor_all = Assessor.objects.all().filter(user=request.user)\n assessor = None\n if len(assessor_all) > 0:\n assessor = assessor_all[0]\n context = {\n 'title': 'Training Certificate List',\n 'mode': 'trainee',\n 'trainings':trainings,\n 'assessor': assessor,\n 'user': request.user,\n 'payment_history_url':get_payment_history_url(request),\n }\n\n return render(request, \"dashboard/training/certificate_list.html\", context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'cidb_reviewer'])\ndef dashboard_qia_application_list(request):\n users = CustomUser.objects.all().filter(qia_status='need_review')\n context = {\n 'title': 'QLASSIC Industry Assessor Candidates',\n 'mode': 'list',\n 'users':users,\n }\n return render(request, \"dashboard/training/qia_application.html\", 
context)\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'cidb_reviewer'])\ndef dashboard_qia_application_review(request, id):\n user = get_object_or_404(CustomUser, id=id, qia_status='need_review')\n trainings = RegistrationTraining.objects.all().filter(user=user, pass_status=True)\n\n context = {\n 'title': 'QIA Candidate Review',\n 'mode': 'review',\n 'trainings':trainings,\n 'user': user,\n }\n\n if request.method == 'POST':\n user.qia_status = 'need_payment'\n user.save()\n\n to = [user.email]\n subject = \"Payment for QLASSIC Industry Assessor Certificate\"\n email_ctx = {\n 'user': user,\n }\n send_email_default(subject, to, email_ctx, 'email/training-qia-payment.html')\n\n messages.info(request, 'Successfully notify the trainee to make payment.')\n return redirect('dashboard_qia_application_list')\n return render(request, \"dashboard/training/certificate_list.html\", context)\n\n\n@login_required(login_url=\"/login/\")\n@allowed_users(allowed_roles=['superadmin', 'trainer', 'assessor', 'trainee', 'applicant'])\ndef dashboard_qia_application_pay(request, id):\n mode = 'payment'\n user = get_object_or_404(CustomUser, id=id)\n response = create_transaction(request, 1, 'QLC-PUP', 'PENTAULIAHAN QIA','QIA-'+user.code_id, request.user)\n proforma = response.Code\n\n response_url = get_domain(request) + '/dashboard/training/qia/application/payment/'+id+'/response/'\n \n # Create Payment\n payment, created = Payment.objects.get_or_create(order_id=proforma)\n payment.user = request.user\n payment.customer_name = request.user.name\n payment.customer_email = request.user.email\n payment.currency = 'MYR'\n payment.payment_amount = response.Amount\n payment.save()\n\n context = {\n 'title': 'Payment - QLASSIC Industry Assessor Certificate',\n 'mode': mode,\n 'user': user,\n 'proforma': proforma,\n 'amount': response.Amount,\n 'response': response,\n 'url': payment_gateway_url,\n 'response_url': response_url,\n }\n return render(request, \"dashboard/training/qia_application.html\", context)\n\n@csrf_exempt\ndef dashboard_qia_application_pay_response(request, id):\n mode = 'payment_response'\n payment = None\n user = get_object_or_404(CustomUser, id=id)\n if request.method == 'POST':\n payment = payment_response_process(request)\n if payment != None:\n if payment.payment_status == 1:\n user.qia_status = 'accepted'\n user.save()\n assessor, created = Assessor.objects.get_or_create(user=user)\n assessor.assessor_type = 'QIA'\n assessor.save()\n # Generate QR\n qr_path = absoluteuri.build_absolute_uri('/certificate/qia/'+str(user.id)+'/')\n generate_and_save_qr(qr_path, assessor.qia_certificate_qr_file)\n\n # Cert Generation\n template_ctx = {\n 'name': user.name,\n 'ic': user.icno,\n 'assessor_number': assessor.qia_id,\n 'date_accreditation': translate_malay_date(standard_date(datetime.now())),\n }\n response = generate_document_file(request, 'qia_accreditation_certificate', template_ctx, assessor.qia_certificate_qr_file)\n assessor.qia_certificate_file.save('pdf', response)\n \n # Email\n to = [user.email]\n subject = \"QLASSIC Industry Application Successful\"\n attachments = [assessor.qia_certificate_file]\n email_ctx = {\n 'assessor': assessor,\n 'user': user,\n }\n send_email_with_attachment(subject, to, email_ctx, 'email/training-qia-accreditation.html', attachments)\n\n messages.info(request, 'You are successfully certified as QLASSIC Industry Assessor.')\n else:\n messages.warning(request, payment.status_description)\n else:\n messages.warning(request, 
'Problem with processing the transaction. Please contact our staff to verify the transaction.')\n    \n    receipt_url = None\n    if payment is not None:\n        receipt_url = get_receipt_url + payment.order_id\n    \n    context = {\n        'title': 'Payment Response - QLASSIC Industry Assessor Certificate',\n        'mode': mode,\n        'user': user,\n        'receipt_url': receipt_url,\n        'payment': payment,\n    }\n    return render(request, \"dashboard/training/qia_application.html\", context)\n\n\n## AJAX\n@login_required(login_url=\"/login/\")\ndef ajax_api_training_payment_request(request):\n    if request.method == 'POST':\n        id = request.POST['id']\n        \n        rt = get_object_or_404(RegistrationTraining, id=id)\n        training = rt.training\n        response = create_training_transaction(request, training.fee, 'YKSHEQ', 'YURAN KURSUS QLASSIC', rt.code_id, request.user)\n        # response = create_transaction(request, 1, 'QLC-PUP', 'YURAN KURSUS QLASSIC', rt.code_id, request.user)\n        \n        proforma = response.Code\n        payment, created = Payment.objects.get_or_create(order_id=proforma)\n        if not created:\n            if payment.payment_amount != response.Amount:\n                cancel_proforma(proforma)\n                response = create_training_transaction(request, training.fee, 'YKSHEQ', 'YURAN KURSUS QLASSIC', rt.code_id, request.user)\n                proforma = response.Code\n                payment, created = Payment.objects.get_or_create(order_id=proforma)\n\n        # Create Payment\n        payment.user = request.user\n        payment.customer_name = request.user.name\n        payment.customer_email = request.user.email\n        payment.rt = rt\n        payment.currency = 'MYR'\n        payment.payment_amount = response.Amount\n        payment.save()\n\n        result = response.TransactionResult\n        error = response.ErrorMessage\n        print(str(response))\n        response_url = get_domain(request) + '/dashboard/training/joined/payment/'+id+'/response/'\n        postdata = {\n            'payment_gateway_url':payment_gateway_url,\n            'ClientReturnURL':response_url,\n            'IcOrRoc':request.user.code_id,\n            'OrderID':proforma,\n            'Currency':\"MYR\",\n            'TransactionType':\"SALE\",\n            'ClientRef0':\"\",\n            'ClientRef1':\"\",\n            'ClientRef2':\"\",\n            'ClientRef3':\"\",\n            'ClientRef4':\"\",\n            'Amount': payment.payment_amount,\n            'CustomerName':request.user.name,\n            'CustomerEmail':request.user.email,\n            'CustomerPhoneNo':request.user.hp_no,\n            'result':result,\n            'error':error,\n        }\n\n        return JsonResponse(postdata)\n    else:\n        return JsonResponse({})\n\n@login_required(login_url=\"/login/\")\ndef ajax_api_qia_payment_request(request):\n    if request.method == 'POST':\n        id = request.POST['id']\n        \n        user = get_object_or_404(CustomUser, id=id)\n        response = create_transaction(request, 1, 'QLC-PUP', 'PENTAULIAHAN QIA','QIA-'+user.code_id, request.user)\n        proforma = response.Code\n        payment, created = Payment.objects.get_or_create(order_id=proforma)\n        if not created:\n            if payment.payment_amount != response.Amount:\n                cancel_proforma(proforma)\n                response = create_transaction(request, 1, 'QLC-PUP', 'PENTAULIAHAN QIA','QIA-'+user.code_id, request.user)\n                proforma = response.Code\n                payment, created = Payment.objects.get_or_create(order_id=proforma)\n        \n        # Create Payment\n        payment.user = request.user\n        payment.customer_name = request.user.name\n        payment.customer_email = request.user.email\n        payment.currency = 'MYR'\n        payment.payment_amount = response.Amount\n        payment.save()\n\n        result = response.TransactionResult\n        error = response.ErrorMessage\n        response_url = get_domain(request) + '/dashboard/training/qia/application/payment/'+id+'/response/'\n        postdata = {\n            'payment_gateway_url':payment_gateway_url,\n            'ClientReturnURL':response_url,\n            
'IcOrRoc':request.user.code_id,\n 'OrderID':proforma,\n 'Currency':\"MYR\",\n 'TransactionType':\"SALE\",\n 'ClientRef0':\"\",\n 'ClientRef1':\"\",\n 'ClientRef2':\"\",\n 'ClientRef3':\"\",\n 'ClientRef4':\"\",\n 'Amount': payment.payment_amount,\n 'CustomerName':request.user.name,\n 'CustomerEmail':request.user.email,\n 'CustomerPhoneNo':request.user.hp_no,\n 'result':result,\n 'error':error,\n }\n\n return JsonResponse(postdata)\n else:\n return JsonResponse({})","repo_name":"rehmat11872/classic-project","sub_path":"trainings/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":57436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"25432136551","text":"# encoding: utf-8\nimport datetime\nfrom decimal import Decimal\n\n# Django imports\nfrom django.http import Http404\nfrom django.test import TestCase\nfrom django.test.client import RequestFactory\nfrom django.core.urlresolvers import reverse\nfrom django.views.generic import View\n\n# App imports\nfrom citizenconnect.browser_testing import SeleniumTestCase\nfrom issues.models import Problem\n\nfrom ..models import FriendsAndFamilySurvey\nfrom ..views.organisations import ActiveOrganisationAwareViewMixin\n\nfrom .lib import (\n create_test_problem,\n create_test_service,\n create_test_organisation,\n create_test_organisation_parent,\n AuthorizationTestCase\n)\n\n\nclass OrganisationSummaryTests(AuthorizationTestCase):\n\n def setUp(self):\n super(OrganisationSummaryTests, self).setUp()\n\n self.service = create_test_service({'organisation': self.test_hospital})\n\n # Problems\n atts = {'organisation': self.test_hospital}\n atts.update({'category': 'cleanliness',\n 'happy_service': True,\n 'happy_outcome': None,\n 'time_to_acknowledge': 5100,\n 'time_to_address': 54300})\n self.cleanliness_problem = create_test_problem(atts)\n atts.update({'category': 'staff',\n 'happy_service': True,\n 'happy_outcome': True,\n 'time_to_acknowledge': None,\n 'time_to_address': None})\n self.staff_problem = create_test_problem(atts)\n atts.update({'category': 'other',\n 'service_id': self.service.id,\n 'happy_service': False,\n 'happy_outcome': True,\n 'time_to_acknowledge': 7100,\n 'time_to_address': None})\n self.other_dept_problem = create_test_problem(atts)\n atts.update({'category': 'access',\n 'service_id': None,\n 'happy_service': False,\n 'happy_outcome': False,\n 'time_to_acknowledge': 2100,\n 'time_to_address': 2300,\n 'status': Problem.ABUSIVE})\n self.hidden_status_access_problem = create_test_problem(atts)\n\n self.public_summary_url = reverse('public-org-summary', kwargs={'ods_code': self.test_hospital.ods_code,\n 'cobrand': 'choices'})\n self.private_summary_url = reverse('private-org-summary', kwargs={'ods_code': self.test_hospital.ods_code})\n self.urls = [self.public_summary_url, self.private_summary_url]\n\n def test_summary_page_exists(self):\n for url in self.urls:\n self.login_as(self.trust_user)\n resp = self.client.get(url)\n self.assertEqual(resp.status_code, 200)\n\n def test_raises_404_not_500(self):\n # Issue #878 - views inheriting from OrganisationAwareViewMixin\n # didn't catch Organisation.DoesNotExist and raise an Http404\n # so we got a 500 instead\n missing_url = reverse('public-org-summary', kwargs={'ods_code': 'missing',\n 'cobrand': 'choices'})\n resp = self.client.get(missing_url)\n self.assertEqual(resp.status_code, 404)\n\n def test_summary_page_shows_organisation_name(self):\n for url in self.urls:\n self.login_as(self.trust_user)\n resp = 
self.client.get(url)\n self.assertTrue(self.test_hospital.name in resp.content)\n\n def test_private_summary_page_shows_all_problems(self):\n self.login_as(self.trust_user)\n resp = self.client.get(self.private_summary_url)\n\n problems_by_status = resp.context['problems_by_status']\n self.assertEqual(problems_by_status[0]['all_time'], 3)\n self.assertEqual(problems_by_status[0]['week'], 3)\n self.assertEqual(problems_by_status[0]['four_weeks'], 3)\n self.assertEqual(problems_by_status[0]['six_months'], 3)\n self.assertEqual(problems_by_status[0]['description'], 'Open')\n\n self.assertEqual(problems_by_status[1]['all_time'], 0)\n self.assertEqual(problems_by_status[1]['week'], 0)\n self.assertEqual(problems_by_status[1]['four_weeks'], 0)\n self.assertEqual(problems_by_status[1]['six_months'], 0)\n self.assertEqual(problems_by_status[1]['description'], 'In Progress')\n\n self.assertEqual(problems_by_status[2]['all_time'], 0)\n self.assertEqual(problems_by_status[2]['week'], 0)\n self.assertEqual(problems_by_status[2]['four_weeks'], 0)\n self.assertEqual(problems_by_status[2]['six_months'], 0)\n self.assertEqual(problems_by_status[2]['description'], 'Closed')\n\n self.assertEqual(problems_by_status[6]['all_time'], 1)\n self.assertEqual(problems_by_status[6]['week'], 1)\n self.assertEqual(problems_by_status[6]['four_weeks'], 1)\n self.assertEqual(problems_by_status[6]['six_months'], 1)\n self.assertEqual(problems_by_status[6]['description'], 'Abusive/Vexatious')\n\n def test_public_summary_page_only_shows_visible_problems(self):\n self.login_as(self.trust_user)\n resp = self.client.get(self.public_summary_url)\n\n problems_by_status = resp.context['problems_by_status']\n self.assertEqual(problems_by_status[0]['all_time'], 3)\n self.assertEqual(problems_by_status[0]['week'], 3)\n self.assertEqual(problems_by_status[0]['four_weeks'], 3)\n self.assertEqual(problems_by_status[0]['six_months'], 3)\n self.assertEqual(problems_by_status[0]['description'], 'Open')\n\n self.assertEqual(problems_by_status[1]['all_time'], 0)\n self.assertEqual(problems_by_status[1]['week'], 0)\n self.assertEqual(problems_by_status[1]['four_weeks'], 0)\n self.assertEqual(problems_by_status[1]['six_months'], 0)\n self.assertEqual(problems_by_status[1]['description'], 'In Progress')\n\n self.assertEqual(problems_by_status[2]['all_time'], 0)\n self.assertEqual(problems_by_status[2]['week'], 0)\n self.assertEqual(problems_by_status[2]['four_weeks'], 0)\n self.assertEqual(problems_by_status[2]['six_months'], 0)\n self.assertEqual(problems_by_status[2]['description'], 'Closed')\n\n def test_summary_page_applies_problem_category_filter(self):\n for url in self.urls:\n self.login_as(self.trust_user)\n resp = self.client.get(url + '?category=cleanliness')\n\n problems_by_status = resp.context['problems_by_status']\n self.assertEqual(problems_by_status[0]['all_time'], 1)\n self.assertEqual(problems_by_status[0]['week'], 1)\n self.assertEqual(problems_by_status[0]['four_weeks'], 1)\n self.assertEqual(problems_by_status[0]['six_months'], 1)\n\n def test_summary_page_applies_department_filter(self):\n for url in self.urls:\n self.login_as(self.trust_user)\n resp = self.client.get(url + '?service_id=%s' % self.service.id)\n\n problems_by_status = resp.context['problems_by_status']\n self.assertEqual(problems_by_status[0]['all_time'], 1)\n self.assertEqual(problems_by_status[0]['week'], 1)\n self.assertEqual(problems_by_status[0]['four_weeks'], 1)\n self.assertEqual(problems_by_status[0]['six_months'], 1)\n\n def 
test_summary_page_applies_breach_filter_on_private_pages(self):\n # Add a breach problem\n create_test_problem({'organisation': self.test_hospital,\n 'breach': True})\n\n self.login_as(self.trust_user)\n resp = self.client.get(self.private_summary_url + '?flags=breach')\n\n problems_by_status = resp.context['problems_by_status']\n self.assertEqual(problems_by_status[0]['all_time'], 1)\n self.assertEqual(problems_by_status[0]['week'], 1)\n self.assertEqual(problems_by_status[0]['four_weeks'], 1)\n self.assertEqual(problems_by_status[0]['six_months'], 1)\n\n def test_summary_page_applies_formal_complaint_filter_on_private_pages(self):\n create_test_problem({'organisation': self.test_hospital,\n 'formal_complaint': True})\n\n self.login_as(self.trust_user)\n resp = self.client.get(self.private_summary_url + '?flags=formal_complaint')\n\n problems_by_status = resp.context['problems_by_status']\n self.assertEqual(problems_by_status[0]['all_time'], 1)\n self.assertEqual(problems_by_status[0]['week'], 1)\n self.assertEqual(problems_by_status[0]['four_weeks'], 1)\n self.assertEqual(problems_by_status[0]['six_months'], 1)\n\n def test_public_summary_page_does_not_have_breach_filter(self):\n resp = self.client.get(self.public_summary_url)\n self.assertNotContains(resp, '
Begin', unsafe_allow_html=True)\r\n\r\nelse:\r\n    # Add custom HTML and CSS to position the image in the top-right corner\r\n    st.markdown(\r\n    \"\"\"\r\n    \r\n    \"\"\"\r\n    , unsafe_allow_html=True\r\n    )\r\n\r\n    # Show the image inside the custom container\r\n    with st.container():\r\n        st.markdown('', unsafe_allow_html=True)\r\n\r\n    st.title(\"Dino Blocks\")\r\n\r\n    # body\r\n    st.header(\"Datos del nivel \" + str(nivel))\r\n\r\n    # Columns\r\n    col1, col2, col3 = st.columns(3)\r\n    with col1:\r\n        st.markdown(\"Proyectiles\", unsafe_allow_html=True)\r\n        # chart\r\n        x1 = obtenerTabla(int(nivel))['num_balas']\r\n        \r\n        hist_data = [x1]\r\n        \r\n        group_labels = ['Proyectiles']\r\n        colors = ['#A56CC1']\r\n        \r\n        # Create distplot with curve_type set to 'normal'\r\n        fig = px.histogram(hist_data, x=x1, color_discrete_sequence=colors)\r\n\r\n        # Get the maximum frequency value in the histogram\r\n        max_frecuencia = x1.value_counts().max()\r\n\r\n        # Get the value of the last record\r\n        ultimo_registro = x1.iloc[-1]\r\n\r\n        # Add a vertical line at the position of the last record\r\n        fig.add_shape(\r\n            type='line',\r\n            x0=ultimo_registro,\r\n            x1=ultimo_registro,\r\n            y0=0,\r\n            y1=max_frecuencia, \r\n            line=dict(color='red', width=2) # You can customize the color and thickness of the line.\r\n        )\r\n\r\n        st.plotly_chart(fig, use_container_width=True)\r\n\r\n        if(int(nivel)-1 >= 1):\r\n            st.link_button(\"Previous Level\", \"./?num_nivel=\" + str(int(nivel)-1))\r\n\r\n    with col2:\r\n        st.markdown(\"Ciclos\", unsafe_allow_html=True)\r\n        # chart\r\n        x2 = obtenerTabla(int(nivel))['num_iteraciones']\r\n        hist_data2 = [x2]\r\n        group_labels2 = ['Ciclos']\r\n        colors2 = ['#A6ACEC']\r\n        # Create distplot with curve_type set to 'normal'\r\n        fig2 = px.histogram(hist_data2, x=x2, color_discrete_sequence=colors2)\r\n\r\n        # Get the maximum frequency value in the histogram\r\n        max_frecuencia = x2.value_counts().max()\r\n\r\n        # Get the value of the last record\r\n        ultimo_registro = x2.iloc[-1]\r\n\r\n        # Add a vertical line at the position of the last record\r\n        fig2.add_shape(\r\n            type='line',\r\n            x0=ultimo_registro,\r\n            x1=ultimo_registro,\r\n            y0=0,\r\n            y1=max_frecuencia, \r\n            line=dict(color='red', width=2) # You can customize the color and thickness of the line.\r\n        )\r\n        st.plotly_chart(fig2, use_container_width=True)\r\n\r\n        st.link_button(\"Launch Game\", \"https://danaesg.github.io/HACK2023/Dinosaurio/index.html\")\r\n        \r\n    with col3:\r\n        st.markdown(\"Bloques\", unsafe_allow_html=True)\r\n        # chart\r\n        x3 = obtenerTabla(int(nivel))['num_bloques']\r\n        hist_data3 = [x3]\r\n        group_labels3 = ['Bloques']\r\n        colors3 = ['#63F5EF']\r\n\r\n        # Create distplot with curve_type set to 'normal'\r\n        fig3 = px.histogram(hist_data3, x=x3, color_discrete_sequence=colors3)\r\n\r\n        # Get the maximum frequency value in the histogram\r\n        max_frecuencia = x3.value_counts().max()\r\n\r\n        # Get the value of the last record\r\n        ultimo_registro = x3.iloc[-1]\r\n\r\n        # Add a vertical line at the position of the last record\r\n        fig3.add_shape(\r\n            type='line',\r\n            x0=ultimo_registro,\r\n            x1=ultimo_registro,\r\n            y0=0,\r\n            y1=max_frecuencia, \r\n            line=dict(color='red', width=2) # You can customize the color and thickness of the line.\r\n        )\r\n\r\n        st.plotly_chart(fig3, use_container_width=True)\r\n\r\n        if(int(nivel)+1 <= 3):\r\n            st.link_button(\"Next Level\", \"./?num_nivel=\" + str(int(nivel)+1))\r\n\r\n    st.markdown(\"En este primer reto aprenderás del uso de bloques para modificar las propiedades de tu nave. Debes de ayudar a Rexington a destruir todos los meteoritos que se dirigen a su planeta. Utiliza el botón “Mostrar Code” para ver las herramientas que tienes a tu disposición, podrás usar Bloques Condicionales, Temporizadores, y modificadores de propiedades para controlar el comportamiento de tu nave.\", unsafe_allow_html=True)\r\n","repo_name":"DanaeSG/HACK2023","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9354,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"}
+{"seq_id":"11550230350","text":"import sys\nimport signal\nfrom functools import wraps\nimport unittest\n\nclass TimeOut(BaseException):\n    \"\"\"timeout exception\"\"\"\n    pass\n\ndef timeout(seconds):\n    \"\"\"timeout decorator\"\"\"\n    def decorator(fn):\n        if hasattr(signal, 'alarm'):\n            @wraps(fn)\n            def wrapped_f(*args, **kw):\n                current_frame = sys._getframe()\n                def alarm_handler(signal, frame):\n                    if frame is not current_frame:\n                        raise TimeOut('%s seconds' % seconds)\n                prev_handler = signal.signal(signal.SIGALRM, alarm_handler)\n                try:\n                    signal.alarm(seconds)\n                    return fn(*args, **kw)\n                finally:\n                    signal.alarm(0)\n                    signal.signal(signal.SIGALRM, prev_handler)\n            return wrapped_f\n        else:\n            return fn\n    return decorator\n\n## get reference of wrapped_f code object\n__TIMEOUT_CODE = timeout(1)(lambda: None).__code__\ndef hastimeout(func):\n    \"\"\"Return True if this func has the timeout decorator applied\"\"\"\n    if hasattr(func, \"__code__\") and \\\n        getattr(func, \"__code__\") is __TIMEOUT_CODE:\n        return True\n    return False\n\ndef set_timeout(testsuite, seconds=None):\n    \"\"\"\n    add timeout to test cases if they didn't have one,\n    @param testsuite: testsuite from loader()\n    @param seconds: timeout seconds\n    @return: updated testsuite\n    \"\"\"\n    def _testset(testsuite):\n        \"\"\"iterate tcs in testsuite\"\"\"\n        for each in testsuite:\n            if not isinstance(each, unittest.BaseTestSuite):\n                yield each\n            else:\n                for each2 in _testset(each):\n                    yield each2\n\n    if seconds:\n        for tc in _testset(testsuite):\n            assert hasattr(tc, \"_testMethodName\"), \\\n                \"%s is not an unittest.TestCase object\" % tc\n            testMethod = getattr(tc, tc._testMethodName)\n            test_func = testMethod.im_func\n            if not hastimeout(test_func):\n                tc.run = timeout(seconds)(tc.run)\n    return testsuite\n","repo_name":"ostroproject/ostro-os","sub_path":"meta-iotqa/lib/baserunner/util/timeout.py","file_name":"timeout.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"89"}
+{"seq_id":"30526275753","text":"import json\nimport argparse\nimport torch.nn as nn\nimport torch\nimport logging\nfrom shutil import rmtree\nfrom modules.data_funcs import dataloader\nfrom modules.utils import model_init, sched_init, generate_id, gpu_init\nfrom modules.training import train_loop\n\n\ndef train_dlc():\n    \"\"\"\n    Trains a deep learning model to classify thyroid histopathology images as papillary thyroid carcinoma-like\n    (PTC-like) or not. Evaluates the model against a given test set.\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--src_dir', help='Directory containing input image dataset', required=True)\n    parser.add_argument('--labels', help='File name for labels json file', required=True)\n    parser.add_argument('--out_dir', help='Output directory for results', required=True)\n    parser.add_argument('--split_file', help='Data split json file', required=True)\n    parser.add_argument('--run_id', help='Unique id for training run', required=True)\n    parser.add_argument('--gan_params', help='Parameters for GAN-Generation', default=None)\n    parser.add_argument('--lr', help='Learning rate', default=0.001, type=float)\n    parser.add_argument('--epochs', help='No. 
epochs to train model', default=500, type=int)\n parser.add_argument('--batch_size', help='Data batch size within epochs', default=8, type=int)\n parser.add_argument('--workers', help='No. workers', default=8, type=int)\n parser.add_argument('--crop_size', help='Size of cropped images', default=512, type=int)\n parser.add_argument('--model_type', help='Pre-trained model to load', default='resnet101', type=str)\n parser.add_argument('--lrd_epc', help='Reduce learning rate after x epochs without improvement',\n default=10, type=int)\n parser.add_argument('--lrd_fac', help='Factor that learning rate is decayed by',\n default=0.5, type=float)\n parser.add_argument('--es_pat', help='Epochs to wait for an improvement in val_loss before stopping',\n default=50, type=int)\n opt = parser.parse_args()\n\n # ----------------\n # Initialization\n # ----------------\n model = model_init(opt.model_type) # Pre-trained model\n criterion = nn.CrossEntropyLoss() # Loss function\n optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, weight_decay=1e-6) # Model optimiser\n lr_scheduler = sched_init(optimizer, opt.lrd_epc, opt.lrd_fac) # Lr decay scheduler\n data_labels = json.load(open(opt.labels)) # Load data labels\n run_dir = generate_id(opt.out_dir, opt.run_id) # Create a unique dir for output\n log_path = \"%s/train_log.txt\" % run_dir # Save logs to run_dir\n logging.basicConfig(filename=log_path, level=logging.INFO) # Initialize logging\n logging.info(opt) # Record params in log file\n\n # Assign model to GPU if present:\n cuda = True if torch.cuda.is_available() else False\n if cuda:\n model, criterion, device = gpu_init(model, criterion)\n else:\n device = \"cpu\"\n\n # ----------------\n # Model Training\n # ----------------\n logging.info(\"[------ Beginning Model Training -------]\")\n\n # Initialise data loaders:\n train_loader, gan_dir = dataloader(opt.src_dir, data_labels['labels'], opt.split_file, 'train',\n opt.crop_size, opt.batch_size, opt.workers, opt.gan_params, opt.run_id)\n val_loader, _ = dataloader(opt.src_dir, data_labels['labels'], opt.split_file, 'val',\n opt.crop_size, opt.batch_size, opt.workers, None, opt.run_id)\n\n # Run Model training loop:\n train_loop(model, train_loader, val_loader, device, optimizer, criterion, lr_scheduler,\n opt.epochs, opt.es_pat, run_dir)\n\n # Delete any GAN-generated images if created:\n if opt.gan_params is not None:\n logging.info(\"Removing GAN Image Directory...\")\n rmtree(gan_dir, ignore_errors=True)\n\n\nif __name__ == \"__main__\":\n train_dlc()\n","repo_name":"williamdee1/ThyCa-GAN","sub_path":"dlc_main.py","file_name":"dlc_main.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"18078526707","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport numpy as np\nimport json\n\ndef get_top(word_list, model, w, top_n):\n i = word_list.index(w)\n dot_product = np.dot(model, model[i].reshape(-1))\n norm = np.linalg.norm(model, axis=1)\n result = dot_product / (norm*norm[i])\n\n return [(word_list[i], result[i]) for i in result.argsort()[::-1][:top_n + 1]]\n\ndef get_calculated_top(word_list, model, w1, w2, w3, top_n):\n wid1, wid2, wid3 = word_list.index(w1), word_list.index(w2), word_list.index(w3)\n v1, v2, v3 = model[wid1], model[wid2], model[wid3]\n vec = v1 + (v2 - v3)\n\n dot_product = np.dot(model, vec.reshape(-1))\n vec_norm = np.linalg.norm(vec)\n norm = np.linalg.norm(model, axis=1)\n result = dot_product / 
(norm*vec_norm)\n\n print('{} + {} - {}'.format(w1, w2, w3))\n return [(word_list[i], result[i]) for i in result.argsort()[::-1][:top_n + 2] if i not in [wid1, wid2, wid3]]\n\nif __name__ == '__main__':\n\n # with open(\"./model/w2v_tensorflow.json\", \"r\") as f:\n with open(\"./model/w2v.json\", \"r\") as f:\n model = np.array(json.loads(\"\".join(f.readlines())))\n\n with open(\"./data/word_list.json\", \"r\") as f:\n word_list = list(json.loads(\"\".join(f.readlines())))\n\n print('-----')\n for i, v in get_top(word_list, model, u\"井\", 5):\n print(i, v)\n\n print('-----')\n for i, v in get_top(word_list, model, u\"雲\", 5):\n print(i, v)\n\n print('-----')\n for i, v in get_top(word_list, model, u\"峰\", 5):\n print(i, v)\n\n print('-----')\n for i, v in get_top(word_list, model, u\"風\", 5):\n print(i, v)\n\n print('-----')\n for i, v in get_top(word_list, model, u\"母\", 5):\n print(i, v)\n\n print('-----')\n for i, v in get_top(word_list, model, u\"女\", 5):\n print(i, v)\n\n print('-----')\n for i, v in get_calculated_top(word_list, model, u\"女\", u\"父\", u\"男\", 5):\n print(i, v)\n","repo_name":"ctjoy/word2vec-tutorial","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"89"} +{"seq_id":"39448510703","text":"'''\nWrite a program in Python to find prime factors of a given integer?\n\n'''\n\ndef prime_factors(n):\n \"\"\"\n Function to find the prime factors of a given integer n.\n \"\"\"\n factors = [] # list to store the prime factors\n\n # divide the number by 2 as many times as possible\n while n % 2 == 0:\n factors.append(2)\n n //= 2\n\n # check odd numbers up to the square root of n\n for i in range(3, int(n ** 0.5) + 1, 2):\n while n % i == 0:\n factors.append(i)\n n //= i\n\n # if n is still greater than 2, it must be a prime number\n if n > 2:\n factors.append(n)\n\n return factors\n\n\nnum = int(input(\"Enter a number: \")) # take input from user\n\n# find and print the prime factors of the number\nprint(\"The prime factors of\", num, \"are:\", prime_factors(num))\n\n\n\n","repo_name":"meenun12/edube","sub_path":"practice/integer/prime_factors_two_numbers.py","file_name":"prime_factors_two_numbers.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"22809263015","text":"from sourpea.primitives import *\nfrom sourpea.util import *\nimport pandas as pd\nimport os\n_dir=os.path.dirname(__file__)\n### START\nkgx = Factor(\"kgx\", [\"rxrgv\", \"xdqhc\"])\nujbe = Factor(\"ujbe\", [\"jaehg\", \"gmnopl\"])\ncbiwgn = Factor(\"cbiwgn\", [\"rxrgv\", \"xdqhc\"])\ntqlnk = Factor(\"tqlnk\", [\"jaehg\", \"gmnopl\"])\npdhel = Factor(\"pdhel\", [\"hpziox\", \"wisqo\"])\nsfe = Factor(\"sfe\", [\"nep\", \"oqcus\"])\ndef lgu (pdhel, ujbe):\n return pdhel == ujbe\ndef mxuj (pdhel, ujbe):\n return not lgu(pdhel, ujbe)\nlatl = Factor(\"latl\", [DerivedLevel(\"dgcdsk\", WithinTrial(lgu, [pdhel, ujbe])), DerivedLevel(\"zxrpht\", WithinTrial(mxuj, [pdhel, ujbe]))])\ndesign=[latl,kgx,ujbe,cbiwgn,tqlnk,pdhel,sfe]\nconstraints=[AtMostKInARow(2, sfe),AtLeastKInARow(2, ujbe)]\ncrossing=[cbiwgn,latl]\nsequence_code_1=trials_from_csv(os.path.join(_dir,\"out_code_1/6_1_2_0.csv\"))\nsequence_code_2=trials_from_csv(os.path.join(_dir,\"out_code_2/6_1_2_0.csv\"))\nnr_regular=6\nnr_derived=1\nnr_constraints=2\nblock = Block(design,crossing,constraints)\ntest_1 = 
block.test(sequence_code_1)\ntest_2 = block.test(sequence_code_2)\ndf=pd.read_csv(os.path.join(_dir,\"result_sour.csv\"))\ndf.loc[len(df.index)] = [test_1[\"pValue\"], test_2[\"pValue\"], test_1[\"levels\"], test_2[\"levels\"], test_1[\"constraints\"], test_2[\"constraints\"],nr_regular, nr_derived, nr_constraints]\ndf.to_csv(os.path.join(_dir,\"result_sour.csv\"), index=False)\n","repo_name":"DoubleBlindCogSci/autoos-evaluation","sub_path":"validation/out_data/full_experiment/source/cycle_3/6_1_2_sour.py","file_name":"6_1_2_sour.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"37103602904","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\n\nmnist = input_data.read_data_sets(\"MNIST_data/\",one_hot=True)\nin_units=784\nh1_units=300\nx=tf.placeholder(tf.float32,[None,in_units])\nw1=tf.Variable(tf.truncated_normal([in_units,h1_units],stddev=0.1))\nb1=tf.Variable(tf.zeros([h1_units]))\nh1=tf.nn.relu(tf.matmul(x,w1)+b1)\nw2=tf.Variable(tf.zeros([h1_units,10]))\nb2=tf.Variable(tf.zeros([10]))\ny=tf.nn.softmax(tf.matmul(h1,w2)+b2)\n\ny_=tf.placeholder(tf.float32,[None,10])\ncross_entropy=tf.reduce_mean(-tf.reduce_sum(y_*tf.log(y),reduction_indices=[1]))\n\ntrain_step=tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\ninit=tf.global_variables_initializer()\n\nsess = tf.Session()\nsess.run(init)\n\ncorrect_prediction=tf.equal(tf.argmax(y,1),tf.argmax(y_,1))\naccuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n\nfor i in range(10000):\n    batch_xs,batch_ys=mnist.train.next_batch(100)\n    sess.run(train_step,feed_dict={x:batch_xs,y_:batch_ys})\n    if i%100==0:\n        print(sess.run(accuracy,feed_dict={x:batch_xs,y_:batch_ys}))\nprint(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})) # print test info\nsess.close()\n\n","repo_name":"chrisqian1/MNIST-recognition","sub_path":"mist-qc.py","file_name":"mist-qc.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"74243762209","text":"'''\nThe decoder block can be stacked: each block takes an emb sequence as input and outputs an emb sequence (1:1 correspondence)\n'''\nfrom torch import nn \nimport torch \nfrom multihead_attn import MultiHeadAttention\nfrom emb import EmbeddingWithPosition\nfrom dataset import de_preprocess,en_preprocess,train_dataset,de_vocab,PAD_IDX,en_vocab\nfrom encoder import Encoder\nfrom config import DEVICE\n\nclass DecoderBlock(nn.Module):\n    def __init__(self,emb_size,q_k_size,v_size,f_size,head):\n        super().__init__()\n\n        # first multi-head attention\n        self.first_multihead_attn=MultiHeadAttention(emb_size,q_k_size,v_size,head) \n        self.z_linear1=nn.Linear(head*v_size,emb_size) \n        self.addnorm1=nn.LayerNorm(emb_size)\n\n        # second multi-head attention\n        self.second_multihead_attn=MultiHeadAttention(emb_size,q_k_size,v_size,head) \n        self.z_linear2=nn.Linear(head*v_size,emb_size) \n        self.addnorm2=nn.LayerNorm(emb_size)\n\n        # feed-forward structure\n        self.feedforward=nn.Sequential(\n            nn.Linear(emb_size,f_size),\n            nn.ReLU(),\n            nn.Linear(f_size,emb_size)\n        )\n        self.addnorm3=nn.LayerNorm(emb_size)\n\n    def forward(self,x,encoder_z,first_attn_mask,second_attn_mask): # x: (batch_size,seq_len,emb_size)\n        # first multi-head attention\n        z=self.first_multihead_attn(x,x,first_attn_mask) # z: (batch_size,seq_len,head*v_size) , first_attn_mask masks the pad part of the decoder sequence and keeps the decoder from attending to tokens after each position\n        z=self.z_linear1(z) # z: (batch_size,seq_len,emb_size)\n        
output1=self.addnorm1(z+x) # output1: (batch_size,seq_len,emb_size)\n        \n        # second multi-head attention\n        z=self.second_multihead_attn(output1,encoder_z,second_attn_mask) # z: (batch_size,seq_len,head*v_size) , second_attn_mask masks the pad part of the encoder sequence so the decoder does not attend to it\n        z=self.z_linear2(z) # z: (batch_size,seq_len,emb_size)\n        output2=self.addnorm2(z+output1) # output2: (batch_size,seq_len,emb_size)\n\n        # final feed-forward\n        z=self.feedforward(output2) # z: (batch_size,seq_len,emb_size)\n        return self.addnorm3(z+output2) # (batch_size,seq_len,emb_size)\n\nif __name__=='__main__':\n    # take 2 de sentences and convert them to token-ID sequences as encoder input\n    de_tokens1,de_ids1=de_preprocess(train_dataset[0][0]) \n    de_tokens2,de_ids2=de_preprocess(train_dataset[1][0]) \n    # the corresponding 2 en sentences converted to token-ID sequences, then embedded as decoder input\n    en_tokens1,en_ids1=en_preprocess(train_dataset[0][1]) \n    en_tokens2,en_ids2=en_preprocess(train_dataset[1][1])\n\n    # batch the de sentences and pad them to the same length\n    if len(de_ids1)<len(de_ids2):\n        de_ids1.extend([PAD_IDX]*(len(de_ids2)-len(de_ids1)))\n    elif len(de_ids1)>len(de_ids2):\n        de_ids2.extend([PAD_IDX]*(len(de_ids1)-len(de_ids2)))\n    \n    enc_x_batch=torch.tensor([de_ids1,de_ids2],dtype=torch.long).to(DEVICE)\n    print('enc_x_batch batch:', enc_x_batch.size())\n\n    # batch the en sentences and pad them to the same length\n    if len(en_ids1)<len(en_ids2):\n        en_ids1.extend([PAD_IDX]*(len(en_ids2)-len(en_ids1)))\n    elif len(en_ids1)>len(en_ids2):\n        en_ids2.extend([PAD_IDX]*(len(en_ids1)-len(en_ids2)))\n    \n    dec_x_batch=torch.tensor([en_ids1,en_ids2],dtype=torch.long).to(DEVICE)\n    print('dec_x_batch batch:', dec_x_batch.size())\n\n    # Encoder pass: outputs an encoding vector for every token\n    enc=Encoder(vocab_size=len(de_vocab),emb_size=128,q_k_size=256,v_size=512,f_size=512,head=8,nblocks=3).to(DEVICE)\n    enc_outputs=enc(enc_x_batch)\n    print('encoder outputs:', enc_outputs.size())\n\n    # build the masks the decoder needs\n    first_attn_mask=(dec_x_batch==PAD_IDX).unsqueeze(1).expand(dec_x_batch.size()[0],dec_x_batch.size()[1],dec_x_batch.size()[1]) # pad mask of the target sequence\n    first_attn_mask=first_attn_mask|torch.triu(torch.ones(dec_x_batch.size()[1],dec_x_batch.size()[1]),diagonal=1).bool().unsqueeze(0).expand(dec_x_batch.size()[0],-1,-1).to(DEVICE) # & the look-ahead mask of the target sequence\n    print('first_attn_mask:',first_attn_mask.size())\n    # using the source sequence's pad mask, hide the encoder output K from every decoder Q\n    second_attn_mask=(enc_x_batch==PAD_IDX).unsqueeze(1).expand(enc_x_batch.size()[0],dec_x_batch.size()[1],enc_x_batch.size()[1]) # (batch_size,target_len,src_len)\n    print('second_attn_mask:',second_attn_mask.size())\n\n    first_attn_mask=first_attn_mask.to(DEVICE)\n    second_attn_mask=second_attn_mask.to(DEVICE)\n\n    # embed the decoder input first\n    emb=EmbeddingWithPosition(len(en_vocab),128).to(DEVICE)\n    dec_x_emb_batch=emb(dec_x_batch)\n    print('dec_x_emb_batch:',dec_x_emb_batch.size())\n\n    # stack 5 decoder blocks\n    decoder_blocks=[]\n    for i in range(5):\n        decoder_blocks.append(DecoderBlock(emb_size=128,q_k_size=256,v_size=512,f_size=512,head=8).to(DEVICE))\n\n    for i in range(5):\n        dec_x_emb_batch=decoder_blocks[i](dec_x_emb_batch,enc_outputs,first_attn_mask,second_attn_mask)\n    print('decoder_outputs:',dec_x_emb_batch.size())","repo_name":"owenliang/pytorch-transformer","sub_path":"decoder_block.py","file_name":"decoder_block.py","file_ext":"py","file_size_in_byte":4941,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"89"} +{"seq_id":"7723584135","text":"import math\n\nM, N = map(int, input().split())\n\nfor i in range(M, N+1):\n    tmp = 0\n    if i > 1:\n        for j in range(2, int(math.sqrt(i)) + 1):\n            if i % j == 0:\n                tmp = -1\n                break\n        if tmp == 0:\n            print(i)","repo_name":"Jday4612/Baekjoon","sub_path":"Python/기본 수학2/소수 구하기.py3","file_name":"소수 구하기.py3","file_ext":"py3","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"}
+{"seq_id":"71074440931","text":"import pygame\nfrom pygame.constants import *\nimport src.constants as const\n\npygame.font.init()\n\nclass PlainText:\n def __init__(self, text, font, size, pos=None):\n \"\"\"\n text : string - contents\n font : string - path to font file\n size : integer\n pos : (x, y) - coordinates of the center *optional*\n \"\"\"\n self.text = text\n self.font = font\n self.size = size\n font = pygame.font.Font(self.font, self.size)\n self.surface = font.render(self.text, True, const.WHITE)\n self.rect = self.surface.get_rect()\n if pos is not None:\n self.position_center(pos)\n\n def position_center(self, pos):\n self.rect.center = pos\n\n def recenter(self, width, height):\n ratio_w, ratio_h = width / const.WIDTH, height / const.HEIGHT\n self.size = int(self.size * ratio_w)\n font = pygame.font.Font(self.font, self.size)\n self.surface = font.render(self.text, True, const.WHITE)\n old_rect = self.rect\n self.rect = self.surface.get_rect()\n self.position_center((old_rect.center[0] * ratio_w, old_rect.center[1] * ratio_h))\n \n def set_text(self, text, size=None):\n if size:\n self.size = size\n font = pygame.font.Font(self.font, self.size)\n self.text = text\n self.surface = font.render(self.text, True, const.WHITE)\n old_rect = self.rect\n self.rect = self.surface.get_rect()\n self.position_center((old_rect.center[0], old_rect.center[1]))\n \n def draw(self, screen):\n screen.blit(self.surface, self.rect)\n \n def move(self, new_x, new_y):\n self.position_center((new_x, new_y))\n \n def out_of_bounds(self):\n return self.rect.center[0] <= 0 or self.rect.center[1] <= 0 or self.rect.center[0] >= const.WIDTH or self.rect.center[1] >= const.HEIGHT\n\n\nclass Button(PlainText):\n def __init__(self, text, font, size, flag, func=None, params=None, pos=None):\n \"\"\"\n text : string - contents\n font : string - path to font file\n size : int\n flag : constant - used for game state switching\n func : function - called when button is pressed *optional*\n params : tuple of any length and any type - is passed into func when button is pressed *optional*\n pos : tuple (int/float, int/float) - coordinates of the center *optional*\n \"\"\"\n super().__init__(text, font, size, pos)\n self.flag = flag\n self.func = func\n self.params = params\n self.selected = False\n\n def trigger_bttn(self):\n return (self.flag, self.func(*self.params) if self.func is not None else None)\n \n def draw(self, screen):\n if self.selected:\n pygame.draw.circle(screen, const.WHITE, (self.rect.left - 30, self.rect.center[1]), const.WIDTH * 0.008)\n screen.blit(self.surface, self.rect)","repo_name":"Costinteo/the-open-scrolls","sub_path":"src/uiobj.py","file_name":"uiobj.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"} +{"seq_id":"32281791188","text":"a = 1\nb = 1\nc = a+b\ncount = 3\nwhile True:\n # a, b, c = Fx, Fx+1, Fx+2\n a = b+c\n b = a+c\n c = a+b\n count += 3 # count countains index of c\n if len(str(c)) == 1000:\n if len(str(b)) == 1000:\n if len(str(a)) == 1000:\n print(count-2)\n else:\n print(count-1)\n else:\n print(count)\n break\n","repo_name":"93Pd9s8Jt/Project-Euler","sub_path":"25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"20401161481","text":"no1=float(input(\"Enter First Number: \"))\nno2=float(input(\"Enter Second Number: \"))\nno3=float(input(\"Enter Third 
Number: \"))\n\nmax=no1\nif no2>max:\n    max=no2\nif no3>max:\n    max=no3\nprint(\"Maximum of 3 Numbers is \",max)\n\nif(no1>no2 and no1>no3):\n    alertText=\"A (\"+str(no1)+\") is Maximum\"\nelif(no2>no3 and no2>no1):\n    alertText=\"B (\" +str(no2)+ \") is Maximum\"\nelif(no3>no1 and no3>no2):\n    alertText=\"C (\"+str(no3)+\") is Maximum\"\nelif(no1==no2 and no1==no3):\n    alertText=\"All (\"+str(no1)+\") are Equal\"\nelif(no1==no2):\n    if(no1>no3):\n        alertText=\"A & B (\"+str(no1)+\") are Maximum\"\n    else:\n        alertText=\"C (\"+str(no3)+\") is Maximum\"\nelif(no1==no3):\n    if(no1>no2):\n        alertText=\"A & C (\"+str(no1)+\") are Maximum\"\n    else:\n        alertText=\"B (\"+str(no2)+\") is Maximum\"\nelif(no2==no3):\n    if(no2>no1):\n        alertText=\"B & C (\"+str(no2)+\") are Maximum\"\n    else:\n        alertText=\"A (\"+str(no1)+\") is Maximum\"\nprint(alertText)","repo_name":"KushalKatta/MscCA","sub_path":"Python/MaxOfThree.py","file_name":"MaxOfThree.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"72633356449","text":"from django.utils import timezone\n\nfrom django_tenants.test.cases import TenantTestCase\n\nfrom quest_manager.forms import QuestForm\n\n\nclass QuestFormTest(TenantTestCase):\n\n    def setUp(self):\n        self.minimal_valid_data = {\n            \"name\": \"Test Quest\",\n            \"xp\": 0,\n            \"max_repeats\": 0,\n            \"max_xp\": -1,\n            \"hours_between_repeats\": 0,\n            \"sort_order\": 0,\n            \"date_available\": str(timezone.now().date()),\n            \"time_available\": \"0:00:00\",\n            \"tags\": \"\",\n        }\n\n    def test_minimal_valid_data(self):\n        \"\"\"The minimal_valid_data provided in the setup method should be valid!\"\"\"\n        form = QuestForm(data=self.minimal_valid_data)\n        self.assertTrue(form.is_valid())\n\n    def test_hideable_blocking_both_true(self):\n        \"\"\"If a quest is Blocking then it should not validate if it is also Hideable\"\"\"\n        form_data = self.minimal_valid_data\n\n        form_data[\"hideable\"] = True\n        form_data[\"blocking\"] = True\n\n        form = QuestForm(data=form_data)\n        self.assertFalse(form.is_valid())\n        self.assertIn(\"Blocking quests cannot be Hideable.\", form.errors['__all__'][0])\n","repo_name":"bytedeck/bytedeck","sub_path":"src/quest_manager/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"89"} +{"seq_id":"38475814940","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport random\nimport string\n\nletters = string.ascii_lowercase\nrandom_string = ''.join(random.choice(letters) for i in range(7))\n\n#print(random_string)\n\n# Ask for file name\nfilename = input(\"Enter file name: \")\n\n# Define roulette sequence\nroulette_sequence = [0, 32, 15, 19, 4, 21, 2, 25, 17, 34, 6, 27, 13, 36, 11, 30, 8, 23, 10, 5, 24, 16, 33, 1, 20, 14,\n                     31, 9, 22, 18, 29, 7, 28, 12, 35, 3, 26] # np.arange(0, 37)\n\n# Ask for current roulette number\ndef enter_number():\n    while True:\n        current_roulette = int(input(\"Enter current roulette number: \"))\n        if not(current_roulette>=0 and current_roulette<=36):\n            print('Invalid values. Acceptable values 0,1,2,3,..., 34,35,36')\n        else:\n            break\n    return current_roulette\n\nprevious_roulette = enter_number()\n\n# Initialize arrays\ndifference = []\ndirection = []\nbounce = []\nbnc='n'\n# Loop to ask for input\nwhile True:\n    # Ask for direction, roulette number and bounce\n    print('======================== New Round? 
======================')\n while True:\n dir = input(\"Enter direction or exit (l/r/exit): \")\n if dir != 'l' and dir!= 'r' and dir!='exit':\n print('Invalid values. Acceptable values are l,r,exit.')\n else:\n break\n if dir == \"exit\":\n break\n num = enter_number()\n #bnc = input(\"Did the ball bounce? (y/n): \")\n\n # Print entered values and ask for confirmation\n print(f\"Direction: {dir}, Number: {num}, Bounced: {bnc}\")\n confirm = input(\"Press any key if this is correct. If there was an error press z \")\n\n if confirm == \"z\":\n print('Error noted, you can try again.')\n continue\n\n # Append values to arrays\n direction.append(dir)\n bounce.append(bnc)\n\n # Calculate slot difference and append to difference array\n p1 = roulette_sequence.index(previous_roulette)\n p2 = roulette_sequence.index(num)\n slot_diff = p2 - p1\n if slot_diff < 0:\n slot_diff = slot_diff + 37\n print(f\"Position difference: {slot_diff}\")\n difference.append(slot_diff)\n previous_roulette = num\n\n# Convert bounce array to boolean\nbounce = np.array(bounce) == \"y\"\n\n\ndf = pd.DataFrame({'difference': difference, 'bounce': bounce, 'direction': direction})\ndf.to_csv(f'{filename}_{random_string}.csv')\n# Group by the categories and calculate the statistics\ncategories = [('True', 'l'), ('True', 'r'), ('False', 'r'), ('False', 'l')]\nfor b, d in categories:\n group = df[(df['bounce'] == (b == 'True')) & (df['direction'] == d)]\n if not group.empty:\n avg = group['difference'].mean()\n std = group['difference'].std()\n group.hist(column='difference', bins=np.arange(-1,37)+0.5, ec=\"k\")\n print(group)\n print(f\"Category ({b}, {d}): Avg={avg:.2f}, Std={std:.2f}\")\n plt.show()\n","repo_name":"ioageotolias/roulette-pattern-recogniser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"5582928035","text":"import cv2\nimport numpy as np\n\ndef nothing(x):\n pass\n\n# Create a black image, a window\ncv2.namedWindow('image')\n\n# create trackbars for color change\ncv2.createTrackbar('H_low','image',0,179,nothing)\ncv2.createTrackbar('H_high','image',179,179,nothing)\n\ncv2.createTrackbar('S_low','image',0,255,nothing)\ncv2.createTrackbar('S_high','image',255,255,nothing)\n\ncv2.createTrackbar('V_low','image',0,255,nothing)\ncv2.createTrackbar('V_high','image',255,255,nothing)\n\ncap = cv2.VideoCapture(0)\n\nalpha = 0.9 # Contrast control (1.0-3.0)\nbeta = 0 # Brightness control (0-100)\n\nwhile(1):\n ret, frame = cap.read()\n\n frame = cv2.convertScaleAbs(frame, alpha=alpha, beta=beta)\n\n if not ret:\n break\n\n # Convert BGR to HSV\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # get current positions of four trackbars\n h_low = cv2.getTrackbarPos('H_low','image')\n h_high = cv2.getTrackbarPos('H_high','image')\n s_low = cv2.getTrackbarPos('S_low','image')\n s_high = cv2.getTrackbarPos('S_high','image')\n v_low = cv2.getTrackbarPos('V_low','image')\n v_high = cv2.getTrackbarPos('V_high','image')\n\n # define range of color in HSV\n lower = np.array([h_low,s_low,v_low])\n upper = np.array([h_high,s_high,v_high])\n\n # Threshold the HSV image to get only selected colors\n mask = cv2.inRange(hsv, lower, upper)\n\n cv2.imshow('image',mask)\n k = cv2.waitKey(1) & 0xFF\n if k == 27: # ESC key to exit\n 
break\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"ChviChvi/GOLFBOT-CDIO2023","sub_path":"Tools/HSV-test.py","file_name":"HSV-test.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"} +{"seq_id":"16508508125","text":"from ...util import mkdir\nimport time\nimport numpy as np\nfrom sklearn.metrics import accuracy_score, roc_auc_score, f1_score, confusion_matrix\nfrom catboost import CatBoostClassifier, metrics\nimport os\n\nMETRICS = [\n \"loss\",\n \"accuracy\",\n \"auc\",\n \"f1_score\",\n]\nVAL_METRICS = [f\"val_{m}\" for m in METRICS]\nTARGETS = [\"victory\", \"score\", \"duration\"]\n\nclass ResultPredictor:\n def __init__(\n self,\n *args,\n epochs=1000,\n lr=0.1,\n bin_crit=metrics.CrossEntropy(),\n metric=metrics.Accuracy(),\n random_seed=42,\n od_wait=50,\n od_type=\"Iter\",\n logging_level=\"Silent\",\n model=CatBoostClassifier,\n **kwargs\n ):\n self.lr = lr\n self.epochs = epochs\n self.epoch = 0\n self.od_wait = od_wait\n self.model = model(\n *args, \n iterations=epochs,\n learning_rate=lr,\n loss_function=bin_crit,\n eval_metric=metric,\n random_seed=random_seed,\n od_wait=od_wait,\n use_best_model=True,\n od_type=od_type,\n logging_level=logging_level,\n **kwargs\n )\n self.training_prepared = False\n\n def prepare_training(\n self,\n train_loader,\n val_loader=None,\n checkpoint_dir=\"checkpoints\",\n ):\n self.train_loader = train_loader\n self.val_loader = val_loader\n\n self.prepare_checkpoint(checkpoint_dir)\n\n self.training_prepared = True\n\n def prepare_checkpoint(self, checkpoint_dir=\"checkpoints\"):\n mkdir(checkpoint_dir)\n self.checkpoint_dir = checkpoint_dir\n\n def load_model(self, file_name=\"best.dump\"):\n self.model.load_model(os.path.join(self.checkpoint_dir, file_name))\n\n def save_model(self, file_name=\"best.dump\"):\n self.model.save_model(os.path.join(self.checkpoint_dir, file_name))\n\n def get_lr(self):\n return self.lr\n\n def train(self, val=False, val_loader=None, autosave=True, true_threshold=0.5):\n assert self.training_prepared\n val_loader = self.val_loader if val_loader is None else val_loader\n if val:\n assert val_loader is not None, \"Please provide validation dataloader\"\n start_time = time.time()\n\n bin_true = []\n bin_pred = []\n loader = val_loader if val else self.train_loader\n\n if not val:\n self.model.fit(\n loader,\n eval_set=val_loader,\n logging_level=\"Verbose\",\n plot=True\n )\n if self.od_wait:\n self.epoch = self.model.get_best_iteration() + self.od_wait\n else:\n self.epoch = self.epochs\n\n bin_true = loader.y > true_threshold\n bin_pred = self.model.predict(loader) > true_threshold\n\n cm = confusion_matrix(bin_true, bin_pred)\n cm_labels = [\"tn\", \"fp\", \"fn\", \"tp\"]\n\n loss = np.mean(self.model.eval_metrics(loader, [metrics.CrossEntropy()])['CrossEntropy'])\n\n cur_metrics = {\n \"epoch\": self.epoch,\n \"loss\": loss,\n \"victory_loss\": loss,\n \"accuracy\": accuracy_score(bin_true, bin_pred),\n \"auc\": roc_auc_score(bin_true, bin_pred),\n \"f1_score\": f1_score(bin_true, bin_pred),\n **{cm_labels[i]: x for i, x in enumerate(cm.ravel())}\n }\n \n if val:\n cur_metrics = {f\"val_{k}\": v for k, v in cur_metrics.items()}\n else:\n lr = self.get_lr()\n ms = (time.time() - start_time) * 1000\n print(f'| epoch {self.epoch:3d} | '\n f'lr {lr} | ms {ms:5.2f} | ')\n \n if autosave:\n self.save_model()\n\n return self.epoch, cur_metrics\n \n def feature_importance(self, columns):\n feature_importances = 
self.model.get_feature_importance(self.train_loader)\n feature_names = [f\"{l}_{c}\" for c in columns for l in (\"left\", \"right\")]\n assert (len(feature_importances) == len(feature_names))\n return {n: s for s, n in sorted(zip(feature_importances, feature_names), reverse=True)}\n\n def predict(self, data):\n return self.model.predict(data)\n\n def predict_prob(self, data):\n return self.model.predict_proba(data)\n\n def eval(self, val_loader=None, true_threshold=0.5):\n assert self.training_prepared\n val_loader = self.val_loader if val_loader is None else val_loader\n assert val_loader is not None, \"Please provide validation dataloader\"\n\n bin_true = []\n bin_pred = []\n loader = val_loader\n\n victory_preds = self.model.predict(loader)\n\n bin_true = loader.y > true_threshold\n bin_pred = victory_preds > true_threshold\n\n cm = confusion_matrix(bin_true, bin_pred)\n cm_labels = [\"tn\", \"fp\", \"fn\", \"tp\"]\n\n loss = np.mean(self.model.eval_metrics(loader, [metrics.CrossEntropy()])['CrossEntropy'])\n\n cur_metrics = {\n \"epoch\": self.epoch,\n \"loss\": loss,\n \"victory_loss\": loss,\n \"accuracy\": accuracy_score(bin_true, bin_pred),\n \"auc\": roc_auc_score(bin_true, bin_pred),\n \"f1_score\": f1_score(bin_true, bin_pred),\n **{cm_labels[i]: x for i, x in enumerate(cm.ravel())}\n }\n \n cur_metrics = {f\"val_{k}\": v for k, v in cur_metrics.items()}\n\n return victory_preds, bin_pred, cur_metrics\n","repo_name":"R-N/ml_draftpick_dss","sub_path":"ml_draftpick_dss/predicting/catboost/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":5478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"} +{"seq_id":"26721308409","text":"from setuptools import setup, find_packages\n\nNAME = 'mezzanine-blocks'\n\nVERSION = '0.9.1'\n\nDESCRIPTION = \"\"\"\nA mezzanine flavored fork of django-flatblocks.\nThe goal of this project is to be able to easily create custom blocks of\ntext/HTML in the template, and can be editable via admin.\n\"\"\"\n\nsetup(\n name=NAME,\n description=DESCRIPTION,\n long_description=open('README.md').read(),\n version=VERSION,\n author='Renyi Khor',\n author_email='renyi.ace@gmail.com',\n url='https://github.com/renyi/mezzanine-blocks',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n requires=['mezzanine'],\n)\n","repo_name":"cajolix/mezzanine-blocks","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"89"} +{"seq_id":"21967193481","text":"from skimage import io, filter, color\nfrom skimage.filter import sobel\nimport matplotlib.pyplot as plt\n\nA = color.rgb2gray(io.imread('haai1.jpg', plugin='pil'))\nAA = sobel(A)\n\nf, (ax0, ax1) = plt.subplots(1, 2)\nplt.tight_layout()\n\nax0.imshow(A, cmap=plt.cm.gray)\nax0.set_title('Input image')\n\nax1.imshow(200*AA, cmap=plt.cm.gray)\nax1.set_title('Sobel operator applied')\n\nplt.show()\n","repo_name":"lizecillie/Progress_reports","sub_path":"sobel.py","file_name":"sobel.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"31555604329","text":"import asyncio\nimport tkinter as tk\nimport customtkinter\nfrom tkinter import messagebox\n\nimport websockets\n\nfrom chat_app.color_theme import BUTTON, TEXT, HOVER\nfrom chat_app.settings import PORT, DOMAIN\nfrom client.messages import get_messages, 
create_new_message\n\n\nclass ChatMessages(tk.Frame):\n    def __init__(self, master=None, discussion_list=None):\n        super().__init__(master)\n        master.grid(row=0, column=1, sticky=\"nsew\")\n\n        self.websocket = None\n\n        self.master = master\n        self.chat_text = None\n        self.message_entry = None\n        self.send_button = None\n        self.discussion_list = discussion_list\n\n        self.placeholder = \"Type a message...\"\n\n        self.discussion_list.listbox_discussions.bind(\"<<TreeviewSelect>>\", self.on_item_select)\n        self.messages = []\n        self.create_widget()\n\n    async def connect_to_websocket_server_recv(self):\n        await asyncio.sleep(2)\n        try:\n            async with websockets.connect(f\"ws://{DOMAIN}:{PORT}/ws/client_id\") as websocket:\n                while True:\n                    self.websocket = websocket\n                    response = await websocket.recv()\n                    if response:\n                        self.on_item_select(response)\n\n        except websockets.ConnectionClosed:\n            messagebox.showerror(\"API error message\", \"Connection closed.\")\n        except Exception as e:\n            messagebox.showerror(\"API error message\", f\"Error: {str(e)}\")\n\n    def on_item_select(self, event):\n        selected_index = self.discussion_list.listbox_discussions.selection()\n\n        if selected_index:\n            selected_item = self.discussion_list.listbox_discussions.selection()[0]\n            selected_discussion = self.discussion_list.listbox_discussions.item(selected_item)\n\n            discussion_id = str(selected_discussion[\"values\"][0])\n\n            messages = get_messages(self.discussion_list.user_id, discussion_id)\n            self.display_chat_messages(messages)\n\n    def create_widget(self):\n        self.chat_text = tk.Text(self, height=20, width=80, bg=\"white\", fg=\"black\", font=(\"Helvetica\", 12))\n        self.chat_text.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)\n        self.chat_text.config(cursor=\"arrow\")\n\n        self.message_entry = tk.Text(self, height=4, width=70, bg=\"white\", fg=\"black\", font=(\"Helvetica\", 12))\n        self.message_entry.pack(side=\"left\", fill=tk.BOTH, padx=10, pady=10)\n        self.message_entry.bind(\"<Return>\", self.send_message_event)\n        self.message_entry.insert(\"1.0\", self.placeholder)\n        self.message_entry.config(fg=\"gray\")\n        self.message_entry.bind(\"<FocusIn>\", self.on_message_focusin)\n        self.message_entry.bind(\"<FocusOut>\", self.on_message_focusout)\n\n        self.send_button = customtkinter.CTkButton(self, text=\"Send\",\n                                                   height=60,\n                                                   width=100,\n                                                   corner_radius=8,\n                                                   fg_color=BUTTON,\n                                                   border_color=BUTTON,\n                                                   border_width=1,\n                                                   hover_color=HOVER,\n                                                   text_color=TEXT,\n                                                   font=(\"Helvetica\", 18, \"bold\"),\n                                                   command=self.send_message)\n        self.send_button.pack(side=\"right\", fill=tk.X)\n\n    def send_message(self):\n        selected_index = self.discussion_list.listbox_discussions.selection()\n\n        if selected_index:\n            selected_item = self.discussion_list.listbox_discussions.selection()[0]\n            selected_discussion = self.discussion_list.listbox_discussions.item(selected_item)\n\n            discussion_id = str(selected_discussion[\"values\"][0])\n            message = self.message_entry.get(\"1.0\", tk.END)\n\n            message_obj = {\n                \"discussion_id\": discussion_id,\n                \"user_id\": self.discussion_list.user_id,\n                \"value\": message\n            }\n\n            self.messages.append(message_obj)\n            self.create_new_chat_messages(message_obj)\n            asyncio.get_event_loop().run_until_complete(self.websocket.send(\"New event\"))\n            self.message_entry.delete('1.0', tk.END)\n\n    def on_message_focusin(self, event):\n        if self.message_entry.get(\"1.0\", \"end-1c\") == self.placeholder:\n            self.message_entry.delete(\"1.0\", tk.END)\n            self.message_entry.config(fg=\"black\", font=(\"Helvetica\", 12))\n\n    def on_message_focusout(self, event):\n        if not self.message_entry.get(\"1.0\", 
\"end-1c\"):\n self.message_entry.insert(\"1.0\", self.placeholder)\n self.message_entry.config(fg=\"gray\", font=(\"Helvetica\", 12))\n\n def send_message_event(self, event):\n self.send_message()\n\n def display_chat_messages(self, messages):\n self.chat_text.delete('1.0', tk.END)\n for message in messages:\n name = message[\"name\"]\n value = message[\"value\"]\n time = message[\"created_at\"]\n message_text = f\"[{time[-8:]}] {name}: {value}\\n\\n\"\n self.chat_text.insert(tk.END, message_text)\n\n def create_new_chat_messages(self, message):\n create_new_message(message)","repo_name":"DavidRotariu/chathub","sub_path":"chat_app/chat_messages.py","file_name":"chat_messages.py","file_ext":"py","file_size_in_byte":5445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"9676169108","text":"import metapath_D_PR_R\r\nfrom dev_rev_list import *\r\nimport validData_26\r\n\r\ndef test_metapath_D_PR_R():\r\n\r\n dev_list, rev_list, uq_map = dev_rev_list(\"STQ_Database\",\"giraph\")\r\n pr_list = validData_26.validData_26(\"STQ_Database\", \"giraph\")\r\n\r\n matrix_D_PR_R = metapath_D_PR_R.metapath_D_PR_R(\"STQ_Database\", pr_list, dev_list, rev_list, uq_map)\r\n\r\n assert (matrix_D_PR_R[dev_list.index(\"fd4\")][rev_list.index(\"fd11\")]) ==0\r\n assert (matrix_D_PR_R[dev_list.index(\"fd3\")][rev_list.index(\"fd10\")]) ==1\r\n assert (matrix_D_PR_R[dev_list.index(\"fd5\")][rev_list.index(\"fd11\")]) ==1","repo_name":"zevnil/STQM_Project","sub_path":"test_D_PR_R.py","file_name":"test_D_PR_R.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"74419677402","text":"from utils import file_into_list\n\nrucksacks = [ (ruck[:(len(ruck)//2)], ruck[(len(ruck)//2):]) for ruck in file_into_list(\"day3/input.txt\")]\n\ndef priority(ch):\n if 96 < ord(ch) < 123:\n return ord(ch) - 96\n else:\n return ord(ch) - 38\n\ns = sum([ priority((set(list(a)) & set(list(b))).pop()) for a, b in rucksacks])\nprint(\"Part 1: \", s)\n","repo_name":"branislavjenco/advent2022","sub_path":"day3/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"33222680541","text":"import urllib3\n\nfrom azext_aro.custom import rp_mode_development\nfrom azext_aro.vendored_sdks.azure.mgmt.redhatopenshift.v2023_09_04 import AzureRedHatOpenShiftClient\nfrom azure.cli.core.commands.client_factory import get_mgmt_service_client\n\n\ndef cf_aro(cli_ctx, *_):\n\n opt_args = {}\n\n if rp_mode_development():\n opt_args = {\n \"base_url\": \"https://localhost:8443/\",\n \"connection_verify\": False\n }\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n client = get_mgmt_service_client(\n cli_ctx, AzureRedHatOpenShiftClient, **opt_args)\n\n return client\n","repo_name":"Azure/ARO-RP","sub_path":"python/az/aro/azext_aro/_client_factory.py","file_name":"_client_factory.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"87"} +{"seq_id":"33392952233","text":"import unittest\nfrom ranking.score import linear_function,bm25\n\n\nindex = {\"karine\": {1: {\"count\": 1, \"positions\": [0]},\n 2: {\"count\": 2, \"positions\": [1, 3]}},\n \"wikipédia\": {1: {\"count\": 1, \"positions\": [1]},\n 2: {\"count\": 1, \"positions\": [0]}},\n \"ok\": {2: {\"count\": 
2, \"positions\": [2, 4]}}}\n\nclass TestScore(unittest.TestCase):\n def test_linear_function(self):\n doc = 1\n querry = [\"karine\"]\n self.assertEqual(linear_function(querry,doc,index),4)\n querry =[\"la\",\"karine\"]\n self.assertEqual(linear_function(querry,doc,index),3)\n querry =[\"zer\",\"la\",\"karine\"]\n self.assertEqual(linear_function(querry,doc,index),2)\n querry =[\"karine\",\"wikipédia\",\"karine\",\"ok\",\"ok\"]\n self.assertEqual(linear_function(querry,doc,index),10)\n doc=2\n querry =[\"ok\",\"ok\"]\n self.assertEqual(linear_function(querry,doc,index),7)\n\n\n def test_linear_bm25(self):\n querry =[\"ok\",\"ok\"]\n doc =2\n avg_doc_len = 5.2\n doc_len = 5\n N=4\n self.assertAlmostEqual(bm25(doc,querry,index,avg_doc_len,doc_len,N) , 1.704866366424662 ) \n save_score= bm25(doc,querry,index,avg_doc_len,doc_len,N)\n\n doc_len = 78\n self.assertLess(bm25(doc,querry,index,avg_doc_len,doc_len,N) , save_score ) \n save_score= bm25(doc,querry,index,avg_doc_len,doc_len,N)\n \n avg_doc_len = 52\n doc_len = 5\n self.assertGreater(bm25(doc,querry,index,avg_doc_len,doc_len,N) ,save_score ) \n save_score= bm25(doc,querry,index,avg_doc_len,doc_len,N)\n\n N=400\n self.assertGreater(bm25(doc,querry,index,avg_doc_len,doc_len,N) , save_score )\n save_score= bm25(doc,querry,index,avg_doc_len,doc_len,N)\n\n querry =[\"ok\",\"kqrine\",'888']\n self.assertLess(bm25(doc,querry,index,avg_doc_len,doc_len,N) , save_score)","repo_name":"AdrienMarquer22/TP3_Ranking","sub_path":"test/test_score.py","file_name":"test_score.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"2836992060","text":"from flask import flash, redirect, render_template,url_for\nfrom flask_login import login_required, login_user, logout_user\nfrom datetime import datetime, timedelta\n\nfrom . import auth\nfrom .token import generate_confirmation_token, confirm_token\nfrom .forms import LoginForm, RegistrationForm\nfrom .. import db, scheduler\nfrom ..models import User\nfrom ..email import send_email\n\n\ndef delete_unconfirmed_user(id):\n\twith db.app.app_context():\n\t\tuser = User.query.get(id)\n\t\tif not user.confirmed:\n\t\t\tdb.session.delete(user)\n\t\t\tdb.session.commit()\t\n\n@auth.route('/register', methods=['GET','POST'])\ndef register():\n\tform = RegistrationForm()\n\tif form.validate_on_submit():\n\t\tuser = User(email = form.email.data,\n\t\t\t\t\tusername = form.username.data,\n\t\t\t\t\tpassword = form.password.data)\n\t\tdb.session.add(user)\n\t\tdb.session.commit()\n\t\t\n\t\ttoken = generate_confirmation_token(user.email)\n\t\tconfirm_url = url_for('auth.confirm_email', token=token, _external=True)\n\t\thtml = render_template('auth/email.html',confirm_url = confirm_url)\n\t\tsubject = \"Please confirm your email\"\n\t\tsend_email(user.email,subject,html)\n\t\t\n\t\td = datetime.now() + timedelta(days=60)\n\t\tscheduler.add_job(delete_unconfirmed_user, 'date', run_date=d, args=[user.id], id=str(user.id))\n\t\t\n\t\tflash('Thou hast successfully registered! Email for authentication hath been send.')\n\t\t\n\t\treturn redirect(url_for('auth.login'))\n\treturn render_template('auth/register.html',form=form,title='Registration')\n\n@auth.route('/confirm/', methods=['GET','POST'])\ndef confirm_email(token):\n\ttry:\n\t\tx = confirm_token(token)\n\t\tuser = User.query.filter_by(email=x).first_or_404()\n\t\tif user.confirmed:\n\t\t\tflash('Account hath been already confirmed. 
Please login.','success')\n\t\telse:\n\t\t\tuser.confirmed = True\n\t\t\tdb.session.add(user)\n\t\t\tdb.session.commit()\n\t\t\tflash('Thou hast confirmed thine account, Thanks!','success')\n\texcept:\n\t\tflash('This confirmation link is invalid or hath expired','danger')\n\treturn redirect(url_for('auth.login'))\n\t\n@auth.route('/login', methods=['GET','POST'])\ndef login():\n\tform = LoginForm()\n\tif form.validate_on_submit():\n\t\tuser = User.query.filter_by(email = form.email.data).first()\n\t\tif user is not None and user.verify_password(form.password.data):\n\t\t\tlogin_user(user)\n\t\t\treturn redirect(url_for('home.homepage'))\n\t\telse:\n\t\t\tflash('Either password or email is incorrect or unconfirmed.')\n\t\t\t\n\treturn render_template('auth/login.html',form=form,title='Login')\n\t\t\t\n@auth.route('/logout', methods=['GET','POST'])\n@login_required\ndef logout():\n\tlogout_user()\n\tflash('Thou hast been successfully logged out')\n\treturn redirect(url_for('auth.login'))","repo_name":"damjur/Biblioteka-na-AI","sub_path":"0.Projekt/app/auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"39068744630","text":"import sys\nsys_input=sys.stdin.readline\n\nn=int(sys_input())\nlim=int(sys_input())\nd=8 # for 1 and 5: plain multiples; for 2~4: the value alternates between the two slots\nif n==1 or n==5:\n    cur_n=n+lim*d\n\nelse:\n    cur_n=n+(lim//2)*d\n\n    if lim%2!=0:\n        cur_n+=-2*n+10\n\nprint(cur_n-1)\n","repo_name":"shin5774/algorithm_study","sub_path":"python/1614.py","file_name":"1614.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"46005358864","text":"#!/usr/bin/env python3.10\nfrom loguru import logger\nfrom aocd import get_data, submit\n\nINPUT = [list(map(int, box.split('x'))) for box in get_data(day=2, year=2015).splitlines()] # Get individual box dimensions and convert to ints\nlogger.debug(\"Loaded the input data\")\n\nribbon = 0\nfor box in INPUT:\n    box.sort()\n    ribbon += 2 * box[0] + 2 * box[1] # Add the smallest perimeter\n    ribbon += box[0] * box[1] * box[2] # Add the volume\nlogger.debug(\"Successfully parsed the input\")\n\nlogger.info(f\"Result: {ribbon}\")\nsubmit(ribbon, day=2, year=2015)\nlogger.debug(\"Successfully submitted the result\")\n","repo_name":"conelul/advent-of-code","sub_path":"2015/day2/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"28383534626","text":"base_config = {\n    'extensions' : {},\n    'title' : \"test\",\n    'extra_args' : \"\",\n    'browser_wait' : 5,\n    \"env\" : {'NO_EM_RESTART' : 1},\n    'branch': 'testbranch',\n    'buildid': 'testbuildid',\n    'dirs': {},\n    'preferences' : { 'browser.EULA.override' : True,\n                      'security.fileuri.strict_origin_policy' : False,\n                      'browser.shell.checkDefaultBrowser' : False,\n                      'browser.warnOnQuit' : False,\n                      'browser.link.open_newwindow' : 2,\n                      'dom.allow_scripts_to_close_windows' : True,\n                      'dom.disable_open_during_load': False,\n                      'dom.max_script_run_time' : 0,\n                      'dom.max_chrome_script_run_time' : 0,\n                      'browser.dom.window.dump.enabled': True,\n                      'network.proxy.type' : 1,\n                      'network.proxy.http' : 'localhost',\n                      'network.proxy.http_port' : 80,\n                      'dom.disable_window_flip' : True,\n                      'dom.disable_window_move_resize' : True,\n                      'security.enable_java' : False,\n                      'extensions.checkCompatibility' : False,\n                      
'extensions.update.notifyUser': False,\n \"capability.principal.codebase.p0.granted\" : \"UniversalPreferencesWrite UniversalXPConnect UniversalPreferencesRead\",\n \"capability.principal.codebase.p0.id\" : \"file://\",\n \"capability.principal.codebase.p0.subjectName\": \"\",\n \"capability.principal.codebase.p1.granted\" : \"UniversalPreferencesWrite UniversalXPConnect UniversalPreferencesRead\",\n \"capability.principal.codebase.p1.id\" : \"http://localhost\",\n \"capability.principal.codebase.p1.subjectName\" : \"\",\n \"signed.applets.codebase_principal_support\" : True},\n 'tests' : [],\n 'copy_profile' : False\n }\n\nts_config = {\"name\" : \"ts\",\n \"url\" : \"talos/startup_test/startup_test.html?begin=\",\n \"url_mod\" : \"str(int(time.time()*1000))\",\n \"resolution\" : 1,\n \"timeout\": 150,\n \"win_counters\" : [],\n \"unix_counters\" : [],\n \"mac_counters\" : [],\n \"shutdown\" : False,\n \"timeout\" : 10\n }\n\n\n","repo_name":"jonallengriffin/dirtyharry","sub_path":"talos_config.py","file_name":"talos_config.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"87"} +{"seq_id":"4354905120","text":"def count_substrings(haystack, needle):\n return haystack.count(needle, 0)\n\n\ndef sum_matrix(m):\n return sum(sum(m[row]) for row in range(0, len(m)))\n\n\ndef nan_expand(times):\n if times <= 0:\n return\n if times == 1:\n print('Not a NaN')\n return\n print('Not a ', end = \"\")\n nan_expand(times - 1)\n\n\ndef prime(n):\n for x in range(2, n):\n if n % x == 0:\n return False\n\n return True\n\n\ndef prime_factorization(n):\n primes = []\n for x in range(2, n + 1):\n if n % x == 0 and prime(x):\n count = 0\n while(n % x == 0):\n count += 1\n n /= x\n primes.append((x, count))\n\n return primes\n\n\ndef group(lst):\n current_group = [lst[0]]\n result = []\n for item in lst[1:]:\n if item == current_group[0]:\n current_group.append(item)\n else:\n result.append(current_group)\n current_group = [item]\n result.append(current_group)\n\n return result\n\n\ndef max_consecutive(items):\n return max(map(len, group(items)))\n\n\ndef palindrome(item):\n return str(item) == str(item)[::-1]\n\n\ndef help_rowwise(search_place, word):\n return search_place.count(word, 0, len(search_place))\n\n\ndef vertical(m, col_inx, word):\n col = ''.join(map(str, [m[row][col_inx] for row in range(0, len(m))]))\n if palindrome(word):\n return help_rowwise(col, word)\n return help_rowwise(col, word) + help_rowwise(col[::-1], word)\n\n\ndef diagonal(m, row_inx, col_inx, word):\n diagonal_word = ''.join(m[row_inx + i][col_inx + i]\n for i in range(0, min(len(m) - row_inx, len(m[0]) - col_inx)))\n counter = help_rowwise(diagonal_word, word)\n diagonal_word = ''.join(m[row_inx + i][col_inx - i]\n for i in range(0, min(len(m) - row_inx, col_inx)))\n counter += help_rowwise(diagonal_word, word)\n return counter\n\n\ndef word_counter(m, word):\n counter = 0\n for row in range(0, len(m)):\n counter += help_rowwise(m[row], word)\n counter += diagonal(m, row, 0, word)\n if not palindrome(m[row]):\n counter += help_rowwise(m[row][::-1], word)\n for col in range(0, len(m[0])):\n counter += diagonal(m, 0, col, word)\n counter += vertical(m, col, word)\n counter -= diagonal(m, 0, 0, word)\n return counter\n\n\ndef gas_stations(distance, tank_size, stations):\n check_points = stations + [distance]\n distance_covered = 0\n shortest = []\n tank = tank_size\n start = 1\n\n while(start < len(check_points)):\n distance_covered += tank\n 
tank = 0\n if(distance_covered < check_points[start]):\n shortest.append(check_points[start - 1])\n distance_covered = check_points[start - 1]\n tank = tank_size\n start += 1\n return shortest\n","repo_name":"nikolovv97/HackBulgariaPython101","sub_path":"week1/second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"35982131031","text":"from django.contrib import admin\nfrom django.contrib.flatpages.models import FlatPage\nfrom django import forms\n\nfrom preferences.admin import PreferencesAdmin\n\nfrom ckeditor.widgets import CKEditorWidget\n\nfrom guinnessnigeria import models\n\n\nclass FlatPageAdminForm(forms.ModelForm):\n content = forms.CharField(widget=CKEditorWidget())\n\n class Meta:\n model = FlatPage\n\n\nclass FlatPageAdmin(admin.ModelAdmin):\n form = FlatPageAdminForm\n\ntry:\n admin.site.unregister(FlatPage)\nexcept:\n pass\n\n\nclass FrontPageBannerAdmin(admin.ModelAdmin):\n fieldsets = (\n ('', {\n 'fields': ('image', 'state', 'description', 'url', 'order')\n }),\n )\n\n list_display = [\n 'order',\n 'image',\n 'url',\n 'state',\n ]\n\nadmin.site.register(FlatPage, FlatPageAdmin)\nadmin.site.register(models.SitePreferences, PreferencesAdmin)\nadmin.site.register(models.FrontPageBanner, FrontPageBannerAdmin)\nadmin.site.register(models.SocialMedia)\n\n\n# # Unregister irrelevant models\n\n# from photologue import models as photologue_models # noqa\n\n# admin.site.unregister(photologue_models.Gallery)\n# admin.site.unregister(photologue_models.GalleryUpload)\n# admin.site.unregister(photologue_models.PhotoEffect)\n# admin.site.unregister(photologue_models.Photo)\n# admin.site.unregister(photologue_models.Watermark)\n\n# # from django_evolution import models as evolution_models\n\n# # admin.site.unregister(evolution_models.Evolution)\n# # admin.site.unregister(evolution_models.Version)\n\n# from unobase import models as unobase_models # noqa\n\n# admin.site.unregister(unobase_models.DefaultImage)\n# admin.site.unregister(unobase_models.TagModel)\n\n# from unobase.tagging import models as tagging_models # noqa\n\n# admin.site.unregister(tagging_models.Tag)\n","repo_name":"jojaycool/Tuteria-Application","sub_path":"guinnessnigeria/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"6830073722","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport socket\n\nHOST = 'localhost'\nPORT = 8080\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.settimeout(10)\ntry:\n s.connect((HOST, PORT))\n\n try:\n while True:\n outdata = input('please input message: ')\n if not outdata:\n break\n print('send: ' + outdata)\n s.send(outdata.encode())\n \n indata = s.recv(1024)\n if len(indata) == 0: # connection closed\n s.close()\n print('server closed connection.')\n break\n print('recv: ' + indata.decode())\n \n except KeyboardInterrupt:\n print('client stop')\n \nexcept ConnectionRefusedError as error:\n print(\"ERROR\",error)\n\n","repo_name":"coolandy345/time_server","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"72279546841","text":"\"\"\"empty message\n\nRevision ID: 340c49be93ec\nRevises: e565f5e6539d\nCreate Date: 2023-10-17 10:37:08.624030\n\n\"\"\"\nfrom alembic 
import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"340c49be93ec\"\ndown_revision = \"e565f5e6539d\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n \"pr_commit_association\",\n sa.Column(\"pull_request_number\", sa.Integer(), nullable=False),\n sa.Column(\"commit_sha\", sa.String(length=40), nullable=False),\n sa.Column(\"order\", sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(\n [\"commit_sha\"],\n [\"commit.sha\"],\n name=op.f(\"fk_pr_commit_association_commit_sha_commit\"),\n ),\n sa.ForeignKeyConstraint(\n [\"pull_request_number\"],\n [\"pull_request.number\"],\n name=op.f(\"fk_pr_commit_association_pull_request_number_pull_request\"),\n ),\n sa.PrimaryKeyConstraint(\n \"pull_request_number\", \"commit_sha\", name=op.f(\"pk_pr_commit_association\")\n ),\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table(\"pr_commit_association\")\n # ### end Alembic commands ###\n","repo_name":"paulgessinger/apogee","sub_path":"migrations/versions/340c49be93ec_.py","file_name":"340c49be93ec_.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"40756655067","text":"from django.urls import path\nfrom inventory import views as iviews\n\nurlpatterns = [\n path('', iviews.inventory_home, name='trip_inventory-home'),\n path('company/', iviews.company, name='trip_inventory-company'),\n path('out/', iviews.out, name='trip_inventory-out'),\n path('personal/', iviews.personal, name='trip_inventory-personal'),\n\n # add views for simpleisbetter... dynamic dropdown method\n path('fixture/', iviews.FixtureListView.as_view(), name='fixture-list'),\n path('fixture/add/', iviews.FixtureCreateView.as_view(), name='fixture-add'),\n path('fixture//', iviews.FixtureUpdateView.as_view(),\n name='fixture_update'),\n path('ajax/load-fixture-types/', iviews.load_fixture_types,\n name='ajax_load_fixture_types'),\n path('ajax/load-source-types/', iviews.load_source_types,\n name='ajax_load_source_types'),\n]\n","repo_name":"robotlightsyou/trip","sub_path":"tripcore/inventory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"74397143961","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nimport logging\nimport cherrypy\n\nfrom . import ApiController, RESTController, \\\n allow_empty_body\nfrom .. 
import mgr\nfrom ..exceptions import DashboardException\nfrom ..services.auth import AuthManager, JwtManager\n\n\nlogger = logging.getLogger('controllers.auth')\n\n\n@ApiController('/auth', secure=False)\nclass Auth(RESTController):\n    \"\"\"\n    Provides authentication and returns a JWT token.\n    \"\"\"\n\n    def create(self, username, password):\n        user_data = AuthManager.authenticate(username, password)\n        user_perms, pwd_expiration_date, pwd_update_required = None, None, None\n        if user_data:\n            user_perms = user_data.get('permissions')\n            pwd_expiration_date = user_data.get('pwdExpirationDate', None)\n            pwd_update_required = user_data.get('pwdUpdateRequired', False)\n\n        if user_perms is not None:\n            logger.debug('Login successful')\n            token = JwtManager.gen_token(username)\n            token = token.decode('utf-8')\n            cherrypy.response.headers['Authorization'] = \"Bearer: {}\".format(token)\n            return {\n                'token': token,\n                'username': username,\n                'permissions': user_perms,\n                'pwdExpirationDate': pwd_expiration_date,\n                'sso': mgr.SSO_DB.protocol == 'saml2',\n                'pwdUpdateRequired': pwd_update_required\n            }\n\n        logger.debug('Login failed')\n        raise DashboardException(msg='Invalid credentials',\n                                 code='invalid_credentials',\n                                 component='auth')\n\n    @RESTController.Collection('POST')\n    @allow_empty_body\n    def logout(self):\n        logger.debug('Logout successful')\n        token = JwtManager.get_token_from_header()\n        JwtManager.blacklist_token(token)\n        redirect_url = '#/login'\n        if mgr.SSO_DB.protocol == 'saml2':\n            redirect_url = 'auth/saml2/slo'\n        return {\n            'redirect_url': redirect_url\n        }\n\n    def _get_login_url(self):\n        if mgr.SSO_DB.protocol == 'saml2':\n            return 'auth/saml2/login'\n        return '#/login'\n\n    @RESTController.Collection('POST')\n    def check(self, token):\n        if token:\n            user = JwtManager.get_user(token)\n            if user:\n                return {\n                    'username': user.username,\n                    'permissions': user.permissions_dict(),\n                    'sso': mgr.SSO_DB.protocol == 'saml2',\n                    'pwdUpdateRequired': user.pwd_update_required\n                }\n        return {\n            'login_url': self._get_login_url(),\n        }\n","repo_name":"denise813/storage-ceph","sub_path":"src/pybind/mgr/dashboard/controllers/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"87"} +{"seq_id":"33581082760","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis module contains a method that performs a valid\nconvolution on grayscale images with \"same\" padding.\n\"\"\"\n\nimport numpy as np\n\n\ndef convolve_grayscale_same(images, kernel):\n    \"\"\"\n    Perform a valid convolution on grayscale images with \"same\" padding.\n\n    Args:\n        images (numpy.ndarray): Input images with shape (m, h, w).\n            m: Number of images.\n            h: Height in pixels of the images.\n            w: Width in pixels of the images.\n        kernel (numpy.ndarray): Convolution kernel with shape (kh, kw).\n            kh: Height of the kernel.\n            kw: Width of the kernel.\n\n    Returns:\n        numpy.ndarray: Convolved images with shape (m, hm, wm).\n            m: Number of images.\n            hm: Height of the convolved images.\n            wm: Width of the convolved images.\n    \"\"\"\n    m, h, w = images.shape\n    kh, kw = kernel.shape\n\n    # Calculate the padding required to maintain the same output size\n    # padding_h = kh // 2\n    # padding_w = kw // 2\n    # padded_images = np.pad(\n    #     images, ((0, 0), (padding_h, padding_h), (padding_w, padding_w)),\n    #     mode='constant')\n\n    # Perform convolution\n    # convolved_images = np.zeros((m, h, w))\n    # for i in range(m):\n    #     for j in range(h):\n    #         
for k in range(w):\n # patch = padded_images[i, j:j + kh, k:k + kw]\n # convolved_images[i, j, k] = np.sum(patch * kernel)\n # print(convolved_images)\n\n # return convolved_images\n\n kh, kw = kernel.shape\n m, hm, wm = images.shape\n ph = int(kh / 2)\n pw = int(kw / 2)\n padded = np.pad(images, ((0, 0), (ph, ph), (pw, pw)), 'constant')\n convoluted = np.zeros((m, hm, wm))\n for h in range(hm):\n for w in range(wm):\n square = padded[:, h: h + kh, w: w + kw]\n insert = np.sum(square * kernel, axis=1).sum(axis=1)\n convoluted[:, h, w] = insert\n return convoluted\n","repo_name":"Husnafazal/alu-machine_learning","sub_path":"math/convolutions_and_pooling/1-convolve_grayscale_same.py","file_name":"1-convolve_grayscale_same.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"30667613627","text":"from dataclasses import fields\nfrom pathlib import Path\nfrom typing import Any, Callable, Tuple\nimport functools\n\nfrom .dict_with_index import DictWithIndex\n\nfrom .token import InvalidKeywordError, MissingKeywordError, Tokens, UnknownTokenError\n\nfrom ..model.compu_methods import (\n A2LCompuMethod,\n A2LCompuMethodFormula,\n A2LCompuMethodLinear,\n A2LCompuMethodRational,\n A2LCompuMethodTableInterpolation,\n A2LCompuMethodTableNoInterpolation,\n A2LCompuMethodVerbalTable,\n)\n\nfrom .util import (\n Lexer,\n add_key_values,\n is_number,\n parse_list_of_numbers,\n parse_members,\n parse_number,\n parse_string,\n parse_with_lexer,\n)\nfrom ..model.model import (\n A2LAnnotation,\n A2LBlob,\n A2LCompuTab,\n A2LCompuVTab,\n A2LCompuVTabRange,\n A2LInstance,\n A2LModCommon,\n A2LRecordLayout,\n A2LRecordLayoutAxisPts,\n A2LRecordLayoutNoAxisPts,\n A2LStructure,\n A2LStructureComponent,\n A2LTransformer,\n A2lFncValues,\n A2lLRescaleAxis,\n ByteOrder,\n)\nfrom ..model.characteristic_model import (\n A2LAxisDescription,\n A2LAxisDescriptionComAxis,\n A2LAxisDescriptionCurveAxis,\n A2LAxisDescriptionFixAxis,\n A2LAxisDescriptionResAxis,\n A2LAxisPts,\n A2LCharacteristic,\n A2LCharacteristicArray,\n A2LCharacteristicCube4,\n A2LCharacteristicCuboid,\n A2LCharacteristicCurve,\n A2LCharacteristicMap,\n A2LCharacteristicTypedef,\n A2LCharacteristicValue,\n A2LCharacteristicAscii,\n A2LMeasurement,\n A2LTypedefAxis,\n DependentCharacteristic,\n VirtualCharacteristic,\n VirtualMeasurement,\n)\n\nfrom ..model.project_model import (\n A2ML,\n A2LFunction,\n A2LGroup,\n A2LHeader,\n A2LModule,\n A2LProject,\n A2lFile,\n)\nfrom ..model.mod_par_model import (\n A2LIfData,\n A2LMemorySegment,\n A2LModPar,\n)\n\n\ndef a2ml(tokens: Tokens) -> Tuple[dict, Tokens]:\n content = tokens.return_tokens_until(\"/end A2ML\")\n content = \"\".join(content)\n return {\"a2ml\": [A2ML(content)]}, tokens\n\n\ndef project(tokens: Tokens) -> Tuple[dict, Tokens]:\n if tokens[0] != \"PROJECT\":\n raise Exception(\"PROJECT expected, got \" + tokens[0] + \"\")\n\n params = {}\n lexer: Lexer = {\n \"/begin\": lambda x: ({}, x[1:]),\n \"HEADER\": header,\n \"MODULE\": module,\n }\n\n params[\"name\"] = tokens[1]\n params[\"description\"], tokens = parse_string(tokens[2:])\n\n tokens = parse_with_lexer(lexer=lexer, name=\"PROJECT\", tokens=tokens, params=params)\n return (\n {\"project\": A2LProject(**params)},\n tokens[2:],\n )\n\n\ndef header(tokens: Tokens) -> Tuple[dict, Tokens]:\n if tokens[0] != \"HEADER\":\n raise Exception(\"HEADER expected\")\n tokens = tokens[1:]\n params = {}\n params[\"description\"], tokens = 
parse_string(tokens)\n\n def version(tokens: Tokens) -> Tuple[dict, Tokens]:\n version, tokens = parse_string(tokens[1:])\n return {\"version\": version}, tokens\n\n lexer: Lexer = {\n \"VERSION\": version,\n \"PROJECT_NO\": lambda x: ({\"project_number\": x[1]}, x[2:]),\n }\n tokens = parse_with_lexer(lexer=lexer, name=\"HEADER\", tokens=tokens, params=params)\n return {\"header\": A2LHeader(**params)}, tokens\n\n\ndef group(tokens: Tokens) -> Tuple[dict, Tokens]:\n if tokens[0] != \"GROUP\":\n raise Exception(\"GROUP expected, got \" + tokens[0])\n tokens = tokens[1:]\n\n params = {}\n params[\"name\"] = tokens[0]\n params[\"description\"], tokens = parse_string(tokens[1:])\n\n lexer = {\n \"ROOT\": lambda x: ({\"root\": True}, x[1:]),\n \"/begin\": lambda x: ({}, x[1:]),\n \"SUB_GROUP\": functools.partial(\n parse_members, field=\"sub_groups\", name=\"SUB_GROUP\"\n ),\n \"REF_CHARACTERISTIC\": functools.partial(\n parse_members, field=\"characteristics\", name=\"REF_CHARACTERISTIC\"\n ),\n \"REF_MEASUREMENT\": functools.partial(\n parse_members, field=\"measurements\", name=\"REF_MEASUREMENT\"\n ),\n \"FUNCTION_LIST\": functools.partial(\n parse_members, field=\"function_lists\", name=\"FUNCTION_LIST\"\n ),\n }\n\n tokens = parse_with_lexer(lexer=lexer, name=\"GROUP\", tokens=tokens, params=params)\n return {\"groups\": [A2LGroup(**params)]}, tokens\n\n\ndef transformer(tokens: Tokens) -> Tuple[dict, Tokens]:\n if tokens[0] != \"TRANSFORMER\":\n raise Exception(\"TRANSFORMER expected, got \" + tokens[0])\n tokens = tokens[1:]\n\n params = {}\n params[\"name\"] = tokens[0]\n params[\"version\"], tokens = parse_string(tokens[1:])\n params[\"name_32bit_dll\"], tokens = parse_string(tokens)\n params[\"name_64bit_dll\"], tokens = parse_string(tokens)\n params[\"timeout_in_ms\"] = parse_number(tokens.get_keyword(0))\n params[\"event\"] = tokens[1]\n params[\"reverse_transformer\"] = tokens[2]\n tokens = tokens[3:]\n lexer = {\n \"/begin\": lambda x: ({}, x[1:]),\n \"TRANSFORMER_IN_OBJECTS\": functools.partial(\n parse_members, field=\"in_objects\", name=\"TRANSFORMER_IN_OBJECTS\"\n ),\n \"TRANSFORMER_OUT_OBJECTS\": functools.partial(\n parse_members, field=\"out_objects\", name=\"TRANSFORMER_OUT_OBJECTS\"\n ),\n }\n tokens = parse_with_lexer(\n lexer=lexer, name=\"TRANSFORMER\", tokens=tokens, params=params\n )\n return {\"transformers\": [A2LTransformer(**params)]}, tokens\n\n\ndef blob(tokens: Tokens) -> Tuple[dict, Tokens]:\n if tokens[0] != \"BLOB\":\n raise Exception(\"BLOB expected, got \" + tokens[0])\n tokens = tokens[1:]\n\n params = {}\n params[\"name\"] = tokens[0]\n params[\"description\"], tokens = parse_string(tokens[1:])\n params[\"ecu_address\"] = parse_number(tokens.get_keyword(0))\n params[\"number_of_bytes\"] = parse_number(tokens.get_keyword(1))\n tokens = tokens[2:]\n lexer = {\n \"CALIBRATION_ACCESS\": lambda x: ({\"calibration_access\": x[1]}, x[2:]),\n }\n tokens = parse_with_lexer(lexer=lexer, name=\"BLOB\", tokens=tokens, params=params)\n return {\"blobs\": [A2LBlob(**params)]}, tokens\n\n\ndef typedef_structure(tokens: Tokens) -> Tuple[dict, Tokens]:\n if tokens[0] != \"TYPEDEF_STRUCTURE\":\n raise Exception(\"TYPEDEF_STRUCTURE expected, got \" + tokens[0])\n tokens = tokens[1:]\n\n params = {}\n params[\"name\"] = tokens[0]\n params[\"description\"], tokens = parse_string(tokens[1:])\n params[\"size\"] = parse_number(tokens.get_keyword(0))\n tokens = tokens[1:]\n\n def structure_component(tokens: Tokens) -> Tuple[dict, Tokens]:\n params = {}\n params[\"name\"] = 
tokens[0]\n params[\"datatype\"] = tokens[1]\n params[\"offset\"] = parse_number(tokens.get_keyword(2))\n if tokens[3] == \"MATRIX_DIM\":\n params2, tokens = parse_matrix_dim(tokens[3:])\n params.update(params2)\n else:\n tokens = tokens[3:]\n return {\"components\": [A2LStructureComponent(**params)]}, tokens[2:]\n\n lexer = {\n \"/begin\": lambda x: ({}, x[1:]),\n \"STRUCTURE_COMPONENT\": lambda x: structure_component(x[1:]),\n }\n tokens = parse_with_lexer(\n lexer=lexer, name=\"TYPEDEF_STRUCTURE\", tokens=tokens, params=params\n )\n return {\"typedef_structures\": [A2LStructure(**params)]}, tokens\n\n\ndef typedef_axis(tokens: Tokens) -> Tuple[dict, Tokens]:\n if tokens[0] != \"TYPEDEF_AXIS\":\n raise Exception(\"TYPEDEF_AXIS expected, got \" + tokens[0])\n tokens = tokens[1:]\n params = {}\n params[\"name\"] = tokens[0]\n params[\"description\"], tokens = parse_string(tokens[1:])\n params[\"measurement\"] = tokens[0]\n params[\"record_layout\"] = tokens[1]\n params[\"max_diff\"] = parse_number(tokens.get_keyword(2))\n params[\"compu_method\"] = tokens[3]\n params[\"max_number_of_axis_points\"] = parse_number(tokens.get_keyword(4))\n params[\"lower_limit\"] = parse_number(tokens.get_keyword(5))\n params[\"upper_limit\"] = parse_number(tokens.get_keyword(6))\n tokens = tokens[9:]\n return {\"typedef_axes\": [A2LTypedefAxis(**params)]}, tokens\n\n\ndef module(tokens: Tokens) -> Tuple[dict, Tokens]:\n if tokens[0] != \"MODULE\":\n raise Exception(\"MODULE expected, got \" + tokens[0])\n\n params = DictWithIndex()\n params[\"name\"] = tokens[1]\n params[\"description\"], tokens = parse_string(tokens[2:])\n\n lexer: Lexer = {\n \"/begin\": lambda x: ({}, x[1:]),\n \"MOD_PAR\": mod_par,\n \"MOD_COMMON\": mod_common,\n \"IF_DATA\": if_data,\n \"COMPU_METHOD\": compu_method,\n \"COMPU_TAB\": compu_tab,\n \"COMPU_VTAB\": compu_vtab,\n \"COMPU_VTAB_RANGE\": compu_vtab_range,\n \"MEASUREMENT\": measurement,\n \"RECORD_LAYOUT\": record_layout,\n \"CHARACTERISTIC\": characteristic,\n \"FUNCTION\": function_type,\n \"GROUP\": group,\n \"TYPEDEF_CHARACTERISTIC\": typedef_characteristic,\n \"INSTANCE\": instance,\n \"AXIS_PTS\": axis_pts,\n \"TYPEDEF_AXIS\": typedef_axis,\n \"TYPEDEF_STRUCTURE\": typedef_structure,\n \"TRANSFORMER\": transformer,\n \"BLOB\": blob,\n \"A2ML\": a2ml,\n }\n\n tokens = parse_with_lexer(lexer=lexer, name=\"MODULE\", tokens=tokens, params=params)\n return {\"modules\": [A2LModule(**params, global_list=params.global_list)]}, tokens\n\n\ndef if_data(tokens: Tokens) -> Tuple[dict, Tokens]:\n if tokens[0] != \"IF_DATA\":\n raise Exception(\"IF_DATA expected, got \" + tokens[0])\n tokens = tokens[1:]\n\n params = {}\n params[\"name\"] = tokens[0]\n params[\"content\"] = tokens.return_tokens_until(\"/end IF_DATA\")[1:]\n return {\"if_data\": [A2LIfData(**params)]}, tokens\n\n\ndef memory_segment(tokens: Tokens) -> Tuple[dict, Tokens]:\n params = {}\n params[\"name\"] = tokens[1]\n params[\"description\"], tokens = parse_string(tokens[2:])\n params[\"program_type\"] = tokens[0]\n params[\"memory_type\"] = tokens[1]\n params[\"location\"] = tokens[2]\n params[\"address\"] = parse_number(tokens.get_keyword(3))\n params[\"size\"] = parse_number(tokens.get_keyword(4))\n params[\"offsets\"], tokens = parse_list_of_numbers(tokens[5:])\n\n lexer = {\"/begin\": lambda x: ({}, x[1:]), \"IF_DATA\": if_data}\n tokens = parse_with_lexer(\n lexer=lexer, name=\"MEMORY_SEGMENT\", tokens=tokens, params=params\n )\n return {\"memory_segments\": [A2LMemorySegment(**params)]}, tokens\n\n\ndef 
mod_par(tokens: Tokens) -> Tuple[Any, Tokens]:\n if tokens[0] != \"MOD_PAR\":\n raise Exception(\"MOD_PAR expected\")\n\n params = {}\n params[\"description\"], tokens = parse_string(tokens[1:])\n\n def system_constant(tokens: Tokens) -> Tuple[dict, Tokens]:\n name, tokens = parse_string(tokens[1:])\n val, tokens = parse_string(tokens)\n return {\"system_constants\": {name: val}}, tokens\n\n lexer = {\n \"NO_OF_INTERFACES\": lambda x: (\n {\"number_of_interfaces\": parse_number(x.get_keyword(1))},\n x[2:],\n ),\n \"/begin\": lambda x: ({}, x[1:]),\n \"MEMORY_SEGMENT\": memory_segment,\n \"SYSTEM_CONSTANT\": system_constant,\n }\n tokens = parse_with_lexer(lexer=lexer, name=\"MOD_PAR\", tokens=tokens, params=params)\n\n return {\"mod_par\": [A2LModPar(**params)]}, tokens\n\n\ndef mod_common(tokens: Tokens) -> Tuple[dict, Tokens]:\n if tokens[0] != \"MOD_COMMON\":\n raise Exception(\"MOD_COMMON expected\")\n tokens = tokens[1:]\n\n params = {}\n params[\"description\"], tokens = parse_string(tokens)\n lexer = {\n \"DEPOSIT\": lambda x: ({\"deposit\": x[1]}, x[2:]),\n \"BYTE_ORDER\": lambda x: ({\"byte_order\": ByteOrder(x[1])}, x[2:]),\n \"ALIGNMENT_BYTE\": lambda x: (\n {\"alignment_byte\": parse_number(x.get_keyword(1))},\n x[2:],\n ),\n \"ALIGNMENT_WORD\": lambda x: (\n {\"alignment_word\": parse_number(x.get_keyword(1))},\n x[2:],\n ),\n \"ALIGNMENT_LONG\": lambda x: (\n {\"alignment_long\": parse_number(x.get_keyword(1))},\n x[2:],\n ),\n \"ALIGNMENT_FLOAT32_IEEE\": lambda x: (\n {\"alignment_float32_ieee\": parse_number(x.get_keyword(1))},\n x[2:],\n ),\n \"ALIGNMENT_FLOAT64_IEEE\": lambda x: (\n {\"alignment_float64_ieee\": parse_number(x.get_keyword(1))},\n x[2:],\n ),\n }\n tokens = parse_with_lexer(\n lexer=lexer, name=\"MOD_COMMON\", tokens=tokens, params=params\n )\n return {\"mod_common\": [A2LModCommon(**params)]}, tokens\n\n\ndef tab_intp(tokens: Tokens) -> Tuple[Any, Tokens]:\n params = {}\n if tokens[0] == \"COMPU_TAB_REF\":\n params[\"compu_tab_ref\"] = tokens[1]\n tokens = tokens[2:]\n else:\n values = {}\n size = parse_number(tokens.get_keyword(0))\n tokens = tokens[1:]\n for _ in range(size):\n x = parse_number(tokens.get_keyword(0))\n y = parse_number(tokens.get_keyword(1))\n values[x] = y\n tokens = tokens[2:]\n params[\"values\"] = values\n\n if tokens[0] == \"DEFAULT_VALUE_NUMERIC\":\n params[\"default_value\"] = parse_number(tokens.get_keyword(1))\n tokens = tokens[2:]\n\n return params, tokens[2:]\n\n\ndef compu_method(tokens: Tokens) -> Tuple[Any, Tokens]:\n if tokens[0] != \"COMPU_METHOD\":\n raise Exception(\"COMPU_METHOD expected, got \" + tokens[0])\n\n params = {}\n params[\"name\"] = tokens[1]\n params[\"description\"], tokens = parse_string(tokens[2:])\n\n compu_method_types = {\n \"IDENTICAL\": A2LCompuMethod,\n \"LINEAR\": A2LCompuMethodLinear,\n \"RAT_FUNC\": A2LCompuMethodRational,\n \"FORM\": A2LCompuMethodFormula,\n \"TAB_INTP\": A2LCompuMethodTableInterpolation,\n \"TAB_NOINTP\": A2LCompuMethodTableNoInterpolation,\n \"TAB_VERB\": A2LCompuMethodVerbalTable,\n }\n compu_method_type = tokens[0]\n\n params[\"format\"], tokens = parse_string(tokens[1:])\n params[\"unit\"], tokens = parse_string(tokens)\n\n def coeffs(tokens: Tokens) -> Tuple[Any, Tokens]:\n coeffs = []\n coeffs, tokens = parse_list_of_numbers(tokens[1:])\n return {\"coeffs\": coeffs}, tokens\n\n def formula(tokens: Tokens) -> Tuple[Any, Tokens]:\n tokens = tokens[1:]\n formula_inv = None\n formula = None\n while tokens[0] != \"/end\" or tokens[1] != \"FORMULA\":\n if tokens[0] == 
\"FORMULA_INV\":\n formula_inv, tokens = parse_string(tokens[1:])\n else:\n formula, tokens = parse_string(tokens)\n return {\"formula\": formula, \"formula_inv\": formula_inv}, tokens[2:]\n\n if compu_method_type == \"TAB_INTP\" or compu_method_type == \"TAB_NOINTP\":\n params2, tokens = tab_intp(tokens)\n params.update(params2)\n else:\n lexer = {\n \"COEFFS_LINEAR\": coeffs,\n \"COEFFS\": coeffs,\n \"STATUS_STRING_REF\": lambda x: ({\"status_string_ref\": x[1]}, x[2:]),\n \"/begin\": lambda x: ({}, x[1:]),\n \"FORMULA\": formula,\n \"COMPU_TAB_REF\": lambda x: ({\"compu_tab_ref\": x[1]}, x[2:]),\n }\n tokens = parse_with_lexer(\n lexer=lexer, name=\"COMPU_METHOD\", params=params, tokens=tokens\n )\n class_ = compu_method_types[compu_method_type]\n return {\"compu_methods\": [class_(**params)]}, tokens\n\n\ndef compu_tab(tokens: Tokens) -> Tuple[Any, Tokens]:\n if tokens[0] != \"COMPU_TAB\":\n raise Exception(\"COMPU_TAB expected, got \" + tokens[0])\n\n params = {}\n params[\"name\"] = tokens[1]\n params[\"description\"], tokens = parse_string(tokens[2:])\n params[\"table_type\"] = tokens[0]\n\n params2, tokens = tab_intp(tokens[1:])\n params.update(params2)\n\n return {\"compu_tabs\": [A2LCompuTab(**params)]}, tokens\n\n\ndef compu_vtab(tokens: Tokens) -> Tuple[Any, Tokens]:\n if tokens[0] != \"COMPU_VTAB\":\n raise Exception(\"COMPU_VTAB expected, got \" + tokens[0])\n\n params = {}\n params[\"name\"] = tokens[1]\n params[\"description\"], tokens = parse_string(tokens[2:])\n\n if tokens[0] != \"TAB_VERB\":\n raise Exception(\"TAB_VERB expected, got \" + tokens[0])\n\n tokens = tokens[1:]\n size = parse_number(tokens.get_keyword(0))\n tokens = tokens[1:]\n values = {}\n for _ in range(size):\n val = parse_number(tokens.get_keyword(0))\n name, tokens = parse_string(tokens[1:])\n values[val] = name\n params[\"values\"] = values\n\n if tokens[0] == \"DEFAULT_VALUE\":\n params[\"default_value\"], tokens = parse_string(tokens[1:])\n\n assert tokens[0] == \"/end\", \"Expected /end, got \" + tokens[0]\n\n return {\"compu_vtabs\": [A2LCompuVTab(**params)]}, tokens[2:]\n\n\ndef compu_vtab_range(tokens: Tokens) -> Tuple[Any, Tokens]:\n if tokens[0] != \"COMPU_VTAB_RANGE\":\n raise Exception(\"COMPU_VTAB_RANGE expected, got \" + tokens[0])\n\n params = {}\n params[\"name\"] = tokens[1]\n params[\"description\"], tokens = parse_string(tokens[2:])\n\n size = parse_number(tokens.get_keyword(0))\n tokens = tokens[1:]\n values = {}\n for _ in range(size):\n min = parse_number(tokens.get_keyword(0))\n max = parse_number(tokens.get_keyword(1))\n name, tokens = parse_string(tokens[2:])\n values[(min, max)] = name\n params[\"values\"] = values\n\n if tokens[0] == \"DEFAULT_VALUE\":\n params[\"default_value\"], tokens = parse_string(tokens[1:])\n\n assert tokens[0] == \"/end\", \"Expected /end, got \" + tokens[0]\n\n return {\"compu_vtab_ranges\": [A2LCompuVTabRange(**params)]}, tokens[2:]\n\n\ndef parse_matrix_dim(tokens: Tokens) -> Tuple[Any, Tokens]:\n if tokens[0] != \"MATRIX_DIM\":\n raise Exception(\"MATRIX_DIM expected, got \" + tokens[0])\n\n dimensions = []\n\n tokens = tokens[1:]\n while is_number(tokens[0]):\n dimensions.append(parse_number(tokens.get_keyword(0)))\n tokens = tokens[1:]\n\n return {\"matrix_dim\": dimensions}, tokens\n\n\ndef measurement(tokens: Tokens) -> Tuple[Any, Tokens]:\n if tokens[0] != \"MEASUREMENT\":\n raise Exception(\"MEASUREMENT expected, got \" + tokens[0])\n\n params = {}\n params[\"name\"] = tokens[1]\n description, tokens = parse_string(tokens[2:])\n 
params[\"description\"] = description\n\n params[\"datatype\"] = tokens[0]\n params[\"compu_method\"] = tokens[1]\n params[\"offset_1\"] = tokens[2]\n params[\"offset_2\"] = tokens[3]\n params[\"min\"] = parse_number(tokens.get_keyword(4))\n params[\"max\"] = parse_number(tokens.get_keyword(5))\n\n tokens = tokens[6:]\n\n def virtual_measurement(tokens: Tokens) -> Tuple[Any, Tokens]:\n tokens = tokens[1:]\n variables = []\n while tokens[0] != \"/end\" or tokens[1] != \"VIRTUAL\":\n variables.append(tokens[0])\n tokens = tokens[1:]\n return {\"virtual\": VirtualMeasurement(variables)}, tokens[2:]\n\n def format(tokens: Tokens) -> Tuple[Any, Tokens]:\n tokens = tokens[1:]\n f, tokens = parse_string(tokens)\n return {\"format\": f}, tokens\n\n lexer = {\n \"EXTENDED_LIMITS\": lambda x: (\n {\n \"extended_min\": parse_number(x.get_keyword(1)),\n \"extended_max\": parse_number(x.get_keyword(2)),\n },\n x[3:],\n ),\n \"FORMAT\": format,\n \"DISPLAY_IDENTIFIER\": lambda x: ({\"display_identifier\": x[1]}, x[2:]),\n \"BIT_MASK\": lambda x: ({\"bitmask\": parse_number(x.get_keyword(1))}, x[2:]),\n \"PHYS_UNIT\": lambda x: ({\"phys_unit\": x[1]}, x[2:]),\n \"ECU_ADDRESS_EXTENSION\": lambda x: (\n {\"ecu_address_extension\": parse_number(x.get_keyword(1))},\n x[2:],\n ),\n \"DISCRETE\": lambda x: ({\"discrete\": True}, x[1:]),\n \"/begin\": lambda x: ({}, x[1:]),\n \"MATRIX_DIM\": lambda x: parse_matrix_dim(x),\n \"ANNOTATION\": lambda x: parse_annotation(x),\n \"ECU_ADDRESS\": lambda x: (\n {\"ecu_address\": parse_number(x.get_keyword(1))},\n x[2:],\n ),\n \"IF_DATA\": if_data,\n \"VIRTUAL\": lambda x: virtual_measurement(x),\n }\n\n tokens = parse_with_lexer(\n lexer=lexer, name=\"MEASUREMENT\", tokens=tokens, params=params\n )\n\n return {\"measurements\": [A2LMeasurement(**params)]}, tokens\n\n\ndef record_layout(tokens: Tokens) -> Tuple[Any, Tokens]:\n if tokens[0] != \"RECORD_LAYOUT\":\n raise Exception(\"RECORD_LAYOUT expected, got \" + tokens[0])\n\n params = {}\n params[\"name\"] = tokens[1]\n tokens = tokens[2:]\n\n def fnc_value(tokens: Tokens) -> Tuple[Any, Tokens]:\n params = {}\n params[\"position\"] = parse_number(tokens.get_keyword(1))\n params[\"datatype\"] = tokens[2]\n params[\"index_mode\"] = tokens[3]\n params[\"addressing_mode\"] = tokens[4]\n return {\"fields\": [A2lFncValues(**params)]}, tokens[5:]\n\n def axis_value(tokens: Tokens) -> Tuple[Any, Tokens]:\n params = {}\n params[\"axis\"] = tokens[0]\n params[\"position\"] = parse_number(tokens.get_keyword(1))\n params[\"datatype\"] = tokens[2]\n params[\"index_mode\"] = tokens[3]\n params[\"addressing_mode\"] = tokens[4]\n return {\"fields\": [A2LRecordLayoutAxisPts(**params)]}, tokens[5:]\n\n def rescale_axis(tokens: Tokens) -> Tuple[Any, Tokens]:\n params = {}\n params[\"axis\"] = tokens[0]\n params[\"position\"] = parse_number(tokens.get_keyword(1))\n params[\"datatype\"] = tokens[2]\n params[\"map_position\"] = parse_number(tokens.get_keyword(3))\n params[\"index_mode\"] = tokens[4]\n params[\"addressing_mode\"] = tokens[5]\n return {\"fields\": [A2lLRescaleAxis(**params)]}, tokens[6:]\n\n def no_axis_value(tokens: Tokens) -> Tuple[Any, Tokens]:\n params = {}\n params[\"axis\"] = tokens[0]\n params[\"position\"] = parse_number(tokens.get_keyword(1))\n params[\"datatype\"] = tokens[2]\n return {\"fields\": [A2LRecordLayoutNoAxisPts(**params)]}, tokens[3:]\n\n lexer = {\n \"FNC_VALUES\": fnc_value,\n \"AXIS_PTS_X\": axis_value,\n \"AXIS_PTS_Y\": axis_value,\n \"AXIS_PTS_Z\": axis_value,\n \"AXIS_PTS_4\": axis_value,\n 
\"AXIS_PTS_5\": axis_value,\n \"AXIS_RESCALE_X\": axis_value,\n \"NO_AXIS_PTS_X\": no_axis_value,\n \"NO_AXIS_PTS_Y\": no_axis_value,\n \"NO_AXIS_PTS_Z\": no_axis_value,\n \"NO_AXIS_PTS_4\": no_axis_value,\n \"NO_AXIS_PTS_5\": no_axis_value,\n \"NO_RESCALE_X\": no_axis_value,\n \"RESERVED\": no_axis_value,\n \"AXIS_RESCALE_X\": rescale_axis,\n }\n\n tokens = parse_with_lexer(\n lexer=lexer, name=\"RECORD_LAYOUT\", tokens=tokens, params=params\n )\n\n return {\"record_layouts\": [A2LRecordLayout(**params)]}, tokens\n\n\ndef parse_annotation(tokens: Tokens) -> Tuple[Any, Tokens]:\n if tokens[0] != \"ANNOTATION\":\n raise Exception(\"ANNOTATION expected, got \" + tokens[0])\n\n tokens = tokens[1:]\n\n params = {}\n\n def parse_s(tokens: Tokens, field: str) -> Tuple[dict, Tokens]:\n val, tokens = parse_string(tokens)\n return {field: val}, tokens\n\n def parse_annotation_text(tokens: Tokens) -> Tuple[dict, Tokens]:\n text = None\n while tokens[0] != \"/end\" or tokens[1] != \"ANNOTATION_TEXT\":\n text, tokens = parse_string(tokens)\n return {\"text\": text}, tokens[2:]\n\n lexer = {\n \"ANNOTATION_LABEL\": lambda x: functools.partial(parse_s, field=\"label\")(x[1:]),\n \"ANNOTATION_ORIGIN\": lambda x: functools.partial(parse_s, field=\"origin\")(\n x[1:]\n ),\n \"ANNOTATION_TEXT\": lambda x: parse_annotation_text(x[1:]),\n \"/begin\": lambda x: ({}, x[1:]),\n }\n\n tokens = parse_with_lexer(\n lexer=lexer, name=\"ANNOTATION\", tokens=tokens, params=params\n )\n return {\"annotations\": [A2LAnnotation(**params)]}, tokens\n\n\ndef parse_axis_descr(tokens: Tokens) -> Tuple[dict, Tokens]:\n if tokens[0] != \"AXIS_DESCR\":\n raise Exception(\"AXIS_DESCR expected, got \" + tokens[0])\n\n tokens = tokens[1:]\n\n axis_types = {\n \"STD_AXIS\": A2LAxisDescription,\n \"FIX_AXIS\": A2LAxisDescriptionFixAxis,\n \"COM_AXIS\": A2LAxisDescriptionComAxis,\n \"CURVE_AXIS\": A2LAxisDescriptionCurveAxis,\n \"RES_AXIS\": A2LAxisDescriptionResAxis,\n }\n if tokens[0] not in axis_types:\n raise Exception(\"Unknown axis type \" + tokens[0])\n\n axis_type = axis_types[tokens[0]]\n params = {}\n params[\"measurement\"] = tokens[1]\n params[\"compu_method\"] = tokens[2]\n params[\"size\"] = parse_number(tokens.get_keyword(3))\n params[\"min\"] = parse_number(tokens.get_keyword(4))\n params[\"max\"] = parse_number(tokens.get_keyword(5))\n\n tokens = tokens[6:]\n\n def fix_axis_par_dist(tokens: Tokens) -> Tuple[Any, Tokens]:\n numbers, tokens = parse_list_of_numbers(tokens[1:])\n return {\"par_dist\": numbers}, tokens\n\n def fix_axis_par_list(tokens: Tokens) -> Tuple[Any, Tokens]:\n numbers, tokens = parse_list_of_numbers(tokens[1:])\n return {\"par_list\": numbers}, tokens[2:]\n\n lexer = {\n \"AXIS_PTS_REF\": lambda x: ({\"axis_pts_ref\": x[1]}, x[2:]),\n \"CURVE_AXIS_REF\": lambda x: ({\"curve_axis_ref\": x[1]}, x[2:]),\n \"MONOTONY\": lambda x: ({\"monotony\": x[1]}, x[2:]),\n \"/begin\": lambda x: ({}, x[1:]),\n \"FIX_AXIS_PAR_DIST\": fix_axis_par_dist,\n \"FIX_AXIS_PAR_LIST\": fix_axis_par_list,\n }\n\n tokens = parse_with_lexer(\n lexer=lexer, name=\"AXIS_DESCR\", tokens=tokens, params=params\n )\n return {\"axis_descriptions\": [axis_type(**params)]}, tokens\n\n\ndef characteristic(tokens: Tokens) -> Tuple[Any, Tokens]:\n type_token = tokens.get(0)\n\n params = {}\n params[\"name\"] = tokens[1]\n params[\"description\"], tokens = parse_string(tokens[2:])\n\n characteristic_type = tokens[0]\n\n char_types = {\n \"VALUE\": (A2LCharacteristicValue, []),\n \"VAL_BLK\": (A2LCharacteristicArray, [\"MATRIX_DIM\"]),\n 
\"ASCII\": (A2LCharacteristicAscii, [\"NUMBER\"]),\n \"CURVE\": (A2LCharacteristicCurve, [\"AXIS_DESCR\"]),\n \"MAP\": (A2LCharacteristicMap, [\"AXIS_DESCR\"]),\n \"CUBOID\": (A2LCharacteristicCuboid, [\"AXIS_DESCR\"]),\n \"CUBE_4\": (A2LCharacteristicCube4, [\"AXIS_DESCR\"]),\n }\n\n if not characteristic_type in char_types:\n raise UnknownTokenError(\n tokens.get(0),\n expected=char_types.keys(),\n )\n\n char_type, expected_keywords = char_types[characteristic_type]\n\n params[\"ecu_address\"] = parse_number(tokens.get_keyword(1))\n params[\"record_layout\"] = tokens[2]\n params[\"maxdiff\"] = parse_number(tokens.get_keyword(3))\n params[\"compu_method\"] = tokens[4]\n params[\"min\"] = parse_number(tokens.get_keyword(5))\n params[\"max\"] = parse_number(tokens.get_keyword(6))\n\n tokens = tokens[7:]\n\n def dependent_characteristic(tokens: Tokens) -> Tuple[Any, Tokens]:\n tokens = tokens[1:]\n formula, tokens = parse_string(tokens)\n variables = []\n while tokens[0] != \"/end\" or tokens[1] != \"DEPENDENT_CHARACTERISTIC\":\n variables.append(tokens[0])\n tokens = tokens[1:]\n return {\n \"dependent_characteristic\": DependentCharacteristic(formula, variables)\n }, tokens[2:]\n\n def virtual_characteristic(tokens: Tokens) -> Tuple[Any, Tokens]:\n tokens = tokens[1:]\n formula, tokens = parse_string(tokens)\n variables = []\n while tokens[0] != \"/end\" or tokens[1] != \"VIRTUAL_CHARACTERISTIC\":\n variables.append(tokens[0])\n tokens = tokens[1:]\n return {\n \"virtual_characteristic\": VirtualCharacteristic(formula, variables)\n }, tokens[2:]\n\n lexer = {\n \"EXTENDED_LIMITS\": lambda x: (\n {\n \"extended_min\": parse_number(x.get_keyword(1)),\n \"extended_max\": parse_number(x.get_keyword(2)),\n },\n x[3:],\n ),\n \"FORMAT\": lambda x: ({\"format\": x[1]}, x[2:]),\n \"DISPLAY_IDENTIFIER\": lambda x: ({\"display_identifier\": x[1]}, x[2:]),\n \"BIT_MASK\": lambda x: ({\"bitmask\": parse_number(x.get_keyword(1))}, x[2:]),\n \"NUMBER\": lambda x: ({\"size\": parse_number(x.get_keyword(1))}, x[2:]),\n \"PHYS_UNIT\": lambda x: ({\"phys_unit\": x[1]}, x[2:]),\n \"ECU_ADDRESS_EXTENSION\": lambda x: (\n {\"ecu_address_extension\": parse_number(x.get_keyword(1))},\n x[2:],\n ),\n \"DISCRETE\": lambda x: ({\"discrete\": True}, x[1:]),\n \"/begin\": lambda x: ({}, x[1:]),\n \"MATRIX_DIM\": lambda x: parse_matrix_dim(x),\n \"AXIS_DESCR\": lambda x: parse_axis_descr(x),\n \"ANNOTATION\": lambda x: parse_annotation(x),\n \"DEPENDENT_CHARACTERISTIC\": lambda x: dependent_characteristic(x),\n \"VIRTUAL_CHARACTERISTIC\": lambda x: virtual_characteristic(x),\n \"MODEL_LINK\": lambda x: ({\"model_link\": x[1]}, x[2:]),\n }\n\n found_keywords = []\n\n tokens = parse_with_lexer(\n lexer=lexer,\n name=\"CHARACTERISTIC\",\n tokens=tokens,\n params=params,\n found_keywords=found_keywords,\n )\n\n for e in expected_keywords:\n if e not in found_keywords:\n raise MissingKeywordError(e, \"CHARACTERISTIC\", type_token)\n for v in char_types.values():\n _, keywords = v\n for k in keywords:\n if k not in expected_keywords and k in found_keywords:\n raise InvalidKeywordError(k, \"CHARACTERISTIC\", type_token)\n\n field_names = [field.name for field in fields(char_type)]\n char_type_params = {k: v for k, v in params.items() if k in field_names}\n params = {k: v for k, v in params.items() if k not in field_names}\n params[\"typedef\"] = char_type(**char_type_params)\n\n return {\"characteristics\": [A2LCharacteristic(**params)]}, tokens\n\n\ndef typedef_characteristic(tokens: Tokens) -> Tuple[Any, Tokens]:\n if 
tokens[0] != \"TYPEDEF_CHARACTERISTIC\":\n raise Exception(\"TYPEDEF_CHARACTERISTIC expected, got \" + tokens[0])\n\n tokens = tokens[1:]\n\n params = {}\n params[\"name\"] = tokens[0]\n params[\"description\"], tokens = parse_string(tokens[1:])\n\n characteristic_type = tokens[0]\n\n char_types = {\n \"VALUE\": (A2LCharacteristicValue, []),\n \"VAL_BLK\": (A2LCharacteristicArray, [\"MATRIX_DIM\"]),\n \"ASCII\": (A2LCharacteristicAscii, [\"NUMBER\"]),\n \"CURVE\": (A2LCharacteristicCurve, [\"AXIS_DESCR\"]),\n \"MAP\": (A2LCharacteristicMap, [\"AXIS_DESCR\"]),\n \"CUBOID\": (A2LCharacteristicCuboid, [\"AXIS_DESCR\"]),\n \"CUBE_4\": (A2LCharacteristicCube4, [\"AXIS_DESCR\"]),\n }\n\n if not characteristic_type in char_types:\n raise UnknownTokenError(\n tokens.get(0),\n expected=\"VALUE | VAL_BLK | ASCII | CURVE | MAP | CUBOID | CUBE_4\",\n )\n\n char_type, expected_keywords = char_types[characteristic_type]\n\n params[\"record_layout\"] = tokens[1]\n params[\"maxdiff\"] = parse_number(tokens.get_keyword(2))\n params[\"compu_method\"] = tokens[3]\n params[\"min\"] = parse_number(tokens.get_keyword(4))\n params[\"max\"] = parse_number(tokens.get_keyword(5))\n\n tokens = tokens[6:]\n\n lexer = {\n \"EXTENDED_LIMITS\": lambda x: (\n {\n \"extended_min\": parse_number(x.get_keyword(1)),\n \"extended_max\": parse_number(x.get_keyword(2)),\n },\n x[3:],\n ),\n \"FORMAT\": lambda x: ({\"format\": x[1]}, x[2:]),\n \"DISPLAY_IDENTIFIER\": lambda x: ({\"display_identifier\": x[1]}, x[2:]),\n \"BIT_MASK\": lambda x: ({\"bitmask\": parse_number(x.get_keyword(1))}, x[2:]),\n \"NUMBER\": lambda x: ({\"size\": parse_number(x.get_keyword(1))}, x[2:]),\n \"PHYS_UNIT\": lambda x: ({\"phys_unit\": x[1]}, x[2:]),\n \"DISCRETE\": lambda x: ({\"discrete\": True}, x[1:]),\n \"/begin\": lambda x: ({}, x[1:]),\n \"MATRIX_DIM\": lambda x: parse_matrix_dim(x),\n \"AXIS_DESCR\": lambda x: parse_axis_descr(x),\n \"ANNOTATION\": lambda x: parse_annotation(x),\n }\n\n tokens = parse_with_lexer(\n lexer=lexer, name=\"TYPEDEF_CHARACTERISTIC\", tokens=tokens, params=params\n )\n\n field_names = [field.name for field in fields(char_type)]\n char_type_params = {k: v for k, v in params.items() if k in field_names}\n params = {k: v for k, v in params.items() if k not in field_names}\n params[\"typedef\"] = char_type(**char_type_params)\n\n return {\"characteristics\": [A2LCharacteristicTypedef(**params)]}, tokens\n\n\ndef instance(tokens: Tokens) -> Tuple[Any, Tokens]:\n if tokens[0] != \"INSTANCE\":\n raise Exception(\"INSTANCE expected, got \" + tokens[0])\n tokens = tokens[1:]\n\n params = {}\n params[\"name\"] = tokens[0]\n params[\"description\"], tokens = parse_string(tokens[1:])\n params[\"reference\"] = tokens[0]\n params[\"ecu_address\"] = parse_number(tokens.get_keyword(1))\n lexer = {\n \"MATRIX_DIM\": parse_matrix_dim,\n \"DISPLAY_IDENTIFIER\": lambda x: ({\"display_identifier\": x[1]}, x[2:]),\n }\n tokens = parse_with_lexer(\n lexer=lexer, name=\"INSTANCE\", tokens=tokens[2:], params=params\n )\n\n return {\"instances\": [A2LInstance(**params)]}, tokens\n\n\ndef axis_pts(tokens: Tokens) -> Tuple[Any, Tokens]:\n if tokens[0] != \"AXIS_PTS\":\n raise Exception(\"AXIS_PTS expected, got \" + tokens[0])\n\n tokens = tokens[1:]\n params = {}\n params[\"name\"] = tokens[0]\n params[\"description\"], tokens = parse_string(tokens[1:])\n params[\"ecu_address\"] = parse_number(tokens.get_keyword(0))\n params[\"measurement\"] = tokens[1]\n params[\"record_layout\"] = tokens[2]\n params[\"offset\"] = 
parse_number(tokens.get_keyword(3))\n    params[\"compu_method\"] = tokens[4]\n    params[\"max_number_sample_points\"] = parse_number(tokens.get_keyword(5))\n    params[\"min\"] = parse_number(tokens.get_keyword(6))\n    params[\"max\"] = parse_number(tokens.get_keyword(7))\n    tokens = tokens[8:]\n\n    lexer = {\n        \"DISPLAY_IDENTIFIER\": lambda x: ({\"display_identifier\": x[1]}, x[2:]),\n    }\n    tokens = parse_with_lexer(\n        lexer=lexer, name=\"AXIS_PTS\", tokens=tokens, params=params\n    )\n    return {\"axis_pts\": [A2LAxisPts(**params)]}, tokens\n\n\ndef function_type(tokens: Tokens) -> Tuple[Any, Tokens]:\n    if tokens[0] != \"FUNCTION\":\n        raise Exception(\"FUNCTION expected, got \" + tokens[0])\n    tokens = tokens[1:]\n\n    params = {}\n    params[\"name\"] = tokens[0]\n    params[\"description\"], tokens = parse_string(tokens[1:])\n\n    lexer = {\n        \"/begin\": lambda x: ({}, x[1:]),\n        \"SUB_FUNCTION\": functools.partial(\n            parse_members, field=\"sub_functions\", name=\"SUB_FUNCTION\"\n        ),\n        \"REF_CHARACTERISTIC\": functools.partial(\n            parse_members, field=\"ref_characteristics\", name=\"REF_CHARACTERISTIC\"\n        ),\n        \"DEF_CHARACTERISTIC\": functools.partial(\n            parse_members, field=\"def_characteristics\", name=\"DEF_CHARACTERISTIC\"\n        ),\n        \"IN_MEASUREMENT\": functools.partial(\n            parse_members, field=\"in_measurements\", name=\"IN_MEASUREMENT\"\n        ),\n        \"OUT_MEASUREMENT\": functools.partial(\n            parse_members, field=\"out_measurements\", name=\"OUT_MEASUREMENT\"\n        ),\n        \"LOC_MEASUREMENT\": functools.partial(\n            parse_members, field=\"loc_measurements\", name=\"LOC_MEASUREMENT\"\n        ),\n    }\n\n    tokens = parse_with_lexer(\n        lexer=lexer, name=\"FUNCTION\", tokens=tokens, params=params\n    )\n    return {\"functions\": [A2LFunction(**params)]}, tokens\n\n\ndef assp2_version(tokens: Tokens) -> Tuple[dict, Tokens]:\n    if tokens[0] != \"ASAP2_VERSION\":\n        raise Exception(\"ASAP2_VERSION expected\")\n\n    major = tokens[1]\n    minor = tokens[2]\n\n    return {\"asap2_version\": f\"{major}.{minor}\"}, tokens[3:]\n\n\ndef read_a2l(path: Path) -> A2lFile:\n    tokens = Tokens.from_file(path)\n\n    lexer = {\n        \"ASAP2_VERSION\": assp2_version,\n        \"/begin\": lambda x: ({}, x[1:]),\n        \"PROJECT\": project,\n    }\n\n    params = {}\n    parse_with_lexer(\n        lexer=lexer, tokens=tokens, params=params, end_condition=lambda x: len(x) == 0\n    )\n\n    return A2lFile(**params)\n","repo_name":"oliverbl/pya2ltools","sub_path":"pya2ltools/a2l/reader/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":35723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"40363576062","text":"# -!- coding: utf-8 -!-\r\nimport requests\r\nimport re\r\nimport pandas as pd\r\nimport string\r\nimport jieba\r\n\r\n\r\ndef get_data(cid):\r\n    # analyze the page and fetch the page file\r\n    url = 'https://comment.bilibili.com/{}.xml'.format(cid)\r\n    headers = {\r\n        \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0;Win64;x64) AppleWebKit/537.36(KHTML, likeGecko) Chrome/80.0.3987.163Safari/537.36\"\r\n    }\r\n    response = requests.get(url, headers=headers).content.decode('utf-8')\r\n    return response\r\n\r\ndef parse_html(W_response):\r\n    # parse the page file and extract the key information\r\n    # soup = bs4.BeautifulSoup(response)\r\n    # lst = [soup.find_all(name='d')]\r\n    # danmuku = [i.text for i in lst]\r\n\r\n    W_pattern = re.compile(r'<d p=\".*?\">(.*?)</d>')\r\n    W_danmuku = re.findall(W_pattern, W_response)\r\n    return W_danmuku\r\n\r\n\r\ndef save_data(W_danmuku, W_cid):\r\n    # save the data\r\n    W_Dict = {\r\n        'danmuku': W_danmuku\r\n    }\r\n    pd_data = pd.DataFrame(W_Dict)\r\n    W_cid = str(W_cid)\r\n    W_name = W_cid + 
'弹幕文件.csv'\r\n    W_path = r'C:\\Users\\86183\\Desktop\\程序设计大作业\\{}'.format(W_name)\r\n    pd_data.to_csv(W_path, index=False, header=False, mode='w', encoding='utf-8-sig')\r\n\r\n\r\ndef data_preprocess(danmuku, cid):\r\n    cid = str(cid)\r\n    name = cid + '弹幕文件.csv'\r\n    path = r'C:\\Users\\86183\\Desktop\\程序设计大作业\\{}'.format(name)\r\n    with open(path , mode='r', encoding='utf-8') as f:\r\n        # load the user-defined dictionary\r\n        #jieba.load_userdict(r'D:\\爬虫\\userdict')\r\n        reader = f.read().replace('\\n', '')\r\n        # load the stopword list\r\n        stopwords = []#line.strip() for line in\r\n        #open(r'D:\\爬虫\\stop_wordslst', encoding='gbk').readlines()]\r\n        # strip punctuation, digits, and whitespace\r\n        pun_num = string.punctuation + string.digits\r\n        table = str.maketrans('', '', pun_num)\r\n        reader = reader.translate(table)\r\n        seg_list = jieba.cut(reader, cut_all=False)\r\n        sentence = ''\r\n        for word in seg_list:\r\n            if word not in stopwords and word.isspace() == False:\r\n                sentence += word\r\n                sentence += ','\r\n        sentence = sentence[:-1]\r\n    return sentence\r\n\r\n\r\ndef count_words(txt, cid):\r\n    cid = str(cid)\r\n    name = cid + '弹幕词汇数统计.csv'\r\n    #path = 'D:\\爬虫\\{}'.format(name)\r\n    aDict = {}\r\n    words = txt.split(',')\r\n    for word in words:\r\n        aDict[word] = aDict.get(word, 0) + 1\r\n    #pd_count = pd.DataFrame(aDict, index=['times']).T.sort_values('times', ascending=False)\r\n    #pd_count.to_csv(path,encoding='utf-8')\r\n    with open(name + '.csv',mode = 'w',encoding='gbk') as f:\r\n        d_order=sorted(aDict.items(),key=lambda x:x[1],reverse=True)\r\n        #print(d_order)\r\n        for i in d_order:\r\n            for j in i:\r\n                try:\r\n                    f.write(str(j))\r\n                    #print(str(j))\r\n                    f.write(' ')\r\n                except:\r\n                    break\r\n            f.write('\\n')\r\n\r\n\r\ndef Main():\r\n    import PySimpleGUI as sg\r\n    import sys\r\n    layout = [\r\n        [sg.T('请输入cid')],\r\n        [sg.Input('')],\r\n        [sg.B('确认'), sg.B('取消')]\r\n    ]\r\n\r\n    window = sg.Window('自定义网址界面', layout)\r\n    flag = 0\r\n    while True:\r\n        event, values = window.read()\r\n        if event == None:\r\n            break\r\n        if event == '取消':\r\n            break\r\n        if event == '确认':\r\n            flag = 1\r\n            break\r\n\r\n    window.close()\r\n    if flag == 0:\r\n        sys.exit()\r\n\r\n    W_cid = values[0]\r\n    #cid = int(input('请输入你想查询的视频CID号:'))\r\n    W_response = get_data(W_cid)\r\n    W_danmuku = parse_html(W_response)\r\n    save_data(W_danmuku, W_cid)\r\n    W_sentence = data_preprocess(W_danmuku, W_cid)\r\n    count_words(W_sentence, W_cid)","repo_name":"SyouSanGin/PDH","sub_path":"danmu.py","file_name":"danmu.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"39068650335","text":"# https://www.acmicpc.net/problem/15661\n# 145440KB, 2132ms\n\nimport sys\ninput = sys.stdin.readline\n\ndef cal(team):\n    sum_team = 0\n    for i in range(len(team)):\n        for j in range(len(team)):\n            if i != j:\n                a, b = team[i], team[j]\n                sum_team += stats[a][b]\n    return sum_team\n\n\ndef comb(idx, start, M):\n    global ans\n    if idx == M:\n        team1, team2 = [], []\n        for i in range(N):\n            if visited[i]:\n                team1.append(i)\n            else:\n                team2.append(i)\n        sum_team1 = cal(team1)\n        sum_team2 = cal(team2)\n        ans = min(ans, abs(sum_team1 - sum_team2))\n    for j in range(start, N):\n        visited[j] = 1\n        comb(idx + 1, j + 1, M)\n        visited[j] = 0\n\n\nN = int(input())\nstats = [list(map(int, input().split())) for _ in range(N)]\nM = N // 2\nans = 0xffffff\nvisited = [0] * N\nfor i in range(N//2 + 1):\n    comb(0, 0, 
i)\nprint(ans)\n","repo_name":"kwanggyo/Algorithm","sub_path":"Study/기초학습/완전탐색/BOJ_15661_링크와스타트.py","file_name":"BOJ_15661_링크와스타트.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"18123255932","text":"import sys\n\ninput = sys.stdin.readline\n\n# input preprocessing - split numbers and operators\ns = input().rstrip()\noperands = []\nfor c in s:\n    if c in ('-', '+'):\n        operands.append(c)\ns = s.replace(\"+\", \" \")\ns = s.replace(\"-\", \" \")\nnums = list(map(int, s.split()))\n\n# a - b structure -> make b as large as possible, and do the a - b operation at the very end\na = nums[0]\nb = 0\nnums = nums[1:]\n\nfor i, operand in enumerate(operands):\n    if operand == '+':\n        if b == 0:\n            a += nums[i]\n        else: # a '-' already appeared, so add to b to make b as large as possible\n            b += nums[i]\n    else:\n        if b == 0:\n            b += nums[i]\n        else: # replace b after applying the intermediate result, so that b's value does not shrink\n            a = a - b\n            b = nums[i]\n\nprint(a - b)","repo_name":"ssh00n/week04_team6","sub_path":"helen1031/1541번 잃어버린괄호.py","file_name":"1541번 잃어버린괄호.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"34433024520","text":"# Problem no. 22: Names scores\n###### SOLVED ######\n\nfrom requests import get\n\n# Get file with names\nresponse = get('https://projecteuler.net/project/resources/p022_names.txt')\n\n# Get response content and decode to string\ntext = response.content.decode()\n\n# Remove quotation marks\ntext = text.replace('\"', '')\n\n# Make list of names\nnames = text.split(',')\n\n# Sort names\nnames.sort()\n\nalphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\n# Weight letters with a dictionary\nalpha_dict = dict()\n\nfor i in range(len(alphabet)):\n    alpha_dict[alphabet[i]] = i + 1\n\n# Calculates word score by its letters' weights\ndef word_score(word):\n    score = 0\n    for ch in word:\n        score += alpha_dict[ch]\n    return score\n\n\ntotal = 0\n\nfor i in range(len(names)):\n    total += (i+1) * word_score(names[i])\n\nprint('Total score of sorted names:', total)\n","repo_name":"poitrek/Project-Euler-Problems","sub_path":"22_names_scores.py","file_name":"22_names_scores.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"34359891423","text":"from django.db.transaction import atomic\nfrom djoser.serializers import UserSerializer\nfrom drf_base64.fields import Base64ImageField\nfrom rest_framework.serializers import (IntegerField, ModelSerializer,\n                                        PrimaryKeyRelatedField,\n                                        SerializerMethodField,\n                                        SlugRelatedField,\n                                        ValidationError)\nfrom recipes.models import (Favorite, Ingredient, IngredientRecipe,\n                            Recipe, ShoppingCart, Tag)\n\nfrom users.models import Follow, User\n\n\nclass FavoriteSerializer(ModelSerializer):\n    class Meta:\n        model = Favorite\n        fields = ('user', 'recipe')\n\n    def validate(self, data):\n        request = self.context.get('request')\n        if not request or request.user.is_anonymous:\n            return False\n        recipe = data['recipe']\n        if Favorite.objects.filter(user=request.user, recipe=recipe).exists():\n            raise ValidationError(\n                {'errors': self.context['errors']['recipe_alrdy_in']})\n        return data\n\n    def to_representation(self, instance):\n        request = self.context.get('request')\n        context = {'request': request}\n        return RecipeShortInfo(instance.recipe, context=context).data\n\n\nclass IngredientSerializer(ModelSerializer):\n    class Meta:\n        model = Ingredient\n        fields = (\n            'id',\n            'name',\n            'measurement_unit')\n\n\nclass 
IngredientRecipeSerializer(ModelSerializer):\n id = PrimaryKeyRelatedField(\n source='ingredient',\n read_only=True)\n measurement_unit = SlugRelatedField(\n source='ingredient',\n slug_field='measurement_unit',\n read_only=True,)\n name = SlugRelatedField(\n source='ingredient',\n slug_field='name',\n read_only=True,)\n\n class Meta:\n model = IngredientRecipe\n fields = (\n 'id',\n 'name',\n 'measurement_unit',\n 'amount',)\n\n\nclass TagSerializer(ModelSerializer):\n class Meta:\n model = Tag\n fields = ('id', 'name', 'color', 'slug')\n\n\nclass UsersSerializer(UserSerializer):\n is_subscribed = SerializerMethodField()\n\n class Meta:\n model = User\n fields = [\n 'email', 'id', 'username', 'first_name',\n 'last_name', 'is_subscribed'\n ]\n\n def get_is_subscribed(self, obj: User):\n request = self.context.get('request')\n if not request or request.user.is_anonymous:\n return False\n return Follow.objects.filter(\n user=request.user, author=obj).exists()\n\n\nclass RecipeSerializer(ModelSerializer):\n tags = TagSerializer(many=True, read_only=True)\n ingredients = IngredientRecipeSerializer(\n many=True,\n read_only=True,\n source='ingredients_recipe',)\n image = Base64ImageField()\n author = UsersSerializer(read_only=True)\n is_in_shopping_cart = SerializerMethodField(read_only=True)\n is_favorited = SerializerMethodField(read_only=True)\n\n class Meta:\n model = Recipe\n fields = (\n 'id', 'tags', 'author', 'ingredients',\n 'is_favorited', 'is_in_shopping_cart',\n 'name', 'image', 'text', 'cooking_time')\n\n def get_is_favorited(self, obj):\n request = self.context.get('request')\n if request.user.is_anonymous:\n return False\n return Favorite.objects.filter(\n user=request.user, recipe__id=obj.id).exists()\n\n def get_is_in_shopping_cart(self, obj):\n request = self.context.get('request')\n if request.user.is_anonymous:\n return False\n return ShoppingCart.objects.filter(\n user=request.user, recipe__id=obj.id).exists()\n\n\nclass CreateIngredientRecipeSerializer(ModelSerializer):\n id = PrimaryKeyRelatedField(\n source='ingredient',\n queryset=Ingredient.objects.all())\n\n class Meta:\n model = IngredientRecipe\n fields = (\n 'id',\n 'amount',)\n\n def create(self, validated_data):\n return IngredientRecipe.objects.create(\n ingredient=validated_data.get('id'),\n amount=validated_data.get('amount'))\n\n\nclass RecipeShortInfo(ModelSerializer):\n class Meta:\n model = Recipe\n fields = ('id', 'name', 'image', 'cooking_time')\n\n\nclass FollowListSerializer(ModelSerializer):\n recipes = SerializerMethodField()\n recipes_count = SerializerMethodField()\n is_subscribed = SerializerMethodField(read_only=True)\n\n class Meta:\n model = User\n fields = (\n 'email', 'id', 'username', 'first_name', 'last_name',\n 'is_subscribed', 'recipes', 'recipes_count'\n )\n\n def get_recipes_count(self, author):\n return Recipe.objects.filter(author=author).count()\n\n def get_recipes(self, author):\n queryset = self.context.get('request')\n recipes_limit = queryset.query_params.get('recipes_limit')\n if not recipes_limit:\n return RecipeShortInfo(\n Recipe.objects.filter(author=author),\n many=True, context={'request': queryset}\n ).data\n return RecipeShortInfo(\n Recipe.objects.filter(author=author)[:int(recipes_limit)],\n many=True,\n context={'request': queryset}\n ).data\n\n def get_is_subscribed(self, author):\n return Follow.objects.filter(\n user=self.context.get('request').user,\n author=author\n ).exists()\n\n\nclass FollowSerializer(ModelSerializer):\n class Meta:\n model = Follow\n fields = 
('user', 'author')\n\n def validate(self, data):\n if self.context['request'].user.id == data['author']:\n raise ValidationError({\n 'errors': 'Вы не можете подписаться на себя.'\n })\n if Follow.objects.filter(\n user=self.context['request'].user,\n author=data['author']\n ):\n raise ValidationError({\n 'errors': 'Уже подписался.'\n })\n return data\n\n def to_representation(self, instance):\n return FollowListSerializer(\n instance.author,\n context={'request': self.context.get('request')}\n ).data\n\n\nclass CreateRecipeSerializer(ModelSerializer):\n image = Base64ImageField(use_url=True, max_length=None)\n author = UserSerializer(read_only=True)\n ingredients = CreateIngredientRecipeSerializer(many=True)\n tags = PrimaryKeyRelatedField(queryset=Tag.objects.all(), many=True)\n cooking_time = IntegerField()\n\n class Meta:\n model = Recipe\n fields = (\n 'id', 'image', 'tags', 'author', 'ingredients',\n 'name', 'text', 'cooking_time',)\n\n def create_ingredients(self, recipe, ingredients):\n IngredientRecipe.objects.bulk_create([\n IngredientRecipe(\n recipe=recipe,\n amount=ingredient['amount'],\n ingredient=ingredient['ingredient'],\n ) for ingredient in ingredients])\n\n @staticmethod\n def check_repit(data, errors):\n if not data:\n raise ValidationError(\n {'errors': errors['is_empty']})\n check_list = []\n for item_to_chk in data:\n if item_to_chk in check_list:\n raise ValidationError(\n {'errors': errors['is_repeat']})\n check_list.append(item_to_chk)\n return data\n\n def validate_ingredients(self, data):\n for i in data:\n if i['amount'] <= 0:\n raise ValidationError(\n f\"{i['ingredient'].name}\"\n f\" - указано: ({i['amount']})\"\n f\"{i['ingredient'].measurement_unit}.\"\n f\" Количество ингредиента должно быть больше 0.\"\n )\n self.check_repit(\n data,\n {'is_repeat': 'Ингредиенты не должны повторяться в рецепте.',\n 'is_empty': 'Должен быть хотя бы 1 ингредиент.'})\n return data\n\n def validate_tags(self, data):\n\n self.check_repit(\n data,\n {'is_repeat': 'Тэги не должны повторяться в рецепте.',\n 'is_empty': 'Теги не должны быть пустыми'})\n return data\n\n def validate_cooking_time(self, data):\n if data < 1:\n raise ValidationError(\n 'Готовить надо не менее 1 мин.')\n if data > 500:\n raise ValidationError(\n 'Время приготовления не может быть более 500 мин.')\n return data\n\n @atomic\n def create(self, validated_data):\n request = self.context.get('request')\n ingredients = validated_data.pop('ingredients')\n tags = validated_data.pop('tags')\n recipe = Recipe.objects.create(\n author=request.user,\n **validated_data\n )\n self.create_ingredients(recipe, ingredients)\n recipe.tags.set(tags)\n return recipe\n\n @atomic\n def update(self, instance, validated_data):\n ingredients = validated_data.pop('ingredients')\n instance.tags.clear()\n tags = validated_data.pop('tags')\n recipe = instance\n IngredientRecipe.objects.filter(recipe=recipe).delete()\n self.create_ingredients(recipe, ingredients)\n recipe.tags.set(tags)\n return super().update(recipe, validated_data)\n\n def to_representation(self, instance):\n return RecipeSerializer(\n instance,\n context={'request': self.context.get('request'), }).data\n\n\nclass ShoppingCartSerializer(ModelSerializer):\n class Meta:\n model = ShoppingCart\n fields = (\n 'recipe',\n 'user'\n )\n\n def validate(self, data):\n user = self.context['request'].user\n recipe_pk = data['recipe'].pk\n if ShoppingCart.objects.filter(user=user,\n recipe__pk=recipe_pk).exists():\n raise ValidationError(\n {'errors': 
self.context['errors']['recipe_alrdy_in']})\n        return data\n\n    def to_representation(self, instance):\n        request = self.context.get('request')\n        context = {'request': request}\n        return RecipeShortInfo(instance.recipe, context=context).data\n","repo_name":"yTpuConJlu/foodgram-project-react","sub_path":"backend/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":10643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"31014596723","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n#import urllib.parse\r\nimport time\r\nfrom multiprocessing import Pool, cpu_count\r\nimport csv\r\nfrom datetime import datetime\r\n\r\ndef main():\r\n    start_time = time.time()\r\n\r\n    root_sitemap_url = 'https://chrome.google.com/webstore/sitemap'\r\n    # get the top level sitemap shard urls\r\n    root_sitemap_xml = get_sitemap(root_sitemap_url)\r\n    root_sitemap_urls = parse_sitemap_xml(root_sitemap_xml)\r\n\r\n    # create queue for multiprocessing\r\n    queue = root_sitemap_urls\r\n\r\n    print(f\"starting work on {cpu_count()} cores\")\r\n\r\n    with Pool() as pool:\r\n        res = pool.map(parse_extensions, queue)\r\n        # the result from the Pool is a list of lists\r\n        # eg. res = [[{id, name}],[{}],[{}],[{}]...]\r\n\r\n    print(f\"--- {time.time() - start_time} seconds ---\")\r\n    \r\n    out_csv_file(res)\r\n\r\ndef get_sitemap(url):\r\n    \"\"\"get the sitemap xml text\r\n    \r\n\r\n    Parameters:\r\n    -----------\r\n    url : str\r\n        sitemap url\r\n\r\n    Returns:\r\n    --------\r\n    response.text : str\r\n        sitemap xml text\r\n    \"\"\"\r\n    response = requests.get(url)\r\n\r\n    return response.text\r\n\r\ndef parse_sitemap_xml(sitemap_xml):\r\n    \"\"\"parse the webstore urls from the sitemap xml\r\n    \r\n    Parameters:\r\n    -----------\r\n    sitemap_xml : str\r\n        raw xml\r\n    \r\n    Returns:\r\n    --------\r\n    sitemap_urls : list\r\n        webstore shard urls\r\n    \"\"\"\r\n    \r\n    soup = BeautifulSoup(sitemap_xml, 'xml')\r\n    urls = soup.find_all('loc')\r\n\r\n    sitemap_urls = []\r\n\r\n    for url in urls:\r\n        sitemap_urls.append(url.text)\r\n\r\n    return sitemap_urls\r\n\r\ndef parse_extensions(shard_url):\r\n    \"\"\"multiprocess job to save extension data into a dict\r\n    \r\n    Parameters:\r\n    -----------\r\n    shard_url : str\r\n        shard url contains potentially up to 300 extension urls\r\n    \r\n    Returns:\r\n    --------\r\n    chrome_webstore_dict : list\r\n        a list containing dict\r\n    \"\"\"\r\n\r\n    shard_sitemap_xml = get_sitemap(shard_url)\r\n    shard_sitemap_urls = parse_sitemap_xml(shard_sitemap_xml)\r\n\r\n    chrome_webstore_dict = []\r\n\r\n    for url in shard_sitemap_urls:\r\n        parts = url.split('/')\r\n        #extension_name = urllib.parse.unquote(parts[5]) # url decode extension names\r\n        extension_name = parts[5]\r\n        extension_id = parts[6]\r\n\r\n        if '?' 
in extension_id:\r\n id_parts = extension_id.split('?')\r\n extension_id = id_parts[0]\r\n\r\n chrome_webstore_dict.append({'id' : extension_id, 'name' : extension_name})\r\n\r\n return chrome_webstore_dict\r\n\r\ndef out_csv_file(data):\r\n \"\"\"save chrome extensions to csv file.\r\n \r\n Parameters:\r\n -----------\r\n data : list\r\n a list of lists containing dicts\r\n \"\"\"\r\n \r\n field_names = ['id', 'name']\r\n file_name = f\"chrome_webstore_extensions_{datetime.now().strftime('%Y-%m-%d')}.csv\"\r\n\r\n with open(file_name, 'a', newline='') as csvfile:\r\n writer = csv.DictWriter(csvfile, fieldnames=field_names)\r\n writer.writeheader()\r\n\r\n for item in data:\r\n writer.writerows(item)\r\n\r\n print(f\"file saved: {file_name}\")\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"adamcysec/Scrape-Browser-Extensions","sub_path":"scrape_chromeWebstore.py","file_name":"scrape_chromeWebstore.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"87"} +{"seq_id":"35003791482","text":"\"\"\"\nevaluates everything\n\"\"\"\nfrom typing import Any, Dict, List, Tuple, Union\nfrom common.train_pipeline.config import ModelConfig\nfrom common.train_pipeline.model.model import get_model\nimport numpy as np\nimport torch\nfrom torch.nn import Module\nfrom tqdm import tqdm\nfrom timm.loss import SoftTargetCrossEntropy\nfrom torchmetrics import Metric\nfrom torchmetrics.classification import Accuracy, MulticlassPrecision, MulticlassRecall\nfrom scipy.io import savemat\nimport matlab\nimport matlab.engine\n\n# from common.data_pipeline.mmcbnu.dataset import DatasetLoader as mmcbnu\n# from common.data_pipeline.fvusm.dataset import DatasetLoader as fvusm\nfrom common.data_pipeline.dataset import get_dataset\n\nfrom common.metrics.eer import EER\nfrom common.util.logger import logger\nfrom common.util.data_pipeline.dataset_chainer import DatasetChainer\nfrom common.util.enums import EnvironmentType\nfrom train import get_config\n\n# To watch nvidia-smi continuously after every 2 seconds: watch -n 2 nvidia-smi\n\n\ndef get_loss() -> Module:\n \"\"\"\n Gets a loss function.\n \"\"\"\n return SoftTargetCrossEntropy()\n\n\ndef get_metrics(n_classes: int, eng: Any) -> list[Metric]:\n \"\"\"\n Returns list of validation metrics.\n \"\"\"\n return [\n Accuracy(\n task=\"multiclass\",\n num_classes=n_classes,\n ),\n EER(eng, genuine_class_label=1 if n_classes == 2 else None),\n # ConfusionMatrix().to(device),\n ]\n\n\ndef add_label(metric: Dict[str, Any], label: str = \"\") -> Dict[str, Any]:\n \"\"\"\n Adds provided label as prefix to the keys in the metric dictionary.\n \"\"\"\n return {f\"{label}_{k}\": v for k, v in metric.items()}\n\n\ndef cuda_info():\n \"\"\"\n Prints cuda info.\n \"\"\"\n\n device = torch.device( # pylint: disable=E1101\n \"cuda\" if torch.cuda.is_available() else \"cpu\"\n ) # pylint: disable=E1101\n logger.info(\"Using device: %s\", device)\n\n # Additional Info when using cuda\n if device.type == \"cuda\":\n logger.info(torch.cuda.get_device_name(0))\n logger.info(\"Memory Usage:\")\n logger.info(\n \"Allocated: %s GB\",\n round(torch.cuda.memory_allocated(0) / 1024**3, 1),\n )\n logger.info(\n \"Cached: %s GB\", round(torch.cuda.memory_reserved(0) / 1024**3, 1)\n )\n return device\n\n\ndef evaluate(\n datasets: Union[str, Any],\n model_path: str,\n config: ModelConfig,\n batch_size: int = 10,\n environment: EnvironmentType = EnvironmentType.PYTORCH,\n n_classes: int = 301,\n height: 
int = 60,\n    width: int = 120,\n) -> Dict[str, Any]:\n    \"\"\"\n    Contains the training loop.\n    \"\"\"\n    eng = matlab.engine.start_matlab()\n    try:\n        script_dir = \"./EER\"\n        eng.addpath(script_dir)\n    except Exception:\n        logger.exception(\"Cannot initialise matlab engine\")\n\n    device = cuda_info()\n    model = get_model(config)\n    model.to(device)\n    model.load_state_dict(torch.load(model_path, map_location=device))\n    print(model)\n\n    if isinstance(datasets, str):\n        train_dataset, test_dataset, validation_dataset = DatasetChainer(\n            datasets=[\n                get_dataset(\n                    datasets,\n                    environment=environment,\n                    augment_times=0,\n                    height=height,\n                    width=width,\n                )\n            ],\n        ).get_dataset(\n            batch_size=batch_size,\n            dataset_type=environment,\n        )\n    else:\n        train_dataset, test_dataset, validation_dataset = datasets\n\n    model.eval()\n    # logger.info(model)\n    loss_fn = get_loss().to(device)\n    metrics = [metric.to(device) for metric in get_metrics(n_classes, eng)]\n    # Training loop\n    with torch.no_grad():\n        all_results: Dict[str, Any] = {}\n        dataset_names = [\"test\", \"validation\"]\n        for index, dataset in enumerate([test_dataset, validation_dataset]):\n            all_loss = []\n            if not dataset:\n                continue\n            for inputs, labels in tqdm(dataset if dataset else [], desc=\"Train:\"):\n                if inputs.shape[0] == 1:\n                    inputs = torch.cat((inputs, inputs), 0) # pylint: disable=E1101\n                    labels = torch.cat((labels, labels), 0) # pylint: disable=E1101\n                inputs = inputs.to(device).float()\n                labels = labels.to(device).float()\n                outputs = model(inputs) # pylint: disable=E1102\n                loss = loss_fn(outputs, labels) # pylint: disable=E1102\n                all_loss.append(loss.item())\n                metrics[1].update(outputs, labels)\n                predicted = outputs.argmax(dim=1)\n                labels = labels.argmax(dim=1)\n                metrics[0].update(predicted, labels)\n\n            accuracy = metrics[0].compute().item()\n            eer, best_one, best_pointone, best_pointzerone = metrics[1].compute()\n            logger.info(\"Evaluation results: %s\", dataset_names[index])\n            logger.info(\"EER: %s\", eer)\n            logger.info(\"Best TAR 1: %s\", best_one)\n            logger.info(\"Best TAR 0.1: %s\", best_pointone)\n            logger.info(\"Best TAR 0.01: %s\", best_pointzerone)\n            for metric in metrics:\n                metric.reset()\n\n            all_results[dataset_names[index]] = {\n                \"accuracy\": accuracy,\n                \"eer\": eer,\n                \"tar1\": best_one,\n                \"tar0.1\": best_pointone,\n                \"tar0.01\": best_pointzerone,\n            }\n    return all_results\n","repo_name":"Blazkowiz47/finger-vein-quality-assessement","sub_path":"common/evaluate_pipeline/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":5536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"40294730820","text":"from flask import request, jsonify\nimport uuid\nimport logging\nimport datetime\nimport json\nfrom src.handler import utils\nfrom src.dal import mongo_dal\n\n\ndef transaction(request):\n    ret = utils.generate_ret()\n    try:\n        auth_header = request.headers.get('Authorization')\n        auth_token = ''\n        if auth_header:\n            auth_header_list = auth_header.split(\" \")\n            if len(auth_header_list) > 1:\n                auth_token = auth_header_list[1]\n\n        if auth_token:\n            resp, err_msg = utils.decode_auth_token(auth_token)\n            if err_msg != \"\":\n                ret[\"status\"] = \"ERROR\"\n                ret['message'] = err_msg\n                return ret\n            if resp:\n                db = mongo_dal.get_db()\n                db_result = db.topup_tb.find({\"user_id\": resp}).sort(\"created_date\", -1)\n\n                result = []\n                for i in db_result:\n                    obj = {}\n                    obj[\"amount\"] = i.get(\"amount\", 0)\n                    obj[\"balance_before\"] = i.get(\"balance_before\", 0)\n                    
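# ---- editor's aside (hedged; not part of the dataset record it interrupts) ----
# The Flask handler above pulls a bearer token out of the Authorization header
# by splitting on a single space. A minimal, self-contained sketch of just that
# parsing step; the helper name is hypothetical, not from this codebase:
#
#     def extract_bearer_token(auth_header: str) -> str:
#         parts = auth_header.split(" ")
#         return parts[1] if len(parts) > 1 else ""
#
#     assert extract_bearer_token("Bearer abc123") == "abc123"
#     assert extract_bearer_token("") == ""
# --------------------------------------------------------------------------------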
obj[\"balance_after\"] = i.get(\"balance_after\", 0)\n obj[\"transaction_type\"] = i.get(\"transaction_type\", 0)\n obj[\"remarks\"] = i.get(\"remarks\", \"\")\n obj[\"status\"] = \"SUCCESS\"\n obj[\"created_date\"] = i.get(\"created_date\", \"\")\n obj[\"updated_date\"] = i.get(\"updated_date\", \"\")\n\n top_up_id = i.get(\"top_up_id\", \"\")\n transfer_id = i.get(\"transfer_id\", \"\")\n payment_id = i.get(\"payment_id\", \"\")\n\n if top_up_id != \"\":\n obj[\"top_up_id\"] = i.get(\"top_up_id\", \"\")\n\n elif transfer_id != \"\":\n obj[\"transfer_id\"] = i.get(\"transfer_id\", \"\")\n\n elif payment_id != \"\":\n obj[\"payment_id\"] = i.get(\"payment_id\", \"\")\n\n result.append(obj)\n\n ret['result'] = result\n ret['user_id'] = resp\n ret['status'] = \"SUCCESS\"\n return ret\n else:\n return {\n 'status': 'ERROR',\n 'message': 'Unauthenticated'\n }\n\n except Exception as e:\n logging.error(e, exc_info=True)\n return ret\n","repo_name":"hasimwebapps/flask_api_docker","sub_path":"src/handler/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"71755492120","text":"import pandas as pd\nimport numpy as np\nfrom web3.providers import JSONBaseProvider\nimport time\nfrom app.providers import Neo4jProvider, Web3Provider\nfrom .ethereum_utils import send_batch\nfrom .contract_service import ContractService\nimport os\nimport itertools\nfrom typing import List\nfrom .neo4j_queries import reset_data_query, load_block_from_csv_query, load_txs_from_csv_query, \\\n load_addresses_from_csv_query, create_tx_account_relations_query, create_block_tx_relations_query\nfrom app.helpers.file_helper import is_file_empty\n\nfrom . 
import constants\n\n\nclass EthereumService:\n \"\"\"Ethereum service using Pandas\"\"\"\n def __init__(self, blocks_to_export):\n self.infura_url = Web3Provider().infura_node_url\n self.web3 = Web3Provider().provider\n self.base_provider = JSONBaseProvider()\n self.neo4j_driver = Neo4jProvider().driver\n self.contract_service = ContractService()\n self.api_key = os.getenv('ETHERSCAN_API_KEY')\n self.BLOCK_TO_EXPORT = blocks_to_export\n\n def __del__(self):\n self.neo4j_driver.close()\n\n @staticmethod\n def _get_current_block_numbers():\n return (pd.read_csv(constants.BLOCKS_CSV_PATH, sep='\\t')['number'].values\n if not is_file_empty(constants.BLOCKS_CSV_PATH)\n else pd.DataFrame()\n )\n\n @staticmethod\n def _get_current_txs():\n return (pd.read_csv(constants.TXS_CSV_PATH, sep='\\t')\n if not is_file_empty(constants.TXS_CSV_PATH)\n else pd.DataFrame()\n )\n\n @staticmethod\n def _save_blocks_to_csv(blocks):\n if not is_file_empty(constants.BLOCKS_CSV_PATH):\n blocks.to_csv(constants.BLOCKS_CSV_PATH, sep='\\t', index=True, mode='a', header=None)\n else:\n blocks.to_csv(constants.BLOCKS_CSV_PATH, sep='\\t', index=True)\n\n @staticmethod\n def _save_txs_to_csv(txs):\n if not is_file_empty(constants.TXS_CSV_PATH):\n txs.to_csv(constants.TXS_CSV_PATH, sep='\\t', index=False, mode='a', header=None)\n else:\n txs.to_csv(constants.TXS_CSV_PATH, sep='\\t', index=False)\n\n @staticmethod\n def _save_addresses_to_csv(addresses):\n if not is_file_empty(constants.ADDRESSES_CSV_PATH):\n addresses.to_csv(constants.ADDRESSES_CSV_PATH, sep='\\t', index=False, mode='a', header=None)\n else:\n addresses.to_csv(constants.ADDRESSES_CSV_PATH, sep='\\t', index=False)\n\n @staticmethod\n def _save_abis_to_csv(abis):\n if not is_file_empty(constants.CONTRACTS_CSV_PATH):\n abis.to_csv(constants.CONTRACTS_CSV_PATH, sep='\\t', index=False, mode='a', header=None)\n else:\n abis.to_csv(constants.CONTRACTS_CSV_PATH, sep='\\t', index=False)\n\n def _send_batch_transactions(self, tx_hashes):\n results = send_batch(\n self.infura_url,\n self.base_provider,\n 'eth_getTransactionByHash',\n tx_hashes\n )\n return [tx['result'] for tx in results]\n\n def _send_batch_blocks(self, block_numbers):\n results = send_batch(\n self.infura_url,\n self.base_provider,\n 'eth_getBlockByNumber',\n block_numbers\n )\n return [block['result'] for block in results]\n\n def _get_n_latest_blocks(self, n) -> (pd.DataFrame, List[str]):\n current_blocks_numbers = self._get_current_block_numbers()\n\n latest_block_number = self.web3.eth.get_block('latest')['number']\n batch_blocks_arguments = [\n [hex(number), False]\n for number in range(latest_block_number - n, latest_block_number)\n ]\n\n blocks_data = self._send_batch_blocks(batch_blocks_arguments)\n\n tx_hashes = list(itertools.chain.from_iterable(\n block['transactions']\n for block in blocks_data if block['number'] not in current_blocks_numbers or current_blocks_numbers.empty)\n )\n\n return (pd.DataFrame(\n data=blocks_data,\n columns=['hash', 'size', 'number']\n )\n .fillna({'hash': 0})\n .assign(hash=lambda df_: df_['hash'].astype(str).apply(int, base=16))\n .loc[lambda df_: ~df_['number'].isin(current_blocks_numbers)]\n .set_index('number')\n ), tx_hashes\n\n def _get_txs_details(self, tx_hashes):\n return (pd.DataFrame(\n data=self._send_batch_transactions(tx_hashes),\n columns=['from', 'to', 'hash', 'blockNumber', 'value', 'blockHash', 'chainId', 'input'],\n )\n .fillna({'chainId': 0, 'blockNumber': 0})\n .assign(\n chainId=lambda df_: df_['chainId'].astype(str).apply(int, 
base=16),\n blockNumber=lambda df_: df_['blockNumber'].astype(str).apply(int, base=16)\n )\n .merge(pd.read_csv(constants.CHAINLIST_CSV_PATH, sep=\"\\t\", header=0), left_on='chainId',\n right_on='chain_id')\n )\n\n def _get_addresses_from_txs(self, txs):\n current_txs = self._get_current_txs()\n\n def _get_account_type(df_):\n results = send_batch(\n self.infura_url,\n self.base_provider,\n 'eth_getCode',\n [[self.web3.toChecksumAddress(address), 'latest'] for address in df_['address'].values]\n )\n\n df_ = df_.join(pd.DataFrame.from_records(data=results, columns=['result']))\n df_['type'] = np.where(df_['result'] == '0x', 'private', 'contract')\n\n return (df_['type']\n .astype('category'))\n\n def _get_unique_addresses(df_):\n return (df_\n .assign(address=list(txs['to']) + list(txs['from']))\n .dropna()\n .drop_duplicates(subset=['address'])\n .pipe(\n lambda df__: df__.loc[~df__['address'].isin([current_txs['from'], current_txs['to']])]\n if not current_txs.empty else df__\n )\n ['address']\n )\n\n return (pd.DataFrame()\n .assign(address=_get_unique_addresses)\n .assign(type=_get_account_type)\n )\n\n def _get_abis_from_addresses(self, addresses):\n return (pd.DataFrame()\n .assign(address=addresses['address'])\n .loc[lambda df_: addresses['type'] == 'contract']\n .assign(abi=lambda df_: self.contract_service.get_contracts_abi(df_['address']))\n .loc[lambda df_: df_['abi'] != 'Contract source code not verified']\n )\n\n def _get_data_to_export(self):\n def _decode_tx_functions(df_):\n decoded_tx = self.contract_service.decode_tx(df_['to'], df_['input'], df_['abi'])\n df_['function_name'] = decoded_tx[0]\n df_['function_params'] = decoded_tx[1]\n df_['target_schema'] = decoded_tx[2]\n return df_\n\n blocks, tx_hashes = self._get_n_latest_blocks(self.BLOCK_TO_EXPORT)\n txs = self._get_txs_details(tx_hashes)\n addresses = self._get_addresses_from_txs(txs)\n abis = self._get_abis_from_addresses(addresses)\n\n txs = (txs\n .merge(abis, left_on='to', right_on='address', how='inner')\n .assign(function_name='', function_params='', target_schema='')\n .apply(_decode_tx_functions, axis=1)\n .drop(columns=['address'])\n )\n\n return blocks, txs, addresses, abis\n\n def _import_csv_to_neo4j(self):\n with self.neo4j_driver.session() as session:\n session.execute_write(reset_data_query)\n session.execute_write(load_block_from_csv_query)\n session.execute_write(load_txs_from_csv_query)\n session.execute_write(load_addresses_from_csv_query)\n session.execute_write(create_tx_account_relations_query)\n session.execute_write(create_block_tx_relations_query)\n\n def save_txs_to_neo4j(self):\n st = time.time()\n\n blocks, txs, addresses, abis = self._get_data_to_export()\n\n self._save_blocks_to_csv(blocks)\n self._save_txs_to_csv(txs)\n self._save_addresses_to_csv(addresses)\n self._save_abis_to_csv(abis)\n\n et = time.time()\n print('Execution time:', et - st, 'seconds')\n\n self._import_csv_to_neo4j()\n\n def sandbox(self):\n print('test', self)\n","repo_name":"macieksitko/crypto-stats","sub_path":"app/ethereum/ethereum_service.py","file_name":"ethereum_service.py","file_ext":"py","file_size_in_byte":8582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"22345420744","text":"from nltk.stem.porter import PorterStemmer\r\nimport jieba\r\nimport math\r\nimport sys\r\nimport os\r\nimport json\r\nfrom pathlib import Path\r\nimport re\r\n\r\nfolder_name = ['Denial of Service & Proof of Concept Exploits', 'Exploit Shellcode Archive'\r\n , 'Local & 
Privilege Escalation Exploits', 'Remote Exploits', 'Web Application Exploits']\r\npath = 'C:\\\\Users\\\\dlwog\\\\exploitdb_crawling\\\\exploitdb'\r\n\r\nall_data = {}\r\nfor fold in folder_name:\r\n    tmp_reader = {}\r\n    file_list = os.listdir(path + '\\\\' + fold)\r\n\r\n    tmp_list = []\r\n    for file_name in file_list:\r\n        if '.json' in file_name:\r\n            tmp_list.append(file_name)\r\n\r\n    all_data[fold] = []\r\n\r\n    for tmp in tmp_list:\r\n        print(tmp)\r\n\r\n        tmp_str = \"\"\r\n        with open(path + '\\\\' + fold + '\\\\' + tmp, 'r', encoding='utf-8-sig') as json_file:\r\n            for line in json_file.readlines():\r\n                if line == '}{\\n':\r\n                    tmp_str = tmp_str + '}ㅂ{'\r\n                else:\r\n                    tmpstring = re.sub('\\n', '', line)\r\n                    while (1):\r\n                        if tmpstring[0] == ' ' or tmpstring[0] == '\\xa0':\r\n                            tmpstring = tmpstring[1:]\r\n                        else:\r\n                            break\r\n                    tmp_str = tmp_str + tmpstring\r\n\r\n        if tmp_str == \"\":\r\n            continue\r\n\r\n        new_list = tmp_str.split('ㅂ')\r\n        for lis in new_list:\r\n            all_data[fold].append(json.loads(lis))\r\n\r\n\r\n\r\nwith open(path + '\\\\' + 'exploitdb.json', 'w', encoding='utf-8') as json_file:\r\n    json.dump(all_data, json_file, ensure_ascii=False)","repo_name":"dlwogus5038/exploitdb_crawling","sub_path":"merge_all.py","file_name":"merge_all.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"38970174884","text":"import glob\nfrom fpdf import FPDF\nfrom PyPDF2 import PdfFileMerger\nfrom PIL import Image\nimport os\n\n\noutput_filename = 'Result'\n\nextensions = ('*.jpg','*.png','*.gif')\n\nimagelist=[]\nfor ext in extensions:\n    imagelist.extend(glob.glob(ext))\n\npdf = FPDF()\n\nprint(imagelist)\nfor imagePath in imagelist:\n    cover = Image.open(imagePath)\n    width, height = cover.size\n    if height > width:\n        pdf = FPDF(unit = \"mm\", format = \"A4\")\n        pdf.add_page()\n        pdf.image(imagePath, 0, 0, 210, 297)\n    elif width > height:\n        pdf = FPDF(\"L\", unit = \"mm\", format = \"A4\")\n        pdf.add_page()\n        pdf.image(imagePath, 0, 0, 297, 210)\n    pdf.output(imagePath.split('.')[0] + '.pdf', 'F')\n\nmerger = PdfFileMerger()\n\npdfs = [imagepath.split('.')[0] + '.pdf' for imagepath in imagelist]\n\nfor pdf in pdfs:\n    merger.append(open(pdf, 'rb'))\n    os.remove(pdf)\n\nwith open(output_filename + '.pdf', 'wb') as fout:\n    merger.write(fout)\n","repo_name":"m-javed/img2pdf","sub_path":"img2pdf.py","file_name":"img2pdf.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"36579784470","text":"#!/usr/bin/env python3\r\n# coding=utf-8\r\n'''\r\nAuthor: HoveXb\r\nDate: 2021-6-26 \r\nLastEditTime: 2021-6-26 \r\nDescription: object detection and position estimation for the side-view camera\r\nVersion: \r\nEmail: hovexb428@hnu.edu.cn\r\n'''\r\n\r\nfrom numpy.matrixlib.defmatrix import mat\r\nimport rospy\r\nfrom sensor_msgs.msg import Image\r\nfrom camera_detect.msg import BoundingBox\r\nfrom camera_detect.msg import BoundingBoxes\r\nfrom std_msgs.msg import Header\r\nimport torch\r\nimport numpy as np\r\nfrom models.experimental import attempt_load\r\nfrom utils.datasets import letterbox\r\nfrom utils.general import check_img_size, non_max_suppression,scale_coords\r\nfrom utils.plots import plot_one_box\r\nfrom utils.torch_utils import select_device\r\n\r\nclass Sidecam_detect(object):\r\n    def __init__(self,hyps):\r\n        super(Sidecam_detect).__init__()\r\n        #load hyperparameters\r\n        self.device = hyps['device']\r\n        self.weights = hyps['weights']\r\n        self.img_size = hyps['img_size']\r\n        self.conf_thres = hyps['conf_thres']\r\n        
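# ---- editor's aside (hedged; not part of the dataset record it interrupts) ----
# The img2pdf record that closes just above stretches every image to the full
# A4 page, which distorts the aspect ratio. A sketch of an aspect-preserving
# placement using the same FPDF/PIL calls; the helper name is hypothetical:
#
#     from fpdf import FPDF
#     from PIL import Image
#
#     def place_on_a4(pdf, image_path):
#         w, h = Image.open(image_path).size
#         page_w, page_h = (210, 297) if h >= w else (297, 210)  # mm
#         scale = min(page_w / w, page_h / h)  # fit inside the page, no stretch
#         iw, ih = w * scale, h * scale
#         pdf.add_page(orientation='P' if h >= w else 'L')
#         pdf.image(image_path, (page_w - iw) / 2, (page_h - ih) / 2, iw, ih)
# --------------------------------------------------------------------------------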
self.iou_thres = hyps['iou_thres']\r\n        self.draw_result=hyps['draw_result']\r\n        self.view_img = hyps['view_img']\r\n        self.save_img = hyps['save_img']\r\n        self.save_path = hyps['save_path']\r\n        self.classes = hyps['classes']\r\n        self.agnostic_nms = hyps['agnostic_nms']\r\n        self.augment = hyps['augment']\r\n        self.id = hyps['camera_id']\r\n        self.half = hyps['half']  # the 1660s GPU does not support float16\r\n        self.class_name = hyps['class_name']\r\n\r\n        #PNP parameters\r\n        self.camera_matrix = np.load(hyps['camera_matrix'])\r\n        self.dist_coefs = np.load(hyps['dist_coefs'])\r\n        self.rotM = np.load(hyps['rotM'])\r\n        self.tvec = np.load(hyps['tvec'])\r\n\r\n        self.devices = select_device(self.device)\r\n\r\n        if self.half:\r\n            rospy.loginfo(\"Warning: the 1660s can't use float16! Changing to float32 by default.\n\")\r\n            self.half = False\r\n        self.model = attempt_load(self.weights, map_location=self.devices) # load FP32 model\r\n\r\n        #check image size\r\n        stride = int(self.model.stride.max()) # model stride\r\n        imgsz = check_img_size(self.img_size, s=stride) # check img_size\r\n\r\n        self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names\r\n        self.colors = [[np.random.randint(0, 255) for _ in range(3)] for _ in self.names]\r\n\r\n        # run once, warm up\r\n        if self.devices.type != 'cpu':\r\n            self.model(torch.zeros(1, 3, imgsz, imgsz).to(self.devices).type_as(next(self.model.parameters())))\r\n\r\n        self.image_sub = rospy.Subscriber(f'msg_camera_prep_{self.id}', Image, self.detect,queue_size=1)\r\n        self.obj_pub = rospy.Publisher(f'msg_camera_obj_{self.id}', BoundingBoxes, queue_size=1)\r\n        if self.view_img:\r\n            self.image_pub = rospy.Publisher(f'msg_camera_result_{self.id}',Image,queue_size=1)\r\n            self.ros_frame = Image()\r\n\r\n    @torch.no_grad()\r\n    def detect(self,data):\r\n        pass\r\n        boxes = BoundingBoxes()\r\n        box = BoundingBox()\r\n        if len(data.data) == 0:\r\n            boxes.camera_status = 0\r\n        else:\r\n            boxes.camera_status = 1\r\n\r\n        img0 = np.ndarray((data.height, data.width, 3), dtype=np.uint8, buffer=data.data)\r\n\r\n        # keep a copy of the original image for drawing the results later\r\n        im0s = img0\r\n        # denoising\r\n        # img0 = cv2.fastNlMeansDenoisingColored(img0, None, 10, 10, 7, 21)\r\n\r\n        img = letterbox(img0, new_shape=self.img_size)[0]\r\n        # Convert\r\n        img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\r\n        img = np.ascontiguousarray(img)\r\n\r\n        img = torch.from_numpy(img).to(self.devices)\r\n        img = img.half() if self.half else img.float() # uint8 to fp16/32\r\n        img /= 255.0 # 0 - 255 to 0.0 - 1.0\r\n        if img.ndimension() == 3:\r\n            img = img.unsqueeze(0)\r\n\r\n        # Inference\r\n        pred = self.model(img, augment=self.augment)[0]\r\n\r\n        # Apply NMS\r\n        pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes=self.classes, agnostic=self.agnostic_nms)\r\n\r\n        # Process detections\r\n        for i, det in enumerate(pred): # detections per image\r\n            im0 = im0s\r\n            if len(det):\r\n                # Rescale boxes from img_size to im0 size\r\n                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()\r\n\r\n                boxes.count = det.shape[0]\r\n                for j in range(det.shape[0]):\r\n                    # box.camera_obj_class = self.class_name[int(det[j, -1])]\r\n                    box.camera_obj_class = int(det[j, -1])\r\n                    box.camera_obj_prob = float(det[j, -2])\r\n                    box.camera_obj_xmin = float(det[j, 0])\r\n                    box.camera_obj_ymin = float(det[j, 1])\r\n                    box.camera_obj_xmax = float(det[j, 2])\r\n                    box.camera_obj_ymax = float(det[j, 3])\r\n                    # box.camera_obj_id = j\r\n\r\n                    #PNP to get 3D location\r\n                    twoD_matric = [int(det[j, 2]), int(det[j, 3]),1]\r\n                    temmat = mat(self.rotM).I * mat(self.camera_matrix).I * mat(twoD_matric).T\r\n                    temmat2 = mat(self.rotM).I * mat(self.tvec)\r\n                    s = temmat2[2]\r\n                    s = s / temmat[2]\r\n                    wcpoint = mat(self.rotM).I * (s[0, 0] * mat(self.camera_matrix).I * 
mat(twoD_matric).T - mat(self.tvec))\r\n\r\n                    box.camera_obj_x = float(wcpoint[0])\r\n                    box.camera_obj_y = float(wcpoint[1])\r\n                    box.camera_obj_z = 0\r\n\r\n                    boxes.bounding_boxes.append(box)\r\n                if self.draw_result:\r\n                    #draw the result\r\n                    for *xyxy, conf, cls in reversed(det):\r\n                        label = f'{self.names[int(cls)]} {conf:.2f}'\r\n                        plot_one_box(xyxy, im0, label=label, color=self.colors[int(cls)], line_thickness=2,\r\n                                     camera_matrix=self.camera_matrix,rotM=self.rotM,tvec=self.tvec)\r\n\r\n        #publish the detection result img\r\n        if self.view_img:\r\n            # cv2.imshow(\"result\", im0)\r\n            # cv2.waitKey(1)\r\n            header = Header(stamp=rospy.Time.now())\r\n            # header.frame_id = \"Camera3\"\r\n            self.ros_frame.header = header\r\n            self.ros_frame.width = im0.shape[:2][1]\r\n            self.ros_frame.height = im0.shape[:2][0]\r\n            self.ros_frame.encoding = \"bgr8\"\r\n            #self.ros_frame.step = im0.shape[:2][1]\r\n            self.ros_frame.data = np.array(im0).tostring() # convert the image to bytes\r\n            self.image_pub.publish(self.ros_frame) # publish the message\r\n\r\n        if self.save_img:\r\n            import cv2\r\n            import time\r\n            import os\r\n            cv2.imwrite(os.path.join(self.save_path,str(time.strftime(\"%Y-%m-%d-%H:%M:%S\", time.localtime()))+\".png\"), im0)\r\n\r\n        self.obj_pub.publish(boxes)\r\nif __name__ == '__main__':\r\n\r\n    #initialize ros_node\r\n    rospy.init_node('n_camera_obj_3')\r\n    #load hyperparameters\r\n    import yaml\r\n    config_path = rospy.get_param('n_camera_obj_config_3')\r\n    with open(config_path) as f:\r\n        hyps = yaml.load(f, Loader=yaml.SafeLoader) # load hyps\r\n    \r\n    sideCamDetector = Sidecam_detect(hyps)\r\n\r\n    try:\r\n        rospy.spin()\r\n    except KeyboardInterrupt:\r\n        rospy.loginfo('KeyboardInterrupt, see you again!')\r\n","repo_name":"HoveXb/Monocular_camera_3Ddetection","sub_path":"scripts/sidecamera_detect.py","file_name":"sidecamera_detect.py","file_ext":"py","file_size_in_byte":7776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"7122899051","text":"from datetime import datetime, timedelta\nfrom .interfaces.token_interface import TokenServiceInterface\nfrom jose import jwt, JWTError\nfrom fastapi import status\nfrom common_exceptions import raise_exception\n\n\nclass TokenService(TokenServiceInterface):\n    async def decode_token(self, token: str) -> dict:\n        try:\n            return jwt.decode(\n                token=token, key=self.secret_key, algorithms=self.algorithm)\n        except JWTError:\n            raise_exception(status.HTTP_401_UNAUTHORIZED,\n                            'Could not validate credentials')\n\n    async def encode_token(self, email: str) -> str:\n        data = {\n            'sub': email,\n            'exp': datetime.utcnow() + timedelta(minutes=self.exp_time)\n        }\n        return jwt.encode(\n            claims=data, key=self.secret_key, algorithm=self.algorithm\n        )\n","repo_name":"sb-elliot-7s/hotel-fastapi","sub_path":"account/token_service.py","file_name":"token_service.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"87"} +{"seq_id":"37467815272","text":"import os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport tensorflow as tf\nimport pandas as pd\n\n# binary classification\ntrain_url = \"http://download.tensorflow.org/data/iris_training.csv\"\ntrain_path = tf.keras.utils.get_file(train_url.split('/')[-1], train_url)\n\ndf_data = pd.read_csv(train_path)\nx_train, y_train = df_data.values[:, 0:4], df_data.values[:, 4:]\nx_train = tf.cast(tf.expand_dims(x_train, axis=1), tf.float32)\nx_train, y_train = x_train[y_train < 2], tf.cast(tf.reshape(y_train[y_train < 2], (-1, 1)), tf.float32)\n\nw = tf.Variable(tf.random.normal((4, 1)), dtype=tf.float32)\nb = tf.Variable(tf.zeros(1), dtype=tf.float32)\n\nepochs = 
50\nlearning_rate = 0.001\n\nfor i in range(epochs):\n with tf.GradientTape() as tape:\n pre = 1/(1 + tf.exp(-(x_train @ w + b)))\n loss = -tf.reduce_sum(y_train * tf.math.log(pre) + (1 - y_train) * tf.math.log(1 - pre))\n\n acc = tf.reduce_mean(tf.cast(tf.equal(tf.where(pre.numpy() < 0.5, 0.0, 1.0), y_train), tf.float32))\n delta = tape.gradient(loss, [w, b])\n w.assign_sub(delta[0] * learning_rate), b.assign_sub(delta[1] * learning_rate)\n\n print(\"epoch:{},loss:{},accuracy:{}\".format(i, loss, acc))\n\nprint(tf.reshape(tf.where(pre.numpy() < 0.5, 0, 1), (-1,)))\nprint(tf.cast(tf.reshape(y_train, (-1,)), tf.int32))\n","repo_name":"ppx9527/Deep-Learning-Course","sub_path":"2020-4-26/11-2.py","file_name":"11-2.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"19575089274","text":"from resources.game import Game\nfrom tkinter import *\nfrom collections import namedtuple\n\nRect = namedtuple('Rect', 'x0, y0, x1, y1')\ngame = Game()\ngame.create_deck()\n\nhand_ranking_names = {0: 'royal flush', 1: 'straight flush', 2: 'four of a kind', 3: 'full house', 4: 'flush',\n 5: 'straight', 6: 'three of a kind', 7: 'two pair', 8: 'one pair', 9: 'high card'}\n\n\ndef print_poker():\n print()\n print('---------TEXAS-HOLDEM-POKER--------')\n print()\n print('Main player cards: ' + str(game.get_main_player_cards()))\n print('Table open cards: ' + str(game.get_table_open_cards()))\n print('Hand Rank => Your Rank: ' + str(game.recognize_hand_ranking()) + ', ' + str(\n hand_ranking_names[game.recognize_hand_ranking()]).upper() + ' !')\n print('Your pre flop rank (1 is best and 10 is worst): ' + str(game.pre_flop_hand_analyze()))\n print('------OPPONENT-POSSIBILITIES-------')\n game.get_hand_ranking_counts()\n print('-----------------------------------')\n\n\ndef new_print():\n print()\n print('###################################')\n print('############# NEXT ################')\n print('###################################')\n\n\nclass ImageMapper(object):\n def __init__(self, image, img_rects):\n self.width, self.height = image.width(), image.height()\n self.img_rects = img_rects\n\n def find_rect(self, x, y):\n for i, r in enumerate(self.img_rects):\n if (r.x0 <= x <= r.x1) and (r.y0 <= y <= r.y1):\n return i, r.x0, r.x1, r.y0, r.y1\n return None\n\n\nclass Window(Frame):\n selected_cards = []\n canvas_rectangles = []\n burned_cards = []\n canvas_burned_rectangles = []\n colors = {0: 'LightGreen', 1: 'LightGreen', 2: 'red', 3: 'red', 4: 'red', 5: 'red', 6: 'red', 7: 'lightgrey'}\n cards = {0: ('C', 'A'), 1: ('C', '2'), 2: ('C', '3'), 3: ('C', '4'), 4: ('C', '5'), 5: ('C', '6'),\n 6: ('C', '7'), 7: ('C', '8'), 8: ('C', '9'), 9: ('C', '10'), 10: ('C', 'J'), 11: ('C', 'Q'),\n 12: ('C', 'K'), 13: ('D', 'A'), 14: ('D', '2'), 15: ('D', '3'), 16: ('D', '4'), 17: ('D', '5'),\n 18: ('D', '6'), 19: ('D', '7'), 20: ('D', '8'), 21: ('D', '9'), 22: ('D', '10'), 23: ('D', 'J'),\n 24: ('D', 'Q'), 25: ('D', 'K'), 26: ('H', 'A'), 27: ('H', '2'), 28: ('H', '3'), 29: ('H', '4'),\n 30: ('H', '5'), 31: ('H', '6'), 32: ('H', '7'), 33: ('H', '8'), 34: ('H', '9'), 35: ('H', '10'),\n 36: ('H', 'J'), 37: ('H', 'Q'), 38: ('H', 'K'), 39: ('S', 'A'), 40: ('S', '2'), 41: ('S', '3'),\n 42: ('S', '4'), 43: ('S', '5'), 44: ('S', '6'), 45: ('S', '7'), 46: ('S', '8'), 47: ('S', '9'),\n 48: ('S', '10'), 49: ('S', 'J'), 50: ('S', 'Q'), 51: ('S', 'K')}\n\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.master = 
master\n self.grid()\n\n self.msg_text = StringVar()\n self.msg = Message(self, textvariable=self.msg_text, width=640, bg='lightgrey')\n self.msg.grid(row=0, column=0)\n\n self.quit_button = Button(self, text='CLEAR', command=self.clear_selected_card, fg='white', bg='grey')\n self.quit_button.grid(row=1, column=0)\n\n self.canvas = Canvas(bg='grey', height=(4 * 76.75), width=(13 * 49.231))\n self.picture = PhotoImage(file='resources/card_deck.png')\n img_rects = []\n for y in range(4):\n for x in range(13):\n x0 = x * 49.231\n y0 = y * 76.75\n img_rects.append(Rect(x0, y0, x0 + 49.231, y0 + 76.75))\n self.image_mapper = ImageMapper(self.picture, img_rects)\n\n self.canvas.create_image(0, 0, image=self.picture, anchor=NW)\n self.canvas.bind('', self.image_click)\n self.canvas.bind(\"\", self.right_click)\n self.canvas.grid(row=2, column=0)\n\n def right_click(self, event):\n x0 = self.image_mapper.find_rect(event.x, event.y)[1]\n x1 = self.image_mapper.find_rect(event.x, event.y)[2]\n y0 = self.image_mapper.find_rect(event.x, event.y)[3]\n y1 = self.image_mapper.find_rect(event.x, event.y)[4]\n hit = self.image_mapper.find_rect(event.x, event.y)[0]\n card = self.cards[hit]\n if card in self.burned_cards or card in self.selected_cards:\n return\n game.burn_card(card)\n self.burned_cards.append(card)\n self.canvas_burned_rectangles.append(self.canvas.create_rectangle(x0, y0, x1, y1, fill=self.colors[7]))\n if len(self.selected_cards) >= 2:\n print_poker()\n\n def image_click(self, event):\n hit = self.image_mapper.find_rect(event.x, event.y)[0]\n hit = self.cards[hit]\n # avoid double selection\n if hit in self.selected_cards or hit in self.burned_cards:\n return\n self.selected_cards.append(hit)\n if len(self.selected_cards) == 2:\n new_print()\n game.distribute_cards(self.selected_cards)\n print_poker()\n self.msg_text.set('{} selected.'.format('Cards {}'.format(self.selected_cards)))\n if game.pre_flop_hand_analyze() < 8:\n print('Good pre flop hand. You should play!')\n else:\n print('Not a good idea to play with this pre flop hand. 
Bluff or fold!')\r\n        elif len(self.selected_cards) == 5:\r\n            new_print()\r\n            game.flop_cards(self.selected_cards[2], self.selected_cards[3], self.selected_cards[4])\r\n            self.msg_text.set('{} selected.'.format('Cards {}'.format(self.selected_cards)))\r\n            print_poker()\r\n            game.get_main_player_hand_ranking_probability(2)\r\n        elif len(self.selected_cards) == 6:\r\n            new_print()\r\n            game.flop_cards(self.selected_cards[2], self.selected_cards[3],\r\n                            self.selected_cards[4], self.selected_cards[5])\r\n            self.msg_text.set('{} selected.'.format('Cards {}'.format(self.selected_cards)))\r\n            print_poker()\r\n            game.get_main_player_hand_ranking_probability(1)\r\n        elif len(self.selected_cards) == 7:\r\n            new_print()\r\n            game.flop_cards(self.selected_cards[2], self.selected_cards[3],\r\n                            self.selected_cards[4], self.selected_cards[5], self.selected_cards[6])\r\n            self.msg_text.set('{} selected.'.format('Cards {}'.format(self.selected_cards)))\r\n            print_poker()\r\n        elif len(self.selected_cards) > 7:\r\n            self.start_new_round()\r\n            self.msg_text.set('{} selected.'.format('Cards {}'.format(self.selected_cards)))\r\n            return\r\n        x0 = self.image_mapper.find_rect(event.x, event.y)[1]\r\n        x1 = self.image_mapper.find_rect(event.x, event.y)[2]\r\n        y0 = self.image_mapper.find_rect(event.x, event.y)[3]\r\n        y1 = self.image_mapper.find_rect(event.x, event.y)[4]\r\n        self.canvas_rectangles.append(\r\n            self.canvas.create_rectangle(x0, y0, x1, y1, outline=self.colors[len(self.selected_cards) - 1],\r\n                                         fill='', width=5))\r\n\r\n    def clear_selected_card(self):\r\n        self.start_new_round()\r\n        self.msg_text.set('{} selected.'\r\n                          .format('nothing' if self.selected_cards is None else 'Cards {}'.format(self.selected_cards)))\r\n\r\n    def start_new_round(self):\r\n        for r in self.canvas_rectangles:\r\n            self.canvas.delete(r)\r\n        for b in self.canvas_burned_rectangles:\r\n            self.canvas.delete(b)\r\n        game.reset()\r\n        self.selected_cards = []\r\n        self.canvas_burned_rectangles = []\r\n        self.burned_cards = []\r\n        new_print()\r\n\r\n\r\n# Diamonds ('D'), Clubs ('C'), Hearts ('H'), Spades ('S')\r\n# Jack ('J'), Queen ('Q'), King ('K'), Ace ('A')\r\n# TODO ('SUIT', 'ICON')\r\n# game.distribute_cards([('C', 'J'), ('D', '10')])\r\n# game.flop_cards(('D', '8'), ('S', 'K'), ('H', 'A'), ('D', 'Q'))\r\n# This creates the main window of an application\r\nroot = Tk()\r\napp = Window(root)\r\nroot.title(\"Poker Possibilities\")\r\nroot.geometry(\"640x400\")\r\nroot.configure(background='lightgrey')\r\n# Start the GUI\r\nroot.mainloop()\r\n","repo_name":"Chralt98/python-poker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"30371839993","text":"import socket\r\nimport json\r\n\r\ndef stablish_communication(raspberry, port):\r\n    HOST = raspberry\r\n    PORT = port\r\n    \r\n    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n    print(f\"Attempting connection to IP {raspberry} on port {port}\")\r\n    conn.connect((HOST, PORT))\r\n    \r\n    room_name = conn.recv(1024)\r\n    conn.sendall((bytes(\"Sucesso\", encoding='utf-8')))\r\n    \r\n    outputs = conn.recv(1024)\r\n    conn.sendall((bytes(\"Sucesso\", encoding='utf-8')))\r\n\r\n    inputs = conn.recv(1024)\r\n    conn.sendall((bytes(\"Sucesso\", encoding='utf-8')))\r\n    \r\n    temp_sensor = conn.recv(1024)\r\n    conn.sendall((bytes(\"Sucesso\", encoding='utf-8')))\r\n    \r\n    return conn, \"\", str(room_name, encoding='utf-8'), json.loads(outputs), json.loads(inputs), json.loads(temp_sensor)\r\n\r\ndef send_data_to_client(data, conn, addr):\r\n    sensor_status = (bytes(json.dumps(data), encoding='utf-8'))\r\n    conn.sendall(sensor_status)\r\n\r\ndef 
receive_data_from_client(conn):\r\n    data = conn.recv(1024)\r\n    return json.loads(data)","repo_name":"renan601/FSE-Trabalho1-AutomacaoPredial","sub_path":"Central/comunication.py","file_name":"comunication.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"42192862214","text":"import os\nimport sys\n\n# look for the .glade file and the resources in an 'images' subdirectory below\n# this source file.\nGLADE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'images')\n\nENGRAVING_FONTS_DIR = os.path.join(os.environ['HOME'], 'gcode', 'engraving_fonts')\n\nUSER_FS_TABLES_DIR = os.path.join(os.environ['HOME'], 'gcode', 'fs_tables')\n\n# EMC2_HOME should have been prepared already, but just in case it has not, assume $HOME/tmc\ntry:\n    LINUXCNC_HOME_DIR = os.environ['EMC2_HOME']\nexcept KeyError:\n    os.environ['EMC2_HOME'] = os.path.join(os.environ['HOME'], 'tmc')\n\nLINUXCNC_HOME_DIR = os.environ['EMC2_HOME']\n\n# config file\nPATHPILOTJSON_FILEPATH = os.path.join(os.environ['HOME'], 'pathpilot.json')\n\n# eula agreed marker file\n# be sure to update eula.py and Makefile if you change the name of this.\nEULA_AGREED_FILEPATH = os.path.join(LINUXCNC_HOME_DIR, 'eula_agreed.txt')\n\n# test automation file\nPATHPILOT_TEST_AUTOMATION_JSON_FILEPATH = os.path.join(os.environ['HOME'], 'pathpilot_testautomation.json')\n\n# localized string resources directory\nRES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'res')\n\n# NetBIOS name configuration file\nnetbios_name_conf_file = os.path.join(os.environ['HOME'], 'smb.conf.netbios-name')\n\n# base path to user's gcode file on disk\nGCODE_BASE_PATH = os.path.join(os.environ['HOME'], 'gcode')\n\n# where release note pdfs are located\n# if you modify this path, you must look at postinstall.sh for string/name dependencies (and maybe Makefile)\nRELEASE_NOTE_PDFS_PATH = os.path.join(GCODE_BASE_PATH, 'ReleaseNotes')\n\n# where log files are located\nLOGFILE_BASE_PATH = os.path.join(GCODE_BASE_PATH, 'logfiles')\nGCODELOGFILE_BASE_PATH = os.path.join(LOGFILE_BASE_PATH, 'gcode_log.txt')\nSTATSFILE_BASE_PATH = os.path.join(LOGFILE_BASE_PATH, 'gcode_stats_cache.json')\n\n# where screen captures are located\nSCREENSHOT_BASE_PATH = os.path.join(GCODE_BASE_PATH, 'logfiles')\n\n# where images are located\nIMAGES_BASE_PATH = os.path.join(GCODE_BASE_PATH, 'images')\n\n# where the touchscreen calibration data is located\nTOUCHSCREEN_CALIBRATION_FILE = os.path.join(GCODE_BASE_PATH, 'pointercal.xinput')\n\n# where to look for USB stick mount point\nUSB_MEDIA_MOUNT_POINT = '/media'\n# automount puts it at /media in Lucid 10.04 and /media/$USER for 14.04 or newer\n# if /etc/debian_version == 'squeeze/sid' then it's Lucid\n# otherwise it's Mint 17.3 (14.04 based) or newer and the mount point is /media/$USER\nDEBIAN_VERSION = 'unknown'\nwith open('/etc/debian_version', 'r') as f:\n    DEBIAN_VERSION = f.readline().strip()\nif DEBIAN_VERSION != 'squeeze/sid':\n    USB_MEDIA_MOUNT_POINT += '/' + os.getenv('USER')\n\nPATHPILOT_UPDATE_EXTENSION = 'tgp'\n\n# where to look for CAM Toollib updates\nCAM_TOOLIB_BASE_PATH = USB_MEDIA_MOUNT_POINT\nCAM_TOOLIB_EXTENSION = 'json'\n\n# where to look for software updates\nSOFTWARE_UPDATE_BASE_PATH = USB_MEDIA_MOUNT_POINT\nSOFTWARE_UPDATES_ON_HD_PATH = os.path.join(os.environ['HOME'], 'updates')\n\n# where we download update checking data from the internet\nSOFTWARE_UPDATE_CHECK_PATH = os.path.join(os.environ['HOME'], 
'updatecheck')\n\nLINUXCNC_GCODE_FILE_NAME = 'very-unlikely-pathpilot-gcode.file'\nLINUXCNC_GCODE_FILE_PATH = os.path.join('/tmp', LINUXCNC_GCODE_FILE_NAME)\n\n# remote screen sharing program\n# make uppercase for MDI\nREMOTE_SCREEN_PROGRAM = 'teamviewer'\nMDI_REMOTE_SCREEN_PROGRAM = str.upper(REMOTE_SCREEN_PROGRAM)\n\nCLEAR_CURRENT_PROGRAM = 'Clear Current Program'\nEMPTY_GCODE_FILENAME = 'empty.ngc'\n\nLOG_CPU_USAGE_THRESHOLD_NOISEFLOOR = 18\nLOG_CPU_USAGE_THRESHOLD_ALWAYS = 70\n\n# thread data\nTHREAD_BASE_PATH = os.path.join(GCODE_BASE_PATH, 'thread_data')\nTHREAD_DATA_SAE = os.path.join(THREAD_BASE_PATH, 'threads_sae.txt')\nTHREAD_DATA_METRIC = os.path.join(THREAD_BASE_PATH, 'threads_metric.txt')\nTHREAD_DATA_SAE_CUSTOM = os.path.join(THREAD_BASE_PATH, 'custom_threads_sae.txt')\nTHREAD_DATA_METRIC_CUSTOM = os.path.join(THREAD_BASE_PATH, 'custom_threads_metric.txt')\n\n\n# material data\n# For now, read out of the ~/tmc directory instead of the user facing gcode directory\n# until we have a better feel of how we want users customizing the data\nMATERIAL_BASE_PATH = os.path.join(LINUXCNC_HOME_DIR, 'material_data')\n\nTHREAD_CUSTOM_DELIMITER = 'USER'\nTHREAD_TORMACH_DELIMITER = 'TORMACH'\nTHREAD_MAX_PASSES = 99\n\n# Constants\nALARM_LEVEL_NONE = -1\nALARM_LEVEL_DEBUG = 0\nALARM_LEVEL_QUIET = 1\nALARM_LEVEL_LOW = 2\nALARM_LEVEL_MEDIUM = 3\nALARM_LEVEL_HIGH = 4\n\n# normal exit\nEXITCODE_SHUTDOWN = 0\n# 1 and 2 used by python\nEXITCODE_RESERVED_1 = 1\nEXITCODE_RESERVED_2 = 2\n# used to tell operator login to change something\nEXITCODE_CONFIG_CHOOSER = 11\nEXITCODE_MILL2RAPIDTURN = 12\nEXITCODE_RAPIDTURN2MILL = 13\nEXITCODE_SETTINGSRESTORE = 14\nEXITCODE_REBOOTAFTERUPDATE = 15\nEXITCODE_RESOLUTIONCHANGE = 16\nEXITCODE_CONFIG_FAILED = 17\nEXITCODE_UPDATE_ATC_FIRMWARE = 18\nEXITCODE_ATC_FIRMWARE_INIT = 19\n\n# 137 used by python when process gets kill signal\nEXITCODE_PROCESS_WAS_KILLED = 137\n\n# *BASIC* MACHINE Type used by self.machine_type\n# used when sharing code between lathe & mill, e.g. when\n# code is conditionally executed based on machine type\nMACHINE_TYPE_UNDEF = 0 # not yet defined\nMACHINE_TYPE_MILL = 1\nMACHINE_TYPE_LATHE = 2\n\n# these must match in the tool changer HAL component\nTOOL_CHANGER_TYPE_MANUAL = 0\nTOOL_CHANGER_TYPE_GANG = 1\nTOOL_CHANGER_TYPE_TURRET = 2\nTOOL_CHANGER_TYPE_ZBOT = 3\n\n#To fully define a TURRET type, see hal and specific.ini files\n#you must set the following:\n#[TURRET]POSITION_FB_TYPE\n#[TURRET]POSITIONS\n#[TURRET]HAS_LOCKING_FB\n\n# not used in python code, but keep in sync for use in specific.ini's and hals. Values are for POSITION_FB_TYPE\n#POSITION_FEEDBACK_BINARY = 0\n#POSITION_FEEDBACK_1HOT = 1\n\n# redis string for machine_prefs, toolchanger_type\nMILL_TOOLCHANGE_TYPE_REDIS_ZBOT = 'zbot'\nMILL_TOOLCHANGE_TYPE_REDIS_MANUAL = 'manual'\n\n# tool table\nMAX_LATHE_TOOL_NUM = 99\nMAX_NUM_MILL_TOOL_NUM = 1000\nNOSE_RADIUS_STANDARD = 0.0315/2\nMILL_PROBE_TOOL_NUM = 99\nMILL_PROBE_TOOL_DESCRIPTION = \"Mill Probe (diameter is effective, not actual)\"\n#MILL_PROBE_POCKET_NUM = 55\nMILL_PROBE_POCKET_NUM = 99\n\n# this is the number of 50mS period poll loops, (time in secs)*20 = COOLANT_LOCK_OUT_PERIOD\nCOOLANT_LOCK_OUT_PERIOD = 20\n\n# Mill spindle types - do not change values. 
These are stored in redis as\n# the user-chosen setting - but also used in linuxcnc tormach spindle components.\nSPINDLE_TYPE_STANDARD = 0\nSPINDLE_TYPE_SPEEDER = 1\nSPINDLE_TYPE_HISPEED = 2\nSPINDLE_TYPE_RAPID_TURN = 3\n\n# DRO 'has focus' color\nHIGHLIGHT = '#24F7ED'\nORANGE = \"#F57F0A\"\nBLUE = \"#40AFFF\"\nGREY = '#918C8E'\nBLACK = '#000000'\nRED = '#C53232'\nWHITE = '#F3F5EB'\nROW_HIGHLIGHT = '#F0ED99'\n\n\n# Button permissions\n#\n# First we define mutually exclusive states of the machine (STATE_xxx)\n# Then each button has a permitted_states attribute which is a bit mask built\n# from the STATE_xxx constants. If the machine is in a state that corresponds\n# to one of the permitted ones, then the widget action is allowed.\n#\n# Checking valid state is easy - just bitwise AND the current state of the machine\n# with the button mask. If the result is non-zero, the button is allowed.\n#\n# This also enables easy common error messaging where the permitted states for the\n# widget can be described.\n\nSTATE_ESTOP = 0x00000001\nSTATE_IDLE = 0x00000002\nSTATE_IDLE_AND_REFERENCED = 0x00000004\nSTATE_HOMING = 0x00000008\nSTATE_MOVING = 0x00000010\nSTATE_PAUSED_PROGRAM = 0x00000020\nSTATE_RUNNING_PROGRAM = 0x00000040\nSTATE_RUNNING_PROGRAM_TOOL_CHANGE_WAITING_ON_OPERATOR = 0x00000080\n\n# handy sets of the above bitfields can easily be defined also\nSTATE_ANY = 0xFFFFFFFF\n\n\n\n# -------------------------------------------\n# Zbot ATC\n# -------------------------------------------\n\nATCFIRMWARE_PATH = os.path.join(LINUXCNC_HOME_DIR, 'firmware/atc')\nATCFIRMWARE_FILENAME = 'atcfirmware-2.13.9.zip'\nATCFIRMWARE_VERSION = '2.13.9'\nATCFIRMWARE_VERSION_SPECIALCASE = '2.11.5'\n\n# -------------------------------------------\n# ATC/TTS DIMENSIONS\n# -------------------------------------------\nATC_TRAY_TOOLS = 10\nATC_COMPRESSION = .015 #squish constant\nATC_BLAST_DISTANCE = .75 #distance from tool holder rim\nATC_SHANK_JOG_TTS = 1.575 # a bit over shank length\nATC_SHANK_JOG_BT30 = 2.85 # BT30\nATC_SHANK_JOG_ISO20 = 2.175 # ISO20 High speed\nATC_JOG_SPEED = 120 # Straight Shank Tooling\nATC_TAPER_TOOLING_SPEED = 300 # Taper Tooling w pullstuds\nATC_UP_A_BIT = .010 # small amount to jog to clear tool tip\n\n#HAL pin commands - for request pin\nATC_SOLENOID = 1\nATC_DRAW_BAR = 2\nATC_INDEX_TRAY = 3\nATC_QUERY_SENSOR = 4\nATC_FIND_HOME = 5\nATC_OFFSET_HOME = 6\nATC_REPORT_STATUS = 7\nATC_SPINDLE_LOCK = 8\n\n\n#Special command\nATC_KILL_SPINDLE = 0\n\n\n# ATC Data map - for NGC request_data pin\nATC_TRAY_SOLENOID = 1\nATC_BLAST_SOLENOID = 2\nATC_DRAW_BAR_SOLENOID = 3\nATC_SPDL_LK_SOLENOID = 4\n\nATC_PRESSURE_SENSOR = 1\nATC_TRAY_IN_SENSOR = 3\nATC_TRAY_OUT_SENSOR = 5 #deprecated\n\nATC_VFD_SENSOR = 6\nATC_DRAW_SENSOR = 7\nATC_LOCK_SENSOR = 8\nATC_TRAYREF_SENSOR = 9\n\nATC_ALL_SENSORS = 0\nATC_ALL_SENSORS_LIST = [ ATC_PRESSURE_SENSOR,\n                         ATC_TRAY_IN_SENSOR,\n                         ATC_VFD_SENSOR,\n                         ATC_DRAW_SENSOR,\n                         ATC_LOCK_SENSOR,\n                         ATC_TRAYREF_SENSOR ]\nATC_ACTIVATE = ATC_SET_UP = 1\nATC_DEACTIVATE = ATC_SET_DOWN = 0\nATC_ON = True\nATC_OFF = False\n\n\n#HAL pins for ATC\n#broadcast we are in a change\nATC_HAL_IS_CHANGING = 16 #digital output pin\n\n#from Motion Control\nATC_REQUEST_SPINDLE_LOCK = \"0.din.0.request_lock\"\nATC_READ_ORIENT_EXECUTE = \"0.din.1.orient_execute\"\n\n#from TormachSpindle Component\nATC_READ_ORIENT_STATUS = \"0.ain.8.read_orient_stat\"\n\n#from NGC\nATC_HAL_REQUEST_NGC = \"0.ain.0.request\"\nATC_HAL_REQUEST_DATA_NGC =\"0.ain.1.request_data\"\n\n#from GUI\nATC_HAL_REQUEST_GUI = 
\"0.ain.2.request\"\nATC_HAL_REQUEST_DATA_GUI =\"0.ain.3.request_data\"\n\n\n\n#from either GUI or NGC (this is a motion control analog output pin no to send sequence)\nATC_HAL_SEQ_NO_OUT_PIN_NO= 6\nATC_HAL_COMMAND_OUT_PIN_NO= 7\nATC_HAL_DATA_OUT_PIN_NO= 8\n\n# to either GUI or NGC (this it the motion control analog input put for echo sequence - mapped in post gui hal to )\n# ATC_HAL_REQUEST_ACK )\nATC_HAL_SEQ_NO_IN_PIN_NO = 7\n\n#hal outputs\nATC_HAL_BUSY = \"0.dout.5.exec_status\" # busy?\nATC_HAL_RC = \"0.aout.0.request_rc\" # return code from last operation\nATC_HAL_TRAY_POS = \"0.aout.1.tray_position\" # current tool tray index position\nATC_PRESSURE_STAT = \"0.dout.3.pressure_status\" # current pressure switch status\nATC_HAL_TRAY_STAT = \"0.dout.0.tray_status\" # current tray actuator status - in or out?\nATC_HAL_VFD_STAT = \"0.dout.1.vfd_status\" # actual spindle feedback from VFD - running?\nATC_HAL_DRAW_STAT = \"0.dout.2.draw_status\" # draw bar solenoid - on or off?\nATC_HAL_LOCK_STAT = \"0.dout.6.lock_status\" # spindle lock solenoid\nATC_HAL_DEVICE_STAT = \"0.dout.4.device_status\" # USB communications channel status\nATC_HAL_TRAYREF_STAT = \"0.dout.9.trayref_status\" # tray referenced or not?\nATC_HAL_REQUEST_ACK = \"0.aout.2.request_ack\" # sequence number echo for last command\n\n#USB commands\n\nUSB_VERSION = \"VE\\r\" #used to get firmware version, tools, and VFD detection\nUSB_VERSION_LONG = \"VL\\r\" #used to retrieve all other data, BT30, etc...\nUSB_TRAY_IN = str(ATC_TRAY_SOLENOID) + \"+\\r\"\nUSB_TRAY_OUT = str(ATC_TRAY_SOLENOID) + \"-\\r\"\nUSB_BLAST_ON = str(ATC_BLAST_SOLENOID) + \"+\\r\"\nUSB_BLAST_OFF = str(ATC_BLAST_SOLENOID) + \"-\\r\"\nUSB_DRAW_HIGH_PRESS = str(ATC_SPDL_LK_SOLENOID) + \"+\\r\" #active high pressure\nUSB_DRAW_LOW_PRESS = str(ATC_SPDL_LK_SOLENOID) + \"-\\r\" #default is low pressure (in case of failure)\n#------------------------------------------------------------------------------------------\n# In version 1 boards, the draw bar is operated by a signal to the PDB control board\n# In version 2 boards and higher, the PDB control board is deprecated and the ATC board controls]\n# the draw bar directly\n# So there is a little asymmetry between solenoid operations. 
Tray, Blast, and Lock are explicitly\n# commanded, whereas solenoid 3 is implicit in the D+ and D- commands.\n#-------------------------------------------------------------------------------------------\n\nUSB_DB_ACTIVATE = \"D+\\r\" #solenoid 3 in Version 2 board and above\nUSB_DB_DEACTIVATE = \"D-\\r\" #solenoid 3 in Version 2 board and above\nUSB_INDEX_TRAY = \"T\"\nUSB_QUERY = \"Q\"\nUSB_STATUS = \"ST\\r\"\nUSB_FIND_HOME = \"FH\\r\"\nUSB_OFFSET_UP = \"H+\\r\"\nUSB_OFFSET_DOWN = \"H-\\r\"\n\n#USB response\nUSB_OK = '.'\nUSB_ON = '+'\nUSB_OFF = '-'\nUSB_REJECT = 'X'\nUSB_UNKNOWN = '?'\n\n#DIO AIO PIN NUMBERS\nDEVICE_PIN = 21 #atc is communicating\nEXEC_PIN = 16 #busy pin (0 executing, 1 working)\nREQUEST_PIN = 4 #command (solenoid, draw bar, tray index, etc..)\nREQUEST_DATA_PIN = 5 #data qualifier for command (slot number, solenoid number, etc...)\nHAL_RETURN_PIN = 5 #return code from last command\nPROMPT_REPLY_PIN = 10 #analog reply pin for NGC M6 prompts\nPROMPT_SET_PIN = 10 #looped back to the above to allow setting\nSPDL_ORNT_STATUS_PIN = 8 #used to detect BT30 orientation state from NGC - must be read with\n                          # M66 to be current state\nSPDL_IS_LOCKED = 55 #spindle is locked DIO pin\n\n#HAL COMPONENT RC VALUES\n\nATC_OK = ATC_SENSOR_OFF = 0\nATC_SENSOR_ON = 1\nATC_COMMAND_REJECTED_ERROR = -1\nATC_USB_HOMING_ERROR = -2\nATC_TIMEOUT_ERROR = -3\nATC_UNKNOWN_USB_RESP_ERROR = -4\nATC_UNKNOWN_USB_COMMAND_ERROR= -5\nATC_TRAY_ERROR = -6\nATC_USB_IO_ERROR = -7\nATC_UNKNOWN_REQUESTED_PIN = -8\nATC_PRESSURE_FAULT = -9\nATC_NOT_FOUND = -10\nATC_GENERAL_TRAP = -11\nATC_REF_FIRST = -12\nATC_USER_CANCEL = -13 #appropriate number\nATC_GENERAL_ERROR = -14\nATC_INTERFACE_BUSY = -16 # set in NGC hal interface only\nATC_INTERFACE_ERROR = -17 # set in NGC hal interface only\nATC_SPINDLE_RUNNING = -18 # set in NGC hal interface only\nATC_SPINDLE_ORIENT_ERROR = -19 # BT 30 orientation error\nATC_SPINDLE_LOCK_ERROR = -20 # BT 30 spindle lock malfunction\nATC_INTERNAL_CODE_ERROR = -21 # bad internal logic somewhere, see log\n\n#ATC HAL ERROR MESSAGES - the text key matches the rc values above\n\n\nATC_HAL_MESSAGES = {ATC_COMMAND_REJECTED_ERROR : 'ATC - Device, command rejected',\n                    ATC_USB_HOMING_ERROR : 'ATC - Device, homing error',\n                    ATC_TIMEOUT_ERROR : 'ATC - USB, comms timeout error',\n                    ATC_UNKNOWN_USB_RESP_ERROR : 'ATC - Device, issued invalid USB response',\n                    ATC_UNKNOWN_USB_COMMAND_ERROR : 'ATC - Device, issued command unknown',\n                    ATC_TRAY_ERROR : 'ATC - Device, tray sensor not detecting arrival',\n                    ATC_USB_IO_ERROR : 'ATC - USB I/O error',\n                    ATC_UNKNOWN_REQUESTED_PIN : 'ATC - HAL command unknown',\n                    ATC_PRESSURE_FAULT : 'ATC - Insufficient air pressure',\n                    ATC_NOT_FOUND : 'ATC - Cannot find USB device',\n                    ATC_REF_FIRST : 'ATC - Cannot offset - reference tool tray first',\n                    ATC_USER_CANCEL : 'ATC - Action cancelled by STOP/RESET',\n                    ATC_GENERAL_ERROR : 'ATC - Action cancelled due to error',\n                    ATC_SPINDLE_ORIENT_ERROR : 'ATC - Spindle orientation error',\n                    ATC_SPINDLE_LOCK_ERROR : 'ATC - Spindle lock error'}\n\n\nEMC_OPERATOR_ERROR_TYPE =11\nEMC_OPERATOR_TEXT_TYPE =12\nEMC_OPERATOR_DISPLAY_TYPE =13\n\n# G0 rapid moves changed to G1 feedrate moves to limit speed\nMAX_PROBE_RAPID_FEEDRATE = 200.0\nDEFAULT_PROBE_RAPID_FEEDRATE = 135.0\n\n# values for UEV checking - in imperial (machine setup) units\nMAX_PROBE_FINE_FEEDRATE = 60.0\nMAX_PROBE_ROUGH_FEEDRATE = 60.0\nDEFAULT_PROBE_ROUGH_FEEDRATE = 25.0\nDEFAULT_PROBE_FINE_FEEDRATE = 1.5\n\n# ETS fine probe feed rate is fixed for consistency\n# units are inches 
per minute\nETS_FINE_FEEDRATE = 2.5\n\nMAX_PROBE_RING_GAUGE_DIAMETER = 6.0\nDEFAULT_PROBE_RING_GAUGE_DIAMETER = 1.0\nDEFAULT_PROBE_TIP_ACTUAL_DIAMETER = 0.118\n\n# used to validate DRO entry\nMAX_PROBE_TIP_DIAMETER = 0.750\n\n# analog pins used by probe tab DROs\nPROBE_X_PLUS_AOUT = 11\nPROBE_X_MINUS_AOUT = 12\nPROBE_Y_PLUS_AOUT = 13\nPROBE_Y_MINUS_AOUT = 14\nPROBE_Z_MINUS_AOUT = 15\nPROBE_Y_PLUS_A_AOUT = 16\nPROBE_Y_PLUS_B_AOUT = 17\nPROBE_Y_PLUS_C_AOUT = 18\nPROBE_POCKET_DIAMETER_AOUT = 19\n\nESTOP_ERROR_MESSAGE = 'Machine has been estopped. Restore power to machine and click Reset button to continue.'\n\n# \" - \" added to prevent message from getting filtered\nX_LIMIT_ERROR_MESSAGE = 'X axis limit switch active'\nY_LIMIT_ERROR_MESSAGE = 'Y axis limit switch active'\nZ_LIMIT_ERROR_MESSAGE = 'Z axis limit switch active'\nX_Y_LIMIT_ERROR_MESSAGE = 'X/Y axis limit switch(es) active'\n\n# this is used for lathe where they are in serial\nX_Z_LIMIT_ERROR_MESSAGE = 'X/Z axis limit switch(es) active'\n\n\n########## keep these in sync with usage in motor components\n#\"Status\" codes returned from component usually as result of motor command (MFB_POSITION is the exception)\n# MFB = Motor Feed Back\nMFB_OK = 0 #motor component understood and acted on last command and so far has no error executing it.\nMFB_NO_HOMING = 1 #this axis motor has no inherent homing function\nMFB_CMD_INVALID = 2 #unexpected command, i.e. comp is in invalid state to accept it now ; we ignored it\nMFB_POSITION = 3 #axis motor reported temporary position error. PP UI is expected to clear this error with MOTOR_CMD_ACK_POS command\nMFB_CMD_UNKNOWN = 4 #unknown command; we ignored it\nMFB_FAULT = 5 #axis motor has faulted. Could be a fault or no power to motor axis. We don't know\n#note MFB_FAULT must be highest constant as code invalidates codes > MFB_FAULT\n\n#axis motor linuxcnc States keep in sync with linuxcnc motor comps\nMS_DISABLED = 0 #axis motor has been commanded disabled\nMS_WAIT = 1 #motor comp is initializing motor; please wait\nMS_HOME_SEARCHING = 2 #motor comp is expecting PP UI to be executing homing; we haven't detected home yet\nMS_HOME_WAITING = 3 #linuxcnc motor component has detected home and PP must wait for MS_HOME_COMPLETE state before stopping motion command\nMS_HOME_COMPLETE = 4 #linuxcnc motor component has completed homing detection. We expect Linuxcnc is still finishing backoff, etc.\nMS_NORMAL = 5 #linuxcnc motor component is active and is not in any hardstop mode\nMS_FAULT = 6 #linuxcnc motor component is inactive. We've detected a fault or via ESTOP has been powered off, this comp can't really tell. (but UI can infer)\n\n#Commands to linuxcnc motor components -- KEEP THESE in sync!\nMOTOR_CMD_NONE = 0 #there is no command, or cleared/did/acknowledge last command. We use this on the bi-directional i/o to \"edge detect\" commands and signal to UI, if it cares to look (it doesn't presently)\nMOTOR_CMD_DISABLE = 1 #Disable motor. The motor is disabled immediately so it will no longer move or actively hold position. Brake will be automatically applied for Z axis\nMOTOR_CMD_NORMAL = 2 #Enable the axis motor for normal operation. The comp may pass through \"MS_WAIT\" state before assuming \"MS_NORMAL\" state\nMOTOR_CMD_HOME = 3 #Enable the axis motor for internal homing. After \"MS_WAIT\", we enter \"MS_HOME_WAITING\" state.\n                   #Then this comp assumes PP will issue homing commands and movements to linuxcnc. 
Eventually, we transition through \"MS_HOME_WAITING\" and then \"MS_HOME_COMPLETE\"\nMOTOR_CMD_ACK_HOME = 4 #When homing is finished, use this command to transition from \"MS_HOME_COMPLETE\" to \"MS_NORMAL\"\nMOTOR_CMD_ACK_POS = 5 #UI uses this command to acknowledge temporary MFB_POSITION \"errors\". Motor comp will clear this command by changing from MFB_POSITION to MFB_OK and doesn't change motor state\nMOTOR_CMD_ACK_ERR = 6 #UI uses this command to acknowledge all other errors. This comp will clear previous UI command errors to MFB_OK if axis motor hasn't faulted.\n############## end of defines that need to be kept in sync in all linuxcnc motor comps\nPOLL_ALL_AXES = 255 # used with axis_motor_poll routine and is default\n\n# axis max unhomed velocity percent for jog speed clamping on machines with servo axis motors (1 = 100%)\nAXIS_SERVOS_CLAMP_VEL_PERCENT = .05\n\n# This \"magic number\" is the index of the digit in the usbio_output_#_led labels\n# if ever anyone changes the labels for usbio_output_#_led and/or usbio_output_#_led_button, watch this!\nUSBIO_STR_DIGIT_INDEX = 13\n#The mapping for the USBIO M64 and M65 commands\nUSBIO_LATHE_HAL_OFFSET = 5\n\n# This protects against coding bugs that send conversational gcode generation into\n# infinite loops. Those are hard to get details on from a customer as memory is rapidly exhausted and\n# the controller goes into swap seizure and the mouse even becomes completely unresponsive.\nCONVERSATIONAL_MAX_LINES = 750000\n\nUPDATE_PTR_FILE = os.path.join(os.environ['HOME'], 'update_file.txt')\n\n# Bitfield constants for hal debug-level pin from gui\nDEBUG_LEVEL_ATC = 0x00000001\nDEBUG_LEVEL_ATC_VERBOSE = 0x00000002\nDEBUG_LEVEL_SMARTCOOL = 0x00000004\n\n# spindle orienting -- keep in sync with tormachspindlem200.comp!\nISTATE_DONE = 0x2\nISTATE_PAST = 0x10\nORIENT_ERR_MULTIPLE_COMMANDS = -1\nORIENT_ERR_NO_ZINDEX = -2\n\nBT30_OFFSET_INVALID = 0x0BAD0BAD #Invalid BT30 offset. Nominally ranges from +/- (Encoder Counts Per Rev/2)\n\n#KEEP in sync with m200.comp\nSPINDLE_NO_FAULT = (0) #M200 VFD is powered and has no detected faults. Non m200 VFDs will always return this \"OK\"\nSPINDLE_OK_POSSIBLE_WIRING_ISSUE = (1) #expected temporary VFD Fault signal not seen during VFD power up. Error effectivity is ignored. VFD has power however you may have T41 and T42 wiring issue.\nSPINDLE_FAULT_NO_K2 = (2) #24VDC from VFD is not present. K2 has not been activated yet\nSPINDLE_FAULT_VFD_FAULT_M5 = (3) #VFD signaled fault while not being commanded to move; may be spindle door opening\nSPINDLE_FAULT_LOST_PWR = (4) #VFD lost power. It had it; may be spindle door opening\nSPINDLE_FAULT_LOG_ONLY = (SPINDLE_FAULT_LOST_PWR) # spindle faults at or below this are only logged. above this point user will get indication\n\nSPINDLE_FAULT_BT30 = (5) #spindle not stationary or not in correct mode for setting BT30A\nSPINDLE_FAULT_BT30_Z_INDEX = (6) #set BT30 attempted before z index was seen\nSPINDLE_FAULT_ORIENT_TO = (7) #orient time out detected. Linuxcnc already notified user about T.O., so only log it\nSPINDLE_FAULT_ORIENT_MAX_ROTATION = (8) #orient failed due to excessive number of revolutions. Check encoder\nSPINDLE_FAULT_NO_ZINDEX = (9) #orient attempted and Z index not found. Check encoder\nSPINDLE_FAULT_INVALID_BT30_OFFSET = (10) #BT30 orientation zero offset has not been set. 
Read user manual.\nSPINDLE_FAULT_NO_ENCODER = (11) #M19 or spindle sync mode was requested yet machine has no encoder or incorrect machine type selected\nSPINDLE_FAULT_VFD_DOESNT_SUPPORT = (12) #This machine's VFD does not support M19 or automatic VFD mode switching.\nSPINDLE_FAULT_SPINDLE_TYPE = (13) #Orient mode requested with spindle type that doesn't support it\nSPINDLE_FAULT_VFD_RPM = (14) #VFD motor speed feedback absent. Check VFD wiring.\nSPINDLE_FAULT_NO_PWR = (15) #24VDC from VFD is still not present after K2 presumably activated. Check spindle door switch, mill circuit breakers, or VFD wiring\nSPINDLE_FAULT_VFD_PRIOR_FAULT = (16) #spindle commanded to start but VFD is faulted. Look at VFD for error code and press VFD orange button to clear fault. Or ESTOP to power cycle VFD before continuing.\nSPINDLE_FAULT_MODE_SWITCH = (17) #expected VFD Mode switch acknowledgment did not occur. Is machine configured right, do you have VFDmx, wiring errors\nSPINDLE_FAULT_CRITICAL = (SPINDLE_FAULT_MODE_SWITCH) #vfd faults that are this level and above cause software ESTOP\nSPINDLE_FAULT_VFD = (18) #VFD reported a fault during rotation. Check spindle door switch and look for error code on VFD.\nSPINDLE_FAULT_LOCK_WITHOUT_PRIOR_ORIENT = (19) #This shouldn't happen. Linuxcnc state seems confused\n\n\nSPINDLE_ERR_MSGS = {\nSPINDLE_NO_FAULT : 'Spindle: M200 OK',\nSPINDLE_OK_POSSIBLE_WIRING_ISSUE : 'Spindle: Possible VFD wiring issue. Check T41 and T42',\nSPINDLE_FAULT_NO_K2 : 'Spindle: VFD off as K2 not yet latched',\nSPINDLE_FAULT_BT30 : 'Spindle: Not stationary or in correct spindle mode for setting BT30A',\nSPINDLE_FAULT_BT30_Z_INDEX : 'Spindle: BT30 offset attempted before z index',\nSPINDLE_FAULT_ORIENT_TO : 'Spindle: Orient time out detected. Linuxcnc already notified user about T.O. right?',\nSPINDLE_FAULT_ORIENT_MAX_ROTATION : 'Spindle: Orient moved more than 2 revs. Check encoder',\nSPINDLE_FAULT_NO_ZINDEX : 'Spindle: Z index not found during orient attempt. Check encoder',\nSPINDLE_FAULT_INVALID_BT30_OFFSET : 'Spindle: Refusing to orient as M19 R=0 position has not yet been set. Please read user manual.',\nSPINDLE_FAULT_NO_ENCODER : 'Spindle: M19 or spindle sync mode requested yet machine has no encoder. Possible incorrect machine type selected',\nSPINDLE_FAULT_VFD_DOESNT_SUPPORT : 'Spindle: VFD does not support M19 or automatic VFD mode switching.',\nSPINDLE_FAULT_SPINDLE_TYPE : 'Spindle: Orient requested with spindle type that does not support it',\nSPINDLE_FAULT_VFD_RPM : 'Spindle: VFD motor speed feedback absent. Check VFD wiring.',\nSPINDLE_FAULT_MODE_SWITCH : 'Spindle: Expected VFD Mode switch fault. Is machine configured right? Do you have VFDmx? Check for wiring errors',\nSPINDLE_FAULT_VFD : 'Spindle: VFD faulted while rotation commanded. Check spindle door switch then look for error code on VFD',\nSPINDLE_FAULT_NO_PWR : 'Spindle: VFD has no power after K2 activated. Check spindle door switch, mill circuit breakers, VFD wiring, or K2',\nSPINDLE_FAULT_VFD_PRIOR_FAULT : 'Spindle: VFD in fault state prior to spindle start. Check VFD for error code. Press VFD orange button to clear fault or ESTOP to power cycle VFD before continuing.',\nSPINDLE_FAULT_LOST_PWR : 'Spindle: VFD lost power. It had it. Check spindle door switch',\nSPINDLE_FAULT_LOCK_WITHOUT_PRIOR_ORIENT : 'Spindle: This should not happen. Linuxcnc state seems confused',\nSPINDLE_FAULT_VFD_FAULT_M5: 'Spindle: VFD faulted while stopped. User possibly opened spindle door. Check spindle door switch'}\n
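\n# A minimal illustrative sketch (editorial assumption - not part of the original\n# file): classifying a fault code using the severity thresholds defined above.\ndef spindle_fault_severity(fault_code):\n    if fault_code <= SPINDLE_FAULT_LOG_ONLY:\n        return 'log'     # at or below this level, faults are only logged\n    if fault_code >= SPINDLE_FAULT_CRITICAL:\n        return 'estop'   # at or above this level, faults cause software ESTOP\n    return 'notify'      # levels in between are surfaced to the user\n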
\n# ADMIN SET_AXIS_SCALE_FACTOR X 1.0001\nAXIS_SCALE_FACTOR_MIN = 0.995\nAXIS_SCALE_FACTOR_MAX = 1.005\n\n# ADMIN SET_AXIS_BACKLASH X 0.0015\n# this is inches\nAXIS_BACKLASH_MAX = 0.005\n","repo_name":"jbarillaro-vt/tormach-pcnc1100","sub_path":"operator/pathpilot.fallback/python/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":27183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"33262651318","text":"\"\"\"\nThis module houses a helper class, which performs helper operations\nrelated to the settings namespace.\n\"\"\"\nfrom base_helper import baseNamespaceHelper\n\n# due to incomplete command definitions in the muse settings namespace\n# xml, we have manually defined some settings routes here instead of using the\n# autogenerated muse_rest client\nIR_CONTROL_URL = \"/ircontrol\"\nEDID_STATUS_URL = \"/hdmi/edid/\"\nHDMI_STATUS_URL = \"/hdmi/status/\"\nHDMI_HPD_TOGGLE_URL = \"/hdmi/powercycle\"\nHH_SETTINGS_STUB = \"/households/{}/settings/\"\nAUTO_UPDATE_URL = \"restricted/autoUpdatesEnabled\"\n\n\nclass settingsNamespaceHelpers(baseNamespaceHelper):\n    \"\"\"\n    Class to manage helper methods for the muse settings namespace\n    \"\"\"\n    def __init__(self, restClient):\n        \"\"\"\n        Initializer\n\n        :param restClient: instance of the MuseRestClient class\n        \"\"\"\n        super(settingsNamespaceHelpers, self).__init__(\n            restClient=restClient)\n        self._ir_url = IR_CONTROL_URL\n        self._edid_url = EDID_STATUS_URL\n        self._hdmi_status_url = HDMI_STATUS_URL\n        self._hdmi_hpd_toggle_url = HDMI_HPD_TOGGLE_URL\n        self._auto_update_url = HH_SETTINGS_STUB.format(self._baseMuseClient.hhid) + AUTO_UPDATE_URL\n\n    def _generic_post(self, url, json_body):\n        \"\"\"\n        Generic method to make a POST request with the given json\n        body as the request body\n\n        :param str url: the full URL to make the request to\n        :param dict json_body: the request parameters\n        :return: dictionary response\n        \"\"\"\n        token = self._baseMuseClient.hhConfigAdminToken\n        self._baseMuseClient.hitRestEndpoint(\n            cmd_url=url, oauthToken=token, reqMethod=\"POST\",\n            reqHeaders={\"Content-Type\": \"application/json\"},\n            reqJson=json_body)\n\n    def _generic_get(self, url):\n        \"\"\"\n        Generic method to make a GET request\n\n        :param str url: the full URL to make the request to\n        :return: dictionary response\n        \"\"\"\n        token = self._baseMuseClient.hhConfigAdminToken\n        return self._baseMuseClient.hitRestEndpoint(\n            cmd_url=url, oauthToken=token, reqMethod=\"GET\",\n            reqHeaders={\"Content-Type\": \"application/json\"})\n\n    def get_hdmi_edid_status(self):\n        \"\"\"\n        Request the HDMI EDID status\n\n        :return: dictionary response\n        \"\"\"\n        return self._generic_get(self._edid_url)\n\n    def get_hdmi_status(self):\n        \"\"\"\n        Request the HDMI status\n\n        :return: dictionary response\n        \"\"\"\n        return self._generic_get(self._hdmi_status_url)\n\n    def toggle_hdmi_hpd(self):\n        \"\"\"\n        Call the endpoint that toggles HPD from the ZP's 5v OUT on HDMI\n        capable ZPs.\n\n        :return: Boolean whether the GET return status is good.\n        \"\"\"\n        token = self._baseMuseClient.hhConfigAdminToken\n        resp = self._baseMuseClient.hitRestEndpoint(\n            cmd_url=self._hdmi_hpd_toggle_url,\n            oauthToken=token, reqMethod=\"GET\",\n            check=False)\n        return resp.status_code == 200\n\n    def get_ir_control_state(self):\n        \"\"\"\n        Getter for the IR remote control enabled state\n\n        :return: dictionary response\n        \"\"\"\n        return self._generic_get(self._ir_url)\n\n    def 
is_ir_control_enabled(self):\n \"\"\"\n Return True if the IR remote control state is enabled\n\n :return: boolean - True if IR control is enabled\n \"\"\"\n return self.get_ir_control_state()['enabled']\n\n def set_ir_control_state(self, enabled):\n \"\"\"\n Setter for the IR remote control enabled state\n\n :param bool enabled: whether or not to enable IR control\n \"\"\"\n self._generic_post(self._ir_url, {\"enabled\": enabled})\n\n def set_auto_update_state(self, enabled):\n \"\"\"\n Setter for the automatic update state of the household\n\n :param bool enabled: whether or not to enable Automatic\n updates\n \"\"\"\n token = self._baseMuseClient.hhConfigToken\n return self._baseMuseClient.hitRestEndpoint(\n cmd_url=self._auto_update_url,\n oauthToken=token,\n reqMethod=\"POST\",\n apiKey=self._baseMuseClient.api_key,\n reqData=\"true\" if enabled else \"false\",\n check=False)\n\n def get_auto_update_state(self):\n \"\"\"\n Getter for the automatic update state of the household\n\n :return: boolean - whether or not auto updates are enabled\n \"\"\"\n token = self._baseMuseClient.hhConfigToken\n resp = self._baseMuseClient.hitRestEndpoint(\n cmd_url=self._auto_update_url,\n oauthToken=token,\n reqMethod=\"GET\",\n apiKey=self._baseMuseClient.api_key,\n check=False)\n assert resp.status_code == 200\n return resp.text == 'true'\n\n","repo_name":"briankabuyesonos/pdsw-muse-api-python-client","sub_path":"sonos_museclient/muse_helpers/settings_helpers.py","file_name":"settings_helpers.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"43745040485","text":"class Adsb():\n\n def __init__(self):\n self.clearLastCallsignID()\n self.clearLastFlightData()\n\n def clearLastCallsignID(self):\n self.lastCallSign = \"\"\n self.lastID = \"\"\n\n def clearLastFlightData(self):\n self.lastDist = None\n self.lastBearing = \"\"\n self.lastLat = \"\"\n self.lastLon = \"\"\n self.lastAltitude = \"\"\n self.lastVerticalRate = \"\"\n self.lastGroundSpeed = \"\"\n self.lastSquawk = \"\"\n\n def isValidRec(self, rec):\n if rec.count(',') == 21:\n return 1\n return 0\n\n def loadData(self, rec):\n dataVals = rec.split(\",\")\n self.ICAOid = dataVals[4]\n self.theDate = dataVals[6]\n self.theTime = dataVals[7]\n self.callsign = dataVals[10]\n self.altitude = dataVals[11]\n self.groundSpeed = dataVals[12]\n self.track = dataVals[13]\n self.lat = dataVals[14]\n self.lon = dataVals[15]\n self.verticalRate = dataVals[16]\n self.squawk = dataVals[17]\n\n def loadNewCsId(self, adsbRec, adsbCallsign, adsbID):\n dataVals = adsbRec.split(\",\")\n dataVals[4] = adsbID\n dataVals[10] = adsbCallsign\n return \",\".join(dataVals)\n ","repo_name":"erikorange/adsb-scanner-pitft","sub_path":"adsb.py","file_name":"adsb.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"87"} +{"seq_id":"86665995290","text":"import json\nimport subprocess\nimport gi\nimport os\n\nfrom datetime import datetime\nfrom i3ipc import Connection\n\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\nfrom nwg_shell_config.tools import is_command, get_lat_lon, list_background_dirs, load_text_file, \\\n list_inputs_by_type, h_list_devices_by_type, gtklock_module_path, do_backup, unpack_to_tmp, restore_from_tmp, \\\n get_theme_names, get_icon_themes, get_command_output, hyprctl\n\n\ndef set_from_checkbutton(cb, settings, key):\n settings[key] = 
cb.get_active()\n\n\ndef set_idle_use_from_checkbutton(cb, settings):\n settings[\"lockscreen-use-settings\"] = cb.get_active()\n if not settings[\"lockscreen-use-settings\"]:\n subprocess.call(\"killall swayidle\", shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)\n\n\ndef set_from_spinbutton(cb, settings, key, ndigits):\n settings[key] = round(cb.get_value(), ndigits)\n\n\ndef set_int_from_spinbutton(cb, settings, key):\n settings[key] = int(cb.get_value())\n\n\ndef set_limit_per_output(cb, settings, output_name):\n settings[\"autotiling-output-limits\"][output_name] = int(cb.get_value())\n\n\ndef set_split_per_output(cb, settings, key, output_name):\n settings[key][output_name] = round(cb.get_value(), 2)\n\n\ndef reset_autotiling(btn, l_spin_boxes, w_spin_boxes, h_spin_boxes):\n for sb in l_spin_boxes:\n sb.set_value(0)\n for sb in w_spin_boxes:\n sb.set_value(1.0)\n for sb in h_spin_boxes:\n sb.set_value(1.0)\n\n\ndef set_keywords_from_entry(entry, settings):\n txt = entry.get_text()\n # Sanitize\n if \" \" in txt:\n txt = txt.replace(\" \", \"\")\n entry.set_text(txt)\n if \",,\" in txt:\n txt = txt.replace(\",,\", \",\")\n entry.set_text(txt)\n for c in txt:\n if ord(c) > 128:\n txt = txt.replace(c, \"\")\n entry.set_text(txt)\n txt = txt.strip(\",\")\n\n settings[\"unsplash-keywords\"] = txt.split(\",\")\n\n\ndef validate_update_cmd(entry, cb, sb, settings):\n text = entry.get_text()\n if text and is_command(text):\n cb.set_active(True)\n cb.set_sensitive(True)\n sb.set_sensitive(True)\n settings[\"update-command\"] = text\n else:\n cb.set_active(False)\n cb.set_sensitive(False)\n sb.set_sensitive(False)\n\n\ndef set_timeouts(cb, cb1, settings, key):\n settings[key] = int(cb.get_value())\n if int(cb1.get_value() < settings[key] + 5):\n cb1.set_value(settings[key] + 5)\n\n\ndef set_sleep_timeout(sb, lock_timeout_sb, settings, key):\n timeout = sb.get_value()\n lock_timeout = lock_timeout_sb.get_value()\n if timeout <= lock_timeout + 5:\n sb.set_value(lock_timeout + 5)\n settings[key] = int(sb.get_value())\n\n\ndef update_lat_lon(btn, sb_lat, sb_lon):\n tz, lat, lon = get_lat_lon()\n sb_lat.set_value(lat)\n sb_lat.set_tooltip_text(tz)\n sb_lon.set_value(lon)\n sb_lon.set_tooltip_text(tz)\n\n\ndef set_from_workspaces(entry, settings):\n valid_text = \"\"\n for char in entry.get_text():\n if char.isdigit() or char == \" \":\n valid_text += char\n while ' ' in valid_text:\n valid_text = valid_text.replace(' ', ' ')\n entry.set_text(valid_text)\n settings[\"autotiling-workspaces\"] = valid_text.strip()\n print(settings[\"autotiling-workspaces\"])\n\n\ndef set_from_entry(entry, settings, key):\n settings[key] = entry.get_text()\n\n\ndef restore_defaults(btn, entry_dict):\n for key in entry_dict:\n key.set_text(entry_dict[key])\n\n\ndef set_custom_cmd_from_entry(entry, settings, key, widgets_to_lock):\n text = entry.get_text()\n for widget in widgets_to_lock:\n if text:\n widget.set_sensitive(False)\n else:\n widget.set_sensitive(True)\n settings[key] = text\n\n\ndef set_browser_from_combo(combo, entry, browsers_dict):\n entry.set_text(browsers_dict[combo.get_active_id()])\n\n\ndef set_dict_key_from_combo(combo, settings, key):\n settings[key] = combo.get_active_id()\n\n\ndef set_int_dict_key_from_combo(combo, settings, key):\n settings[key] = int(combo.get_active_id())\n\n# def set_icon_theme_from_combo(combo, settings, key, theme_names):\n# settings[key] = theme_names[combo.get_active_id()]\n\n\ndef on_custom_folder_selected(fcb, cb_custom_path, settings):\n 
settings[\"backgrounds-custom-path\"] = fcb.get_filename()\n cb_custom_path.set_sensitive(True)\n\n\ndef set_key_from_checkbox(cb, settings, key):\n settings[key] = cb.get_active()\n\n\ndef on_folder_btn_toggled(btn, settings):\n p = btn.get_label()\n if btn.get_active():\n if p not in settings[\"background-dirs\"]:\n settings[\"background-dirs\"].append(p)\n else:\n if p in settings[\"background-dirs\"]:\n settings[\"background-dirs\"].remove(p)\n\n\ndef launch(widget, cmd):\n print(\"Executing '{}'\".format(cmd))\n subprocess.Popen('exec {}'.format(cmd), shell=True)\n\n\nclass SideMenuRow(Gtk.ListBoxRow):\n def __init__(self, label, margin_start=9):\n super().__init__()\n self.eb = Gtk.EventBox()\n self.add(self.eb)\n lbl = Gtk.Label.new(label)\n lbl.set_property(\"halign\", Gtk.Align.START)\n lbl.set_property(\"margin-start\", margin_start)\n lbl.set_property(\"margin-end\", 9)\n self.eb.add(lbl)\n\n\ndef screen_tab(settings, voc, pending_updates):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(voc[\"common\"], voc[\"screen-settings\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 6)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 6)\n box.set_homogeneous(True)\n box.set_property(\"margin-left\", 0)\n box.set_property(\"margin-bottom\", 4)\n grid.attach(box, 0, 0, 6, 1)\n\n btn = Gtk.Button()\n btn.set_property(\"name\", \"app-btn\")\n btn.set_always_show_image(True)\n btn.set_image_position(Gtk.PositionType.TOP)\n img = Gtk.Image.new_from_icon_name(\"nwg-displays\", Gtk.IconSize.DIALOG)\n btn.set_image(img)\n btn.set_label(voc[\"displays\"])\n btn.connect(\"clicked\", launch, \"nwg-displays\")\n box.pack_start(btn, False, True, 0)\n\n btn = Gtk.Button()\n btn.set_property(\"name\", \"app-btn\")\n btn.set_always_show_image(True)\n btn.set_image_position(Gtk.PositionType.TOP)\n img = Gtk.Image.new_from_icon_name(\"azote\", Gtk.IconSize.DIALOG)\n btn.set_image(img)\n btn.set_label(voc[\"wallpapers\"])\n btn.connect(\"clicked\", launch, \"azote\")\n box.pack_start(btn, False, True, 0)\n\n btn = Gtk.Button()\n btn.set_property(\"name\", \"app-btn\")\n btn.set_always_show_image(True)\n btn.set_image_position(Gtk.PositionType.TOP)\n img = Gtk.Image.new_from_icon_name(\"nwg-look\", Gtk.IconSize.DIALOG)\n btn.set_image(img)\n btn.set_label(voc[\"look-feel\"])\n btn.connect(\"clicked\", launch, \"nwg-look\")\n box.pack_start(btn, False, True, 0)\n\n btn = Gtk.Button()\n btn.set_property(\"name\", \"app-btn\")\n btn.set_always_show_image(True)\n btn.set_image_position(Gtk.PositionType.TOP)\n img = Gtk.Image.new_from_icon_name(\"nwg-panel\", Gtk.IconSize.DIALOG)\n btn.set_image(img)\n btn.set_label(voc[\"panel-settings\"])\n btn.connect(\"clicked\", launch, \"nwg-panel-config\")\n box.pack_start(btn, False, True, 0)\n\n update_btn = Gtk.Button()\n update_btn.set_property(\"name\", \"app-btn\")\n update_btn.set_always_show_image(True)\n update_btn.set_image_position(Gtk.PositionType.TOP)\n if pending_updates == 0:\n update_btn.set_label(voc[\"updates\"])\n img = Gtk.Image.new_from_icon_name(\"nwg-shell\", Gtk.IconSize.DIALOG)\n else:\n update_btn.set_label(\"Updates ({})\".format(pending_updates))\n img = Gtk.Image.new_from_icon_name(\"nwg-shell-update\", Gtk.IconSize.DIALOG)\n update_btn.set_image(img)\n update_btn.connect(\"clicked\", launch, \"nwg-shell-updater\")\n box.pack_start(update_btn, False, True, 0)\n\n lbl = 
Gtk.Label()\n    lbl.set_markup(\"{}\".format(voc[\"desktop-style\"]))\n    lbl.set_property(\"halign\", Gtk.Align.START)\n    grid.attach(lbl, 0, 1, 1, 1)\n\n    combo = Gtk.ComboBoxText()\n    # combo.set_property(\"halign\", Gtk.Align.START)\n    grid.attach(combo, 1, 1, 3, 1)\n    if os.getenv(\"SWAYSOCK\"):\n        for p in [\"preset-0\", \"preset-1\", \"preset-2\", \"preset-3\", \"custom\"]:\n            combo.append(p, p)\n    elif os.getenv(\"HYPRLAND_INSTANCE_SIGNATURE\"):\n        for p in [\"hyprland-0\", \"hyprland-1\", \"hyprland-2\", \"hyprland-3\", \"custom-hyprland\"]:\n            combo.append(p, p)\n    combo.set_active_id(settings[\"panel-preset\"])\n    combo.connect(\"changed\", set_dict_key_from_combo, settings, \"panel-preset\")\n    combo.set_tooltip_text(voc[\"preset-tooltip\"])\n\n    lbl = Gtk.Label()\n    lbl.set_markup(\"{}\".format(voc[\"night-light\"]))\n    lbl.set_property(\"margin-top\", 6)\n    lbl.set_property(\"margin-bottom\", 6)\n    lbl.set_property(\"halign\", Gtk.Align.START)\n    grid.attach(lbl, 0, 2, 1, 1)\n\n    cb_night_light_on = Gtk.CheckButton.new_with_label(voc[\"on\"])\n    cb_night_light_on.set_active(settings[\"night-on\"])\n    cb_night_light_on.connect(\"toggled\", set_from_checkbutton, settings, \"night-on\")\n    cb_night_light_on.set_tooltip_text(voc[\"night-light-tooltip\"])\n    grid.attach(cb_night_light_on, 1, 2, 1, 1)\n\n    lbl = Gtk.Label.new(\"{}:\".format(voc[\"latitude\"]))\n    lbl.set_property(\"halign\", Gtk.Align.END)\n    grid.attach(lbl, 2, 3, 1, 1)\n\n    sb_lat = Gtk.SpinButton.new_with_range(-90.0, 90.0, 0.1)\n    sb_lat.set_tooltip_text(voc[\"latitude-tooltip\"])\n    sb_lat.set_digits(4)\n    sb_lat.set_value(settings[\"night-lat\"])\n    sb_lat.connect(\"value-changed\", set_from_spinbutton, settings, \"night-lat\", 4)\n    grid.attach(sb_lat, 3, 3, 1, 1)\n\n    lbl = Gtk.Label.new(\"{}:\".format(voc[\"temp-night\"]))\n    lbl.set_property(\"halign\", Gtk.Align.END)\n    grid.attach(lbl, 0, 3, 1, 1)\n\n    sb_temp_low = Gtk.SpinButton.new_with_range(1000, 10000, 100)\n    sb_temp_low.set_tooltip_text(voc[\"night-light-night-tooltip\"])\n    sb_temp_low.set_value(settings[\"night-temp-low\"])\n    sb_temp_low.connect(\"value-changed\", set_int_from_spinbutton, settings, \"night-temp-low\")\n    grid.attach(sb_temp_low, 1, 3, 1, 1)\n\n    lbl = Gtk.Label.new(\"{}:\".format(voc[\"longitude\"]))\n    lbl.set_property(\"halign\", Gtk.Align.END)\n    grid.attach(lbl, 2, 4, 1, 1)\n\n    sb_lon = Gtk.SpinButton.new_with_range(-180, 180, 0.1)\n    sb_lon.set_tooltip_text(voc[\"longitude-tooltip\"])\n    sb_lon.set_value(settings[\"night-long\"])\n    sb_lon.connect(\"value-changed\", set_from_spinbutton, settings, \"night-long\", 4)\n    sb_lon.set_digits(4)\n    grid.attach(sb_lon, 3, 4, 1, 1)\n\n    if (sb_lat.get_value() == -1.0 and sb_lon.get_value() == -1.0) \\\n            or (sb_lat.get_value() == 0.0 and sb_lon.get_value() == 0.0):\n        update_lat_lon(None, sb_lat, sb_lon)\n\n    lbl = Gtk.Label.new(\"{}:\".format(voc[\"temp-day\"]))\n    lbl.set_property(\"halign\", Gtk.Align.END)\n    grid.attach(lbl, 0, 4, 1, 1)\n\n    sb_temp_high = Gtk.SpinButton.new_with_range(1000, 10000, 100)\n    sb_temp_high.set_tooltip_text(voc[\"night-light-day-tooltip\"])\n    sb_temp_high.set_value(settings[\"night-temp-high\"])\n    sb_temp_high.connect(\"value-changed\", set_int_from_spinbutton, settings, \"night-temp-high\")\n    grid.attach(sb_temp_high, 1, 4, 1, 1)\n\n    lbl = Gtk.Label.new(\"{}:\".format(voc[\"gamma\"]))\n    lbl.set_property(\"halign\", Gtk.Align.END)\n    grid.attach(lbl, 0, 5, 1, 1)\n\n    btn = Gtk.Button.new_with_label(voc[\"calculate-lat-long\"])\n    btn.set_tooltip_text(voc[\"calculate-lat-long-tooltip\"])\n    
btn.connect(\"clicked\", update_lat_lon, sb_lat, sb_lon)\n grid.attach(btn, 3, 5, 1, 1)\n\n sb_gamma = Gtk.SpinButton.new_with_range(0.1, 10.0, 0.1)\n sb_gamma.set_value(settings[\"night-gamma\"])\n sb_gamma.connect(\"value-changed\", set_from_spinbutton, settings, \"night-gamma\", 1)\n sb_gamma.set_tooltip_text(voc[\"gamma-tooltip\"])\n grid.attach(sb_gamma, 1, 5, 1, 1)\n\n lbl = Gtk.Label()\n lbl.set_markup(\"{}\".format(voc[\"help-window\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 0, 6, 1, 1)\n\n box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 0)\n grid.attach(box, 1, 6, 3, 1)\n\n cb_help_overlay = Gtk.CheckButton.new_with_label(voc[\"on-layer\"])\n cb_help_overlay.set_active(settings[\"help-layer-shell\"])\n cb_help_overlay.connect(\"toggled\", set_from_checkbutton, settings, \"help-layer-shell\")\n cb_help_overlay.set_tooltip_text(voc[\"overlay-tooltip\"])\n box.pack_start(cb_help_overlay, False, False, 0)\n\n cb_help_keyboard = Gtk.CheckButton.new_with_label(voc[\"keyboard\"])\n cb_help_keyboard.set_active(settings[\"help-keyboard\"])\n cb_help_keyboard.connect(\"toggled\", set_from_checkbutton, settings, \"help-keyboard\")\n cb_help_keyboard.set_tooltip_text(voc[\"keyboard-tooltip\"])\n box.pack_start(cb_help_keyboard, False, False, 0)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"font-size\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n box.pack_start(lbl, False, False, 6)\n\n sb_help_font_size = Gtk.SpinButton.new_with_range(6, 48, 1)\n sb_help_font_size.set_value(settings[\"help-font-size\"])\n sb_help_font_size.connect(\"value-changed\", set_int_from_spinbutton, settings, \"help-font-size\")\n box.pack_start(sb_help_font_size, True, True, 0)\n\n # The way to turn the update-indicator section in the form off, is not to provide the 'nwg-system-update' script.\n if is_command(\"nwg-system-update\"):\n lbl = Gtk.Label()\n lbl.set_markup(\"{}\".format(voc[\"update-tray-icon\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 0, 7, 1, 1)\n\n box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 6)\n grid.attach(box, 1, 7, 3, 1)\n cb_update_indicator_on = Gtk.CheckButton.new_with_label(voc[\"on\"])\n cb_update_indicator_on.set_active(settings[\"update-indicator-on\"])\n cb_update_indicator_on.connect(\"toggled\", set_from_checkbutton, settings, \"update-indicator-on\")\n cb_update_indicator_on.set_tooltip_text(voc[\"update-tray-icon-tooltip\"])\n box.pack_start(cb_update_indicator_on, False, False, 0)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"interval\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n box.pack_start(lbl, False, False, 0)\n\n sb_update_indicator_interval = Gtk.SpinButton.new_with_range(1, 1440, 1)\n sb_update_indicator_interval.set_tooltip_text(voc[\"update-indicator-interval-tooltip\"])\n sb_update_indicator_interval.set_value(settings[\"update-indicator-interval\"])\n sb_update_indicator_interval.connect(\"value-changed\", set_int_from_spinbutton, settings,\n \"update-indicator-interval\")\n box.pack_start(sb_update_indicator_interval, False, False, 0)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"command\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n box.pack_start(lbl, False, False, 0)\n\n entry_update_cmd = Gtk.Entry()\n entry_update_cmd.set_placeholder_text(\"nwg-system-update\")\n entry_update_cmd.set_tooltip_text(voc[\"update-command-tooltip\"])\n entry_update_cmd.set_text(settings[\"update-command\"])\n entry_update_cmd.connect(\"changed\", validate_update_cmd, cb_update_indicator_on, 
sb_update_indicator_interval,\n settings)\n box.pack_start(entry_update_cmd, False, False, 0)\n else:\n settings[\"update-indicator-on\"] = False\n\n cb_cliphist = Gtk.CheckButton.new_with_label(voc[\"clipboard-history\"])\n cb_cliphist.set_active(settings[\"cliphist\"])\n cb_cliphist.connect(\"toggled\", set_from_checkbutton, settings, \"cliphist\")\n cb_cliphist.set_tooltip_text(voc[\"clipboard-history-tooltip\"])\n grid.attach(cb_cliphist, 0, 8, 1, 1)\n\n cb_nm_applet = Gtk.CheckButton.new_with_label(voc[\"nm-applet\"])\n cb_nm_applet.set_active(settings[\"appindicator\"])\n cb_nm_applet.connect(\"toggled\", set_from_checkbutton, settings, \"appindicator\")\n cb_nm_applet.set_tooltip_text(voc[\"nm-applet-tooltip\"])\n grid.attach(cb_nm_applet, 1, 8, 1, 1)\n\n cb_screenshot = Gtk.CheckButton.new_with_label(voc[\"screenshot-applet\"])\n cb_screenshot.set_active(settings[\"screenshot\"])\n cb_screenshot.connect(\"toggled\", set_from_checkbutton, settings, \"screenshot\")\n cb_screenshot.set_tooltip_text(voc[\"screenshot-applet-tooltip\"])\n grid.attach(cb_screenshot, 2, 8, 1, 1)\n\n frame.show_all()\n\n return frame, update_btn\n\n\ndef applications_tab(settings, voc, warn):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(voc[\"common\"], voc[\"applications\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n lbl = Gtk.Label()\n lbl.set_markup(\"{}\".format(voc[\"default-applications\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 0, 0, 2, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"terminal\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 1, 1, 1)\n\n entry_terminal = Gtk.Entry.new()\n entry_terminal.set_tooltip_text(voc[\"terminal-tooltip\"])\n entry_terminal.set_property(\"halign\", Gtk.Align.START)\n entry_terminal.connect(\"changed\", set_from_entry, settings, \"terminal\")\n if not settings[\"terminal\"]:\n for cmd in [\"foot\", \"alacritty\", \"kitty\", \"gnome-terminal\", \"sakura\", \"wterm\"]:\n if is_command(cmd):\n entry_terminal.set_text(cmd)\n break\n else:\n entry_terminal.set_text(settings[\"terminal\"])\n set_from_entry(entry_terminal, settings, \"terminal\")\n\n grid.attach(entry_terminal, 1, 1, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"file-manager\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 2, 1, 1)\n\n entry_fm = Gtk.Entry()\n entry_fm.set_tooltip_text(voc[\"file-manager-tooltip\"])\n entry_fm.set_property(\"halign\", Gtk.Align.START)\n\n entry_fm.connect(\"changed\", set_from_entry, settings, \"file-manager\")\n if not settings[\"file-manager\"]:\n for cmd in [\"thunar\", \"pcmanfm\", \"nautilus\", \"caja\"]:\n if is_command(cmd):\n entry_fm.set_text(cmd)\n break\n else:\n entry_fm.set_text(settings[\"file-manager\"])\n\n grid.attach(entry_fm, 1, 2, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"text-editor\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 3, 1, 1)\n\n entry_te = Gtk.Entry()\n entry_te.set_tooltip_text(voc[\"text-editor-tooltip\"])\n entry_te.set_property(\"halign\", Gtk.Align.START)\n\n entry_te.connect(\"changed\", set_from_entry, settings, \"editor\")\n if not settings[\"editor\"]:\n for cmd in [\"mousepad\", \"geany\", \"atom\", \"emacs\", \"gedit\"]:\n if is_command(cmd):\n entry_te.set_text(cmd)\n break\n else:\n entry_te.set_text(settings[\"editor\"])\n\n 
grid.attach(entry_te, 1, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"web-browser\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 4, 1, 1)\n\n entry_browser = Gtk.Entry.new()\n entry_browser.set_tooltip_text(voc[\"web-browser-tooltip\"])\n entry_browser.set_property(\"hexpand\", True)\n\n entry_browser.set_text(settings[\"browser\"])\n set_from_entry(entry_browser, settings, \"browser\")\n entry_browser.connect(\"changed\", set_from_entry, settings, \"browser\")\n grid.attach(entry_browser, 1, 4, 2, 1)\n\n combo = Gtk.ComboBoxText()\n combo.set_property(\"halign\", Gtk.Align.START)\n combo.set_tooltip_text(voc[\"web-browser-combo-tooltip\"])\n grid.attach(combo, 1, 5, 1, 1)\n browsers = get_browsers()\n for key in browsers:\n combo.append(key, key)\n if key in settings[\"browser\"]:\n combo.set_active_id(key)\n\n if entry_browser.get_text():\n for key in [\"chromium\", \"google-chrome-stable\", \"firefox\", \"qutebrowser\", \"epiphany\", \"microsoft-edge-stable\", \"surf\"]:\n if entry_browser.get_text() == key:\n combo.set_active_id(key)\n\n combo.connect(\"changed\", set_browser_from_combo, entry_browser, browsers)\n\n lbl = Gtk.Label()\n lbl.set_markup(\"{}\".format(voc[\"powerbar-commands\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 0, 6, 2, 1)\n\n lbl = Gtk.Label.new(f'{voc[\"pb-lock-screen\"]}:')\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 7, 1, 1)\n\n entry_pb_lock = Gtk.Entry.new()\n entry_pb_lock.set_text(settings[\"pb-lock\"])\n entry_pb_lock.set_tooltip_text(voc[\"leave-blank-btn-tooltip\"])\n entry_pb_lock.set_property(\"halign\", Gtk.Align.START)\n set_from_entry(entry_pb_lock, settings, \"pb-lock\")\n entry_pb_lock.connect(\"changed\", set_from_entry, settings, \"pb-lock\")\n grid.attach(entry_pb_lock, 1, 7, 1, 1)\n\n lbl = Gtk.Label.new(f'{voc[\"exit\"]}:')\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 8, 1, 1)\n\n entry_pb_exit = Gtk.Entry.new()\n entry_pb_exit.set_text(settings[\"pb-exit\"])\n entry_pb_exit.set_tooltip_text(voc[\"leave-blank-btn-tooltip\"])\n entry_pb_exit.set_property(\"halign\", Gtk.Align.START)\n set_from_entry(entry_pb_exit, settings, \"pb-exit\")\n entry_pb_exit.connect(\"changed\", set_from_entry, settings, \"pb-exit\")\n grid.attach(entry_pb_exit, 1, 8, 1, 1)\n\n lbl = Gtk.Label.new(f'{voc[\"reboot\"]}:')\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 9, 1, 1)\n\n entry_pb_reboot = Gtk.Entry.new()\n entry_pb_reboot.set_text(settings[\"pb-reboot\"])\n entry_pb_reboot.set_tooltip_text(voc[\"leave-blank-btn-tooltip\"])\n entry_pb_reboot.set_property(\"halign\", Gtk.Align.START)\n set_from_entry(entry_pb_reboot, settings, \"pb-reboot\")\n entry_pb_reboot.connect(\"changed\", set_from_entry, settings, \"pb-reboot\")\n grid.attach(entry_pb_reboot, 1, 9, 1, 1)\n\n lbl = Gtk.Label.new(f'{voc[\"sleep\"]}:')\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 10, 1, 1)\n\n entry_pb_sleep = Gtk.Entry.new()\n entry_pb_sleep.set_text(settings[\"pb-sleep\"])\n entry_pb_sleep.set_tooltip_text(voc[\"leave-blank-btn-tooltip\"])\n entry_pb_sleep.set_property(\"halign\", Gtk.Align.START)\n set_from_entry(entry_pb_sleep, settings, \"pb-sleep\")\n entry_pb_sleep.connect(\"changed\", set_from_entry, settings, \"pb-sleep\")\n grid.attach(entry_pb_sleep, 1, 10, 1, 1)\n\n lbl = Gtk.Label.new(f'{voc[\"power-off\"]}:')\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 11, 1, 1)\n\n entry_pb_poweroff = 
Gtk.Entry.new()\n entry_pb_poweroff.set_text(settings[\"pb-poweroff\"])\n entry_pb_poweroff.set_property(\"halign\", Gtk.Align.START)\n set_from_entry(entry_pb_poweroff, settings, \"pb-poweroff\")\n entry_pb_poweroff.connect(\"changed\", set_from_entry, settings, \"pb-poweroff\")\n grid.attach(entry_pb_poweroff, 1, 11, 1, 1)\n\n if warn:\n lbl = Gtk.Label.new(\"If you see this warning on startup, some of the fields above\\n\"\n \"have not yet been saved. Adjust these settings to your needs,\\n\"\n \"and press the 'Apply' button, for your key bindings to work.\")\n lbl.set_property(\"halign\", Gtk.Align.CENTER)\n lbl.set_justify(Gtk.Justification.CENTER)\n lbl.set_property(\"margin-top\", 18)\n lbl.set_line_wrap(True)\n grid.attach(lbl, 0, 8, 3, 2)\n\n frame.show_all()\n\n return frame\n\n\ndef get_browsers():\n result = {}\n browsers = {\n \"brave\": \"brave --enable-features=UseOzonePlatform --ozone-platform=wayland\",\n \"chromium\": \"chromium --enable-features=UseOzonePlatform --ozone-platform=wayland\",\n \"google-chrome-stable\": \"google-chrome-stable --enable-features=UseOzonePlatform --ozone-platform=wayland\",\n \"epiphany\": \"epiphany\",\n \"falkon\": \"falkon\",\n \"firefox\": \"MOZ_ENABLE_WAYLAND=1 firefox\",\n \"konqueror\": \"konqueror\",\n \"microsoft-edge-stable\": \"microsoft-edge-stable --enable-features=UseOzonePlatform --ozone-platform=wayland\",\n \"midori\": \"midori\",\n \"opera\": \"opera\",\n \"qutebrowser\": \"qutebrowser\",\n \"seamonkey\": \"seamonkey\",\n \"surf\": \"surf\",\n \"vivaldi-stable\": \"vivaldi-stable --enable-features=UseOzonePlatform --ozone-platform=wayland\",\n }\n for key in browsers:\n if is_command(key):\n result[key] = browsers[key]\n\n return result\n\n\ndef backup_tab(config_home, data_home, backup_configs, backup_data, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(voc[\"common\"], voc[\"backup\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n lbl = Gtk.Label()\n lbl.set_property(\"halign\", Gtk.Align.START)\n lbl.set_markup(\"{}\".format(voc[\"backup-desc\"]))\n grid.attach(lbl, 0, 0, 3, 1)\n\n entry_backup = Gtk.Entry()\n entry_backup.set_width_chars(45)\n entry_backup.set_placeholder_text(voc[\"backup-path\"])\n time = datetime.now()\n entry_backup.set_text(\n os.path.join(\"{}\".format(os.getenv(\"HOME\")), time.strftime(\"nwg-shell-backup-%Y%m%d-%H%M%S\")))\n grid.attach(entry_backup, 0, 1, 2, 1)\n\n btn = Gtk.Button()\n btn.set_label(voc[\"create\"])\n btn.connect(\"clicked\", do_backup, config_home, data_home, backup_configs, backup_data, entry_backup, voc)\n grid.attach(btn, 2, 1, 1, 1)\n\n lbl = Gtk.Label()\n lbl.set_property(\"halign\", Gtk.Align.START)\n lbl.set_property(\"margin-top\", 12)\n lbl.set_markup(\"{}\".format(voc[\"backup-restore-desc\"]))\n grid.attach(lbl, 0, 2, 3, 1)\n\n restore_warning = Gtk.Label()\n restore_warning.set_markup('{}'.format(voc[\"backup-restore-warning\"]))\n grid.attach(restore_warning, 0, 4, 2, 1)\n\n restore_btn = Gtk.Button()\n\n fcb = Gtk.FileChooserButton.new(\"Select file\", Gtk.FileChooserAction.OPEN)\n fcb.set_current_folder(os.getenv(\"HOME\"))\n f_filter = Gtk.FileFilter()\n f_filter.set_name(\".tar.gz files\")\n f_filter.add_pattern(\"*.tar.gz\")\n fcb.add_filter(f_filter)\n fcb.connect(\"file-set\", unpack_to_tmp, restore_btn, restore_warning, voc)\n grid.attach(fcb, 0, 3, 3, 1)\n\n 
restore_btn.set_label(voc[\"backup-restore\"])\n    restore_btn.connect(\"clicked\", restore_from_tmp, restore_warning, voc)\n    grid.attach(restore_btn, 2, 4, 1, 1)\n\n    frame.show_all()\n    restore_btn.hide()\n    restore_warning.hide()\n\n    return frame\n\n\ndef autotiling_tab(settings, outputs, voc):\n    frame = Gtk.Frame()\n    frame.set_label(\" {}: {} \".format(voc[\"common\"], voc[\"autotiling\"]))\n    frame.set_label_align(0.5, 0.5)\n    frame.set_property(\"hexpand\", True)\n    grid = Gtk.Grid()\n    frame.add(grid)\n    grid.set_property(\"margin\", 12)\n    grid.set_column_spacing(6)\n    grid.set_row_spacing(6)\n\n    cb_autotiling_use_settings = Gtk.CheckButton.new_with_label(voc[\"use-these-settings\"])\n    cb_autotiling_use_settings.set_property(\"halign\", Gtk.Align.START)\n    cb_autotiling_use_settings.set_property(\"margin-bottom\", 6)\n    cb_autotiling_use_settings.set_tooltip_text(voc[\"autotiling-tooltip\"])\n    cb_autotiling_use_settings.set_active(settings[\"autotiling-on\"])\n    cb_autotiling_use_settings.connect(\"toggled\", set_from_checkbutton, settings, \"autotiling-on\")\n    grid.attach(cb_autotiling_use_settings, 0, 0, 2, 1)\n\n    lbl = Gtk.Label()\n    lbl.set_markup(\n        '{}'.format(\n            voc[\"more-info\"]))\n    lbl.set_property(\"halign\", Gtk.Align.END)\n    grid.attach(lbl, 3, 0, 2, 1)\n\n    lbl = Gtk.Label()\n    lbl.set_markup(\"{}\".format(voc[\"workspaces\"]))\n    lbl.set_property(\"halign\", Gtk.Align.START)\n    grid.attach(lbl, 0, 1, 1, 1)\n\n    entry = Gtk.Entry()\n    entry.set_placeholder_text(\"1 2 3 4 5 6 7 8\")\n    entry.set_property(\"halign\", Gtk.Align.START)\n    entry.set_text(settings[\"autotiling-workspaces\"])\n    entry.set_tooltip_text(voc[\"workspaces-tooltip\"])\n    entry.set_property(\"margin-bottom\", 6)\n    entry.connect(\"changed\", set_from_workspaces, settings)\n    grid.attach(entry, 1, 1, 2, 1)\n\n    lbl = Gtk.Label()\n    lbl.set_markup(\"{}\".format(voc[\"per-output\"]))\n    lbl.set_property(\"halign\", Gtk.Align.START)\n    grid.attach(lbl, 0, 2, 1, 1)\n\n    lbl = Gtk.Label.new(voc[\"autotiling-depth-limit\"])\n    lbl.set_property(\"halign\", Gtk.Align.START)\n    grid.attach(lbl, 1, 2, 1, 1)\n\n    lbl = Gtk.Label.new(voc[\"autotiling-split-width\"])\n    lbl.set_property(\"halign\", Gtk.Align.START)\n    grid.attach(lbl, 2, 2, 1, 1)\n\n    lbl = Gtk.Label.new(voc[\"autotiling-split-height\"])\n    lbl.set_property(\"halign\", Gtk.Align.START)\n    grid.attach(lbl, 3, 2, 1, 1)\n\n    l_spin_boxes = []\n    w_spin_boxes = []\n    h_spin_boxes = []\n    i = 0\n    for i in range(len(outputs)):\n        o_name = outputs[i]\n        lbl = Gtk.Label.new(\"{}:\".format(o_name))\n        lbl.set_property(\"halign\", Gtk.Align.END)\n        grid.attach(lbl, 0, 3 + i, 1, 1)\n\n        limit = settings[\"autotiling-output-limits\"][o_name] if o_name in settings[\"autotiling-output-limits\"] else 0\n        sb = Gtk.SpinButton.new_with_range(0, 256, 1)\n        sb.set_property(\"halign\", Gtk.Align.START)\n        sb.set_value(limit)\n        sb.set_tooltip_text(voc[\"autotiling-depth-limit-tooltip\"])\n        sb.connect(\"value-changed\", set_limit_per_output, settings, o_name)\n        l_spin_boxes.append(sb)\n        grid.attach(sb, 1, 3 + i, 1, 1)\n\n        split_width = settings[\"autotiling-output-splitwidths\"][o_name] if o_name in settings[\n            \"autotiling-output-splitwidths\"] else 1.0\n        sb = Gtk.SpinButton.new_with_range(0.2, 1.9, 0.01)\n        sb.set_property(\"halign\", Gtk.Align.START)\n        sb.set_value(split_width)\n        sb.set_tooltip_text(voc[\"autotiling-split-tooltip\"])\n        sb.connect(\"value-changed\", 
set_split_per_output, settings, \"autotiling-output-splitwidths\", o_name)\n w_spin_boxes.append(sb)\n grid.attach(sb, 2, 3 + i, 1, 1)\n\n split_height = settings[\"autotiling-output-splitheights\"][o_name] if o_name in settings[\n \"autotiling-output-splitheights\"] else 1.0\n sb = Gtk.SpinButton.new_with_range(0.2, 1.9, 0.01)\n sb.set_property(\"halign\", Gtk.Align.START)\n sb.set_value(split_height)\n sb.set_tooltip_text(voc[\"autotiling-split-tooltip\"])\n sb.connect(\"value-changed\", set_split_per_output, settings, \"autotiling-output-splitheights\", o_name)\n h_spin_boxes.append(sb)\n grid.attach(sb, 3, 3 + i, 1, 1)\n\n btn = Gtk.Button()\n btn.set_label(voc[\"restore-defaults\"])\n btn.connect(\"clicked\", reset_autotiling, l_spin_boxes, w_spin_boxes, h_spin_boxes)\n grid.attach(btn, 1, 3 + i + 1, 3, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef keyboard_tab(settings, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(voc[\"common\"], voc[\"keyboard\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n cb_keyboard_use_settings = Gtk.CheckButton.new_with_label(voc[\"use-these-settings\"])\n cb_keyboard_use_settings.set_property(\"halign\", Gtk.Align.START)\n cb_keyboard_use_settings.set_property(\"margin-bottom\", 6)\n cb_keyboard_use_settings.set_tooltip_text(voc[\"keyboard-include-tooltip\"])\n cb_keyboard_use_settings.set_active(settings[\"keyboard-use-settings\"])\n cb_keyboard_use_settings.connect(\"toggled\", set_from_checkbutton, settings, \"keyboard-use-settings\")\n grid.attach(cb_keyboard_use_settings, 0, 0, 2, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"device\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 1, 1, 1)\n\n combo_device = Gtk.ComboBoxText()\n combo_device.set_property(\"halign\", Gtk.Align.START)\n combo_device.set_tooltip_text(voc[\"device-tooltip\"])\n combo_device.append(\"\", voc[\"all-of-type\"])\n keyboards = list_inputs_by_type(input_type=\"keyboard\")\n for item in keyboards:\n combo_device.append(item, item)\n combo_device.set_active_id(settings[\"keyboard-identifier\"])\n combo_device.connect(\"changed\", set_dict_key_from_combo, settings, \"keyboard-identifier\")\n grid.attach(combo_device, 1, 1, 2, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"keyboard-layout\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 2, 1, 1)\n\n entry_layout = Gtk.Entry()\n entry_layout.set_tooltip_text(voc[\"keyboard-layout-tooltip\"])\n entry_layout.set_text(settings[\"keyboard-xkb-layout\"])\n entry_layout.connect(\"changed\", set_from_entry, settings, \"keyboard-xkb-layout\")\n grid.attach(entry_layout, 1, 2, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"keyboard-variant\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 3, 1, 1)\n\n entry_variant = Gtk.Entry()\n entry_variant.set_tooltip_text(voc[\"keyboard-variant-tooltip\"])\n entry_variant.set_text(settings[\"keyboard-xkb-variant\"])\n entry_variant.connect(\"changed\", set_from_entry, settings, \"keyboard-xkb-variant\")\n grid.attach(entry_variant, 1, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"keyboard-repeat-delay\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 4, 1, 1)\n\n sb_repeat_delay = Gtk.SpinButton.new_with_range(1, 6000, 1)\n sb_repeat_delay.set_value(settings[\"keyboard-repeat-delay\"])\n 
sb_repeat_delay.connect(\"value-changed\", set_int_from_spinbutton, settings, \"keyboard-repeat-delay\")\n sb_repeat_delay.set_tooltip_text(voc[\"keyboard-repeat-delay-tooltip\"])\n grid.attach(sb_repeat_delay, 1, 4, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"keyboard-repeat-rate\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 5, 1, 1)\n\n sb_repeat_rate = Gtk.SpinButton.new_with_range(1, 4000, 1)\n sb_repeat_rate.set_value(settings[\"keyboard-repeat-rate\"])\n sb_repeat_rate.connect(\"value-changed\", set_int_from_spinbutton, settings, \"keyboard-repeat-rate\")\n sb_repeat_rate.set_tooltip_text(voc[\"keyboard-repeat-rate-tooltip\"])\n grid.attach(sb_repeat_rate, 1, 5, 1, 1)\n\n lbl = Gtk.Label.new(\"CapsLock:\")\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 6, 1, 1)\n\n combo_caps = Gtk.ComboBoxText()\n combo_caps.set_property(\"halign\", Gtk.Align.START)\n combo_caps.set_tooltip_text(voc[\"capslock-tooltip\"])\n for item in [\"disabled\", \"enabled\"]:\n combo_caps.append(item, voc[item])\n combo_caps.set_active_id(settings[\"keyboard-xkb-capslock\"])\n combo_caps.connect(\"changed\", set_dict_key_from_combo, settings, \"keyboard-xkb-capslock\")\n grid.attach(combo_caps, 1, 6, 1, 1)\n\n lbl = Gtk.Label.new(\"NumLock:\")\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 7, 1, 1)\n\n combo_num = Gtk.ComboBoxText()\n combo_num.set_property(\"halign\", Gtk.Align.START)\n combo_num.set_tooltip_text(voc[\"numlock-tooltip\"])\n for item in [\"disabled\", \"enabled\"]:\n combo_num.append(item, voc[item])\n combo_num.set_active_id(settings[\"keyboard-xkb-numlock\"])\n combo_num.connect(\"changed\", set_dict_key_from_combo, settings, \"keyboard-xkb-numlock\")\n grid.attach(combo_num, 1, 7, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"custom-field\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 8, 1, 1)\n\n entry_cname = Gtk.Entry()\n entry_cname.set_tooltip_text(voc[\"custom-field-name-tooltip\"])\n entry_cname.set_placeholder_text(voc[\"name\"])\n entry_cname.set_text(settings[\"keyboard-custom-name\"])\n entry_cname.connect(\"changed\", set_from_entry, settings, \"keyboard-custom-name\")\n grid.attach(entry_cname, 1, 8, 1, 1)\n\n entry_cname = Gtk.Entry()\n entry_cname.set_tooltip_text(voc[\"custom-field-value-tooltip\"])\n entry_cname.set_placeholder_text(voc[\"value\"])\n entry_cname.set_text(settings[\"keyboard-custom-value\"])\n entry_cname.connect(\"changed\", set_from_entry, settings, \"keyboard-custom-value\")\n grid.attach(entry_cname, 2, 8, 1, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef h_general_tab(settings, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(voc[\"common\"], voc[\"general-settings\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n cb_gen_use_settings = Gtk.CheckButton.new_with_label(voc[\"use-these-settings\"])\n cb_gen_use_settings.set_property(\"halign\", Gtk.Align.START)\n cb_gen_use_settings.set_property(\"margin-bottom\", 6)\n cb_gen_use_settings.set_tooltip_text(voc[\"hyprland-include-tooltip\"])\n cb_gen_use_settings.set_active(settings[\"gen-use-settings\"])\n cb_gen_use_settings.connect(\"toggled\", set_from_checkbutton, settings, \"gen-use-settings\")\n grid.attach(cb_gen_use_settings, 0, 0, 2, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"layout\"]))\n 
lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 1, 1, 1)\n\n combo_layout = Gtk.ComboBoxText()\n for item in [\"dwindle\", \"master\"]:\n combo_layout.append(item, item)\n combo_layout.set_active_id(settings[\"gen-layout\"])\n combo_layout.connect(\"changed\", set_dict_key_from_combo, settings, \"gen-layout\")\n grid.attach(combo_layout, 1, 1, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"window-border-size\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 2, 1, 1)\n\n sb_border_size = Gtk.SpinButton.new_with_range(0, 256, 1)\n sb_border_size.set_property(\"halign\", Gtk.Align.START)\n sb_border_size.set_value(settings[\"gen-border_size\"])\n sb_border_size.connect(\"value-changed\", set_int_from_spinbutton, settings, \"gen-border_size\")\n grid.attach(sb_border_size, 1, 2, 1, 1)\n\n cb_no_boarder = Gtk.CheckButton.new_with_label(voc[\"no-border-on-floating\"])\n cb_no_boarder.set_property(\"halign\", Gtk.Align.START)\n cb_no_boarder.set_property(\"margin-bottom\", 6)\n cb_no_boarder.set_active(settings[\"gen-no_border_on_floating\"])\n cb_no_boarder.connect(\"toggled\", set_from_checkbutton, settings, \"gen-no_border_on_floating\")\n grid.attach(cb_no_boarder, 2, 2, 2, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"gaps-in\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 3, 1, 1)\n\n sb_gaps_in = Gtk.SpinButton.new_with_range(0, 256, 1)\n sb_gaps_in.set_property(\"halign\", Gtk.Align.START)\n sb_gaps_in.set_value(settings[\"gen-gaps_in\"])\n sb_gaps_in.connect(\"value-changed\", set_int_from_spinbutton, settings, \"gen-gaps_in\")\n grid.attach(sb_gaps_in, 1, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"gaps-out\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 3, 1, 1)\n\n sb_gaps_out = Gtk.SpinButton.new_with_range(0, 256, 1)\n sb_gaps_out.set_property(\"halign\", Gtk.Align.START)\n sb_gaps_out.set_value(settings[\"gen-gaps_out\"])\n sb_gaps_out.connect(\"value-changed\", set_int_from_spinbutton, settings, \"gen-gaps_out\")\n grid.attach(sb_gaps_out, 3, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"color-active-border-start\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 4, 1, 1)\n\n entry_cab_start = Gtk.Entry()\n entry_cab_start.set_width_chars(8)\n entry_cab_start.set_tooltip_text(voc[\"color-border-start-tooltip\"])\n entry_cab_start.set_text(settings[\"gen-col-active_border-start\"])\n entry_cab_start.connect(\"changed\", set_from_entry, settings, \"gen-col-active_border-start\")\n grid.attach(entry_cab_start, 1, 4, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"end\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 4, 1, 1)\n\n entry_cab_end = Gtk.Entry()\n entry_cab_end.set_width_chars(8)\n entry_cab_end.set_tooltip_text(voc[\"color-border-end-tooltip\"])\n entry_cab_end.set_text(settings[\"gen-col-active_border-end\"])\n entry_cab_end.connect(\"changed\", set_from_entry, settings, \"gen-col-active_border-end\")\n grid.attach(entry_cab_end, 3, 4, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"angle\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 4, 4, 1, 1)\n\n sb_cab_angle = Gtk.SpinButton.new_with_range(0, 365, 1)\n sb_cab_angle.set_property(\"halign\", Gtk.Align.START)\n sb_cab_angle.set_tooltip_text(voc[\"color-gradient-angle\"])\n sb_cab_angle.set_value(settings[\"gen-col-active_border-deg\"])\n sb_cab_angle.connect(\"value-changed\", set_int_from_spinbutton, settings, 
\"gen-col-active_border-deg\")\n grid.attach(sb_cab_angle, 5, 4, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"color-inactive-border-start\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 5, 1, 1)\n\n entry_cib_start = Gtk.Entry()\n entry_cib_start.set_width_chars(8)\n entry_cib_start.set_tooltip_text(voc[\"color-border-start-tooltip\"])\n entry_cib_start.set_text(settings[\"gen-col-inactive_border-start\"])\n entry_cib_start.connect(\"changed\", set_from_entry, settings, \"gen-col-inactive_border-start\")\n grid.attach(entry_cib_start, 1, 5, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"end\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 5, 1, 1)\n\n entry_cib_end = Gtk.Entry()\n entry_cib_end.set_width_chars(8)\n entry_cib_end.set_tooltip_text(voc[\"color-border-end-tooltip\"])\n entry_cib_end.set_text(settings[\"gen-col-inactive_border-end\"])\n entry_cib_end.connect(\"changed\", set_from_entry, settings, \"gen-col-inactive_border-end\")\n grid.attach(entry_cib_end, 3, 5, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"angle\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 4, 5, 1, 1)\n\n sb_cib_angle = Gtk.SpinButton.new_with_range(0, 365, 1)\n sb_cib_angle.set_property(\"halign\", Gtk.Align.START)\n sb_cib_angle.set_tooltip_text(voc[\"color-gradient-angle\"])\n sb_cib_angle.set_value(settings[\"gen-col-inactive_border-deg\"])\n sb_cib_angle.connect(\"value-changed\", set_int_from_spinbutton, settings, \"gen-col-inactive_border-deg\")\n grid.attach(sb_cib_angle, 5, 5, 1, 1)\n\n cb_no_cursor_warps = Gtk.CheckButton.new_with_label(voc[\"no-cursor-warps\"])\n cb_no_cursor_warps.set_tooltip_text(voc[\"no-cursor-warps-tooltip\"])\n cb_no_cursor_warps.set_property(\"halign\", Gtk.Align.START)\n cb_no_cursor_warps.set_active(settings[\"gen-no_cursor_warps\"])\n cb_no_cursor_warps.connect(\"toggled\", set_from_checkbutton, settings, \"gen-no_cursor_warps\")\n grid.attach(cb_no_cursor_warps, 0, 6, 1, 1)\n\n cb_no_focus_fallback = Gtk.CheckButton.new_with_label(voc[\"no-focus-fallback\"])\n cb_no_focus_fallback.set_tooltip_text(voc[\"no-focus-fallback-tooltip\"])\n cb_no_focus_fallback.set_property(\"halign\", Gtk.Align.START)\n cb_no_focus_fallback.set_active(settings[\"gen-no_focus_fallback\"])\n cb_no_focus_fallback.connect(\"toggled\", set_from_checkbutton, settings, \"gen-no_focus_fallback\")\n grid.attach(cb_no_focus_fallback, 1, 6, 1, 1)\n\n cb_resize_on_border = Gtk.CheckButton.new_with_label(voc[\"resize-on-border\"])\n cb_resize_on_border.set_tooltip_text(voc[\"resize-on-border\"])\n cb_resize_on_border.set_property(\"halign\", Gtk.Align.START)\n cb_resize_on_border.set_active(settings[\"gen-resize_on_border\"])\n cb_resize_on_border.connect(\"toggled\", set_from_checkbutton, settings, \"gen-resize_on_border\")\n grid.attach(cb_resize_on_border, 0, 7, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"extend-border-grab-area\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 1, 7, 2, 1)\n\n sb_extend_grab_area = Gtk.SpinButton.new_with_range(0, 365, 1)\n sb_extend_grab_area.set_property(\"halign\", Gtk.Align.START)\n sb_extend_grab_area.set_tooltip_text(voc[\"extend-border-grab-area-tooltip\"])\n sb_extend_grab_area.set_value(settings[\"gen-extend_border_grab_area\"])\n sb_extend_grab_area.connect(\"value-changed\", set_int_from_spinbutton, settings, \"gen-extend_border_grab_area\")\n grid.attach(sb_extend_grab_area, 3, 7, 1, 1)\n\n cb_hover = 
Gtk.CheckButton.new_with_label(voc[\"hover-icon-on-border\"])\n cb_hover.set_tooltip_text(voc[\"hover-icon-on-border-tooltip\"])\n cb_hover.set_property(\"halign\", Gtk.Align.START)\n cb_hover.set_active(settings[\"gen-hover_icon_on_border\"])\n cb_hover.connect(\"toggled\", set_from_checkbutton, settings, \"gen-hover_icon_on_border\")\n grid.attach(cb_hover, 0, 8, 1, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef h_dwindle_tab(settings, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(voc[\"common\"], voc[\"dwindle-layout\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n cb_use_settings = Gtk.CheckButton.new_with_label(voc[\"use-these-settings\"])\n cb_use_settings.set_property(\"halign\", Gtk.Align.START)\n cb_use_settings.set_property(\"margin-bottom\", 6)\n cb_use_settings.set_tooltip_text(voc[\"hyprland-include-tooltip\"])\n cb_use_settings.set_active(settings[\"dwindle-use-settings\"])\n cb_use_settings.connect(\"toggled\", set_from_checkbutton, settings, \"dwindle-use-settings\")\n grid.attach(cb_use_settings, 0, 0, 2, 1)\n\n cb_pseudotiling = Gtk.CheckButton.new_with_label(voc[\"pseudotiling\"])\n cb_pseudotiling.set_property(\"halign\", Gtk.Align.START)\n cb_pseudotiling.set_tooltip_text(voc[\"pseudotiling-tooltip\"])\n cb_pseudotiling.set_active(settings[\"dwindle-pseudotile\"])\n cb_pseudotiling.connect(\"toggled\", set_from_checkbutton, settings, \"dwindle-pseudotile\")\n grid.attach(cb_pseudotiling, 0, 1, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"force-split\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 1, 1, 1)\n\n combo_force_split = Gtk.ComboBoxText()\n combo_force_split.set_tooltip_text(voc[\"force-split-tooltip\"])\n d = {\"0\": voc[\"force-split-0\"], \"1\": voc[\"force-split-1\"], \"2\": voc[\"force-split-2\"]}\n for item in [\"0\", \"1\", \"2\"]:\n combo_force_split.append(item, d[item])\n combo_force_split.set_active_id(str(settings[\"dwindle-force_split\"]))\n combo_force_split.connect(\"changed\", set_int_dict_key_from_combo, settings, \"dwindle-force_split\")\n grid.attach(combo_force_split, 3, 1, 1, 1)\n\n cb_preserve_split = Gtk.CheckButton.new_with_label(voc[\"preserve-split\"])\n cb_preserve_split.set_property(\"halign\", Gtk.Align.START)\n cb_preserve_split.set_tooltip_text(voc[\"preserve-split-tooltip\"])\n cb_preserve_split.set_active(settings[\"dwindle-preserve_split\"])\n cb_preserve_split.connect(\"toggled\", set_from_checkbutton, settings, \"dwindle-preserve_split\")\n grid.attach(cb_preserve_split, 1, 1, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"special-scale-factor\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 2, 1, 1)\n\n sb_ssf = Gtk.SpinButton.new_with_range(0.0, 1.0, 0.1)\n sb_ssf.set_value(settings[\"dwindle-special_scale_factor\"])\n sb_ssf.set_tooltip_text(voc[\"special-scale-factor-tooltip\"])\n sb_ssf.connect(\"value-changed\", set_from_spinbutton, settings, \"dwindle-special_scale_factor\", 2)\n grid.attach(sb_ssf, 1, 2, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"split-width-multiplier\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 2, 1, 1)\n\n sb_swm = Gtk.SpinButton.new_with_range(0.1, 2.0, 0.1)\n sb_swm.set_value(settings[\"dwindle-split_width_multiplier\"])\n sb_swm.set_tooltip_text(voc[\"split-width-multiplier-tooltip\"])\n 
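# Editor's note: unlike set_int_from_spinbutton, set_from_spinbutton (connected just below) takes a precision argument; assumed shape:\n #   def set_from_spinbutton(sb, settings, key, digits):\n #       settings[key] = round(sb.get_value(), digits)\n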
sb_swm.connect(\"value-changed\", set_from_spinbutton, settings, \"dwindle-split_width_multiplier\", 2)\n grid.attach(sb_swm, 3, 2, 1, 1)\n\n cb_ngwo = Gtk.CheckButton.new_with_label(voc[\"no-gaps-when-only\"])\n cb_ngwo.set_property(\"halign\", Gtk.Align.START)\n cb_ngwo.set_tooltip_text(voc[\"no-gaps-when-only-tooltip\"])\n cb_ngwo.set_active(settings[\"dwindle-no_gaps_when_only\"])\n cb_ngwo.connect(\"toggled\", set_from_checkbutton, settings, \"dwindle-no_gaps_when_only\")\n grid.attach(cb_ngwo, 0, 3, 2, 1)\n\n cb_uafs = Gtk.CheckButton.new_with_label(voc[\"use-active-for-splits\"])\n cb_uafs.set_property(\"halign\", Gtk.Align.START)\n cb_uafs.set_tooltip_text(voc[\"use-active-for-splits-tooltip\"])\n cb_uafs.set_active(settings[\"dwindle-use_active_for_splits\"])\n cb_uafs.connect(\"toggled\", set_from_checkbutton, settings, \"dwindle-use_active_for_splits\")\n grid.attach(cb_uafs, 2, 3, 2, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"default-split-ratio\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 4, 1, 1)\n\n sb_dsr = Gtk.SpinButton.new_with_range(0.1, 1.9, 0.01)\n sb_dsr.set_value(settings[\"dwindle-default_split_ratio\"])\n sb_dsr.set_tooltip_text(voc[\"default-split-ratio-tooltip\"])\n sb_dsr.connect(\"value-changed\", set_from_spinbutton, settings, \"dwindle-default_split_ratio\", 3)\n grid.attach(sb_dsr, 1, 4, 1, 1)\n\n cb_smart_split = Gtk.CheckButton.new_with_label(voc[\"smart-split\"])\n cb_smart_split.set_property(\"halign\", Gtk.Align.START)\n cb_smart_split.set_tooltip_text(voc[\"smart-split-tooltip\"])\n cb_smart_split.set_active(settings[\"dwindle-smart_split\"])\n cb_smart_split.connect(\"toggled\", set_from_checkbutton, settings, \"dwindle-smart_split\")\n grid.attach(cb_smart_split, 2, 4, 1, 1)\n\n cb_smart_resizing = Gtk.CheckButton.new_with_label(voc[\"smart-resizing\"])\n cb_smart_resizing.set_property(\"halign\", Gtk.Align.START)\n cb_smart_resizing.set_tooltip_text(voc[\"smart-resizing-tooltip\"])\n cb_smart_resizing.set_active(settings[\"dwindle-smart_resizing\"])\n cb_smart_resizing.connect(\"toggled\", set_from_checkbutton, settings, \"dwindle-smart_resizing\")\n grid.attach(cb_smart_resizing, 3, 4, 1, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef h_master_tab(settings, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(voc[\"common\"], voc[\"master-layout\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n cb_use_settings = Gtk.CheckButton.new_with_label(voc[\"use-these-settings\"])\n cb_use_settings.set_property(\"halign\", Gtk.Align.START)\n cb_use_settings.set_property(\"margin-bottom\", 6)\n cb_use_settings.set_tooltip_text(voc[\"hyprland-include-tooltip\"])\n cb_use_settings.set_active(settings[\"master-use-settings\"])\n cb_use_settings.connect(\"toggled\", set_from_checkbutton, settings, \"master-use-settings\")\n grid.attach(cb_use_settings, 0, 0, 2, 1)\n\n cb_assp = Gtk.CheckButton.new_with_label(voc[\"allow-small-split\"])\n cb_assp.set_property(\"halign\", Gtk.Align.START)\n cb_assp.set_tooltip_text(voc[\"allow-small-split-tooltip\"])\n cb_assp.set_active(settings[\"master-allow_small_split\"])\n cb_assp.connect(\"toggled\", set_from_checkbutton, settings, \"master-allow_small_split\")\n grid.attach(cb_assp, 0, 1, 1, 1)\n\n cb_nis = Gtk.CheckButton.new_with_label(voc[\"new-is-master\"])\n cb_nis.set_property(\"halign\", Gtk.Align.START)\n 
cb_nis.set_tooltip_text(voc[\"new-is-master-tooltip\"])\n cb_nis.set_active(settings[\"master-new_is_master\"])\n cb_nis.connect(\"toggled\", set_from_checkbutton, settings, \"master-new_is_master\")\n grid.attach(cb_nis, 1, 1, 1, 1)\n\n cb_not = Gtk.CheckButton.new_with_label(voc[\"new-on-top\"])\n cb_not.set_property(\"halign\", Gtk.Align.START)\n cb_not.set_tooltip_text(voc[\"new-on-top-tooltip\"])\n cb_not.set_active(settings[\"master-new_on_top\"])\n cb_not.connect(\"toggled\", set_from_checkbutton, settings, \"master-new_on_top\")\n grid.attach(cb_not, 2, 1, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"master-split-factor\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 2, 1, 1)\n\n sb_msf = Gtk.SpinButton.new_with_range(0.0, 1.0, 0.01)\n sb_msf.set_value(settings[\"master-mfact\"])\n sb_msf.set_tooltip_text(voc[\"master-split-factor-tooltip\"])\n sb_msf.connect(\"value-changed\", set_from_spinbutton, settings, \"master-mfact\", 3)\n grid.attach(sb_msf, 1, 2, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"special-scale-factor\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 2, 1, 1)\n\n sb_ssf = Gtk.SpinButton.new_with_range(0.0, 1.0, 0.01)\n sb_ssf.set_value(settings[\"master-special_scale_factor\"])\n sb_ssf.set_tooltip_text(voc[\"special-scale-factor-tooltip\"])\n sb_ssf.connect(\"value-changed\", set_from_spinbutton, settings, \"master-special_scale_factor\", 3)\n grid.attach(sb_ssf, 3, 2, 1, 1)\n\n cb_ngwo = Gtk.CheckButton.new_with_label(voc[\"no-gaps-when-only\"])\n cb_ngwo.set_property(\"halign\", Gtk.Align.START)\n cb_ngwo.set_tooltip_text(voc[\"no-gaps-when-only-tooltip\"])\n cb_ngwo.set_active(settings[\"master-no_gaps_when_only\"])\n cb_ngwo.connect(\"toggled\", set_from_checkbutton, settings, \"master-no_gaps_when_only\")\n grid.attach(cb_ngwo, 0, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"master-orientation\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 3, 1, 1)\n\n combo_orientation = Gtk.ComboBoxText()\n combo_orientation.set_property(\"halign\", Gtk.Align.START)\n combo_orientation.set_tooltip_text(voc[\"master-orientation-tooltip\"])\n d = {\"left\": voc[\"left\"], \"right\": voc[\"right\"], \"top\": voc[\"top\"], \"bottom\": voc[\"bottom\"],\n \"center\": voc[\"center\"]}\n for item in [\"left\", \"right\", \"top\", \"bottom\", \"center\"]:\n combo_orientation.append(item, d[item])\n combo_orientation.set_active_id(settings[\"master-orientation\"])\n combo_orientation.connect(\"changed\", set_dict_key_from_combo, settings, \"master-orientation\")\n grid.attach(combo_orientation, 3, 3, 1, 1)\n\n cb_inherit = Gtk.CheckButton.new_with_label(voc[\"inherit-fullscreen\"])\n cb_inherit.set_property(\"halign\", Gtk.Align.START)\n cb_inherit.set_tooltip_text(voc[\"inherit-fullscreen-tooltip\"])\n cb_inherit.set_active(settings[\"master-inherit_fullscreen\"])\n cb_inherit.connect(\"toggled\", set_from_checkbutton, settings, \"master-inherit_fullscreen\")\n grid.attach(cb_inherit, 0, 4, 1, 1)\n\n cb_acenter = Gtk.CheckButton.new_with_label(voc[\"always-center-master\"])\n cb_acenter.set_property(\"halign\", Gtk.Align.START)\n cb_acenter.set_tooltip_text(voc[\"always-center-master-tooltip\"])\n cb_acenter.set_active(settings[\"master-always_center_master\"])\n cb_acenter.connect(\"toggled\", set_from_checkbutton, settings, \"master-always_center_master\")\n grid.attach(cb_acenter, 1, 4, 2, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef h_misc_tab(settings, voc):\n frame = 
Gtk.Frame()\n frame.set_label(\" {}: {} \".format(voc[\"common\"], voc[\"miscellaneous\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n cb_use_settings = Gtk.CheckButton.new_with_label(voc[\"use-these-settings\"])\n cb_use_settings.set_property(\"halign\", Gtk.Align.START)\n cb_use_settings.set_property(\"margin-bottom\", 6)\n cb_use_settings.set_tooltip_text(voc[\"hyprland-include-tooltip\"])\n cb_use_settings.set_active(settings[\"misc-use-settings\"])\n cb_use_settings.connect(\"toggled\", set_from_checkbutton, settings, \"misc-use-settings\")\n grid.attach(cb_use_settings, 0, 0, 2, 1)\n\n cb_logo = Gtk.CheckButton.new_with_label(voc[\"disable-hyprland-logo\"])\n cb_logo.set_property(\"halign\", Gtk.Align.START)\n cb_logo.set_tooltip_text(voc[\"disable-hyprland-logo-tooltip\"])\n cb_logo.set_active(settings[\"misc-disable_hyprland_logo\"])\n cb_logo.connect(\"toggled\", set_from_checkbutton, settings, \"misc-disable_hyprland_logo\")\n grid.attach(cb_logo, 0, 1, 1, 1)\n\n cb_splash = Gtk.CheckButton.new_with_label(voc[\"disable-splash-rendering\"])\n cb_splash.set_property(\"halign\", Gtk.Align.START)\n cb_splash.set_active(settings[\"misc-disable_splash_rendering\"])\n cb_splash.connect(\"toggled\", set_from_checkbutton, settings, \"misc-disable_splash_rendering\")\n grid.attach(cb_splash, 1, 1, 1, 1)\n\n cb_dpms_mouse = Gtk.CheckButton.new_with_label(voc[\"mouse-move-enables-dpms\"])\n cb_dpms_mouse.set_property(\"halign\", Gtk.Align.START)\n cb_dpms_mouse.set_tooltip_text(voc[\"mouse-move-enables-dpms-tooltip\"])\n cb_dpms_mouse.set_active(settings[\"misc-mouse_move_enables_dpms\"])\n cb_dpms_mouse.connect(\"toggled\", set_from_checkbutton, settings, \"misc-mouse_move_enables_dpms\")\n grid.attach(cb_dpms_mouse, 0, 2, 1, 1)\n\n cb_dpms_key = Gtk.CheckButton.new_with_label(voc[\"key-press-enables-dpms\"])\n cb_dpms_key.set_property(\"halign\", Gtk.Align.START)\n cb_dpms_key.set_tooltip_text(voc[\"key-press-enables-dpms-tooltip\"])\n cb_dpms_key.set_active(settings[\"misc-key_press_enables_dpms\"])\n cb_dpms_key.connect(\"toggled\", set_from_checkbutton, settings, \"misc-key_press_enables_dpms\")\n grid.attach(cb_dpms_key, 1, 2, 2, 1)\n\n cb_layers_hog = Gtk.CheckButton.new_with_label(voc[\"layers-hog-keyboard-focus\"])\n cb_layers_hog.set_property(\"halign\", Gtk.Align.START)\n cb_layers_hog.set_tooltip_text(voc[\"layers-hog-keyboard-focus-tooltip\"])\n cb_layers_hog.set_active(settings[\"misc-layers_hog_keyboard_focus\"])\n cb_layers_hog.connect(\"toggled\", set_from_checkbutton, settings, \"misc-layers_hog_keyboard_focus\")\n grid.attach(cb_layers_hog, 0, 3, 1, 1)\n\n cb_focus_on_activate = Gtk.CheckButton.new_with_label(voc[\"focus-on-activate\"])\n cb_focus_on_activate.set_property(\"halign\", Gtk.Align.START)\n cb_focus_on_activate.set_tooltip_text(voc[\"focus-on-activate-tooltip\"])\n cb_focus_on_activate.set_active(settings[\"misc-focus_on_activate\"])\n cb_focus_on_activate.connect(\"toggled\", set_from_checkbutton, settings, \"misc-focus_on_activate\")\n grid.attach(cb_focus_on_activate, 1, 3, 2, 1)\n\n cb_hide_cursor = Gtk.CheckButton.new_with_label(voc[\"hide-cursor-on-touch\"])\n cb_hide_cursor.set_property(\"halign\", Gtk.Align.START)\n cb_hide_cursor.set_tooltip_text(voc[\"hide-cursor-on-touch-tooltip\"])\n cb_hide_cursor.set_active(settings[\"misc-hide_cursor_on_touch\"])\n cb_hide_cursor.connect(\"toggled\", 
set_from_checkbutton, settings, \"misc-hide_cursor_on_touch\")\n grid.attach(cb_hide_cursor, 0, 4, 1, 1)\n\n cb_mouse_mon = Gtk.CheckButton.new_with_label(voc[\"mouse-move-focuses-monitor\"])\n cb_mouse_mon.set_property(\"halign\", Gtk.Align.START)\n cb_mouse_mon.set_tooltip_text(voc[\"mouse-move-focuses-monitor-tooltip\"])\n cb_mouse_mon.set_active(settings[\"misc-mouse_move_focuses_monitor\"])\n cb_mouse_mon.connect(\"toggled\", set_from_checkbutton, settings, \"misc-mouse_move_focuses_monitor\")\n grid.attach(cb_mouse_mon, 1, 4, 2, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"vrr\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 5, 1, 1)\n\n combo_vrr = Gtk.ComboBoxText()\n combo_vrr.set_property(\"halign\", Gtk.Align.START)\n combo_vrr.set_tooltip_text(voc[\"vrr-tooltip\"])\n d = {0: voc[\"off\"], 1: voc[\"on\"], 2: voc[\"fullscreen-only\"]}\n for item in [0, 1, 2]:\n combo_vrr.append(str(item), d[item])\n combo_vrr.set_active_id(str(settings[\"misc-vrr\"]))\n combo_vrr.connect(\"changed\", set_int_dict_key_from_combo, settings, \"misc-vrr\")\n grid.attach(combo_vrr, 1, 5, 1, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef h_input_tab(settings, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(voc[\"common\"], voc[\"input-devices\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n cb_keyboard_use_settings = Gtk.CheckButton.new_with_label(voc[\"use-these-settings\"])\n cb_keyboard_use_settings.set_property(\"halign\", Gtk.Align.START)\n cb_keyboard_use_settings.set_property(\"margin-bottom\", 6)\n cb_keyboard_use_settings.set_tooltip_text(voc[\"hyprland-include-tooltip\"])\n cb_keyboard_use_settings.set_active(settings[\"input-use-settings\"])\n cb_keyboard_use_settings.connect(\"toggled\", set_from_checkbutton, settings, \"input-use-settings\")\n grid.attach(cb_keyboard_use_settings, 0, 0, 2, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"kb-layout\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 1, 1, 1)\n\n entry_layout = Gtk.Entry()\n entry_layout.set_tooltip_text(voc[\"keyboard-layout-tooltip\"])\n entry_layout.set_text(settings[\"input-kb_layout\"])\n entry_layout.connect(\"changed\", set_from_entry, settings, \"input-kb_layout\")\n grid.attach(entry_layout, 1, 1, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"kb-variant\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 1, 1, 1)\n\n entry_variant = Gtk.Entry()\n entry_variant.set_tooltip_text(voc[\"keyboard-variant-tooltip\"])\n entry_variant.set_text(settings[\"input-kb_variant\"])\n entry_variant.connect(\"changed\", set_from_entry, settings, \"input-kb_variant\")\n grid.attach(entry_variant, 3, 1, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"kb-model\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 2, 1, 1)\n\n entry_model = Gtk.Entry()\n entry_model.set_tooltip_text(voc[\"xkb-kb-model-tooltip\"])\n entry_model.set_text(settings[\"input-kb_model\"])\n entry_model.connect(\"changed\", set_from_entry, settings, \"input-kb_model\")\n grid.attach(entry_model, 1, 2, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"kb-options\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 2, 1, 1)\n\n entry_options = Gtk.Entry()\n entry_options.set_tooltip_text(voc[\"xkb-kb-options-tooltip\"])\n 
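# Editor's note: set_from_entry, connected throughout this tab, is assumed to store the entry text verbatim:\n #   def set_from_entry(entry, settings, key):\n #       settings[key] = entry.get_text()\n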
entry_options.set_text(settings[\"input-kb_options\"])\n entry_options.connect(\"changed\", set_from_entry, settings, \"input-kb_options\")\n grid.attach(entry_options, 3, 2, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"kb-rules\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 4, 1, 1)\n\n entry_rules = Gtk.Entry()\n entry_rules.set_tooltip_text(voc[\"xkb-kb-rules-tooltip\"])\n entry_rules.set_text(settings[\"input-kb_rules\"])\n entry_rules.connect(\"changed\", set_from_entry, settings, \"input-kb_rules\")\n grid.attach(entry_rules, 1, 4, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"kb-file\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 4, 1, 1)\n\n entry_file = Gtk.Entry()\n entry_file.set_tooltip_text(voc[\"xkb-kb-file-tooltip\"])\n entry_file.set_text(settings[\"input-kb_file\"])\n entry_file.connect(\"changed\", set_from_entry, settings, \"input-kb_file\")\n grid.attach(entry_file, 3, 4, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"keyboard-repeat-delay\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 5, 1, 1)\n\n sb_repeat_delay = Gtk.SpinButton.new_with_range(1, 6000, 1)\n sb_repeat_delay.set_value(settings[\"input-repeat_delay\"])\n sb_repeat_delay.connect(\"value-changed\", set_int_from_spinbutton, settings, \"input-repeat_delay\")\n sb_repeat_delay.set_tooltip_text(voc[\"keyboard-repeat-delay-tooltip\"])\n grid.attach(sb_repeat_delay, 1, 5, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"keyboard-repeat-rate\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 5, 1, 1)\n\n sb_repeat_rate = Gtk.SpinButton.new_with_range(1, 4000, 1)\n sb_repeat_rate.set_value(settings[\"input-repeat_rate\"])\n sb_repeat_rate.connect(\"value-changed\", set_int_from_spinbutton, settings, \"input-repeat_rate\")\n sb_repeat_rate.set_tooltip_text(voc[\"keyboard-repeat-rate-tooltip\"])\n grid.attach(sb_repeat_rate, 3, 5, 1, 1)\n\n cb_numlock = Gtk.CheckButton.new_with_label(voc[\"numlock-by-default\"])\n cb_numlock.set_property(\"halign\", Gtk.Align.START)\n cb_numlock.set_property(\"margin-bottom\", 6)\n cb_numlock.set_active(settings[\"input-numlock_by_default\"])\n cb_numlock.connect(\"toggled\", set_from_checkbutton, settings, \"input-numlock_by_default\")\n grid.attach(cb_numlock, 1, 7, 2, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"mouse-sensitivity\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 8, 1, 1)\n\n sb_m_sensitivity = Gtk.SpinButton.new_with_range(-1.0, 1.0, 0.1)\n sb_m_sensitivity.set_value(settings[\"input-sensitivity\"])\n sb_m_sensitivity.connect(\"value-changed\", set_from_spinbutton, settings, \"input-sensitivity\", 1)\n sb_m_sensitivity.set_tooltip_text(voc[\"scroll-factor-tooltip\"])\n grid.attach(sb_m_sensitivity, 1, 8, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"acceleration-profile\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 8, 1, 1)\n\n combo_aprofile = Gtk.ComboBoxText()\n combo_aprofile.set_tooltip_text(voc[\"acceleration-profile-tooltip\"])\n combo_aprofile.append(\"\", voc[\"default\"])\n for item in [\"flat\", \"adaptive\"]:\n combo_aprofile.append(item, voc[item])\n combo_aprofile.set_active_id(settings[\"input-accel_profile\"])\n combo_aprofile.connect(\"changed\", set_dict_key_from_combo, settings, \"input-accel_profile\")\n grid.attach(combo_aprofile, 3, 8, 1, 1)\n\n cb_left_handed = Gtk.CheckButton.new_with_label(voc[\"left-handed\"])\n 
cb_left_handed.set_tooltip_text(voc[\"left-handed-tooltip\"])\n cb_left_handed.set_property(\"halign\", Gtk.Align.START)\n cb_left_handed.set_active(settings[\"input-left_handed\"])\n cb_left_handed.connect(\"toggled\", set_from_checkbutton, settings, \"input-left_handed\")\n grid.attach(cb_left_handed, 1, 9, 1, 1)\n\n cb_natural_scroll = Gtk.CheckButton.new_with_label(voc[\"natural-scroll\"])\n cb_natural_scroll.set_tooltip_text(voc[\"natural-scroll-tooltip\"])\n cb_natural_scroll.set_property(\"halign\", Gtk.Align.START)\n cb_natural_scroll.set_active(settings[\"input-natural_scroll\"])\n cb_natural_scroll.connect(\"toggled\", set_from_checkbutton, settings, \"input-natural_scroll\")\n grid.attach(cb_natural_scroll, 2, 9, 1, 1)\n\n cb_mouse_refocus = Gtk.CheckButton.new_with_label(voc[\"mouse-refocus\"])\n cb_mouse_refocus.set_tooltip_text(voc[\"mouse-refocus-tooltip\"])\n cb_mouse_refocus.set_property(\"halign\", Gtk.Align.START)\n cb_mouse_refocus.set_active(settings[\"input-mouse_refocus\"])\n cb_mouse_refocus.connect(\"toggled\", set_from_checkbutton, settings, \"input-mouse_refocus\")\n grid.attach(cb_mouse_refocus, 3, 9, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"scroll-method\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 10, 1, 1)\n\n combo_scroll_method = Gtk.ComboBoxText()\n combo_scroll_method.set_tooltip_text(voc[\"scroll-method-tooltip\"])\n for item in [(\"2fg\", voc[\"two_finger\"]), (\"edge\", voc[\"edge\"]), (\"on_button_down\", voc[\"on_button_down\"]),\n (\"no_scroll\", voc[\"none\"])]:\n combo_scroll_method.append(item[0], item[1])\n combo_scroll_method.set_active_id(settings[\"input-scroll_method\"])\n combo_scroll_method.connect(\"changed\", set_dict_key_from_combo, settings, \"input-scroll_method\")\n grid.attach(combo_scroll_method, 1, 10, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"scroll-button\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 10, 1, 1)\n\n sb_scroll_button = Gtk.SpinButton.new_with_range(0, 512, 1)\n sb_scroll_button.set_value(settings[\"input-scroll_button\"])\n sb_scroll_button.connect(\"value-changed\", set_int_from_spinbutton, settings, \"input-scroll_button\")\n sb_scroll_button.set_tooltip_text(voc[\"scroll-button-tooltip\"])\n grid.attach(sb_scroll_button, 3, 10, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"focus-follow-mouse\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 11, 1, 1)\n\n combo_focus = Gtk.ComboBoxText()\n # combo_focus.set_property(\"halign\", Gtk.Align.START)\n combo_focus.set_tooltip_text(voc[\"focus-follow-mouse-tooltip\"])\n for item in [\"0\", \"1\", \"2\", \"3\"]:\n combo_focus.append(item, item)\n combo_focus.set_active_id(str(settings[\"input-follow_mouse\"]))\n combo_focus.connect(\"changed\", set_int_dict_key_from_combo, settings, \"input-follow_mouse\")\n grid.attach(combo_focus, 1, 11, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"float-switch-override-focus\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 11, 1, 1)\n\n combo_float_switch = Gtk.ComboBoxText()\n # combo_float_switch.set_property(\"halign\", Gtk.Align.START)\n combo_float_switch.set_tooltip_text(voc[\"float-switch-override-focus-tooltip\"])\n combo_float_switch.append(\"0\", voc[\"disabled\"])\n for item in [\"1\", \"2\"]:\n combo_float_switch.append(item, item)\n combo_float_switch.set_active_id(str(settings[\"input-float_switch_override_focus\"]))\n combo_float_switch.connect(\"changed\", set_int_dict_key_from_combo, settings, 
\"input-float_switch_override_focus\")\n grid.attach(combo_float_switch, 3, 11, 1, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef pointer_tab(settings, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(voc[\"common\"], voc[\"pointer-device\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n cb_pointer_use_settings = Gtk.CheckButton.new_with_label(voc[\"use-these-settings\"])\n cb_pointer_use_settings.set_property(\"halign\", Gtk.Align.START)\n cb_pointer_use_settings.set_property(\"margin-bottom\", 6)\n cb_pointer_use_settings.set_tooltip_text(voc[\"pointer-device-include-tooltip\"])\n cb_pointer_use_settings.set_active(settings[\"pointer-use-settings\"])\n cb_pointer_use_settings.connect(\"toggled\", set_from_checkbutton, settings, \"pointer-use-settings\")\n grid.attach(cb_pointer_use_settings, 0, 0, 2, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"device\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 1, 1, 1)\n\n combo_device = Gtk.ComboBoxText()\n combo_device.set_property(\"halign\", Gtk.Align.START)\n combo_device.set_tooltip_text(voc[\"device-tooltip\"])\n combo_device.append(\"\", voc[\"all-of-type\"])\n keyboards = list_inputs_by_type(input_type=\"pointer\")\n for item in keyboards:\n combo_device.append(item, item)\n combo_device.set_active_id(settings[\"pointer-identifier\"])\n combo_device.connect(\"changed\", set_dict_key_from_combo, settings, \"pointer-identifier\")\n grid.attach(combo_device, 1, 1, 2, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"acceleration-profile\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 2, 1, 1)\n\n combo_aprofile = Gtk.ComboBoxText()\n combo_aprofile.set_property(\"halign\", Gtk.Align.START)\n combo_aprofile.set_tooltip_text(voc[\"acceleration-profile-tooltip\"])\n for item in [\"flat\", \"adaptive\"]:\n combo_aprofile.append(item, voc[item])\n combo_aprofile.set_active_id(settings[\"pointer-accel-profile\"])\n combo_aprofile.connect(\"changed\", set_dict_key_from_combo, settings, \"pointer-accel-profile\")\n grid.attach(combo_aprofile, 1, 2, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"acceleration\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 3, 1, 1)\n\n sb_acceleration = Gtk.SpinButton.new_with_range(-1, 1, 0.1)\n sb_acceleration.set_value(settings[\"pointer-pointer-accel\"])\n sb_acceleration.connect(\"value-changed\", set_from_spinbutton, settings, \"pointer-pointer-accel\", 1)\n sb_acceleration.set_tooltip_text(voc[\"acceleration-tooltip\"])\n grid.attach(sb_acceleration, 1, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"natural-scroll\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 4, 1, 1)\n\n combo_nscroll = Gtk.ComboBoxText()\n combo_nscroll.set_property(\"halign\", Gtk.Align.START)\n combo_nscroll.set_tooltip_text(voc[\"natural-scroll-tooltip\"])\n for item in [\"disabled\", \"enabled\"]:\n combo_nscroll.append(item, voc[item])\n combo_nscroll.set_active_id(settings[\"pointer-natural-scroll\"])\n combo_nscroll.connect(\"changed\", set_dict_key_from_combo, settings, \"pointer-natural-scroll\")\n grid.attach(combo_nscroll, 1, 4, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"scroll-factor\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 5, 1, 1)\n\n sb_sfactor = Gtk.SpinButton.new_with_range(0.1, 10, 0.1)\n 
sb_sfactor.set_value(settings[\"pointer-scroll-factor\"])\n sb_sfactor.connect(\"value-changed\", set_from_spinbutton, settings, \"pointer-scroll-factor\", 1)\n sb_sfactor.set_tooltip_text(voc[\"scroll-factor-tooltip\"])\n grid.attach(sb_sfactor, 1, 5, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"left-handed\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 6, 1, 1)\n\n combo_left_handed = Gtk.ComboBoxText()\n combo_left_handed.set_property(\"halign\", Gtk.Align.START)\n combo_left_handed.set_tooltip_text(voc[\"left-handed-tooltip\"])\n for item in [\"disabled\", \"enabled\"]:\n combo_left_handed.append(item, voc[item])\n combo_left_handed.set_active_id(settings[\"pointer-left-handed\"])\n combo_left_handed.connect(\"changed\", set_dict_key_from_combo, settings, \"pointer-left-handed\")\n grid.attach(combo_left_handed, 1, 6, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"custom-field\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 7, 1, 1)\n\n entry_cname = Gtk.Entry()\n entry_cname.set_tooltip_text(voc[\"custom-field-name-tooltip\"])\n entry_cname.set_placeholder_text(voc[\"name\"])\n entry_cname.set_text(settings[\"pointer-custom-name\"])\n entry_cname.connect(\"changed\", set_from_entry, settings, \"pointer-custom-name\")\n grid.attach(entry_cname, 1, 7, 1, 1)\n\n entry_cname = Gtk.Entry()\n entry_cname.set_tooltip_text(voc[\"custom-field-value-tooltip\"])\n entry_cname.set_placeholder_text(voc[\"value\"])\n entry_cname.set_text(settings[\"pointer-custom-value\"])\n entry_cname.connect(\"changed\", set_from_entry, settings, \"pointer-custom-value\")\n grid.attach(entry_cname, 2, 7, 1, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef touchpad_tab(settings, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(voc[\"common\"], voc[\"touchpad\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n cb_touchpad_use_settings = Gtk.CheckButton.new_with_label(voc[\"use-these-settings\"])\n cb_touchpad_use_settings.set_property(\"halign\", Gtk.Align.START)\n cb_touchpad_use_settings.set_property(\"margin-bottom\", 6)\n cb_touchpad_use_settings.set_tooltip_text(voc[\"touchpad-device-include-tooltip\"])\n cb_touchpad_use_settings.set_active(settings[\"touchpad-use-settings\"])\n cb_touchpad_use_settings.connect(\"toggled\", set_from_checkbutton, settings, \"touchpad-use-settings\")\n grid.attach(cb_touchpad_use_settings, 0, 0, 2, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"device\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 1, 1, 1)\n\n combo_device = Gtk.ComboBoxText()\n combo_device.set_property(\"halign\", Gtk.Align.START)\n combo_device.set_tooltip_text(voc[\"device-tooltip\"])\n combo_device.append(\"\", voc[\"all-of-type\"])\n touchpads = list_inputs_by_type(input_type=\"touchpad\")\n for item in touchpads:\n combo_device.append(item, item)\n combo_device.set_active_id(settings[\"touchpad-identifier\"])\n combo_device.connect(\"changed\", set_dict_key_from_combo, settings, \"touchpad-identifier\")\n grid.attach(combo_device, 1, 1, 3, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"acceleration-profile\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 2, 1, 1)\n\n combo_aprofile = Gtk.ComboBoxText()\n combo_aprofile.set_property(\"halign\", Gtk.Align.START)\n 
combo_aprofile.set_tooltip_text(voc[\"acceleration-profile-tooltip\"])\n for item in [\"flat\", \"adaptive\"]:\n combo_aprofile.append(item, voc[item])\n combo_aprofile.set_active_id(settings[\"touchpad-accel-profile\"])\n combo_aprofile.connect(\"changed\", set_dict_key_from_combo, settings, \"touchpad-accel-profile\")\n grid.attach(combo_aprofile, 1, 2, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"acceleration\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 3, 1, 1)\n\n sb_acceleration = Gtk.SpinButton.new_with_range(-1, 1, 0.1)\n sb_acceleration.set_value(settings[\"touchpad-pointer-accel\"])\n sb_acceleration.connect(\"value-changed\", set_from_spinbutton, settings, \"touchpad-pointer-accel\", 1)\n sb_acceleration.set_tooltip_text(voc[\"acceleration-tooltip\"])\n grid.attach(sb_acceleration, 1, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"natural-scroll\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 4, 1, 1)\n\n combo_nscroll = Gtk.ComboBoxText()\n combo_nscroll.set_property(\"halign\", Gtk.Align.START)\n combo_nscroll.set_tooltip_text(voc[\"natural-scroll-tooltip\"])\n for item in [\"disabled\", \"enabled\"]:\n combo_nscroll.append(item, voc[item])\n combo_nscroll.set_active_id(settings[\"touchpad-natural-scroll\"])\n combo_nscroll.connect(\"changed\", set_dict_key_from_combo, settings, \"touchpad-natural-scroll\")\n grid.attach(combo_nscroll, 1, 4, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"scroll-factor\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 5, 1, 1)\n\n sb_sfactor = Gtk.SpinButton.new_with_range(0.1, 10, 0.1)\n sb_sfactor.set_value(settings[\"touchpad-scroll-factor\"])\n sb_sfactor.connect(\"value-changed\", set_from_spinbutton, settings, \"touchpad-scroll-factor\", 1)\n sb_sfactor.set_tooltip_text(voc[\"scroll-factor-tooltip\"])\n grid.attach(sb_sfactor, 1, 5, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"scroll-method\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 6, 1, 1)\n\n combo_scroll_method = Gtk.ComboBoxText()\n combo_scroll_method.set_property(\"halign\", Gtk.Align.START)\n combo_scroll_method.set_tooltip_text(voc[\"scroll-method-tooltip\"])\n for item in [(\"two_finger\", voc[\"two_finger\"]), (\"edge\", voc[\"edge\"]), (\"on_button_down\", voc[\"on_button_down\"]),\n (\"none\", voc[\"none\"])]:\n combo_scroll_method.append(item[0], item[1])\n combo_scroll_method.set_active_id(settings[\"touchpad-scroll-method\"])\n combo_scroll_method.connect(\"changed\", set_dict_key_from_combo, settings, \"touchpad-scroll-method\")\n grid.attach(combo_scroll_method, 1, 6, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"left-handed\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 7, 1, 1)\n\n combo_left_handed = Gtk.ComboBoxText()\n combo_left_handed.set_property(\"halign\", Gtk.Align.START)\n combo_left_handed.set_tooltip_text(voc[\"left-handed-tooltip\"])\n for item in [\"disabled\", \"enabled\"]:\n combo_left_handed.append(item, voc[item])\n combo_left_handed.set_active_id(settings[\"touchpad-left-handed\"])\n combo_left_handed.connect(\"changed\", set_dict_key_from_combo, settings, \"touchpad-left-handed\")\n grid.attach(combo_left_handed, 1, 7, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"tap\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 2, 1, 1)\n\n combo_tap = Gtk.ComboBoxText()\n combo_tap.set_property(\"halign\", Gtk.Align.START)\n combo_tap.set_tooltip_text(voc[\"tap-tooltip\"])\n for 
item in [\"enabled\", \"disabled\"]:\n combo_tap.append(item, voc[item])\n combo_tap.set_active_id(settings[\"touchpad-tap\"])\n combo_tap.connect(\"changed\", set_dict_key_from_combo, settings, \"touchpad-tap\")\n grid.attach(combo_tap, 3, 2, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"tap-button-map\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 3, 1, 1)\n\n combo_tap_btn_map = Gtk.ComboBoxText()\n combo_tap_btn_map.set_property(\"halign\", Gtk.Align.START)\n combo_tap_btn_map.set_tooltip_text(voc[\"tap-button-map-tooltip\"])\n for item in [\"lrm\", \"lmr\"]:\n combo_tap_btn_map.append(item, item)\n combo_tap_btn_map.set_active_id(settings[\"touchpad-tap-button-map\"])\n combo_tap_btn_map.connect(\"changed\", set_dict_key_from_combo, settings, \"touchpad-tap-button-map\")\n grid.attach(combo_tap_btn_map, 3, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"middle-emulation\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 4, 1, 1)\n\n combo_memulation = Gtk.ComboBoxText()\n combo_memulation.set_property(\"halign\", Gtk.Align.START)\n combo_memulation.set_tooltip_text(voc[\"middle-emulation-tooltip\"])\n for item in [\"enabled\", \"disabled\"]:\n combo_memulation.append(item, voc[item])\n combo_memulation.set_active_id(settings[\"touchpad-middle-emulation\"])\n combo_memulation.connect(\"changed\", set_dict_key_from_combo, settings, \"touchpad-middle-emulation\")\n grid.attach(combo_memulation, 3, 4, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"drag\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 5, 1, 1)\n\n combo_drag = Gtk.ComboBoxText()\n combo_drag.set_property(\"halign\", Gtk.Align.START)\n combo_drag.set_tooltip_text(voc[\"drag-tooltip\"])\n for item in [\"enabled\", \"disabled\"]:\n combo_drag.append(item, voc[item])\n combo_drag.set_active_id(settings[\"touchpad-drag\"])\n combo_drag.connect(\"changed\", set_dict_key_from_combo, settings, \"touchpad-drag\")\n grid.attach(combo_drag, 3, 5, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"drag-lock\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 6, 1, 1)\n\n combo_drag_lock = Gtk.ComboBoxText()\n combo_drag_lock.set_property(\"halign\", Gtk.Align.START)\n combo_drag_lock.set_tooltip_text(voc[\"drag-lock-tooltip\"])\n for item in [\"disabled\", \"enabled\"]:\n combo_drag_lock.append(item, voc[item])\n combo_drag_lock.set_active_id(settings[\"touchpad-drag-lock\"])\n combo_drag_lock.connect(\"changed\", set_dict_key_from_combo, settings, \"touchpad-drag-lock\")\n grid.attach(combo_drag_lock, 3, 6, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"dwt\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 2, 7, 1, 1)\n\n combo_dwt = Gtk.ComboBoxText()\n combo_dwt.set_property(\"halign\", Gtk.Align.START)\n combo_dwt.set_tooltip_text(voc[\"dwt-tooltip\"])\n for item in [\"enabled\", \"disabled\"]:\n combo_dwt.append(item, voc[item])\n combo_dwt.set_active_id(settings[\"touchpad-dwt\"])\n combo_dwt.connect(\"changed\", set_dict_key_from_combo, settings, \"touchpad-dwt\")\n grid.attach(combo_dwt, 3, 7, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"custom-field\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 8, 1, 1)\n\n entry_cname = Gtk.Entry()\n entry_cname.set_tooltip_text(voc[\"custom-field-name-tooltip\"])\n entry_cname.set_placeholder_text(voc[\"name\"])\n entry_cname.set_text(settings[\"touchpad-custom-name\"])\n entry_cname.connect(\"changed\", set_from_entry, 
settings, \"touchpad-custom-name\")\n grid.attach(entry_cname, 1, 8, 1, 1)\n\n entry_cname = Gtk.Entry()\n entry_cname.set_tooltip_text(voc[\"custom-field-value-tooltip\"])\n entry_cname.set_placeholder_text(voc[\"value\"])\n entry_cname.set_text(settings[\"touchpad-custom-value\"])\n entry_cname.connect(\"changed\", set_from_entry, settings, \"touchpad-custom-value\")\n grid.attach(entry_cname, 2, 8, 2, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef h_touchpad_tab(settings, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(voc[\"common\"], voc[\"touchpad\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n cb_touchpad_use_settings = Gtk.CheckButton.new_with_label(voc[\"use-these-settings\"])\n cb_touchpad_use_settings.set_property(\"halign\", Gtk.Align.START)\n cb_touchpad_use_settings.set_property(\"margin-bottom\", 6)\n cb_touchpad_use_settings.set_tooltip_text(voc[\"hyprland-include-tooltip\"])\n cb_touchpad_use_settings.set_active(settings[\"touchpad-use-settings\"])\n cb_touchpad_use_settings.connect(\"toggled\", set_from_checkbutton, settings, \"touchpad-use-settings\")\n grid.attach(cb_touchpad_use_settings, 0, 0, 2, 1)\n\n cb_dwt = Gtk.CheckButton.new_with_label(voc[\"disable-while-typing\"])\n cb_dwt.set_property(\"halign\", Gtk.Align.START)\n cb_dwt.set_tooltip_text(voc[\"dwt-tooltip\"])\n cb_dwt.set_active(settings[\"touchpad-disable_while_typing\"])\n cb_dwt.connect(\"toggled\", set_from_checkbutton, settings, \"touchpad-disable_while_typing\")\n grid.attach(cb_dwt, 1, 1, 1, 1)\n\n cb_nscroll = Gtk.CheckButton.new_with_label(voc[\"natural-scroll\"])\n cb_nscroll.set_property(\"halign\", Gtk.Align.START)\n cb_nscroll.set_tooltip_text(voc[\"natural-scroll-tooltip\"])\n cb_nscroll.set_active(settings[\"touchpad-natural_scroll\"])\n cb_nscroll.connect(\"toggled\", set_from_checkbutton, settings, \"touchpad-natural_scroll\")\n grid.attach(cb_nscroll, 1, 2, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"scroll-factor\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 3, 1, 1)\n\n sb_sfactor = Gtk.SpinButton.new_with_range(0.1, 10, 0.1)\n sb_sfactor.set_value(settings[\"touchpad-scroll_factor\"])\n sb_sfactor.connect(\"value-changed\", set_from_spinbutton, settings, \"touchpad-scroll_factor\", 1)\n sb_sfactor.set_tooltip_text(voc[\"scroll-factor-tooltip\"])\n grid.attach(sb_sfactor, 1, 3, 1, 1)\n\n cb_memulation = Gtk.CheckButton.new_with_label(voc[\"middle-emulation\"])\n cb_memulation.set_property(\"halign\", Gtk.Align.START)\n cb_memulation.set_tooltip_text(voc[\"middle-emulation-tooltip\"])\n cb_memulation.set_active(settings[\"touchpad-middle_button_emulation\"])\n cb_memulation.connect(\"toggled\", set_from_checkbutton, settings, \"touchpad-middle_button_emulation\")\n grid.attach(cb_memulation, 1, 4, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"tap-button-map\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 5, 1, 1)\n\n combo_tap_btn_map = Gtk.ComboBoxText()\n combo_tap_btn_map.set_property(\"halign\", Gtk.Align.START)\n combo_tap_btn_map.set_tooltip_text(voc[\"tap-button-map-tooltip\"])\n for item in [\"lrm\", \"lmr\"]:\n combo_tap_btn_map.append(item, item)\n combo_tap_btn_map.set_active_id(settings[\"touchpad-tap_button_map\"])\n combo_tap_btn_map.connect(\"changed\", set_dict_key_from_combo, settings, \"touchpad-tap_button_map\")\n 
grid.attach(combo_tap_btn_map, 1, 5, 1, 1)\n\n cb_clickfinger = Gtk.CheckButton.new_with_label(voc[\"clickfinger-behavior\"])\n cb_clickfinger.set_property(\"halign\", Gtk.Align.START)\n cb_clickfinger.set_tooltip_text(voc[\"clickfinger-behavior-tooltip\"])\n cb_clickfinger.set_active(settings[\"touchpad-clickfinger_behavior\"])\n cb_clickfinger.connect(\"toggled\", set_from_checkbutton, settings, \"touchpad-clickfinger_behavior\")\n grid.attach(cb_clickfinger, 1, 6, 1, 1)\n\n cb_tap2click = Gtk.CheckButton.new_with_label(voc[\"tap-to-click\"])\n cb_tap2click.set_property(\"halign\", Gtk.Align.START)\n cb_tap2click.set_tooltip_text(voc[\"tap-to-click-tooltip\"])\n cb_tap2click.set_active(settings[\"touchpad-tap-to-click\"])\n cb_tap2click.connect(\"toggled\", set_from_checkbutton, settings, \"touchpad-tap-to-click\")\n grid.attach(cb_tap2click, 1, 7, 1, 1)\n\n cb_drag_lock = Gtk.CheckButton.new_with_label(voc[\"drag-lock\"])\n cb_drag_lock.set_property(\"halign\", Gtk.Align.START)\n cb_drag_lock.set_tooltip_text(voc[\"drag-lock-tooltip\"])\n cb_drag_lock.set_active(settings[\"touchpad-drag_lock\"])\n cb_drag_lock.connect(\"toggled\", set_from_checkbutton, settings, \"touchpad-drag_lock\")\n grid.attach(cb_drag_lock, 1, 8, 1, 1)\n\n cb_tap_and_drag = Gtk.CheckButton.new_with_label(voc[\"tap-and-drag\"])\n cb_tap_and_drag.set_property(\"halign\", Gtk.Align.START)\n cb_tap_and_drag.set_tooltip_text(voc[\"tap-and-drag-tooltip\"])\n cb_tap_and_drag.set_active(settings[\"touchpad-tap-and-drag\"])\n cb_tap_and_drag.connect(\"toggled\", set_from_checkbutton, settings, \"touchpad-tap-and-drag\")\n grid.attach(cb_tap_and_drag, 1, 9, 1, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef lockscreen_tab(settings, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(voc[\"common\"], voc[\"idle-lock-screen\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 6)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n cb_lockscreen_use_settings = Gtk.CheckButton.new_with_label(voc[\"use-these-settings\"])\n cb_lockscreen_use_settings.set_property(\"halign\", Gtk.Align.START)\n cb_lockscreen_use_settings.set_property(\"margin-bottom\", 2)\n cb_lockscreen_use_settings.set_tooltip_text(voc[\"lock-screen-include-tooltip\"])\n cb_lockscreen_use_settings.set_active(settings[\"lockscreen-use-settings\"])\n cb_lockscreen_use_settings.connect(\"toggled\", set_idle_use_from_checkbutton, settings)\n grid.attach(cb_lockscreen_use_settings, 0, 0, 2, 1)\n\n lbl = Gtk.Label()\n lbl.set_markup(\"<b>{}</b>\".format(voc[\"lock-screen\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 0, 1, 2, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"locker\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 2, 1, 1)\n\n combo_locker = Gtk.ComboBoxText()\n combo_locker.set_tooltip_text(voc[\"locker-tooltip\"])\n if not os.getenv(\"HYPRLAND_INSTANCE_SIGNATURE\"):\n combo_locker.append(\"gtklock\", \"gtklock\")\n if is_command(\"swaylock\"):\n combo_locker.append(\"swaylock\", \"swaylock\")\n else:\n combo_locker.set_tooltip_text(voc[\"locker-tooltip2\"])\n combo_locker.set_active_id(settings[\"lockscreen-locker\"])\n combo_locker.connect(\"changed\", set_dict_key_from_combo, settings, \"lockscreen-locker\")\n grid.attach(combo_locker, 1, 2, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"backgrounds\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 3, 1, 1)\n\n 
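# Editor's note (assumption): with the \"unsplash\" source selected below, the random wallpaper is presumably fetched from a URL built out of the unsplash-width/height/keywords fields defined further down, e.g. https://source.unsplash.com/random/1920x1080/?nature\n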
combo_background = Gtk.ComboBoxText()\n combo_background.set_tooltip_text(voc[\"random-wallpaper-source\"])\n combo_background.append(\"unsplash\", voc[\"unsplash\"])\n combo_background.append(\"local\", voc[\"local\"])\n combo_background.set_active_id(settings[\"lockscreen-background-source\"])\n combo_background.connect(\"changed\", set_dict_key_from_combo, settings, \"lockscreen-background-source\")\n grid.attach(combo_background, 1, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"own-command\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 4, 1, 1)\n\n entry_lock_cmd = Gtk.Entry()\n entry_lock_cmd.set_placeholder_text(voc[\"leave-blank-to-use-above\"])\n lbl.set_property(\"valign\", Gtk.Align.CENTER)\n lbl.set_property(\"vexpand\", False)\n entry_lock_cmd.set_width_chars(24)\n entry_lock_cmd.set_text(settings[\"lockscreen-custom-cmd\"])\n entry_lock_cmd.set_tooltip_text(voc[\"own-command-tooltip\"])\n grid.attach(entry_lock_cmd, 1, 4, 1, 1)\n entry_lock_cmd.connect(\"changed\", set_custom_cmd_from_entry, settings, \"lockscreen-custom-cmd\",\n [combo_locker, combo_background])\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"timeout\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 5, 1, 1)\n\n sb_lock_timeout = Gtk.SpinButton.new_with_range(5, 86400, 1)\n sb_lock_timeout.set_property(\"halign\", Gtk.Align.START)\n sb_lock_timeout.set_value(settings[\"lockscreen-timeout\"])\n # This value and `sb_sleep_timeout` need cross-validation, so we connect both once both have been defined\n sb_lock_timeout.set_tooltip_text(voc[\"timeout-tooltip\"])\n grid.attach(sb_lock_timeout, 1, 5, 1, 1)\n\n lbl = Gtk.Label()\n lbl.set_markup(\"<b>{}</b>\".format(voc[\"idle-settings\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n lbl.set_property(\"margin-top\", 6)\n grid.attach(lbl, 0, 6, 2, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"sleep-command\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 7, 1, 1)\n\n entry_sleep_cmd = Gtk.Entry()\n entry_sleep_cmd.set_max_width_chars(22)\n entry_sleep_cmd.set_text(settings[\"sleep-cmd\"])\n grid.attach(entry_sleep_cmd, 1, 7, 1, 1)\n entry_sleep_cmd.connect(\"changed\", set_from_entry, settings, \"sleep-cmd\")\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"timeout\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 8, 1, 1)\n\n sb_sleep_timeout = Gtk.SpinButton.new_with_range(10, 86400, 1)\n sb_sleep_timeout.set_property(\"halign\", Gtk.Align.START)\n sb_sleep_timeout.set_value(settings[\"sleep-timeout\"])\n\n # Sleep timeout must be longer than lock timeout; we'll validate both values\n sb_sleep_timeout.connect(\"value-changed\", set_sleep_timeout, sb_lock_timeout, settings, \"sleep-timeout\")\n sb_lock_timeout.connect(\"value-changed\", set_timeouts, sb_sleep_timeout, settings, \"lockscreen-timeout\")\n\n sb_sleep_timeout.set_tooltip_text(voc[\"sleep-timeout-tooltip\"])\n grid.attach(sb_sleep_timeout, 1, 8, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"resume-command\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 9, 1, 1)\n\n entry_resume_cmd = Gtk.Entry()\n entry_resume_cmd.set_text(settings[\"resume-cmd\"])\n grid.attach(entry_resume_cmd, 1, 9, 1, 1)\n entry_resume_cmd.connect(\"changed\", set_from_entry, settings, \"resume-cmd\")\n\n defaults_btn = Gtk.Button.new_with_label(voc[\"restore-defaults\"])\n defaults_btn.set_property(\"margin-top\", 6)\n defaults_btn.set_property(\"halign\", Gtk.Align.START)\n 
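# Editor's note: restore_defaults (connected below) receives a {Gtk.Entry: default_text} dict; assumed shape:\n #   def restore_defaults(btn, defaults):\n #       for entry, text in defaults.items():\n #           entry.set_text(text)\n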
defaults_btn.set_tooltip_text(voc[\"restore-defaults-tooltip\"])\n defaults_btn.connect(\"clicked\", restore_defaults, {entry_sleep_cmd: 'swaymsg \"output * dpms off\"',\n entry_resume_cmd: 'swaymsg \"output * dpms on\"'})\n grid.attach(defaults_btn, 1, 6, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"before-sleep\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 10, 1, 1)\n\n entry_b4_sleep = Gtk.Entry()\n entry_b4_sleep.set_width_chars(24)\n entry_b4_sleep.set_text(settings[\"before-sleep\"])\n entry_b4_sleep.set_tooltip_text(voc[\"before-sleep-tooltip\"])\n grid.attach(entry_b4_sleep, 1, 10, 1, 1)\n entry_b4_sleep.connect(\"changed\", set_from_entry, settings, \"before-sleep\")\n\n lbl = Gtk.Label()\n lbl.set_markup(\"<b>{}</b>\".format(voc[\"local-background-paths\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 2, 1, 4, 1)\n\n bcg_window = Gtk.ScrolledWindow.new(None, None)\n bcg_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.ALWAYS)\n bcg_window.set_propagate_natural_width(True)\n\n grid.attach(bcg_window, 2, 2, 4, 4)\n bcg_box = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)\n bcg_window.add(bcg_box)\n\n paths = list_background_dirs()\n # Preselect all if none preselected yet\n if not settings[\"background-dirs-once-set\"] and not settings[\"background-dirs\"]:\n settings[\"background-dirs\"] = paths\n settings[\"background-dirs-once-set\"] = True\n\n for p in paths:\n cb = Gtk.CheckButton.new_with_label(p)\n cb.set_active(p in settings[\"background-dirs\"])\n cb.connect(\"toggled\", on_folder_btn_toggled, settings)\n bcg_box.pack_start(cb, False, False, 0)\n\n box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 6)\n grid.attach(box, 2, 6, 3, 1)\n\n cb_custom_path = Gtk.CheckButton.new_with_label(voc[\"own-path\"])\n cb_custom_path.set_active(settings[\"backgrounds-use-custom-path\"])\n cb_custom_path.connect(\"toggled\", set_key_from_checkbox, settings, \"backgrounds-use-custom-path\")\n box.pack_start(cb_custom_path, False, False, 0)\n\n fc_btn = Gtk.FileChooserButton.new(voc[\"select-folder\"], Gtk.FileChooserAction.SELECT_FOLDER)\n fc_btn.set_tooltip_text(voc[\"select-folder-tooltip\"])\n if settings[\"backgrounds-custom-path\"]:\n fc_btn.set_current_folder(settings[\"backgrounds-custom-path\"])\n fc_btn.connect(\"file-set\", on_custom_folder_selected, cb_custom_path, settings)\n box.pack_start(fc_btn, False, False, 0)\n\n if not fc_btn.get_filename():\n cb_custom_path.set_sensitive(False)\n\n lbl = Gtk.Label()\n lbl.set_markup(\"<b>{}</b>\".format(voc[\"unsplash-random-image\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n lbl.set_property(\"margin-top\", 6)\n grid.attach(lbl, 2, 8, 4, 1)\n\n sb_us_width = Gtk.SpinButton.new_with_range(640, 7680, 1)\n sb_us_width.set_value(settings[\"unsplash-width\"])\n sb_us_width.connect(\"value-changed\", set_int_from_spinbutton, settings, \"unsplash-width\")\n sb_us_width.set_tooltip_text(voc[\"desired-wallpaper-width\"])\n grid.attach(sb_us_width, 2, 9, 1, 1)\n\n lbl = Gtk.Label.new(\"x\")\n grid.attach(lbl, 3, 9, 1, 1)\n\n sb_us_height = Gtk.SpinButton.new_with_range(480, 4320, 1)\n sb_us_height.set_value(settings[\"unsplash-height\"])\n sb_us_height.connect(\"value-changed\", set_int_from_spinbutton, settings, \"unsplash-height\")\n sb_us_height.set_tooltip_text(voc[\"desired-wallpaper-height\"])\n grid.attach(sb_us_height, 4, 9, 1, 1)\n\n box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 6)\n grid.attach(box, 2, 10, 3, 1)\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"keywords\"]))\n 
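# Editor's note: set_keywords_from_entry (connected just below) is assumed to split the comma-separated text back into the list joined here:\n #   def set_keywords_from_entry(entry, settings):\n #       settings[\"unsplash-keywords\"] = [k.strip() for k in entry.get_text().split(\",\") if k.strip()]\n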
lbl.set_property(\"halign\", Gtk.Align.START)\n box.pack_start(lbl, False, False, 0)\n\n entry_us_keywords = Gtk.Entry()\n entry_us_keywords.set_tooltip_text(voc[\"keywords-tooltip\"])\n entry_us_keywords.set_text(\",\".join(settings[\"unsplash-keywords\"]))\n entry_us_keywords.connect(\"changed\", set_keywords_from_entry, settings)\n box.pack_start(entry_us_keywords, True, True, 0)\n\n # WARNING about 'swayidle' in sway config\n config_home = os.getenv('XDG_CONFIG_HOME') if os.getenv('XDG_CONFIG_HOME') else os.path.join(\n os.getenv(\"HOME\"), \".config/\")\n sway_config = os.path.join(config_home, \"sway\", \"config\")\n if os.path.isfile(sway_config):\n lines = load_text_file(sway_config).splitlines()\n for line in lines:\n if not line.startswith(\"#\") and \"swayidle\" in line:\n lbl = Gtk.Label()\n lbl.set_markup(\n 'To use these settings,'\n ' remove \\'swayidle\\' from your sway config file!')\n lbl.set_property(\"margin-top\", 10)\n grid.attach(lbl, 0, 11, 7, 1)\n cb_lockscreen_use_settings.set_active(False)\n # Prevent settings from exporting\n cb_lockscreen_use_settings.set_sensitive(False)\n break\n\n frame.show_all()\n\n return frame\n\n\ndef gtklock_tab(settings, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: Gtklock \".format(voc[\"common\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 6)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n lbl = Gtk.Label()\n lbl.set_markup(\"{}\".format(voc[\"modules\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 0, 2, 1, 1)\n\n box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 0)\n grid.attach(box, 1, 2, 1, 1)\n cb_gtklock_userinfo = Gtk.CheckButton.new_with_label(\"userinfo\")\n cb_gtklock_userinfo.set_active(settings[\"gtklock-userinfo\"])\n cb_gtklock_userinfo.connect(\"toggled\", set_key_from_checkbox, settings, \"gtklock-userinfo\")\n cb_gtklock_userinfo.set_tooltip_text(voc[\"userinfo-tooltip\"])\n box.pack_start(cb_gtklock_userinfo, False, False, 0)\n # Disable check button if module not installed\n if not gtklock_module_path(\"userinfo\"):\n cb_gtklock_userinfo.set_active(False)\n cb_gtklock_userinfo.set_sensitive(False)\n\n cb_gtklock_powerbar = Gtk.CheckButton.new_with_label(\"powerbar\")\n cb_gtklock_powerbar.set_active(settings[\"gtklock-powerbar\"])\n cb_gtklock_powerbar.connect(\"toggled\", set_key_from_checkbox, settings, \"gtklock-powerbar\")\n cb_gtklock_powerbar.set_tooltip_text(voc[\"powerbar-tooltip\"])\n box.pack_start(cb_gtklock_powerbar, False, False, 0)\n\n if not gtklock_module_path(\"powerbar\"):\n cb_gtklock_powerbar.set_active(False)\n cb_gtklock_powerbar.set_sensitive(False)\n\n cb_gtklock_layerctl = Gtk.CheckButton.new_with_label(\"playerctl\")\n cb_gtklock_layerctl.set_active(settings[\"gtklock-playerctl\"])\n cb_gtklock_layerctl.connect(\"toggled\", set_key_from_checkbox, settings, \"gtklock-playerctl\")\n cb_gtklock_layerctl.set_tooltip_text(voc[\"playerctl-tooltip\"])\n box.pack_start(cb_gtklock_layerctl, False, False, 0)\n\n if not gtklock_module_path(\"playerctl\"):\n cb_gtklock_layerctl.set_active(False)\n cb_gtklock_layerctl.set_sensitive(False)\n\n lbl = Gtk.Label()\n lbl.set_markup(\"{}\".format(voc[\"powerbar\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 0, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"reboot\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 4, 1, 1)\n\n entry_layout = Gtk.Entry()\n 
entry_layout.set_tooltip_text(voc[\"reboot-tooltip\"])\n entry_layout.set_text(settings[\"gtklock-reboot-command\"])\n entry_layout.connect(\"changed\", set_from_entry, settings, \"gtklock-reboot-command\")\n grid.attach(entry_layout, 1, 4, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"power-off\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 5, 1, 1)\n\n entry_gtklock_poweroff_command = Gtk.Entry()\n entry_gtklock_poweroff_command.set_tooltip_text(voc[\"power-off-tooltip\"])\n entry_gtklock_poweroff_command.set_text(settings[\"gtklock-poweroff-command\"])\n entry_gtklock_poweroff_command.connect(\"changed\", set_from_entry, settings, \"gtklock-poweroff-command\")\n grid.attach(entry_gtklock_poweroff_command, 1, 5, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"suspend\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 6, 1, 1)\n\n entry_gtklock_suspend_command = Gtk.Entry()\n entry_gtklock_suspend_command.set_tooltip_text(voc[\"suspend-tooltip\"])\n entry_gtklock_suspend_command.set_text(settings[\"gtklock-suspend-command\"])\n entry_gtklock_suspend_command.connect(\"changed\", set_from_entry, settings, \"gtklock-suspend-command\")\n grid.attach(entry_gtklock_suspend_command, 1, 6, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"logout\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 7, 1, 1)\n\n entry_gtklock_logout_command = Gtk.Entry()\n entry_gtklock_logout_command.set_tooltip_text(voc[\"logout-tooltip\"])\n entry_gtklock_logout_command.set_text(settings[\"gtklock-logout-command\"])\n entry_gtklock_logout_command.connect(\"changed\", set_from_entry, settings, \"gtklock-logout-command\")\n grid.attach(entry_gtklock_logout_command, 1, 7, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"switch-user\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 8, 1, 1)\n\n entry_gtklock_switch_user_command = Gtk.Entry()\n entry_gtklock_switch_user_command.set_tooltip_text(voc[\"switch-user-tooltip\"])\n entry_gtklock_switch_user_command.set_text(settings[\"gtklock-userswitch-command\"])\n entry_gtklock_switch_user_command.connect(\"changed\", set_from_entry, settings, \"gtklock-userswitch-command\")\n grid.attach(entry_gtklock_switch_user_command, 1, 8, 1, 1)\n\n lbl = Gtk.Label()\n lbl.set_markup(\"{}\".format(voc[\"other\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 0, 9, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"time-format\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 10, 1, 1)\n\n entry_time_format = Gtk.Entry()\n entry_time_format.set_tooltip_text(voc[\"time-format-tooltip\"])\n entry_time_format.set_text(settings[\"gtklock-time-format\"])\n entry_time_format.connect(\"changed\", set_from_entry, settings, \"gtklock-time-format\")\n grid.attach(entry_time_format, 1, 10, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"idle-timeout\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 11, 1, 1)\n\n sb_gtklock_idle_timeout = Gtk.SpinButton.new_with_range(5, 3600, 1)\n sb_gtklock_idle_timeout.set_value(settings[\"gtklock-idle-timeout\"])\n sb_gtklock_idle_timeout.connect(\"value-changed\", set_int_from_spinbutton, settings, \"gtklock-idle-timeout\")\n sb_gtklock_idle_timeout.set_tooltip_text(voc[\"idle-timeout-tooltip\"])\n grid.attach(sb_gtklock_idle_timeout, 1, 11, 1, 1)\n\n cb_disable_input_inhibitor = Gtk.CheckButton.new_with_label(voc[\"disable-input-inhibitor\"])\n 
cb_disable_input_inhibitor.set_active(settings[\"gtklock-disable-input-inhibitor\"])\n cb_disable_input_inhibitor.connect(\"toggled\", set_key_from_checkbox, settings, \"gtklock-disable-input-inhibitor\")\n cb_disable_input_inhibitor.set_tooltip_text(voc[\"disable-input-inhibitor-tooltip\"])\n grid.attach(cb_disable_input_inhibitor, 1, 12, 1, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef drawer_tab(preset, preset_name, outputs, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(preset_name, voc[\"app-drawer\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n cb_drawer_on = Gtk.CheckButton.new_with_label(voc[\"app-drawer-on\"])\n cb_drawer_on.set_active(preset[\"launcher-on\"])\n cb_drawer_on.connect(\"toggled\", set_from_checkbutton, preset, \"launcher-on\")\n grid.attach(cb_drawer_on, 0, 0, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"columns\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 1, 1, 1)\n\n sb_columns = Gtk.SpinButton.new_with_range(1, 9, 1)\n sb_columns.set_value(preset[\"launcher-columns\"])\n sb_columns.connect(\"value-changed\", set_int_from_spinbutton, preset, \"launcher-columns\")\n sb_columns.set_tooltip_text(voc[\"app-drawer-columns-tooltip\"])\n grid.attach(sb_columns, 1, 1, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"icon-size\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 2, 1, 1)\n\n sb_icon_size = Gtk.SpinButton.new_with_range(8, 256, 1)\n sb_icon_size.set_value(preset[\"launcher-icon-size\"])\n sb_icon_size.connect(\"value-changed\", set_int_from_spinbutton, preset, \"launcher-icon-size\")\n sb_icon_size.set_tooltip_text(voc[\"app-icon-size-tooltip\"])\n grid.attach(sb_icon_size, 1, 2, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"file-search-columns\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 3, 1, 1)\n\n sb_fs_columns = Gtk.SpinButton.new_with_range(1, 9, 1)\n sb_fs_columns.set_value(preset[\"launcher-file-search-columns\"])\n sb_fs_columns.connect(\"value-changed\", set_int_from_spinbutton, preset, \"launcher-file-search-columns\")\n sb_fs_columns.set_tooltip_text(voc[\"file-search-columns-tooltip\"])\n grid.attach(sb_fs_columns, 1, 3, 1, 1)\n\n cb_search_files = Gtk.CheckButton.new_with_label(voc[\"search-files\"])\n cb_search_files.set_active(preset[\"launcher-search-files\"])\n cb_search_files.connect(\"toggled\", set_from_checkbutton, preset, \"launcher-search-files\")\n grid.attach(cb_search_files, 2, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"gtk-theme\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 4, 1, 1)\n\n combo_gtk_theme = Gtk.ComboBoxText()\n combo_gtk_theme.append(\"\", voc[\"default\"])\n\n for name in get_theme_names():\n combo_gtk_theme.append(name, name)\n\n combo_gtk_theme.set_active_id(preset[\"launcher-gtk-theme\"])\n combo_gtk_theme.connect(\"changed\", set_dict_key_from_combo, preset, \"launcher-gtk-theme\")\n grid.attach(combo_gtk_theme, 1, 4, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"icon-theme\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 5, 1, 1)\n\n # dict: {\"theme_name\": \"folder_name\"}\n theme_names = get_icon_themes()\n names = []\n for key in theme_names:\n names.append(key)\n\n combo_gtk_icon_theme = Gtk.ComboBoxText()\n combo_gtk_icon_theme.append(\"\", voc[\"default\"])\n for 
name in sorted(names, key=str.casefold):\n combo_gtk_icon_theme.append(name, name)\n combo_gtk_icon_theme.set_active_id(preset[\"launcher-gtk-icon-theme\"])\n combo_gtk_icon_theme.connect(\"changed\", set_dict_key_from_combo, preset, \"launcher-gtk-icon-theme\")\n grid.attach(combo_gtk_icon_theme, 1, 5, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"output\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 6, 1, 1)\n\n combo_outputs = Gtk.ComboBoxText()\n combo_outputs.set_property(\"halign\", Gtk.Align.START)\n grid.attach(combo_outputs, 1, 6, 1, 1)\n combo_outputs.append(\"Any\", voc[\"any\"])\n for item in outputs:\n combo_outputs.append(item, item)\n combo_outputs.set_active_id(preset[\"launcher-output\"])\n combo_outputs.connect(\"changed\", set_dict_key_from_combo, preset, \"launcher-output\")\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"pb-icon-size\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 7, 1, 1)\n\n sb_pb_icon_size = Gtk.SpinButton.new_with_range(8, 256, 1)\n sb_pb_icon_size.set_value(preset[\"pb-size\"])\n sb_pb_icon_size.connect(\"value-changed\", set_int_from_spinbutton, preset, \"pb-size\")\n sb_pb_icon_size.set_tooltip_text(voc[\"app-icon-size-tooltip\"])\n grid.attach(sb_pb_icon_size, 1, 7, 1, 1)\n\n cb_categories = Gtk.CheckButton.new_with_label(voc[\"show-category-buttons\"])\n cb_categories.set_tooltip_text(voc[\"show-category-buttons-tooltip\"])\n cb_categories.set_active(preset[\"launcher-categories\"])\n cb_categories.connect(\"toggled\", set_from_checkbutton, preset, \"launcher-categories\")\n grid.attach(cb_categories, 0, 8, 2, 1)\n\n cb_resident = Gtk.CheckButton.new_with_label(voc[\"keep-resident\"])\n cb_resident.set_tooltip_text(voc[\"keep-resident-tooltip\"])\n cb_resident.set_active(preset[\"launcher-resident\"])\n cb_resident.connect(\"toggled\", set_from_checkbutton, preset, \"launcher-resident\")\n grid.attach(cb_resident, 0, 9, 2, 1)\n\n cb_super = Gtk.CheckButton.new_with_label(voc[\"open-on-super\"])\n cb_super.set_tooltip_text(voc[\"open-on-super-tooltip\"])\n cb_super.set_active(preset[\"launcher-super-key\"])\n cb_super.connect(\"toggled\", set_from_checkbutton, preset, \"launcher-super-key\")\n grid.attach(cb_super, 0, 10, 2, 1)\n\n cb_overlay = Gtk.CheckButton.new_with_label(voc[\"open-on-overlay\"])\n cb_overlay.set_tooltip_text(voc[\"open-on-overlay-tooltip\"])\n cb_overlay.set_active(preset[\"launcher-overlay\"])\n cb_overlay.connect(\"toggled\", set_from_checkbutton, preset, \"launcher-overlay\")\n grid.attach(cb_overlay, 0, 11, 2, 1)\n\n cb_force_theme = Gtk.CheckButton.new_with_label(voc[\"run-with-gtk-theme\"])\n cb_force_theme.set_tooltip_text(voc[\"run-with-gtk-theme-tooltip\"])\n cb_force_theme.set_active(preset[\"launcher-force-theme\"])\n cb_force_theme.connect(\"toggled\", set_from_checkbutton, preset, \"launcher-force-theme\")\n grid.attach(cb_force_theme, 2, 8, 2, 1)\n\n cb_run_through_compositor = Gtk.CheckButton.new_with_label(voc[\"run-through-compositor\"])\n cb_run_through_compositor.set_tooltip_text(voc[\"run-through-compositor-tooltip\"])\n cb_run_through_compositor.set_active(preset[\"launcher-run-through-compositor\"])\n cb_run_through_compositor.connect(\"toggled\", set_from_checkbutton, preset, \"launcher-run-through-compositor\")\n grid.attach(cb_run_through_compositor, 2, 9, 2, 1)\n\n cb_show_power_bar = Gtk.CheckButton.new_with_label(voc[\"show-power-bar\"])\n cb_show_power_bar.set_tooltip_text(voc[\"show-power-bar-tooltip\"])\n 
cb_show_power_bar.set_active(preset[\"powerbar-on\"])\n cb_show_power_bar.connect(\"toggled\", set_from_checkbutton, preset, \"powerbar-on\")\n grid.attach(cb_show_power_bar, 2, 10, 2, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef dock_tab(preset, preset_name, outputs, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(preset_name, voc[\"dock\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n cb_dock_on = Gtk.CheckButton.new_with_label(voc[\"dock-on\"])\n cb_dock_on.set_active(preset[\"dock-on\"])\n cb_dock_on.connect(\"toggled\", set_from_checkbutton, preset, \"dock-on\")\n grid.attach(cb_dock_on, 0, 0, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"position\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 1, 1, 1)\n\n combo_position = Gtk.ComboBoxText()\n combo_position.set_property(\"halign\", Gtk.Align.START)\n grid.attach(combo_position, 1, 1, 1, 1)\n for item in [\"bottom\", \"top\", \"left\"]:\n combo_position.append(item, voc[item])\n combo_position.set_active_id(preset[\"dock-position\"])\n combo_position.connect(\"changed\", set_dict_key_from_combo, preset, \"dock-position\")\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"alignment\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 2, 1, 1)\n\n combo_alignment = Gtk.ComboBoxText()\n combo_alignment.set_property(\"halign\", Gtk.Align.START)\n grid.attach(combo_alignment, 1, 2, 1, 1)\n for item in [\"center\", \"start\", \"end\"]:\n combo_alignment.append(item, voc[item])\n combo_alignment.set_active_id(preset[\"dock-alignment\"])\n combo_alignment.connect(\"changed\", set_dict_key_from_combo, preset, \"dock-alignment\")\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"icon-size\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 3, 1, 1)\n\n sb_icon_size = Gtk.SpinButton.new_with_range(8, 256, 1)\n sb_icon_size.set_value(preset[\"dock-icon-size\"])\n sb_icon_size.connect(\"value-changed\", set_int_from_spinbutton, preset, \"dock-icon-size\")\n sb_icon_size.set_tooltip_text(voc[\"app-icon-size-tooltip\"])\n grid.attach(sb_icon_size, 1, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"margin\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 4, 1, 1)\n\n sb_margin = Gtk.SpinButton.new_with_range(0, 256, 1)\n sb_margin.set_value(preset[\"dock-margin\"])\n sb_margin.connect(\"value-changed\", set_int_from_spinbutton, preset, \"dock-margin\")\n sb_margin.set_tooltip_text(voc[\"margin-tooltip\"])\n grid.attach(sb_margin, 1, 4, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"output\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 5, 1, 1)\n\n combo_outputs = Gtk.ComboBoxText()\n combo_outputs.set_property(\"halign\", Gtk.Align.START)\n grid.attach(combo_outputs, 1, 5, 1, 1)\n combo_outputs.append(\"Any\", voc[\"any\"])\n for item in outputs:\n combo_outputs.append(item, item)\n combo_outputs.set_active_id(preset[\"dock-output\"])\n combo_outputs.connect(\"changed\", set_dict_key_from_combo, preset, \"dock-output\")\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"on-layer\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 6, 1, 1)\n\n combo_layer = Gtk.ComboBoxText()\n combo_layer.set_property(\"halign\", Gtk.Align.START)\n grid.attach(combo_layer, 1, 6, 1, 1)\n combo_layer.set_tooltip_text(voc[\"on-layer-tooltip\"])\n layers 
= [\"overlay\", \"top\"]\n for l in layers:\n combo_layer.append(l, l)\n combo_layer.set_active_id(preset[\"dock-layer\"])\n combo_layer.connect(\"changed\", set_dict_key_from_combo, preset, \"dock-layer\")\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"hotspot-delay\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 7, 1, 1)\n\n sb_hotspot_delay = Gtk.SpinButton.new_with_range(0, 10000, 1)\n sb_hotspot_delay.set_value(preset[\"dock-hotspot-delay\"])\n sb_hotspot_delay.connect(\"value-changed\", set_int_from_spinbutton, preset, \"dock-hotspot-delay\")\n sb_hotspot_delay.set_tooltip_text(voc[\"hotspot-delay-tooltip\"])\n grid.attach(sb_hotspot_delay, 1, 7, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"startup-delay\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 8, 1, 1)\n\n sb_startup_delay = Gtk.SpinButton.new_with_range(0, 16, 0.1)\n sb_startup_delay.set_value(preset[\"dock-startup-delay\"])\n sb_startup_delay.connect(\"value-changed\", set_from_spinbutton, preset, \"dock-startup-delay\", 2)\n sb_startup_delay.set_tooltip_text(voc[\"startup-delay-tooltip\"])\n grid.attach(sb_startup_delay, 1, 8, 1, 1)\n\n cb_permanent = Gtk.CheckButton.new_with_label(voc[\"permanent\"])\n cb_permanent.set_active(preset[\"dock-permanent\"])\n cb_permanent.connect(\"toggled\", set_from_checkbutton, preset, \"dock-permanent\")\n cb_permanent.set_tooltip_text(voc[\"permanent-tooltip\"])\n grid.attach(cb_permanent, 0, 9, 2, 1)\n\n cb_full = Gtk.CheckButton.new_with_label(voc[\"full-width-height\"])\n cb_full.set_active(preset[\"dock-full\"])\n cb_full.connect(\"toggled\", set_from_checkbutton, preset, \"dock-full\")\n cb_full.set_tooltip_text(voc[\"full-width-height-tooltip\"])\n grid.attach(cb_full, 0, 10, 2, 1)\n\n cb_autohide = Gtk.CheckButton.new_with_label(voc[\"auto-show-hide\"])\n cb_autohide.set_active(preset[\"dock-autohide\"])\n cb_autohide.connect(\"toggled\", set_from_checkbutton, preset, \"dock-autohide\")\n cb_autohide.set_tooltip_text(voc[\"auto-show-hide-tooltip\"])\n grid.attach(cb_autohide, 0, 11, 2, 1)\n\n cb_exclusive = Gtk.CheckButton.new_with_label(voc[\"exclusive-zone\"])\n cb_exclusive.set_active(preset[\"dock-exclusive\"])\n cb_exclusive.connect(\"toggled\", set_from_checkbutton, preset, \"dock-exclusive\")\n cb_exclusive.set_tooltip_text(voc[\"exclusive-zone-tooltip\"])\n grid.attach(cb_exclusive, 0, 12, 1, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef bar_tab(preset, preset_name, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(preset_name, voc[\"exit-menu\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n cb_bar_on = Gtk.CheckButton.new_with_label(voc[\"exit-menu-on\"])\n cb_bar_on.set_active(preset[\"exit-on\"])\n cb_bar_on.connect(\"toggled\", set_from_checkbutton, preset, \"exit-on\")\n grid.attach(cb_bar_on, 0, 0, 2, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"position\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 1, 1, 1)\n\n combo_position = Gtk.ComboBoxText()\n combo_position.set_property(\"halign\", Gtk.Align.START)\n grid.attach(combo_position, 1, 1, 1, 1)\n for item in [\"center\", \"top\", \"bottom\", \"left\", \"right\"]:\n combo_position.append(item, voc[item])\n combo_position.set_active_id(preset[\"exit-position\"])\n combo_position.connect(\"changed\", set_dict_key_from_combo, preset, \"exit-position\")\n\n 
lbl = Gtk.Label.new(\"{}:\".format(voc[\"alignment\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 2, 1, 1)\n\n combo_alignment = Gtk.ComboBoxText()\n combo_alignment.set_property(\"halign\", Gtk.Align.START)\n grid.attach(combo_alignment, 1, 2, 1, 1)\n for item in [\"middle\", \"start\", \"end\"]:\n combo_alignment.append(item, voc[item])\n combo_alignment.set_active_id(preset[\"exit-alignment\"])\n combo_alignment.connect(\"changed\", set_dict_key_from_combo, preset, \"exit-alignment\")\n combo_alignment.set_tooltip_text(\"Alignment in full width/height.\")\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"icon-size\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 3, 1, 1)\n\n sb_icon_size = Gtk.SpinButton.new_with_range(8, 256, 1)\n sb_icon_size.set_value(preset[\"exit-icon-size\"])\n sb_icon_size.connect(\"value-changed\", set_int_from_spinbutton, preset, \"exit-icon-size\")\n sb_icon_size.set_tooltip_text(voc[\"item-icon-size-tooltip\"])\n grid.attach(sb_icon_size, 1, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"margin\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 4, 1, 1)\n\n sb_margin = Gtk.SpinButton.new_with_range(0, 256, 1)\n sb_margin.set_value(preset[\"exit-margin\"])\n sb_margin.connect(\"value-changed\", set_int_from_spinbutton, preset, \"exit-margin\")\n sb_margin.set_tooltip_text(voc[\"margin-tooltip\"])\n grid.attach(sb_margin, 1, 4, 1, 1)\n\n cb_full = Gtk.CheckButton.new_with_label(voc[\"full-width-height\"])\n cb_full.set_active(preset[\"exit-full\"])\n cb_full.connect(\"toggled\", set_from_checkbutton, preset, \"exit-full\")\n cb_full.set_tooltip_text(voc[\"full-width-height-tooltip\"])\n grid.attach(cb_full, 0, 5, 2, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef notification_tab(preset, preset_name, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: Notification center \".format(preset_name))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"horizontal-alignment\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 1, 1, 1)\n\n combo_position_x = Gtk.ComboBoxText()\n combo_position_x.set_property(\"halign\", Gtk.Align.START)\n grid.attach(combo_position_x, 1, 1, 1, 1)\n for item in [\"left\", \"right\", \"center\"]:\n combo_position_x.append(item, voc[item])\n combo_position_x.set_active_id(preset[\"swaync-positionX\"])\n combo_position_x.connect(\"changed\", set_dict_key_from_combo, preset, \"swaync-positionX\")\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"vertical-alignment\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 2, 1, 1)\n\n combo_position_x = Gtk.ComboBoxText()\n combo_position_x.set_property(\"halign\", Gtk.Align.START)\n grid.attach(combo_position_x, 1, 2, 1, 1)\n for item in [\"top\", \"bottom\"]:\n combo_position_x.append(item, voc[item])\n combo_position_x.set_active_id(preset[\"swaync-positionY\"])\n combo_position_x.connect(\"changed\", set_dict_key_from_combo, preset, \"swaync-positionY\")\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"notification-center-width\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 3, 1, 1)\n\n sb_cc_width = Gtk.SpinButton.new_with_range(0, 1000, 1)\n sb_cc_width.set_value(preset[\"swaync-control-center-width\"])\n sb_cc_width.connect(\"value-changed\", set_int_from_spinbutton, 
preset, \"swaync-control-center-width\")\n grid.attach(sb_cc_width, 1, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"notification-window-width\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 4, 1, 1)\n\n sb_window_width = Gtk.SpinButton.new_with_range(0, 1000, 1)\n sb_window_width.set_value(preset[\"swaync-control-center-width\"])\n sb_window_width.connect(\"value-changed\", set_int_from_spinbutton, preset, \"swaync-notification-window-width\")\n grid.attach(sb_window_width, 1, 4, 1, 1)\n\n cb_swaync_mpris = Gtk.CheckButton.new_with_label(\"{}\".format(voc[\"enable-mpris-widget\"]))\n cb_swaync_mpris.set_active(preset[\"swaync-mpris\"])\n cb_swaync_mpris.connect(\"toggled\", set_from_checkbutton, preset, \"swaync-mpris\")\n cb_swaync_mpris.set_tooltip_text(voc[\"enable-mpris-widget-tooltip\"])\n grid.attach(cb_swaync_mpris, 0, 5, 1, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef gtklock_preset_tab(preset, preset_name, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: gtklock \".format(preset_name))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n if gtklock_module_path(\"userinfo\"):\n lbl = Gtk.Label()\n lbl.set_markup(\"{}\".format(voc[\"userinfo-module\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 0, 1, 1, 1)\n\n box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 0)\n grid.attach(box, 1, 1, 3, 1)\n\n cb_userinfo_round_image = Gtk.CheckButton.new_with_label(voc[\"round-image\"])\n cb_userinfo_round_image.set_active(preset[\"gtklock-userinfo-round-image\"])\n cb_userinfo_round_image.connect(\"toggled\", set_from_checkbutton, preset, \"gtklock-userinfo-round-image\")\n cb_userinfo_round_image.set_tooltip_text(\"user avatar shape\")\n box.pack_start(cb_userinfo_round_image, False, False, 0)\n\n cb_userinfo_vertical_layout = Gtk.CheckButton.new_with_label(voc[\"vertical-layout\"])\n cb_userinfo_vertical_layout.set_active(preset[\"gtklock-userinfo-vertical-layout\"])\n cb_userinfo_vertical_layout.connect(\"toggled\", set_from_checkbutton, preset, \"gtklock-userinfo-vertical-layout\")\n cb_userinfo_vertical_layout.set_tooltip_text(\"user name next to the avatar\")\n box.pack_start(cb_userinfo_vertical_layout, False, False, 0)\n\n cb_userinfo_under_clock = Gtk.CheckButton.new_with_label(voc[\"under-clock\"])\n cb_userinfo_under_clock.set_active(preset[\"gtklock-userinfo-under-clock\"])\n cb_userinfo_under_clock.connect(\"toggled\", set_from_checkbutton, preset, \"gtklock-userinfo-under-clock\")\n cb_userinfo_under_clock.set_tooltip_text(\"user avatar and name below the clock\")\n box.pack_start(cb_userinfo_under_clock, False, False, 0)\n\n if gtklock_module_path(\"powerbar\"):\n lbl = Gtk.Label()\n lbl.set_markup(\"{}\".format(voc[\"powerbar-module\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 0, 2, 1, 1)\n\n box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 0)\n grid.attach(box, 1, 2, 3, 1)\n\n cb_powerbar_show_labels = Gtk.CheckButton.new_with_label(voc[\"show-labels\"])\n cb_powerbar_show_labels.set_active(preset[\"gtklock-powerbar-show-labels\"])\n cb_powerbar_show_labels.connect(\"toggled\", set_from_checkbutton, preset, \"gtklock-powerbar-show-labels\")\n box.pack_start(cb_powerbar_show_labels, False, False, 0)\n\n cb_powerbar_linked_buttons = Gtk.CheckButton.new_with_label(voc[\"linked-buttons\"])\n 
cb_powerbar_linked_buttons.set_active(preset[\"gtklock-powerbar-linked-buttons\"])\n cb_powerbar_linked_buttons.connect(\"toggled\", set_from_checkbutton, preset, \"gtklock-powerbar-linked-buttons\")\n box.pack_start(cb_powerbar_linked_buttons, False, False, 0)\n\n if gtklock_module_path(\"playerctl\"):\n lbl = Gtk.Label()\n lbl.set_markup(\"{}\".format(voc[\"playerctl-module\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 0, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"album-cover-size\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 4, 1, 1)\n\n box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 0)\n grid.attach(box, 1, 4, 3, 1)\n\n sb_gtklock_playerctl_art_size = Gtk.SpinButton.new_with_range(0, 256, 1)\n sb_gtklock_playerctl_art_size.set_value(preset[\"gtklock-playerctl-art-size\"])\n sb_gtklock_playerctl_art_size.connect(\"value-changed\", set_int_from_spinbutton, preset,\n \"gtklock-playerctl-art-size\")\n sb_gtklock_playerctl_art_size.set_tooltip_text(voc[\"album-cover-size-tooltip\"])\n box.pack_start(sb_gtklock_playerctl_art_size, False, False, 0)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"position\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n box.pack_start(lbl, False, False, 6)\n\n combo_gtklock_playerctl_position = Gtk.ComboBoxText()\n combo_gtklock_playerctl_position.set_property(\"halign\", Gtk.Align.START)\n box.pack_start(combo_gtklock_playerctl_position, False, False, 0)\n for item in [\"top-left\", \"top-center\", \"top-right\", \"bottom-left\", \"bottom-center\", \"bottom-right\",\n \"above-clock\", \"under-clock\"]:\n combo_gtklock_playerctl_position.append(item, voc[item])\n combo_gtklock_playerctl_position.set_active_id(preset[\"gtklock-playerctl-position\"])\n combo_gtklock_playerctl_position.connect(\"changed\", set_dict_key_from_combo, preset,\n \"gtklock-playerctl-position\")\n combo_gtklock_playerctl_position.set_tooltip_text(\"playerctl widget placement\")\n\n cb_gtklock_playerctl_show_hidden = Gtk.CheckButton.new_with_label(voc[\"always-show\"])\n cb_gtklock_playerctl_show_hidden.set_active(preset[\"gtklock-playerctl-show-hidden\"])\n cb_gtklock_playerctl_show_hidden.connect(\"toggled\", set_from_checkbutton, preset,\n \"gtklock-playerctl-show-hidden\")\n cb_gtklock_playerctl_show_hidden.set_tooltip_text(voc[\"always-show-gtklock-tooltip\"])\n grid.attach(cb_gtklock_playerctl_show_hidden, 1, 5, 3, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef panel_styling_tab(settings, preset, preset_name, voc):\n frame = Gtk.Frame()\n frame.set_label(\" {}: {} \".format(preset_name, voc[\"panel-css\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(6)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"panel-config-name\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 0, 1, 1)\n\n entry_panel = Gtk.Entry()\n entry_panel.set_placeholder_text(\"config\")\n entry_panel.set_tooltip_text(voc[\"panel-config-name-tooltip\"])\n entry_panel.set_property(\"halign\", Gtk.Align.START)\n entry_panel.set_text(settings[\"panel-custom\"])\n entry_panel.connect(\"changed\", set_from_entry, settings, \"panel-custom\")\n grid.attach(entry_panel, 1, 0, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"panel-css-name\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 1, 1, 1)\n\n entry_panel_css = Gtk.Entry()\n 
entry_panel_css.set_placeholder_text(\"style.css\")\n entry_panel_css.set_tooltip_text(voc[\"panel-css-name-tooltip\"])\n entry_panel_css.set_property(\"halign\", Gtk.Align.START)\n entry_panel_css.set_text(preset[\"panel-css\"])\n entry_panel_css.connect(\"changed\", set_from_entry, preset, \"panel-css\")\n grid.attach(entry_panel_css, 1, 1, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"drawer-css-name\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 2, 1, 1)\n\n entry_panel_css = Gtk.Entry()\n entry_panel_css.set_placeholder_text(\"drawer.css\")\n entry_panel_css.set_property(\"halign\", Gtk.Align.START)\n entry_panel_css.set_text(preset[\"launcher-css\"])\n entry_panel_css.connect(\"changed\", set_from_entry, preset, \"launcher-css\")\n entry_panel_css.set_tooltip_text(voc[\"drawer-css-name-tooltip\"])\n grid.attach(entry_panel_css, 1, 2, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"dock-css-name\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 3, 1, 1)\n\n entry_panel_css = Gtk.Entry()\n entry_panel_css.set_placeholder_text(\"style.css\")\n entry_panel_css.set_property(\"halign\", Gtk.Align.START)\n entry_panel_css.set_text(preset[\"dock-css\"])\n entry_panel_css.connect(\"changed\", set_from_entry, preset, \"dock-css\")\n entry_panel_css.set_tooltip_text(voc[\"dock-css-name-tooltip\"])\n grid.attach(entry_panel_css, 1, 3, 1, 1)\n\n lbl = Gtk.Label.new(\"{}:\".format(voc[\"exit-menu-css-name\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 4, 1, 1)\n\n entry_panel_css = Gtk.Entry()\n entry_panel_css.set_placeholder_text(\"style.css\")\n entry_panel_css.set_property(\"halign\", Gtk.Align.START)\n entry_panel_css.set_text(preset[\"exit-css\"])\n entry_panel_css.connect(\"changed\", set_from_entry, preset, \"exit-css\")\n entry_panel_css.set_tooltip_text(voc[\"exit-menu-css-name-tooltip\"])\n grid.attach(entry_panel_css, 1, 4, 1, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef sys_info_tab(voc):\n frame = Gtk.Frame()\n frame.set_label(\" {} \".format(voc[\"system-info\"]))\n frame.set_label_align(0.5, 0.5)\n frame.set_property(\"hexpand\", True)\n grid = Gtk.Grid()\n frame.add(grid)\n grid.set_property(\"margin\", 12)\n grid.set_column_spacing(6)\n grid.set_row_spacing(3)\n\n name, home_url, logo = parse_os_release()\n if logo:\n img = Gtk.Image.new_from_icon_name(logo, Gtk.IconSize.DIALOG)\n img.set_property(\"halign\", Gtk.Align.END)\n grid.attach(img, 0, 0, 1, 2)\n\n txt = get_command_output(\"uname -m\")[0]\n if name:\n lbl = Gtk.Label()\n lbl.set_markup(\"{} {}\".format(name, txt))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 1, 0, 1, 1)\n\n if home_url:\n lbl = Gtk.Label()\n lbl.set_markup('<a href=\"{}\">{}</a>'.format(home_url, home_url))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 1, 1, 1, 1)\n\n txt = get_command_output(\"uname -r\")[0]\n lbl = Gtk.Label.new(\"Kernel: {}\".format(txt))\n lbl.set_line_wrap(True)\n lbl.set_property(\"xalign\", 0)\n grid.attach(lbl, 1, 2, 1, 1)\n\n output = \"\"\n if os.getenv(\"SWAYSOCK\"):\n output = get_command_output(\"sway -v\")\n lbl = Gtk.Label.new(output[0])\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 1, 3, 1, 1)\n elif os.getenv(\"HYPRLAND_INSTANCE_SIGNATURE\"):\n reply = hyprctl(\"j/version\")\n values = json.loads(reply)\n if values and \"tag\" in values:\n lbl = Gtk.Label.new(\"Hyprland Tag: {}\".format(values[\"tag\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 1, 3, 1, 1)\n\n 
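# Gtk.Settings.get_default() may return None (e.g. when no display is available), hence the guard below.\n 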
settings = Gtk.Settings.get_default()\n if settings:\n lbl = Gtk.Label.new(\"GTK theme:\")\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 5, 1, 1)\n\n lbl = Gtk.Label.new(settings.get_property(\"gtk-theme-name\"))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 1, 5, 1, 1)\n\n lbl = Gtk.Label.new(\"Icon theme:\")\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 6, 1, 1)\n\n lbl = Gtk.Label.new(settings.get_property(\"gtk-icon-theme-name\"))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 1, 6, 1, 1)\n\n lbl = Gtk.Label.new(\"Font:\")\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, 7, 1, 1)\n\n lbl = Gtk.Label.new(settings.get_property(\"gtk-font-name\"))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 1, 7, 1, 1)\n\n if os.getenv(\"SWAYSOCK\"):\n i3 = Connection()\n row = 9\n outputs = i3.get_outputs()\n for i in range(len(outputs)):\n output = outputs[i]\n\n lbl = Gtk.Label.new(\"{}:\".format(output.name))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, row + i, 1, 1)\n\n r = output.rect\n lbl = Gtk.Label.new(\n \"{}x{}, scale: {}, x: {}, y: {}\".format(r.width, r.height, output.scale, r.x, r.y))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 1, row + i, 1, 1)\n\n elif os.getenv(\"HYPRLAND_INSTANCE_SIGNATURE\"):\n row = 9\n reply = hyprctl(\"j/monitors\")\n monitors = json.loads(reply)\n for i in range(len(monitors)):\n m = monitors[i]\n\n lbl = Gtk.Label.new(\"{}:\".format(m[\"name\"]))\n lbl.set_property(\"halign\", Gtk.Align.END)\n grid.attach(lbl, 0, row + i, 1, 1)\n\n lbl = Gtk.Label.new(\n \"{}x{}, scale: {}, x: {}, y: {}\".format(m[\"width\"], m[\"height\"], m[\"scale\"], m[\"x\"], m[\"y\"]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 1, row + i, 1, 1)\n\n # Right column\n img = Gtk.Image.new_from_icon_name(\"nwg-shell\", Gtk.IconSize.DIALOG)\n img.set_property(\"halign\", Gtk.Align.END)\n grid.attach(img, 2, 0, 1, 2)\n\n output = get_command_output(\"nwg-shell -v\")\n if output:\n lbl = Gtk.Label()\n lbl.set_markup(\"{}\".format(output[0]))\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 3, 0, 1, 1)\n\n lbl = Gtk.Label()\n lbl.set_markup('https://nwg-piotr.github.io/nwg-shell')\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 3, 1, 1, 1)\n\n output = get_command_output(\"nwg-shell-config -v\")\n if output:\n lbl = Gtk.Label.new(output[0])\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 3, 2, 1, 1)\n\n output = get_command_output(\"nwg-panel -v\")\n if output:\n lbl = Gtk.Label.new(output[0])\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 3, 3, 1, 1)\n\n output = get_command_output(\"nwg-drawer -v\")\n if output:\n lbl = Gtk.Label.new(output[0])\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 3, 4, 1, 1)\n\n output = get_command_output(\"nwg-dock -v\")\n if output:\n lbl = Gtk.Label.new(output[0])\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 3, 5, 1, 1)\n\n output = get_command_output(\"nwg-menu -v\")\n if output:\n lbl = Gtk.Label.new(output[0])\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 3, 6, 1, 1)\n\n output = get_command_output(\"nwg-bar -v\")\n if output:\n lbl = Gtk.Label.new(output[0])\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 3, 7, 1, 1)\n\n output = get_command_output(\"nwg-look -v\")\n if output:\n lbl = Gtk.Label.new(output[0])\n 
lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 3, 8, 1, 1)\n\n output = get_command_output(\"nwg-displays -v\")\n if output:\n lbl = Gtk.Label.new(output[0])\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 3, 9, 1, 1)\n\n output = get_command_output(\"gtklock -v\")\n if output:\n lbl = Gtk.Label.new(output[0])\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 3, 10, 1, 1)\n\n output = get_command_output(\"swaync -v\")\n if output:\n lbl = Gtk.Label.new(output[0])\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 3, 11, 1, 1)\n\n output = get_command_output(\"azote -h\")\n if output:\n lbl = Gtk.Label.new(output[1])\n lbl.set_property(\"halign\", Gtk.Align.START)\n grid.attach(lbl, 3, 12, 1, 1)\n\n frame.show_all()\n\n return frame\n\n\ndef parse_os_release():\n name, home_url, logo = \"\", \"\", \"\"\n if os.path.isfile(\"/etc/os-release\"):\n lines = load_text_file(\"/etc/os-release\").splitlines()\n for line in lines:\n if line.startswith(\"NAME=\"):\n name = line.split(\"=\")[1].replace('\"', '')\n continue\n if line.startswith(\"HOME_URL=\"):\n home_url = line.split(\"=\")[1].replace('\"', '')\n continue\n if line.startswith(\"LOGO=\"):\n logo = line.split(\"=\")[1]\n continue\n\n if not logo:\n logo = name\n\n return name, home_url, logo\n","repo_name":"nwg-piotr/nwg-shell-config","sub_path":"nwg_shell_config/ui_components.py","file_name":"ui_components.py","file_ext":"py","file_size_in_byte":139469,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"87"} +{"seq_id":"36516545188","text":"import yfinance as yf\r\nimport pandas as pd\r\nimport pyfolio as pf\r\n\r\ndef get_top5_performers(stocks, start_date, end_date):\r\n data = yf.download(stocks, start=start_date, end=end_date)\r\n returns = data['Adj Close'].pct_change(52) # Adjusted Close prices for weekly returns\r\n returns = returns.iloc[-1] # Get the latest (most recent) 52-week returns\r\n sorted_returns = returns.sort_values(ascending=False)\r\n top5_performers = sorted_returns.head(5)\r\n return top5_performers\r\n\r\ndef calculate_atr(data, period=14):\r\n high_low = data['High'] - data['Low']\r\n high_close_prev = abs(data['High'] - data['Close'].shift())\r\n low_close_prev = abs(data['Low'] - data['Close'].shift())\r\n true_range = pd.concat([high_low, high_close_prev, low_close_prev], axis=1).max(axis=1)\r\n atr = true_range.rolling(window=period).mean()\r\n return atr\r\n\r\ndef rebalance_portfolio(top5, start_date, end_date):\r\n stop_loss_multiplier = 2\r\n portfolio = {}\r\n\r\n for symbol in top5.index:\r\n stock_data = yf.download(symbol, start=start_date, end=end_date)\r\n atr = calculate_atr(stock_data)\r\n entry_price = stock_data['Close'].iloc[-1]\r\n stop_loss_price = entry_price - (stop_loss_multiplier * atr.iloc[-1])\r\n portfolio[symbol] = stop_loss_price\r\n return portfolio\r\n\r\ndef main():\r\n stock_symbols = ['AAPL', 'GOOGL', 'MSFT', 'AMZN', 'TSLA', 'NVDA', 'JPM']\r\n start_date = '2020-01-01'\r\n end_date = '2023-01-01'\r\n top5 = get_top5_performers(stock_symbols, start_date, end_date)\r\n print(\"Top 5 Performers based on 52-week rolling returns:\")\r\n print(top5)\r\n weekly_dates = pd.date_range(start=start_date, end=end_date, freq='W')\r\n portfolio_performance = pd.DataFrame(index=weekly_dates)\r\n\r\n for i in range(len(weekly_dates)-1):\r\n portfolio = rebalance_portfolio(top5, start_date, weekly_dates[i])\r\n\r\n \r\n portfolio_performance['Returns'] = 
portfolio_performance['Portfolio Value'].pct_change()\r\n    returns = portfolio_performance['Returns']\r\n    positions = pd.DataFrame()  # Placeholder for positions\r\n    transactions = pd.DataFrame()  # Placeholder for transactions\r\n    # pyfolio has no create_returns(); create_full_tear_sheet() takes the returns series directly\r\n    pf.create_full_tear_sheet(returns, positions=positions, transactions=transactions)\r\n\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"yogeshkb-yoyo/momentum_strategy_stock","sub_path":"momentum strategy.py","file_name":"momentum strategy.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"}
+{"seq_id":"14937106947","text":"# --------------- VECTOR\n# 0 1 2 3 4\nv = [34, 56, 32, 78, 12,13]\n\n# ----- SUBROUTINES\n# --- Procedures\n# This procedure displays the elements of the vector\ndef exibe_vetor(vet: list) -> None:\n for i in range(0, len(vet), 1):\n print(f\"v[{i}] = {vet[i]}\")\n\n# --- Functions\n# This function returns the last element of the vector\ndef ultimo_elemento(vet: list) -> int:\n return vet[len(vet) - 1]\n\n# This function sums the elements of the vector passed as a parameter\ndef soma_vetor(vet: list) -> int:\n soma = 0\n for i in range(0, len(vet), 1):\n soma += vet[i] # soma = soma + vet[i]\n return soma\n\n\n\n# ----- MAIN PROGRAM\n\nexibe_vetor(v)\nultimo = ultimo_elemento(v)\nprint(f\"Último elemento: {ultimo}\")\nprint(f\"Somatória do vetor: {soma_vetor(v)}\")\n\n\n''' \nprint(v)\nv[0] = int(input(\"Numero: \"))\nprint(v[0])\nprint(v)\nprint(len(v)) # returns the size of the vector or the list\n'''","repo_name":"IgorLuiz777/Python","sub_path":"lista/vetor.py","file_name":"vetor.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"}
+{"seq_id":"14010555212","text":"#coding: utf-8 \n \nimport smtplib \nfrom email.mime.multipart import MIMEMultipart \nfrom email.mime.text import MIMEText \nfrom email.mime.image import MIMEImage \nfrom email.header import Header \n \n# Parameters needed by smtplib\n# The sender and receivers below are the ones used for the actual mail transport.\nsmtpserver = 'smtp.163.com'\nusername = 'XXX@163.com'\npassword='XXX'\nsender='XXX@163.com'\n#receiver='XXX@126.com'\n# Multiple recipients\nreceiver=['XXX@126.com','XXX@126.com']\n\nsubject = 'Python email test'\n# Text encoded through a Header object carries utf-8 and Base64 encoding info. The Chinese subject below tested OK.\n#subject = '中文标题'\n#subject=Header(subject, 'utf-8').encode()\n \n# Build the MIMEMultipart message object\n# The subject, sender, recipients and date below are the ones displayed in the mail client.\nmsg = MIMEMultipart('mixed') \nmsg['Subject'] = subject\nmsg['From'] = 'XXX@163.com '\n#msg['To'] = 'XXX@126.com'\n# Multiple recipients: join the list into a ';'-separated string\nmsg['To'] = \";\".join(receiver) \n#msg['Date']='2012-3-16'\n\n# Build the plain-text part \ntext = \"Hi!\\nHow are you?\\nHere is the link you wanted:\\nhttp://www.baidu.com\" \ntext_plain = MIMEText(text,'plain', 'utf-8') \nmsg.attach(text_plain) \n\n# Build the inline image\nsendimagefile=open(r'D:\\pythontest\\testimage.png','rb').read()\nimage = MIMEImage(sendimagefile)\nimage.add_header('Content-ID','<image1>')\nimage[\"Content-Disposition\"] = 'attachment; filename=\"testimage.png\"'\nmsg.attach(image)\n\n# Build the HTML part\n# Inline image in the body: mail containing unauthorized content gets flagged as spam by NetEase, returning 554 DT:SPM :

<img src=\"cid:image1\">\nhtml = \"\"\"\n<html>\n<head></head>\n<body>\n<p>Hi!<br>\n How are you?<br>\n Here is the <a href=\"http://www.baidu.com\">link</a> you wanted.<br>\n<img src=\"cid:image1\">\n</p>\n</body>\n</html>\n\"\"\" \ntext_html = MIMEText(html,'html', 'utf-8')\ntext_html[\"Content-Disposition\"] = 'attachment; filename=\"texthtml.html\"' \nmsg.attach(text_html) \n\n\n# Build the attachment\nsendfile=open(r'D:\\pythontest\\1111.txt','rb').read()\ntext_att = MIMEText(sendfile, 'base64', 'utf-8') \ntext_att[\"Content-Type\"] = 'application/octet-stream' \n# The attachment can be renamed to aaa.txt as below \n#text_att[\"Content-Disposition\"] = 'attachment; filename=\"aaa.txt\"'\n# An alternative way to do the same\ntext_att.add_header('Content-Disposition', 'attachment', filename='aaa.txt')\n# The Chinese filename below did not work\n#text_att[\"Content-Disposition\"] = u'attachment; filename=\"中文附件.txt\"'.decode('utf-8')\nmsg.attach(text_att) \n \n# Send the mail\nsmtp = smtplib.SMTP() \nsmtp.connect('smtp.163.com')\n# set_debuglevel(1) prints the whole conversation with the SMTP server.\n#smtp.set_debuglevel(1) \nsmtp.login(username, password) \nsmtp.sendmail(sender, receiver, msg.as_string()) \nsmtp.quit()\n\n\n\n","repo_name":"hltfaith/python-example","sub_path":"module_case/built-in/smtplib/example/example01.py","file_name":"example01.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"9"}
+{"seq_id":"17456286570","text":"'''\nThis is about ChainMap (maps) in Python,\nusing the collections module, \njust to understand the concept'''\nfrom collections import ChainMap as maps\n\ndict1 = {'day1': 'Mon', 'day2': 'Tue', 'day3': 'Wed'}\ndict2 = {'day4':'Thu', 'day5': 'Fri', 'day6': 'Sat'}\n\nredict = maps(dict1, dict2)\nprint(redict.maps)\n\n#output\n'''[{'day1': 'Mon', 'day2': 'Tue', 'day3': 'Wed'}, {'day4': 'Thu', 'day5': 'Fri', 'day6': 'Sat'}]\n'''\n","repo_name":"pranjal201/Data-Structure","sub_path":"01week/07mapsin.py","file_name":"07mapsin.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"}
+{"seq_id":"74931071000","text":"import sys\nimport os\nimport re\n\nthisdir = os.path.dirname(os.path.realpath(__file__))\nbasedir = os.path.dirname(thisdir)\nsys.path.append(basedir)\nfrom datasets import allsamples\nfrom plotstyle import SimpleCanvas\nimport config\n\nimport ROOT\n\ndma = []\ndmv = []\n\nfor name in allsamples.names():\n matches = re.match('dm([av])-([0-9]+)-([0-9]+)', name)\n if matches:\n if matches.group(1) == 'a':\n dma.append((float(matches.group(2)), float(matches.group(3))))\n else:\n dmv.append((float(matches.group(2)), float(matches.group(3))))\n\ncanvas = SimpleCanvas(cms = False)\ncanvas.SetGrid(True)\n\ngdma = ROOT.TGraph(len(dma))\nfor iP, (med, dm) in enumerate(dma):\n gdma.SetPoint(iP, med, dm)\n\ngdma.SetTitle('DMA;M_{med} (GeV);M_{DM} (GeV)')\ngdma.SetMarkerStyle(21)\n\ncanvas.addHistogram(gdma, drawOpt = 'P')\ncanvas.printWeb('signal_points', 'dma', logx = True)\n\ncanvas.Clear()\ncanvas.SetGrid(True)\n\ngdmv = ROOT.TGraph(len(dmv))\nfor iP, (med, dm) in enumerate(dmv):\n gdmv.SetPoint(iP, med, dm)\n\ngdmv.SetTitle('DMV;M_{med} (GeV);M_{DM} (GeV)')\ngdmv.SetMarkerStyle(21)\n\ncanvas.addHistogram(gdmv, drawOpt = 'P')\ncanvas.printWeb('signal_points', 'dmv', logx = True)\n","repo_name":"MiT-HEP/MonoX","sub_path":"monophoton/signal/available_points.py","file_name":"available_points.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"87"}
+{"seq_id":"8976841740","text":"def get_unique_bottle(bottles: list) -> int:\n # Weigh all bottles at once, scaling bottle i by 10**i: a heavy (1.1) bottle at index 0\n # leaves a fractional part; a heavy bottle at index k > 0 turns decimal digit k-1 into 2.\n weight = 0.0\n power = 1\n for i in range(len(bottles)):\n weight += bottles[i] * power\n power *= 10\n\n # print(weight)\n if 
weight - int(weight) > 0:\n return 1\n else:\n cnt = 2\n int_weight = int(weight)\n while int_weight > 0:\n if int_weight % 10 == 2:\n return cnt\n else:\n int_weight //= 10\n cnt += 1\n\n return None\n\nif __name__ == '__main__':\n bottle_cnt = 5\n B = [1.0] * bottle_cnt\n print(B)\n for i in range(bottle_cnt):\n B[i] = 1.1\n print(B)\n print(f\"answer : {i+1}, result : {get_unique_bottle(B)}\")\n B[i] = 1.0","repo_name":"JoonHyeok-hozy-Kim/algorithm_study","sub_path":"Cracking/2024_internship_prep/ch06/interview_questions/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"}
+{"seq_id":"32812474441","text":"''' Solves problem #23 on https://projecteuler.net\r\nSergey Lisitsin. May 2019'''\r\n\r\ndef propdivs(num):\r\n ''' Returns all the number's proper divisors'''\r\n result = [1]\r\n for x in range(2,(num//2)+1):\r\n if num % x == 0:\r\n result.append(x)\r\n return result\r\n\r\ndef isabundant(num):\r\n return sum(propdivs(num)) > num\r\n\r\nabundants = set()\r\n\r\nfor x in range(12, 28124):\r\n if isabundant(x):\r\n abundants.add(x)\r\n\r\nsums = set()\r\nfor x in abundants:\r\n for y in abundants:\r\n if x + y < 28124:\r\n sums.add(x+y)\r\n\r\n# every integer above 28123 is a sum of two abundant numbers\r\nallnums = set(range(28124))\r\n\r\ntargetset = allnums - sums\r\nsum(targetset)\r\n","repo_name":"sergun4ik/projecteuler","sub_path":"problem_23.py","file_name":"problem_23.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"}
+{"seq_id":"11407178655","text":"from methods.DBManager import DBManager\nimport logging\nimport re\n\n\nclass StringReplacer:\n def __init__(self, replacements, ignore_case=False):\n patterns = sorted(replacements, key=len, reverse=True)\n self.replacements = [replacements[k] for k in patterns]\n re_mode = re.IGNORECASE if ignore_case else 0\n self.pattern = re.compile(\n '|'.join((\"({})\".format(p) for p in patterns)), re_mode)\n\n def tr(matcher):\n index = next((index for index, value in enumerate(matcher.groups()) if value), None)\n return self.replacements[index]\n\n self.tr = tr\n\n def __call__(self, string):\n return self.pattern.sub(self.tr, string)\n\n\ndimension_list = [\n 'abstraction and problem decomposition',\n 'parallelism',\n 'logical thinking',\n 'synchronization',\n 'algorithmic notions of flow control',\n 'user interactivity',\n 'data representation',\n 'visual auditory'\n]\n\nstr_dict = {\n \"<2c>\": \",\",\n \"<2e>\": \".\",\n \"<2f>\": \"/\",\n \"<2d>\": \"-\",\n \"<2b>\": \"+\",\n \"<3d>\": \"=\",\n \"<5b>\": \"[\",\n \"\": \"!\",\n \"<21>\": \"!\",\n \"\": \":\",\n \"<3a>\": \":\",\n \"<3b>\": \";\",\n \"\": \";\",\n \"\": \"“\",\n \"\": \"‘\",\n \"\": \"’\",\n \"<27>\": \"'\",\n \"\": \"”\",\n \"<3f>\": \"?\",\n \"\": \"?\",\n \"\": \"【\",\n \"\": \"】\",\n \"<7b>\": \"{\",\n \"<7d>\": \"}\",\n \"<29>\": \")\",\n \"\": \",\",\n \"\": \"。\",\n \"\": \"、\",\n \"<2a>\": \"*\",\n \"<5d>\": \"]\",\n \"<40>\": \"@\",\n \"<23>\": \"#\",\n \"<25>\": \"%\",\n \"<5e>\": \"^\",\n \"<26>\": \"&\",\n \"<5f>\": \"_\",\n \"<5c>\": \"\\\\\",\n \"<24>\": \"$\",\n}\n\n\ndef get_course_list(table_name):\n DB = DBManager()\n sql = f\"select * from {table_name}\"\n data = DB.fetchall(sql)\n DB.destroy()\n l1 = []\n if data:\n for i in data:\n # 红气球&90201&0&&1@6@-375,450,0@D:2|I1:3@@~2@102@@@5$4@~3@46@@@5$15@&0,400&*\n l1.append(i[2] + \"&\" + str(i[1]) + \"&0&&\" +\n str(i[15], encoding=\"utf-8\"))\n s1 = 
\"*\".join(l1)\n s1 = \"^C:1|G:1^\" + s1\n return s1\n else:\n return 0\n\n\ndef get_rule_data(course_id, dimension_id):\n DB = DBManager()\n sql = \"\"\"\n select t.name, g.type_percent, g.quota_id\n from eval_dimension_grammar as g\n inner join eval_grammar_type as t\n where g.course_id = %s\n and g.dimension_id = %s\n and g.type_id = t.id\n order by g.dimension_id;\n \"\"\"\n data = DB.fetchall(sql, params=(course_id, dimension_id))\n DB.destroy()\n rule_list = []\n for i in data:\n sub_rule_list = [i[0], i[1]]\n sql1 = \"\"\"\n select\n quota_name, add_subtr_mark, quota_precent,\n primary_rule, middle_rule, senior_rule\n from eval_grammar_quota_standard\n where id in (%s);\n \"\"\" % i[2]\n data1 = DB.fetchall(sql1)\n [\n sub_rule_list.append(\n [j[0], j[1], j[2],\n eval(j[3]),\n eval(j[4]),\n eval(j[5])]) for j in data1\n ]\n rule_list.append(sub_rule_list)\n return rule_list\n\n\ndef get_persent(course_id, p_type):\n sql = \"\"\"\n select\n {0}_abstraction_percent,\n {0}_parallelism_percent,\n {0}_logical_percent,\n {0}_sync_percent,\n {0}_flow_percent,\n {0}_interactivity_percent,\n {0}_data_percent,\n {0}_visual_percent\n from eval_dimension_course where course_id = %s;\n \"\"\".format(p_type)\n DB = DBManager()\n data = DB.fetchone(sql, params=course_id)\n DB.destroy()\n return data\n\n\ndef get_block_score(dimension_name, course_id, dimension_id, percent_list):\n level_list = [\n dimension_name,\n percent_list[dimension_id-1],\n [0, set()],\n [0, set()],\n [0, set()]\n ]\n DB = DBManager()\n sql = \"\"\"\n select course_id, level, level_score, contain_block\n from eval_dimension_score\n where course_id = %s\n and dimension_id = %s\n order by dimension_id, level;\n \"\"\"\n data = DB.fetchall(sql, params=(course_id, dimension_id))\n sub_index = 2\n if data:\n for i in data:\n if i[-1]:\n block_list = i[-1].split(\",\")\n block_set = set([int(j) for j in block_list])\n level_list[sub_index][0] = int(i[-2])\n level_list[sub_index][1] = block_set\n sub_index += 1\n DB.destroy()\n level_list.append(0.0)\n return level_list\n\n\ndef get_all_rule(course_id):\n rule = []\n percent_list = get_persent(course_id, \"b\")\n for i in range(1, 9):\n data = get_block_score(dimension_list[i-1], course_id, i, percent_list)\n rule.append(data)\n return rule\n\n\ndef str_replace(source):\n replacer = StringReplacer(str_dict, True)\n return replacer(source)\n\n\nif __name__ == \"__main__\":\n # print(get_rule_data(\"63_318_1\", \"1\"))\n get_course_list(\"tb_obj_1465_10018\")\n get_all_rule(\"63_318_1\")\n","repo_name":"iamjing66/tornaodo_sdk","sub_path":"handlers/kbeServer/Editor/Evaluate/get_sql.py","file_name":"get_sql.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"12433322194","text":"import os\nimport json\nimport logging\nfrom flask import Flask, request, render_template, jsonify\nfrom waitress import serve\n\nfrom utils import tidy_input, parse_input, calc_stats, InvalidInput, DecimalEncoder\nfrom read_data import read_data_from_db\nfrom populate_tables import create_tables, repopulate_roll_table\n\n\nSIDES_LIST = json.loads(os.getenv(\"SIDES_LIST\", \"[2,3,4,6,8,10,12,20]\"))\nMAX_DICE_TOTAL = int(os.getenv(\"MAX_DICE_TOTAL\", \"9\"))\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.getenv(\"DATABASE_URL\")\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # silence the deprecation warning\n\n\nfrom models import db, RollModel\ndb.init_app(app)\nwith 
app.app_context():\n app.logger.debug('Building and populating database tables')\n app.logger.debug(f'SIDES_LIST: {SIDES_LIST}')\n app.logger.debug(f'MAX_DICE_TOTAL: {MAX_DICE_TOTAL}')\n create_tables(app, db)\n repopulate_roll_table(app, db, sides_list=SIDES_LIST, max_dice_total=MAX_DICE_TOTAL)\n app.logger.debug('Database tables populated')\n\n\n@app.route('/', methods=['GET'])\ndef show_dice_stats():\n\n d1_input = request.args.get('d1', \"\", type=str)\n d2_input = request.args.get('d2', \"\", type=str)\n d3_input = request.args.get('d3', \"\", type=str)\n app.logger.info('D1 Input: {}; D2 input: {}; D3 input: {}'.format(d1_input, d2_input, d3_input))\n context_dict = {'d1_value': d1_input, 'd2_value': d2_input, 'd3_value': d3_input, 'max_number_dice': MAX_DICE_TOTAL}\n return render_template('template.html', **context_dict)\n\n\n@app.route('/get_data', methods=['GET'])\ndef get_data():\n input = request.args.get('d', None, type=str)\n app.logger.info('get_data input: {}'.format(input))\n\n if input is None or input == '':\n return jsonify(\"No input data\"), 422\n else:\n try:\n input_tidy = tidy_input(input)\n app.logger.debug('input_tidy: {}'.format(input_tidy))\n\n input_parsed = parse_input(input_tidy, sides=SIDES_LIST, max_dice=MAX_DICE_TOTAL)\n app.logger.debug('input_parsed: {}'.format(input_parsed))\n\n rolls_output = read_data_from_db(app, db, RollModel, input_parsed)\n app.logger.debug('rolls_output: {}'.format(rolls_output))\n\n stats_output = calc_stats(roll_name=input_tidy, rolls=rolls_output)\n app.logger.debug('stats_output: {}'.format(stats_output))\n\n return json.dumps({'rolls': rolls_output, 'stats': stats_output}, cls=DecimalEncoder)\n\n except InvalidInput as e:\n app.logger.debug('Caught error: {}'.format(str(e)))\n return jsonify(str(e)), 422\n\n except Exception as e:\n app.logger.debug('Other unknown error')\n return jsonify(f\"Unknown error: {str(e)}\"), 400\n\n\n# sanity check route\n@app.route('/canary', methods=['GET'])\ndef canary():\n return jsonify('tweet tweet!')\n\n\nif __name__ == '__main__':\n\n port = int(os.environ.get(\"PORT\", 5000))\n env = os.environ.get(\"APP_ENV\")\n\n app.logger.info('Starting server')\n\n if env == \"LOCAL_DEV\":\n app.logger.setLevel(logging.DEBUG)\n app.logger.info('Running on dev server')\n app.run(host='0.0.0.0', port=port, debug=True)\n else:\n app.logger.info('Running on prod server')\n serve(app, host=\"0.0.0.0\", port=port)\n","repo_name":"drb1001/dice_stats","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"}
+{"seq_id":"38681847235","text":"import numpy as np\n\n\ndef rmse(similarity_matrix, obtained_matrix):\n \"\"\"\n Root mean square error of the representation.\n :param similarity_matrix: The approximated matrix, usually the adjacency matrix of a graph\n :param obtained_matrix: The matrix obtained with a representation method\n :return: The value of RMSE\n \"\"\"\n N = similarity_matrix.shape[0]\n frob_error = np.linalg.norm(similarity_matrix - obtained_matrix)\n return frob_error / N\n\n\ndef nrmse(similarity_matrix, obtained_matrix):\n \"\"\"\n Normalised root mean square error of the representation.\n :param similarity_matrix: The approximated matrix, usually the adjacency matrix of a graph\n :param obtained_matrix: The matrix obtained with a representation method\n :return: The value of NRMSE\n \"\"\"\n frob_error = np.linalg.norm(similarity_matrix - 
obtained_matrix)\n    similarity_matrix_norm = np.linalg.norm(similarity_matrix)\n    return frob_error / similarity_matrix_norm\n\n\ndef precision_at_k(similarity_matrix, obtained_matrix):\n    \"\"\"\n    Precision at k for each k in range 0 to the number of non-zero elements of obtained_matrix\n    :param similarity_matrix: The approximated matrix, usually the adjacency matrix of a graph\n    :param obtained_matrix: The matrix obtained with a representation method\n    :return: The array of values of P@k\n    \"\"\"\n    flat_sim = similarity_matrix.flatten()\n    flat_obs = obtained_matrix.flatten()\n    ars = np.argsort(flat_obs)[::-1].flatten()\n    sorted_flat_obs = flat_obs[ars]\n    sorted_flat_sim = flat_sim[ars]\n    if len(sorted_flat_obs > 0) == 0:\n        if np.sum(flat_sim) == 0:\n            return np.repeat(1, flat_sim.shape[0])\n        else:\n            return np.repeat(0, flat_sim.shape[0])\n    ep = np.cumsum(sorted_flat_obs > 0)\n    true_predictions = sorted_flat_sim > 0\n    return np.cumsum(true_predictions) / ep\n\n\ndef average_precision(similarity_vector, obtained_vector):\n    \"\"\"\n    Average precision of prediction\n    :param similarity_vector: The approximated vector, usually a column from the adjacency matrix of a graph\n    :param obtained_vector: The vector obtained with a representation method\n    :return: The value of AP\n    \"\"\"\n    prec_k = precision_at_k(similarity_vector, obtained_vector)\n    flat_sim_positive = similarity_vector > 0\n\n    return np.sum(prec_k * flat_sim_positive) / np.sum(flat_sim_positive)\n\n\ndef mean_average_precision(similarity_matrix, obtained_matrix):\n    \"\"\"\n    Mean average precision of prediction. Mean precision is calculated for each vertex of a graph.\n    :param similarity_matrix: The approximated matrix, usually the adjacency matrix of a graph\n    :param obtained_matrix: The matrix obtained with a representation method\n    :return: The value of MAP\n    \"\"\"\n    return np.mean([average_precision(similarity_matrix[i, :], obtained_matrix[i, :])\n                    for i in range(similarity_matrix.shape[0])])\n\n\ndef all_average_precision(similarity_matrix, obtained_matrix):\n    \"\"\"\n    A vector of average precision values for each vertex.\n    :param similarity_matrix: The approximated matrix, usually the adjacency matrix of a graph\n    :param obtained_matrix: The matrix obtained with a representation method\n    :return: The array of values of AP\n    \"\"\"\n    return np.array([average_precision(similarity_matrix[i, :], obtained_matrix[i, :])\n                     for i in range(similarity_matrix.shape[0])])\n","repo_name":"lukaszbrzozowski/ReLeGy","sub_path":"relegy/metrics/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"87"} +{"seq_id":"18868237907","text":"\"\"\"p1_cmsproject URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n    https://docs.djangoproject.com/en/3.2/topics/http/urls/\r\nExamples:\r\nFunction views\r\n    1. Add an import: from my_app import views\r\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n    1. Add an import: from other_app.views import Home\r\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n    1. Import the include() function: from django.urls import include, path\r\n    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\nfrom cmsapp.views import home,dept,adddept,remdept,stu,addstu,remstu\r\n\r\nurlpatterns = [\r\n    path('admin/', admin.site.urls),\r\n    path(\"\",home,name=\"home\"),\r\n    path(\"dept\",dept,name=\"dept\"),\r\n    path(\"adddept\",adddept,name=\"adddept\"),\r\n    path(\"remdept/\",remdept,name=\"remdept\"),\r\n    path(\"stu\",stu,name=\"stu\"),\r\n    path(\"addstu\",addstu,name=\"addstu\"),\r\n    path(\"remstu/\",remstu,name=\"remstu\"),\r\n]\r\n","repo_name":"SURBHI0402/College-Application","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"87"} +{"seq_id":"1946836015","text":"import discord \n\nfrom discord import Embed\n\nfrom discord import Color\nfrom discord.ext import commands\n\nimport json\n\nfrom helptext import helpText as ht\n\ndata = json.load(open('data.json', 'r'))\n\nclass Help(commands.Cog):\n    def __init__(self, client):\n        self.client = client\n\n    @commands.command()\n    async def help(self, ctx, *, param = ''):\n        if ctx.author.id in data['bannedids']:\n            await ctx.send(':no_entry_sign: **Sorry, but you are banned from using Nen!**\\n **Please contact the owner on the official server to appeal for unban**')\n            return\n        \n        embed = Embed()\n        embed.color = Color.darker_gray()\n\n        # n.help doujin\n        if param == 'doujin': \n            embed.title = f'Nen sent senpai some help!\\n\\n{ht.doujin_title}'\n            embed.description = ht.doujinhelp\n\n        # n.help read\n        elif param == 'read': \n            embed.title = f'Nen sent senpai some help!\\n\\n{ht.read_title}'\n            embed.description = ht.readhelp\n\n        # n.help misc\n        elif param == 'misc': \n            embed.title = f'Nen sent senpai some help!\\n\\n{ht.misc_title}'\n            embed.description = ht.mischelp\n\n        elif param == 'reddit':\n            embed.title = f'Nen sent senpai some help!\\n\\n{ht.reddit_title}'\n            embed.description = ht.reddithelp\n\n        # n.help\n        else: \n            embed.title = f'Nen sent senpai some help!\\n\\n{ht.main_title}'\n            embed.description = ht.mainhelp\n        \n\n        embed.set_footer(text = f'Requested by {ctx.author.name}#{ctx.author.discriminator}', icon_url = ctx.author.avatar_url)\n\n        await ctx.send(embed=embed)\n\n\ndef setup(client):\n    client.add_cog(Help(client))","repo_name":"ultimus2935/NenV2-archived","sub_path":"extensions/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"87"} +{"seq_id":"37941532265","text":"import plotly.express as px\nimport pandas as pd\nimport numpy as np\n\nclass PileUp():\n\n    def set_csv(self, csv):\n        self.df = pd.read_csv(csv, sep='\\t', header=None, usecols=[1,2])\n    \n    def set_title(self, title):\n        self.set_title = title\n\n    def show(self, height=600):\n        self.height = height\n        numlst = [[i,j] for i,j in zip(self.df[1], self.df[2])]\n        # それぞれのリストの長さを追加\n        numlst = [[i,j,j-i] for i,j in numlst]\n\n        # 始点の位置とその長さ順にソート\n        numlst.sort(key=lambda x :(x[0], -x[2]))\n\n        # numlstの表示\n        # [始点, 終点, 長さ]\n        #print(numlst)\n\n        # ソート後のnumlstから長さの情報を削除\n        numlst = [[i,j] for i,j,k in numlst]\n        #print(numlst)\n\n        # result辞書を作成\n        # keyに階層情報\n        # valueに始点、終点情報を代入\n        result = {0:[numlst[0]]}\n\n        # result辞書の更新\n        # 一番下の階層の最終リストの終点が、入力した始点の値より小さい場合は、同一の階層のvalueにappendする\n        # そうでない場合は、一つ上の階層で同様の処理をする\n        # その処理が行えない場合は、新たな階層のkeyを作成し、そこに始点と終点のリストをappendする\n        for i in numlst[1:]:\n            for j in range(len(result.keys())):\n                if 
result[j][-1][1] < i[0]:\n result[j].append(i)\n break\n else:\n result.setdefault(max(result.keys())+1,[i]) \n #print(result)\n\n # figureの作図\n # [始点,終点,None, 始点, 終点, None, .....]\n # [階層#, 階層#, ......]\n # 上記の一次元のリストを作成し、それぞれx,yの値としてpx.lineで作図\n x_arr = np.array([])\n y_arr = np.array([])\n for i in range(len(result.keys())):\n x_arr = np.concatenate([x_arr, np.array( [i + [None] for i in result[i]]).flatten()])\n y_arr = np.concatenate([y_arr, np.full(3*len(result[i]), i)])\n fig = px.line(x=x_arr, y=y_arr)\n fig.update_traces(line=dict(width=10))\n fig.update_layout(title=self.set_title, title_x=0.5, width=1000, height=self.height)\n fig.show()","repo_name":"utsumidaisuke/bio_chem","sub_path":"ttn_splicing/analysis/lib/introngap.py","file_name":"introngap.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"41841098980","text":"import discord \n\nfrom discord.ext import commands, tasks \nfrom discord.utils import get \n\nfrom urllib.request import Request \nimport urllib \nimport bs4\n\nimport json \n\nclient = commands.Bot(command_prefix = \"-\")\nclient.remove_command('list')\nclient.remove_command('help')\n\n@client.event \nasync def on_ready():\n status_time.start()\n\n@client.event \nasync def on_member_join(member):\n\n role = get(member.guild.roles, id=697762004940619787)\n await member.add_roles(role)\n await member.edit(nick=\"[ 1학년 ] \" + str(member.name))\n\n@tasks.loop(seconds=5)\nasync def status_time():\n guild = client.get_guild(695877575255261306)\n await client.change_presence(activity=discord.Game(\"Document: \" + str(len(set(guild.get_channel(718828639944310865).text_channels))) + \", Channels: \" + str(len(set(guild.text_channels)))))\n\n@client.command()\nasync def invites(ctx):\n\n guild = client.get_guild(ctx.channel.guild.id)\n invites = await guild.invites()\n embed = discord.Embed(title=\"DISCLISTS INVITES\", color=discord.Colour.from_rgb(47, 49, 54))\n\n for invite in invites:\n embed.add_field(name=invite.code, value=str(invite.created_at) + \": \" + str(invite.uses), inline=False)\n\n await ctx.send(embed=embed)\n\n@client.command()\nasync def pas(ctx, o):\n\n if o == \"off\":\n role = get(ctx.author.roles, id=719859191908401182)\n if not role in ctx.author.roles:\n await ctx.send(\"이미 비공개 채널로 설정된 상태입니다\")\n else:\n await ctx.send(\"성공적으로 비공개로 설정하였습니다\")\n\n roles = get(ctx.author.guild.roles, id=719859191908401182)\n member = ctx.author\n await member.remove_roles(roles)\n\n if o == \"on\":\n role = get(ctx.author.roles, id=719859191908401182)\n if role in ctx.author.roles:\n await ctx.send(\"이미 공개 채널로 설정된 상태입니다\")\n else:\n await ctx.send(\"성공적으로 공개로 설정하였습니다\")\n\n roles = get(ctx.author.guild.roles, id=719859191908401182)\n member = ctx.author \n await member.add_roles(roles)\n\n if not o == \"on\" and not o == \"off\":\n await ctx.send(\"해당 메뉴를 찾을 수 없습니다!\")\n\n@client.command()\nasync def github(ctx, name, github=None):\n\n try: \n\n if github == None: \n \n hdr = {'User-Agent': 'Mozilla/5.0'}\n url = 'https://api.github.com/users/' + str(name) + \"?xml=1\"\n req = Request(url, headers=hdr)\n html = urllib.request.urlopen(req)\n obj = bs4.BeautifulSoup(html, \"html.parser\")\n \n with open('./github/github.json', 'w', encoding=\"UTF-8\") as w:\n w.write(str(obj))\n w.close()\n\n with open('./github/github.json', 'r', encoding=\"UTF-8\") as r:\n gi = json.load(r)\n gi_id = gi[\"id\"]\n gi_type = gi[\"type\"]\n gi_company = gi[\"company\"]\n gi_name = 
gi[\"name\"]\n gi_location = gi[\"location\"]\n gi_email = gi[\"email\"]\n gi_public_repos = gi[\"public_repos\"]\n gi_followers = gi[\"followers\"]\n gi_following = gi[\"following\"]\n gi_created_at = gi[\"created_at\"]\n gi_updated_at = gi[\"updated_at\"]\n r.close()\n\n hdr = {'User-Agent': 'Mozilla/5.0'}\n url = 'https://api.github.com/users/' + str(name) + \"?xml=1\"\n req = Request(url, headers=hdr)\n html = urllib.request.urlopen(req)\n obj = bs4.BeautifulSoup(html, \"html.parser\")\n\n if gi_company == None:\n gi_company = \"회사를 찾을 수 없습니다\"\n if gi_location == None:\n gi_location = \"위치를 찾을 수 없습니다\"\n if gi_email == None:\n gi_email = \"이메일을 찾을 수 없습니다\"\n\n embed = discord.Embed(title=\"GITHUB\", color=discord.Colour.from_rgb(47, 49, 54))\n embed.add_field(name=\"깃허브 닉네임:\", value=gi_name, inline=False)\n embed.add_field(name=\"깃허브 아이디:\", value=gi_id, inline=False)\n embed.add_field(name=\"깃허브 타입:\", value=gi_type, inline=False)\n embed.add_field(name=\"깃허브 프로필: 회사\", value=gi_company, inline=False)\n embed.add_field(name=\"깃허브 프로필: 위치\", value=gi_location, inline=False)\n embed.add_field(name=\"깃허브 프로필: 이메일\", value=gi_email, inline=False)\n embed.add_field(name=\"깃허브 프로필: 공개 레포지토리\", value=gi_public_repos, inline=False)\n embed.add_field(name=\"깃허브 프로필: 내 팔로우\", value=gi_followers, inline=False)\n embed.add_field(name=\"깃허브 프로필: 팔로잉\", value=gi_following, inline=False)\n embed.add_field(name=\"깃허브 프로필: 계정 생성 날짜\", value=gi_created_at, inline=False)\n embed.add_field(name=\"깃허브 프로필: 업데이트 날짜\", value=gi_updated_at, inline=False)\n await ctx.send(embed=embed)\n\n if github == \"orgs\":\n\n hdr = {'User-Agent': 'Mozilla/5.0'}\n url = 'https://api.github.com/users/' + str(name) + \"/orgs\"\n req = Request(url, headers=hdr)\n html = urllib.request.urlopen(req)\n obj = bs4.BeautifulSoup(html, \"html.parser\")\n\n with open('./github/github.json', 'w', encoding=\"UTF-8\") as w:\n w.write(str(obj))\n w.close()\n\n with open('./github/github.json', 'r', encoding=\"UTF-8\") as r:\n gi = json.load(r)\n embed = discord.Embed(title=\"GITHUB ORGS\", color=discord.Colour.from_rgb(47, 49, 54))\n\n github_group = 0\n for gis in gi:\n\n github_group += 1\n embed.add_field(name=gis[\"login\"], value=\"https://github.com/\" + str(gis[\"login\"]), inline=False)\n\n r.close()\n await ctx.send(embed=embed)\n\n except:\n\n await ctx.send(\"해당 정보를 찾을 수 없습니다!\")\n\n@client.command()\nasync def help(ctx):\n\n embed = discord.Embed(color=discord.Colour.from_rgb(47, 49, 54))\n embed.add_field(name=\"-invites\", value=\"현재 사용하고 있는 초대 에셋을 불러옵니다\", inline=False)\n embed.add_field(name=\"-pas \", value=\"파트너 채널을 공개 채널로 전환합니다 (사용자)\", inline=False)\n embed.add_field(name=\"-github \", value=\"깃허브에서 프로필 정보를 조회 합니다\", inline=False)\n embed.set_footer(text=\"해당 봇은 파이썬 기반으로 제작되었습니다: DISCLISTS TEAM\")\n await ctx.send(embed=embed)\n\nclient.run('coken')\n","repo_name":"ttakkku/DiscListsBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"87"} +{"seq_id":"27679736485","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n\ndef print_solution(total, range_var, max_var):\n print(\"Total: \", total)\n print(\"Jarak: \", range_var)\n print(\"Tinggi Maksimal: \", max_var)\n\n\ndef get_plot(nx, ny, ax, ay):\n plt.figure()\n plt.plot(nx, ny, c='r', label='numerical')\n plt.plot(ax, ay, c='b', label='analytic')\n plt.axhline(c='black')\n plt.axvline(c='black')\n plt.legend()\n 
plt.show()\n\n\nclass Projectile:\n    ANGLE = 35\n    ANGLE_RAD = (ANGLE / 360) * (2 * np.pi)\n    GRAVITY = 9.806\n\n    def __init__(self, x, y, v, time, dt, d, m):\n        self.x = x\n        self.y = y\n        self.v = v\n        self.time = time\n        self.dt = dt\n        self.d = d\n        self.m = m\n\n    def numeric(self):\n        array_x = [self.x]\n        array_y = [self.y]\n        array_t = [self.time]\n\n        ax = 0\n        ay = -self.GRAVITY\n\n        vx = self.v * np.cos(self.ANGLE_RAD)\n        vy = self.v * np.sin(self.ANGLE_RAD)\n\n        while self.y >= 0:\n            vy += ay * self.dt\n            vx += ax * self.dt\n            self.y += vy * self.dt\n            self.x += vx * self.dt\n            self.time += self.dt\n\n            if self.y <= 0:\n                break\n\n            array_x.append(self.x)\n            array_y.append(self.y)\n            array_t.append(self.time)\n\n        return {\n            \"total_num\": array_t[-1],\n            \"range_num\": array_x[-1],\n            \"max_num\": np.max(array_y),\n            \"array_x\": array_x,\n            \"array_y\": array_y,\n            \"array_t\": array_t\n        }\n\n    def analytic(self, array_t):\n        array_x = [0]\n        array_y = [0]\n\n        x0 = 0\n        y0 = 0\n\n        vx0 = self.v * np.cos(self.ANGLE_RAD)\n        vy0 = self.v * np.sin(self.ANGLE_RAD)\n\n        vx = vx0\n        vy = -vy0\n\n        for t in array_t:\n            v = math.sqrt((vx ** 2) + (vy ** 2))\n            ax = (-self.d / self.m) * v * vx\n            ay = -self.GRAVITY - ((self.d / self.m) * v * vy)\n            x = x0 + (vx0 * t) + ((ax / 2) * t ** 2)\n            y = y0 + (vy0 * t) + ((ay / 2) * t ** 2)\n            array_x.append(x)\n            array_y.append(y)\n\n        total = (2 * self.v * np.sin(self.ANGLE_RAD)) / self.GRAVITY\n\n        return {\n            \"total_num\": total,\n            \"range_num\": self.v * np.cos(self.ANGLE_RAD) * total,\n            \"max_num\": (self.v ** 2 * np.sin(self.ANGLE_RAD) ** 2) / (2 * self.GRAVITY),\n            \"array_x\": array_x,\n            \"array_y\": array_y\n        }\n\n    def print_result(self):\n        numeric = self.numeric()\n        analytic = self.analytic(numeric[\"array_t\"])\n\n        print(\"Solusi numerik\")\n        print_solution(numeric[\"total_num\"], numeric[\"range_num\"], numeric[\"max_num\"])\n\n        print(\"Solusi analytic\")\n        print_solution(analytic[\"total_num\"], analytic[\"range_num\"], analytic[\"max_num\"])\n\n        get_plot(numeric[\"array_x\"], numeric[\"array_y\"], analytic[\"array_x\"], analytic[\"array_y\"])\n\n\npr = Projectile(0, 0, 50, 0, 0.1, 0.0013, 150)\npr.print_result()\n","repo_name":"irvanrahmanto/projectile-motion_basicmodeling","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"39472857886","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 20 18:51:54 2018\n\n@author: miller\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom sklearn.datasets import load_svmlight_file\n\n\ndata_path = \"/home/miller/Documents/BDH NLP/Data/\"\n\nsorted_data = pd.read_csv(data_path + 'all_data_sorted.csv')\n#sorted_data.sort_values(['length'], inplace=True)\nsorted_data.to_csv(data_path + 'sorted_disch_sums_no_split.csv')\n\n\n### Ordering training set (longest discharge summaries first for gpu memory purposes) ###\ntrain = pd.read_csv(data_path + \"sorted_sums_matched_struc_train_full.csv\")\ntrain.sort_values(\"length\", inplace=True, ascending = False)\n\ntrain.to_csv(data_path + 'sorted_sums_matched_struc_train_reversed.csv', index=False)\n#test[[\"SUBJECT_ID\", \"HADM_ID\"]].to_csv('sorted_sums_matched_struc_train_reversed_ids.csv')\n\n\n#test = pd.read_csv(\"sorted_sums_matched_struc_test.csv\")\n#test.sort_values(\"length\", inplace=True, ascending = False)\n#test.to_csv('sorted_sums_matched_struc_test_reversed.csv', index=False)\n\n\n### Ordering validation set ###\nval = pd.read_csv(data_path + \"sorted_sums_matched_struc_val.csv\")\nval.sort_values(\"length\", inplace=True, ascending = False)\nval.to_csv(data_path + 'sorted_sums_matched_struc_val_reversed.csv', index=False)\n\n### Ordering test set ###\ntest = pd.read_csv(data_path + \"sorted_sums_matched_struc_test.csv\")\ntest.sort_values(\"length\", inplace=True, ascending = False)\ntest.to_csv(data_path + 'sorted_sums_matched_struc_test_reversed.csv', index=False)\n\n\n# Combining data sets\nall_data = train.append(val)\nall_data = all_data.append(test)\nall_data[['SUBJECT_ID','HADM_ID']].to_csv(data_path + 'all_reversed_ids.csv')\n\n\nX, y = load_svmlight_file(data_path + \"struc_data.svmlight\")\nX = X.tocsr()\n\nnp.sum(all_data['READMISSION'] != y)\n\nall_data.reset_index(inplace=True, drop=True)\n\n\n### Testing to make sure lengths were sorted properly\nlens = np.array(all_data['length'])\nlens_shift = np.array(all_data['length'].shift(-1))\n\nif np.sum( (lens - lens_shift) < 0) == 2:\n    print(\"All good\")\n\n\n\n","repo_name":"amogh-nalwaya/6250-project","sub_path":"dataproc/pure_sorting_disch_sums.py","file_name":"pure_sorting_disch_sums.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"} +{"seq_id":"38785315543","text":"import crispy_forms\n\nfrom setuptools import setup, find_packages\n\n\ntests_require = [\n    'Django>=1.3,<1.6',\n]\n\nsetup(\n    name='django-crispy-forms',\n    version=crispy_forms.__version__,\n    description=\"Best way to have Django DRY forms\",\n    long_description=open('README.rst').read(),\n    classifiers=[\n        \"Development Status :: 5 - Production/Stable\",\n        \"Environment :: Web Environment\",\n        \"Framework :: Django\",\n        \"License :: OSI Approved :: BSD License\",\n        \"Operating System :: OS Independent\",\n        \"Programming Language :: JavaScript\",\n        \"Programming Language :: Python :: 2.6\",\n        \"Programming Language :: Python :: 2.7\",\n        \"Programming Language :: Python :: 3.3\",\n        \"Topic :: Internet :: WWW/HTTP\",\n        \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n        \"Topic :: Software Development :: Libraries :: Python Modules\",\n    ],\n    extras_require={\n        'tests': tests_require,\n    },\n    keywords=['forms', 'django', 'crispy', 'DRY'],\n    author='Miguel Araujo',\n    author_email='miguel.araujo.perez@gmail.com',\n    url='http://github.com/maraujop/django-crispy-forms',\n    license='MIT',\n    packages=find_packages(exclude=['docs']),\n    include_package_data=True,\n    zip_safe=False,\n)\n","repo_name":"neysay/JobSnap","sub_path":"src/build/django-crispy-forms/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"} +{"seq_id":"39249262395","text":"import copy\nfrom unittest import mock\n\nimport netaddr\nfrom oslo_db import exception as db_exc\nfrom oslo_serialization import jsonutils\nfrom oslo_utils.fixture import uuidsentinel\nfrom oslo_utils import timeutils\nfrom oslo_versionedobjects import base as ovo_base\nfrom oslo_versionedobjects import exception as ovo_exc\n\nfrom nova import conf\nfrom nova.db.main import api as db\nfrom nova import exception\nfrom nova import objects\nfrom nova.objects import base\nfrom nova.objects import compute_node\nfrom nova.objects import hv_spec\nfrom nova.objects import service\nfrom nova.tests.unit import fake_pci_device_pools\nfrom nova.tests.unit.objects import test_objects\n\n\nNOW = timeutils.utcnow().replace(microsecond=0)\nfake_stats = 
{'num_foo': '10'}\nfake_stats_db_format = jsonutils.dumps(fake_stats)\n# host_ip is coerced from a string to an IPAddress\n# but needs to be converted to a string for the database format\nfake_host_ip = '127.0.0.1'\nfake_numa_topology = objects.NUMATopology(cells=[\n objects.NUMACell(\n id=0,\n cpuset=set([1, 2]),\n pcpuset=set(),\n memory=512,\n cpu_usage=0,\n memory_usage=0,\n mempages=[],\n pinned_cpus=set(),\n siblings=[set([1]), set([2])]),\n objects.NUMACell(\n id=1,\n cpuset=set([3, 4]),\n pcpuset=set(),\n memory=512,\n cpu_usage=0,\n memory_usage=0,\n mempages=[],\n pinned_cpus=set(),\n siblings=[set([3]), set([4])])])\nfake_numa_topology_db_format = fake_numa_topology._to_json()\nfake_supported_instances = [('x86_64', 'kvm', 'hvm')]\nfake_hv_spec = hv_spec.HVSpec(arch=fake_supported_instances[0][0],\n hv_type=fake_supported_instances[0][1],\n vm_mode=fake_supported_instances[0][2])\nfake_supported_hv_specs = [fake_hv_spec]\n# for backward compatibility, each supported instance object\n# is stored as a list in the database\nfake_supported_hv_specs_db_format = jsonutils.dumps([fake_hv_spec.to_list()])\nfake_pci = jsonutils.dumps(fake_pci_device_pools.fake_pool_list_primitive)\nfake_compute_node = {\n 'created_at': NOW,\n 'updated_at': None,\n 'deleted_at': None,\n 'deleted': False,\n 'id': 123,\n 'uuid': uuidsentinel.fake_compute_node,\n 'service_id': None,\n 'host': 'fake',\n 'vcpus': 4,\n 'memory_mb': 4096,\n 'local_gb': 1024,\n 'vcpus_used': 2,\n 'memory_mb_used': 2048,\n 'local_gb_used': 512,\n 'hypervisor_type': 'Hyper-Dan-VM-ware',\n 'hypervisor_version': 1001,\n 'hypervisor_hostname': 'vm.danplanet.com',\n 'free_ram_mb': 1024,\n 'free_disk_gb': 256,\n 'current_workload': 100,\n 'running_vms': 2013,\n 'cpu_info': 'Schmintel i786',\n 'disk_available_least': 256,\n 'metrics': '',\n 'stats': fake_stats_db_format,\n 'host_ip': fake_host_ip,\n 'numa_topology': fake_numa_topology_db_format,\n 'supported_instances': fake_supported_hv_specs_db_format,\n 'pci_stats': fake_pci,\n 'cpu_allocation_ratio': 16.0,\n 'ram_allocation_ratio': 1.5,\n 'disk_allocation_ratio': 1.0,\n 'mapped': 0,\n }\n# FIXME(sbauza) : For compatibility checking, to be removed once we are sure\n# that all computes are running latest DB version with host field in it.\nfake_old_compute_node = fake_compute_node.copy()\ndel fake_old_compute_node['host']\n# resources are passed from the virt drivers and copied into the compute_node\nfake_resources = {\n 'vcpus': 2,\n 'memory_mb': 1024,\n 'local_gb': 10,\n 'cpu_info': 'fake-info',\n 'vcpus_used': 1,\n 'memory_mb_used': 512,\n 'local_gb_used': 4,\n 'numa_topology': fake_numa_topology_db_format,\n 'hypervisor_type': 'fake-type',\n 'hypervisor_version': 1,\n 'hypervisor_hostname': 'fake-host',\n 'disk_available_least': 256,\n 'host_ip': fake_host_ip,\n 'supported_instances': fake_supported_instances\n}\nfake_compute_with_resources = objects.ComputeNode(\n vcpus=fake_resources['vcpus'],\n memory_mb=fake_resources['memory_mb'],\n local_gb=fake_resources['local_gb'],\n cpu_info=fake_resources['cpu_info'],\n vcpus_used=fake_resources['vcpus_used'],\n memory_mb_used=fake_resources['memory_mb_used'],\n local_gb_used =fake_resources['local_gb_used'],\n numa_topology=fake_resources['numa_topology'],\n hypervisor_type=fake_resources['hypervisor_type'],\n hypervisor_version=fake_resources['hypervisor_version'],\n hypervisor_hostname=fake_resources['hypervisor_hostname'],\n disk_available_least=fake_resources['disk_available_least'],\n 
host_ip=netaddr.IPAddress(fake_resources['host_ip']),\n supported_hv_specs=fake_supported_hv_specs,\n)\n\nCONF = conf.CONF\n\n\nclass _TestComputeNodeObject(object):\n def supported_hv_specs_comparator(self, expected, obj_val):\n obj_val = [inst.to_list() for inst in obj_val]\n self.assertJsonEqual(expected, obj_val)\n\n def pci_device_pools_comparator(self, expected, obj_val):\n if obj_val is not None:\n obj_val = obj_val.obj_to_primitive()\n self.assertJsonEqual(expected, obj_val)\n else:\n self.assertEqual(expected, obj_val)\n\n def comparators(self):\n return {'stats': self.assertJsonEqual,\n 'host_ip': self.str_comparator,\n 'supported_hv_specs': self.supported_hv_specs_comparator,\n 'pci_device_pools': self.pci_device_pools_comparator,\n }\n\n def subs(self):\n return {'supported_hv_specs': 'supported_instances',\n 'pci_device_pools': 'pci_stats'}\n\n @mock.patch.object(db, 'compute_node_get')\n def test_get_by_id(self, get_mock):\n get_mock.return_value = fake_compute_node\n compute = compute_node.ComputeNode.get_by_id(self.context, 123)\n self.compare_obj(compute, fake_compute_node,\n subs=self.subs(),\n comparators=self.comparators())\n self.assertNotIn('uuid', compute.obj_what_changed())\n get_mock.assert_called_once_with(self.context, 123)\n\n @mock.patch.object(compute_node.ComputeNodeList, 'get_all_by_uuids')\n def test_get_by_uuid(self, get_all_by_uuids):\n fake_node = copy.copy(fake_compute_node)\n fake_node['stats'] = None\n get_all_by_uuids.return_value = objects.ComputeNodeList(\n objects=[objects.ComputeNode(**fake_node)])\n compute = compute_node.ComputeNode.get_by_uuid(\n self.context, uuidsentinel.fake_compute_node)\n self.assertEqual(uuidsentinel.fake_compute_node, compute.uuid)\n get_all_by_uuids.assert_called_once_with(\n self.context, [uuidsentinel.fake_compute_node])\n\n @mock.patch.object(compute_node.ComputeNodeList, 'get_all_by_uuids')\n def test_get_by_uuid_not_found(self, get_all_by_uuids):\n get_all_by_uuids.return_value = objects.ComputeNodeList()\n self.assertRaises(exception.ComputeHostNotFound,\n compute_node.ComputeNode.get_by_uuid,\n self.context, uuidsentinel.fake_compute_node)\n get_all_by_uuids.assert_called_once_with(\n self.context, [uuidsentinel.fake_compute_node])\n\n @mock.patch.object(db, 'compute_node_get')\n def test_get_without_mapped(self, get_mock):\n fake_node = copy.copy(fake_compute_node)\n fake_node['mapped'] = None\n get_mock.return_value = fake_node\n compute = compute_node.ComputeNode.get_by_id(self.context, 123)\n self.compare_obj(compute, fake_compute_node,\n subs=self.subs(),\n comparators=self.comparators())\n self.assertIn('mapped', compute)\n self.assertEqual(0, compute.mapped)\n\n @mock.patch.object(objects.Service, 'get_by_id')\n @mock.patch.object(db, 'compute_node_get')\n def test_get_by_id_with_host_field_not_in_db(self, mock_cn_get,\n mock_obj_svc_get):\n fake_compute_node_with_svc_id = fake_compute_node.copy()\n fake_compute_node_with_svc_id['service_id'] = 123\n fake_compute_node_with_no_host = fake_compute_node_with_svc_id.copy()\n host = fake_compute_node_with_no_host.pop('host')\n fake_service = service.Service(id=123)\n fake_service.host = host\n\n mock_cn_get.return_value = fake_compute_node_with_no_host\n mock_obj_svc_get.return_value = fake_service\n\n compute = compute_node.ComputeNode.get_by_id(self.context, 123)\n self.compare_obj(compute, fake_compute_node_with_svc_id,\n subs=self.subs(),\n comparators=self.comparators())\n\n @mock.patch.object(db, 'compute_nodes_get_by_service_id')\n def 
test_get_by_service_id(self, get_mock):\n get_mock.return_value = [fake_compute_node]\n compute = compute_node.ComputeNode.get_by_service_id(self.context, 456)\n self.compare_obj(compute, fake_compute_node,\n subs=self.subs(),\n comparators=self.comparators())\n get_mock.assert_called_once_with(self.context, 456)\n\n @mock.patch.object(db, 'compute_node_get_by_host_and_nodename')\n def test_get_by_host_and_nodename(self, cn_get_by_h_and_n):\n cn_get_by_h_and_n.return_value = fake_compute_node\n\n compute = compute_node.ComputeNode.get_by_host_and_nodename(\n self.context, 'fake', 'vm.danplanet.com')\n self.compare_obj(compute, fake_compute_node,\n subs=self.subs(),\n comparators=self.comparators())\n\n @mock.patch.object(db, 'compute_node_get_by_nodename')\n def test_get_by_nodename(self, cn_get_by_n):\n cn_get_by_n.return_value = fake_compute_node\n\n compute = compute_node.ComputeNode.get_by_nodename(\n self.context, 'vm.danplanet.com')\n self.compare_obj(compute, fake_compute_node,\n subs=self.subs(),\n comparators=self.comparators())\n\n @mock.patch('nova.db.main.api.compute_node_get_all_by_host')\n def test_get_first_node_by_host_for_old_compat(\n self, cn_get_all_by_host):\n another_node = fake_compute_node.copy()\n another_node['hypervisor_hostname'] = 'neverland'\n cn_get_all_by_host.return_value = [fake_compute_node, another_node]\n\n compute = (\n compute_node.ComputeNode.get_first_node_by_host_for_old_compat(\n self.context, 'fake')\n )\n self.compare_obj(compute, fake_compute_node,\n subs=self.subs(),\n comparators=self.comparators())\n\n @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')\n def test_get_first_node_by_host_for_old_compat_not_found(\n self, cn_get_all_by_host):\n cn_get_all_by_host.side_effect = exception.ComputeHostNotFound(\n host='fake')\n\n self.assertRaises(\n exception.ComputeHostNotFound,\n compute_node.ComputeNode.get_first_node_by_host_for_old_compat,\n self.context, 'fake')\n\n @mock.patch.object(db, 'compute_node_create')\n @mock.patch(\n 'nova.db.main.api.compute_node_get', return_value=fake_compute_node)\n def test_create(self, mock_get, mock_create):\n mock_create.return_value = fake_compute_node\n compute = compute_node.ComputeNode(context=self.context)\n compute.service_id = 456\n compute.uuid = uuidsentinel.fake_compute_node\n compute.stats = fake_stats\n # NOTE (pmurray): host_ip is coerced to an IPAddress\n compute.host_ip = fake_host_ip\n compute.supported_hv_specs = fake_supported_hv_specs\n with mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_gu:\n compute.create()\n self.assertFalse(mock_gu.called)\n self.compare_obj(compute, fake_compute_node,\n subs=self.subs(),\n comparators=self.comparators())\n param_dict = {\n 'service_id': 456,\n 'stats': fake_stats_db_format,\n 'host_ip': fake_host_ip,\n 'supported_instances': fake_supported_hv_specs_db_format,\n 'uuid': uuidsentinel.fake_compute_node\n }\n mock_create.assert_called_once_with(self.context, param_dict)\n\n @mock.patch('nova.db.main.api.compute_node_create')\n @mock.patch('oslo_utils.uuidutils.generate_uuid')\n @mock.patch(\n 'nova.db.main.api.compute_node_get', return_value=fake_compute_node)\n def test_create_allocates_uuid(self, mock_get, mock_gu, mock_create):\n mock_create.return_value = fake_compute_node\n mock_gu.return_value = fake_compute_node['uuid']\n obj = objects.ComputeNode(context=self.context)\n obj.create()\n mock_gu.assert_called_once_with()\n mock_create.assert_called_once_with(\n self.context, {'uuid': fake_compute_node['uuid']})\n\n 
@mock.patch('nova.db.main.api.compute_node_create')\n @mock.patch(\n 'nova.db.main.api.compute_node_get', return_value=fake_compute_node)\n def test_recreate_fails(self, mock_get, mock_create):\n mock_create.return_value = fake_compute_node\n compute = compute_node.ComputeNode(context=self.context)\n compute.service_id = 456\n compute.uuid = uuidsentinel.fake_compute_node\n compute.create()\n self.assertRaises(exception.ObjectActionError, compute.create)\n param_dict = {'service_id': 456,\n 'uuid': uuidsentinel.fake_compute_node}\n mock_create.assert_called_once_with(self.context, param_dict)\n\n @mock.patch('nova.db.main.api.compute_node_create')\n def test_create_duplicate(self, mock_create):\n mock_create.side_effect = db_exc.DBDuplicateEntry\n compute = compute_node.ComputeNode(context=self.context)\n compute.service_id = 456\n compute.hypervisor_hostname = 'node1'\n self.assertRaises(exception.DuplicateRecord, compute.create)\n\n @mock.patch.object(db, 'compute_node_update')\n @mock.patch(\n 'nova.db.main.api.compute_node_get', return_value=fake_compute_node)\n def test_save(self, mock_get, mock_update):\n mock_update.return_value = fake_compute_node\n compute = compute_node.ComputeNode(context=self.context)\n compute.id = 123\n compute.vcpus_used = 3\n compute.stats = fake_stats\n compute.uuid = uuidsentinel.fake_compute_node\n # NOTE (pmurray): host_ip is coerced to an IPAddress\n compute.host_ip = fake_host_ip\n compute.supported_hv_specs = fake_supported_hv_specs\n compute.save()\n self.compare_obj(compute, fake_compute_node,\n subs=self.subs(),\n comparators=self.comparators())\n param_dict = {\n 'vcpus_used': 3,\n 'stats': fake_stats_db_format,\n 'host_ip': fake_host_ip,\n 'supported_instances': fake_supported_hv_specs_db_format,\n 'uuid': uuidsentinel.fake_compute_node,\n }\n mock_update.assert_called_once_with(self.context, 123, param_dict)\n\n @mock.patch('nova.db.main.api.compute_node_update')\n def test_save_pci_device_pools_empty(self, mock_update):\n fake_pci = jsonutils.dumps(\n objects.PciDevicePoolList(objects=[]).obj_to_primitive())\n compute_dict = fake_compute_node.copy()\n compute_dict['pci_stats'] = fake_pci\n mock_update.return_value = compute_dict\n\n compute = compute_node.ComputeNode(context=self.context)\n compute.id = 123\n compute.pci_device_pools = objects.PciDevicePoolList(objects=[])\n compute.save()\n self.compare_obj(compute, compute_dict,\n subs=self.subs(),\n comparators=self.comparators())\n\n mock_update.assert_called_once_with(\n self.context, 123, {'pci_stats': fake_pci})\n\n @mock.patch('nova.db.main.api.compute_node_update')\n def test_save_pci_device_pools_null(self, mock_update):\n compute_dict = fake_compute_node.copy()\n compute_dict['pci_stats'] = None\n mock_update.return_value = compute_dict\n\n compute = compute_node.ComputeNode(context=self.context)\n compute.id = 123\n compute.pci_device_pools = None\n compute.save()\n self.compare_obj(compute, compute_dict,\n subs=self.subs(),\n comparators=self.comparators())\n\n mock_update.assert_called_once_with(\n self.context, 123, {'pci_stats': None})\n\n @mock.patch.object(db, 'compute_node_create',\n return_value=fake_compute_node)\n @mock.patch.object(db, 'compute_node_get',\n return_value=fake_compute_node)\n def test_set_id_failure(self, mock_get, db_mock):\n compute = compute_node.ComputeNode(context=self.context,\n uuid=fake_compute_node['uuid'])\n compute.create()\n self.assertRaises(ovo_exc.ReadOnlyFieldError, setattr,\n compute, 'id', 124)\n\n @mock.patch.object(db, 
'compute_node_delete')\n def test_destroy(self, mock_delete):\n compute = compute_node.ComputeNode(context=self.context)\n compute.id = 123\n compute.destroy()\n mock_delete.assert_called_once_with(self.context, 123, constraint=None)\n\n def test_destroy_host_constraint(self):\n # Create compute node with host='fake'\n compute = fake_compute_with_resources.obj_clone()\n compute._context = self.context\n compute.host = 'fake'\n compute.create()\n # Simulate a compute node ownership change due to a node rebalance\n compute.host = 'different'\n self.assertRaises(exception.ObjectActionError, compute.destroy)\n\n @mock.patch.object(db, 'compute_node_get_all')\n def test_get_all(self, mock_get_all):\n mock_get_all.return_value = [fake_compute_node]\n computes = compute_node.ComputeNodeList.get_all(self.context)\n self.assertEqual(1, len(computes))\n self.compare_obj(computes[0], fake_compute_node,\n subs=self.subs(),\n comparators=self.comparators())\n mock_get_all.assert_called_once_with(self.context)\n\n @mock.patch.object(db, 'compute_node_search_by_hypervisor')\n def test_get_by_hypervisor(self, mock_search):\n mock_search.return_value = [fake_compute_node]\n computes = compute_node.ComputeNodeList.get_by_hypervisor(self.context,\n 'hyper')\n self.assertEqual(1, len(computes))\n self.compare_obj(computes[0], fake_compute_node,\n subs=self.subs(),\n comparators=self.comparators())\n mock_search.assert_called_once_with(self.context, 'hyper')\n\n @mock.patch('nova.db.main.api.compute_node_get_all_by_pagination',\n return_value=[fake_compute_node])\n def test_get_by_pagination(self, fake_get_by_pagination):\n computes = compute_node.ComputeNodeList.get_by_pagination(\n self.context, limit=1, marker=1)\n self.assertEqual(1, len(computes))\n self.compare_obj(computes[0], fake_compute_node,\n subs=self.subs(),\n comparators=self.comparators())\n\n @mock.patch('nova.db.main.api.compute_nodes_get_by_service_id')\n def test__get_by_service(self, cn_get_by_svc_id):\n cn_get_by_svc_id.return_value = [fake_compute_node]\n computes = compute_node.ComputeNodeList._get_by_service(self.context,\n 123)\n self.assertEqual(1, len(computes))\n self.compare_obj(computes[0], fake_compute_node,\n subs=self.subs(),\n comparators=self.comparators())\n\n @mock.patch('nova.db.main.api.compute_node_get_all_by_host')\n def test_get_all_by_host(self, cn_get_all_by_host):\n cn_get_all_by_host.return_value = [fake_compute_node]\n computes = compute_node.ComputeNodeList.get_all_by_host(self.context,\n 'fake')\n self.assertEqual(1, len(computes))\n self.compare_obj(computes[0], fake_compute_node,\n subs=self.subs(),\n comparators=self.comparators())\n\n def test_compat_numa_topology(self):\n compute = compute_node.ComputeNode(numa_topology='fake-numa-topology')\n versions = ovo_base.obj_tree_get_versions('ComputeNode')\n primitive = compute.obj_to_primitive(target_version='1.4',\n version_manifest=versions)\n self.assertNotIn('numa_topology', primitive['nova_object.data'])\n\n primitive = compute.obj_to_primitive(target_version='1.5',\n version_manifest=versions)\n self.assertIn('numa_topology', primitive['nova_object.data'])\n\n def test_compat_supported_hv_specs(self):\n compute = compute_node.ComputeNode()\n compute.supported_hv_specs = fake_supported_hv_specs\n versions = ovo_base.obj_tree_get_versions('ComputeNode')\n primitive = compute.obj_to_primitive(target_version='1.5',\n version_manifest=versions)\n self.assertNotIn('supported_hv_specs', primitive['nova_object.data'])\n\n primitive = 
compute.obj_to_primitive(target_version='1.6',\n version_manifest=versions)\n self.assertIn('supported_hv_specs', primitive['nova_object.data'])\n\n @mock.patch('nova.objects.service.Service.get_by_compute_host')\n def test_compat_host(self, mock_get_compute):\n compute = compute_node.ComputeNode(host='fake-host')\n primitive = compute.obj_to_primitive(target_version='1.6')\n self.assertNotIn('host', primitive['nova_object.data'])\n\n primitive = compute.obj_to_primitive(target_version='1.7')\n self.assertIn('host', primitive['nova_object.data'])\n\n def test_compat_pci_device_pools(self):\n compute = compute_node.ComputeNode()\n compute.pci_device_pools = fake_pci_device_pools.fake_pool_list\n versions = ovo_base.obj_tree_get_versions('ComputeNode')\n primitive = compute.obj_to_primitive(target_version='1.8',\n version_manifest=versions)\n self.assertNotIn('pci_device_pools', primitive['nova_object.data'])\n\n primitive = compute.obj_to_primitive(target_version='1.9',\n version_manifest=versions)\n self.assertIn('pci_device_pools', primitive['nova_object.data'])\n\n @mock.patch('nova.objects.Service.get_by_compute_host')\n def test_compat_service_id(self, mock_get):\n mock_get.return_value = objects.Service(id=1)\n compute = objects.ComputeNode(host='fake-host', service_id=None)\n primitive = compute.obj_to_primitive(target_version='1.12')\n self.assertEqual(1, primitive['nova_object.data']['service_id'])\n\n @mock.patch('nova.objects.Service.get_by_compute_host')\n def test_compat_service_id_compute_host_not_found(self, mock_get):\n mock_get.side_effect = exception.ComputeHostNotFound(host='fake-host')\n compute = objects.ComputeNode(host='fake-host', service_id=None)\n primitive = compute.obj_to_primitive(target_version='1.12')\n self.assertEqual(-1, primitive['nova_object.data']['service_id'])\n\n def test_update_from_virt_driver(self):\n # copy in case the update has a side effect\n resources = copy.deepcopy(fake_resources)\n # Emulate the ironic driver which adds a uuid field.\n resources['uuid'] = uuidsentinel.node_uuid\n compute = compute_node.ComputeNode()\n compute.update_from_virt_driver(resources)\n expected = fake_compute_with_resources.obj_clone()\n expected.uuid = uuidsentinel.node_uuid\n self.assertTrue(base.obj_equal_prims(expected, compute))\n\n def test_update_from_virt_driver_uuid_already_set(self):\n \"\"\"Tests update_from_virt_driver where the compute node object already\n has a uuid value so an error is raised.\n \"\"\"\n # copy in case the update has a side effect\n resources = copy.deepcopy(fake_resources)\n # Emulate the ironic driver which adds a uuid field.\n resources['uuid'] = uuidsentinel.node_uuid\n compute = compute_node.ComputeNode(uuid=uuidsentinel.something_else)\n self.assertRaises(exception.InvalidNodeConfiguration,\n compute.update_from_virt_driver, resources)\n\n def test_update_from_virt_driver_missing_field(self):\n # NOTE(pmurray): update_from_virt_driver does not require\n # all fields to be present in resources. 
Validation of the\n # resources data structure would be done in a different method.\n resources = copy.deepcopy(fake_resources)\n del resources['vcpus']\n compute = compute_node.ComputeNode()\n compute.update_from_virt_driver(resources)\n expected = fake_compute_with_resources.obj_clone()\n del expected.vcpus\n self.assertTrue(base.obj_equal_prims(expected, compute))\n\n def test_update_from_virt_driver_extra_field(self):\n # copy in case the update has a side effect\n resources = copy.deepcopy(fake_resources)\n resources['extra_field'] = 'nonsense'\n compute = compute_node.ComputeNode()\n compute.update_from_virt_driver(resources)\n expected = fake_compute_with_resources\n self.assertTrue(base.obj_equal_prims(expected, compute))\n\n def test_update_from_virt_driver_bad_value(self):\n # copy in case the update has a side effect\n resources = copy.deepcopy(fake_resources)\n resources['vcpus'] = 'nonsense'\n compute = compute_node.ComputeNode()\n self.assertRaises(ValueError,\n compute.update_from_virt_driver, resources)\n\n def test_compat_allocation_ratios(self):\n compute = compute_node.ComputeNode(\n cpu_allocation_ratio=1.0, ram_allocation_ratio=1.0)\n primitive = compute.obj_to_primitive(target_version='1.13')\n self.assertNotIn('cpu_allocation_ratio', primitive['nova_object.data'])\n self.assertNotIn('ram_allocation_ratio', primitive['nova_object.data'])\n\n primitive = compute.obj_to_primitive(target_version='1.14')\n self.assertIn('cpu_allocation_ratio', primitive['nova_object.data'])\n self.assertIn('ram_allocation_ratio', primitive['nova_object.data'])\n\n def test_compat_disk_allocation_ratio(self):\n compute = compute_node.ComputeNode(disk_allocation_ratio=1.0)\n primitive = compute.obj_to_primitive(target_version='1.15')\n self.assertNotIn(\n 'disk_allocation_ratio', primitive['nova_object.data'])\n\n primitive = compute.obj_to_primitive(target_version='1.16')\n self.assertIn('disk_allocation_ratio', primitive['nova_object.data'])\n\n @mock.patch('nova.db.main.api.compute_node_update')\n def test_compat_allocation_ratios_old_compute(self, mock_update):\n \"\"\"Tests the scenario that allocation ratios are overridden in config\n and the legacy compute node record from the database has None set for\n the allocation ratio values. 
The result is that the migrated record\n allocation ratios should reflect the config overrides.\n \"\"\"\n self.flags(cpu_allocation_ratio=2.0, ram_allocation_ratio=3.0,\n disk_allocation_ratio=0.9)\n compute_dict = fake_compute_node.copy()\n # old computes don't provide allocation ratios to the table\n compute_dict['cpu_allocation_ratio'] = None\n compute_dict['ram_allocation_ratio'] = None\n compute_dict['disk_allocation_ratio'] = None\n cls = objects.ComputeNode\n compute = cls._from_db_object(self.context, cls(), compute_dict)\n\n self.assertEqual(2.0, compute.cpu_allocation_ratio)\n self.assertEqual(3.0, compute.ram_allocation_ratio)\n self.assertEqual(0.9, compute.disk_allocation_ratio)\n\n mock_update.assert_called_once_with(\n self.context, 123, {'cpu_allocation_ratio': 2.0,\n 'ram_allocation_ratio': 3.0,\n 'disk_allocation_ratio': 0.9})\n\n @mock.patch('nova.db.main.api.compute_node_update')\n def test_compat_allocation_ratios_zero_conf(self, mock_update):\n \"\"\"Tests that the override allocation ratios are set to 0.0 for\n whatever reason (maybe an old nova.conf sample file is being used)\n and the legacy compute node record has None for allocation ratios,\n so the resulting data migration makes the record allocation ratios\n use the CONF.initial_*_allocation_ratio values.\n \"\"\"\n self.flags(cpu_allocation_ratio=0.0, ram_allocation_ratio=0.0,\n disk_allocation_ratio=0.0)\n compute_dict = fake_compute_node.copy()\n # the computes provide allocation ratios None\n compute_dict['cpu_allocation_ratio'] = None\n compute_dict['ram_allocation_ratio'] = None\n compute_dict['disk_allocation_ratio'] = None\n cls = objects.ComputeNode\n compute = cls._from_db_object(self.context, cls(), compute_dict)\n\n self.assertEqual(\n CONF.initial_cpu_allocation_ratio, compute.cpu_allocation_ratio)\n self.assertEqual(\n CONF.initial_ram_allocation_ratio, compute.ram_allocation_ratio)\n self.assertEqual(\n CONF.initial_disk_allocation_ratio, compute.disk_allocation_ratio)\n\n mock_update.assert_called_once_with(\n self.context, 123, {'cpu_allocation_ratio': 4.0,\n 'ram_allocation_ratio': 1.0,\n 'disk_allocation_ratio': 1.0})\n\n @mock.patch('nova.db.main.api.compute_node_update')\n def test_compat_allocation_ratios_None_conf_zero_values(self, mock_update):\n \"\"\"Tests the scenario that the CONF.*_allocation_ratio overrides are\n left to the default (None) and the compute node record allocation\n ratio values in the DB are 0.0, so they will be migrated to the\n CONF.initial_*_allocation_ratio values.\n \"\"\"\n # the CONF.x_allocation_ratio is None by default\n compute_dict = fake_compute_node.copy()\n # the computes provide allocation ratios 0.0\n compute_dict['cpu_allocation_ratio'] = 0.0\n compute_dict['ram_allocation_ratio'] = 0.0\n compute_dict['disk_allocation_ratio'] = 0.0\n cls = objects.ComputeNode\n compute = cls._from_db_object(self.context, cls(), compute_dict)\n\n self.assertEqual(\n CONF.initial_cpu_allocation_ratio, compute.cpu_allocation_ratio)\n self.assertEqual(\n CONF.initial_ram_allocation_ratio, compute.ram_allocation_ratio)\n self.assertEqual(\n CONF.initial_disk_allocation_ratio, compute.disk_allocation_ratio)\n\n mock_update.assert_called_once_with(\n self.context, 123, {'cpu_allocation_ratio': 4.0,\n 'ram_allocation_ratio': 1.0,\n 'disk_allocation_ratio': 1.0})\n\n @mock.patch('nova.db.main.api.compute_node_update')\n def test_compat_allocation_ratios_None_conf_None_values(self, mock_update):\n \"\"\"Tests the scenario that the override CONF.*_allocation_ratio options\n 
are the default values (None), the compute node record from the DB has\n None values for allocation ratios, so the resulting migrated record\n will have the CONF.initial_*_allocation_ratio values.\n \"\"\"\n # the CONF.x_allocation_ratio is None by default\n compute_dict = fake_compute_node.copy()\n # # the computes provide allocation ratios None\n compute_dict['cpu_allocation_ratio'] = None\n compute_dict['ram_allocation_ratio'] = None\n compute_dict['disk_allocation_ratio'] = None\n cls = objects.ComputeNode\n compute = cls._from_db_object(self.context, cls(), compute_dict)\n\n self.assertEqual(\n CONF.initial_cpu_allocation_ratio, compute.cpu_allocation_ratio)\n self.assertEqual(\n CONF.initial_ram_allocation_ratio, compute.ram_allocation_ratio)\n self.assertEqual(\n CONF.initial_disk_allocation_ratio, compute.disk_allocation_ratio)\n\n mock_update.assert_called_once_with(\n self.context, 123, {'cpu_allocation_ratio': 4.0,\n 'ram_allocation_ratio': 1.0,\n 'disk_allocation_ratio': 1.0})\n\n def test_get_all_by_not_mapped(self):\n for mapped in (1, 0, 1, 3):\n compute = fake_compute_with_resources.obj_clone()\n compute._context = self.context\n compute.mapped = mapped\n compute.create()\n nodes = compute_node.ComputeNodeList.get_all_by_not_mapped(\n self.context, 2)\n self.assertEqual(3, len(nodes))\n self.assertEqual([0, 1, 1], sorted([x.mapped for x in nodes]))\n\n\nclass TestComputeNodeObject(test_objects._LocalTest,\n _TestComputeNodeObject):\n pass\n\n\nclass TestRemoteComputeNodeObject(test_objects._RemoteTest,\n _TestComputeNodeObject):\n pass\n","repo_name":"openstack/nova","sub_path":"nova/tests/unit/objects/test_compute_node.py","file_name":"test_compute_node.py","file_ext":"py","file_size_in_byte":33428,"program_lang":"python","lang":"en","doc_type":"code","stars":2991,"dataset":"github-code","pt":"88"} +{"seq_id":"21165893420","text":"\n# coding: utf-8\n\n# In[15]:\n\n\nimport requests\nimport bs4\nfrom bs4 import BeautifulSoup as soup\n\nurl = requests.get('http://shop.abstraxjingga.com/webshaper/store/bestSellers.asp')\nsoup = soup(url.content, 'html.parser')\n\n\n# In[2]:\n\n\nnew_arrival = soup.find(id=\"col1\")\n#new_arrival = soup.findAll(\"div\", {\"class\": \"newProductsGrid\"})\nitem_name = new_arrival.find_all(class_=\"prodItemName\")\nbest = item_name[0:3]\n\n\n# In[3]:\n\n\nprint(best)\n\n\n# In[4]:\n\n\nfindcat = soup.find(id=\"col2Left\").get_text()\n\n\n# In[5]:\n\n\nprint(findcat)\n\n\n# In[6]:\n\n\nfindcat = soup.find(id=\"col2Left\")\nfind_second_cat = findcat.find(class_=\"categoryList1 cat-id-82\").get_text()\n\n\n# In[7]:\n\n\nprint(find_second_cat)\n\n\n# In[16]:\n\n#UPDATE 3 may\n\nnew_arrival = soup.findAll(\"div\", {\"class\": \"prodItemName\"})\nnew_arrival2 = soup.findAll(\"div\", {\"class\": \"prodItemPrice\"})\n \nfor x in range (len(new_arrival)):\n name=new_arrival[x].get_text()\n price=new_arrival2[x].get_text()\n print(name+\" and price \"+price)\n\n","repo_name":"amzar96/Web-Scrapping-BeautifulSoup4","sub_path":"WebScrap.py","file_name":"WebScrap.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"18057081938","text":"import pandas as pd\nfrom scrapy.crawler import CrawlerProcess\n\nfrom pepper_scraper.pepper_scraper.spiders.pepper_url_spider import (\n PepperSpider,\n)\n\n\ndef run_scraper():\n scrapped_data = pd.DataFrame(columns=PepperSpider.field_names)\n\n process = CrawlerProcess(\n {\n \"USER_AGENT\": \"Mozilla/5.0 (compatible; 
Googlebot/2.1; +http://www.google.com/bot.html)\",\n            \"LOG_LEVEL\": \"INFO\",\n            \"DOWNLOAD_DELAY\": 1,\n        }\n    )\n    process.crawl(PepperSpider)\n    process.start()\n\n    scrapped_data = pd.DataFrame(PepperSpider.results)\n    scrapped_data.to_csv(\"pepper_scrapped_data.csv\", index=False)\n\n\nif __name__ == \"__main__\":\n    run_scraper()\n","repo_name":"AdamJJ00/web-scrapping-pepper","sub_path":"pepper_scraper/scrapy/pepper_scraper/run_spider.py","file_name":"run_spider.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"41519434097","text":"import json\nfrom random import randint\n\nnVehicles = 3\nnWorkers = 4\nnTrips = 10\nupperBoundBenefit = 70\nlowerBoundBenefit = 30\nupperBoundDepretiation = 30\nupperBoundDuration = 3600\nlowerBoundDuration = 900\n\n\ntrips = [randint(lowerBoundDuration, upperBoundDuration) for i in range(nTrips)]\n\nbenefit = [[randint(lowerBoundBenefit, upperBoundBenefit) for i in range(nTrips)] for j in range(nVehicles)]\n\ndepreciation = [[randint(0, upperBoundDepretiation) for i in range(nVehicles)] for j in range(nWorkers)]\n\nworkerHours = [[randint(0, 57600)] for i in range(nWorkers)]\nfor hours in workerHours:\n    hours.append(hours[0] + 28800)\n\n\n\nwith open('data.json', 'w') as jsonF:\n    json.dump({\n        'numberTrips': nTrips,\n        'numberWorkers': nWorkers,\n        'numberVehicles': nVehicles,\n        'durations': trips,\n        'benefit': benefit,\n        'depreciation': depreciation,\n        'workerHours': workerHours\n    }, jsonF)","repo_name":"Marcoshsc/AntColonyRoutingScheduling","sub_path":"randomData.py","file_name":"randomData.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"39822001057","text":"import collections\n\n\nnums = [1,3,-1,-3,5,3,6,7]\nk = int(input())\nanswer = []\nnums_len = len(nums)\nfor i in range(nums_len - 2):\n    current_nums = nums[i:i+k] # slice의 마지막 i+3이면 2개의 숫자가 들어감. \n    max_num = max(current_nums)\n    answer.append(max_num)\n\nprint(answer)\n\ndef maxSlidingWindow(self, nums: list[int], k: int):\n    results = []\n    window = collections.deque()\n    current_max = float('-inf') # 마이너스 무한대\n    for i, v in enumerate(nums):\n        window.append(v)\n        if i < k - 1:\n            continue\n\n        if current_max == float('-inf'):\n            current_max = max(window) # 처음 3개에 대한 max만\n        elif v > current_max:\n            current_max = v\n        \n        results.append(current_max)\n\n        if current_max == window.popleft():\n            current_max = float('-inf')\n\n    return results","repo_name":"develop-sell/AlgorithmStudy","sub_path":"AlgorithmInterview/sliding_window/571.py","file_name":"571.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"28801770531","text":"###########################################################################################\n# Runtime: O(MN)\n# Number of rows(M) x expected numbers(N)\n# Space: O(N)\n# We 
need to store the expected numbers in list\n############################################################################################\nclass Solution:\n def isToeplitzMatrix(self, matrix: List[List[int]]) -> bool:\n # Validate Input\n if not matrix or not matrix[0]:\n return False \n \n # Create a deque tracking the expected values for the next row\n expected = matrix[0]\n # We only care about the elements before last element\n expected.pop()\n \n # From the second row, pop out the last element of the expected numbers and compare it with the target row[1:]\n for row in matrix[1:]:\n # Compare row with expected numbers, invalidate it as soon as we find the numbers are not the same (O(N))\n if row[1:] != expected:\n return False\n else:\n # Pop the last element from row, use it as the expected numbers for the next iteration\n row.pop()\n expected = row\n # If we've reached here, all diagonals aligned\n return True\n","repo_name":"AnasImloul/Leetcode-Solutions","sub_path":"scripts/algorithms/T/Toeplitz Matrix/Toeplitz Matrix.py","file_name":"Toeplitz Matrix.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":334,"dataset":"github-code","pt":"88"} +{"seq_id":"39295810298","text":"\"\"\"Get_env.\"\"\"\nfrom typing import Any\n\nfrom spiffworkflow_backend.models.group import GroupModel\nfrom spiffworkflow_backend.models.group import GroupNotFoundError\nfrom spiffworkflow_backend.models.script_attributes_context import (\n ScriptAttributesContext,\n)\nfrom spiffworkflow_backend.scripts.script import Script\n\n\nclass GetGroupMembers(Script):\n \"\"\"GetGroupMembers.\"\"\"\n\n @staticmethod\n def requires_privileged_permissions() -> bool:\n \"\"\"We have deemed this function safe to run without elevated permissions.\"\"\"\n return False\n\n def get_description(self) -> str:\n \"\"\"Get_description.\"\"\"\n return \"\"\"Return the list of usernames of the users in the given group.\"\"\"\n\n def run(\n self,\n script_attributes_context: ScriptAttributesContext,\n *args: Any,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Run.\"\"\"\n group_identifier = args[0]\n group = GroupModel.query.filter_by(identifier=group_identifier).first()\n if group is None:\n raise GroupNotFoundError(\n \"Script 'get_group_members' could not find group with identifier\"\n f\" '{group_identifier}'.\"\n )\n\n usernames = [u.username for u in group.users]\n return usernames\n","repo_name":"sartography/spiffworkflow-backend","sub_path":"src/spiffworkflow_backend/scripts/get_group_members.py","file_name":"get_group_members.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"88"} +{"seq_id":"37061751710","text":"from typing import Optional\nfrom typing import Union\n\nfrom pyro import poutine\nfrom pyro.infer.autoguide import AutoLowRankMultivariateNormal\nfrom pyro.infer.autoguide import AutoNormal\nfrom pyro.infer.autoguide.guides import AutoGuideList\nfrom scvi.module.base import PyroBaseModuleClass\n\nfrom pyrovelocity._velocity_model import VelocityModelAuto\n\n\nclass VelocityModule(PyroBaseModuleClass):\n \"\"\"\n VelocityModule is an scvi-tools pyro module that combines the VelocityModelAuto and pyro AutoGuideList classes.\n\n Args:\n num_cells (int): Number of cells.\n num_genes (int): Number of genes.\n model_type (str, optional): Model type. Default is \"auto\".\n guide_type (str, optional): Guide type. Default is \"velocity_auto\".\n likelihood (str, optional): Likelihood type. 
Default is \"Poisson\".\n shared_time (bool, optional): If True, a shared time parameter will be used. Default is True.\n t_scale_on (bool, optional): If True, scale time parameter. Default is False.\n plate_size (int, optional): Size of the plate set. Default is 2.\n latent_factor (str, optional): Latent factor. Default is \"none\".\n latent_factor_operation (str, optional): Latent factor operation mode. Default is \"selection\".\n latent_factor_size (int, optional): Size of the latent factor. Default is 10.\n inducing_point_size (int, optional): Inducing point size. Default is 0.\n include_prior (bool, optional): If True, include prior in the model. Default is False.\n use_gpu (int, optional): GPU device index. Default is 0.\n num_aux_cells (int, optional): Number of auxiliary cells. Default is 0.\n only_cell_times (bool, optional): If True, only model cell times. Default is True.\n decoder_on (bool, optional): If True, use the decoder. Default is False.\n add_offset (bool, optional): If True, add offset to the model. Default is True.\n correct_library_size (Union[bool, str], optional): Library size correction method. Default is True.\n cell_specific_kinetics (Optional[str], optional): Cell-specific kinetics method. Default is None.\n kinetics_num (Optional[int], optional): Number of kinetics. Default is None.\n **initial_values: Initial values for the model parameters.\n\n Examples:\n >>> from scvi.module.base import PyroBaseModuleClass\n >>> from pyrovelocity._velocity_module import VelocityModule\n >>> num_cells = 10\n >>> num_genes = 20\n >>> velocity_module1 = VelocityModule(\n ... num_cells, num_genes, model_type=\"auto\",\n ... guide_type=\"auto_t0_constraint\", add_offset=False\n ... )\n -----------\n auto\n auto_t0_constraint\n >>> type(velocity_module1.model)\n \n >>> type(velocity_module1.guide)\n \n >>> velocity_module2 = VelocityModule(\n ... num_cells, num_genes, model_type=\"auto\",\n ... guide_type=\"auto\", add_offset=True\n ... 
)\n -----------\n auto\n auto\n >>> type(velocity_module2.model)\n \n >>> type(velocity_module2.guide)\n \n \"\"\"\n\n def __init__(\n self,\n num_cells: int,\n num_genes: int,\n model_type: str = \"auto\",\n guide_type: str = \"velocity_auto\",\n likelihood: str = \"Poisson\",\n shared_time: bool = True,\n t_scale_on: bool = False,\n plate_size: int = 2,\n latent_factor: str = \"none\",\n latent_factor_operation: str = \"selection\",\n latent_factor_size: int = 10,\n inducing_point_size: int = 0,\n include_prior: bool = False,\n use_gpu: int = 0,\n num_aux_cells: int = 0,\n only_cell_times: bool = True,\n decoder_on: bool = False,\n add_offset: bool = True,\n correct_library_size: Union[bool, str] = True,\n cell_specific_kinetics: Optional[str] = None,\n kinetics_num: Optional[int] = None,\n **initial_values\n ) -> None:\n super().__init__()\n self.num_cells = num_cells\n self.num_genes = num_genes\n self.model_type = model_type\n self.guide_type = guide_type\n self._model = None\n self.plate_size = plate_size\n self.num_aux_cells = num_aux_cells\n self.only_cell_times = only_cell_times\n print(\"-----------\")\n print(self.model_type)\n print(self.guide_type)\n\n self.cell_specific_kinetics = cell_specific_kinetics\n\n self._model = VelocityModelAuto(\n self.num_cells,\n self.num_genes,\n likelihood,\n shared_time,\n t_scale_on,\n self.plate_size,\n latent_factor,\n latent_factor_operation=latent_factor_operation,\n latent_factor_size=latent_factor_size,\n include_prior=include_prior,\n num_aux_cells=num_aux_cells,\n only_cell_times=self.only_cell_times,\n decoder_on=decoder_on,\n add_offset=add_offset,\n correct_library_size=correct_library_size,\n guide_type=self.guide_type,\n cell_specific_kinetics=self.cell_specific_kinetics,\n **initial_values\n )\n\n guide = AutoGuideList(self._model, create_plates=self._model.create_plates)\n guide.append(\n AutoNormal(\n poutine.block(\n self._model,\n expose=[\n \"cell_time\",\n \"u_read_depth\",\n \"s_read_depth\",\n \"kinetics_prob\",\n \"kinetics_weights\",\n ],\n ),\n init_scale=0.1,\n )\n )\n\n if add_offset:\n guide.append(\n AutoLowRankMultivariateNormal(\n poutine.block(\n self._model,\n expose=[\n \"alpha\",\n \"beta\",\n \"gamma\",\n \"dt_switching\",\n \"t0\",\n \"u_scale\",\n \"s_scale\",\n \"u_offset\",\n \"s_offset\",\n ],\n ),\n rank=10,\n init_scale=0.1,\n )\n )\n else:\n guide.append(\n AutoLowRankMultivariateNormal(\n poutine.block(\n self._model,\n expose=[\n \"alpha\",\n \"beta\",\n \"gamma\",\n \"dt_switching\",\n \"t0\",\n \"u_scale\",\n \"s_scale\",\n ],\n ),\n rank=10,\n init_scale=0.1,\n )\n )\n self._guide = guide\n\n @property\n def model(self) -> VelocityModelAuto:\n return self._model\n\n @property\n def guide(self) -> AutoGuideList:\n return self._guide\n","repo_name":"pinellolab/pyrovelocity","sub_path":"pyrovelocity/_velocity_module.py","file_name":"_velocity_module.py","file_ext":"py","file_size_in_byte":7463,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"88"} +{"seq_id":"14620991746","text":"\"\"\" Class cvcCalcMinimum provides methods for calculation of minimum over time values\n\n Input arguments:\n input_uids[0] -- data of input values\n input_uids[1] -- module parameters:\n timeMin -- string, allowed values:\n 'day' -- daily minimum\n 'segment' -- segment minimum\n 'data' -- whole data minimum\n default value: 'data'\n Output arguments:\n output_uids[0] -- minimum values, data array of size:\n [days, lats, lons] -- if timeMin == 'day'\n [segments, lats, lons] 
-- if timeMin == 'segment'\n [lat, lons] -- if timeMin == 'data'\n\"\"\"\n\nfrom core.base.dataaccess import DataAccess\nfrom core.mod.calc.calcbasicstat import CalcBasicStat\n\nCALC_MODE = 'timeMin'\n\nclass cvcCalcMinimum(CalcBasicStat):\n \"\"\" Performs calculation of time averaged values.\n\n \"\"\"\n\n def __init__(self, data_helper: DataAccess):\n super().__init__(data_helper)\n self._data_helper = data_helper\n\n def run(self):\n \"\"\" Main method of the class. Reads data arrays, process them and returns results. \"\"\"\n\n self.logger.info('Started!')\n\n self._run(CALC_MODE)\n\n self.logger.info('Finished!')\n","repo_name":"garrichello/core","sub_path":"core/mod/calc/calcminimum.py","file_name":"calcminimum.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"5450994257","text":"'''\nBubble sort algorithm swaps elements adjacent. \nBig O notation is O(n^2)\nfirst for loop runs thrugh array n number of times\nsecond for loop inside the 1st for loop runs n-1 times\n'''\n\nlist1 = [9, 5, 8, 1, 6, 3, 2, 0]\ndef bubbleSort(arr):\n count = 0\n for i in range(len(arr)):\n for j in range(len(arr) - i- 1):\n count +=1\n if arr[j] > arr[j+1]:\n temp = arr[j]\n arr[j] = arr[j+1]\n arr[j+1] = temp\n #or we can swap by saying\n # arr[j], arr[j+1] = arr[j+1], arr[j]\n \n print(\"Count =\", str(count))\n\n\n#testing bubble sort function\nprint(\"Pre-Sort: \" + str(list1))\nbubbleSort(list1)\nprint(\"Post-Sort: \" + str(list1))\n","repo_name":"as0113-dev/Python-Data-Structure","sub_path":"bubbleSort.py","file_name":"bubbleSort.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"3998367213","text":"import pytest\n\nfrom tests import supported_k8s_versions\nfrom tests.chart_tests.helm_template_generator import render_chart\n\n\n@pytest.mark.parametrize(\n \"kube_version\",\n supported_k8s_versions,\n)\nclass TestKibana:\n def test_kibana_index_defaults(self, kube_version):\n \"\"\"Test kibana Service with index defaults.\"\"\"\n docs = render_chart(\n kube_version=kube_version,\n values={},\n show_only=[\n \"charts/kibana/templates/kibana-default-index-cronjob.yaml\",\n ],\n )\n assert len(docs) == 1\n doc = docs[0]\n assert doc[\"kind\"] == \"Job\"\n assert doc[\"apiVersion\"] == \"batch/v1\"\n assert doc[\"metadata\"][\"name\"] == \"release-name-kibana-default-index\"\n assert (\n \"fluentd.*\"\n in doc[\"spec\"][\"template\"][\"spec\"][\"containers\"][0][\"command\"][2]\n )\n\n def test_kibana_index_with_logging_sidecar(self, kube_version):\n \"\"\"Test kibana Service with logging sidecar index.\"\"\"\n docs = render_chart(\n kube_version=kube_version,\n values={\"global\": {\"loggingSidecar\": {\"enabled\": True}}},\n show_only=[\n \"charts/kibana/templates/kibana-default-index-cronjob.yaml\",\n ],\n )\n\n assert len(docs) == 1\n doc = docs[0]\n assert doc[\"kind\"] == \"Job\"\n assert doc[\"apiVersion\"] == \"batch/v1\"\n assert doc[\"metadata\"][\"name\"] == \"release-name-kibana-default-index\"\n assert (\n \"vector.*\" in doc[\"spec\"][\"template\"][\"spec\"][\"containers\"][0][\"command\"][2]\n )\n\n def test_kibana_index_disabled(self, kube_version):\n \"\"\"Test kibana Service with index creation disabled.\"\"\"\n docs = render_chart(\n kube_version=kube_version,\n values={\"kibana\": {\"createDefaultIndex\": False}},\n show_only=[\n 
\"charts/kibana/templates/kibana-default-index-cronjob.yaml\",\n ],\n )\n\n assert len(docs) == 0\n\n def test_kibana_index_network_policy_enabled(self, kube_version):\n \"\"\"Test network policy for kibana index service.\"\"\"\n docs = render_chart(\n kube_version=kube_version,\n values={\"kibana\": {\"createDefaultIndex\": True}},\n show_only=[\n \"charts/kibana/templates/kibana-networkpolicy.yaml\",\n ],\n )\n\n assert len(docs) == 1\n doc = docs[0]\n assert \"NetworkPolicy\" == doc[\"kind\"]\n assert [\n {\n \"podSelector\": {\n \"matchLabels\": {\n \"component\": \"kibana-default-index\",\n \"release\": \"release-name\",\n \"tier\": \"logging\",\n }\n },\n }\n ] == [doc[\"spec\"][\"ingress\"][1][\"from\"][0]]\n\n assert [{\"port\": 5601, \"protocol\": \"TCP\"}] == doc[\"spec\"][\"ingress\"][1][\"ports\"]\n","repo_name":"astronomer/astronomer","sub_path":"tests/chart_tests/test_kibana.py","file_name":"test_kibana.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","stars":451,"dataset":"github-code","pt":"88"} +{"seq_id":"11488995158","text":"\nfrom bs4 import BeautifulSoup\n\ntry:\n from urlparse import urljoin\nexcept ImportError:\n from urllib.parse import urljoin\n\nimport requests\nimport json\nimport os\n\n\ndef tuple2dict(tup):\n di = {}\n for k, v in tup:\n di[k] = v\n return di\n\n\nif __name__ == '__main__':\n url = 'https://covid19.saglik.gov.tr/'\n page = requests.get(url)\n\n soup = BeautifulSoup(page.content, 'html.parser')\n results = soup.find_all(id=\"bg-logo\")\n elems_list = []\n\n json_file_path = os.path.join(os.getcwd(), 'datas', 'data.json')\n datas = {}\n try:\n with open(json_file_path, 'r') as json_file:\n datas = json.load(json_file)\n except Exception as e:\n print(\"Error: {}\".format(e))\n # print(datas)\n\n rs = results[0]\n tarih = rs.find(\"p\", class_=\"p3\").text + \"_\" + rs.find(\"p\", class_=\"p2\").text + \"_\" + rs.find(\"p\", class_=\"p1\").text\n\n li_elems_toplam = rs.find_all(\"li\", attrs={\"class\": \"d-flex justify-content-between baslik-k\"})\n li_elems_acik = rs.find_all(\"li\", attrs={\"class\": \"d-flex justify-content-between baslik-k-2 bg-acik\"})\n li_elems_koyu = rs.find_all(\"li\", attrs={\"class\": \"d-flex justify-content-between baslik-k-2 bg-koyu\"})\n\n li_elems = li_elems_toplam + li_elems_acik + li_elems_koyu\n for li_elem in li_elems:\n li_span_elems = li_elem.find_all(\"span\")\n elems_list.append((li_span_elems[0].text.replace(\"\\r\\n\", \"\").replace(\" \", \"\"),\n li_span_elems[1].text.replace(\"\\r\\n\", \"\").replace(\" \", \"\")))\n\n # elems_list.append((\"tarih\", tarih))\n elems_list.append((\"title\", rs.find(\"div\", class_='baslik-tablo').text.replace(\"\\n\", \"\")))\n datas[tarih] = tuple2dict(elems_list)\n with open(json_file_path, 'w') as outfile:\n outfile.write(json.dumps(datas, sort_keys=True, ensure_ascii=True, indent=4))\n","repo_name":"semihamakinist/covid19_web_scraping","sub_path":"covid19_saglik_gov_tr_2.py","file_name":"covid19_saglik_gov_tr_2.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"71167214369","text":"from datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport streamlit as st\nimport matplotlib.pyplot as plt\n\nfrom sqlalchemy.orm import sessionmaker\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\n\nfrom utils import 
get_table_download_link, hash_file_reference, FileReference\n\nfrom utils import (detect_outliers, detects_unbalanced_classes, conditional_entropy, \n binning, scaling, standardization, onehot_encoder, ordinal_encoder,\n over_sampling, under_sampling)\n\nfrom utils import (markdown_outliers, markdown_missing_values, markdown_class_desbalance, \n markdown_class_desbalance_v2, markdown_class_desbalance_v3, markdown_binning,\n markdown_scaling, markdown_standardization, markdown_onehot, markdown_ordinal)\n\n\nfrom db import database, save_to_database_ORM, query_database_ORM_last_number_workflow, LogOperation\n\n\n\nst.set_option('deprecation.showfileUploaderEncoding', False)\n\n\ndict_db = {\n\n 'name_operator': [\n 'Data Outlier Treatment', 'Data Missing Imputation', 'Column Selection', \n 'Data Normalization', 'Data Standardization', 'Data Discretization', \n 'Data Coding', 'Data Type Convert', 'Data Unified', \n 'Oversampling', 'Undersampling', 'Houldout'\n ],\n\n 'type_operator': ['Data Cleaning', 'Data Reduction', 'Data Sampling', 'Data Transformation', 'Data Partition'],\n\n 'function_operator': [\n 'DropOutlier', 'Imputation-1', 'Imputation0', \n 'ImputationMean', 'ImputationMedian', 'ImputationMode', \n 'ImputationUnknown', 'LabelEncoder', 'DropQualitativeColumn', \n 'DropQuantitativeColumn', 'IncludeQualitativeColumn', 'IncludeQuantitativeColumn', \n 'KBinsDiscratizer', 'MinMaxScaler', 'StandardScaler', \n 'OneHotEncoder', 'OrdinalEncoder', 'SMOTE', \n 'RandomUnderSampler', 'UnifiedDatabase', 'TrainTestSplit'\n ]\n\n}\n\nconn_db = database(is_table_log=True)\n\nlast_number_workflow = query_database_ORM_last_number_workflow(conn=conn_db)\n\n\ndef main():\n # -------------------------------- Sidebar -------------------------------\n st.sidebar.markdown('## Load dataset')\n\n select_type = st.sidebar.selectbox('Choose the file extension', options=[\n 'Select an option', 'csv', 'xlsx', 'database'\n ])\n\n \n sep_text_input = st.sidebar.text_input('Insert the selected file separator', value=',')\n encoding_text_input = st.sidebar.text_input('Enter the encoding of the selected file', value='None')\n\t\n file = st.sidebar.file_uploader('Uploader do arquivo', type=select_type)\n \n \n if select_type == 'database':\n user = st.sidebar.text_input('Inform the database user:')\n passwd = st.sidebar.text_input('Enter the password for the database:', type='password')\n db_ip = st.sidebar.text_input('Enter the IP address of the database:')\n db_name = st.sidebar.text_input('Enter the name of the database:')\n table_name = st.sidebar.text_input('Enter the name of the table:')\n\n\n # -------------------------- Main page content ----------------\n # Uploading the file data\n @st.cache(allow_output_mutation=True)\n def read_file_data(file):\n if file is not None:\n if select_type == 'csv':\n df = pd.read_csv(file, sep=sep_text_input, encoding=encoding_text_input)\n return df\n elif select_type == 'xlsx':\n df = pd.read_excel(file)\n return df\n\n\n df = read_file_data(file)\n\n\n if not isinstance(df, pd.DataFrame):\n if select_type == 'database':\n if user and passwd and db_ip and db_name and table_name:\n conn = database(db_user=user, db_passwd=passwd, db_ip=db_ip, db_name=db_name, is_table_log=False)\n df = pd.read_sql_table(table_name, conn)\n\n\n if df is not None:\n \n # 1. Análise Exploratória de Dados\n st.title(' Data preprocessing assistant for classification problems')\n\n st.markdown('
    '*2, unsafe_allow_html=True)\n\n database_name = st.text_input('Enter the name of the database:')\n \n exploration = pd.DataFrame({\n 'column': df.columns, 'type': df.dtypes, 'NA #': df.isna().sum(), 'NA %': (df.isna().sum() / df.shape[0]) * 100\n })\n\n st.markdown('
    ', unsafe_allow_html=True)\n \n st.markdown('### 1 - Exploratory Data Analysis')\n st.markdown('#### 1.1 - Dataset information')\n if st.checkbox('Display raw data'):\n st.markdown('
    ', unsafe_allow_html=True)\n value = st.slider('Choose the number of lines:',\n min_value=1, max_value=100, value=5)\n st.dataframe(df.head(value), width=900, height=600)\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n st.markdown('** Dataset dimension**')\n st.markdown(df.shape)\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n st.markdown('**Descriptive statistics of the quantitative columns**')\n st.dataframe(df.describe(), width=900, height=600)\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n st.markdown(\n '**Dataset information: Column name, Type, Numbers of NaNs (null) and Percentage of NaNs**')\n st.dataframe(exploration, width=900, height=600)\n\n \n st.markdown('
    ', unsafe_allow_html=True)\n st.markdown('#### 1.2 - Distribution of quantitative and qualitative columns')\n #st.markdown('
    ', unsafe_allow_html=True)\n \n if st.checkbox('Plot graph', key='21'):\n op6 = list(df.columns)\n op6.insert(0, 'Select an option')\n \n select_feature_quantitative = st.selectbox('Select a column', options=op6)\n \n if select_feature_quantitative not in 'Select an option':\n sns.countplot(y=select_feature_quantitative, data=df, orient='h')\n plt.title(str(select_feature_quantitative), fontsize=14)\n st.pyplot()\n else:\n pass\n \n \n \n # 2. Detect outliers \n st.markdown('
    ', unsafe_allow_html=True)\n \n st.markdown('### 2 - Data Cleaning')\n st.markdown('#### 2.1 - Detect and treat quantitative column outliers')\n \n op = list(df.select_dtypes(include=[np.number]).columns)\n op.insert(0, 'Select an option')\n \n select_boxplot = st.selectbox('Choose the column to plot a univariate boxplot:', options=op)\n \n if select_boxplot not in 'Select an option':\n if len(select_boxplot) > 0:\n colors = ['#B3F9C5']\n sns.boxplot(x=select_boxplot, data=df.select_dtypes(include=[np.number]), palette=colors)\n st.pyplot(dpi=100)\n else:\n st.markdown('**Boxplot chart - brief explanation:**')\n st.image('imgs/boxplot-information.png', width=700)\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n if st.checkbox('Explanation of the method used'):\n st.markdown(markdown_outliers)\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n is_remove_outliers_select = st.selectbox('Want to remove outliers?', options=(\n 'Select an option', 'Yes', 'No'\n ))\n \n outliers_drop = detect_outliers(df, 2, list(exploration[exploration['type'] != 'object']['column'].index))\n \n \n if is_remove_outliers_select in 'Yes':\n\n df_copy = df.copy()\n\n df = df.drop(outliers_drop, axis = 0).reset_index(drop=True) # removing the outliers from the base\n st.dataframe(df_copy.loc[outliers_drop])\n st.write(df.shape)\n st.success('Outliers successfully removed!')\n\n name_column_list_outliers = df.columns.tolist()\n for col in name_column_list_outliers:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][0], name_operator=dict_db['name_operator'][0], type_operator=dict_db['type_operator'][0], timestamp=datetime.now())\n\n\n # 3. Detect Missing values \n st.markdown('
    ', unsafe_allow_html=True)\n \n st.markdown('#### 2.2 - Detect and treat missing values')\n \n if st.checkbox('Missing values explanation'):\n \n st.markdown(markdown_missing_values)\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n percentual = st.slider(\n 'Enter a missing value percentage limit:', min_value=0, max_value=100)\n \n op7 = list(df.columns)\n op7.insert(0, 'Select an option')\n columns_missing_to_remove = st.multiselect('Inform the columns you want to remove because they contain a large volume of missing values:', options=op7)\n \n num_columns_list = list(exploration[(exploration['NA %'] > percentual) & (\n exploration['type'] != 'object')]['column']) #quantitativa\n \n cat_columns_list = list(exploration[(exploration['NA %'] > percentual) & (\n exploration['type'] == 'object')]['column']) #qualitative\n \n \n if columns_missing_to_remove:\n df = df.drop(list(columns_missing_to_remove), axis=1).reset_index(drop=True)\n \n if num_columns_list:\n num_columns_list = [num_col for num_col in num_columns_list if num_col not in columns_missing_to_remove]\n\n if len(num_columns_list) > 1:\n for col in num_columns_list:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][9], name_operator=dict_db['name_operator'][1], type_operator=dict_db['type_operator'][0], timestamp=datetime.now())\n else:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=num_columns_list, function_operator=dict_db['function_operator'][9], name_operator=dict_db['name_operator'][1], type_operator=dict_db['type_operator'][0], timestamp=datetime.now())\n\n \n if cat_columns_list:\n cat_columns_list = [cat_col for cat_col in cat_columns_list if cat_col not in columns_missing_to_remove]\n\n if len(cat_columns_list) > 1:\n for col in cat_columns_list:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][8], name_operator=dict_db['name_operator'][1], type_operator=dict_db['type_operator'][0], timestamp=datetime.now())\n else:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=cat_columns_list, function_operator=dict_db['function_operator'][8], name_operator=dict_db['name_operator'][1], type_operator=dict_db['type_operator'][0], timestamp=datetime.now())\n\n \n st.markdown('
    ', unsafe_allow_html=True)\n\n # ---------------------------- Quantitative Columns --------------------\n \n st.markdown('#### Imputation of quantitative data')\n \n st.markdown(num_columns_list)\n\n imputer = st.selectbox('Choose an imputation option:', options=(\n 'Select an option',\n 'Input with -1',\n 'Input with 0',\n 'Input with mean',\n 'Input with median',\n 'Input with mode',\n # 'Dropar'\n ))\n\n if imputer == 'Input with -1':\n df.fillna(-1, inplace=True)\n na_dict = { 'NA %' : df[exploration[(exploration['NA %'].drop(columns_missing_to_remove) > 0) & (exploration['type'] != 'object')]['column']].isna().sum() }\n df_no_missing_values = pd.DataFrame(na_dict)\n st.dataframe(df_no_missing_values.T)\n st.success('Values successfully filled!')\n\n name_column_list_imputer1 = df_no_missing_values.index.tolist()\n for col in name_column_list_imputer1:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][1], name_operator=dict_db['name_operator'][1], type_operator=dict_db['type_operator'][0], timestamp=datetime.now())\n\n\n elif imputer == 'Input with 0':\n df.fillna(0, inplace=True)\n na_dict = { 'NA %' : df[exploration[(exploration['NA %'].drop(columns_missing_to_remove) > 0) & (exploration['type'] != 'object')]['column']].isna().sum() }\n df_no_missing_values = pd.DataFrame(na_dict)\n st.dataframe(df_no_missing_values.T)\n st.success('Values successfully filled!')\n\n name_column_list_imputer0 = df_no_missing_values.index.tolist()\n for col in name_column_list_imputer0:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][2], name_operator=dict_db['name_operator'][1], type_operator=dict_db['type_operator'][0], timestamp=datetime.now())\n\n elif imputer == 'Input with mean':\n df.fillna(\n df[num_columns_list].mean(), inplace=True)\n na_dict = { 'NA %' : df[exploration[(exploration['NA %'].drop(columns_missing_to_remove) > 0) & (exploration['type'] != 'object')]['column']].isna().sum() }\n df_no_missing_values = pd.DataFrame(na_dict)\n st.dataframe(df_no_missing_values.T)\n st.success('Values successfully filled!')\n\n name_column_list_imputer_avg = df_no_missing_values.index.tolist()\n for col in name_column_list_imputer_avg:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][3], name_operator=dict_db['name_operator'][1], type_operator=dict_db['type_operator'][0], timestamp=datetime.now())\n\n elif imputer == 'Input with median':\n df.fillna(\n df[num_columns_list].median(), inplace=True)\n na_dict = { 'NA %' : df[exploration[(exploration['NA %'].drop(columns_missing_to_remove) > 0) & (exploration['type'] != 'object')]['column']].isna().sum() }\n df_no_missing_values = pd.DataFrame(na_dict)\n st.dataframe(df_no_missing_values.T)\n st.success('Values successfully filled!')\n\n name_column_list_imputer_median = df_no_missing_values.index.tolist()\n for col in name_column_list_imputer_median:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][4], name_operator=dict_db['name_operator'][1], type_operator=dict_db['type_operator'][0], timestamp=datetime.now())\n\n elif imputer == 'Input with mode':\n df.fillna(\n 
df[num_columns_list].mode().iloc[0], inplace=True)\n na_dict = { 'NA %' : df[exploration[(exploration['NA %'].drop(columns_missing_to_remove) > 0) & (exploration['type'] != 'object')]['column']].isna().sum() }\n df_no_missing_values = pd.DataFrame(na_dict)\n st.dataframe(df_no_missing_values.T)\n st.success('Values successfully filled!')\n\n name_column_list_imputer_moda = df_no_missing_values.index.tolist()\n for col in name_column_list_imputer_moda:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][5], name_operator=dict_db['name_operator'][1], type_operator=dict_db['type_operator'][0], timestamp=datetime.now())\n\n\n # ------------------------- Qualitative Columns ---------------------\n st.markdown('
    ', unsafe_allow_html=True)\n \n st.markdown('#### Imputation of qualitative data')\n\n st.markdown(cat_columns_list)\n\n cat_imputer = st.selectbox('Choose an imputation option:', options=(\n 'Select an option',\n 'Input with unknown',\n # 'Dropar'\n ))\n\n if cat_imputer in 'Input with unknown':\n df.fillna('unknown', inplace=True)\n na_dict = { 'NA %' : df[exploration[(exploration['NA %'].drop(columns_missing_to_remove) > 0) & (exploration['type'] == 'object')]['column']].isna().sum() }\n df_no_missing_values = pd.DataFrame(na_dict)\n st.dataframe(df_no_missing_values.T)\n st.success('Values successfully filled!')\n\n name_column_list_impute_unk = df_no_missing_values.index.tolist()\n for col in name_column_list_impute_unk:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][6], name_operator=dict_db['name_operator'][1], type_operator=dict_db['type_operator'][0], timestamp=datetime.now())\n\n\n # Separate quantitative and qualitative variables\n \n num_features = df.select_dtypes(include=[np.number]).copy()\n cat_features = df.select_dtypes(exclude=[np.number]).copy()\n \n \n # 3. Check if the classes are unbalanced \n st.markdown('
    ', unsafe_allow_html=True)\n \n st.markdown('### 3 - Check imbalance between classes')\n \n if st.checkbox('Unbalance explanation'):\n \n st.markdown(markdown_class_desbalance)\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n op1 = list(df.columns)\n op1.insert(0, 'Select an option')\n \n select_target_desbalance = st.selectbox('Enter the target column:', options=op1)\n \n if st.checkbox('Plot graph'):\n if select_target_desbalance not in 'select an option':\n sns.countplot(x=select_target_desbalance, data=df) # plots a countplot chart to check the distribution of classes \n plt.title('Target', fontsize=14)\n st.pyplot()\n \n if detects_unbalanced_classes(df, select_target_desbalance) < 20.0:\n st.markdown('
    ', unsafe_allow_html=True)\n st.success('Classes with similar distribution, in fact balanced.')\n else:\n st.markdown('
    ', unsafe_allow_html=True)\n st.warning('Classes with the possibility of being unbalanced. The treatment in section 7 - Data Sampling Correction is recommended.')\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n \n if df[select_target_desbalance].dtypes == 'object':\n \n st.warning('The target column is of the type qualitative - object. It is necessary to transform its type to quantitative.')\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n is_transformer_target_select = st.selectbox('Do you want to transform the target column to the quantitative type? (RECOMMENDED)', options=(\n 'Select an option', 'Yes', 'No'\n ))\n \n \n if is_transformer_target_select in 'Yes':\n encoder = LabelEncoder()\n df[select_target_desbalance] = encoder.fit_transform(df[select_target_desbalance])\n\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=str(select_target_desbalance), function_operator=dict_db['function_operator'][7], name_operator=dict_db['name_operator'][7], type_operator=dict_db['type_operator'][3], timestamp=datetime.now())\n\n \n \n if df[select_target_desbalance].dtypes != 'object':\n st.success('Successful transformation!')\n \n\n num_features[select_target_desbalance] = df[select_target_desbalance].copy()\n \n del cat_features[select_target_desbalance]\n \n else:\n pass\n \n \n else:\n st.error('Enter a column!')\n \n \n \n # 4 - Correlation between quantitative columns \n \n st.markdown('
    ', unsafe_allow_html=True)\n \n st.markdown('### 4 - Data Reduction - Feature Selection')\n \n st.markdown('#### 4.1 - Correlation between columns')\n select_corr = st.selectbox(' 4.1.1 - Enter the correlation method between quantitative columns you want to analyze:', options=(\n 'Select an option', 'pearson', 'kendall', 'spearman'\n ))\n\n if st.checkbox('Correlation method explanation'):\n st.markdown('''\n\t\t\t\t**Pearson's correlation**\n\t\t\t\t* Quantitative columns\n\t\t\t\t* Columns with normal distribution or sufficiently large sample\n\t\t\t\t* Preferable for linear type relationships\n\n\t\t\t\t**Correlação de Kerdell**\n\t\t\t\t* Ordinal scale columns \n\t\t\t\t* Preferable when having small samples\n \n\t\t\t\t**Correlação de Spearman**\n\t\t\t\t* Quantitative or ordinal scale columns\n\t\t\t\t* Use when columns are not normal\n\t\t\t\t* Preferable when there is no linear relationship \n\t\t\t''')\n \n st.markdown('
    ', unsafe_allow_html=True)\n\n if select_corr != 'Select an option':\n if df.shape[1] <= 30:\n plt.rcParams['figure.figsize'] = (10, 8)\n sns.heatmap(num_features.corr(method=select_corr), annot=True,\n linewidths=0.5, linecolor='black', cmap='Blues')\n st.pyplot(dpi=100)\n else:\n plt.rcParams['figure.figsize'] = (20, 10)\n sns.heatmap(num_features.corr(method=select_corr), annot=True,\n linewidths=0.5, linecolor='black', cmap='Blues')\n st.pyplot(dpi=100)\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n # st.markdown('#### 4.1.1 - Correlation between quantitative columns')\n\n cat_features_delete = []\n \n if st.checkbox('Quantitative columns', key='1'):\n if st.checkbox('I want to use all columns', key='2'):\n\n if len(num_features.columns.tolist()) > 1:\n for col in num_features.columns:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][11], name_operator=dict_db['name_operator'][2], type_operator=dict_db['type_operator'][1], timestamp=datetime.now())\n else:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=num_features.columns.tolist()[0], function_operator=dict_db['function_operator'][11], name_operator=dict_db['name_operator'][2], type_operator=dict_db['type_operator'][1], timestamp=datetime.now())\n\n\n st.success('All quantitative columns were selected!')\n else:\n \n num_fit_features_radio = st.radio('Do you want to include or exclude columns for preprocessing?', options=(\n 'Include', 'Exclude'\n ))\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n if num_fit_features_radio in 'Include':\n num_fit_features_add = st.multiselect(\n 'Select columns to include', options=list(df.select_dtypes(include=[np.number]).columns))\n num_features = num_features[num_fit_features_add]\n\n \n if len(num_fit_features_add) > 1:\n for col in num_fit_features_add:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][11], name_operator=dict_db['name_operator'][2], type_operator=dict_db['type_operator'][1], timestamp=datetime.now())\n else:\n if num_fit_features_add:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=num_fit_features_add[0], function_operator=dict_db['function_operator'][11], name_operator=dict_db['name_operator'][2], type_operator=dict_db['type_operator'][1], timestamp=datetime.now())\n\n \n st.success(f'Selected columns -> {list(num_features.columns)}')\n \n if num_fit_features_radio in 'Exclude':\n num_fit_features_delete = st.multiselect(\n 'Select columns to exclude', options=list(df.select_dtypes(include=[np.number]).columns)\n )\n num_features = num_features.drop(num_fit_features_delete, axis=1)\n\n\n if len(num_fit_features_delete) > 1:\n for col in num_fit_features_delete:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][9], name_operator=dict_db['name_operator'][2], type_operator=dict_db['type_operator'][1], timestamp=datetime.now())\n else:\n if num_fit_features_delete:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=num_fit_features_delete[0], function_operator=dict_db['function_operator'][9], name_operator=dict_db['name_operator'][2], type_operator=dict_db['type_operator'][1], timestamp=datetime.now())\n\n \n st.success(f'Available columns -> {list(num_features.columns)}')\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n st.markdown('#### 4.1.2 - The correlation between qualitative columns is based on the calculation of entropy')\n st.markdown('
    ', unsafe_allow_html=True)\n \n if st.checkbox('Qualitative columns', key='3'):\n if st.checkbox('I want to use all columns', key='4'):\n\n if len(cat_features.columns.tolist()) > 1:\n for col in cat_features.columns:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][10], name_operator=dict_db['name_operator'][2], type_operator=dict_db['type_operator'][1], timestamp=datetime.now())\n else:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=cat_features.columns.tolist()[0], function_operator=dict_db['function_operator'][10], name_operator=dict_db['name_operator'][2], type_operator=dict_db['type_operator'][1], timestamp=datetime.now())\n\n\n st.success('All qualitative columns have been selected!')\n else: \n op2 = list(df.columns)\n op2.insert(0, 'Select an option')\n \n select_cat_corr = st.selectbox(\n 'Enter the target column to calculate the correlation with the qualitative columns', options=op2)\n\n cat_corr = {}\n\n if select_cat_corr != 'Select an option':\n for col in cat_features.columns:\n cat_corr[col] = conditional_entropy(\n cat_features[col], df[select_cat_corr])\n\n series_cat_corr = pd.Series(cat_corr, name='correlation')\n st.dataframe(series_cat_corr)\n\n \n cat_fit_features_add_radio = st.radio('Do you want to include or exclude columns for preprocessing?', options=(\n 'Include', 'Exclude'\n ), key='1')\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n if cat_fit_features_add_radio in 'Include':\n cat_fit_features_add = st.multiselect(\n 'Select columns to include', options=list(cat_features.columns))\n \n cat_features = cat_features[cat_fit_features_add]\n\n if len(cat_fit_features_add) > 1:\n for col in cat_fit_features_add:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][10], name_operator=dict_db['name_operator'][2], type_operator=dict_db['type_operator'][1], timestamp=datetime.now())\n\n else:\n if cat_fit_features_add:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=cat_fit_features_add[0], function_operator=dict_db['function_operator'][10], name_operator=dict_db['name_operator'][2], type_operator=dict_db['type_operator'][1], timestamp=datetime.now())\n\n \n st.success(f'Selected columns -> {list(cat_features.columns)}')\n \n elif cat_fit_features_add_radio in 'Exclude':\n cat_fit_features_delete = st.multiselect(\n 'Select columns to exclude', options=list(cat_features.columns)\n )\n cat_features_delete.append(cat_fit_features_delete)\n \n cat_features = cat_features.drop(cat_fit_features_delete, axis=1)\n\n if len(cat_fit_features_delete) > 1:\n for col in cat_fit_features_delete:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][8], name_operator=dict_db['name_operator'][2], type_operator=dict_db['type_operator'][1], timestamp=datetime.now())\n\n else:\n if cat_fit_features_delete:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=cat_fit_features_delete[0], function_operator=dict_db['function_operator'][8], name_operator=dict_db['name_operator'][2], type_operator=dict_db['type_operator'][1], timestamp=datetime.now())\n\n \n st.success(f'Available columns -> {list(cat_features.columns)}')\n \n \n \n # 5 - Feature engineering \n st.markdown('
    ', unsafe_allow_html=True)\n \n st.markdown('### 5 - Data Transformation - Feature Engineering')\n \n op3 = list(num_features.columns)\n op3.insert(0, 'Select an option')\n \n select_target = st.selectbox('Enter the target column:', options=list(op3))\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n \n if select_target not in 'Select an option':\n \n \n if st.checkbox('Quantitative columns', key='5'):\n \n is_applied_binning = False\n \n if st.checkbox('Contnuous', key='6'):\n if st.checkbox('Discretization Explanation'): \n st.markdown(markdown_binning)\n \n n_bins_slider = st.slider('n_bins', min_value=2, max_value=20, value=5)\n encode_select = st.selectbox('encode', options=('onehot-dense', 'ordinal'))\n strategy_select = st.selectbox('strategy', options=('quantile', 'uniform', 'kmeans'))\n \n \n select_col_binning = st.multiselect('Inform the columns to apply discretization:', options=list(num_features.drop(select_target, axis=1).columns))\n list_col_binning = list(select_col_binning)\n st.markdown(list_col_binning)\n st.markdown('
    ', unsafe_allow_html=True)\n \n num_features[select_col_binning] = binning(num_features[select_col_binning], n_bins=n_bins_slider, encode=encode_select, strategy=strategy_select)\n \n is_applied_binning = True\n\n\n if len(select_col_binning) > 1:\n for col in select_col_binning:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][12], name_operator=dict_db['name_operator'][5], type_operator=dict_db['type_operator'][3], timestamp=datetime.now())\n\n else:\n if select_col_binning:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=select_col_binning[0], function_operator=dict_db['function_operator'][12], name_operator=dict_db['name_operator'][5], type_operator=dict_db['type_operator'][3], timestamp=datetime.now())\n\n \n if list_col_binning:\n st.success('Successful transformation!')\n \n if st.checkbox('Discreet and Continuous'):\n if st.checkbox('Normalization and Standardization Explanation'): \n st.markdown(markdown_scaling)\n st.markdown('
    ', unsafe_allow_html=True)\n st.markdown(markdown_standardization)\n \n select_method_var_quantitative = st.selectbox('Choose the method:', options=('Select an option ', 'Normalization', 'Standardization'))\n \n if select_method_var_quantitative in 'Normalization':\n if is_applied_binning:\n num_features = pd.concat([\n scaling(num_features.drop(select_col_binning, axis=1), select_target),\n num_features[select_col_binning]\n ], axis=1).reset_index(drop=True)\n\n if len(num_features.columns.tolist()) > 1:\n for col in num_features.columns:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][13], name_operator=dict_db['name_operator'][3], type_operator=dict_db['type_operator'][3], timestamp=datetime.now())\n\n else:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=num_features.columns.tolist()[0], function_operator=dict_db['function_operator'][13], name_operator=dict_db['name_operator'][3], type_operator=dict_db['type_operator'][3], timestamp=datetime.now())\n\n \n st.success('Successful transformation!')\n else:\n num_features = scaling(num_features, select_target)\n\n if len(num_features.columns.tolist()) > 1:\n for col in num_features.columns:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][13], name_operator=dict_db['name_operator'][3], type_operator=dict_db['type_operator'][3], timestamp=datetime.now())\n\n else:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=num_features.columns.tolist()[0], function_operator=dict_db['function_operator'][13], name_operator=dict_db['name_operator'][3], type_operator=dict_db['type_operator'][3], timestamp=datetime.now())\n\n st.success('Successful transformation!')\n \n if select_method_var_quantitative in 'Standardization':\n if is_applied_binning:\n num_features = pd.concat([\n standardization(num_features.drop(select_col_binning, axis=1), select_target),\n num_features[select_col_binning]\n ], axis=1).reset_index(drop=True)\n\n\n if len(num_features.columns.tolist()) > 1:\n for col in num_features.columns:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][14], name_operator=dict_db['name_operator'][4], type_operator=dict_db['type_operator'][3], timestamp=datetime.now())\n\n else:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=num_features.columns.tolist()[0], function_operator=dict_db['function_operator'][14], name_operator=dict_db['name_operator'][4], type_operator=dict_db['type_operator'][3], timestamp=datetime.now())\n\n \n st.success('Successful transformation!')\n else:\n num_features = standardization(num_features, select_target)\n\n if len(num_features.columns.tolist()) > 1:\n for col in num_features.columns:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][14], name_operator=dict_db['name_operator'][4], type_operator=dict_db['type_operator'][3], timestamp=datetime.now())\n\n else:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, 
name_dataset=str(database_name), name_column=num_features.columns.tolist()[0], function_operator=dict_db['function_operator'][14], name_operator=dict_db['name_operator'][4], type_operator=dict_db['type_operator'][3], timestamp=datetime.now())\n\n st.success('Successful transformation!')\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n \n if st.checkbox('Qualitative columns', key='7'):\n \n is_onehot_transform = False\n is_ordinal_transform = False\n \n no_preprocessed = []\n \n if st.checkbox('Nominal (OneHot Encoder)', key='8'):\n if st.checkbox('OneHot Encoder explanation'): \n st.markdown(markdown_onehot)\n st.markdown('
    ', unsafe_allow_html=True)\n \n select_cat_features_nominal = st.multiselect('Inform the columns to apply the encoding - OneHot Encoder:', options=list(cat_features.columns))\n list_cat_features_nominal = list(select_cat_features_nominal)\n st.markdown(list_cat_features_nominal)\n \n onehot_transform = onehot_encoder(cat_features[select_cat_features_nominal])\n \n is_onehot_transform = True\n\n if len(select_cat_features_nominal) > 1:\n for col in select_cat_features_nominal:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][15], name_operator=dict_db['name_operator'][6], type_operator=dict_db['type_operator'][3], timestamp=datetime.now())\n\n else:\n if select_cat_features_nominal:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=select_cat_features_nominal[0], function_operator=dict_db['function_operator'][15], name_operator=dict_db['name_operator'][6], type_operator=dict_db['type_operator'][3], timestamp=datetime.now())\n\n \n if list_cat_features_nominal:\n no_preprocessed.extend(list_cat_features_nominal)\n st.success('Successful transformation!')\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n if st.checkbox('Ordinal (Ordinal Encoder)', key='9'):\n if st.checkbox('Ordinal Encoder explanation'): \n st.markdown(markdown_ordinal)\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n select_cat_features_ordinal = st.multiselect('Inform the columns to apply the encoding - Ordinal Encoder:', options=list(cat_features.columns))\n list_cat_features_ordinal = list(select_cat_features_ordinal)\n st.markdown(list_cat_features_ordinal)\n \n ordinal_transform = ordinal_encoder(cat_features[select_cat_features_ordinal])\n \n is_ordinal_transform = True\n\n if len(select_cat_features_ordinal) > 1:\n for col in select_cat_features_ordinal:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][16], name_operator=dict_db['name_operator'][6], type_operator=dict_db['type_operator'][3], timestamp=datetime.now())\n else:\n if select_cat_features_ordinal:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=select_cat_features_ordinal[0], function_operator=dict_db['function_operator'][16], name_operator=dict_db['name_operator'][6], type_operator=dict_db['type_operator'][3], timestamp=datetime.now())\n\n \n if list_cat_features_ordinal:\n no_preprocessed.extend(list_cat_features_ordinal)\n st.success('Successful transformation!')\n \n if is_onehot_transform:\n cat_features = onehot_transform\n if is_ordinal_transform:\n cat_features = ordinal_transform\n if is_onehot_transform and is_ordinal_transform:\n cat_features = pd.concat([onehot_transform, ordinal_transform], axis=1).reset_index(drop=True)\n \n\n cat_cols = list(df.select_dtypes(include=np.object).columns)\n\n no_preprocessed = [no for no in no_preprocessed]\n \n col_no_preprocessed = [col for col in cat_cols if col not in no_preprocessed]\n \n if col_no_preprocessed:\n cat_features = pd.concat([cat_features, df[col_no_preprocessed]], axis=1).reset_index(drop=True)\n \n if cat_features_delete:\n cat_features = cat_features.drop(cat_features_delete[0], axis=1)\n \n \n st.markdown('
    ', unsafe_allow_html=True)\n \n \n \n # 8 - Generate preprocessed files (Trainee and test) / Single Base \n st.markdown('### 6 - Data Partition - Training and Testing or Single Base')\n \n \n is_select_partition = st.selectbox('Do you want to partition the dataset in Training and Testing?', options=('Select an option', 'No', 'Yes'))\n st.markdown('
    ', unsafe_allow_html=True)\n \n \n if is_select_partition in 'Select an option':\n pass\n else:\n if is_select_partition in 'No':\n op4 = list(num_features.columns)\n op4.insert(0, 'Select an option')\n \n select_target_partition = st.selectbox('Inform the target column to carry out the partitioning of the dataset:', options=list(op4))\n \n \n if select_target_partition is not 'Select an option':\n if select_target_partition not in cat_features:\n X = pd.concat([num_features.drop(select_target_partition, axis=1).reset_index(drop=True), cat_features], axis=1).reset_index(drop=True)\n \n y = num_features[select_target_partition].copy()\n \n unified_base = pd.concat([X, y], axis=1).reset_index(drop=True)\n \n \n if not unified_base.empty:\n st.success('Base disponível para download')\n \n \n is_completed_select = st.sidebar.selectbox('Completed all preprocessing operations?', options=('No', 'Yes'))\n \n \n if is_completed_select in 'Yes':\n bs4_unified = get_table_download_link(unified_base)\n \n st.sidebar.markdown(f'''\n \n \n \n ''', unsafe_allow_html=True)\n\n\n if len(unified_base.columns.tolist()) > 1:\n for col in unified_base.columns:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][19], name_operator=dict_db['name_operator'][8], type_operator=dict_db['type_operator'][4], timestamp=datetime.now())\n else:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=unified_base.columns.tolist()[0], function_operator=dict_db['function_operator'][19], name_operator=dict_db['name_operator'][8], type_operator=dict_db['type_operator'][4], timestamp=datetime.now())\n\n\n\n else:\n st.sidebar.warning('Completed preprocessing?')\n \n \n \n if is_select_partition in 'Yes':\n \n op5 = list(num_features.columns)\n op5.insert(0, 'Select an option')\n \n select_target_partition = st.selectbox('Enter the target column to generate the preprocessed dataset:', options=list(op5))\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n \n \n \n if select_target_partition != 'Select an option':\n if select_target_partition not in cat_features:\n X = pd.concat([num_features.drop(select_target_partition, axis=1).reset_index(drop=True), cat_features], axis=1).reset_index(drop=True)\n \n y = num_features[select_target_partition].copy()\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n \n select_test_size = st.slider('Inform the proportion of the size of the test base:', min_value=1, max_value=99, value=25)\n \n st.write(X.shape)\n st.write(y.shape)\n \n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=float(select_test_size) / 100.0)\n \n \n st.write(float(select_test_size) / 100.0)\n \n st.markdown('**Training**')\n st.write(X_train.shape)\n st.write(y_train.shape)\n \n st.markdown('**Testing**')\n st.write(X_test.shape)\n st.write(y_test.shape)\n\n \n st.markdown('
    ', unsafe_allow_html=True)\n \n \n # 7 - Correction of Data Sampling \n st.markdown('### 7 - Correction of Data Sampling')\n \n \n st.markdown('
    ', unsafe_allow_html=True)\n if st.checkbox('Data sampling correction explanation'): \n st.markdown(markdown_class_desbalance_v2)\n st.markdown('
    ', unsafe_allow_html=True)\n \n if st.checkbox('Explanation of the method to be used'):\n st.markdown(markdown_class_desbalance_v3)\n \n method_balance_select = st.selectbox('Choose the most appropriate method for your problem:', options=(\n 'Select an option', 'Oversampling', 'Undersampling'\n ))\n \n if method_balance_select in 'Oversampling':\n try:\n X_train, y_train = over_sampling(X_train, y_train)\n\n sampling_cols = X_train.columns.tolist() + [y.name]\n for col in sampling_cols:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][17], name_operator=dict_db['name_operator'][9], type_operator=dict_db['type_operator'][2], timestamp=datetime.now())\n\n st.success('Oversampling successfully applied!')\n except Exception as e:\n st.markdown(e) \n \n if method_balance_select in 'Undersampling':\n X_train, y_train = under_sampling(X_train, y_train)\n\n under_sampling_cols = X_train.columns.tolist() + [y.name]\n for col in under_sampling_cols:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][18], name_operator=dict_db['name_operator'][10], type_operator=dict_db['type_operator'][2], timestamp=datetime.now())\n \n st.success('Undersampling successfully applied!')\n \n \n train = pd.concat([X_train, y_train], axis=1).reset_index(drop=True)\n test = X_test\n \n is_completed_select = st.sidebar.selectbox('Completed all preprocessing operations?', options=('No', 'Yes'))\n \n \n if is_completed_select in 'Yes':\n bs4_train = get_table_download_link(train)\n \n st.sidebar.markdown(f'''\n \n \n \n ''', unsafe_allow_html=True)\n\n \n \n bs4_test = get_table_download_link(test)\n \n st.sidebar.markdown(f'''\n \n \n \n ''', unsafe_allow_html=True)\n\n\n partition_cols = train.columns.tolist() + test.columns.tolist()\n for col in partition_cols:\n save_to_database_ORM(conn_db, number_workflow=last_number_workflow, name_dataset=str(database_name), name_column=col, function_operator=dict_db['function_operator'][20], name_operator=dict_db['name_operator'][11], type_operator=dict_db['type_operator'][4], timestamp=datetime.now())\n\n else:\n st.sidebar.warning('Have you completed pre-processing?')\n \n \n else:\n\n st.sidebar.markdown('## Workflow query ')\n select_query_workflow = st.sidebar.selectbox('', options=('Selecione uma opção', 'Fazer consulta'))\n\n if select_query_workflow != 'Fazer consulta':\n st.markdown('
    Data PreProcessing Assistant for Classification Problems
    ', unsafe_allow_html=True)\n st.image('imgs/capa.png')\n\n if select_query_workflow == 'Fazer consulta':\n query = st.text_area('Query input')\n\n if query:\n try:\n value_tal = st.slider('', min_value=1, max_value=1000, value=5)\n df_query = pd.read_sql(query, conn_db)\n\n st.table(df_query.head(value_tal))\n except Exception as e:\n st.error('Invalid Query!')\n\n\n \n\n","repo_name":"LucimarLial/AssistantPP","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":58953,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"88"} +{"seq_id":"39662016139","text":"class Student:\n # [assignment] Skeleton class. Add your code here\n def __init__(self, name, age, tracks, score):\n self.name = name\n self.age = age\n self.tracks = tracks\n self.score = score\n print(\"My name is\", name, \"and i am\", age, \"years old. The tracks i registered for are \", tracks,\" and my score so far is\", score, \"percent.\" )\n pass\n def change_name(self, name):\n print(\"Initial name\", self.name, \"has been replaced. User's new name is\", name,\".\")\n def change_age(self, age):\n print( \"Initial age of user\", self.age, \"has been replaced. User's new age is\", age,\" years.\") \n def add_track(self, track):\n print(\"Additional track to the initial list of tracks\", self.tracks, \"is\", track, \".\")\n def get_score(self):\n print(\"User's score is\", self.score ,\" percent.\")\n\n\n\nBob = Student(name=\"Bob\", age=26, tracks=[\"FE\",\"BE\"], score=20.90)\n\n# Expected methods\nBob.change_name(\"Peter\")\nBob.change_age(34)\nBob.add_track(\"UI/UX\")\nBob.get_score()\n\n#trial\nTonye = Student(name=\"Tonye\", age=29, tracks=[\"nodeJS\", \"Figma\"], score=89.6)\n\n#Methods\nTonye.change_name(\"Ndiana\")\nTonye.change_age(26)\nTonye.get_score()\n","repo_name":"Udoetuk/classes_and_function_project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"70089026209","text":"# -*- coding: utf-8 -*-\n\nimport xml.etree.ElementTree as ET\nimport os\nfrom distutils.version import LooseVersion\nimport urllib\nimport subprocess\nimport tempfile\n\ndef run(args):\n subprocess.check_call(args)\n\ndef extract_value(text):\n return text.replace(\")\", \"\").strip()\n\ndef get_qgis_version():\n url = 'https://raw.githubusercontent.com/nextgis/nextgisqgis/master/cmake/util.cmake'\n qgis_major = \"0\"\n qgis_minor = \"0\"\n u2 = urllib.urlopen(url)\n for line in u2.readlines():\n if \"set(QGIS_MAJOR\" in line:\n qgis_major = extract_value(line.replace(\"set(QGIS_MAJOR\", \"\"))\n elif \"set(QGIS_MINOR\" in line:\n qgis_minor = extract_value(line.replace(\"set(QGIS_MINOR\", \"\"))\n return '{}.{}'.format(qgis_major, qgis_minor)\n\ndef install_plugins(plugins_list, out_dir):\n\n qgis_version = get_qgis_version()\n\n metadata_xml_urls = [\n 'http://plugins.qgis.org/plugins/plugins.xml?qgis=' + qgis_version,\n 'https://rm.nextgis.com/api/repo/1/qgis_xml?qgis=' + qgis_version,\n # 'http://nextgis.ru/programs/qgis/qgis-repo.xml?qgis=' + qgis_version\n ]\n\n # Create repos dir\n counter = 0\n repos_dir = os.path.join(tempfile.gettempdir(), 'repos')\n if not os.path.exists(repos_dir):\n os.makedirs(repos_dir)\n plugins_dir = os.path.join(tempfile.gettempdir(), 'plugins')\n if not os.path.exists(plugins_dir):\n os.makedirs(plugins_dir)\n for metadata_xml_url in metadata_xml_urls:\n print('Fetch 
{}'.format(metadata_xml_url))\n urllib.urlretrieve(metadata_xml_url, os.path.join(repos_dir, str(counter) + \".repo.xml\"))\n counter += 1\n for plugin in plugins_list:\n plugin_name1 = plugin\n plugin_name2 = plugin.replace(' ', '_')\n output_url = ''\n version = '0.0.0'\n # list all xml files\n for repo_xml in os.listdir(repos_dir):\n try:\n tree = ET.parse(os.path.join(repos_dir, repo_xml))\n root = tree.getroot()\n for pyqgis_plugin in root.findall('pyqgis_plugin'):\n if plugin_name1 == pyqgis_plugin.get('name') or plugin_name2 == pyqgis_plugin.get('name'):\n currentVersion = pyqgis_plugin.get('version').replace('-', '.')\n if LooseVersion(currentVersion) > LooseVersion(version):\n version = currentVersion\n output_url = pyqgis_plugin.find('download_url').text\n except:\n pass\n\n if output_url:\n print('Plugin {} download url: {}'.format(plugin, output_url))\n out_zip = os.path.join(plugins_dir, plugin_name2 + '.zip')\n urllib.urlretrieve(output_url, out_zip)\n\n # Extract zip to specific folder\n prev_dir = os.getcwd()\n os.chdir(out_dir)\n run(('cmake', '-E', 'tar', 'xzf', out_zip))\n os.chdir(prev_dir)\n else:\n print('Failed to find plugin {}'.find(plugin))\n","repo_name":"nextgis/nextgis_installer","sub_path":"opt/qgis.py","file_name":"qgis.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"88"} +{"seq_id":"39823087821","text":"import ipaddress\nimport sys\nimport time\nfrom zeroconf import IPVersion, ServiceBrowser, ServiceStateChange, Zeroconf, DNSAddress, DNSService, DNSText\n\n\ndef on_service_state_change(zeroconf, service_type, name, state_change):\n if state_change is ServiceStateChange.Added:\n zeroconf.get_service_info(service_type, name)\n\n\nclass BorderAgent(object):\n alias = None\n server_name = None\n addr = None\n port = None\n thread_status = None\n\n def __init__(self, alias):\n self.alias = alias\n\n def __repr__(self):\n return str([self.alias, self.addr, self.port, self.thread_status])\n\n\ndef get_ipaddr_priority(addr: ipaddress.IPv6Address):\n # calculate the priority of IPv6 addresses in order: Global > non Global > Link local\n if addr.is_link_local:\n return 0\n\n if not addr.is_global:\n return 1\n\n return 2\n\n\ndef parse_cache(cache):\n border_agents = []\n\n # Find all border routers\n for ptr in cache.get('_meshcop._udp.local.', []):\n border_agents.append(BorderAgent(ptr.alias))\n\n # Find server name, port and Thread Interface status for each border router\n for ba in border_agents:\n for record in cache.get(ba.alias.lower(), []):\n if isinstance(record, DNSService):\n ba.server_name = record.server\n ba.port = record.port\n elif isinstance(record, DNSText):\n text = bytearray(record.text)\n sb = text.split(b'sb=')[1][0:4]\n ba.thread_status = (sb[3] & 0x18) >> 3\n\n # Find IPv6 address for each border router\n for ba in border_agents:\n for record in cache.get(ba.server_name.lower(), []):\n if isinstance(record, DNSAddress):\n addr = ipaddress.ip_address(record.address)\n if not isinstance(addr, ipaddress.IPv6Address) or addr.is_multicast or addr.is_loopback:\n continue\n\n if not ba.addr or get_ipaddr_priority(addr) > get_ipaddr_priority(ipaddress.IPv6Address(ba.addr)):\n ba.addr = str(addr)\n\n return border_agents\n\n\ndef main():\n # Browse border agents\n zeroconf = Zeroconf(ip_version=IPVersion.V6Only)\n ServiceBrowser(zeroconf, \"_meshcop._udp.local.\", handlers=[on_service_state_change])\n time.sleep(2)\n cache = zeroconf.cache.cache\n 
zeroconf.close()\n\n border_agents = parse_cache(cache)\n for ba in border_agents:\n print(ba)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"openthread/openthread","sub_path":"tests/scripts/thread-cert/find_border_agents.py","file_name":"find_border_agents.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":3282,"dataset":"github-code","pt":"88"} +{"seq_id":"8088493392","text":"from flask import Flask, render_template\nfrom flask_mysqldb import MySQL\nfrom scrap_manual.data import DATA\nfrom descripcion import descripciones\nfrom dotenv import load_dotenv\nimport os\n\n# Cargo las variables de entorno desde el archivo .env\nload_dotenv()\n\n\napp = Flask(__name__)\n\n# Configuración de la base de datos usando variables de entorno\napp.config['MYSQL_HOST'] = os.environ.get('MYSQL_HOST')\napp.config['MYSQL_USER'] = os.environ.get('MYSQL_USER')\napp.config['MYSQL_PASSWORD'] = os.environ.get('MYSQL_PASSWORD')\napp.config['MYSQL_DB'] = os.environ.get('MYSQL_DB')\n\nmysql = MySQL(app)\n\n@app.route('/')\ndef index():\n producto_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n products = []\n\n for producto_id in producto_ids:\n cur = mysql.connection.cursor()\n cur.execute(\"\"\"\n SELECT s.super_nombre, p.producto_nombre, ROUND(hp.precio) AS precio\n FROM historial_precios hp\n INNER JOIN (\n SELECT super_id, producto_id, MAX(fecha_hora) AS fecha_maxima\n FROM historial_precios\n WHERE producto_id = %s\n GROUP BY super_id, producto_id\n ) AS ultimos_precios ON hp.super_id = ultimos_precios.super_id AND hp.producto_id = ultimos_precios.producto_id AND hp.fecha_hora = ultimos_precios.fecha_maxima\n JOIN supermercado s ON hp.super_id = s.super_id\n JOIN producto p ON hp.producto_id = p.producto_id\n ORDER BY hp.precio ASC LIMIT 1\n \"\"\", (producto_id,))\n data = cur.fetchone() # fetchone devuelve una sola fila o None si no hay datos\n cur.close()\n\n if data:\n supermercado, producto_nombre, precio = data\n imagen_url = DATA[producto_nombre][supermercado]['Imagenes']\n producto_link = DATA[producto_nombre][supermercado]['Link']\n descripcion = descripciones.get(producto_nombre, 'Descripción no disponible.')\n\n products.append({\n 'supermercado': supermercado,\n 'nombre': producto_nombre,\n 'precio': precio,\n 'imagen': imagen_url,\n 'link': producto_link,\n 'descripcion': descripcion,\n 'nombre_producto': producto_nombre\n })\n\n\n else:\n products.append({})\n\n return render_template('index.html', products=products)\n\n \n\n@app.route('/producto/')\ndef mostrar_producto(nombre_producto):\n if nombre_producto in DATA:\n cur = mysql.connection.cursor()\n cur.execute(\"\"\"\n SELECT s.super_nombre, p.producto_nombre, hp.precio, hp.fecha_hora\n FROM historial_precios hp\n INNER JOIN (\n SELECT super_id, MAX(fecha_hora) AS fecha_maxima\n FROM historial_precios\n JOIN producto ON producto.producto_id = historial_precios.producto_id\n WHERE producto.producto_nombre = %s\n GROUP BY super_id\n ) AS ultimos_precios ON hp.super_id = ultimos_precios.super_id AND hp.fecha_hora = ultimos_precios.fecha_maxima\n JOIN supermercado s ON hp.super_id = s.super_id\n JOIN producto p ON hp.producto_id = p.producto_id\n WHERE p.producto_nombre = %s\n ORDER BY hp.precio ASC\n \"\"\", (nombre_producto, nombre_producto))\n producto_data = cur.fetchall()\n cur.close()\n\n producto_data_formateado = []\n for supermercado, nombre_prod, precio, fecha in producto_data:\n precio_formateado = f\"{int(round(precio)):,.0f}\".replace(\",\", \".\")\n link_supermercado = 
DATA[nombre_producto].get(supermercado, {}).get('Link', '#')\n producto_data_formateado.append((supermercado, nombre_prod, precio_formateado, fecha, link_supermercado))\n\n descripcion_producto = descripciones.get(nombre_producto, 'Descripción no disponible.')\n imagen_url = DATA[nombre_producto][supermercado]['Imagenes'] if nombre_producto in DATA else None\n\n return render_template('precio_producto.html',\n producto_data=producto_data_formateado,\n nombre_producto=nombre_producto,\n descripcion_producto=descripcion_producto,\n imagen_url=imagen_url)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n\n\n","repo_name":"MiiguelHUB/WebScrapingINACAP_23","sub_path":"scrapping_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"} +{"seq_id":"239869701","text":"# -*- coding: utf-8 -*-\n\n# Learn more: https://github.com/kennethreitz/setup.py\n\nfrom setuptools import setup, find_packages\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nsetup(\n name='livedata-subscribetags',\n version='0.1.0',\n description='Sample script to subscribe to changes of tag\\'s value ',\n long_description=readme,\n author='HMS Industrial Netwoks S.A.',\n author_email='ewon@hms-networks.com',\n url='https://developer.ewon.biz/content/apiv2',\n license=license,\n packages=find_packages(exclude=('tests', 'docs')),\n scripts=['APIv2/example.py'],\n entry_points={\n 'console_scripts': [\n 'livedata-subscribetags=APIv2.example:launch',\n ],\n },\n install_requires=[\n 'stomp.py',\n 'websocket-client',\n ],\n dependency_links=['git+https://github.com/gschizas/websocket-client.git@patch-1#egg=websocket-client-0'],\n)\n","repo_name":"XDeschuyteneer/live","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"28140581904","text":"# Core packages\nimport os\nimport shutil\nimport sys\nimport tarfile\n\n# Third-party packages\nfrom charmhelpers.core.hookenv import config, resource_get, status_set\nfrom charms.reactive import hook, set_state, when\n\n\n@hook('config-changed')\ndef update():\n # Copy source code\n resource_path = resource_get('application')\n\n if not resource_path:\n status_set(\n 'blocked',\n 'Waiting for \"application\" resource'\n )\n sys.exit(0)\n\n status_set(\n 'maintenance',\n 'Extracting application code'\n )\n\n tar = tarfile.open(resource_path)\n tar.extractall('/srv/next')\n tar.close()\n\n if os.path.isdir('/srv/previous'):\n shutil.rmtree('/srv/previous')\n\n if os.path.isdir('/srv/active'):\n os.rename('/srv/active', '/srv/previous')\n\n os.rename('/srv/next', '/srv/active')\n\n set_state('wsgi.application.ready')\n\n\n@when('wsgi.running')\ndef set_status():\n build_filepath = '/srv/BUILD_LABEL'\n port = config('port') or 80\n\n if os.path.exists(build_filepath):\n with open('/srv/BUILD_LABEL') as build_file:\n build_label = build_file.read().strip()\n\n status_set(\n 'active',\n 'Build {build_label} running on port {port}'.format(**locals())\n )\n","repo_name":"nottrobin/webteam-wsgi","sub_path":"reactive/webteam_wsgi.py","file_name":"webteam_wsgi.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"24201270590","text":"import Levenshtein as levenshtein\nimport 
itertools\n\nsaved_strings = [\n 'Hello here!',\n 'this is amazing',\n 'how are you',\n 'I take this',\n 'keep working',\n 'try this also',\n]\n\n\nsearch_string = 'how you are'\n\n# def find_best(search_string, saved_string, nb = 1):\n# my_dict = {}\n# if nb == 1:\n# greater = -1\n# index = -1\n# for el in saved_string:\n# print(f'el: {el}')\n# ratio = levenshtein.ratio(search_string, el)\n# print(f'dist: {ratio} \\n')\n# if greater < ratio:\n# greater = ratio\n# index = saved_string.index(el)\n# return index, greater\n# else:\n# ratio_list = []\n# indexs = []\n# for num, el in enumerate(saved_string):\n# ratio = levenshtein.ratio(search_string, el)\n# index.append(num)\n# ratio_list.append(ratio)\n# return indexs, ratio_list\n\n\nclass SearchUtil:\n \"\"\"\n A class used to manipulate strings, and make quick search\n\n\n Attributes:\n ------------\n iterator: iterator\n an iterator that contains the list of the string elements to manipulate\n\n\n Methods:\n ---------\n get_size()\n return the number of elements present in the iterator\n\n find_best(search_string)\n return the best corresponding index string by calculating Levenshtein ratio. \n\n find_best_string(self, search_string)\n return the best corresponding index string by calculating Levenshtein ratio.\n\n \"\"\"\n def __init__(self, iterator):\n \"\"\"\n when initializing, convert iterator to list if not.\n\n Args:\n iterator (list | tuple | set): an iterator containing the data\n \n Raises:\n TypeError: iterator must be list, tuple or set\n \"\"\"\n if type(iterator) != list and type(iterator) != tuple and type(iterator) != set :\n raise TypeError(\"Iterator must be list, tuple or set\")\n self.iterator = iterator\n if(type(self.iterator) == list):\n print(\"this is a list\")\n else:\n print(\"this is not a list\")\n print(\"converting to list...\")\n self.iterator = list(self.iterator)\n print(type(self.iterator))\n \n def get_size(self):\n \"\"\"\n get the size of the iterator.\n\n Returns:\n int: the number of element in the iterator\n \"\"\"\n return len(self.iterator)\n\n def find_best(self, search_string):\n \"\"\"\n find the ratio and index of the best matching\n\n Args:\n search_string (string): the string to search in the data\n\n Returns:\n tuple: (index, greater) index is the index of the best matching in the iterator, and greater is the ratio coresponding\n \"\"\"\n greater = -1\n index = -1\n for el in self.iterator:\n #print(f'el: {el}')\n ratio = levenshtein.ratio(search_string, el)\n # print(f'dist: {ratio} \\n')\n if greater < ratio:\n greater = ratio\n index = self.iterator.index(el)\n return index, greater\n\n def find_best_string(self, search_string):\n \"\"\"\n find the best string in iterator\n\n Args:\n search_string (string): the string to search in the data\n\n Returns:\n string: the best matching string in the data\n \"\"\"\n index, greater = self.find_best(search_string)\n return self.iterator[index]\n \n def replace_iterator(self, new_iterator):\n \"\"\"\n\n Args:\n new_iterator (list | tuple | set): iterator to replace the current iterator\n \"\"\"\n self.__init__(new_iterator)\n \n def add_to_iterator(self, iterator):\n \"\"\"_summary_\n\n Args:\n iterator (list | tuple | set): iterator to add to the current iterator\n\n Raises:\n TypeError: iterator must be list, tuple or set\n \"\"\"\n if type(iterator) != list and type(iterator) != tuple and type(iterator) != set :\n raise TypeError(\"Iterator must be list, tuple or set\")\n else:\n if type(iterator) != list:\n iterator = list(iterator)\n 
self.iterator = list(itertools.chain(self.iterator, iterator))\n \nsearcher = SearchUtil(saved_strings)\n\nprint(searcher.get_size())\n\nbest_index, best_ratio = searcher.find_best(search_string)\nprint(f'best index: {best_index}')\n \nprint(f'the best match is \\'{saved_strings[best_index]}\\', and the ratio is {best_ratio}')\n\nprint(f'adding new values to iterator..')\nsearcher.add_to_iterator(['first added', 'how you are', 'something else'])\nprint('values added')\nprint(f'the new best matching string is : \\'{searcher.find_best_string(search_string)}\\' ')\n","repo_name":"Nathanf22/utils","sub_path":"string_match.py","file_name":"string_match.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"21711242638","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\n\nwith open('README.md') as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nsetup(\n name='biotaphy_analyses',\n version='2.0.0',\n description='Biotaphy package for various computations',\n long_description=readme,\n author='Biotaphy Team',\n author_email='cjgrady@ku.edu',\n url='https://github.com/biotaphy/analyses',\n license=license,\n packages=find_packages(exclude=('tests', 'docs', 'sample_data')),\n scripts=['bin/ancestral_distribution.py'],\n install_requires=[\n 'dendropy>=4.0.0',\n 'matplotlib',\n 'numpy>=1.11.0',\n 'scipy>=1.0.0']\n)\n","repo_name":"biotaphy/analyses","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"88"} +{"seq_id":"36498925771","text":"from . test import BaseTestCase, db\nfrom bucketlist.models import BucketList\nimport json\n\n\nclass TestCase(BaseTestCase):\n\n def test_add_bucketlist(self):\n response = self.add_bucketlist('Bucketlist')\n bucketlists = BucketList.query.all()\n self.assertEqual(bucketlists[0].name, 'Bucketlist')\n self.assertIn('Bucketlist', str(response.data))\n self.assertEqual(response.status_code, 201)\n\n def test_get_all_bucketlists(self):\n self.add_bucketlist('Bucketlist')\n response = self.retrieve_bucketlist(login=True)\n self.assertIn('Bucketlist', str(response.data))\n self.assertIn('john@example.com', str(response.data))\n self.assertEqual(response.status_code, 200)\n\n def test_get_single_bucketlist(self):\n self.add_bucketlist('Bucketlist1')\n self.add_bucketlist('Bucketlist2')\n response = self.retrieve_bucketlist(login=True, id=2)\n self.assertEqual(response.status_code, 200)\n self.assertIn('Bucketlist2', str(response.data))\n\n def test_update_bucketlist(self):\n self.add_bucketlist('Bucketlist1')\n response = self.update_bucketlist(1, \"Modified Name\")\n self.assertIn('Modified Name', str(response.data))\n self.assertEqual(response.status_code, 201)\n\n def test_delete_bucketlist(self):\n self.add_bucketlist('Bucketlist1')\n self.add_bucketlist('Bucketlist2')\n self.add_bucketlist('Bucketlist3')\n response = self.delete_bucketlist(login=True, id=2)\n bucketlists = BucketList.query.all()\n self.assertEqual(len(bucketlists), 2)\n self.assertIn('Successfully deleted.', str(response.data))\n self.assertEqual(response.status_code, 200)\n\n def test_login_is_required(self):\n response = self.retrieve_bucketlist()\n self.assertIn('Provide a valid auth token', str(response.data))\n self.assertEqual(response.status_code, 
401)\n","repo_name":"domiebett/bucketlist","sub_path":"tests/test_bucketlist.py","file_name":"test_bucketlist.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"19415053194","text":"class Animal:\n def __init__(self, animal):\n self.name = animal\n\nclass Zoo:\n __animals = 0\n\n def __init__(self, name):\n self.name = name\n self.mammals = []\n self.fishes = []\n self.birds = []\n\n def add_animal(self, species, name):\n if species == \"mammal\":\n self.mammals.append(name)\n elif species == \"fish\":\n self.fishes.append(name)\n elif species == \"bird\":\n self.birds.append(name)\n Zoo.__animals += 1\n\n def get_info(self, species):\n global animals, name_species\n if species == \"mammal\":\n animals = self.mammals\n name_species = \"Mammals\"\n elif species == \"fish\":\n animals = self.fishes\n name_species = \"Fishes\"\n elif species == \"bird\":\n animals = self.birds\n name_species = \"Birds\"\n animals = \", \".join([animal.name for animal in animals])\n return f\"{name_species} in {self.name}: {animals}\\nTotal animals: {Zoo.__animals}\"\n\n\nname_of_zoo = input()\nn = int(input())\n\nzoo = Zoo(name_of_zoo)\n\nfor _ in range(n):\n species, name = input().split()\n animal = Animal(name)\n zoo.add_animal(species, animal)\n\nspecies_input = input()\n\nprint(zoo.get_info(species_input))","repo_name":"IvayloSavov/Fundamentals","sub_path":"objects_and_classes/zoo_2.py","file_name":"zoo_2.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"74778493088","text":"from __future__ import unicode_literals\n\nimport doiq.accounts\nimport redis\nimport json\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractBaseUser, PermissionsMixin\nfrom django.core import validators\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.dispatch import receiver\nfrom django.utils import timezone\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.utils.translation import ugettext_lazy as _\nfrom doiq.accounts.user.manager import UserManager\nfrom doiq.channel.models import ChannelMembership\nfrom sorl.thumbnail import get_thumbnail\nfrom django.core.cache import cache\nfrom hashlib import md5\nfrom doiq.accounts.user.signals import user_profile_was_changed\n\n\n@python_2_unicode_compatible\nclass User(AbstractBaseUser, PermissionsMixin):\n STATUSES = (\n (0, _('Offline')),\n (1, _('Online')),\n )\n email = models.EmailField(\n _('email address'),\n max_length=255,\n unique=True,\n error_messages={\n 'unique': _(\"A user with that email already exists.\"),\n },\n )\n full_name = models.CharField(_('full name'), max_length=30, blank=True)\n username = models.CharField(\n verbose_name=_('username'),\n max_length=70,\n unique=True,\n help_text=_('Required. 70 characters or fewer. Letters, digits only.'),\n validators=[\n validators.RegexValidator(\n r'^[\\w]+$',\n _('Enter a valid username. 
This value may contain only letters and numbers.')\n ),\n ],\n error_messages={\n 'unique': _(\"A user with that username already exists.\"),\n },\n )\n is_staff = models.BooleanField(\n verbose_name=_('staff status'),\n default=False,\n help_text=_('Designates whether the user can log into admin site.'),\n )\n is_active = models.BooleanField(\n verbose_name=_('active'),\n default=True,\n help_text=_(\n 'Designates whether this user should be treated as active. '\n 'Unselect this instead of deleting accounts.'\n ),\n )\n date_joined = models.DateTimeField(_('date joined'), default=timezone.now)\n subscribed = models.BooleanField(default=True)\n image = models.ForeignKey(\n to='filemanager.FileManager',\n verbose_name=_('User picture'),\n blank=True, null=True,\n related_name='avatars', on_delete=models.SET_NULL\n )\n status = models.IntegerField(choices=STATUSES, default=1)\n timezone = models.CharField(\n verbose_name=_('time zone'),\n max_length=64,\n default=settings.TIME_ZONE\n )\n friends = models.ManyToManyField('self', blank=True, symmetrical=True)\n\n objects = UserManager()\n\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ('username',)\n\n def __str__(self):\n return self.email\n\n def crop_small_picture(self, raw_data=None):\n cache_key = md5('{0}-image-small'.format(self.id)).hexdigest()\n cache_thumb = cache.get(cache_key)\n if cache_thumb:\n return cache_thumb\n if not raw_data:\n img = get_thumbnail(self.image.file.url, '52x52', crop='center')\n else:\n img = get_thumbnail(raw_data.file, '52x52', crop='center')\n cache.set(cache_key, img.url, 15 * 60)\n return img.url\n\n def crop_medium_picture(self, raw_data=None):\n cache_key = md5('{0}-image-medium'.format(self.id)).hexdigest()\n cache_thumb = cache.get(cache_key)\n if cache_thumb:\n return cache_thumb\n if not raw_data:\n img = get_thumbnail(self.image.file.url, '105x105', crop='center')\n else:\n img = get_thumbnail(raw_data.file, '105x105', crop='center')\n cache.set(cache_key, img.url, 15 * 60)\n return img.url\n\n @property\n def get_picture(self):\n if self.image:\n return self.crop_small_picture()\n return None\n\n @property\n def get_picture_medium(self):\n if self.image:\n return self.crop_medium_picture()\n return None\n\n def get_short_name(self):\n \"\"\"\n :return: The short name for the user.\n \"\"\"\n return self.full_name\n\n def get_full_name(self):\n \"\"\"\n :return: The first_name plus the last_name, with a space in between.\n \"\"\"\n return self.full_name or self.username or self.email\n\n def get_channels(self):\n if self.channels.count():\n return map(\n lambda x: {'name': x.name, 'id': x.id, 'channel_uid': x.channel_uid, 'opened': x.opened,\n 'counter_unread': x.channelmembership_set.filter(member=self)[0].counter_unread,\n 'type': x.type},\n self.channels.extra(select={'is_owner': 'SELECT owner_id = %s', 'lower_name': 'lower(name)'}, select_params=(self.id, ))\n .order_by('-opened', 'lower_name', 'id')\n )\n return []\n\n def get_all_available_private_channals(self):\n # friends = self.friends.values_list('id', flat=True)\n private_channals_membership = ChannelMembership.objects.filter(channel__type=1,\n member=self) # .values_list('channel', flat=True)\n return private_channals_membership\n\n\n@receiver(models.signals.m2m_changed, sender=User.friends.through)\ndef clear_accepted_invites_on_friend_delete(sender, instance, action, pk_set, *args, **kwargs):\n if action == 'post_remove':\n doiq.accounts.invites.models.Invite.objects.filter(\n Q(accepted=True) & Q(\n Q(invited_by=instance, 
user_id__in=pk_set) | Q(user_id=instance, invited_by_id__in=pk_set)\n )\n ).delete()\n\n@receiver(user_profile_was_changed, sender=User)\ndef handle_profile_changed_socket_notify(sender, user, **kwargs):\n picture_small = kwargs.get('picture_small')\n full_name = kwargs.get('full_name')\n client = redis.StrictRedis(db=8)\n message_struct = {\n 'type': 'profile_changed',\n 'avatar': picture_small,\n 'full_name': full_name,\n 'user_id': user.id\n }\n client.publish('user_todo', json.dumps(message_struct))\n del client","repo_name":"andrew-terpolovsky/teamapp","sub_path":"doiq/accounts/user/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"69910609569","text":"import time\nimport numpy as np\n\nn_iterations = 10000\n\ntotal_execution_time = 0\nfor _ in range(n_iterations):\n start_time = time.time()\n\n # Do some time consuming busywork computation\n size_3D = (100, 100, 100)\n random_array = np.random.sample(size=size_3D)\n total = np.sum(random_array)\n\n end_time = time.time()\n elapsed_time = end_time - start_time\n total_execution_time += elapsed_time\n\naverage_time = total_execution_time / n_iterations\nprint(\n \"Average execution time:\",\n f\"{average_time:.09} seconds\")\n","repo_name":"brohrer/how-to-train-your-robot","sub_path":"chapter_2/02_code_timing.py","file_name":"02_code_timing.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"88"} +{"seq_id":"36547566273","text":"import random\n\n\ndef create_random_array():\n array = []\n for i in range(30):\n array.append(random.randrange(-100, 100))\n return array\n\n\ndef quick_sort(input, start, end):\n if(start == None):#first time\n start = 0\n end = len(input)\n print('sorting ' , input)\n n = end - start\n\n if(n <= 1):\n return input\n random_index = n//2\n pivot = input[start + random_index]\n #print('Sorting array ' , input[start : end], ' with pivot ' , pivot)\n p = start\n e = 0\n g = end -1\n i = start\n\n\n while(i <= g) :\n #print(\"for i = \" , i, ', current element is ' , input[i])\n if input[i] < pivot :\n switch(input,i, p)\n p += 1\n i += 1\n elif input[i] == pivot :\n switch(input,i, p + e)\n e += 1\n i += 1\n else:\n switch(input,i, g)\n g -= 1\n\n\n quick_sort(input, start, i-1)\n quick_sort(input, i, end)\n #print(\"Sorted array is %s \" % (input))\n return input\n\n\n\n\ndef switch(array, index1, index2):\n #print('Initial array %s , first element at index %s is %s , second element at index %s is %s ' % (array, index1, array[index1], index2, array[index2]))\n tmp = array[index1]\n array[index1] = array[index2]\n array[index2] = tmp\n #print('after switch ' , array )\n\n\nprint('FINAL ', quick_sort(create_random_array(), None, None))\n\n#test = [-4, 29, -82, -75, 33, -61, -55, 68]\n\n#print('FINAL ', quick_sort(test, None, None))","repo_name":"ciortanmadalina/algo","sub_path":"untitled/algo2-devoir.py","file_name":"algo2-devoir.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"41551221826","text":"from typing import Union\n\nimport bcrypt\nfrom fastapi import FastAPI\nfrom app.models import User\nfrom db.supabase import create_supabase_client\n\napp = FastAPI()\n\n# Initialize supabase client\nsupabase = create_supabase_client()\n\ndef user_exists(key: str = \"email\", value: str = 
None):\n user = supabase.from_(\"users\").select(\"*\").eq(key, value).execute()\n return len(user.data) > 0\n\n# Create a new user\n@app.post(\"/user\")\ndef create_user(user: User):\n try:\n # Convert email to lowercase\n user_email = user.email.lower()\n # Hash password\n hased_password = bcrypt.hashpw(user.password, bcrypt.gensalt())\n\n # Check if user already exists\n if user_exists(value=user_email):\n return {\"message\": \"User already exists\"}\n\n # Add user to users table\n user = supabase.from_(\"users\")\\\n .insert({\"name\": user.name, \"email\": user_email, \"password\": hased_password})\\\n .execute()\n \n # Check if user was added\n if user:\n return {\"message\": \"User created successfully\"}\n else:\n return {\"message\": \"User creation failed\"}\n except Exception as e:\n print(\"Error: \", e)\n return {\"message\": \"User creation failed\"}\n\n\n# Retrieve a user\n@app.get(\"/user\")\ndef get_user(user_id: Union[str, None] = None):\n try:\n if user_id:\n user = supabase.from_(\"users\")\\\n .select(\"id\", \"name\", \"email\")\\\n .eq(\"id\", user_id)\\\n .execute()\n \n if user:\n return user\n else:\n users = supabase.from_(\"users\")\\\n .select(\"id\", \"email\", \"name\")\\\n .execute()\n if users:\n return users\n except Exception as e:\n print(f\"Error: {e}\")\n return {\"message\": \"User not found\"}\n\n\n# Update a user\n@app.put(\"/user\")\ndef update_user(user_id: str, email: str, name: str):\n try:\n user_email = email.lower()\n\n # Check if user exists\n if user_exists(\"id\", user_id):\n # Check if email already exists\n email_exists = supabase.from_(\"users\")\\\n .select(\"*\").eq(\"email\", user_email)\\\n .execute()\n if len(email_exists.data) > 0:\n return {\"message\": \"Email already exists\"}\n\n # Update user\n user = supabase.from_(\"users\")\\\n .update({\"name\": name, \"email\": user_email})\\\n .eq(\"id\", user_id).execute()\n if user:\n return {\"message\": \"User updated successfully\"}\n else:\n return {\"message\": \"User update failed\"}\n except Exception as e:\n print(f\"Error: {e}\")\n return {\"message\": \"User update failed\"}\n\n# Delete a user\n@app.delete(\"/user\")\ndef delete_user(user_id: str):\n try: \n # Check if user exists\n if user_exists(\"id\", user_id):\n # Delete user\n supabase.from_(\"users\")\\\n .delete().eq(\"id\", user_id)\\\n .execute()\n return {\"message\": \"User deleted successfully\"}\n \n else:\n return {\"message\": \"User deletion failed\"}\n except Exception as e:\n print(f\"Error: {e}\")\n return {\"message\": \"User deletion failed\"}","repo_name":"theinfosecguy/python-supabase-crud-api","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"88"} +{"seq_id":"6898394423","text":"import os\nimport unittest\nimport tempfile\nimport typing\n\nfrom PIL import Image, ImageChops # Channel Operations\n\nimport convert_image_lossless\n\n\nclass ConvertImageLosslessTestCase (unittest.TestCase):\n @staticmethod\n def _generate_src_and_dest_images(dest_ext: str) -> typing.Iterator[typing.Tuple[str, str]]:\n script_dir = os.path.dirname(os.path.abspath(__file__))\n test_filenames = [ # Must not contain duplicate filenames.\n \"test_images/1.png\",\n \"test_images/2.png\",\n ]\n test_filenames = [os.path.join(script_dir, f) for f in test_filenames]\n\n dest_filenames = [os.path.basename(f) for f in test_filenames]\n dest_filenames = [os.path.splitext(f)[0] + dest_ext for f in 
dest_filenames]\n\n return zip(test_filenames, dest_filenames)\n\n def _assert_image_equal(self, src_filename: str, dest_filename: str):\n src_image: Image.Image = Image.open(src_filename)\n dest_image: Image.Image = Image.open(dest_filename)\n\n diff = ImageChops.difference(src_image, dest_image)\n if diff.getbbox():\n self.fail(f\"Images don't equal: {diff.getbbox()}\")\n\n def test_convert_image_to_webp(self):\n test_filenames = self._generate_src_and_dest_images(\".webp\")\n\n with tempfile.TemporaryDirectory() as dir_name:\n for src_filename, dest_filename in test_filenames:\n dest_filename = os.path.join(dir_name, dest_filename)\n\n with self.subTest(src_filename=src_filename, dest_filename=dest_filename):\n convert_image_lossless.convert_image_to_webp(src_filename, dest_filename)\n self._assert_image_equal(src_filename, dest_filename)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"MacJim/Python-Utility-Scripts","sub_path":"test/test_convert_image_lossless.py","file_name":"test_convert_image_lossless.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"6015913016","text":"import re\n\nfrom tornado.gen import sleep\nfrom tornado.httpclient import AsyncHTTPClient\nfrom tornado.ioloop import IOLoop\n\nfrom config.config import CRAWEL_INTERVAL_TIME, REDIS_PROXY_KEY\nfrom utils.redisClient import RedisClient\n\nfrom .baseCrawler import BaseCrawler\n\n\nclass ProxyCrawler(BaseCrawler):\n\n _self = None\n\n def __init__(self, db):\n super().__init__()\n self._db = db\n\n @classmethod\n async def current(cls):\n if not cls._self:\n db = await RedisClient.current()\n cls._self = cls(db)\n\n return cls._self\n\n async def crawler_89ip(self, count=100, port=''):\n \"\"\"\n 异步生成器, 获取ip\n \"\"\"\n url = \"http://www.89ip.cn/tqdl.html?\" \\\n f\"api=1&num={count}&port={port}&address=&isp=\"\n\n http_client = AsyncHTTPClient()\n response = await http_client.fetch(url, headers=self.headers)\n if response.code == 200:\n for record in re.finditer(\n \"(\\d+.\\d+.\\d+.\\d+:\\d+)\",\n str(response.body)\n ):\n yield record.group(0)\n else:\n print(\"crawler_89ip 匹配代理失败\")\n\n async def run(self):\n\n while True:\n print(\"开始爬取代理ip\")\n\n if await self._db.count(REDIS_PROXY_KEY) < 100:\n async for record in self.crawler_89ip():\n await self._db.add(REDIS_PROXY_KEY, record)\n\n print(\"爬取代理ip结束\")\n await sleep(CRAWEL_INTERVAL_TIME)\n\n\nif __name__ == '__main__':\n crawler = ProxyCrawler.current()\n IOLoop.current().run_sync(crawler.run)\n","repo_name":"mengzxh/MagicPool","sub_path":"crawler/proxyCrawler.py","file_name":"proxyCrawler.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"30676961463","text":"\"\"\"File Backend Code\"\"\"\n\nimport os\nimport logging\nfrom pathlib import Path\n\nimport anyconfig\n\nfrom kheops.utils import render_template, glob_files, render_template_python\nfrom kheops.plugin.common import BackendPlugin, BackendCandidate\n\nfrom pprint import pprint\n\nlog = logging.getLogger(__name__)\n\n\n# class FileCandidate(Candidate):\n# path = None\n#\n# def _report_data(self):\n# data = {\n# # \"rule\": self.config,\n# \"value\": self.engine._plugin_value,\n# \"data\": self.data,\n# \"path\": str(self.path.relative_to(Path.cwd())),\n# }\n# data = dict(self.config)\n# return super()._report_data(data)\n\n\n# class Plugin(PluginEngineClass, 
PluginFileGlob):\nclass Plugin(BackendPlugin):\n \"\"\"Generic Plugin Class\"\"\"\n\n _plugin_name = \"file\"\n\n _plugin_engine = \"file\"\n # _schema_props_files = {\n _schema_props_new = {\n \"path\": {\n \"anyOf\": [\n {\n \"type\": \"string\",\n },\n {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\",\n },\n },\n ]\n },\n \"glob\": {\n \"default\": \"ansible.yml\",\n \"anyOf\": [\n {\n \"type\": \"string\",\n },\n # {\n # \"type\": \"array\",\n # \"items\": {\n # \"type\": \"string\",\n # },\n # },\n ],\n },\n }\n\n extensions = {\".yml\": \"yaml\", \".yaml\": \"yaml\"}\n\n def _init(self):\n\n # Guess top path\n top_path = self.ns.run[\"path_config\"]\n path_prefix = self.ns.config[\"config\"].get(\"file_path_prefix\", None)\n if path_prefix:\n top_path = os.path.join(top_path, path_prefix)\n self.top_path = top_path\n\n # Fetch module config\n path_suffix = self.ns.config[\"config\"].get(\"file_path_suffix\", \"auto\")\n if path_suffix == \"auto\":\n path_suffix = f\"/{self.ns.name}\"\n self.path_suffix = path_suffix\n\n def fetch_data(self, config) -> list:\n\n path = config.get(\"path\")\n if self.path_suffix:\n path = f\"{path}{self.path_suffix}\"\n\n raw_data = None\n status = \"not_found\"\n for ext, parser in self.extensions.items():\n new_path = os.path.join(self.top_path, path + ext)\n log.debug(\"Looking into %s\", new_path)\n if os.path.isfile(new_path):\n status = \"found\"\n try:\n raw_data = anyconfig.load(new_path, ac_parser=parser)\n except Exception:\n status = \"broken\"\n raw_data = None\n break\n\n ret = BackendCandidate(\n path=new_path,\n status=status,\n run=config,\n data=raw_data,\n )\n\n return [ret]\n","repo_name":"were10/kheops","sub_path":"kheops/plugin/backend/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"35299032363","text":"import importlib\nimport logging\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.core.util.event_pb2 import SessionLog\n\nfrom tensorforce import TensorforceError\n\n\nepsilon = 1e-6\n\n\nlog_levels = dict(\n info=logging.INFO,\n debug=logging.DEBUG,\n critical=logging.CRITICAL,\n warning=logging.WARNING,\n fatal=logging.FATAL\n)\n\n\ndef prod(xs):\n \"\"\"Computes the product along the elements in an iterable. Returns 1 for empty iterable.\n\n Args:\n xs: Iterable containing numbers.\n\n Returns: Product along iterable.\n\n \"\"\"\n p = 1\n for x in xs:\n p *= x\n return p\n\n\ndef rank(x):\n return x.get_shape().ndims\n\n\ndef shape(x, unknown=-1):\n return tuple(unknown if dims is None else dims for dims in x.get_shape().as_list())\n\n\ndef cumulative_discount(values, terminals, discount, cumulative_start=0.0):\n \"\"\"\n Compute cumulative discounts.\n Args:\n values: Values to discount\n terminals: Booleans indicating terminal states\n discount: Discount factor\n cumulative_start: Float or ndarray, estimated reward for state t + 1. 
Default 0.0\n\n Returns:\n dicounted_values: The cumulative discounted rewards.\n \"\"\"\n if discount == 0.0:\n return np.asarray(values)\n\n # cumulative start can either be a number or ndarray\n if type(cumulative_start) is np.ndarray:\n discounted_values = np.zeros((len(values),) + (cumulative_start.shape))\n else:\n discounted_values = np.zeros(len(values))\n\n cumulative = cumulative_start\n for n, (value, terminal) in reversed(list(enumerate(zip(values, terminals)))):\n if terminal:\n cumulative = np.zeros_like(cumulative_start, dtype=np.float32)\n cumulative = value + cumulative * discount\n discounted_values[n] = cumulative\n\n return discounted_values\n\n\ndef np_dtype(dtype):\n \"\"\"Translates dtype specifications in configurations to numpy data types.\n Args:\n dtype: String describing a numerical type (e.g. 'float') or numerical type primitive.\n\n Returns: Numpy data type\n\n \"\"\"\n if dtype == 'float' or dtype == float or dtype == np.float32 or dtype == tf.float32:\n return np.float32\n elif dtype == 'int' or dtype == int or dtype == np.int32 or dtype == tf.int32:\n return np.int32\n elif dtype == 'bool' or dtype == bool or dtype == np.bool_ or dtype == tf.bool:\n return np.bool_\n else:\n raise TensorforceError(\"Error: Type conversion from type {} not supported.\".format(str(dtype)))\n\n\ndef tf_dtype(dtype):\n \"\"\"Translates dtype specifications in configurations to tensorflow data types.\n\n Args:\n dtype: String describing a numerical type (e.g. 'float'), numpy data type,\n or numerical type primitive.\n\n Returns: TensorFlow data type\n\n \"\"\"\n if dtype == 'float' or dtype == float or dtype == np.float32 or dtype == tf.float32:\n return tf.float32\n elif dtype == 'int' or dtype == int or dtype == np.int32 or dtype == tf.int32:\n return tf.int32\n elif dtype == 'bool' or dtype == bool or dtype == np.bool_ or dtype == tf.bool:\n return tf.bool\n else:\n raise TensorforceError(\"Error: Type conversion from type {} not supported.\".format(str(dtype)))\n\n\ndef unflatten(vector, shapes):\n i = 0\n arrays = []\n for shape in shapes:\n size = np.prod(shape, dtype=np.int)\n array = vector[i:(i + size)].reshape(shape)\n arrays.append(array)\n i += size\n assert len(vector) == i, \"Passed weight does not have the correct shape.\"\n return arrays\n\n\ndef compute_ranks(x):\n \"\"\"\n Returns ranks in [0, len(x))\n Note: This is different from scipy.stats.rankdata, which returns ranks in\n [1, len(x)].\n \"\"\"\n assert x.ndim == 1\n ranks = np.empty(len(x), dtype=int)\n ranks[x.argsort()] = np.arange(len(x))\n return ranks\n\n\ndef compute_centered_ranks(x):\n y = compute_ranks(x.ravel()).reshape(x.shape).astype(np.float32)\n y /= (x.size - 1)\n y -= 0.5\n return y\n\n\ndef itergroups(items, group_size):\n assert group_size >= 1\n group = []\n for x in items:\n group.append(x)\n if len(group) == group_size:\n yield tuple(group)\n del group[:]\n if group:\n yield tuple(group)\n\n\ndef batched_weighted_sum(weights, vecs, slice_size):\n total = 0\n num_items_summed = 0\n for batch_weights, batch_vecs in zip(itergroups(weights, slice_size),\n itergroups(vecs, slice_size)):\n assert len(batch_weights) == len(batch_vecs) <= slice_size\n total += np.dot(np.asarray(batch_weights, dtype=np.float32),\n np.asarray(batch_vecs, dtype=np.float32))\n num_items_summed += len(batch_weights)\n return total, num_items_summed\n\n\ndef run_with_location_trace(self, sess, op):\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n sess.run(op, 
options=run_options, run_metadata=run_metadata)\n for device in run_metadata.step_stats.dev_stats:\n print(device.device)\n for node in device.node_stats:\n print(\" \", node.node_name)\n\n\n\ndef get_object(obj, predefined_objects=None, default_object=None, kwargs=None):\n \"\"\"\n Utility method to map some kind of object specification to its content,\n e.g. optimizer or baseline specifications to the respective classes.\n\n Args:\n obj: A specification dict (value for key 'type' optionally specifies\n the object, options as follows), a module path (e.g.,\n my_module.MyClass), a key in predefined_objects, or a callable\n (e.g., the class type object).\n predefined_objects: Dict containing predefined set of objects,\n accessible via their key\n default_object: Default object is no other is specified\n kwargs: Arguments for object creation\n\n Returns: The retrieved object\n\n \"\"\"\n args = ()\n kwargs = dict() if kwargs is None else kwargs\n\n if isinstance(obj, dict):\n kwargs.update(obj)\n obj = kwargs.pop('type', None)\n\n if predefined_objects is not None and obj in predefined_objects:\n obj = predefined_objects[obj]\n elif isinstance(obj, str):\n if obj.find('.') != -1:\n module_name, function_name = obj.rsplit('.', 1)\n module = importlib.import_module(module_name)\n obj = getattr(module, function_name)\n else:\n predef_obj_keys = list(predefined_objects.keys())\n raise TensorforceError(\"Error: object {} not found in predefined objects: {}\".format(obj,predef_obj_keys))\n elif callable(obj):\n pass\n elif default_object is not None:\n args = (obj,)\n obj = default_object\n else:\n # assumes the object is already instantiated\n return obj\n\n return obj(*args, **kwargs)\n\n\nclass UpdateSummarySaverHook(tf.train.SummarySaverHook):\n\n def __init__(self, update_input, *args, **kwargs):\n super(UpdateSummarySaverHook, self).__init__(*args, **kwargs)\n self.update_input = update_input\n\n def before_run(self, run_context):\n self._request_summary = run_context.original_args[1] is not None and \\\n run_context.original_args[1].get(self.update_input, False) and \\\n (self._next_step is None or self._timer.should_trigger_for_step(self._next_step))\n requests = {'global_step': self._global_step_tensor}\n if self._request_summary:\n if self._get_summary_op() is not None:\n requests['summary'] = self._get_summary_op()\n return tf.train.SessionRunArgs(requests)\n\n def after_run(self, run_context, run_values):\n if not self._summary_writer:\n return\n\n stale_global_step = run_values.results[\"global_step\"]\n global_step = stale_global_step + 1\n if self._next_step is None or self._request_summary:\n global_step = run_context.session.run(self._global_step_tensor)\n\n if self._next_step is None:\n self._summary_writer.add_session_log(SessionLog(status=SessionLog.START), global_step)\n\n if \"summary\" in run_values.results:\n self._timer.update_last_triggered_step(global_step)\n for summary in run_values.results[\"summary\"]:\n self._summary_writer.add_summary(summary, global_step)\n\n self._next_step = global_step + 1\n\n","repo_name":"rec-agent/rec-rl","sub_path":"tensorforce/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8453,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"88"} +{"seq_id":"18299383756","text":"from time import sleep\n\nfrom selenium import webdriver\n\n\n# 获取浏览器对象\ndriver = webdriver.Firefox()\n# 打开URL\ndriver.get('https://www.baidu.com/')\n\n# 查找输入框\ninputa = driver.find_element_by_id('kw')\n\n# 
输入内容\ninputa.send_keys('python')\n\n# 暂停3秒\nsleep(3)\n\n# 退出浏览器驱动\ndriver.quit()","repo_name":"shangguanxiaoguan/HMPython","sub_path":"seleniumtest/test02_id定位.py","file_name":"test02_id定位.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"37097865097","text":"import sys,getopt\nimport pandas as pd\nimport numpy as np\nfrom sklearn import model_selection\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.feature_selection import RFE\nfrom sklearn import ensemble\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.datasets import make_regression\nimport math\nimport pickle\n\nmodel_file = 'model.sav'\nmodel = None\nX_test = None\nY_test = None\n\ncust_dict = None\ncust_avg_settled = None\n\n\ndef main(argv):\n trainfile=''\n testfile=''\n datafile=''\n try:\n opts, args = getopt.getopt(argv, \"ht:e:p\", [\"train=\", \"test=\",\"predict=\"])\n except getopt.GetoptError:\n print('test.py -t -v -p ')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print('test.py -t -v -p ')\n sys.exit()\n elif opt in (\"-t\", \"--train\"):\n train(arg)\n elif opt in (\"-e\", \"--test\"):\n testfile = arg\n elif opt in (\"-p\", \"--predict\"):\n predict(arg)\n\n\ndef train(trainfile):\n global model\n dataset = pd.read_csv(trainfile)\n dataset_new = extract_features(dataset)\n array = dataset_new.values\n n = len(dataset_new.columns)\n X = array[:, 0:n - 1]\n Y = array[:, n - 1]\n seed = 7\n X_train, X_rest, Y_train, Y_rest = model_selection.train_test_split(X, Y, test_size=0.40, random_state=seed)\n X_validation, X_test, Y_validation, Y_test = model_selection.train_test_split(X_rest, Y_rest, test_size=0.50,random_state=seed)\n # model = linear_reg(X_train,Y_train)\n model = gd_reg(X_train, Y_train)\n model_stats(model,X_validation,Y_validation)\n pickle.dump(model, open(model_file, 'wb'))\n print(\"model saved\")\n return model\n\n\ndef linear_reg(X_train,Y_train):\n lm = LinearRegression()\n lm.fit(X_train, Y_train)\n print(\"done training using linear regressor...\")\n return lm\n\n\ndef random_forest(X_train,Y_train):\n forest_reg = RandomForestRegressor(random_state=42)\n forest_reg.fit(X_train, Y_train)\n print(\"done training using random forest regressor...\")\n return forest_reg\n\n\ndef gd_reg(X_train,Y_train):\n gd = ensemble.GradientBoostingRegressor()\n gd.fit(X_train, Y_train)\n print(\"done training using gradient boost regressor...\")\n return gd\n\n\ndef model_stats(lm,X_validation, Y_validation):\n print(\"score= \", lm.score(X_validation, Y_validation))\n y_predict = lm.predict(X_validation)\n regression_model_mse = mean_squared_error(y_predict, Y_validation)\n print(\"regression rmse:\", math.sqrt(regression_model_mse))\n\n\ndef extract_features(dataset):\n global cust_avg_settled\n global cust_dict\n grouped = dataset.groupby('customerID', as_index=False)\n invoice_count = grouped.agg({\"invoiceNumber\": \"count\"})\n invoice_count.columns = ['customerID', 'total']\n\n custlist = invoice_count['customerID'].tolist()\n cust_dict = {x: custlist.index(x) for x in custlist}\n\n df = pd.DataFrame(list(cust_dict.items()), columns=['customerID', 'code'])\n\n df.to_csv(\"customer_map.csv\", index=0)\n\n settled_days_avg = grouped.agg({'DaysToSettle': 'mean'})\n settled_days_avg.columns = ['customerID', 'avgDaysToSettle']\n\n 
settled_days_avg.to_csv(\"avg_days.csv\", index=0)\n cust_avg_settled = pd.Series(settled_days_avg.avgDaysToSettle.values, index=settled_days_avg.customerID).to_dict()\n dataset_enriched = calc_features(dataset)\n return dataset_enriched\n\n\ndef calc_features(dataset):\n global cust_avg_settled\n global cust_dict\n dataset['invoicemonth'] = pd.to_datetime(dataset['InvoiceDate']).dt.month\n dataset['invoicedate'] = pd.to_datetime(dataset['InvoiceDate']).dt.day\n dataset['invoiceday'] = pd.to_datetime(dataset['InvoiceDate']).dt.weekday\n dataset['monthend'] = np.where(dataset['invoicedate'] > 27, 1, 0)\n dataset['firsthalfmonth'] = np.where(dataset['invoicedate'] < 16, 1, 0)\n paperless = {'Paper': 0, 'Electronic': 1}\n dataset['paperless'] = dataset['PaperlessBill'].map(paperless)\n disputed = {'Yes': 1, 'No': 0}\n dataset['disputed'] = dataset['Disputed'].map(disputed)\n\n if cust_avg_settled is None:\n cust_avg_df = pd.read_csv('avg_days.csv')\n cust_avg_settled = pd.Series(cust_avg_df.avgDaysToSettle.values, index=cust_avg_df.customerID).to_dict()\n\n dataset['avgDaysToSettle'] = dataset['customerID'].map(cust_avg_settled)\n if cust_dict is None:\n cust_map_df = pd.read_csv('customer_map.csv')\n cust_dict = pd.Series(cust_map_df.code.values, index=cust_map_df.customerID).to_dict()\n\n dataset['cust'] = dataset['customerID'].map(cust_dict)\n dataset_final = dataset[['cust', 'InvoiceAmount', 'invoicemonth', 'monthend', 'firsthalfmonth', 'paperless', 'disputed', 'avgDaysToSettle','DaysToSettle']]\n cols = dataset_final.columns\n dataset_final[cols] = dataset_final[cols].apply(pd.to_numeric)\n return dataset_final\n\n\ndef auto_extract_feature(X_train,Y_train):\n rfe = RFE(model, 4)\n fit = rfe.fit(X_train, Y_train)\n print(\"Num Features: %d\" % fit.n_features_)\n print(\"Selected Features: %s\" % fit.support_)\n print(\"Feature Ranking: %s\" % fit.ranking_)\n\n\ndef file_to_array(filename):\n invoice_data = pd.read_csv(filename)\n invoice_data_enriched = calc_features(invoice_data)\n array = invoice_data_enriched.values\n n = len(invoice_data_enriched.columns)\n X = array[:, 0:n - 1]\n return X\n\n\ndef predict(datafile):\n invoice_data = pd.read_csv(datafile)\n invoice_data_enriched = calc_features(invoice_data)\n array = invoice_data_enriched.values\n n = len(invoice_data_enriched.columns)\n x_value = array[:, 0:n - 1]\n\n loaded_model = pickle.load(open(model_file, 'rb'))\n y_value = loaded_model.predict(x_value)\n # print(\"prediction: \")\n # print(y_value)\n invoice_data['predicted'] = y_value\n # print(invoice_data)\n get_predicted_settled_date(invoice_data)\n build_graphs(invoice_data)\n\n\ndef get_predicted_settled_date(invoice_data):\n\n invoice_data['predictedDate'] = pd.to_datetime(invoice_data.InvoiceDate) + pd.to_timedelta(pd.np.ceil(invoice_data.predicted),unit=\"D\")\n invoice_data['predictedDate'] = invoice_data['predictedDate'].dt.strftime('%m/%d/%Y')\n out = invoice_data[['countryCode','customerID','invoiceNumber','InvoiceDate','DueDate','InvoiceAmount','Disputed','PaperlessBill','predictedDate']].copy()\n print(out)\n\n\ndef build_graphs(invoice_data):\n invoice_cash=invoice_data[['predictedDate','InvoiceAmount']].copy()\n invoice_cash = invoice_cash.sort_values(by='predictedDate')\n invoice_cash = invoice_cash.assign(sum=invoice_cash.InvoiceAmount.cumsum())\n invoice_cash['sum']=invoice_cash['sum'].round()\n # print(invoice_cash.head(2))\n invoice_bar=invoice_data[['predicted','invoiceNumber']].copy()\n invoice_bar['ontime'] = 
np.where(invoice_bar['predicted']<30,1,0)\n invoice_bar['delayed10'] = np.where(((invoice_bar['predicted']>30) & (invoice_bar['predicted']<40)),1,0)\n invoice_bar['delayed30'] = np.where(((invoice_bar['predicted']>40) & (invoice_bar['predicted']<60)),1,0)\n invoice_bar['delayed30p'] = np.where((invoice_bar['predicted']>60),1,0)\n ontime=invoice_bar['ontime'].sum()\n delayed10=invoice_bar['delayed10'].sum()\n delayed30=invoice_bar['delayed30'].sum()\n delayed30p=invoice_bar['delayed30p'].sum()\n array = invoice_cash.values\n x = array[:, 0:1]\n y = array[:, 2:3]\n n = len(x)\n x = np.reshape(x, n)\n y = np.reshape(y, n)\n # write as json\n var = '{\"label1\": [\"' + '\",\"'.join(x) + '\"' + '] ,\"data1\": [' + ','.join(map(str, y)) + '],'+'\"data2\":['+str(ontime)+','+str(delayed10)+','+str(delayed30)+','+str(delayed30p)+']}'\n # print(var)\n text_file = open(\"out.json\", \"w\")\n text_file.write(var)\n text_file.close()\n # print(invoice_bar.head(2))\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])","repo_name":"aarcha123/awesometif","sub_path":"invoice_model.py","file_name":"invoice_model.py","file_ext":"py","file_size_in_byte":8003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"2035668033","text":"\"\"\"Sensor platform for switchbot_cloud.\"\"\"\nfrom datetime import timedelta\nfrom homeassistant.components.sensor import ENTITY_ID_FORMAT\nfrom homeassistant.const import DEVICE_CLASS_BATTERY, PERCENTAGE\nfrom homeassistant.core import callback\nfrom homeassistant.helpers.dispatcher import async_dispatcher_connect\nfrom homeassistant.helpers.entity import async_generate_entity_id, Entity\n\nfrom .account import get_account_from_config_entry\nfrom .const import DOMAIN, LOGGER, NAME, NEW_SENSOR, VERSION\n\nSCAN_INTERVAL = timedelta(minutes=5)\nPARALLEL_UPDATES = 1\n\n\nasync def async_setup_entry(hass, config_entry, async_add_entities):\n \"\"\"Set up a sensors for SwitchBot Cloud.\"\"\"\n account = get_account_from_config_entry(hass, config_entry)\n\n LOGGER.debug(\"Setup sensors for %s\", account.username)\n\n @callback\n def async_add_sensor(devices):\n entities = []\n\n for parent_device in devices:\n parent_id = parent_device.id\n parent_name = parent_device.name\n children = parent_device.children\n\n if not children:\n children = [parent_device]\n\n for device in children:\n device_id = device.id\n name = device.name\n battery = device.battery\n\n if device_id in account.known_ids[NEW_SENSOR]:\n continue\n\n entity_id = async_generate_entity_id(\n ENTITY_ID_FORMAT, \"{} battery level\".format(name), hass=hass\n )\n\n LOGGER.debug(\"Initialize %s\", entity_id)\n\n entities.append(\n SwitchBotCloudBatterySensor(\n entity_id,\n device_id,\n device,\n name,\n parent_id,\n parent_device,\n parent_name,\n battery,\n )\n )\n\n account.known_ids[NEW_SENSOR].append(device_id)\n\n if entities:\n async_add_entities(entities)\n\n account.listeners.append(\n async_dispatcher_connect(\n hass, account.async_signal_new_device(NEW_SENSOR), async_add_sensor\n )\n )\n\n\nclass SwitchBotCloudBatterySensor(Entity):\n \"\"\"A battery sensor implementation for SwitchBot Cloud.\"\"\"\n\n def __init__(\n self,\n entity_id,\n device_id,\n device,\n name,\n parent_id,\n parent_device,\n parent_name,\n battery,\n ):\n \"\"\"Initialize a sensor.\"\"\"\n super().__init__()\n\n self.entity_id = entity_id\n self._device = device\n\n self._unique_id = \"{}_battery_level\".format(device_id)\n self._name = name\n self._battery = battery\n\n 
self._parent = parent_device\n self._parent_id = parent_id\n self._parent_name = parent_name\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def device_info(self):\n \"\"\"Device info.\"\"\"\n return {\n \"identifiers\": {(DOMAIN, self._parent_id)},\n \"name\": self._parent_name,\n \"manufacturer\": NAME,\n \"model\": VERSION,\n }\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return self._battery\n\n @property\n def unique_id(self):\n \"\"\"Return a unique ID.\"\"\"\n return self._unique_id\n\n @property\n def device_class(self):\n \"\"\"Return the class of the sensor.\"\"\"\n return DEVICE_CLASS_BATTERY\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the units of measurement.\"\"\"\n return PERCENTAGE\n\n async def async_update(self):\n \"\"\"Update the sensor state.\"\"\"\n self._name = await self.hass.async_add_executor_job(\n getattr, self._device, \"name\"\n )\n self._battery = await self.hass.async_add_executor_job(\n getattr, self._device, \"battery\"\n )\n\n self._parent_name = await self.hass.async_add_executor_job(\n getattr, self._parent, \"name\"\n )\n\n LOGGER.debug(\n \"Update battery sensor state: %s = %s\", self.entity_id, self._battery\n )\n","repo_name":"stuart-c/homeassistant-switchbot","sub_path":"custom_components/switchbot_cloud/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"88"} +{"seq_id":"29842001525","text":"import turtle\nimport time\nfrom game_design import Rectangle, TileGame, initialize_file\n\n\ndef set_board():\n # This function draws the layout of the game interface\n play_board = Rectangle(460, 500)\n play_board.draw(-125, 70)\n leader_board = Rectangle(230, 500, pencolor=\"blue\")\n leader_board.draw(240, 70)\n status_board = Rectangle(710, 90)\n status_board.draw(0, -280) \n \n\ndef main():\n s = turtle.Screen()\n s.setup(width=840, height=840)\n s.title(\"CS5001 Sliding Puzzle Game\")\n \n s.addshape(\"Resources/splash_screen.gif\")\n initial = turtle.Turtle()\n initial.shape(\"Resources/splash_screen.gif\")\n time.sleep(1)\n s.clear()\n \n user = s.textinput(\"CS5001 Puzzle Slide\", \"Your Name:\")\n \n num = s.numinput(\"5001 Puzzle Slide - Moves\",\n \"Enter the number of moves (chances) you want (5-200)?\", minval=5, maxval=200)\n \n set_board()\n puzzle_info = initialize_file(\"yoshi.puz\")\n instance = TileGame(puzzle_info, user, num)\n instance.checked() # check if the initial puzzle solvable\n instance.status_button() # load status button\n instance.load_leaders() # load leaders\n instance.load_puzzle() # load puzzles\n instance.load_sample() # load sample picture for the puzzle\n s.onclick(instance.get_click)\n\n turtle.done()\n\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"IvyWang152/Puzzle-Slider-Game","sub_path":"puzzle_game.py","file_name":"puzzle_game.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"29469678629","text":"from tkinter import ttk\nfrom tkinter import filedialog\nfrom threading import Thread\nimport tkinter as tk\nimport subprocess\nimport os\nimport json\nimport easygui\n\nPREFERENCES_FILE = '.converter_data.json'\n\n\ndef save_preferences(prefs):\n with open(PREFERENCES_FILE, 'w') as file:\n json.dump(prefs, file)\n\n\ndef load_preferences():\n if 
os.path.exists(PREFERENCES_FILE):\n        with open(PREFERENCES_FILE, 'r') as file:\n            return json.load(file)\n    return {'file_directory': '.', 'selected_directory': '.'}\n\n\ndef select_files():\n    filetypes = [\n        [\"*.aac\", \"*.m4a\", \"*.flac\", \"*.mp3\", \"*.opus\", \"*.ogg\", \"*.wav\", \"*.ac3\", \"*.alac\", \"*.ape\",\n         \"*.au\", \"*.caf\", \"*.dts\", \"*.gsm\", \"*.mka\", \"*.mlp\", \"*.mp2\", \"*.mpc\", \"*.ra\", \"*.spx\", \"*.tta\",\n         \"*.voc\", \"*.w64\", \"*.wma\", \"Audio files\"],\n    ]\n    files = easygui.fileopenbox(default=os.path.join(preferences['file_directory'], '*'), filetypes=filetypes,\n                                multiple=True)\n    if not files:  # fileopenbox returns None when the dialog is cancelled\n        return\n\n    file_list.delete(0, tk.END)\n    for file in files:\n        file_list.insert(tk.END, file)\n    if files:\n        preferences['file_directory'] = os.path.dirname(files[0])\n        save_preferences(preferences)\n    progress_bar['maximum'] = len(files)\n    progress_bar['value'] = 0\n\n\ndef select_directory():\n    directory = filedialog.askdirectory(initialdir=preferences['selected_directory'], title='Select Directory')\n    directory_entry.delete(0, tk.END)\n    directory_entry.insert(0, directory)\n    preferences['selected_directory'] = directory\n    save_preferences(preferences)\n\n\ndef convert_files_thread():\n    directory = directory_entry.get()\n    if not os.path.isdir(directory):\n        os.makedirs(directory)\n    files = file_list.get(0, tk.END)\n    format_option = format_var.get()\n    output_formats = format_option.split(' - ')\n    output_format = output_formats[0]\n    bitrate_option = None\n    if 'kbps' in format_option:\n        bitrate_option = output_formats[1]\n    if bitrate_option:\n        bitrate_option = bitrate_option.replace('kbps', 'k') # Convert the \"kbps\" suffix to ffmpeg's \"k\" rate notation\n\n    progress_bar['maximum'] = len(files)\n    progress_bar['value'] = 0\n\n    for file in files:\n        output_file = os.path.join(directory, file.split(os.path.sep)[-1].rsplit('.', 1)[0] + '.' 
+ output_format)\n        if os.path.isfile(output_file):\n            root.after(0, lambda value=progress_bar['value'] + 1: progress_bar.config(value=value))\n            continue\n        command = ['ffmpeg', '-i', file, '-map_metadata', '0', '-y']\n\n        if output_format == 'mp3' and bitrate_option:\n            command += ['-b:a', bitrate_option]\n        elif format_option == 'wav - 22050Hz':\n            command += ['-ar', '22050']\n\n        command.append(output_file)\n        subprocess.run(command)\n        root.after(0, lambda value=progress_bar['value'] + 1: progress_bar.config(value=value))\n\n    root.after(0, lambda: progress_bar.config(value=0))\n\n\ndef convert_files():\n    thread = Thread(target=convert_files_thread)\n    thread.start()\n\n\npreferences = load_preferences()\n\nroot = tk.Tk()\nroot.title('Audio File Converter')\n\nselect_button = tk.Button(root, text='Select Files', command=select_files)\nselect_button.pack()\n\nfile_list = tk.Listbox(root)\nfile_list.pack(fill=tk.BOTH, expand=tk.YES)\n\nselect_directory_button = tk.Button(root, text='Select Output Directory', command=select_directory)\nselect_directory_button.pack()\n\ndirectory_entry = tk.Entry(root)  # Single-line entry widget for editing the selected directory\ndirectory_entry.pack(fill=tk.X)\ndirectory_entry.insert(0, preferences['selected_directory'])  # Set the initial value from preferences\n\nformat_var = tk.StringVar(root)\nformat_var.set('wav')  # default value\nformat_menu = tk.OptionMenu(root, format_var, 'mp3 - 320kbps', 'mp3 - 192kbps', 'flac', 'wav', 'wav - 22050Hz')\nformat_menu.pack()\n\nconvert_button = tk.Button(root, text='Convert Files', command=convert_files)\nconvert_button.pack()\n\nprogress_bar = ttk.Progressbar(root, orient='horizontal', length=200, mode='determinate')\nprogress_bar.pack()\n\nroot.mainloop()\n","repo_name":"Henri-Laiho/youtubify","sub_path":"src/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"}
+{"seq_id":"26885121324","text":"from httprunner import HttpRunner, Config, Step, RunRequest\n# Operations > picture management list\nclass picture_list(HttpRunner):\n    config = (\n        Config(\"Operations > picture management list\")\n        .base_url(\"${ENV(BASE_URL)}\")\n        .verify(False)\n        .variables()\n    )\n    teststeps = [\n        Step(\n            RunRequest(\"Operations > picture management list\")\n            .post(\"/picture/list.do\")\n            .with_headers(**{\n                \"Content - Type\":\"application/x-www-form-urlencoded; charset=UTF-8\",\n                \"Content - Length\":\"application/x-www-form-urlencoded; charset=UTF-8\",\n                \"Cookie\":\"$Cookie\"\n            })\n            .with_data(\n                {\n                    \"isAllow\":\"$isAllow\", # 1 = enabled\n                    \"picturePurpose\":\"$picturePurpose\", # daily sign-in = sign\n                    \"start\": \"0\",\n                    \"length\": \"10\"\n                }\n            )\n            .extract()\n            .with_jmespath(\"body.body.data[0].id\",\"id\")\n            .validate()\n            .assert_equal(\"status_code\", 200)\n        )\n    ]\nif __name__ == '__main__':\n    picture_list().test_start()","repo_name":"465469264/master","sub_path":"api/web/picture_list.py","file_name":"picture_list.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"}
+{"seq_id":"28294043786","text":"import torch\r\nimport torch.nn as nn\r\n# from SpaNet import BCR, denselayer\r\nimport numpy as np\r\nimport torch.nn.functional as f\r\nclass SwishImplementation(torch.autograd.Function):\r\n    @staticmethod\r\n    def forward(ctx, i):\r\n        result = i * torch.sigmoid(i)\r\n        ctx.save_for_backward(i)\r\n        return result\r\n\r\n    @staticmethod\r\n    def backward(ctx, grad_output):\r\n        i = ctx.saved_tensors[0]  # saved_tensors replaces the long-deprecated saved_variables\r\n        sigmoid_i = 
torch.sigmoid(i)\r\n return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))\r\n \r\nclass MemoryEfficientSwish(nn.Module):\r\n def forward(self, x):\r\n return SwishImplementation.apply(x)\r\n\r\nclass Swish(nn.Module):\r\n def forward(self, x):\r\n return x * torch.sigmoid(x)\r\n\r\nclass My_Bn(nn.Module):\r\n def __init__(self):\r\n super(My_Bn,self).__init__()\r\n def forward(self,x):\r\n return x - nn.AdaptiveAvgPool2d(1)(x)\r\n \r\n\r\nclass BCR(nn.Module):\r\n def __init__(self,kernel,cin,cout,group=1,stride=1,RELU=True,padding = 0,BN=False):\r\n super(BCR,self).__init__()\r\n if stride > 0:\r\n self.conv = nn.Conv2d(in_channels=cin, out_channels=cout,kernel_size=kernel,groups=group,stride=stride,padding= padding)\r\n else:\r\n self.conv = nn.ConvTranspose2d(in_channels=cin, out_channels=cout,kernel_size=kernel,groups=group,stride=int(abs(stride)),padding=padding)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.Swish = MemoryEfficientSwish()\r\n \r\n if RELU:\r\n if BN:\r\n # self.Bn = nn.BatchNorm2d(num_features=cin)\r\n self.Bn = My_Bn()\r\n self.Module = nn.Sequential(\r\n self.Bn,\r\n self.conv,\r\n self.relu\r\n )\r\n else:\r\n self.Module = nn.Sequential(\r\n self.conv,\r\n self.relu\r\n )\r\n else:\r\n if BN:\r\n # self.Bn = nn.BatchNorm2d(num_features=cin)\r\n self.Bn = My_Bn()\r\n self.Module = nn.Sequential(\r\n self.Bn,\r\n self.conv,\r\n )\r\n else:\r\n self.Module = nn.Sequential(\r\n self.conv,\r\n )\r\n\r\n def forward(self, x):\r\n output = self.Module(x)\r\n return output\r\n\r\nclass denselayer(nn.Module):\r\n def __init__(self,cin,cout=31,RELU=True):\r\n super(denselayer, self).__init__()\r\n self.compressLayer = BCR(kernel=1,cin=cin,cout=cout,RELU=True,BN=True)\r\n self.actlayer = BCR(kernel=3,cin=cout,cout=cout,group=1,RELU=RELU,padding=1,BN=True)\r\n # self.Conv2d = BCR(kernel=3,cin=cin,cout=cout,group=1,RELU=RELU,padding=1,BN=False)\r\n self.bn = My_Bn()\r\n def forward(self, x):\r\n output = self.compressLayer(x)\r\n output = self.actlayer(output)\r\n # output = self.bn(output) \r\n return output\r\n\r\nclass Updenselayer(nn.Module):\r\n def __init__(self,cin,cout=31,RELU=True):\r\n super(Updenselayer, self).__init__()\r\n self.compressLayer = BCR(kernel=1,cin=cin,cout=cout,RELU=True,BN=False)\r\n self.actlayer = BCR(kernel=4,cin=cout,cout=cout,group=1,RELU=RELU,padding=1,stride = -2, BN=False)\r\n # self.Conv2d = BCR(kernel=3,cin=cin,cout=cout,group=1,RELU=RELU,padding=1,BN=False)\r\n def forward(self, x):\r\n output = self.compressLayer(x)\r\n output = self.actlayer(output)\r\n return output\r\nclass recon_net(nn.Module):\r\n def __init__(self,cin,cout =31,RELU=True):\r\n super(recon_net,self).__init__()\r\n self.convlayer = nn.Sequential(\r\n Updenselayer(cin = 31,cout=62,RELU=True),\r\n denselayer(cin = 62,cout= 62),\r\n Updenselayer(cin = 62,cout=128,RELU=True),\r\n denselayer(cin = 128,cout= 128),\r\n Updenselayer(cin = 128,cout=31,RELU=False),\r\n )\r\n self.bn = My_Bn()\r\n self.pool = nn.AdaptiveAvgPool2d(1)\r\n def forward(self,x):\r\n x_ = self.pool(x)\r\n x = self.convlayer(x)\r\n x = self.bn(x)\r\n output = x+x_\r\n return output\r\n\r\nclass scale_learning(nn.Module):\r\n def __init__(self):\r\n super(scale_learning,self).__init__()\r\n self.dmodule = nn.Sequential(\r\n BCR(kernel = 4,cin = 31, cout = 64, group= 1, stride= 2,padding=1,BN=False),\r\n BCR(kernel = 4,cin = 64, cout = 64, group= 1, stride= 2,padding=1,BN=False),\r\n BCR(kernel = 4,cin = 64, cout = 64, group= 1, stride= 2,padding=1,BN=False),\r\n BCR(kernel = 4,cin = 64, cout = 
31, group= 1, stride= 2,padding=1,BN=False,RELU=False))\r\n        self.pool_layer = nn.AdaptiveAvgPool2d(output_size=1)\r\n    def forward(self,x):\r\n        feature_1 = self.pool_layer(x)\r\n        feature_2 = self.dmodule(x)\r\n        # feature_2 = torch.nn.functional.sigmoid(feature_2)\r\n        feature_2 = feature_1+ self.pool_layer(feature_2)\r\n        return feature_2\r\n\r\nclass stage(nn.Module):\r\n    def __init__(self,cin,cout,final=False,extra=0):\r\n        super(stage,self).__init__()\r\n        self.Upconv = BCR(kernel = 3,cin = cin, cout = cout,stride= 1,padding=1)\r\n        if final == True:\r\n            f_cout = cout +1\r\n        else:\r\n            f_cout = cout\r\n        mid = cout*3\r\n        self.denselayers = nn.ModuleList([\r\n            denselayer(cin=2*cout+extra,cout=cout*2),\r\n            denselayer(cin=4*cout+extra,cout=cout*2),\r\n            denselayer(cin=6*cout+extra,cout=cout*2),\r\n            denselayer(cin=8*cout+extra,cout=cout*2),\r\n            denselayer(cin=10*cout+extra,cout=cout*2),\r\n            denselayer(cin=12*cout+extra,cout=cout*2),\r\n            denselayer(cin=14*cout+extra,cout=cout*2),\r\n            denselayer(cin=16*cout+extra,cout=f_cout,RELU=False)])\r\n    def forward(self,HSI,MSI,extra_data=None):\r\n        MSI = self.Upconv(MSI)\r\n        if extra_data is not None:\r\n            assert(MSI.shape == extra_data.shape)\r\n            assert(MSI.shape == HSI.shape)\r\n        if extra_data is not None:\r\n            x = torch.cat([HSI,MSI,extra_data],1)\r\n        else:\r\n            x = torch.cat([HSI,MSI],1)\r\n        x = [x]\r\n        for layer in self.denselayers:\r\n            x_ = layer(torch.cat(x,1))\r\n            x.append(x_)\r\n        \r\n        if extra_data is not None:\r\n            if x[-1].shape[1] != HSI.shape[1]:\r\n                # x is a list of feature maps; slice channels from the last entry, mirroring the branch below\r\n                output = torch.tanh(x[-1][:,:HSI.shape[1],:,:]) + HSI\r\n                output = torch.cat((output,torch.sigmoid(x[-1][:,HSI.shape[1]:,:,:])),1)\r\n            else:\r\n                output = torch.tanh(x[-1]) + extra_data\r\n        else:\r\n            if x[-1].shape[1] != MSI.shape[1]:\r\n                output = torch.tanh(x[-1][:,:MSI.shape[1],:,:]) + MSI\r\n                output = torch.cat((output,torch.sigmoid(x[-1][:,MSI.shape[1]:,:,:])),1)\r\n            else:\r\n                output = torch.tanh(x[-1]) + MSI\r\n        return output\r\n\r\nclass SpeNet(nn.Module):\r\n    def __init__(self,extra=[0,0,0]):\r\n        super(SpeNet,self).__init__()\r\n        # self.stages = nn.ModuleList([\r\n        #     stage(cin=3,cout=8,extra = extra[0]),\r\n        #     stage(cin=8+3,cout=16,extra = extra[1]),\r\n        #     stage(cin=16+3,cout=31,extra = extra[2],final=True)])\r\n        self.stages = nn.ModuleList([\r\n            stage(cin=3,cout=8,extra = extra[0]),\r\n            stage(cin=8+3,cout=16,extra = extra[1]),\r\n            stage(cin=16+3,cout=31,extra = extra[2],final=True)])\r\n        self.scale_learning = scale_learning()\r\n        self.pool = nn.AdaptiveAvgPool2d(1)\r\n        # self.refine = BCR(kernel = 3,cin = 31,cout = 31,stride=1,padding=1,BN=False)\r\n        self.refine = nn.Sequential(\r\n            denselayer(cin=31,cout=31))\r\n        # self.recon_net = recon_net(cin = 31)\r\n    def forward(self,HSI,MSI,extra_data=None):\r\n        MSI = MSI - self.pool(MSI)\r\n        MSI = [MSI]\r\n        # scale = self.scale_learning(HSI)\r\n        # HSI = HSI_.detach()- self.pool(HSI)\r\n        # HSI = HSI- self.pool(HSI)\r\n        ref = [np.array(range(8))*4, np.array(range(16))*2]\r\n        ref[0][-1] = 30\r\n        ref[1][-1] = 30\r\n        for index , stage in enumerate(self.stages):\r\n            if index <2:\r\n                HSI_ = HSI[:,ref[index],:,:]\r\n                if extra_data is not None:\r\n                    ex_data = extra_data[:,ref[index],:,:]\r\n            else:\r\n                HSI_ = HSI\r\n                if extra_data is not None:\r\n                    ex_data = extra_data\r\n            if index in [1,2]:\r\n                if extra_data is not None:\r\n                    msi_ = stage(HSI_,torch.cat((MSI[index],MSI[0]),1),extra_data = ex_data)\r\n                else:\r\n                    msi_ = stage(HSI_,torch.cat((MSI[index],MSI[0]),1))\r\n            else:\r\n                if extra_data is not None:\r\n                    msi_ = stage(HSI_,MSI[index],extra_data = ex_data)\r\n                else:\r\n                    msi_ = 
stage(HSI_,MSI[index])\r\n\r\n            MSI.append(msi_)\r\n        refined_hsi = MSI[-1][:,:31,:,:]+HSI\r\n        refined_hsi = self.refine(refined_hsi)+refined_hsi\r\n        return MSI,HSI,refined_hsi\r\n\r\n\r\n\r\n    \r\n","repo_name":"ZHU-Zhiyu/PZRes-Net","sub_path":"demo_cave/SpeNet.py","file_name":"SpeNet.py","file_ext":"py","file_size_in_byte":9128,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"88"}
+{"seq_id":"44343269671","text":"import torch\nimport torch.nn as nn\nfrom torch import tanh, relu_, sigmoid\n\n\nclass Net(nn.Module):\n\n    def __init__(self):\n        super(Net, self).__init__()\n        self.fc1 = nn.Linear(4, 2)\n        self.fc2 = nn.Linear(2, 1)\n\n    def forward(self, x):\n        x = self.fc1(x)\n        x = tanh(x)\n        x = self.fc2(x)\n\n        return x\n\n    def predict(self, x):\n        pred = self.forward(x)\n        binarize = lambda x: 0 if x <= 0.5 else 1\n        ans = [ binarize(p) for p in pred ]\n\n        return torch.tensor(ans)","repo_name":"megaduks/entropy-complex-networks","sub_path":"knnentropyloss/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"}
+{"seq_id":"36750725838","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn.preprocessing as skl\nfrom sklearn.neighbors import KDTree\nimport open3d as o3d\nimport time\nimport pickle\n\n\ndef gaussian(dist, mu=0, sigma=1.0):\n    return (1/(sigma*np.sqrt(2*np.pi))) * np.e ** (-0.5*((dist-mu)/sigma)**2)\n\n\ndef storeTree(inputTree, filename):\n    import pickle\n    fw = open(filename, 'wb') # open the file in binary mode for writing\n    pickle.dump(inputTree, fw) # pickle.dump(object, file[, protocol]) serializes the object\n    # Persist the data \"object\" into \"file\". Three protocols are available: index 0 is ASCII,\n    # 1 is the old binary protocol, 2 is the new binary protocol; the later ones are more efficient.\n    # By default, dump uses protocol 0.\n    fw.close() # close the file\n\n\ndef grabTree(filename):\n    import pickle\n    fr = open(filename, 'rb')\n    return pickle.load(fr)\n\n\n# pcd_base = o3d.io.read_point_cloud(\"data/UAV_only_B.xyzrgb\")\n# point_base = np.asarray(pcd_base.points)\n\nstart_time = time.process_time()\npcd_ref = o3d.io.read_point_cloud(\"data/Sony_ref.pcd\")\nend_time = time.process_time()\nprint(\"Total time to read point cloud data: \" + str(end_time - start_time) + \"s\")\npoints_in_ref = np.asarray(pcd_ref.points)\n\npoints = pcd_ref.points\n\n# core = points_in_ref[5000]\ncore = np.array([[5.156748294830322e+00, 3.309646546840668e-01, -1.7760356664657593e+00]])\n# core = point_base[4086]\nprint(\"Core point coordinates:\", core)\nradius = 9.74/1000\nn_sigma = 3\n\n# start_time = time.process_time()\n# kdt = KDTree(points_in_ref, leaf_size=5, metric='euclidean')\n# end_time = time.process_time()\n# print(\"Total time to build the KNN tree: \" + str(end_time - start_time) + \"s\")\n#\n# storeTree(kdt, 'tree_3.txt')\n\nstart_time = time.process_time()\nkdt = grabTree(\"tree_3.txt\")\nend_time = time.process_time()\nprint(\"Total time to load the KNN tree: \" + str(end_time - start_time) + \"s\")\n\n\nstart_time = time.process_time()\ndis2, idx2 = kdt.query(core.reshape((1, -1)), k=100)\ndis2 = dis2[0]\nidx2 = idx2[0]\n\nidx, dis = kdt.query_radius(core.reshape((1, -1)), r=radius, return_distance=True)\nend_time = time.process_time()\nprint(\"Total time for neighbor search: \" + str(end_time - start_time) + \"s\")\n\nneighbors = np.asarray(pcd_ref.points)[idx[0], :]\nprint(neighbors.shape)\n\ndist_set = np.sqrt(np.sum((core-neighbors)**2, axis=1))\ndist_set_2 = dis[0]\nprint(\"Distances\", dist_set)\n\nprint(\"Raw distances from the query\", dis[0])\n\n# test_x = np.linspace(-400, 400, 200)\n# ss = skl.MinMaxScaler(feature_range=(-radius/2, radius/2))\n# test_x = ss.fit_transform(dist_set.reshape(-1, 1))\ntest_x = dist_set - 
np.min(dist_set)\ntest_y = gaussian(test_x, sigma=(radius-np.min(dist_set))/3)\nprint(\"Weights\", test_y.flatten())\nplt.scatter(test_x, test_y)\nplt.show()\n# print(gaussian(0))\n\ntemp = dist_set.reshape(-1, 1)*test_y.reshape(-1, 1)\nprint(\"Product of distances and weights\", temp.flatten())\nweighted_dist = np.sum(dist_set.reshape(-1, 1)*test_y.reshape(-1, 1))/np.sum(test_y)\nprint(weighted_dist, np.average(dist_set))\n\nplt.scatter(np.arange(len(dist_set)), dist_set.flatten())\nplt.show()\n","repo_name":"Geoffrey1500/sfm_accuracy","sub_path":"gaussian weighted euclidean distance.py","file_name":"gaussian weighted euclidean distance.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"}
+{"seq_id":"3582856505","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport logging\nimport pdb\nfrom time import time\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .conv_module import BasicBlock, Bottleneck, HighResolutionModule\nfrom .loss import LossEvaluator\nfrom .decoder import Decoder\nfrom .attentive_norm import AttnBatchNorm2d\nfrom logocap.dataset.transforms import FLIP_CONFIG\n\nfrom logocap.dataset.constants import PERSON_SIGMA_DICT, PERSON_SKELETON_DICT\nimport matplotlib.pyplot as plt\nfrom torch.profiler import profile, record_function, ProfilerActivity\n\n\nBN_MOMENTUM = 0.1\nlogger = logging.getLogger(__name__)\n\nblocks_dict = {\n    'BASIC': BasicBlock,\n    'BOTTLENECK': Bottleneck,\n}\n\n\nclass Model(nn.Module):\n    def compute_oks(self, pose_pool, keypoints_gt, areas_gt):\n        num_centers, num_poses_per_center_y, num_poses_per_center_x = pose_pool.shape[:\n                                                                                      3]\n        pose_pool_reshaped = pose_pool.reshape(\n            num_centers * num_poses_per_center_y * num_poses_per_center_x, self.num_joints,\n            2)\n        deltas = torch.sum(\n            (pose_pool_reshaped[:, None, :, :2] - keypoints_gt[None, ..., :2])\n            **2,\n            dim=-1) * (keypoints_gt[None, ..., -1] > 0).float()\n        var = (self.coco_sigma_constant.cuda() * 2)**2\n        visible = (keypoints_gt[None, ..., -1] > 0).float()\n        err = deltas / (2 * var * areas_gt[None, :, None]).clamp_min(1e-6)\n        oks = torch.exp(-err) * visible\n        oks_person = torch.sum(oks, dim=-1) / torch.sum(visible,dim=-1).clamp_min(1)\n        oks_parts = oks.reshape(num_centers, num_poses_per_center_y,\n                                num_poses_per_center_x, keypoints_gt.shape[0],\n                                -1)\n        oks_person = oks_person.reshape(num_centers, num_poses_per_center_y,\n                                        num_poses_per_center_x, -1)\n\n        oks_person_best = torch.sum(\n            oks_parts.max(dim=1)[0].max(dim=1)[0] * visible,\n            dim=-1) / torch.sum(visible, dim=-1).clamp_min(1)\n        \n        return oks_parts, oks_person, oks_person_best\n\n    def compute_oks_with_gt(self,\n                            pose_pool,\n                            meta,\n                            height,\n                            width,\n                            debug_image=None):\n        device = pose_pool.device\n        keypoints, areas = meta['annotations']['kpts'], meta['annotations'][\n            'areas']\n        keypoints = keypoints.to(device)\n        areas = areas.to(device)\n        inv_transform = meta['transform_inv']\n        pose_pool_image_coordinate = torch.zeros_like(pose_pool)\n        pose_pool_image_coordinate[..., -1] = pose_pool[..., -1]\n        px = pose_pool[...,\n                       0] if not meta['HFlip'] else width - 1 - pose_pool[..., 0]\n        py = pose_pool[..., 1]\n\n        pose_pool_image_coordinate[..., 0] = px * inv_transform[\n            0, 0] + py * 
inv_transform[0, 1] + inv_transform[0, 2]\n pose_pool_image_coordinate[..., 1] = px * inv_transform[\n 1, 0] + py * inv_transform[1, 1] + inv_transform[1, 2]\n\n oks_parts, oks_person_, oks_person_best = self.compute_oks(\n pose_pool_image_coordinate, keypoints, areas)\n\n oks_max_, assignment = oks_person_best.max(dim=-1)\n \n pose_gt_image_coordinate = keypoints[assignment].clone()\n areas_ = areas[assignment].clone()[:, None] * (\n (self.coco_sigma_constant.cuda()[0] * 2)**2)\n pose_gt = torch.zeros_like(pose_gt_image_coordinate)\n\n transform = meta['transform']\n\n pose_gt[:, :, 0] = transform[\n 0, 0] * pose_gt_image_coordinate[:, :, 0] + transform[\n 0, 1] * pose_gt_image_coordinate[:, :, 1] + transform[0, 2]\n pose_gt[:, :, 1] = transform[\n 1, 0] * pose_gt_image_coordinate[:, :, 0] + transform[\n 1, 1] * pose_gt_image_coordinate[:, :, 1] + transform[1, 2]\n if meta['HFlip']:\n pose_gt[:, :, 0] = width - 1 - pose_gt[:, :, 0]\n\n pose_gt[:, :, 2] = transform[0, 0] * transform[1, 1] * areas_ * (\n pose_gt_image_coordinate[:, :, 2] > 0).float()\n pose_gt[:, :, 2] *= (pose_gt[:, :, 0] >= 0).float() * (\n pose_gt[:, :, 0] < width).float() * (pose_gt[:, :, 1] >= 0).float(\n ) * (pose_gt[:, :, 1] < height).float()\n center_idx = torch.arange(assignment.shape[0], device=device)\n oks_parts = oks_parts[center_idx, :, :, assignment]\n oks_person = oks_person_[center_idx, :, :, assignment]\n\n return oks_parts, oks_person, pose_gt\n\n def compute_oks_batch(self, pose_pool, keypoints, areas, debug = None):\n batch_size, num_centers, num_poses_per_center_y, num_poses_per_center_x = pose_pool.shape[:4]\n pose_pool_reshaped = pose_pool.reshape(batch_size, -1, self.num_joints, 2)\n\n deltas = torch.sum( (pose_pool_reshaped[:,:,None] - keypoints[:,None,...,:2])**2,dim=-1)*(keypoints[:,None,...,-1]>0)\n var = (self.coco_sigma_constant[None].cuda()*2)**2\n visible = keypoints[:,None,...,-1]>0\n err = deltas/(2*var*areas[:,None,:,None]).clamp_min(1e-6)\n\n oks = torch.exp(-err)*visible\n\n oks_person = torch.sum(oks, dim=-1) / torch.sum(visible,dim=-1).clamp_min(1)\n oks_person = oks_person.reshape(batch_size, num_centers, num_poses_per_center_y,\n num_poses_per_center_x, keypoints.shape[1])\n\n oks_parts = oks.reshape(batch_size,num_centers,num_poses_per_center_y,num_poses_per_center_x,keypoints.shape[1],-1)\n\n oks_person_best = torch.sum(oks_parts.max(dim=2)[0].max(dim=2)[0] * visible, dim=-1) / torch.sum(visible, dim=-1).clamp_min(1)\n if debug:\n return deltas, var, visible, err\n return oks_parts, oks_person, oks_person_best\n\n def compute_oks_with_gt_batch(self, pose_pool, meta, height, width):\n device = pose_pool.device\n keypoints = meta['keypoints']\n areas = meta['areas']\n inv_transform = meta['transforms_inv'].reshape(-1,3,3,1,1,1,1)\n pose_pool_image_coordinate = torch.zeros_like(pose_pool)\n\n flip_stats = meta['HFlip'].reshape(-1,1,1,1,1)\n px = torch.where(flip_stats>0, width-1-pose_pool[...,0],pose_pool[...,0])\n py = pose_pool[...,1]\n\n pose_pool_image_coordinate[..., 0] = px * inv_transform[:,0,0] + py * inv_transform[:,0,1] + inv_transform[:,0,2]\n pose_pool_image_coordinate[..., 1] = px * inv_transform[:,1,0] + py * inv_transform[:,1,1] + inv_transform[:,1,2]\n\n oks_parts, oks_person_, oks_person_best = self.compute_oks_batch(pose_pool_image_coordinate, keypoints, areas)\n \n oks_max_, assignment = oks_person_best.max(dim=-1)\n \n ass_arange = torch.arange(pose_pool.shape[0],device='cuda').reshape(-1,1).expand_as(assignment)\n\n pose_gt_image_coordinate = 
keypoints[ass_arange,assignment].clone()\n\n areas_ = areas[ass_arange,assignment][:,:,None]*((self.coco_sigma_constant.cuda()*2)**2)\n\n pose_gt = torch.zeros_like(pose_gt_image_coordinate)\n\n transform = meta['transforms']\n pose_gt[:,:,:,0] = transform[:,0,0,None,None] * pose_gt_image_coordinate[:,:,:,0] + transform[:,0,1,None,None] * pose_gt_image_coordinate[:,:,:,1] + transform[:,0,2,None,None]\n pose_gt[:,:,:,1] = transform[:,1,0,None,None] * pose_gt_image_coordinate[:,:,:,0] + transform[:,1,1,None,None] * pose_gt_image_coordinate[:,:,:,1] + transform[:,1,2,None,None]\n\n pose_gt[:,:,:,0] = torch.where(meta['HFlip'].reshape(-1,1,1)>0, width-1-pose_gt[:,:,:,0], pose_gt[:,:,:,0])\n\n pose_gt[:,:,:,2] = (transform[:,0,0]*transform[:,1,1]).reshape(-1,1,1).expand_as(areas_)*areas_*(pose_gt_image_coordinate[:,:,:,2]>0)\n\n ctl_arange = torch.arange(pose_pool_image_coordinate.shape[1],device='cuda').reshape(1,-1).expand_as(assignment)\n \n oks_parts = oks_parts[ass_arange,ctl_arange,:,:,assignment]\n\n oks_person = oks_person_[ass_arange,ctl_arange,:,:,assignment]\n\n return oks_parts, oks_person, pose_gt\n\n def meta_batchify(self, meta_batch):\n batch_size = len(meta_batch)\n keypoints_batch = torch.zeros(batch_size,self.decoder.topk_center,self.num_joints,3,device='cuda')\n areas_batch = torch.zeros(batch_size,self.decoder.topk_center,device='cuda')\n transforms_batch = torch.zeros(batch_size,3,3,device='cuda')\n inv_transforms_batch = torch.zeros(batch_size,3,3,device='cuda')\n flip_stat_batch = torch.zeros(batch_size,device='cuda')\n\n for batch_id, meta in enumerate(meta_batch):\n keypoints = meta['annotations']['kpts']\n keypoints_batch[batch_id,:keypoints.shape[0]] = keypoints\n\n areas = meta['annotations']['areas']\n areas_batch[batch_id,:keypoints.shape[0]] = areas\n transforms_batch[batch_id] = meta['transform']\n inv_transforms_batch[batch_id] = meta['transform_inv']\n flip_stat_batch[batch_id] = meta['HFlip']\n\n return {'keypoints': keypoints_batch, \n 'areas':areas_batch,\n 'transforms': transforms_batch,\n 'transforms_inv': inv_transforms_batch,\n 'HFlip': flip_stat_batch\n }\n \n def __init__(self, cfg, backbone):\n super(Model, self).__init__()\n self.backbone = backbone\n inp_channels = backbone.output_channels\n\n multi_output_config_heatmap = cfg['MODEL']['EXTRA'][\n 'MULTI_LEVEL_OUTPUT_HEATMAP']\n\n multi_output_config_regression = cfg['MODEL']['EXTRA'][\n 'MULTI_LEVEL_OUTPUT_REGRESSION']\n\n attn_norm_affine_num = cfg['MODEL']['EXTRA']['AN_NUM_AFFINE']\n attn_norm_for_all = cfg['MODEL']['EXTRA']['AN_FOR_ALL']\n\n self.adaptation_level_test = cfg.TEST.ADAPTATION_LEVEL\n assert self.adaptation_level_test in ['full', 'partial', 'none']\n\n self.transition_cls = nn.Sequential(\n nn.Conv2d(inp_channels,\n multi_output_config_heatmap['NUM_CHANNELS'][0],\n 1,\n 1,\n 0,\n bias=False),\n nn.BatchNorm2d(multi_output_config_heatmap['NUM_CHANNELS'][0]),\n nn.ReLU(True))\n\n self.transition_reg = nn.Sequential(\n nn.Conv2d(inp_channels,\n multi_output_config_regression['NUM_CHANNELS'][0],\n 1,\n 1,\n 0,\n bias=False),\n nn.BatchNorm2d(multi_output_config_regression['NUM_CHANNELS'][0]),\n nn.ReLU(True))\n\n self.final_layer_cls = nn.Conv2d(\n multi_output_config_heatmap['NUM_CHANNELS'][0], cfg.DATASET.NUM_JOINTS, 1, 1, 0)\n self.final_layer_reg = nn.Conv2d(\n multi_output_config_regression['NUM_CHANNELS'][0], (cfg.DATASET.NUM_JOINTS-1)*2, 1, 1, 0)\n\n NUM_JOINTS_WITHOUT_CENTER = cfg.DATASET.NUM_JOINTS-1\n \n cmp_config = cfg.MODEL.EXTRA.CONV_MSG_PASSING\n\n 
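# 1x1 conv that projects backbone features to a compact per-pixel embedding; the\n        # convolutional message-passing head built below consumes one such embedding per joint.\n        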
self.transition_embedding = nn.Sequential(\n nn.Conv2d(inp_channels, cmp_config.DIM_EMBEDDING, 1, 1, 0, bias=False),\n nn.BatchNorm2d(cmp_config.DIM_EMBEDDING), nn.ReLU(True)\n )\n \n conv_msg_passing = []\n input_dim = NUM_JOINTS_WITHOUT_CENTER \\\n * cmp_config.DIM_EMBEDDING\n for layer_id, (output_dim, use_an) in enumerate(zip(cmp_config.DIM_CONVOLUTION,\n cmp_config.USE_AN)):\n \n if 'KSIZE' in cmp_config:\n ksize = cmp_config['KSIZE'][layer_id]\n else:\n ksize = 3\n padding = ksize //2 \n conv_msg_passing.append(\n nn.Conv2d(input_dim, output_dim, ksize, 1, padding=padding, bias=False)\n )\n normlayer = AttnBatchNorm2d(\n output_dim, \n attn_norm_affine_num, \n use_bn=False) if use_an else nn.BatchNorm2d(output_dim)\n \n conv_msg_passing.append(normlayer)\n conv_msg_passing.append(nn.ReLU(inplace=True))\n input_dim = output_dim\n \n conv_msg_passing.append(\n nn.Conv2d(input_dim, NUM_JOINTS_WITHOUT_CENTER, 1, 1, 0)\n )\n self.num_joints = NUM_JOINTS_WITHOUT_CENTER\n self.joints_mlp = nn.Sequential(*conv_msg_passing)\n self.cap_sigma = cfg.MODEL.DECODER.SIGMA\n self.cap_local_hm_size = cfg.MODEL.DECODER.LOCAL_HM_SIZE\n self.cap_global_hm_size = cfg.MODEL.DECODER.GLOBAL_HM_SIZE\n self.cap_topk_center = cfg.MODEL.DECODER.TOPK_CENTER\n self.cap_ksize = cfg.MODEL.DECODER.KSIZE\n \n self.loss_evaluator = LossEvaluator(cfg)\n self.decoder = Decoder(cfg)\n self.flip_test = True\n self.coco_sigma_constant = torch.tensor(PERSON_SIGMA_DICT[cfg.MODEL.TEMPLATE],\n dtype=torch.float32)[None,\n None]\n self.debug_with_gt = False\n self.debug_step = 0\n\n def feature_bilinear_sampling_per_image(self, features, px, py):\n _, height, width = features.shape\n shape = px.shape\n px = px.reshape(-1)\n py = py.reshape(-1)\n px0 = px.floor().clamp(min=0, max=width - 1)\n py0 = py.floor().clamp(min=0, max=height - 1)\n px1 = (px0 + 1).clamp(min=0, max=width - 1)\n py1 = (py0 + 1).clamp(min=0, max=height - 1)\n px0l, py0l, px1l, py1l = px0.long(), py0.long(), px1.long(), py1.long()\n delta_x0 = (px1 - px).clamp(min=0, max=1.0)\n delta_y0 = (py1 - py).clamp(min=0, max=1.0)\n delta_x1 = (px - px0).clamp(min=0, max=1.0)\n delta_y1 = (py - py0).clamp(min=0, max=1.0)\n features_00 = features[:, py0l, px0l] * delta_y0[None] * delta_x0[None]\n features_01 = features[:, py0l, px1l] * delta_y0[None] * delta_x1[None]\n features_10 = features[:, py1l, px0l] * delta_y1[None] * delta_x0[None]\n features_11 = features[:, py1l, px1l] * delta_y1[None] * delta_x1[None]\n\n out = features_00 + features_01 + features_10 + features_11\n out = out.permute((1, 0)).contiguous()\n out = out.reshape(*shape, -1)\n\n return out\n\n def feature_bilinear_sampling_batch(self, features, px, py):\n batch_size, _, height, width = features.shape\n shape = px.shape[1:]\n px = px.reshape(batch_size,-1)\n py = py.reshape(batch_size,-1)\n px0 = px.floor().clamp(min=0, max=width - 1)\n py0 = py.floor().clamp(min=0, max=height - 1)\n px1 = (px0 + 1).clamp(min=0, max=width - 1)\n py1 = (py0 + 1).clamp(min=0, max=height - 1)\n px0l, py0l, px1l, py1l = px0.long(), py0.long(), px1.long(), py1.long()\n\n batch_arange = torch.arange(batch_size,device=features.device).reshape(-1,1).expand_as(px0l)\n delta_x0 = (px1 - px).clamp(min=0, max=1.0)\n delta_y0 = (py1 - py).clamp(min=0, max=1.0)\n delta_x1 = (px - px0).clamp(min=0, max=1.0)\n delta_y1 = (py - py0).clamp(min=0, max=1.0)\n features_00 = features[batch_arange, :, py0l, px0l] * delta_y0[...,None] * delta_x0[...,None]\n features_01 = features[batch_arange, :, py0l, px1l] * delta_y0[...,None] * 
delta_x1[...,None]\n features_10 = features[batch_arange, :, py1l, px0l] * delta_y1[...,None] * delta_x0[...,None]\n features_11 = features[batch_arange, :, py1l, px1l] * delta_y1[...,None] * delta_x1[...,None]\n\n out = features_00 + features_01 + features_10 + features_11\n out = out.reshape(batch_size,*shape,-1)\n return out\n\n\n def forward_training(self, images_batch, targets_batch):\n assert self.training\n batch_size = images_batch.shape[0]\n features = self.backbone(images_batch)\n transition_cls = self.transition_cls(features)\n transition_reg = self.transition_reg(features)\n\n heatmaps = self.final_layer_cls(transition_cls)\n offsets = self.final_layer_reg(transition_reg)\n output_dict = {\n 'heatmaps': heatmaps,\n 'offsets': offsets\n }\n\n heatmaps_hr = torch.nn.functional.interpolate(\n heatmaps,\n size=(images_batch.shape[2], images_batch.shape[3]),\n mode='bilinear',\n align_corners=False)\n embedding = self.transition_embedding(features)\n\n oks_joints_batch = []\n oks_pred_batch = []\n oks_person_batch = []\n\n out_hm_batch = []\n gt_hm_batch = []\n\n debug_cond = self.debug_step in []\n\n decoder_output = self.decoder(images_batch, {'centermaps':heatmaps_hr[:,-1:].detach(),'offsets':offsets.detach()})\n \n yy, xx = torch.meshgrid(\n torch.arange(-self.cap_global_hm_size//2, self.cap_global_hm_size//2 + 1, device='cuda', dtype=torch.float32),\n torch.arange(-self.cap_global_hm_size//2, self.cap_global_hm_size//2 + 1, device='cuda', dtype=torch.float32))\n yy = yy[None, None]\n xx = xx[None, None]\n\n g = (torch.exp(-(xx**2 + yy**2) / (2 * self.cap_sigma * self.cap_sigma)))\n g *= g >= 1e-4\n\n latent_code_batch = []\n in_hm_batch = []\n for batch_id, (meta, image) in enumerate(\n zip(targets_batch['meta'], images_batch)):\n\n pose_pool_per_im = decoder_output['allposes'][batch_id] # 30,11,11,17,2\n ## heatmap refinement\n # import pdb; pdb.set_trace()\n oks_joints, oks_person, pose_gt = self.compute_oks_with_gt(\n pose_pool_per_im,\n meta,\n height=image.shape[1],\n width=image.shape[2],\n debug_image=image) # 30,11,11,17 30,11,11\n\n mask = oks_person.max(dim=-1)[0].max(dim=-1)[0] >= 0.5\n if mask.sum() == 0:\n mask[0] = 1\n pose_pool_per_im = pose_pool_per_im[mask]\n oks_joints = oks_joints[mask]\n oks_person = oks_person[mask]\n pose_gt = pose_gt[mask]\n pose_gt[..., -1] *= (pose_gt[..., 0] >= 0) * (\n pose_gt[..., 0] < image.shape[2]) * (pose_gt[..., 1] >= 0) * (\n pose_gt[..., 1] < image.shape[1])\n\n latent_code = self.feature_bilinear_sampling_per_image(\n embedding[batch_id], pose_pool_per_im[..., 0] / 4.0,\n pose_pool_per_im[..., 1] / 4.0)\n # 30,17,64,11,11\n latent_code = latent_code.permute((0, 3, 4, 1, 2)).contiguous()\n \n latent_code = latent_code.reshape(latent_code.shape[0], -1,\n self.decoder.ksize * 2 + 1,\n self.decoder.ksize * 2 + 1)\n \n latent_code_batch.append(latent_code)\n # oks_pred = self.joints_mlp(latent_code)\n\n \n center_poses = pose_pool_per_im[:, self.cap_ksize, self.cap_ksize]\n pose_xx = xx + center_poses[:, :, 0, None, None]\n pose_yy = yy + center_poses[:, :, 1, None, None]\n in_hm = self.decoder.jointness_bilinear_sampling_single_image(\n heatmaps_hr[batch_id, :-1],\n pose_xx.transpose(0, 1).contiguous(),\n pose_yy.transpose(0, 1).contiguous())\n in_hm = in_hm.transpose(0, 1).contiguous()\n \n in_hm *= g\n in_hm_batch.append(in_hm)\n \n # in_hm = in_hm.reshape(1, -1, in_hm.shape[2], in_hm.shape[3])\n \n # kernel = torch.flip((oks_pred / self.cap_local_hm_size**2).reshape(-1, 1, self.cap_local_hm_size, self.cap_local_hm_size),\n # 
[2, 3])\n # out_hm = F.conv2d(in_hm, kernel, padding=self.cap_ksize, groups=kernel.shape[0])\n # out_hm = out_hm.reshape(pose_pool_per_im.shape[0], self.num_joints,\n # *out_hm.shape[2:])\n xx_g = pose_xx - pose_gt[:, :, 0, None, None]\n yy_g = pose_yy - pose_gt[:, :, 1, None, None]\n gt_hm = torch.exp(-(xx_g**2 + yy_g**2) / (2 * 4*4)) #TODO: MAYBE A BUG EXISTED\n\n # oks_pred_batch.append(oks_pred)\n oks_joints_batch.append(oks_joints)\n oks_person_batch.append(oks_person)\n # out_hm_batch.append(out_hm)\n gt_hm_batch.append(gt_hm)\n latent_code_batch = torch.cat(latent_code_batch)\n oks_pred_batch = self.joints_mlp(latent_code_batch)\n in_hm_batch = torch.cat(in_hm_batch)\n kernel = torch.flip((oks_pred_batch / self.cap_local_hm_size**2).reshape(-1, 1, self.cap_local_hm_size, self.cap_local_hm_size),[2, 3])\n in_hm_batch = in_hm_batch.reshape(1, -1, in_hm_batch.shape[2], in_hm_batch.shape[3])\n out_hm_batch = F.conv2d(in_hm_batch,kernel, padding=self.cap_ksize, groups=kernel.shape[0])\n out_hm_batch = out_hm_batch.reshape(-1, self.num_joints, *out_hm_batch.shape[2:])\n\n # oks_pred_batch.append(code)\n # oks_pred_batch = torch.cat(oks_pred_batch)\n\n \n oks_joints_batch = torch.cat(oks_joints_batch)\n\n oks_joints_batch = oks_joints_batch.permute((0, 3, 1, 2)).contiguous()\n oks_person_batch = torch.cat(oks_person_batch)\n\n # out_hm_batch = torch.cat(out_hm_batch) if len(out_hm_batch) > 0 else out_hm_batch\n gt_hm_batch = torch.cat(gt_hm_batch) if len(out_hm_batch) > 0 else gt_hm_batch\n\n # err1 = (oks_joints_batch-oks_joints_batch_).abs().max()\n # err2 = (out_hm_batch-out_hm_batch_).abs().max()\n # err3 = (oks_person_batch - oks_person_batch).abs().max()\n # err4 = (gt_hm_batch - gt_hm_batch_).abs().max()\n # err = err1+err2+err3+err4\n # print(err)\n \n\n output_dict['oks.joints'] = oks_pred_batch\n output_dict['out_hm'] = out_hm_batch\n\n targets_batch['oks.joints'] = oks_joints_batch\n targets_batch['oks.person'] = oks_person_batch\n targets_batch['out_hm'] = gt_hm_batch\n\n loss_dict = self.loss_evaluator(output_dict, targets_batch)\n # output_dict['prof'] = prof\n self.debug_step +=1\n return output_dict, loss_dict\n\n def refine_none(self, allposes, embedding, heatmaps, **kwargs):\n # height, width = embedding.shape[2:]\n height, width = heatmaps.shape[1:]\n center_poses = allposes[:, self.cap_ksize, self.cap_ksize]\n\n pose_xx = center_poses[:, :, 0, None, None]\n pose_yy = center_poses[:, :, 1, None, None]\n in_hm = self.decoder.jointness_bilinear_sampling_single_image(\n heatmaps,\n pose_xx.transpose(0, 1).contiguous(),\n pose_yy.transpose(0, 1).contiguous())\n in_hm = in_hm.transpose(0, 1).contiguous().squeeze(-1)\n\n return torch.cat((center_poses, in_hm), dim=-1), None\n\n def refine_partial(self, allposes, embedding, heatmaps, **kwargs):\n height, width = embedding.shape[2:]\n latent_code = self.feature_bilinear_sampling_per_image(\n embedding[0], allposes[..., 0] / 4.0, allposes[..., 1] / 4.0)\n\n latent_code = latent_code.permute((0, 3, 4, 1, 2)).contiguous()\n latent_code = latent_code.reshape(allposes.shape[0], -1,\n self.decoder.ksize * 2 + 1,\n self.decoder.ksize * 2 + 1)\n\n oks_joints = self.joints_mlp(latent_code).clamp(0, 1.0)\n # oks_joints = self.joints_mlp(latent_code).sigmoid()\n\n oks_joints = oks_joints.reshape(oks_joints.shape[0],self.num_joints,-1).permute((0,2,1)).contiguous()\n max_val, argmax = oks_joints.max(dim=1)\n argmax_mat = torch.zeros_like(oks_joints)\n argmax_mat.scatter_(1,argmax[:,None],1)\n\n allposes = 
allposes.reshape(oks_joints.shape[0],-1,17,2)\n final_poses = torch.sum(allposes*argmax_mat[...,None],dim=1)\n heat_values = max_val.clone()\n final_poses = torch.cat((final_poses,heat_values[...,None]),dim=-1)\n\n return final_poses, None\n\n def refine(self, allposes, embedding, heatmaps, **kwargs):\n stride = kwargs.get('stride',4.0)\n torch.cuda.synchronize()\n now = time()\n height, width = embedding.shape[2:]\n latent_code = self.feature_bilinear_sampling_per_image(\n embedding[0], allposes[..., 0] / stride, allposes[..., 1] / stride)\n \n # latent_code_flip = self.feature_bilinear_sampling_per_image(embedding[1], width-1-allposes[...,0]/4.0, allposes[...,1]/4.0)\n # latent_code_flip = latent_code_flip[:,:,:,self.decoder.flip_config]\n\n # latent_code = 0.5*(latent_code+latent_code_flip)\n meta = kwargs.get('meta',None)\n\n latent_code = latent_code.permute((0, 3, 4, 1, 2)).contiguous()\n latent_code = latent_code.reshape(allposes.shape[0], -1,\n self.decoder.ksize * 2 + 1,\n self.decoder.ksize * 2 + 1)\n\n oks_joints = self.joints_mlp(latent_code).clamp(0, 1.0)\n oks_mask = (allposes[...,0]/stride>=0)*(allposes[...,0]/stride<=width-1)*(allposes[...,1]/stride>=0)*(allposes[...,1]/stride<=height-1)\n \n # oks_joints *= oks_mask.float().permute((0,3,1,2))\n\n torch.cuda.synchronize()\n kwargs['timing']['local-kam'] += time()-now\n \n torch.cuda.synchronize()\n now = time()\n # oks_joints = code.reshape(code.shape[0],17,11,11)\n # import pdb; pdb.set_trace()\n\n # kernel = oks_joints/121.0\n # kernel = kernel.transpose(0,1).contiguous()\n x_start = -self.cap_global_hm_size//2\n x_end = self.cap_global_hm_size//2\n y_start = -self.cap_global_hm_size//2\n y_end = self.cap_global_hm_size//2\n\n\n yy, xx = torch.meshgrid(\n torch.arange(y_start,\n y_end + 1,\n device='cuda',\n dtype=torch.float32),\n torch.arange(x_start,\n x_end + 1,\n device='cuda',\n dtype=torch.float32))\n yy = yy[None, None]\n xx = xx[None, None]\n # import pdb; pdb.set_trace()\n # xx = xx*sigmas\n # yy = yy*sigmas\n center_poses = allposes[:, self.cap_ksize, self.cap_ksize]\n pose_xx = xx + center_poses[:, :, 0, None, None]\n pose_yy = yy + center_poses[:, :, 1, None, None]\n \n in_hm = self.decoder.jointness_bilinear_sampling_single_image(\n heatmaps,\n pose_xx.transpose(0, 1).contiguous(),\n pose_yy.transpose(0, 1).contiguous())\n # import pdb; pdb.set_trace()\n\n in_hm = in_hm.transpose(0, 1).contiguous()\n\n g = (torch.exp(-(xx**2 + yy**2) / (2 * self.cap_sigma * self.cap_sigma)))\n g *= g >= 1e-4 #TODO: hardcode\n in_hm *= g\n in_hm = in_hm.reshape(1, -1, in_hm.shape[2], in_hm.shape[3])\n kernel = torch.flip((oks_joints / self.cap_local_hm_size**2).reshape(-1, 1, self.cap_local_hm_size, self.cap_local_hm_size),\n [2, 3]) \n\n out_hm = F.conv2d(in_hm, kernel, padding=self.cap_ksize, groups=kernel.shape[0])\n out_hm = out_hm.reshape(allposes.shape[0], self.num_joints, -1)\n\n value, idx = out_hm.topk(2)\n\n topk_y = torch.div(idx,xx.shape[-1],rounding_mode='trunc')\n topk_x = idx % xx.shape[-1]\n \n topk_v = value\n lambda1 = 0.75\n lambda2 = 1 - lambda1\n topk_y = lambda1 * topk_y[..., :1] + lambda2 * topk_y[..., 1:]\n topk_x = lambda1 * topk_x[..., :1] + lambda2 * topk_x[..., 1:]\n topk_v = lambda1 * topk_v[..., :1] + lambda2 * topk_v[..., 1:]\n # topk_v = 1.0 * topk_v[..., :1] + 0.0 * topk_v[..., 1:]\n\n offset = torch.cat((topk_x, topk_y), dim=-1)\n offset[..., 0] += xx.min()\n offset[..., 1] += yy.min()\n\n\n pose = center_poses + offset\n pose = torch.cat((pose, topk_v), dim=-1)\n torch.cuda.synchronize()\n\n 
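# Attribute the time measured above to the global keypoint-aggregation ('global-kam') stage.\n        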
kwargs['timing']['global-kam'] += time() - now\n return pose, None\n\n def forward_multiscale(self, images_batch, scale_factors, targets_batch=None):\n timing = {\n 'backbone': 0,\n 'local-kem': 0,\n 'local-kam': 0,\n 'global-kam': 0,\n }\n\n main_index = scale_factors.index(1.0)\n if self.flip_test:\n for i in range(len(images_batch)):\n img_flip = torch.flip(images_batch[i],[3])\n images_batch[i] = torch.cat((images_batch[i],img_flip),dim=0)\n \n\n features = self.backbone(images_batch[main_index])\n\n transition_cls = self.transition_cls(features)\n transition_reg = self.transition_reg(features)\n heatmaps = self.final_layer_cls(transition_cls)\n offsets = self.final_layer_reg(transition_reg)\n embedding = self.transition_embedding(features)\n\n if self.flip_test:\n heatmaps_flip = torch.flip(heatmaps[1:,self.decoder.flip_config_with_center],[3])\n heatmaps = (heatmaps[:1] + heatmaps_flip)*0.5\n offsets_flip = torch.flip(offsets[1:],[3])\n offsets_flip = offsets_flip.reshape(1,-1,2,offsets.shape[2],offsets.shape[3])[:,self.decoder.flip_config]\n offsets_flip[:,:, 0] *= -1.0\n offsets_flip = offsets_flip.reshape(1,-1,offsets.shape[2],offsets.shape[3])\n offsets = (offsets[:1] + offsets_flip)*0.5\n \n heatmaps_hr = torch.nn.functional.interpolate(heatmaps,\n size=(images_batch[main_index].size(2),images_batch[main_index].size(3)), mode='bilinear',align_corners=False)\n\n initial_poses = []\n initial_dict = self.decoder(images_batch[main_index],{'centermaps':heatmaps_hr[:,-1:],'offsets':offsets})\n initial_pose = initial_dict['allposes'][0]\n mask = initial_dict['centers'][0,...,-1]>=1e-2\n if mask.sum() == 0:\n mask[0] =1\n \n initial_poses = [initial_pose[mask]]\n initial_centers = [initial_dict['centers'][0,...,-1][mask]]\n\n max_score = initial_centers[0].max()\n\n for i in range(len(images_batch)):\n if i == main_index:\n continue\n features_ = self.backbone(images_batch[i])\n embedding_ = self.transition_embedding(features_)\n embedding += torch.nn.functional.interpolate(embedding_, size=(embedding.size(2),embedding.size(3)),mode='bilinear',align_corners=False)\n\n offsets_ = self.final_layer_reg(self.transition_reg(features_))\n offsets_f = torch.flip(offsets_[1:], [3])\n offsets_f = offsets_f.reshape(1,-1,2,offsets_.shape[2],offsets_.shape[3])[:,self.decoder.flip_config]\n offsets_f[:,:, 0] *= -1.0\n offsets_f = offsets_f.reshape(1,-1,offsets_.shape[2],offsets_.shape[3])\n offsets_ = (offsets_[:1] + offsets_f)*0.5\n heatmaps_ = self.final_layer_cls(self.transition_cls(features_))\n heatmaps_f = torch.flip(heatmaps_[1:,self.decoder.flip_config_with_center],[3])\n # heatmaps_f[...,1:] = heatmaps_f[...,:-1]\n heatmaps_ = (heatmaps_[:1] + heatmaps_f)*0.5\n heatmaps_hr_ = torch.nn.functional.interpolate(heatmaps_,\n size=(images_batch[main_index].size(2),images_batch[main_index].size(3)), mode='bilinear',align_corners=False)\n\n initial_dict = self.decoder(images_batch[i], {'centermaps':heatmaps_hr_[:,-1:],'offsets':offsets_})\n\n initial_pose = initial_dict['allposes'][0]\n mask = initial_dict['centers'][0,...,-1]>=0.1\n if mask.sum() > 1000:\n initial_pose = initial_pose[mask]/scale_factors[i]\n initial_poses.append(initial_pose)\n scores = initial_dict['centers'][0,...,-1][mask]*0.9\n # max_score_scale = scores.max()\n # scores = scores/max_score_scale*max_score*0.9\n initial_centers.append(initial_dict['centers'][0,...,-1][mask])\n heatmaps_hr += heatmaps_hr_\n\n # heatmaps_hr = torch.nn.functional.interpolate(heatmaps,\n # 
size=(images_batch[main_index].size(2),images_batch[main_index].size(3)), mode='bilinear',align_corners=False)\n\n heatmaps_hr = heatmaps_hr/len(images_batch)\n embedding = embedding/len(images_batch)\n # embedding[0] = (embedding[0] + torch.flip(embedding[1],[2]))\n # import pdb; pdb.set_trace()\n output_dict = {\n 'centermaps': heatmaps_hr[:,-1:],\n 'offsets': offsets,\n }\n\n torch.cuda.synchronize()\n now = time()\n timing['backbone'] += time() - now\n loss_dict = {}\n \n\n allposes = torch.cat(initial_poses)\n allcenters = torch.cat(initial_centers)\n torch.cuda.synchronize()\n timing['local-kem'] += time() - now\n\n now = time()\n \n if self.adaptation_level_test == 'none':\n refine_fn = self.refine_none\n embedding = features\n elif self.adaptation_level_test == 'partial':\n refine_fn = self.refine_partial \n else:\n refine_fn = self.refine\n\n final_poses, hm_instances = refine_fn(allposes,\n embedding,\n heatmaps_hr[0],\n debug_image=images_batch,timing=timing,\n meta = None if targets_batch is None else targets_batch['meta'])\n # final_poses[..., -1] *= output_dict['centers'][0][mask][..., None, -1]\n temp_x = final_poses[...,0]\n temp_y = final_poses[...,1]\n temp_x = temp_x.t()[:,None,None]\n temp_y = temp_y.t()[:,None,None]\n values = self.decoder.jointness_bilinear_sampling_single_image(heatmaps_hr[0], temp_x, temp_y)[:,0,0,]\n # self.decoder.jointness_bilinear_sampling_single_image(heatmaps_hr, final_poses[...,None,0].permute(), final_poses[...,None,1])\n # final_poses[..., -1] = torch.sqrt(final_poses[..., -1]*values.t())\n # final_poses[..., -1] = (0.75*final_poses[..., -1]+0.25*values.t())\n final_poses[..., -1] *= allcenters[:,None]\n final_poses[...,0] = final_poses[...,0].clamp(min=0,max=images_batch[1].shape[-1])\n final_poses[...,1] = final_poses[...,1].clamp(min=0,max=images_batch[1].shape[-2])\n \n scores = final_poses[..., -1].mean(dim=-1)\n # valid_poses = scores >= 1e-3\n valid_poses = scores >0\n num_valid_poses = valid_poses.sum()\n if num_valid_poses == 0:\n output_dict['poses'] = None\n return output_dict, loss_dict\n\n output_dict['poses'] = final_poses[valid_poses]\n output_dict['scores'] = scores[valid_poses]\n output_dict['allposes'] = allposes[valid_poses]\n output_dict['centers'] = allcenters\n output_dict['heatmaps'] = heatmaps_hr\n \n return output_dict, timing\n\n def forward(self, images_batch, targets_batch=None):\n if self.training:\n return self.forward_training(images_batch, targets_batch)\n if self.flip_test:\n temp = torch.flip(images_batch, [3])\n images_batch_flip = temp\n images_batch = torch.cat((images_batch, images_batch_flip), dim=0)\n \n timing = {\n 'backbone': 0,\n 'local-kem': 0,\n 'local-kam': 0,\n 'global-kam': 0,\n }\n torch.cuda.synchronize()\n now = time()\n features = self.backbone(images_batch)\n\n transition_cls = self.transition_cls(features)\n transition_reg = self.transition_reg(features)\n heatmaps = self.final_layer_cls(transition_cls)\n offsets = self.final_layer_reg(transition_reg)\n\n # jointsmaps, centermaps = heatmaps[:,:-1], heatmaps[:,-1:]\n\n if self.flip_test:\n heatmaps_flip = torch.flip(heatmaps[1:,self.decoder.flip_config_with_center],[3])\n heatmaps = (heatmaps[:1] + heatmaps_flip)*0.5\n offsets_flip = torch.flip(offsets[1:],[3])\n offsets_flip = offsets_flip.reshape(1,-1,2,offsets.shape[2],offsets.shape[3])[:,self.decoder.flip_config]\n offsets_flip[:,:, 0] *= -1.0\n offsets_flip = offsets_flip.reshape(1,-1,offsets.shape[2],offsets.shape[3])\n offsets = (offsets[:1] + offsets_flip)*0.5\n \n heatmaps_hr = 
torch.nn.functional.interpolate(heatmaps,\n size=(images_batch.size(2),images_batch.size(3)), mode='bilinear',align_corners=False)\n \n output_dict = {\n 'centermaps': heatmaps_hr[:,-1:],\n # 'centermaps': heatmaps[:,-1:],\n 'offsets': offsets,\n }\n\n torch.cuda.synchronize()\n timing['backbone'] += time() - now\n \n torch.cuda.synchronize()\n now = time()\n loss_dict = {}\n \n output_dict.update(\n self.decoder(images_batch,\n output_dict,\n flip_testing=self.flip_test))\n\n mask = output_dict['centers'][0,..., -1] >= 1e-2\n # mask = output_dict['centers'][0,..., -1] >= -1\n if mask.sum() == 0:\n mask[0] = 1\n\n allposes = output_dict['allposes'][0][mask]\n\n torch.cuda.synchronize()\n timing['local-kem'] += time() - now\n\n now = time()\n if self.adaptation_level_test == 'none':\n refine_fn = self.refine_none\n embedding = features\n elif self.adaptation_level_test == 'partial':\n refine_fn = self.refine_partial\n embedding = self.transition_embedding(features)\n else:\n refine_fn = self.refine\n embedding = self.transition_embedding(features)\n final_poses, hm_instances = refine_fn(allposes,\n embedding,\n heatmaps_hr[0],\n debug_image=images_batch,timing=timing,) \n final_poses[..., -1] *= output_dict['centers'][0][mask][..., None, -1]\n # final_poses[..., -1] = output_dict['centers'][0][mask][..., None, -1]\n # final_poses[...,0] = final_poses[...,0].clamp(min=0,max=images_batch.shape[-1])\n # final_poses[...,1] = final_poses[...,1].clamp(min=0,max=images_batch.shape[-2])\n scores = final_poses[..., -1].mean(dim=-1)\n # valid_poses = scores >= 1e-3\n valid_poses = scores >0\n num_valid_poses = valid_poses.sum()\n if num_valid_poses == 0:\n output_dict['poses'] = None\n return output_dict, loss_dict\n\n output_dict['poses'] = final_poses[valid_poses]\n output_dict['scores'] = scores[valid_poses]\n output_dict['allposes'] = allposes[valid_poses]\n output_dict['centers'] = output_dict['centers'][0][mask]\n output_dict['heatmaps'] = heatmaps_hr\n return output_dict, timing\n","repo_name":"cherubicXN/logocap","sub_path":"logocap/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":39614,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"88"} +{"seq_id":"9273860852","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport unittest, csv\nfrom main import load_letters, eng2ara\n\n\nclass Test(unittest.TestCase):\n\n letters = {}\n tests = {}\n\n def setUp(self):\n self.letters = load_letters()\n with open(\"tests.csv\", mode='r', encoding='utf-8') as file:\n reader = csv.reader(file, delimiter=';')\n self.tests = {row[0]: row[1] for row in reader}\n\n def test_table(self):\n for eng, ara in self.tests.items():\n self.assertEqual(ara, eng2ara(eng, self.letters))\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"mazenbesher/Eng2Ara","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"18042529105","text":"import unittest\nfrom app_tdd import *\n\nclass TestUsuarios(unittest.TestCase):\n \n def test_crear_usuarios_unicos(self):\n usuarios = []\n for i in range(10):\n usuarios.append(Usuario(\"Juan\"))\n ids = list(map(lambda usuario: usuario.id, usuarios))\n\n self.assertEqual(len(ids), len(set(ids)))\n\n def test_mensajes_enviados_a(self):\n usuario1 = Usuario(\"Juan\")\n usuario2 = Usuario(\"Pedro\")\n for i in range(20):\n Mensaje(\"test\", usuario1, 
Usuario(\"destinatario anonimo\"))\n            Mensaje(\"para usuario2\", usuario1, usuario2)\n\n        self.assertEqual(len(usuario1.mensajes_enviados_a(usuario2)), 20)\n        self.assertEqual(usuario1.mensajes_enviados_a(usuario2)[0].destinatario.nombre, \"Pedro\")\n\n    def test_mensajes_recibidos_de(self):\n        usuario1 = Usuario(\"Juan\")\n        usuario2 = Usuario(\"Pedro\")\n        for i in range(20):\n            Mensaje(\"test\", usuario1, Usuario(\"destinatario anonimo\"))\n            Mensaje(\"para usuario2\", usuario1, usuario2)\n\n        self.assertEqual(len(usuario2.mensajes_recibidos_de(usuario1)), 20)\n        self.assertEqual(usuario2.mensajes_recibidos_de(usuario1)[0].remitente.nombre, \"Juan\")\n\n    def test_get_contactos(self):\n        usuario1 = Usuario(\"Juan\")\n        for i in range(1,11):\n            Mensaje(\"Hola\", usuario1, Usuario(\"destinatario \" + str(i)))\n            Mensaje(\"Hola\", Usuario(\"remitente \" + str(i)), usuario1)\n\n        lista_nombres = list(map(lambda contacto: contacto.nombre, usuario1.contactos))\n        for i in range(1,11):\n            self.assertIn(\"destinatario \" + str(i), lista_nombres)\n            self.assertIn(\"remitente \" + str(i), lista_nombres)\n\n    def test_conversacion_con(self):\n        usuario1 = Usuario(\"Juan\")\n        usuario2 = Usuario(\"Pedro\")\n        Mensaje(\"Hola\", usuario1, usuario2)\n        Mensaje(\"Que tal\", usuario2, usuario1)\n        Mensaje(\"Como estas\", usuario1, usuario2)\n\n        textos = list(map(lambda mensaje: mensaje.contenido, usuario1.conversacion_con(usuario2)))\n        self.assertEqual(textos, [\"Hola\", \"Que tal\", \"Como estas\"])\n\nclass TestMensajes(unittest.TestCase):\n    usuario1 = Usuario(\"Juan\")\n    usuario2 = Usuario(\"Pedro\")\n\n    def test_crear_mensaje(self):\n        mensajes_antes = len(self.usuario1.mensajes)\n        mensaje = Mensaje(\"contenido de prueba\", self.usuario1, self.usuario2)\n\n        self.assertEqual(len(self.usuario1.mensajes) - mensajes_antes, 1)\n\n\nif __name__ == '__main__':\n    unittest.main()","repo_name":"Mjic99/Test-Driven-Development","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"}
+{"seq_id":"72999942366","text":"import functions_framework\nimport src.usecase as api\nimport os\n\nplug_id = os.getenv(\"DEVICE_ID_PLUG\")\nmeter_id = os.getenv(\"DEVICE_ID_METER\")\nhumidifier_remote_id = os.getenv(\"REMOTE_ID_HUMIDIFIER\")\n\n\n@functions_framework.http\ndef hello_http(request):\n    plug_status = api.get_plug_status(plug_id)\n    if not plug_status[\"on\"]:\n        return \"ok\"\n\n    is_humidifier_on = plug_status[\"weight\"] > 10\n\n    meter_status = api.get_meter_status(meter_id)\n\n    if meter_status[\"humidity\"] < 40 and not is_humidifier_on:\n        api.send_remote_command(humidifier_remote_id, \"電源\")  # \"電源\" = the remote's power-toggle command\n\n    if meter_status[\"humidity\"] >= 60 and is_humidifier_on:\n        api.send_remote_command(humidifier_remote_id, \"電源\")\n\n    return \"ok\"\n","repo_name":"TTRSQ/humidifier-func","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"}
+{"seq_id":"3344215847","text":"from flask import Flask, jsonify, request # Flask objects used to build the server\n\nfrom flask_restx import Api, Resource # Api object used to build the REST API\nimport torch\nimport cv2\nfrom model import efficientNetV2M\nimport pandas as pd\nimport numpy as np\nimport albumentations as A\nimport albumentations.pytorch \nimport torch.nn.functional as F\n\n\napp = Flask(__name__) # create the Flask object; the argument is the application package name\napi = Api(app) # register the Api object on the Flask app\n\ndef 
get_model():\n    model = efficientNetV2M(in_channels=401)\n    model.load_state_dict(torch.load('efficientV2aug.pt'))\n    return model\n\n\ndef get_image(img_path):\n    target = cv2.imread(img_path)\n    target = cv2.cvtColor(target, cv2.COLOR_BGR2RGB)\n    transform = A.Compose([\n        A.Resize(300, 300),\n        A.Normalize(),\n        albumentations.pytorch.transforms.ToTensorV2()\n    ])\n    return transform(image = target)['image'].unsqueeze(0)\n\n\ndef get_predictions(img):\n    df = pd.read_csv('../aidb.csv', encoding = 'cp949')\n    model = get_model()\n    model.eval()\n    outputs = model.forward(img)\n    _, y_hat = torch.sort(outputs, descending = True)\n    print(outputs.sort())\n    arr = []\n    for i in range(3):\n        arr.append(df['음 식 명'][y_hat[0][i].item() - 1])\n    return arr\n    \n@app.route('/predict', methods=[\"GET\", \"POST\"])\ndef predict():\n    if request.method == \"GET\": \n        message = {\n            \"name\" : \"GET\"\n        }\n        return message\n\n    if request.method == 'POST':\n        file = request.get_json()\n        img_path = file['img_path']\n        img = get_image(img_path = img_path)\n        class_id = get_predictions(img)\n        print(class_id)\n        return jsonify({'class_id': class_id})\n\nif __name__ == \"__main__\":\n    app.run(debug=True, host='0.0.0.0', port=80)","repo_name":"simbean/Probodia","sub_path":"AI/foodClassifier/Classifier/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"}
+{"seq_id":"38576617211","text":"import numpy as np\r\nimport pandas as pd\r\n\r\n\r\n# Given a list of items and a parallel list of ratings, drop the items rated -1 and return the rest\r\ndef filter_dislikes(lst, array):\r\n    result = []\r\n    for i, xs in enumerate(lst):\r\n        rating = array[i]\r\n        if rating != -1:\r\n            result.append(xs)\r\n    return result\r\n\r\n# Visual 1\r\ndef vis1(avguser1, avguser2):\r\n    # step 1: concat two dataframes\r\n    frames = [avguser1, avguser2]\r\n    result = pd.concat(frames, axis=1)\r\n    # step 2: calculate average\r\n    result = result.mean(axis=1)\r\n\r\n    # Restructure the output\r\n    result = pd.DataFrame(result)\r\n    result = result.rename(columns={0: \"percentage\"})\r\n    result.index.name = \"index\"\r\n\r\n    return result\r\n\r\n# Visual 2\r\ndef vis2(array, df):\r\n\r\n    df = df.copy()\r\n    toDrop = []  # never populated, so the drop below is a no-op\r\n    ratings = []\r\n    # step 1: invert the score scale for disliked input movies and use |rating| as the weight\r\n    #print(array)\r\n    for i, rating in enumerate(array):\r\n        if rating < 0:\r\n            rating = -rating\r\n            df.iloc[:,i] = 100 - df.iloc[:,i] \r\n        ratings.append(rating)\r\n    \r\n    ratings = np.array(ratings)\r\n    \r\n    df = df.drop(df.columns[toDrop],axis = 1)\r\n    df = df.reset_index()\r\n    #df = df.drop(\"index\",axis=1)\r\n\r\n    #print(df)\r\n    \r\n\r\n    for i in range(df.index[-1]+1):\r\n        a= df.iloc[i,1:]\r\n        df.iloc[i,1:] = np.average(a=a, weights=ratings)\r\n    df = df.set_index(\"index\")\r\n\r\n    #print(df)\r\n    \r\n    # step 2: calculate average\r\n    df = df.mean(axis=1)\r\n\r\n    #print(df)\r\n\r\n    # Restructure the output\r\n    vis2 = pd.DataFrame(df)\r\n    vis2 = vis2.rename(columns={0: \"percentage\"})\r\n    vis2.index.name = \"index\"\r\n\r\n    #print(vis2)\r\n    \r\n    return vis2\r\n\r\n# Visual 3\r\ndef vis3(string, index_ouput_movie, index_input_movies, array1, array2, data_processed):\r\n    # 1: find genres of output movie\r\n    list_ouput_movie = data_processed.at[index_ouput_movie, string]\r\n\r\n    # 2: keep all input movies (the filter_dislikes calls are disabled here)\r\n    #df1 = filter_dislikes(index_input_movies, array1)\r\n    #df2 = filter_dislikes(index_input_movies, array2)\r\n    \r\n    df1 = index_input_movies\r\n    df2 = index_input_movies\r\n\r\n    
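# Each user's genre tally below is weighted by that user's rating, so genres\r\n    # from highly-rated input movies dominate the User1/User2 split computed later.\r\n    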
#print(data_processed)\r\n\r\n    # 3: find genres of input movies\r\n    genre_dict_1 = {}\r\n    for i, xs in enumerate(df1):\r\n        temp = data_processed.at[xs, string]\r\n        for genre in temp:\r\n            genre_dict_1[genre] = genre_dict_1.get(genre, 0) + array1[i]\r\n        #lst1_input_movie.extend(temp)\r\n    \r\n    #lst2_input_movie = []\r\n    genre_dict_2 = {}\r\n    for i, xs in enumerate(df2):\r\n        temp = data_processed.at[xs, string]\r\n        for genre in temp:\r\n            genre_dict_2[genre] = genre_dict_2.get(genre, 0) + array2[i]\r\n        #lst2_input_movie.extend(temp)\r\n    \r\n    \r\n    # make dataframe\r\n    df = pd.DataFrame(list_ouput_movie, columns=[string])\r\n    df[\"User1\"] = \"\"\r\n    df[\"User2\"] = \"\"\r\n    \r\n\r\n    for i, xs in enumerate(list_ouput_movie):\r\n        #if xs in lst1_input_movie:\r\n        df.at[i, 'User1'] = max(genre_dict_1.get(xs, 0), 0)\r\n        \r\n    for i, xs in enumerate(list_ouput_movie):\r\n        \r\n        #if xs in lst2_input_movie:\r\n        df.at[i, 'User2'] = max(genre_dict_2.get(xs, 0), 0)\r\n        \r\n    df['Total'] = df['User1'] + df['User2']\r\n    df['Total'] = np.array([max(i,1) for i in df['Total']])\r\n    df['User1_per'] = df['User1'] / df['Total']\r\n    #df.loc[np.isinf(df['User1_per']), 'User1_per'] = 0\r\n    df['User2_per'] = df['User2'] / df['Total']\r\n    #df.loc[np.isinf(df['User2_per']), 'User2_per'] = 0\r\n    df = df.round(2)\r\n\r\n    \r\n    return df\r\n\r\n# Visual 4\r\ndef vis4(index_ouput_movie, index_input_movies, array1, array2, data_processed):\r\n    # 1: find keywords of output movie\r\n    list_ouput_movie = data_processed.at[index_ouput_movie, \"keywords\"]\r\n    \r\n    # 2: remove input movies that had dislike\r\n    df1 = filter_dislikes(index_input_movies, array1)\r\n    df2 = filter_dislikes(index_input_movies, array2)\r\n    \r\n    # 3: find keywords of input movies\r\n    lst1_input_movie = []\r\n    for i, xs in enumerate(df1):\r\n        temp = data_processed.at[xs, \"keywords\"]\r\n        lst1_input_movie.extend(temp)\r\n    \r\n    lst2_input_movie = []\r\n    for i, xs in enumerate(df2):\r\n        temp = data_processed.at[xs, \"keywords\"]\r\n        lst2_input_movie.extend(temp)\r\n\r\n    # 4: intersect keywords of input & output movies\r\n    lst1 = [value for value in list_ouput_movie if value in lst1_input_movie]\r\n    lst2 = [value for value in list_ouput_movie if value in lst2_input_movie]\r\n    \r\n    # 5: 0 = both like / 1 = user1 only likes it / 2 = user2 only likes it\r\n    all_keywords = list(set(lst1) | set(lst2))\r\n    result1 = []\r\n    result2 = []\r\n    result0 = []\r\n    for kw in all_keywords:\r\n        if (kw in lst1) & (kw in lst2):\r\n            result0.append(kw)\r\n        elif (kw in lst1):\r\n            result1.append(kw)\r\n        else:\r\n            result2.append(kw)\r\n    \r\n    return [result0, result1, result2]\r\n\r\n","repo_name":"alexanderjoossens/movie-generator","sub_path":"src/backend/visualiser.py","file_name":"visualiser.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"2765427314","text":"\r\n\r\n\r\n\r\n\r\ndef rent_data():\r\n\r\n    print(\"=\"*30)\r\n    date= input(\"Enter Date of Rent amount is paying: (dd/mm/yyyy) : \")\r\n\r\n    amt = int(input(\"Enter Rent Amount paying : \"))\r\n\r\n    electricity= int(input(\"Enter Electricity Bill Amount : \"))\r\n    elediv= electricity/2\r\n    print(\"Electricity Bill Divided : \",elediv)\r\n\r\n    water= int(input(\"Enter Water Bill Amount :\"))\r\n    watdiv= water/2\r\n    print(\"water Bill Divided : \",watdiv)\r\n\r\n    Tax = int(input(\"Enter Tax Bill Amount :\"))\r\n    taxdiv= Tax/2\r\n    print(\"Tax Bill Divided : \",taxdiv)\r\n    print(\"=\"*70)\r\n\r\n    total= 
amt+elediv+watdiv+taxdiv\r\n    inttotal = int(total)\r\n    stringtotal= str(inttotal)\r\n    print(\"Total Amount of \",date,\" is \",stringtotal)\r\n\r\n    choose = int(input(\"Enter 1 to save Rental Details : \"))\r\n\r\n    if choose ==1:\r\n\r\n        file= open(\"Rental.txt\",'a')\r\n        file.write(\"\\n \\n\")\r\n        #file.write(\"================================== Rental payment Details ===========================\")\r\n        #file.write('\\n \\n')\r\n        file.write(\"Date of Rental Amount paid :> \")\r\n        file.write(date)\r\n        file.write(\"\\t \\t\")\r\n        file.write(\"Rental Amount paid :> \")\r\n        file.write(stringtotal)\r\n        file.close()\r\n\r\n        print(\"****** Rental payment Details Saved ****** \")\r\n\r\n    else:\r\n        print(\"Thanks for using \")\r\n\r\n\r\n\r\n\r\ndef rental_ds():\r\n    print(\"=\"*30)\r\n    rfn= input(\"Enter Rental First Name : \")\r\n    rmn = input(\"Enter Rental Middle Name : \")\r\n    rln = input(\"Enter Rental Last Name : \")\r\n\r\n    rem= input(\"Enter Rental Email Id : \")\r\n    rph= input(\"Enter Rental Phone No : +91 \")\r\n\r\n    rdp= input(\"Enter Rental Deposit Amount : \")\r\n    rdt= input(\"Enter Deposit Given Date :(dd/mm/yyyy) : \")\r\n    rrp= input(\"Enter per Month Rent Amount : \")\r\n\r\n    rfm= input(\"Enter Rental No. of Member in Family : \")\r\n    print(\"=\"*50)\r\n    \r\n    cs= int(input(\"Enter 1 to Save Details :> \"))\r\n\r\n    if cs == 1:\r\n        file= open('Rental.txt','a')\r\n        files.write(\"\\n\")\r\n        files.write(\"===========================Rental Details =======================\")\r\n        files.write(\"\\n\")\r\n        files.write(\"Rental full Name :> \")\r\n        files.write(rfn)\r\n        files.write(\" \")\r\n        files.write(rmn)\r\n        files.write(\" \" )\r\n        files.write(rln)\r\n        files.write(\"\\n\")\r\n        files.write(\"Rental Email Id :> \")\r\n        files.write(rem)\r\n        files.write(\"\\t\")\r\n        files.write(\"Rental Phone No :>+91\")\r\n        files.write(rph)\r\n        files.write(\"\\n\")\r\n        files.write(\"Date of Deposite Paid :> \")\r\n        files.write(rdt)\r\n        files.write(\" \\t \")\r\n        files.write(\"Rental Deposit Amount :> \")\r\n        files.write(rdp)\r\n        files.write(\" \\n\")\r\n        files.write(\"Rent Amount for per month :> \")\r\n        files.write(rrp)\r\n        files.close()\r\n        print(\"=\"*50)\r\n        print(\"Saved Rental Details Successfully :) \")\r\n        \r\n        \r\n    else :\r\n        print(\"Thanks for using \")\r\n        exitdata = input(\"press Y for Exit OR press N for Stay\")\r\n\r\n        if exitdata==('y'|'Y'):\r\n            print(\"Exiting tata bye\")\r\n            exit()\r\n        else :\r\n            print(\"staying\")\r\n\r\n\r\n    \r\n    \r\n\r\n\r\n\r\ndef owner_ds ():\r\n    print(\"=\"*30)\r\n    ofn= input(\"Enter Owner First Name : \")\r\n    omn = input(\"Enter Owner Middle Name : \")\r\n    oln = input(\"Enter Owner Last Name : \")\r\n\r\n    oem= input(\"Enter Owner Email Id : \")\r\n    oph= input(\"Enter Owner Phone No : +91 \")\r\n\r\n    ocity= input(\"Enter owner City Name : \")\r\n    oad= input(\"Enter owner Address : \")\r\n    opin= input(\"Enter owner pin code : \")\r\n\r\n    cs= int(input(\"Enter 1 to Save Details :> \"))\r\n\r\n    if cs == 1:\r\n        \r\n        file = open('Rental.txt','w')\r\n        file.write(\"===========================OWNER Details =======================\")\r\n        file.write(\"\\n\")\r\n        file.write(\"Owner full Name :> \")\r\n        file.write(ofn)\r\n        file.write(\" \")\r\n        file.write(omn)\r\n        file.write(\" \" )\r\n        file.write(oln)\r\n        file.write(\"\\n\")\r\n        file.write(\"Owner Email Id :> \")\r\n        file.write(oem)\r\n        file.write(\"\\t\")\r\n        file.write(\"Owner Phone No :>+91\")\r\n        file.write(oph)\r\n        file.write(\"\\n\")\r\n        file.write(\"Owner Address :> \")\r\n        file.write(oad)\r\n        file.write(\"\\n\")\r\n        
file.write(\"Owner City & PinCode :> \")\r\n file.write(ocity)\r\n file.write(\" \")\r\n file.write(opin)\r\n file.close()\r\n print(\"Saved owner Details Successfully :) \")\r\n \r\n \r\n else :\r\n print(\"Thanks for using \")\r\n exitdata = input(\"press Y for Exit OR press N for Stay\")\r\n\r\n if exitdata==('y'|'Y'):\r\n print(\"Exiting tata bye\")\r\n exit()\r\n else :\r\n print(\"staying\")\r\n\r\n \r\n\r\ndef main_body ():\r\n print(\" [1] Add Owner Details \")\r\n print(\" [2] Add Rental Details \")\r\n print(\" [3] Rental Payment Details \")\r\n print(\" [4] Sent Notificiation \")\r\n print(\" [5] Exit\")\r\n\r\n choose = int(input(\"Choose The service from above list :> \"))\r\n\r\n if choose ==1:\r\n owner_ds()\r\n \r\n elif choose ==2:\r\n rental_ds()\r\n\r\n elif choose ==3:\r\n rent_data()\r\n\r\n elif choose ==4:\r\n print(\"choose 4\")\r\n\r\n elif choose ==5:\r\n exit()\r\n else :\r\n print(\"please choose the correct service :(\")\r\n main_body()\r\n print(\"Thanks for using\")\r\n\r\nif __name__ == \"__main__\":\r\n main_body()\r\n","repo_name":"Softx-Digital-product/python","sub_path":"rental.py","file_name":"rental.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"} +{"seq_id":"8986754469","text":"import os\nfrom os.path import abspath, dirname, join\nfrom setuptools import setup, find_packages\nfrom torch.utils.cpp_extension import CppExtension, CUDAExtension, BuildExtension\n\nINCLUDE_DIR = join(dirname(abspath(__file__)), 'include')\nEXTRA_COMPILE_ARGS = ['-O3']\n\nEXTENSION = []\n\nCC = ['52', '53', '60', '61', '62', '70', '72', '75', '80']\n\nif os.getenv('USE_OPENMP', '1') == '1':\n EXTRA_COMPILE_ARGS.append('-fopenmp')\n\nif os.getenv('USE_CUDA', '1') == '1':\n EXTRA_COMPILE_ARGS.append('-DUSE_CUDA')\n\n GENERATE_CODES = []\n\n for cc in CC:\n GENERATE_CODES.append('--generate-code')\n GENERATE_CODES.append(f'arch=compute_{cc},code=compute_{cc}')\n\n EXTENSION.append(\n CUDAExtension(\n name='involution',\n sources=[\n 'src/involution2d_cpu.cpp',\n 'src/involution2d_cuda.cu',\n 'src/pytorch_wrapper.cpp',\n ],\n include_dirs=[\n INCLUDE_DIR\n ],\n extra_compile_args={\n 'cxx': EXTRA_COMPILE_ARGS,\n 'nvcc': ['-O3'] + GENERATE_CODES,\n }\n )\n )\nelse:\n EXTENSION.append(\n CppExtension(\n name='involution',\n sources=[\n 'src/involution2d_cpu.cpp',\n 'src/pytorch_wrapper.cpp',\n ],\n include_dirs=[\n INCLUDE_DIR\n ],\n extra_compile_args=EXTRA_COMPILE_ARGS\n )\n )\n\nsetup(\n name='involution-pytorch',\n version=\"0.1.0\",\n url=\"https://github.com/shikishima-TasakiLab/Involution-PyTorch\",\n license=\"MIT License\",\n author=\"Junya Shikishima\",\n author_email=\"160442065@ccalumni.meijo-u.ac.jp\",\n description=\"PyTorch Involution\",\n packages=find_packages(),\n ext_modules=EXTENSION,\n cmdclass={\n 'build_ext': BuildExtension,\n }\n)\n","repo_name":"shikishima-TasakiLab/Involution-PyTorch","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"88"} +{"seq_id":"74097397407","text":"products = [\n ('shoe', 9),\n ('cap', 5),\n ('jeans', 60),\n ('socks', 3),\n ('jacket', 55)\n]\n\nproducts.sort(key=lambda prod: prod[1], reverse=True)\nprint(products)\n\n\n# basic example of a function to square\ndef square(num):\n return num*num\n\n\nresult = square(5)\n\na=lambda num: num*num\n\n\nadd = lambda a,b : a+b\n\nresult = 
add(5,6)\nprint(result)","repo_name":"jaythomasv29/python-and-beyond","sub_path":"08-lambda-function.py","file_name":"08-lambda-function.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"14367628719","text":"def name():\n    return \"Misc.\"\n\ndef parse(parser):\n    misc = parser.add_argument_group(\"Misc.\")\n    misc.add_argument(\"-as\", \"--auto-sprint\", action = \"store_true\",\n                      help = \"Player always sprints. Sprint Shoes have no effect\")\n    misc.add_argument(\"-ond\", \"--original-name-display\", action = \"store_true\",\n                      help = \"Display original character names in party and party select menus\")\n    misc.add_argument(\"-rr\", \"--random-rng\", action = \"store_true\",\n                      help = \"Randomize in-game RNG table. Affects Setzer's Slots, Auction House, Ebot's Rock, ...\")\n    misc.add_argument(\"-rc\", \"--random-clock\", action = \"store_true\",\n                      help = \"Randomize clock's correct time and NPC clues in Zozo\")\n    misc.add_argument(\"-scan\", \"--scan-all\", action = \"store_true\",\n                      help = \"All enemies scannable. All characters start with scan learned. Scan costs 0 MP. Useful for testing/debugging\")\n\n    event_timers = misc.add_mutually_exclusive_group()\n    event_timers.add_argument(\"-etr\", \"--event-timers-random\", action = \"store_true\",\n                              help = \"Collapsing House, Opera House, and Floating Continent timers randomized\")\n    event_timers.add_argument(\"-etn\", \"--event-timers-none\", action = \"store_true\",\n                              help = \"Collapsing House, Opera House, and Floating Continent timers removed\")\n\n    y_npc = misc.add_mutually_exclusive_group()\n    y_npc.add_argument(\"-ymascot\", \"--y-npc-mascot\", action = \"store_true\",\n                       help = \"Transform NPC into random mascot\")\n    y_npc.add_argument(\"-ycreature\", \"--y-npc-creature\", action = \"store_true\",\n                       help = \"Transform NPC into random creature\")\n    y_npc.add_argument(\"-yimperial\", \"--y-npc-imperial\", action = \"store_true\",\n                       help = \"Transform NPC into random imperial unit\")\n    y_npc.add_argument(\"-ymain\", \"--y-npc-main\", action = \"store_true\",\n                       help = \"Transform NPC into random main character\")\n    y_npc.add_argument(\"-yreflect\", \"--y-npc-reflect\", action = \"store_true\",\n                       help = \"Transform NPC into current character\")\n    y_npc.add_argument(\"-ystone\", \"--y-npc-stone\", action = \"store_true\",\n                       help = \"Turn NPC to stone\")\n    y_npc.add_argument(\"-yvxz\", \"--y-npc-vanish-xzone\", action = \"store_true\",\n                       help = \"Cast vanish and x-zone on NPC\")\n    y_npc.add_argument(\"-ysketch\", \"--y-npc-sketch\", action = \"store_true\",\n                       help = \"Sketch NPC\")\n    y_npc.add_argument(\"-yrandom\", \"--y-npc-random\", action = \"store_true\",\n                       help = \"Transform NPC randomly\")\n    y_npc.add_argument(\"-yremove\", \"--y-npc-remove\", action = \"store_true\",\n                       help = \"Remove NPC\")\n    parser.y_npc_group = y_npc\n\n    remove_flashes = misc.add_mutually_exclusive_group()\n    remove_flashes.add_argument(\"-frw\", \"--flashes-remove-worst\", action = \"store_true\",\n                                help = \"Removes only the worst flashes from animations. Ex: Learning Bum Rush, Bum Rush, Quadra Slam/Slice, Flash, etc.\")\n    remove_flashes.add_argument(\"-frm\", \"--flashes-remove-most\", action = \"store_true\",\n                                help = \"Removes most flashes from animations. 
Includes Kefka Death.\")\n\ndef process(args):\n    args.y_npc = False # are any y_npc flags enabled?\n\n    group = args.parser.y_npc_group\n    for action in group._group_actions:\n        if getattr(args, action.dest):\n            args.y_npc = True\n            break\n\ndef flags(args):\n    flags = \"\"\n\n    if args.auto_sprint:\n        flags += \" -as\"\n    if args.original_name_display:\n        flags += \" -ond\"\n    if args.random_rng:\n        flags += \" -rr\"\n    if args.random_clock:\n        flags += \" -rc\"\n    if args.scan_all:\n        flags += \" -scan\"\n\n    if args.event_timers_random:\n        flags += \" -etr\"\n    elif args.event_timers_none:\n        flags += \" -etn\"\n\n    if args.y_npc_mascot:\n        flags += \" -ymascot\"\n    elif args.y_npc_creature:\n        flags += \" -ycreature\"\n    elif args.y_npc_imperial:\n        flags += \" -yimperial\"\n    elif args.y_npc_main:\n        flags += \" -ymain\"\n    elif args.y_npc_reflect:\n        flags += \" -yreflect\"\n    elif args.y_npc_stone:\n        flags += \" -ystone\"\n    elif args.y_npc_vanish_xzone:\n        flags += \" -yvxz\"\n    elif args.y_npc_sketch:\n        flags += \" -ysketch\"\n    elif args.y_npc_random:\n        flags += \" -yrandom\"\n    elif args.y_npc_remove:\n        flags += \" -yremove\"\n\n    if args.flashes_remove_worst:\n        flags += \" -frw\"\n    if args.flashes_remove_most:\n        flags += \" -frm\"\n\n    return flags\n\ndef options(args):\n    event_timers = \"Original\"\n    if args.event_timers_random:\n        event_timers = \"Random\"\n    elif args.event_timers_none:\n        event_timers = \"None\"\n\n    y_npc = \"None\"\n    if args.y_npc_mascot:\n        y_npc = \"Mascot\"\n    elif args.y_npc_creature:\n        y_npc = \"Creature\"\n    elif args.y_npc_imperial:\n        y_npc = \"Imperial\"\n    elif args.y_npc_main:\n        y_npc = \"Main Character\"\n    elif args.y_npc_reflect:\n        y_npc = \"Reflect\"\n    elif args.y_npc_stone:\n        y_npc = \"Stone\"\n    elif args.y_npc_vanish_xzone:\n        y_npc = \"Vanish/X-Zone\"\n    elif args.y_npc_sketch:\n        y_npc = \"Sketch\"\n    elif args.y_npc_random:\n        y_npc = \"Random\"\n    elif args.y_npc_remove:\n        y_npc = \"Remove\"\n\n    remove_flashes = \"Original\"\n    if args.flashes_remove_worst:\n        remove_flashes = \"Worst\"\n    elif args.flashes_remove_most:\n        remove_flashes = \"Most\"\n\n    return [\n        (\"Auto Sprint\", args.auto_sprint),\n        (\"Original Name Display\", args.original_name_display),\n        (\"Random RNG\", args.random_rng),\n        (\"Random Clock\", args.random_clock),\n        (\"Scan All\", args.scan_all),\n        (\"Event Timers\", event_timers),\n        (\"Y NPC\", y_npc),\n        (\"Remove Flashes\", remove_flashes)\n    ]\n\ndef menu(args):\n    return (name(), options(args))\n\ndef log(args):\n    from log import format_option\n    log = [name()]\n\n    entries = options(args)\n    for entry in entries:\n        log.append(format_option(*entry))\n\n    return log\n","repo_name":"AtmaTek/WorldsCollide","sub_path":"args/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":6233,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"88"} +{"seq_id":"14919046316","text":"import matplotlib.pyplot as plt\r\nfrom scipy import stats\r\n\r\ndataFile = 'PROPRTS-1-data.txt'\r\n\r\n# Using readlines()\r\nfile1 = open(dataFile, 'r')\r\nLines = file1.readlines()\r\n\r\ny = []\r\nyy = []\r\nx = []\r\nxx = []\r\n\r\n# Strips the newline character\r\nfor line in Lines:\r\n    line = line.replace('(','').replace(')','')\r\n    line = line.replace(',', '')\r\n    line = line.split()\r\n    xx.append(line[0])\r\n    xx = [float(i) for i in xx]\r\n    yy.append(line[1])\r\n    yy = [float(i) for i in yy]\r\nfile1.close()\r\nx.clear()\r\ny.clear()\r\nx = [float(i) for i in xx]\r\ny = [float(i) for i in yy]\r\nxx.clear()\r\nyy.clear()\r\n\r\nslope, intercept, r, 
p, std_err = stats.linregress(x, y)\r\n\r\ndef myfunc(x):\r\n    return slope * x + intercept\r\n\r\nmymodel = list(map(myfunc, x))\r\n\r\nplt.clf()\r\nplt.scatter(x, y)\r\nplt.plot(x, mymodel)\r\nplt.title(\"PROPRTS-1-LinearRegression\")\r\nplt.savefig(\"PROPRTS-1-LR.png\")\r\n","repo_name":"0x00C0DE/PROPRTS","sub_path":"src/proprts/PROPRTS-1-LRmodel.py","file_name":"PROPRTS-1-LRmodel.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"31698897949","text":"from django import forms\nfrom .models import Notes\n\n\nclass NotesForm(forms.ModelForm):\n    class Meta:\n        model = Notes\n        fields = [\"title\", \"text\"]\n        widgets = {\n            \"title\": forms.TextInput(attrs={\"class\": \"form-control mb-5\"}),\n            \"text\": forms.Textarea(attrs={\"class\": \"form-control mb-5\"}),\n        }\n        labels = {\"text\": \"Write your thoughts here:\"}\n\n    def clean_title(self):\n        title = self.cleaned_data[\"title\"]\n        return title\n","repo_name":"roshan2498/django-notes-app","sub_path":"notes/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"27818271503","text":"from math import sqrt, floor\n\n# list for memoization\nsaved = [2, 3]\n\ndef is_prime(num):\n    # primes must be positive\n    if num <= 1: return False\n\n    # all numbers less than 3 and greater than 0 are prime are prime\n    if num < 3: return True\n\n    # if its in our saved primes list we don't need to calculate it\n    if num in saved: return True\n\n    # only ever need to check up to the square root of a value\n    limit = floor(sqrt(num))\n\n    # if our number is divisible by any number from 2 to it's root, it's not prime\n    for i in range(2, limit + 1):\n        if num % i == 0:\n            return False\n    # if it's not divisible by any of those, then it IS prime!\n    saved.append(num)\n    return True\n\nif __name__ == '__main__':\n    mode = int(input(\"choose a mode:\\nbatch mode: 0\\nsingle mode: 1\\n\"))\n\n    \"\"\" batch mode \"\"\"\n    if mode == 0:\n        start = int(input(\"input the start number: \"))\n        end = int(input(\"input the end number: \"))\n\n        for i in range(start, end + 1):\n            if is_prime(i): print(f\"{i} is prime!\")\n            else: print(f\"{i} is not prime\")\n\n    if mode == 1:\n        num = 1\n        while num >= 0:\n            num = int(input(\"input a number (or -1 to quit): \"))\n            if is_prime(num): print(f\"{num} is prime!\")\n            else: print(f\"{num} is not prime\")\n","repo_name":"kipawaa/Little-Projects","sub_path":"Python/Fun With Numbers/primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"37790844028","text":"import plotly as py\n\npy.offline.init_notebook_mode()\npyplot = py.offline.iplot\n\nimport plotly.graph_objs as go\nfrom plotly.graph_objs import Scatter\n\nfrom scipy import stats\n\nimport pandas as pd\n\nimport numpy as np\n\nimport seaborn as sns\n\nsns.set(style='darkgrid')\n\nimport matplotlib.pyplot as plt\n\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\nimport os\n\ndata = pd.read_csv('LCIS.csv')\n\n# 修改列名\ndata.rename(columns={'ListingId': '列表序号', 'recorddate': '记录日期'}, inplace=True)\n\n# 缺失率\nmiss_rate = pd.DataFrame(data.apply(lambda x: sum(x.isnull()) / len(x)))\n\n# 将缺失率保存为一列\nmiss_rate.columns = ['缺失率']\n\n# 缺失率以三位小数百分数形式表示\nmiss_rate[miss_rate['缺失率'] > 0]['缺失率'].apply(lambda x: format(x, 
'.3%'))\n\n# 计数‘下次计划还款利息’为缺失值(已还清)的用户的‘标当前状态’\ndata[data['下次计划还款利息'].isnull()]['标当前状态'].value_counts()\n\n# 显示'上次还款利息'为缺失值的用户信息的后九列\ndata[data['上次还款利息'].isnull()].iloc[:, -9:-1]\n\n# 查看历史成功借款金额缺失的用户情况\ndata[data['历史成功借款金额'].isnull()]\n\n# 查看记录日期缺失的用户情况\ndata[data['记录日期'].isnull()][['手机认证', '户口认证']]\n\n# 删除记录日期缺失的用户数据\ndata.dropna(subset=['记录日期'], how='any', inplace=True)\n\n# 去重画图\ndata[data.duplicated()]\ndata['手机认证'].value_counts().plot(kind='bar')\n\n# 取出’手机认证’一列中的'成功认证'和'未成功认证',其他删除\ndata = data[(data['手机认证'] == '成功认证') | (data['手机认证'] == '未成功认证')]\ndata = data[(data['户口认证'] == '成功认证') | (data['户口认证'] == '未成功认证')]\ndata = data[(data['视频认证'] == '成功认证') | (data['视频认证'] == '未成功认证')]\ndata = data[(data['学历认证'] == '成功认证') | (data['学历认证'] == '未成功认证')]\ndata = data[(data['征信认证'] == '成功认证') | (data['征信认证'] == '未成功认证')]\ndata = data[(data['淘宝认证'] == '成功认证') | (data['淘宝认证'] == '未成功认证')]\n\n# 不同性别的放贷比例与逾期关系\ndf_gender = pd.pivot_table(data=data, columns='标当前状态', index='性别', values='列表序号', aggfunc=np.size)\n\n# 借款笔数占比\ndf_gender['借款笔数占比'] = df_gender.apply(np.sum, axis=1) / df_gender.sum().sum()\n\n# 逾期笔数占比\ndf_gender['逾期笔数占比'] = df_gender['逾期中'] / df_gender.sum(axis=1)\n\n# 画图\nplt.figure(figsize=(16, 9))\n\nplt.subplot(121)\nplt.bar(x=df_gender.index, height=df_gender['借款笔数占比'], color=['c', 'g'])\nplt.title('男女借款比例')\n\nplt.subplot(122)\nplt.bar(x=df_gender.index, height=df_gender['逾期笔数占比'], color=['c', 'g'])\nplt.title('男女逾期情况')\n\nplt.suptitle('不同性别的客户画像')\nplt.show()\n\n# 借款累计金额占比\ndf_age = data.groupby(['年龄'])['借款金额'].sum()\ndf_age = pd.DataFrame(df_age)\ndf_age['借款金额累计'] = df_age['借款金额'].cumsum()\ndf_age['借款累计金额占比'] = df_age['借款金额累计'] / df_age['借款金额'].sum()\ndf_age\n\n# 80%的贷款借给了36岁以下的用户\nindex_num = df_age[df_age['借款累计金额占比'] > 0.8].index[0]\n\n# 画图\ncum_percent = df_age.loc[index_num, '借款累计金额占比']\nplt.figure(figsize=(16, 9))\nplt.bar(x=df_age.index, height=df_age['借款金额'], color='steelblue', alpha=0.5, width=0.7)\nplt.xlabel('年龄', fontsize=20)\nplt.axvline(x=index_num, color='orange', linestyle='--', alpha=0.8)\ndf_age['借款累计金额占比'].plot(style='--ob', secondary_y=True)\nplt.text(index_num + 0.4, cum_percent, '累计占比为:%.3f%%' % (cum_percent * 100), color='indianred')\nplt.show()\n\n# 年龄与借款的情况\n# 按照年龄分层\ndata['age_bin'] = pd.cut(data['年龄'], [17, 24, 30, 36, 42, 48, 54, 65], right=True)\n# 查看每个年龄段的情况\ndf_age = pd.pivot_table(data=data, columns='标当前状态', index='age_bin', values='列表序号', aggfunc=np.size)\n# 总的借款笔数\ndf_age['借款笔数'] = df_age.sum(axis=1)\n# 借款笔数分布\ndf_age['借款笔数分布'] = df_age['借款笔数'] / df_age['借款笔数'].sum()\n# 逾期占比\ndf_age['逾期占比'] = df_age['逾期中'] / df_age['借款笔数']\n# 变为百分数形式\ndf_age['借款笔数分布%'] = df_age['借款笔数分布'].apply(lambda x: format(x, '.3%'))\ndf_age['逾期占比%'] = df_age['逾期占比'].apply(lambda x: format(x, '.3%'))\n\n# 画图\nplt.figure(figsize=(16, 9))\ndf_age['借款笔数分布'].plot(kind='bar', rot=45, color='steelblue', alpha=0.5)\nplt.xlabel('年龄分段情况')\nplt.ylabel('借款笔数分布')\ndf_age['逾期占比'].plot(rot=45, color='steelblue', alpha=0.5, secondary_y=True)\nplt.ylabel('逾期占比情况')\nplt.grid(True)\nplt.show()\n\n# 学历与借款的情况\ndf_edu = pd.pivot_table(data=data, columns='标当前状态', index='学历认证', values='列表序号', aggfunc=np.size)\ndf_edu['借款笔数'] = df_edu.sum(axis=1)\ndf_edu['借款笔数占比'] = df_edu['借款笔数'] / df_edu['借款笔数'].sum()\ndf_edu['逾期占比'] = df_edu['逾期中'] / df_edu['借款笔数']\n\n# 画图\nplt.figure(figsize=(16, 9))\nplt.subplot(121)\nplt.pie(x=df_edu['借款笔数占比'], labels=['成功认证', '未成功认证'], colors=['orange', 'blue'], autopct='%.1f%%', pctdistance=0.5,\n        labeldistance=1.1)\nplt.title('学历认证比例')\nplt.subplot(122)\nplt.bar(x=df_edu.index, 
height=df_edu['逾期占比'], color=['orange', 'blue'], alpha=0.5)\nplt.title('不同学历人群逾期情况')\nplt.suptitle('不同学历人群客户画像')\nplt.show()\n\n# plotly.graph_objs交互式画图\n# 画条形图\ntrace_basic = [go.Bar(x=df_edu.index, y=df_edu['逾期占比'], marker=dict(color='orange'), opacity=0.50)]\nlayout = go.Layout(title='不同学历人群逾期情况', xaxis=dict(title='不同学历人群客户画像'))\nfigure_basic = go.Figure(data=trace_basic, layout=layout, )\npyplot(figure_basic)\n\n# 画饼图\ntrace_basic1 = [\n    go.Pie(labels=['成功认证', '未成功认证'], values=df_edu['借款笔数占比'], hole=0.2, textfont=dict(size=12, color='white'))]\nlayout1 = go.Layout(title='学历认证比例')\nfigure_basic1 = go.Figure(data=trace_basic1, layout=layout1, )\npyplot(figure_basic1)\n\n\n# 设计函数对多个对象进行处理\ndef trans(data, col, ind):\n    df = pd.pivot_table(data=data, columns=col, index=ind, values='列表序号', aggfunc=np.size)\n    df['借款笔数'] = df.sum(axis=1)\n    df['借款笔数占比'] = df['借款笔数'] / df_edu['借款笔数'].sum()\n    df['逾期占比'] = df['逾期中'] / df_edu['借款笔数']\n\n    plt.figure(figsize=(16, 12))\n    plt.subplot(121)\n    plt.pie(x=df['借款笔数占比'], labels=['成功认证', '未成功认证'], colors=['orange', 'blue'], autopct='%.1f%%', pctdistance=0.5,\n            labeldistance=1.1)\n    plt.title('%s占比' % ind)\n    plt.subplot(122)\n    plt.bar(x=df.index, height=df['逾期占比'], color=['orange', 'blue'], alpha=0.5)\n    plt.title('不同%s人群逾期情况' % ind)\n    plt.suptitle('不同%s人群客户画像' % ind)\n    plt.show()\n    return df\n\n\ntrans(data, '标当前状态', '淘宝认证')\ntrans(data, '标当前状态', '征信认证')\ntrans(data, '标当前状态', '视频认证')\n","repo_name":"lyoovue/rfm","sub_path":"jinrong.py","file_name":"jinrong.py","file_ext":"py","file_size_in_byte":7424,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"43510967709","text":"from socket import *\n\nserverName = \"192.168.0.199\"\nserverPort = 12000\nclientSocket = socket(AF_INET, SOCK_STREAM)\nclientSocket.connect((serverName, serverPort))\nwhile True:\n    sentence = input('Input sentence: ')\n    clientSocket.send(sentence.encode())\n    modifiedSentence = clientSocket.recv(1024)\n    if sentence == \"bye\":\n        break\n\nprint('From server: ', modifiedSentence.decode())\nclientSocket.close()\n\n\n","repo_name":"slitherman/PythonEchoServerAndClient","sub_path":"venv/EchoClient.py","file_name":"EchoClient.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"24432091603","text":"from PIL import Image\nimport numpy as np\nfrom skimage import io\nfrom skimage.color import rgb2lab, deltaE_cie76, rgb2grey, label2rgb\nfrom skimage.measure import label, regionprops\nimport keyboard\nimport mouse\nfrom PIL import ImageGrab\nimport time\nimport pyautogui\n\n# Helper function\ndef load_image_into_numpy_array(image):\n    (im_width, im_height) = image.size\n    return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)\n\n''' Variables '''\n\n# mid = 960, 540\n# 400 x 400\nog_box = (760,340,1160,740)\n\n# 200 x 200\n#bbox = (860,440,1060,640)\n\nbbox = og_box\n\nbb_center = [(bbox[0] + bbox[2]) / 2,(bbox[1] + bbox[3]) / 2]\n\ncasting = 5\n\nzoom_amount = 5\n\nzoom_times = 0\n\nprint_output = True\n\ndef run_fishing_detection (casting, bbox, zoom_times):\n\n    casting -= 1\n    \n    # Wait\n    if casting > 0:\n        print(\"Casting: \",casting)\n        time.sleep(0.1)\n        return casting, bbox, zoom_times\n\n    #print(bbox)\n    #Cap screen\n    image_np = ImageGrab.grab(bbox)\n\n    # Array of rgb colors\n    rgb = load_image_into_numpy_array(image_np)\n    \n    img = rgb\n\n    lab = rgb2lab(rgb)\n\n    # color & threshold\n    bobber_red = [200,25,25]\n\n    
threshold_fishing = 40\n\n    #replace_colour = [[[0,0,0]]]\n    replace_colour = [[[255,255,255]]]\n\n    # bobber thresholding\n\n    bobber_3d = np.uint8(np.asarray([[bobber_red]]))\n\n    dE_bobber = deltaE_cie76(rgb2lab(bobber_3d), lab)\n\n    rgb[dE_bobber > threshold_fishing] = replace_colour\n\n    # grey version\n    grey = rgb2grey(rgb)\n\n    # only black parts\n    thres_img = np.empty_like(grey)\n    thres_img[grey == 1] = 0\n    thres_img[grey < 1] = 1\n\n    # label parts\n    label_image = label(thres_img,connectivity = 2,background = 0)\n    \n    io.imsave(\"output_img.jpg\",img)\n\n    image_label_overlay = label2rgb(label_image, image=img)\n    \n    \n    # Region\n    big_region = {\"area\" : 0, \"bbox\" : (0,0,0,0), \"center\" : (0,0)}\n    \n    # Region/Label analysis\n    for region in regionprops(label_image):\n        if region.area <= 2000: #and region.area < 600 and region.extent >= 0.5:\n            \n            # region bounding box\n            minr, minc, maxr, maxc = region.bbox\n            bbox_ = [minc,minr,maxc,maxr]\n            \n            # center\n            center = region.centroid\n            \n            # get biggest area region\n            if region.area > big_region[\"area\"]:\n                big_region[\"area\"] = region.area\n                big_region[\"bbox\"] = bbox_\n                big_region[\"center\"] = center\n    \n    \n    # Control\n    \n    # Found bobber\n    if big_region[\"area\"] > 0:\n        print(\"Found bobber\")\n        center = big_region[\"center\"]\n        #time.sleep(0.1)\n        #xypos = print((center[1],center[0]))\n        \n        # Move mouse to position (remember bbox coords are relatice to the entire sceen)\n        mouse.move(center[1] + bbox[0],center[0] + bbox[1],absolute=True,duration=0)\n        time.sleep(0.5)\n        \n        # Resize bbox to shrink around the bobber\n        # real x coords: bbox[0] + big_region[\"bbox\"][0] = real coord of that coord\n        # Only resize/zoom a certain number of times\n        if zoom_times < zoom_amount:\n            zoom_times += 1\n            bbox = (bbox[0] + (big_region[\"bbox\"][0] / 2),bbox[1] + (big_region[\"bbox\"][1] / 2),((bbox[0] + big_region[\"bbox\"][2]) + bbox[2]) / 2, ((bbox[1] + big_region[\"bbox\"][3]) + bbox[3]) / 2 )\n            print(\"Zoomed in: \", zoom_times,\"/\",zoom_amount)\n            # Save image\n            if zoom_times == 3 and print_output:\n                print(\"Printing\")\n                io.imsave(\"output.jpg\",image_label_overlay)\n        \n        \n        \n    \n    # If no bobber, recast\n    else:\n        print(\"-- Recasting --\")\n        \n        # Pull rell in, Catch fish\n        print(\"- Pulls in -\")\n        mouse.click(button='right')\n        casting = 5\n        time.sleep(2)\n        \n        # Wait and recast\n        print(\"- Throws -\")\n        mouse.click(button='right')\n        time.sleep(3)\n        \n        # Resize bbox to original size\n        bbox = og_box\n        \n        # Reset zoom number\n        zoom_times = 0\n\n    return casting, bbox, zoom_times#, bb_center\n    \n    # Save image\n    #io.imsave(\"output.jpg\",image_label_overlay)\n\n\ntime.sleep(0.1)\nmouse.click(button='left')\ntime.sleep(1)\n\n# First throw\nmouse.click(button='right')\ntime.sleep(4)\n\nfor i in range(10000):\n    casting, bbox, zoom_times = run_fishing_detection(casting, bbox, zoom_times) #, bb_center\n\n\n\n","repo_name":"Fangrelle/MC_Auto_Fisher","sub_path":"Minecraft_Fisher.py","file_name":"Minecraft_Fisher.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"74470542694","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n\nfrom pymongo import MongoClient\nclient = MongoClient('mongodb://192.168.0.17:27017')\ndb = client['epsi_iot']\nlum = db.lum\ntemp = db.temp\n\npost_data = {\n    'ds': '02/04/2000 00h00',\n    'y': 5200\n}\n\nresult = temp.insert_one(post_data)\n\npost_data = {\n    'ds': '02/04/2000 00h00',\n    'y': 1200\n}\nresult = 
lum.insert_one(post_data)\n#print('One post: {0}'.format(result.inserted_id))\n#new_result = posts.insert_many([post_1, post_2, post_3])\n\"\"\"\nbills_post = posts.find_one({'author': 'Scott'})\nprint(bills_post)\n\nscotts_posts = posts.find({'author': 'Scott'})\nprint(scotts_posts)\nfor post in scotts_posts:\n    print(post)\n\"\"\"","repo_name":"cbarange/IOT_EPSI_B3","sub_path":"MongoDB_Connector.py","file_name":"MongoDB_Connector.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"70367239333","text":"from bleurt.lib import bert_tokenization\nimport tensorflow.compat.v1 as tf\nimport sentencepiece as spm\n\nflags = tf.flags\nlogging = tf.logging\nFLAGS = flags.FLAGS\n\n\nclass Tokenizer(object):\n  \"\"\"Base class for WordPiece and TokenPiece tokenizers.\"\"\"\n\n  def tokenize(self):\n    raise NotImplementedError()\n\n  def tokens_to_id(self):\n    raise NotImplementedError()\n\n\nclass WordPieceTokenizer(Tokenizer):\n  \"\"\"Wrapper around BERT's FullTokenizer.\"\"\"\n\n  def __init__(self, vocab_file, do_lower_case):\n    logging.info(\"Creating WordPiece tokenizer.\")\n    self.vocab_file = vocab_file\n    self.do_lower_case = do_lower_case\n    self._tokenizer = bert_tokenization.FullTokenizer(\n        vocab_file=vocab_file, do_lower_case=do_lower_case)\n    logging.info(\"WordPiece tokenizer instantiated.\")\n\n  def tokenize(self, text):\n    return self._tokenizer.tokenize(text)\n\n  def convert_tokens_to_ids(self, tokens):\n    return self._tokenizer.convert_tokens_to_ids(tokens)\n\n\nclass SentencePieceTokenizer(Tokenizer):\n  \"\"\"Wrapper around SentencePiece tokenizer.\"\"\"\n\n  def __init__(self, sp_model):\n    logging.info(\"Creating SentencePiece tokenizer.\")\n    self._sp_model_path = sp_model + \".model\"\n    logging.info(\"Will load model: {}.\".format(self._sp_model_path))\n    self._sp_model = spm.SentencePieceProcessor()\n    self._sp_model.Load(self._sp_model_path)\n    self.vocab_size = self._sp_model.GetPieceSize()\n    logging.info(\"SentencePiece tokenizer created.\")\n\n  def tokenize(self, text):\n    return self._sp_model.EncodeAsPieces(text)\n\n  def convert_tokens_to_ids(self, tokens):\n    return [self._sp_model.PieceToId(token) for token in tokens]\n\n\ndef create_tokenizer(vocab_file=None, do_lower_case=None, sp_model=None):\n  \"\"\"Factory function for tokenizers.\"\"\"\n  if vocab_file and do_lower_case is not None:\n    return WordPieceTokenizer(vocab_file, do_lower_case)\n\n  elif sp_model:\n    logging.info(\"Creating SentencePiece tokenizer.\")\n    return SentencePieceTokenizer(sp_model)\n\n  else:\n    raise ValueError(\"Cannot determine the type of Tokenizer to build from \"\n                     \"arguments.\")\n","repo_name":"google-research/bleurt","sub_path":"bleurt/lib/tokenizers.py","file_name":"tokenizers.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":584,"dataset":"github-code","pt":"9"} +{"seq_id":"36019812586","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nkeras_resnet.models\n~~~~~~~~~~~~~~~~~~~\n\nThis module implements popular residual models.\n\"\"\"\n\nfrom xtreme_vision.Detection.keras_resnet.models._1d import (\n    ResNet1D,\n    ResNet1D18,\n    ResNet1D34,\n    ResNet1D50,\n    ResNet1D101,\n    ResNet1D152,\n    ResNet1D200\n)\n\nfrom xtreme_vision.Detection.keras_resnet.models._2d import (\n    ResNet2D,\n    ResNet2D18,\n    ResNet2D34,\n    ResNet2D50,\n    ResNet2D101,\n    ResNet2D152,\n    ResNet2D200\n)\n\nfrom xtreme_vision.Detection.keras_resnet.models._3d import (\n    ResNet3D,\n    ResNet3D18,\n    
ResNet3D34,\n    ResNet3D50,\n    ResNet3D101,\n    ResNet3D152,\n    ResNet3D200\n)\n\nfrom xtreme_vision.Detection.keras_resnet.models._feature_pyramid_2d import (\n    FPN2D,\n    FPN2D18,\n    FPN2D34,\n    FPN2D50,\n    FPN2D101,\n    FPN2D152,\n    FPN2D200\n)\n\nfrom xtreme_vision.Detection.keras_resnet.models._time_distributed_2d import (\n    TimeDistributedResNet,\n    TimeDistributedResNet18,\n    TimeDistributedResNet34,\n    TimeDistributedResNet50,\n    TimeDistributedResNet101,\n    TimeDistributedResNet152,\n    TimeDistributedResNet200\n)\n\n# for backwards compatibility reasons\nResNet = ResNet2D\nResNet18 = ResNet2D18\nResNet34 = ResNet2D34\nResNet50 = ResNet2D50\nResNet101 = ResNet2D101\nResNet152 = ResNet2D152\nResNet200 = ResNet2D200\n","repo_name":"Adeel-Intizar/Xtreme-Vision","sub_path":"xtreme_vision/Detection/keras_resnet/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"9"} +{"seq_id":"32324906601","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 28 23:26:55 2018\r\n\r\n@author: JiaSi\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport lunardate\r\n\r\n#60 tide\r\ntide60_name = [\r\n        'Sa',\r\n        'Ssa',\r\n        'Mm',\r\n        'Msf',\r\n        'Mf',\r\n        '2Q1',\r\n        'σ1',\r\n        'Q1',\r\n        'ρ1',\r\n        'O1',\r\n        'MP1',\r\n        'M1',\r\n        'χ1',\r\n        'π1',\r\n        'P1',\r\n        'S1',\r\n        'K1',\r\n        'ψ1',\r\n        'φ1',\r\n        'θ1',\r\n        'J1',\r\n        'SO1',\r\n        'OO1',\r\n        'OQ2',\r\n        'MNS2',\r\n        '2N2',\r\n        'μ2',\r\n        'N2',\r\n        'ν2',\r\n        'OP2',\r\n        'M2',\r\n        'MKS2',\r\n        'λ2',\r\n        'L2',\r\n        'T2',\r\n        'S2',\r\n        'R2',\r\n        'K2',\r\n        'MSN2',\r\n        'KJ2',\r\n        '2SM2',\r\n        'MO3',\r\n        'M3',\r\n        'SO3',\r\n        'MK3',\r\n        'SK3',\r\n        'MN4',\r\n        'M4',\r\n        'SN4',\r\n        'MS4',\r\n        'MK4',\r\n        'S4',\r\n        'SK4',\r\n        '2MN6',\r\n        'M6',\r\n        'MSN6',\r\n        '2MS6',\r\n        '2MK6',\r\n        '2SM6',\r\n        'MSK6',  \r\n        ]\r\ntide60 = pd.DataFrame([0.0410686,\r\n        0.0821373,\r\n        0.5443747,\r\n        1.0158958,\r\n        1.0980331,\r\n        12.8542862,\r\n        12.9271398,\r\n        13.3986609,\r\n        13.4715145,\r\n        13.9430356,\r\n        14.0251729,\r\n        14.4920521,\r\n        14.5695476,\r\n        14.9178647,\r\n        14.9589314,\r\n        15.0000000,\r\n        15.0410686,\r\n        15.0821353,\r\n        15.1232059,\r\n        15.5125897,\r\n        15.5854433,\r\n        16.0569644,\r\n        16.1391017,\r\n        27.3416964,\r\n        27.4238337,\r\n        27.8953548,\r\n        27.9682084,\r\n        28.4397295,\r\n        28.5125831,\r\n        28.9019669,\r\n        28.9841042,\r\n        29.0662415,\r\n        29.4556253,\r\n        29.5284789,\r\n        29.9589333,\r\n        30.0000000,\r\n        30.0410667,\r\n        30.0821373,\r\n        30.5443747,\r\n        30.6265120,\r\n        31.0158958,\r\n        42.9271398,\r\n        43.4761563,\r\n        43.9430356,\r\n        44.0251729,\r\n        45.0410686,\r\n        57.4238337,\r\n        57.9682084,\r\n        58.4397295,\r\n        58.9841042,\r\n        59.0662415,\r\n        60.0000000,\r\n        60.0821373,\r\n        86.4079380,\r\n        86.9523127,\r\n        87.4238337,\r\n        87.9682084,\r\n        88.0503457,\r\n        88.9841042,\r\n        89.0662415])\r\ntide60.index = tide60_name\r\nw = tide60*np.pi/180/3600\r\n#---read raw data---\r\n#rwavedata(Raw File Path,Row of Data Start)\r\n#Raw Data Format:\r\n#*st yyyymmddhh height(mm)\r\n#格式為中央氣象局資料格式\r\ndef rwavedata_cwb(wave_path,start_l):\r\n    raw_wave_f = pd.read_fwf(wave_path, widths = [6,11,7,1], header=None) #切割行\r\n    wave_raw = raw_wave_f.loc[start_l-1:,1:2]\r\n    wave_raw.index = range(len(wave_raw))\r\n    return wave_raw\r\n\r\n#---date and wave_height to num---\r\n#Date,Wave Height = data_wave(Data from rwavedata)\r\ndef date_wave(wave_raw):\r\n    date_obs = pd.to_datetime(wave_raw.loc[:,1],format='%Y%m%d%H')\r\n    date_obs_int = date_obs.values.astype(np.int64) // 10**9\r\n    wave_height = 
wave_raw.loc[:,2].astype('float64')\r\n    return date_obs,date_obs_int,wave_height\r\n\r\n#----harmonic analyze----\r\n#HA parameter,Amplitude,phase,angular = HA_tide(Date,Wave Height)\r\ndef HA_tide(wave_date,wave_height,w):\r\n    #leastsquare\r\n    t = pd.DataFrame(wave_date) #Observation time\r\n    h = wave_height #Observation wave height\r\n    nan_ind = np.isnan(wave_height) #nan index\r\n    t = t[~nan_ind]\r\n    h = h[~nan_ind]\r\n    A=np.ones((len(t),len(w)*2+1))\r\n    for i in range(1,len(w)+1):\r\n        A[:,2*i-1] = np.cos(w.iloc[i-1]*t).T\r\n        A[:,2*i] = np.sin(w.iloc[i-1]*t).T\r\n    para = np.linalg.lstsq(A,h)[0]\r\n    #--amplitude and angular frequency--\r\n    amp = np.ones((1,len(w)))\r\n    pha_ang = amp\r\n    for ii in range(1,len(w)+1):\r\n        amp[:,ii-1] = np.sqrt(para[2*ii-1]**2 + para[2*ii]**2)/1000\r\n        pha_ang[:,ii-1] = np.arctan(para[2*ii]/para[2*ii-1])\r\n        if para[2*ii]<0:\r\n            pha_ang[:,ii-1]=pha_ang[:,ii-1]+np.pi\r\n        if pha_ang[:,ii-1]<0:\r\n            pha_ang[:,ii-1]=pha_ang[:,ii-1]+2*np.pi\r\n    pha_ang = pha_ang*180/np.pi\r\n    return para,amp,pha_ang\r\n\r\n#---calculate HA wave height---\r\n# Caculate Wave = HA_wave(start time,end time,parameter)\r\n#Import time pormat : yyyymmddhh\r\ndef HA_wave(start,end,para,w,mat='%Y%m%d%H'):\r\n    #Make a integral time series\r\n    start = pd.to_datetime(start,format=mat)\r\n    end = pd.to_datetime(end,format=mat)\r\n    date_n = pd.date_range(start,end,freq='H')\r\n    date_n_int = date_n.values.astype(np.int64) //10**9\r\n    t_n = pd.DataFrame(date_n_int) \r\n    A_n=np.ones((len(t_n),len(w)*2+1))\r\n    for i2 in range(1,len(w)+1):\r\n        A_n[:,2*i2-1] = np.cos(w.iloc[i2-1]*t_n).T\r\n        A_n[:,2*i2] = np.sin(w.iloc[i2-1]*t_n).T\r\n    h_ha = np.dot(A_n,para)\r\n    return h_ha,date_n\r\n\r\n#---peak---\r\n#toppeak,buttonpeak = peak(wave height)\r\ndef peak(wave_height):\r\n    delt_w1=0\r\n    peak_ind = []\r\n    peak_t_ind = []\r\n    peak_b_ind = []\r\n    #---anypeak---\r\n    for j in range(len(wave_height)-1): \r\n        delt_w = (wave_height.loc[j+1]-wave_height.loc[j])/2\r\n        if (delt_w*delt_w1) <=0:\r\n            peak_ind+=[j]\r\n            if delt_w1>delt_w:\r\n                peak_t_ind +=[j] #---toppeak---\r\n            else:\r\n                peak_b_ind +=[j] #---buttonpeak---\r\n        delt_w1 = delt_w\r\n    return peak_t_ind,peak_b_ind\r\n\r\n#---statistic of wave---\r\n#{MWL,MHWL,MLWL,HWL,LWL} = wave_statistic(wave height)\r\ndef wave_statistic(date_obs,wave_height):\r\n    peak_t_ind,peak_b_ind = peak(wave_height)\r\n    #lunar HA\r\n    syzygy_date = [1,2,14,15,16,29,30]\r\n    syzygy_ind =[]\r\n    for i in range(0,len(date_obs)):\r\n        year= date_obs[i].year\r\n        month= date_obs[i].month\r\n        day = date_obs[i].day\r\n        lunar_date_temp = lunardate.LunarDate.fromSolarDate(year,month,day)\r\n        if lunar_date_temp.day in syzygy_date:\r\n            syzygy_ind += [i]\r\n    peak_t_ind,peak_b_ind = peak(wave_height)\r\n    set1 = set(peak_t_ind)\r\n    set2 = set(syzygy_ind)\r\n    set3 = set(peak_b_ind)\r\n    hwost_ind = set1 & set2\r\n    lwost_ind = set2 & set3\r\n    #---WL---\r\n    MWL = np.mean(wave_height)\r\n    MHWL = np.mean(wave_height[peak_t_ind])\r\n    MLWL = np.mean(wave_height[peak_b_ind])\r\n    HWL = np.mean(wave_height[hwost_ind])\r\n    LWL = np.mean(wave_height[lwost_ind])\r\n    sta_para = {'MWL':MWL,\r\n                'MHWL':MHWL,\r\n                'MLWL':MLWL,\r\n                'HWL':HWL,\r\n                'LWL':LWL}\r\n    return sta_para\r\n","repo_name":"adda1963/My_Python","sub_path":"HA_tide.py","file_name":"HA_tide.py","file_ext":"py","file_size_in_byte":6687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"74838891494","text":"from django.shortcuts import render\nfrom .models import Manipulando_arquivos_class\nfrom 
csv import reader\nimport os\n\n\ndef menu(request):\n    conteudo = {\n        'x' : 'x'\n    }\n\n    return render(request, 'menu.html', context=conteudo)\n\ndef gerar_lista(request):\n    if request.POST:\n        lista_arquivos = Manipulando_arquivos_class.gerar_lista_todos_arquivos(str(request.POST['origem_lista']),request.POST['descricao_drive'])\n        Manipulando_arquivos_class.cria_arquivo_lista_todos_arquivos(lista_arquivos,request.POST['descricao_drive'])\n    \n    return render(request, 'gerar_lista.html')\n\ndef comparar_lista(request):\n    resultado_csv = ''\n    \n    if request.POST:\n        descricao_hd_01 = str(request.FILES['arquivo_json_01']).split('.')[0]\n        descricao_hd_02 = str(request.FILES['arquivo_json_02']).split('.')[0]\n\n        resultado = Manipulando_arquivos_class.comparar_listas(request.FILES['arquivo_json_01'],request.FILES['arquivo_json_02'])\n        resultado_csv = Manipulando_arquivos_class.criar_arquivo_resultado(resultado,descricao_hd_01,descricao_hd_02)\n\n    conteudo = {\n        'resultado' : resultado_csv,\n    }    \n\n    return render(request, 'comparar_lista.html', context=conteudo)\n\ndef mostrar_resultado(request):\n    lista_resultado = ''\n    \n    if request.POST:\n        arquivo_resultado = str(request.FILES['arquivo_json_resultado'])\n\n        with open((os.getcwd() + '/arquivos/' + arquivo_resultado), 'r') as arquivo:\n            arquivo_csv = reader(arquivo)\n            lista_resultado = list(arquivo_csv)\n            arquivo.close()\n        \n        #resultado_csv = reader(request.FILES['arquivo_json_resultado'])\n\n        #print(resultado_csv)\n\n    conteudo = {\n        'resultado' : lista_resultado,\n    }    \n\n    return render(request, 'mostrar_resultado.html', context=conteudo)\n","repo_name":"tiagopena/ORGANIZA_FOTOS_prj","sub_path":"criar_lista_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"38281755660","text":"from nipype.pipeline import engine as pe\nfrom nipype.interfaces import utility as niu, afni\n\nfrom .hmc import init_bold_hmc_wf\nfrom .bold_ref import init_bold_reference_wf\nfrom .resampling import init_bold_preproc_trans_wf\nfrom .stc import init_bold_stc_wf\nfrom .inho_correction import init_inho_correction_wf\nfrom .registration import init_cross_modal_reg_wf\nfrom .confounds import init_bold_confs_wf\nfrom nipype.interfaces.utility import Function\n\n\ndef init_bold_main_wf(opts, output_folder, bold_scan_list, inho_cor_only=False, name='bold_main_wf'):\n    \"\"\"\n    This workflow controls the functional preprocessing stages of the pipeline when both\n    functional and anatomical images are provided.\n\n    **Parameters**\n\n        opts\n            parser options for preprocess\n        inho_cor_only\n            whether to run the bias correction steps, or further processing steps.\n\n    **Inputs**\n\n        bold\n            Input BOLD series NIfTI file\n        coreg_anat\n            Anatomical reference for BOLD alignment\n        coreg_mask\n            Brain mask for anatomical reference\n        WM_mask\n            WM mask inherited from the common space registration\n        CSF_mask\n            CSF mask inherited from the common space registration\n        vascular_mask\n            vascular mask inherited from the common space registration\n        labels\n            Anatomical labels inherited from the common space registration\n        unbiased_to_atlas_affine\n            affine transform from the dataset template space to the commonspace space\n        unbiased_to_atlas_warp\n            non-linear transform from the dataset template space to the commonspace space\n        native_to_unbiased_affine\n            affine transform from the subject anatomical to the dataset template space\n        native_to_unbiased_warp\n            non-linear transform from 
the subject anatomical to the dataset template space\n        commonspace_ref\n            commonspace anatomical template\n\n    **Outputs**\n\n        input_bold\n            The provided input BOLD file\n        bold_ref\n            Initial EPI median volume subsequently used as 3D reference EPI volume\n        motcorr_params\n            motion parameters file provided from antsMotionCorr\n        init_denoise\n            Corrected 3D ref EPI after initial correction step\n        denoise_mask\n            resampled mask used for final denoising\n        corrected_EPI\n            3D reference EPI volume after bias field correction\n        output_warped_bold\n            Bias field corrected 3D EPI volume warped to the anatomical space\n        bold_to_anat_affine\n            affine transform from the EPI space to the anatomical space\n        bold_to_anat_warp\n            non-linear transform from the EPI space to the anatomical space\n        bold_to_anat_inverse_warp\n            inverse non-linear transform from the EPI space to the anatomical space\n        resampled_bold\n            Original BOLD timeseries resampled through motion realignment and\n            susceptibility distortion correction based on registration to the\n            anatomical image\n        resampled_ref_bold\n            3D median EPI volume from the resampled native BOLD timeseries\n        confounds_csv\n            .csv file with measured confound timecourses, including global signal,\n            WM signal, CSF signal, 6 rigid body motion parameters + their first\n            temporal derivate + the 12 parameters squared (24 motion parameters),\n            and aCompCorr timecourses\n        FD_voxelwise\n            Voxelwise framewise displacement (FD) measures that can be integrated\n            to future confound regression.\n            These measures are computed from antsMotionCorrStats.\n        pos_voxelwise\n            Voxel distancing across time based on rigid body movement parameters,\n            which can be integrated for a voxelwise motion regression\n            These measures are computed from antsMotionCorrStats.\n        FD_csv\n            .csv file with global framewise displacement (FD) measures\n        EPI_brain_mask\n            EPI brain mask for resampled bold\n        EPI_WM_mask\n            EPI WM mask for resampled bold\n        EPI_CSF_mask\n            EPI CSF mask for resampled bold\n        EPI_labels\n            EPI anatomical labels for resampled bold\n        commonspace_bold\n            Motion and SDC-corrected EPI timeseries resampled into common space\n            by applying transforms from the anatomical common space registration\n        commonspace_mask\n            EPI brain mask for commonspace bold\n        commonspace_WM_mask\n            EPI WM mask for commonspace bold\n        commonspace_CSF_mask\n            EPI CSF mask for commonspace bold\n        commonspace_vascular_mask\n            EPI vascular mask for commonspace bold\n        commonspace_labels\n            EPI anatomical labels for commonspace bold\n    \"\"\"\n\n    workflow = pe.Workflow(name=name)\n\n    inputnode = pe.Node(niu.IdentityInterface(\n        fields=['bold', 'inho_cor_anat', 'inho_cor_mask', 'coreg_anat', 'coreg_mask',\n                'native_to_commonspace_transform_list','native_to_commonspace_inverse_list',\n                'commonspace_to_native_transform_list','commonspace_to_native_inverse_list',\n                'commonspace_ref']),\n        name=\"inputnode\")\n\n    outputnode = pe.Node(niu.IdentityInterface(\n        fields=['input_bold', 'bold_ref', 'motcorr_params', 'init_denoise', 'denoise_mask', 'corrected_EPI',\n                'output_warped_bold', 'bold_to_anat_affine', 'bold_to_anat_warp', 'bold_to_anat_inverse_warp',\n                'native_bold', 'native_bold_ref', 'native_brain_mask', 'native_WM_mask', 'native_CSF_mask', 'native_labels',\n                'confounds_csv', 'FD_voxelwise', 'pos_voxelwise', 'FD_csv', 'commonspace_bold', 'commonspace_mask',\n                'commonspace_WM_mask', 'commonspace_CSF_mask', 'commonspace_vascular_mask', 'commonspace_labels',\n                'raw_brain_mask']),\n        name='outputnode')\n\n    boldbuffer = 
pe.Node(niu.IdentityInterface(fields=['bold_file']),\n                         name=\"boldbuffer\")\n\n    # this node will serve as a relay of outputs from the inho_cor main_wf to the inputs for the rest of the main_wf for bold_only\n    transitionnode = pe.Node(niu.IdentityInterface(fields=['bold_file', 'bold_ref', 'init_denoise', 'denoise_mask', 'corrected_EPI']),\n                             name=\"transitionnode\")\n\n    if inho_cor_only or (not opts.bold_only):\n        template_inputnode = pe.Node(niu.IdentityInterface(fields=['template_anat', 'template_mask']),\n                                            name=\"template_inputnode\")\n\n\n        bold_reference_wf = init_bold_reference_wf(opts=opts)\n\n        num_scan = len(bold_scan_list)\n        num_procs = min(opts.local_threads, num_scan)\n        inho_cor_wf = init_inho_correction_wf(opts=opts, image_type='EPI', output_folder=output_folder, num_procs=num_procs, name=\"bold_inho_cor_wf\")\n\n        if opts.apply_despiking:\n            despike = pe.Node(\n                afni.Despike(outputtype='NIFTI_GZ'),\n                name='despike')\n            workflow.connect([\n                (inputnode, despike, [('bold', 'in_file')]),\n                (despike, boldbuffer, [('out_file', 'bold_file')]),\n                ])\n        else:\n            workflow.connect([\n                (inputnode, boldbuffer, [('bold', 'bold_file')]),\n                ])\n\n        if opts.detect_dummy:\n            workflow.connect([\n                (bold_reference_wf, transitionnode, [\n                    ('outputnode.bold_file', 'bold_file'),\n                    ]),\n                ])\n        else:\n            workflow.connect([\n                (boldbuffer, transitionnode, [\n                    ('bold_file', 'bold_file'),\n                    ]),\n                ])\n\n        workflow.connect([\n            (inputnode, inho_cor_wf, [\n                ('inho_cor_anat', 'inputnode.anat_ref'),\n                ('inho_cor_mask', 'inputnode.anat_mask'),\n                ('bold', 'inputnode.name_source'),\n                ]),\n            (template_inputnode, inho_cor_wf, [\n                (\"template_anat\", \"template_inputnode.template_anat\"),\n                (\"template_mask\", \"template_inputnode.template_mask\"),\n                ]),\n            (boldbuffer, bold_reference_wf, [\n                ('bold_file', 'inputnode.bold_file'),\n                ]),\n            (bold_reference_wf, inho_cor_wf, [\n                ('outputnode.ref_image', 'inputnode.target_img'),\n                ]),\n            (bold_reference_wf, transitionnode, [\n                ('outputnode.ref_image', 'bold_ref'),\n                ]),\n            (inho_cor_wf, transitionnode, [\n                ('outputnode.init_denoise', 'init_denoise'),\n                ('outputnode.denoise_mask', 'denoise_mask'),\n                ('outputnode.corrected', 'corrected_EPI'),\n                ]),\n            ])\n\n    if inho_cor_only:\n        return workflow\n\n    bold_stc_wf = init_bold_stc_wf(opts=opts)\n\n    # HMC on the BOLD\n    bold_hmc_wf = init_bold_hmc_wf(opts=opts)\n\n    bold_commonspace_trans_wf = init_bold_preproc_trans_wf(opts=opts, resampling_dim=opts.commonspace_resampling, name='bold_commonspace_trans_wf')\n    bold_commonspace_trans_wf.inputs.inputnode.mask_transforms_list = []\n    bold_commonspace_trans_wf.inputs.inputnode.mask_inverses = []\n\n    bold_confs_wf = init_bold_confs_wf(opts=opts, name=\"bold_confs_wf\")\n\n    def prep_resampling_transforms(native_to_commonspace_transform_list,native_to_commonspace_inverse_list, bold_to_anat_warp, bold_to_anat_inverse_warp, bold_to_anat_affine, \n                        commonspace_to_native_transform_list, commonspace_to_native_inverse_list, bold_only=False):\n        if bold_only:\n            to_commonspace_transform_list = native_to_commonspace_transform_list\n            to_commonspace_inverse_list = native_to_commonspace_inverse_list\n            commonspace_to_raw_transform_list = commonspace_to_native_transform_list\n            commonspace_to_raw_inverse_list = commonspace_to_native_inverse_list\n            raw_to_native_transform_list = None\n            raw_to_native_inverse_list = None\n        else:\n            to_commonspace_transform_list = native_to_commonspace_transform_list+[bold_to_anat_warp, bold_to_anat_affine]\n            to_commonspace_inverse_list = native_to_commonspace_inverse_list+[0,0]\n            
commonspace_to_raw_transform_list = [bold_to_anat_affine, bold_to_anat_inverse_warp]+commonspace_to_native_transform_list\n            commonspace_to_raw_inverse_list = [1,0]+commonspace_to_native_inverse_list\n            raw_to_native_transform_list = [bold_to_anat_warp, bold_to_anat_affine]\n            raw_to_native_inverse_list = [0, 0]\n\n        return to_commonspace_transform_list, to_commonspace_inverse_list, raw_to_native_transform_list, raw_to_native_inverse_list, commonspace_to_raw_transform_list, commonspace_to_raw_inverse_list\n\n    prep_resampling_transforms_node = pe.Node(Function(input_names=['native_to_commonspace_transform_list','native_to_commonspace_inverse_list', 'bold_to_anat_warp', 'bold_to_anat_inverse_warp', 'bold_to_anat_affine', \n                        'commonspace_to_native_transform_list', 'commonspace_to_native_inverse_list', 'bold_only'],\n                                           output_names=[\n                                               'to_commonspace_transform_list','to_commonspace_inverse_list', 'raw_to_native_transform_list', 'raw_to_native_inverse_list',\n                                               'commonspace_to_raw_transform_list', 'commonspace_to_raw_inverse_list'],\n                                           function=prep_resampling_transforms),\n                                  name='prep_resampling_transforms')\n    prep_resampling_transforms_node.inputs.bold_only = opts.bold_only\n\n    if not opts.bold_only:\n        cross_modal_reg_wf = init_cross_modal_reg_wf(opts=opts)\n\n        bold_native_trans_wf = init_bold_preproc_trans_wf(opts=opts, resampling_dim=opts.nativespace_resampling, name='bold_native_trans_wf')\n\n        workflow.connect([\n            (inputnode, cross_modal_reg_wf, [\n                ('coreg_anat', 'inputnode.anat_ref'),\n                ('coreg_mask', 'inputnode.anat_mask')]),\n            (inputnode, bold_native_trans_wf, [\n                ('commonspace_to_native_transform_list', 'inputnode.mask_transforms_list'),\n                ('commonspace_to_native_inverse_list', 'inputnode.mask_inverses'),\n                ('bold', 'inputnode.name_source'),\n                ]),\n            (transitionnode, cross_modal_reg_wf, [\n                ('corrected_EPI', 'inputnode.ref_bold_brain'),\n                ('denoise_mask', 'inputnode.moving_mask'),\n                ]),\n            (cross_modal_reg_wf, outputnode, [\n                ('outputnode.bold_to_anat_affine', 'bold_to_anat_affine'),\n                ('outputnode.bold_to_anat_warp', 'bold_to_anat_warp'),\n                ('outputnode.bold_to_anat_inverse_warp', 'bold_to_anat_inverse_warp'),\n                ('outputnode.output_warped_bold', 'output_warped_bold'),\n                ]),\n            (cross_modal_reg_wf, prep_resampling_transforms_node, [\n                ('outputnode.bold_to_anat_affine', 'bold_to_anat_affine'),\n                ('outputnode.bold_to_anat_warp', 'bold_to_anat_warp'),\n                ('outputnode.bold_to_anat_inverse_warp', 'bold_to_anat_inverse_warp'),\n                ]),\n            (prep_resampling_transforms_node, bold_native_trans_wf, [\n                ('raw_to_native_transform_list', 'inputnode.transforms_list'),\n                ('raw_to_native_inverse_list', 'inputnode.inverses'),\n                ('commonspace_to_raw_transform_list', 'inputnode.commonspace_to_raw_transform_list'),\n                ('commonspace_to_raw_inverse_list', 'inputnode.commonspace_to_raw_inverse_list'),\n                ]),\n            (transitionnode, bold_native_trans_wf, [\n                ('bold_ref', 'inputnode.raw_bold_ref'),\n                ]),\n            (cross_modal_reg_wf, bold_native_trans_wf, [\n                ('outputnode.output_warped_bold', 'inputnode.ref_file')]),\n            (bold_hmc_wf, bold_native_trans_wf, [\n             ('outputnode.motcorr_params', 'inputnode.motcorr_params')]),\n            (bold_native_trans_wf, bold_confs_wf, [\n                ('outputnode.bold', 'inputnode.bold'),\n                ('outputnode.bold_ref','inputnode.ref_bold'),\n                ('outputnode.brain_mask', 'inputnode.brain_mask'),\n                ('outputnode.WM_mask', 'inputnode.WM_mask'),\n                ('outputnode.CSF_mask', 'inputnode.CSF_mask'),\n                ('outputnode.vascular_mask', 'inputnode.vascular_mask'),\n                ]),\n            (bold_native_trans_wf, outputnode, [\n                ('outputnode.bold', 'native_bold'),\n                
('outputnode.bold_ref','native_bold_ref'),\n                ('outputnode.brain_mask', 'native_brain_mask'),\n                ('outputnode.WM_mask', 'native_WM_mask'),\n                ('outputnode.CSF_mask', 'native_CSF_mask'),\n                ('outputnode.vascular_mask', 'native_vascular_mask'),\n                ('outputnode.labels', 'native_labels'),\n                ]),\n            ])\n\n    else:\n        prep_resampling_transforms_node.inputs.bold_to_anat_warp = None\n        prep_resampling_transforms_node.inputs.bold_to_anat_inverse_warp = None\n        prep_resampling_transforms_node.inputs.bold_to_anat_affine = None\n\n        workflow.connect([\n            (bold_commonspace_trans_wf, bold_confs_wf, [\n                ('outputnode.bold', 'inputnode.bold'),\n                ('outputnode.bold_ref','inputnode.ref_bold'),\n                ('outputnode.brain_mask', 'inputnode.brain_mask'),\n                ('outputnode.WM_mask', 'inputnode.WM_mask'),\n                ('outputnode.CSF_mask', 'inputnode.CSF_mask'),\n                ('outputnode.vascular_mask', 'inputnode.vascular_mask'),\n                ]),\n            ])\n\n\n    # MAIN WORKFLOW STRUCTURE #######################################################\n    workflow.connect([\n        (inputnode, prep_resampling_transforms_node, [\n            ('native_to_commonspace_transform_list', 'native_to_commonspace_transform_list'),\n            ('native_to_commonspace_inverse_list', 'native_to_commonspace_inverse_list'),\n            ('commonspace_to_native_transform_list', 'commonspace_to_native_transform_list'),\n            ('commonspace_to_native_inverse_list', 'commonspace_to_native_inverse_list'),\n            ]),\n        (transitionnode, bold_stc_wf, [\n            ('bold_file', 'inputnode.bold_file'),\n            ]),\n        (transitionnode, bold_hmc_wf, [\n            ('bold_ref', 'inputnode.ref_image'),\n            ]),\n        (bold_hmc_wf, outputnode, [\n            ('outputnode.motcorr_params', 'motcorr_params')]),\n        (transitionnode, outputnode, [\n            ('bold_ref', 'bold_ref'),\n            ('init_denoise', 'init_denoise'),\n            ('denoise_mask', 'denoise_mask'),\n            ('corrected_EPI', 'corrected_EPI'),\n            ]),\n        (bold_hmc_wf, bold_confs_wf, [\n            ('outputnode.motcorr_params', 'inputnode.movpar_file'),\n            ]),\n        (bold_confs_wf, outputnode, [\n            ('outputnode.confounds_csv', 'confounds_csv'),\n            ('outputnode.FD_csv', 'FD_csv'),\n            ('outputnode.FD_voxelwise', 'FD_voxelwise'),\n            ('outputnode.pos_voxelwise', 'pos_voxelwise'),\n            ]),\n        (prep_resampling_transforms_node, bold_commonspace_trans_wf, [\n            ('to_commonspace_transform_list', 'inputnode.transforms_list'),\n            ('to_commonspace_inverse_list', 'inputnode.inverses'),\n            ('commonspace_to_raw_transform_list', 'inputnode.commonspace_to_raw_transform_list'),\n            ('commonspace_to_raw_inverse_list', 'inputnode.commonspace_to_raw_inverse_list'),\n            ]),\n        (transitionnode, bold_commonspace_trans_wf, [\n            ('bold_ref', 'inputnode.raw_bold_ref'),\n            ]),\n        (bold_commonspace_trans_wf, bold_confs_wf, [\n            ('outputnode.raw_brain_mask', 'inputnode.raw_brain_mask'),\n            ]),\n        (inputnode, bold_confs_wf, [\n            ('bold', 'inputnode.raw_bold'),\n            ]),\n        (bold_hmc_wf, bold_commonspace_trans_wf, [\n            ('outputnode.motcorr_params', 'inputnode.motcorr_params')]),\n        (inputnode, bold_commonspace_trans_wf, [\n            ('bold', 'inputnode.name_source'),\n            ('commonspace_ref', 'inputnode.ref_file'),\n            ]),\n        (bold_commonspace_trans_wf, outputnode, [\n            ('outputnode.bold', 'commonspace_bold'),\n            ('outputnode.brain_mask', 'commonspace_mask'),\n            ('outputnode.WM_mask', 'commonspace_WM_mask'),\n            ('outputnode.CSF_mask', 'commonspace_CSF_mask'),\n            ('outputnode.vascular_mask', 'commonspace_vascular_mask'),\n            ('outputnode.labels', 'commonspace_labels'),\n            ('outputnode.raw_brain_mask', 'raw_brain_mask'),\n            ]),\n        ])\n\n    if opts.apply_slice_mc:\n        workflow.connect([\n            (bold_stc_wf, bold_hmc_wf, [\n             ('outputnode.stc_file', 'inputnode.bold_file')]),\n            (bold_hmc_wf, 
bold_commonspace_trans_wf, [\n ('outputnode.slice_corrected_bold', 'inputnode.bold_file')]),\n ])\n if not opts.bold_only:\n workflow.connect([\n (bold_hmc_wf, bold_native_trans_wf, [\n ('outputnode.slice_corrected_bold', 'inputnode.bold_file')]),\n ])\n else:\n workflow.connect([\n (transitionnode, bold_hmc_wf, [\n ('bold_file', 'inputnode.bold_file')]),\n (bold_stc_wf, bold_commonspace_trans_wf, [\n ('outputnode.stc_file', 'inputnode.bold_file')]),\n ])\n if not opts.bold_only:\n workflow.connect([\n (bold_stc_wf, bold_native_trans_wf, [\n ('outputnode.stc_file', 'inputnode.bold_file')]),\n ])\n\n return workflow\n","repo_name":"davidgruskin/RABIES","sub_path":"rabies/preprocess_pkg/bold_main_wf.py","file_name":"bold_main_wf.py","file_ext":"py","file_size_in_byte":20272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"9"} +{"seq_id":"70825828454","text":"from django.core.validators import MinValueValidator\nfrom django.db import models\nfrom django.db.models import UniqueConstraint\n\nfrom users.models import User\n\n\nclass Ingredient(models.Model):\n name = models.CharField(\n verbose_name='Ингредиент',\n max_length=200\n )\n measurement_unit = models.CharField(\n verbose_name='Единица измерения',\n max_length=200\n )\n\n class Meta:\n verbose_name = 'Ингредиент'\n verbose_name_plural = 'Ингредиенты'\n ordering = ['name']\n constraints = [\n UniqueConstraint(\n fields=['name', 'measurement_unit'],\n name='unique_name_measurement_unit'\n )\n ]\n\n def __str__(self):\n return f'{self.name} {self.measurement_unit}'\n\n\nclass Tag(models.Model):\n name = models.CharField(\n verbose_name='Тэг',\n max_length=200,\n unique=True\n )\n color = models.CharField(\n verbose_name='Цвет',\n max_length=7,\n unique=True\n )\n slug = models.SlugField(\n verbose_name='slug',\n max_length=200,\n unique=True\n )\n\n class Meta:\n verbose_name = 'Тэг'\n verbose_name_plural = 'Тэги'\n\n def __str__(self):\n return self.name\n\n\nclass Recipe(models.Model):\n author = models.ForeignKey(\n User,\n verbose_name='Автор',\n on_delete=models.CASCADE\n )\n name = models.CharField(\n verbose_name='Название',\n max_length=200\n )\n image = models.ImageField(\n verbose_name='Изображение',\n upload_to='recipes/images/'\n )\n text = models.TextField(\n verbose_name='Описание'\n )\n ingredients = models.ManyToManyField(\n Ingredient,\n verbose_name='Ингредиенты',\n related_name='recipes',\n through='RecipeIngredient',\n )\n tags = models.ManyToManyField(\n Tag,\n verbose_name='Тег',\n related_name='recipes',\n )\n cooking_time = models.PositiveSmallIntegerField(\n validators=[MinValueValidator(\n 1, 'Время готовки не может быть меньше 1 мин.'\n )\n ],\n verbose_name='Время готовки'\n )\n pub_date = models.DateTimeField(\n verbose_name='Дата публикации',\n auto_now_add=True\n )\n\n class Meta:\n verbose_name = 'Рецепт'\n verbose_name_plural = 'Рецепты'\n ordering = ['-pub_date']\n\n def __str__(self):\n return f'{self.name} {self.text}'\n\n\nclass RecipeIngredient(models.Model):\n recipe = models.ForeignKey(\n Recipe,\n on_delete=models.CASCADE,\n related_name='recipe_ingr',\n verbose_name='Рецепт'\n )\n ingredient = models.ForeignKey(\n Ingredient,\n on_delete=models.CASCADE,\n related_name='recipe_ingr',\n verbose_name='Ингредиент'\n )\n amount = models.PositiveSmallIntegerField(\n validators=[MinValueValidator(\n 1, 'Количество ингредиентов не может быть меньше 1'\n ),\n ]\n )\n\n class Meta:\n verbose_name = 'Рецепт с ингредиентом'\n verbose_name_plural = 'Рецепты с игридиентами'\n 
ordering = ['recipe']\n        constraints = [\n            UniqueConstraint(\n                fields=['recipe', 'ingredient'],\n                name='unique_recipe_ingredient'\n            )\n        ]\n\n    def __str__(self):\n        return f\"{self.recipe} {self.ingredient} - {self.amount}\"\n\n\nclass FavoritesList(models.Model):\n    user = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        related_name='favorites',\n        verbose_name='Пользователь',\n    )\n    recipe = models.ForeignKey(\n        Recipe,\n        on_delete=models.CASCADE,\n        related_name='favorites',\n        verbose_name='Рецепт'\n    )\n\n    class Meta:\n        verbose_name = 'Список избранного'\n        verbose_name_plural = 'Списки избранного'\n        ordering = ['user']\n        constraints = [\n            UniqueConstraint(\n                fields=['user', 'recipe'],\n                name='unique_user_recipe_favorite'\n            )\n        ]\n\n    def __str__(self):\n        return f\"{self.recipe} {self.user}\"\n\n\nclass ShoppingList(models.Model):\n    user = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        related_name='shopping',\n        verbose_name='Пользователь'\n    )\n    recipe = models.ForeignKey(\n        Recipe,\n        on_delete=models.CASCADE,\n        related_name='shopping',\n        verbose_name='Рецепт'\n    )\n\n    class Meta:\n        verbose_name = 'Список покупок'\n        verbose_name_plural = 'Списки покупок'\n        ordering = ['id']\n        constraints = [\n            UniqueConstraint(\n                fields=['user', 'recipe'],\n                name='unique_user_recipe_shopping'\n            )\n        ]\n\n    def __str__(self):\n        return f\"{self.recipe} {self.user}\"\n","repo_name":"nevladi/foodgram-project-react","sub_path":"backend/foodgram/recipes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"12061370714","text":"from flask import Blueprint, jsonify, session, request\nfrom app.models import Review, db\nfrom app.forms import ReviewForm\n\nreviews_routes = Blueprint('reviews', __name__)\n\ndef validation_errors_to_error_messages(validation_errors):\n    \"\"\"\n    Simple function that turns the WTForms validation errors into a simple list\n    \"\"\"\n    errorMessages = []\n    for field in validation_errors:\n        for error in validation_errors[field]:\n            errorMessages.append(f'{error}')\n    return errorMessages\n\n\n@reviews_routes.route('/', methods=['POST'])\ndef create_new_review():\n    res = request.get_json()\n    form = ReviewForm()\n    form[\"csrf_token\"].data = request.cookies[\"csrf_token\"]\n\n    if form.validate_on_submit():\n        review = Review(\n            rating = res[\"rating\"],\n            review = res[\"review\"],\n            course_id = res[\"course_id\"],\n            user_id = res[\"user_id\"],\n        )\n        db.session.add(review)\n        db.session.commit()\n        return review.to_dict()\n    return {'errors': validation_errors_to_error_messages(form.errors)}, 401\n\n\n@reviews_routes.route('/<int:id>', methods=[\"DELETE\"])\ndef delete_review(id):\n    review = Review.query.get(id)\n    if review:\n        db.session.delete(review)\n        db.session.commit()\n        return {\"Response\": \"Successfully deleted review.\"}\n    # no matching review: report it instead of silently returning None\n    return {\"errors\": [\"Review not found.\"]}, 404\n\n\n@reviews_routes.route('/<int:id>', methods=[\"PUT\"])\ndef edit_review(id):\n    review = Review.query.get(id)\n    res = request.get_json()\n    form = ReviewForm()\n    form[\"csrf_token\"].data = request.cookies[\"csrf_token\"]\n    if review:\n        if form.validate_on_submit():\n            review.rating = res[\"rating\"]\n            review.review = res[\"review\"]\n            db.session.commit()\n            return review.to_dict()\n    return {'errors': validation_errors_to_error_messages(form.errors)}, 
401","repo_name":"clarencema3/AllCourses","sub_path":"app/api/review_routes.py","file_name":"review_routes.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"29408935840","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm as CM\nfrom PIL import Image\n\nfrom data import get_input, get_output\nfrom model import load_model\n\n\ndef predict(modelname: str, img_path: str):\n '''\n Loads the specified model from the model-directory, loads the specified image.\n Predicts the amount of objects in the image.\n\n Returns\n -------\n count\n Number of objects in the image\n image\n The input image\n hmap\n The predicted heightmap \n '''\n # Function to load image,predict heat map, generate count and return (count , image , heat map)\n model = load_model(modelname)\n image = get_input(img_path, True)\n hmap = model.predict(image)\n count = np.sum(hmap)\n return count, image, hmap\n\n\ndef show_sample(img_path: str, modelname: str = None):\n '''\n Loads the specified model from the model-directory, loads the specified image.\n Plots the original sample, the groundtruth-heightmap and the predicted-heightmap\n '''\n if modelname is None:\n img = Image.open(img_path)\n img = np.array(img)\n groundtruth, _ = get_output(img_path)\n count = np.sum(groundtruth)\n hmap = None\n else:\n count, img, hmap = predict(modelname, img_path)\n groundtruth, _ = get_output(img_path)\n if groundtruth is None:\n count = int(np.sum(count)) + 1\n print(\"Prediction: {}\".format(count))\n fig, ax = plt.subplots(1, 2)\n ax[0].imshow(img)\n ax[1].imshow(hmap.reshape(\n hmap.shape[1], hmap.shape[2]), cmap=CM.jet)\n ax[0].set_title(\"Original image\")\n ax[1].set_title(\"Prediction ({})\".format(count))\n \n elif hmap is None:\n count = int(np.sum(count)) + 1\n print(\"Groundtruth: {}\".format(count))\n fig, ax = plt.subplots(1, 2)\n ax[0].imshow(img)\n ax[1].imshow(groundtruth, cmap=CM.jet)\n ax[0].set_title(\"Original image\")\n ax[1].set_title(\"Groundtruth ({})\".format(count))\n else:\n groundtruth = int(np.sum(groundtruth)) + 1\n count = int(np.sum(count)) + 1\n print(\"Groundtruth: {}; Prediction: {}\".format(groundtruth, count))\n\n fig, ax = plt.subplots(1, 3)\n ax[0].imshow(img.reshape(img.shape[1], img.shape[2], img.shape[3]))\n ax[1].imshow(hmap.reshape(hmap.shape[1], hmap.shape[2]), cmap=CM.jet)\n ax[2].imshow(groundtruth, cmap=CM.jet)\n ax[0].set_title(\"Original image\")\n ax[1].set_title(\"Prediction ({})\".format(count))\n ax[2].set_title(\"Groundtruth ({})\".format(groundtruth))\n plt.show()\n\n\nif __name__ == \"__main__\":\n from tkinter.filedialog import askopenfilename\n\n filename = \"y\"\n while filename != \"None\":\n filename = askopenfilename()\n show_sample(filename, \"Model\")\n","repo_name":"Haschtl/MLCMS-SS2021-Group-A","sub_path":"FinalProject/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"10310737544","text":"import os\n\nfrom django.core.mail import send_mail\nfrom django.template import loader\n\n\ndef send_welcome_mail(firstname, username, email_address):\n html_template = loader.get_template('communication/email_welcome.html')\n context = {\n 'firstname': firstname,\n 'username': username,\n }\n html = html_template.render(context)\n\n text = f\"\"\"\n Hi {firstname},\n\n You are receiving this email because 
you had signed up for Instaclone as\n {username}.\n\n Have a great day!\n Team Instaclone\n \"\"\"\n\n subject = \"Welcome to Instaclone!\"\n from_email = os.getenv('INSTACLONE_MAIL_USER')\n to_email = [email_address]\n\n send_mail(\n subject=subject,\n message=text,\n from_email=from_email,\n recipient_list=to_email,\n html_message=html,\n )\n","repo_name":"advaithhl/Instaclone","sub_path":"instaclone/communication/emails.py","file_name":"emails.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"22506637942","text":"from unittest import TestCase\n\nfrom minitorch import Tensor\n\n\nclass TestMean(TestCase):\n\n def test_mean(self):\n t1 = Tensor([1., 2., 3.])\n t2 = t1.mean()\n self.assertEqual(t2.data.tolist(), 2.)\n\n # (3,) -> ()\n t1 = Tensor([1., 2., 3., 4.], requires_grad=True)\n t2 = t1.mean()\n t2.backward()\n self.assertEqual(t1.grad.data.tolist(), [1/4., 1/4, 1/4, 1/4])\n\n # (2, 3) -> (3, )\n t1 = Tensor([[1., 2., 3.], [4., 5., 6.]], requires_grad=True)\n t2 = t1.mean(axis=0)\n t2.backward(Tensor([1., 1., 1.]))\n self.assertEqual(t1.grad.data.tolist(), [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]])\n\n # (2, 3) -> (2, )\n t1 = Tensor([[1., 2., 3., 4.], [4., 5., 6.,7.]], requires_grad=True)\n t2 = t1.mean(axis=1)\n t2.backward(Tensor([1., 1.]))\n self.assertEqual(t1.grad.data.tolist(), [[1/4., 1/4, 1/4, 1/4], [1/4., 1/4, 1/4, 1/4]])\n\n # (2, 3) -> (,)\n t1 = Tensor([[1., 2., 3.], [4., 5., 6.]], requires_grad=True)\n t2 = t1.mean()\n t2.backward(Tensor(1.0))\n self.assertEqual(t1.grad.data.tolist(), [[1/6, 1/6, 1/6], [1/6, 1/6, 1/6]])\n","repo_name":"zhouzaida/minitorch","sub_path":"tests/test_autograd/test_mean.py","file_name":"test_mean.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"9"} +{"seq_id":"18817555714","text":"def inp():\n fname = input(\"Enter input file name: \")\n # if (len(fname)) < 1: fname = \"../sample_input.txt\"\n try:\n fhand = open(fname)\n except:\n fname = \"../sample_input.txt\"\n print(\"Didn't entered a valid file, choose sample input file\")\n fhand = open(fname)\n groupList = list()\n group = list()\n for line in fhand:\n # print(\"group: \", group, type(group), len(groupList))\n line = line.rstrip()\n # print(\"Line: \", line)\n if line == '':\n groupList.append(group)\n group = []\n continue\n group.append(line)\n groupList.append(group)\n return groupList\n\ndef prepare_list_of_dict(groupList):\n # creating a list of dictionary of count of question with answer \"yes\" by group members\n totalList = list()\n for group in groupList:\n dictOfQuestions = dict()\n for person in group:\n for question in person:\n count = dictOfQuestions.get(question, 0)\n count += 1\n dictOfQuestions[question] = count\n totalList.append(dictOfQuestions)\n return totalList\n\ndef calc_num_of_yes_part_01(groupList):\n part01List = list()\n for item in groupList:\n listKeys = list(item.keys())\n lenght = len(listKeys)\n part01List.append(lenght)\n return part01List\n\ndef calc_total_yes(listOfTotalYes):\n sum = 0\n for num in listOfTotalYes:\n sum += num\n return sum\n\ndef calc_num_of_yes_part_02(groupList):\n groupListPart02 = prepare_list_of_dict(groupList)\n part02List = list()\n lengthList = list()\n for item in groupList:\n lengthList.append(len(item))\n\n i = 0\n for item in groupListPart02:\n valueList = list(item.values())\n length = lengthList[i]\n i += 1\n temp = valueList.count 
(length)\n part02List.append(temp)\n return part02List\n\n\ndef part_01(groupList):\n groupList = prepare_list_of_dict(groupList)\n # print (groupList)\n listOfTotalYes = calc_num_of_yes_part_01(groupList)\n # print(\"No of questions with Yes answer for all groups: \", listOfTotalYes)\n totalYes = calc_total_yes(listOfTotalYes)\n print(\"Total yes: \", totalYes)\n\ndef part_02(groupList):\n listOfTotalCommonYes = calc_num_of_yes_part_02(groupList)\n CommonYes = calc_total_yes(listOfTotalCommonYes)\n print (\"Common Yes: \", CommonYes)\n\ngroupList = inp()\n# print(\"Questions with answer 'yes': \", groupList)\npart_01(groupList)\npart_02(groupList)\n","repo_name":"jindalabhishek1/aoc","sub_path":"day06/python/day06.py","file_name":"day06.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"73367608933","text":"from sqlalchemy import create_engine, update\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom datetime import datetime\n\nfrom ORM.tables.tblUrlQueue import *\nfrom ORM.tables.tblRetailer import *\nfrom ORM.tables.tblStatic import *\nfrom ORM.tables.tblDynamic import *\nfrom ORM.tables.tblUrls import *\n\n\nEngine = create_engine('mysql+pymysql://root:qwerty1!@127.0.0.1/dataFetch')\nSession = sessionmaker(bind=Engine)\nsession = Session()\n\nclass CrawlResults():\n\n\tdef __init__(self, dic):\n\t\tprint('start crawl results')\n\t\tself.dic = dic\n\t\tself.stadic = Static(self.dic)\n\t\tself.url = self.stadic.staUrlId\n\t\tself.retId = self.stadic.staRetId \n\t\tself.childId = self.stadic.staChildIdentifier\n\t\tself.urldic = Urls(self.url, self.retId)\n\t\tself.dyndic = Dynamic(self.dic)\n\t\tself.dyndic.dynRetId = self.retId\n\n\tdef Upsert_Data(self):\n\t\ttry:\n\t\t\tself.URLID = CrawlResults.CheckPK_URL(self, self.url, self.retId)\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\n\t\ttry:\n\t\t\tself.stadic.staUrlId = self.URLID\n\t\t\tSTAID = CrawlResults.CheckPK_STA(self, self.retId, self.URLID, self.childId)\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\n\t\t# try:\n\t\tself.dyndic.dynStaId = STAID\n\t\tCrawlResults.Insert_tblDynamic(self.dyndic)\n\t\t# except Exception as e:\n\t\t# \tprint(e)\n\n\tdef CheckPK_URL(self, url, retId):\n\t\ttry:\n\t\t\tprint('try check pk URL')\n\t\t\tURLID = session.query(Urls).filter(Urls.urlUrl == url, Urls.urlRetId == retId).first().urlId\n\t\t\tprint('pk found URL')\n\t\t\tCrawlResults.Update_tblUrl(self.urldic, URLID)\n\t\t\treturn URLID\n\t\texcept NoResultFound:\n\t\t\treturn\n\t\texcept:\n\t\t\tURLID = CrawlResults.Insert_tblUrl(self.urldic)\n\t\t\treturn URLID\n\n\tdef Insert_tblUrl(urldic):\n\t\tprint('try insert tblurl')\n\t\turldic.urlDateInserted = datetime.now().replace(microsecond=0)\n\t\turldic.urlDateModified = datetime.now().replace(microsecond=0)\n\t\turldic.urlBadUrl = 0\n\t\turldic.urlBadUrlHistory = 0\n\t\turldic.urlPriority = 1\n\t\tsession.add(urldic)\n\t\tsession.commit()\n\t\tprint('inserted tblurl')\n\t\tsession.refresh(urldic)\n\t\tURLID = (urldic.urlId)\n\t\tsession.flush()\n\t\treturn URLID\n\n\tdef Update_tblUrl(urldic, UurlId):\n\t\tpass\n\n\tdef CheckPK_STA(self, retId, URLID, CHILDID):\n\t\ttry:\n\t\t\tprint('try check pk sta')\n\t\t\tSTAID = session.query(Static).filter(Static.staRetId == retId, Static.staUrlId == URLID, Static.staChildIdentifier == CHILDID).first().staId\n\t\t\tprint(\"sta pk found\")\n\t\t\tCrawlResults.Update_tblStatic(self.stadic, 
STAID)\n\t\t\treturn STAID\n\t\texcept:\n\t\t\tprint(\"sta pk not found\")\n\t\t\tprint(self.stadic)\n\t\t\tSTAID = CrawlResults.Insert_tblStatic(self.stadic)\n\t\t\treturn STAID\n\n\tdef Insert_tblStatic(self):\n\t\tprint(\"try insert tblstatic\")\n\t\tsession.add(self)\n\t\ttry:\n\t\t\tsession.commit()\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\tsession.refresh(self)\n\t\tSTAID = (self.staId)\n\t\tprint('inserted into tblstatic')\n\t\tsession.flush()\n\t\treturn STAID\n\n\tdef Update_tblStatic(self, STAID):\n\t\tprint(\"try update tblstatic\")\n\t\tprint(self)\n\t\tprint(STAID)\n\t\tself.staCrawlTime = datetime.now().replace(microsecond=0)\n\t\tprint('need to add update logic for tblstatic')\n\t\t# x = update(Urls.__table__).\\\n\t\t# \t\twhere(Urls.urlId == UurlId).\\\n\t\t# \t\tvalues(urlBadUrl=1)\n\t\t# print(x.execution_options())\n\t\t# Engine.execute(x)\n\t\t# session.flush()\n\t\t# pass\n\n\tdef Insert_tblDynamic(self):\n\t\tprint(\"try insert tbldynamic\")\n\t\tself.staCrawlTime = datetime.now().replace(microsecond=0)\n\t\tsession.add(self)\n\t\tsession.commit()\n\t\tsession.refresh(self)\n\t\tprint('inserted into tbldynamic' + str(self.dynStaId))","repo_name":"jkc909/dataFetch","sub_path":"junk/legacy_versions/dbCrawlResults.py","file_name":"dbCrawlResults.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"72450486054","text":"def create_histogram(word):\n # this function basically creates\n # a frequency chart of the different\n # letters in the word\n histogram = {}\n for letter in word:\n if not letter in histogram:\n histogram[letter] = 1\n else:\n histogram[letter] += 1\n return histogram\n\ndef find_anagrams(word, candidates):\n normalized = word.lower()\n base_histogram = create_histogram(normalized)\n anagrams = []\n # compare the base histogram with \n # histograms of candidates\n for candidate in candidates:\n normalized_candidate = candidate.lower()\n if normalized_candidate == normalized:\n continue\n if base_histogram == create_histogram(normalized_candidate):\n anagrams.append(candidate)\n \n return anagrams","repo_name":"Bitcents/exercism_tracks","sub_path":"python/anagram/anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"26876244433","text":"#--SIMILARITY WITH SPACY--\n\nimport spacy\nnlp = spacy.load('en_core_web_md')\n\nword1 = nlp(\"cat\")\nword2 = nlp(\"monkey\")\nword3 = nlp(\"banana\")\n\nprint(word1.similarity(word2))\nprint(word3.similarity(word2))\nprint(word3.similarity(word1))\n\n#--WORKING WITH VECTORS--\ntokens = nlp('cat apple monkey banana ')\nfor token1 in tokens:\n \n for token2 in tokens:\n print(token1.text, token2.text, token1.similarity(token2))\n\n#--WORKING WITH SENTENCES--\nsentence_to_compare = \"Why is my cat on the car\"\nsentences = [\"where did my dog go\",\n\"Hello, there is my car\",\n\"I\\'ve lost my car in my car\",\n\"I\\'d like my boat back\",\n\"I will name my dog Diana\"]\nmodel_sentence = nlp(sentence_to_compare)\nfor sentence in sentences:\n similarity = nlp(sentence).similarity(model_sentence)\n print(sentence + \" - \", similarity)\n\n\n\n#'apple and monkey' are more similar than 'apple and cat'. 
i guess there is more chance of a monkey eating an apple than a cat?\n\n#there is a lot less similarities using 'en_core_web_sm'.\n\n","repo_name":"bali3/semantic","sub_path":"semantic.py","file_name":"semantic.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"39987008368","text":"import csv, re, os, signal\r\nfrom colorama import Fore, init, deinit, Back, Style\r\n\r\n# Initialize Colorama for Windows\r\ninit()\r\n\r\nsignal.signal(signal.SIGINT, signal.SIG_DFL) # KeyboardInterrupt: Ctrl+C\r\n\r\n# Variables for the directory\r\nwhile True:\r\n check_filename = re.compile(r\"^[cC]:.+\\.csv$\")\r\n try:\r\n dir = os.getcwd()\r\n filebulk = \"bulkstatsschema.csv\"\r\n print(Fore.CYAN, end=\"\")\r\n print(Style.BRIGHT)\r\n filename = input(f\"Please type the name or complete path of the file?(default: bulkstatsschema.csv) \")\r\n print(Style.RESET_ALL, end=\"\")\r\n if filename == \"\" or filename == filebulk:\r\n filepath = f\"{dir}\\\\{filebulk}\"\r\n elif check_filename.match(filename):\r\n filepath = f\"{filename}\"\r\n else:\r\n filepath = f\"{dir}\\\\{filename}\"\r\n\r\n csvfile = open(f\"{filepath}\")\r\n break\r\n except FileNotFoundError:\r\n print()\r\n print(Fore.RED+\"~\"*79)\r\n print(f\"File does not exist in the folder below. Please type the complete path.\")\r\n print(f\"{filepath}\")\r\n print(\"~\"*79+Fore.RESET)\r\n continue\r\n\r\n# Function to check the format of the counter\r\ndef validate_counter(counter):\r\n regex = re.compile(r\"^%?([\\w\\-?])+%?$\")\r\n not_regex = re.compile(r\"^%?(epochtime|localdate|localtime|uptime)%?$\")\r\n if not_regex.match(counter):\r\n return False\r\n elif regex.match(counter):\r\n return True\r\n else:\r\n return False\r\n\r\n# Applying the counter\r\n\r\nwhile True:\r\n def run():\r\n csvfile = open(f\"{filepath}\")\r\n file = csv.reader(csvfile)\r\n print(Fore.CYAN, Style.BRIGHT, end=\"\")\r\n print(\"\\nPlease type the counter(s) with comma separated. 
Formats(%xxx-xxx-xxx...%,xxx-xxx-xxx):\", Style.RESET_ALL, end = \"\") \r\n counters = input()\r\n counters = counters.split(\",\")\r\n size_counters = len(counters)\r\n for counter in counters:\r\n csvfile = open(f\"{filepath}\")\r\n file = csv.reader(csvfile)\r\n validation = validate_counter(counter)\r\n if validation:\r\n print()\r\n counter_re = re.compile(r\"^%([\\w\\-?])+%$\")\r\n if (counter_re.match(counter)):\r\n # Searching for the position and schema for the counter.\r\n for row_num in file:\r\n rowsize = len(row_num)\r\n for i in range(rowsize-1):\r\n if (bool(row_num) == True):\r\n if (row_num[i] == counter):\r\n print(Style.BRIGHT)\r\n print(f\"{Fore.GREEN}=\"*79)\r\n print(f\"Counter: {row_num[i]} \\nSchema: {row_num[2]} \\nposition: {row_num[2:].index(row_num[i])}\")\r\n print(\"=\"*79 + f\"{Style.RESET_ALL}\")\r\n if (size_counters == (counters.index(counter)+1)):\r\n return\r\n\r\n else:\r\n percentage_counter = f\"%{counter}%\"\r\n # print(counters, counter, percentage_counter)\r\n for row_num in file:\r\n rowsize = len(row_num)\r\n for i in range(rowsize-1):\r\n if (bool(row_num) == True):\r\n if (row_num[i] == percentage_counter):\r\n print(Style.BRIGHT)\r\n print(f\"{Fore.GREEN}=\"*79)\r\n print(f\"Counter: {row_num[i]} \\nSchema: {row_num[2]} \\nposition: {row_num[2:].index(row_num[i])}\")\r\n print(\"=\"*79 + f\"{Style.RESET_ALL}\")\r\n if (size_counters == (counters.index(counter)+1)):\r\n return\r\n\r\n else:\r\n print(Style.BRIGHT)\r\n print(f\"{Fore.RED}*** ERROR! Wrong format or cannot be this counter. Please type again. ***{Style.RESET_ALL}\")\r\n del counter\r\n run()\r\n else:\r\n try:\r\n print(f\"{Style.BRIGHT}{Fore.RED}*** ERROR! Counter {counter} does not exist. Please retype the counters. ***{Style.RESET_ALL}\")\r\n run()\r\n except UnboundLocalError:\r\n return\r\n \r\n run()\r\n \r\n print(f\"{Fore.CYAN}{Style.BRIGHT}\\nWould you like to search another counter? (Yes/No)\", Style.RESET_ALL, end=\"\")\r\n answer = input()\r\n pattern_yes = re.compile(r\"^[yY][eE]?[Ss]?\")\r\n pattern_no = re.compile(r\"^[nN][oO]?\")\r\n\r\n if pattern_yes.match(answer):\r\n continue\r\n elif pattern_no.match(answer):\r\n print(\"\\nThank you!!! \\n\")\r\n deinit()\r\n break\r\n else:\r\n print(\"\\nYou typed something different from yes and no. 
Closing the program...\\n\")\r\n        deinit()\r\n        break\r\n\r\ndeinit()","repo_name":"luizpolli/counter_location","sub_path":"counters_locator.py","file_name":"counters_locator.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"35736219254","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/3/15 0:06\n# @Author : xulzee\n# @Email : xulzee@163.com\n# @File : Array To Queue.py\n# @Software: PyCharm\nclass Solution:\n    def __init__(self, size):\n        # fixed-capacity ring buffer backing the queue\n        self.array = [0] * size\n        self.first = 0\n        self.last = 0\n        self.size = 0\n\n    def peek(self):\n        if self.size == 0:\n            return None\n        return self.array[self.first]\n\n    def push(self, obj):\n        # full when the element count reaches the capacity, not when it equals the list object\n        if self.size == len(self.array):\n            return \"Queue is full\"\n        self.size += 1\n        self.array[self.last] = obj\n        if self.last == len(self.array) - 1:\n            self.last = 0\n        else:\n            self.last = self.last + 1\n\n    def poll(self):\n        if self.size == 0:\n            return \"Queue is empty\"\n        self.size -= 1\n        tmp = self.first\n        if self.first == len(self.array) - 1:\n            self.first = 0\n        else:\n            self.first += 1\n        return self.array[tmp]\n","repo_name":"xulzee/LeetCodeProjectPython","sub_path":"Array To Queue.py","file_name":"Array To Queue.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"9"}
+{"seq_id":"3384716367","text":"#!/usr/bin/python3\n\nimport rospy\nimport actionlib\nimport sys\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\nfrom geometry_msgs.msg import PoseStamped\n\n# transform = (double(sys.argv[1]), double(sys.argv[2]), double(sys.argv[3]), double(sys.argv[4]), double(sys.argv[5]),\n#              double(sys.argv[6]), double(sys.argv[7]))\n\n# Callbacks definition\n\n\ndef active_cb(extra):\n    rospy.loginfo(\"Goal pose being processed\")\n\n\ndef feedback_cb(feedback):\n    rospy.loginfo(\"Current location: \" + str(feedback))\n\n\ndef done_cb(status, result):\n    if status == 3:\n        rospy.loginfo(\"Goal reached\")\n    if status == 2 or status == 8:\n        rospy.loginfo(\"Goal cancelled\")\n    if status == 4:\n        rospy.loginfo(\"Goal aborted\")\n\n\ndef set_goal(pub, pos_x, pos_y, pos_z, rot_x, rot_y, rot_z, rot_w):\n    # the publisher and pose values are passed in; they previously referenced\n    # variables that only existed inside goal_handler's scope\n    goal = PoseStamped()\n    goal.header.frame_id = \"map\"\n    goal.header.stamp = rospy.Time.now()\n\n    goal.pose.position.x = pos_x\n    goal.pose.position.y = pos_y\n    goal.pose.position.z = pos_z\n    goal.pose.orientation.x = rot_x\n    goal.pose.orientation.y = rot_y\n    goal.pose.orientation.z = rot_z\n    goal.pose.orientation.w = rot_w\n\n    rospy.sleep(1)\n    rospy.loginfo(\"Sending goal to navigation stack.\")\n    pub.publish(goal)\n    rospy.loginfo(\"Goal sent to navigation stack.\")\n\n\ndef goal_handler():\n    rospy.init_node(\"set_goal\", anonymous=True)\n    pub = rospy.Publisher(\"/move_base_simple/goal\", PoseStamped, queue_size=1)\n    pos_x = rospy.get_param('/pos_x', -5.579485893249512)\n    pos_y = rospy.get_param('/pos_y', 3.488150119781494)\n    pos_z = rospy.get_param('/pos_z', 0.0)\n    rot_x = rospy.get_param('/rot_x', 0.0)\n    rot_y = rospy.get_param('/rot_y', 0.0)\n    rot_z = rospy.get_param('/rot_z', 0.9751426556251712)\n    rot_w = rospy.get_param('/rot_w', 0.22157797990840336)\n    # publish the goal once the node and parameters are ready\n    set_goal(pub, pos_x, pos_y, pos_z, rot_x, rot_y, rot_z, rot_w)\n\n    rospy.spin()\n# navclient.send_goal(goal, done_cb, active_cb, feedback_cb)\n# finished = navclient.wait_for_result()\n","repo_name":"Risskov/handirob","sub_path":"autonomous_nav/scripts/set_goal.py","file_name":"set_goal.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"}
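A quick sanity check for the ring-buffer queue record above. This driver is illustrative only: the Solution class and its return values come from the record, the capacity and test values are invented, and the snippet assumes the class is already in scope.

q = Solution(2)                        # capacity-2 ring buffer
assert q.poll() == "Queue is empty"    # an empty queue reports itself
q.push(1)
q.push(2)
assert q.push(3) == "Queue is full"    # size == len(array) guard now fires
assert q.peek() == 1                   # front of the queue is untouched
assert q.poll() == 1 and q.poll() == 2
q.push(4)                              # first/last indices wrap around the array
assert q.poll() == 4

+{"seq_id":"12593651767","text":"from 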
django.urls import path\nfrom . import views\n\napp_name = \"app1\"\n\nurlpatterns = [\n path('', views.home, name=\"home\"),\n path('about/', views.about, name=\"about\"),\n path('api/customers', views.customers, name=\"customers\"),\n path('api/customers//', views.details, name=\"details\")\n \n]","repo_name":"Caroline-Mwangi/Learn-React","sub_path":"Django/project1/app1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"71128816294","text":"import unittest\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\n\nfrom fbgemm_gpu.split_embedding_configs import EmbOptimType\n\nfrom torchrec import EmbeddingCollection, EmbeddingConfig, KeyedJaggedTensor\nfrom torchrec.distributed.embedding import EmbeddingCollectionSharder\nfrom torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder\nfrom torchrec.distributed.fused_embeddingbag import FusedEmbeddingBagCollectionSharder\nfrom torchrec.distributed.model_parallel import DistributedModelParallel as DMP\n\nfrom torchrec.distributed.planner import EmbeddingShardingPlanner, Topology\nfrom torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizerWrapper\nfrom torchrec.optim.optimizers import in_backward_optimizer_filter\n\nfrom torchrec_dynamic_embedding.id_transformer_group import IDTransformerGroup\nfrom utils import init_dist, register_memory_io\n\nregister_memory_io()\n\n\nclass Model(nn.Module):\n def __init__(self, num_embeddings, init_max, init_min, batch_size):\n super().__init__()\n self.embedding_dim = 16\n self.batch_size = batch_size\n self.config = EmbeddingConfig(\n name=\"id\",\n embedding_dim=self.embedding_dim,\n num_embeddings=num_embeddings,\n weight_init_max=init_max,\n weight_init_min=init_min,\n )\n self.emb = EmbeddingCollection(\n tables=[self.config], device=torch.device(\"meta\")\n )\n self.dense = nn.Linear(16, 1)\n\n def forward(self, x):\n embeddings = (\n self.emb(x)[\"id\"]\n .values()\n .reshape((self.batch_size, -1, self.embedding_dim))\n )\n fused = embeddings.sum(dim=1)\n output = self.dense(fused)\n pred = torch.sigmoid(output)\n return pred\n\n\nclass TestPSPrecision(unittest.TestCase):\n def testExtractTensor(self):\n init_dist()\n rank = dist.get_rank()\n device = torch.device(f\"cuda:{rank}\")\n torch.cuda.set_device(device)\n\n batch_size = 4\n model1 = Model(\n num_embeddings=1000, init_max=1, init_min=1, batch_size=batch_size\n )\n\n model2 = Model(\n num_embeddings=100, init_max=1, init_min=1, batch_size=batch_size\n )\n model2_config = model2.config\n\n model2.dense.weight.data.copy_(model1.dense.weight.data)\n model2.dense.bias.data.copy_(model1.dense.bias.data)\n\n def get_dmp(model):\n topology = Topology(\n world_size=dist.get_world_size(),\n local_world_size=dist.get_world_size(),\n compute_device=\"cuda\",\n )\n\n fused_params = {\n \"learning_rate\": 1e-1,\n \"optimizer\": EmbOptimType.ADAM,\n \"cache_load_factor\": 0.1,\n }\n sharders = [\n EmbeddingBagCollectionSharder(fused_params=fused_params),\n FusedEmbeddingBagCollectionSharder(fused_params=fused_params),\n EmbeddingCollectionSharder(fused_params=fused_params),\n ]\n plan = EmbeddingShardingPlanner(topology=topology, constraints=None).plan(\n model, sharders\n )\n model = DMP(module=model, device=device, plan=plan, sharders=sharders)\n\n dense_optimizer = KeyedOptimizerWrapper(\n dict(in_backward_optimizer_filter(model.named_parameters())),\n lambda params: 
torch.optim.Adam(params, lr=1e-1),\n )\n optimizer = CombinedOptimizer([model.fused_optimizer, dense_optimizer])\n\n return model, optimizer\n\n model1, optimizer1 = get_dmp(model1)\n model2, optimizer2 = get_dmp(model2)\n\n transformer = IDTransformerGroup(\n \"memory://\",\n model2,\n {\"emb\": [model2_config]},\n transform_config={\"type\": \"naive\"},\n ps_config={\"chunk_size\": 1024},\n )\n\n def sigmoid_crossentropy(y_true, y_pred):\n ce = nn.BCELoss()(y_pred, y_true)\n return torch.mean(torch.sum(ce, dim=-1))\n\n for i in range(100):\n kjt = KeyedJaggedTensor(\n keys=[\"id\"],\n values=torch.randint(0, 1000, (40,), dtype=torch.long),\n lengths=torch.tensor([10, 10, 10, 10], dtype=torch.long),\n )\n kjts, fetch_handles = transformer.transform({\"emb\": kjt})\n for handle in fetch_handles:\n handle.wait()\n mapped_kjt = kjts[\"emb\"]\n label = torch.randint(0, 2, (4, 1), device=device).float()\n kjt = kjt.to(device)\n mapped_kjt = mapped_kjt.to(device)\n\n output1 = model1(kjt)\n task_loss1 = sigmoid_crossentropy(label, output1)\n\n task_loss1.backward()\n optimizer1.step()\n optimizer1.zero_grad()\n\n output2 = model2(mapped_kjt)\n task_loss2 = sigmoid_crossentropy(label, output2)\n\n task_loss2.backward()\n optimizer2.step()\n optimizer2.zero_grad()\n\n self.assertTrue(abs((task_loss1 - task_loss2).item()) < 1e-7)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"pytorch/torchrec","sub_path":"contrib/dynamic_embedding/tests/test_integral_precision.py","file_name":"test_integral_precision.py","file_ext":"py","file_size_in_byte":5204,"program_lang":"python","lang":"en","doc_type":"code","stars":1577,"dataset":"github-code","pt":"9"} +{"seq_id":"71832822695","text":"import json\nimport matplotlib.pyplot as plt\n\n# ouvrir le fichier json\nwith open('bbs.json', 'r') as f:\n data = json.load(f)\n\n# récupérer les evalue\nevalues = []\nranks= []\nfor i,hit in enumerate(data['blast_output'][0]['hits']):\n evalue = float(hit['evalue'])\n evalues.append(evalue)\n ranks.append(i)\n\n# échelle logarithmique sur l'axe des y\nplt.yscale('log')\n# échelonner l'axe des abscisse de 500 à 500\nplt.xticks(range(0, i, 500))\n# afficher des points\nplt.scatter(ranks, evalues, s=1)\nplt.xlabel('Ranks')\nplt.ylabel('Evalue')\n# afficher sur le graphe la valeur de lengths[0] \nplt.text(-1100, evalues[0], evalues[0], color='r')\n# Ajouter un titre\nplt.title('Distribution of evalues')\nplt.savefig('Distribution_evalues_BBS.jpg')\nplt.show()","repo_name":"enashi04/blast-project","sub_path":"script/distribution_evalue_rank.py","file_name":"distribution_evalue_rank.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"2773284745","text":"\"\"\"\nTests for custom serializer/deserializer.\n\"\"\"\nfrom datetime import datetime\nfrom typing import List, Optional, Union\n\nimport pytest\n\nfrom serde import (\n SerdeError,\n SerdeSkip,\n default_deserializer,\n default_serializer,\n field,\n from_tuple,\n serde,\n to_tuple,\n)\nfrom serde.json import from_json, to_json\n\n\ndef test_custom_field_serializer():\n @serde\n class Foo:\n a: datetime\n b: datetime = field(\n serializer=lambda x: x.strftime(\"%d/%m/%y\"),\n deserializer=lambda x: datetime.strptime(x, \"%d/%m/%y\"),\n )\n c: Optional[datetime] = field(\n serializer=lambda x: x.strftime(\"%d/%m/%y\") if x else None,\n deserializer=lambda x: datetime.strptime(x, \"%d/%m/%y\") if x else None,\n )\n\n dt = datetime(2021, 1, 1, 0, 0, 0)\n f = 
Foo(dt, dt, None)\n\n assert to_json(f) == '{\"a\":\"2021-01-01T00:00:00\",\"b\":\"01/01/21\",\"c\":null}'\n assert f == from_json(Foo, to_json(f))\n\n assert to_tuple(f) == (datetime(2021, 1, 1, 0, 0), \"01/01/21\", None)\n assert f == from_tuple(Foo, to_tuple(f))\n\n\ndef test_raise_error():\n def raise_exception(_):\n raise Exception()\n\n @serde\n class Foo:\n i: int = field(serializer=raise_exception, deserializer=raise_exception)\n\n f = Foo(10)\n with pytest.raises(Exception):\n to_json(f)\n\n with pytest.raises(Exception):\n from_json(Foo, '{\"i\": 10}')\n\n\ndef test_wrong_signature():\n @serde\n class Foo:\n i: int = field(serializer=lambda: \"10\", deserializer=lambda: 10)\n\n f = Foo(10)\n with pytest.raises(SerdeError):\n to_json(f)\n\n with pytest.raises(SerdeError):\n from_json(Foo, '{\"i\": 10}')\n\n\ndef test_custom_class_serializer():\n def serializer(cls, o):\n if cls is datetime:\n return o.strftime(\"%d/%m/%y\")\n else:\n raise SerdeSkip()\n\n def deserializer(cls, o):\n if cls is datetime:\n return datetime.strptime(o, \"%d/%m/%y\")\n else:\n raise SerdeSkip()\n\n @serde(serializer=serializer, deserializer=deserializer)\n class Foo:\n a: int\n b: datetime\n c: datetime\n d: Optional[str] = None\n e: Union[str, int] = 10\n f: List[int] = field(default_factory=list)\n\n dt = datetime(2021, 1, 1, 0, 0, 0)\n f = Foo(10, dt, dt, f=[1, 2, 3])\n\n assert to_json(f) == '{\"a\":10,\"b\":\"01/01/21\",\"c\":\"01/01/21\",\"d\":null,\"e\":10,\"f\":[1,2,3]}'\n assert f == from_json(Foo, to_json(f))\n\n assert to_tuple(f) == (10, \"01/01/21\", \"01/01/21\", None, 10, [1, 2, 3])\n assert f == from_tuple(Foo, to_tuple(f))\n\n def fallback(_, __):\n raise SerdeSkip()\n\n @serde(serializer=fallback, deserializer=fallback)\n class Foo:\n a: Optional[str]\n b: str\n\n f = Foo(\"foo\", \"bar\")\n assert to_json(f) == '{\"a\":\"foo\",\"b\":\"bar\"}'\n assert f == from_json(Foo, '{\"a\":\"foo\",\"b\":\"bar\"}')\n assert Foo(None, \"bar\") == from_json(Foo, '{\"b\":\"bar\"}')\n with pytest.raises(Exception):\n assert Foo(None, \"bar\") == from_json(Foo, \"{}\")\n with pytest.raises(Exception):\n assert Foo(\"foo\", \"bar\") == from_json(Foo, '{\"a\": \"foo\"}')\n\n\ndef test_field_serialize_override_class_serializer():\n def serializer(cls, o):\n if cls is datetime:\n return o.strftime(\"%d/%m/%y\")\n else:\n raise SerdeSkip()\n\n def deserializer(cls, o):\n if cls is datetime:\n return datetime.strptime(o, \"%d/%m/%y\")\n else:\n raise SerdeSkip()\n\n @serde(serializer=serializer, deserializer=deserializer)\n class Foo:\n a: int\n b: datetime\n c: datetime = field(\n serializer=lambda x: x.strftime(\"%y.%m.%d\"),\n deserializer=lambda x: datetime.strptime(x, \"%y.%m.%d\"),\n )\n\n dt = datetime(2021, 1, 1, 0, 0, 0)\n f = Foo(10, dt, dt)\n\n assert to_json(f) == '{\"a\":10,\"b\":\"01/01/21\",\"c\":\"21.01.01\"}'\n assert f == from_json(Foo, to_json(f))\n\n assert to_tuple(f) == (10, \"01/01/21\", \"21.01.01\")\n assert f == from_tuple(Foo, to_tuple(f))\n\n\ndef test_override_by_default_serializer():\n def serializer(cls, o):\n if cls is datetime:\n return o.strftime(\"%d/%m/%y\")\n else:\n raise SerdeSkip()\n\n def deserializer(cls, o):\n if cls is datetime:\n return datetime.strptime(o, \"%d/%m/%y\")\n else:\n raise SerdeSkip()\n\n @serde(serializer=serializer, deserializer=deserializer)\n class Foo:\n a: int\n b: datetime\n c: datetime = field(serializer=default_serializer, deserializer=default_deserializer)\n\n dt = datetime(2021, 1, 1, 0, 0, 0)\n f = Foo(10, dt, dt)\n\n assert to_json(f) 
== '{\"a\":10,\"b\":\"01/01/21\",\"c\":\"2021-01-01T00:00:00\"}'\n assert f == from_json(Foo, to_json(f))\n\n assert to_tuple(f) == (10, \"01/01/21\", datetime(2021, 1, 1, 0, 0))\n assert f == from_tuple(Foo, to_tuple(f))\n","repo_name":"yukinarit/pyserde","sub_path":"tests/test_custom.py","file_name":"test_custom.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","stars":596,"dataset":"github-code","pt":"9"} +{"seq_id":"3968177445","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Hillas shower parametrization.\n\"\"\"\nfrom __future__ import division\nimport numpy as np\n\n__all__ = ['hillas_parameters']\n\ndef hillas_parameters(x, y, s):\n \"\"\"Compute Hillas parameters for a given shower image.\n \n Parameters\n ----------\n x : array-like\n Pixel x-coordinate\n y : array-like\n Pixel y-coordinate\n s : array-like\n Pixel value\n \n Returns\n -------\n Dictionary of Hillas parameters\n \n The formulae implemented here are taken from the Appendix of\n the Whipple Crab paper Weekes et al. (1998) \n http://adsabs.harvard.edu/abs/1989ApJ...342..379W\n (corrected for some obvious typos) \n \"\"\"\n x = np.asanyarray(x, dtype=np.float64)\n y = np.asanyarray(y, dtype=np.float64)\n s = np.asanyarray(s, dtype=np.float64)\n assert x.shape == s.shape\n assert y.shape == s.shape\n\n # Compute image moments \n _s = np.sum(s)\n m_x = np.sum(s * x) / _s\n m_y = np.sum(s * y) / _s\n m_xx = np.sum(s * x * x) / _s # note: typo in paper\n m_yy = np.sum(s * y * y) / _s\n m_xy = np.sum(s * x * y) / _s # note: typo in paper\n \n # Compute major axis line representation y = a * x + b\n S_xx = m_xx - m_x * m_x\n S_yy = m_yy - m_y * m_y\n S_xy = m_xy - m_x * m_y\n d = S_yy - S_xx\n temp = d * d + 4 * S_xy * S_xy\n a = (d + np.sqrt(temp)) / (2 * S_xy)\n b = m_y - a * m_x\n \n # Compute Hillas parameters\n width_2 = (S_yy + a * a * S_xx - 2 * a * S_xy) / (1 + a * a)\n width = np.sqrt(width_2)\n length_2 = (S_xx + a * a * S_yy + 2 * a * S_xy) / (1 + a * a)\n length = np.sqrt(length_2)\n miss = np.abs(b / (1 + a * a))\n r = np.sqrt(m_x * m_x + m_y * m_y)\n \n # Compute azwidth by transforming to (p, q) coordinates\n sin_theta = m_y / r\n cos_theta = m_x / r\n q = (m_x - x) * sin_theta + (y - m_y) * cos_theta\n m_q = np.sum(s * q) / _s\n m_qq = np.sum(s * q * q) / _s\n azwidth_2 = m_qq - m_q * m_q\n azwidth = np.sqrt(azwidth_2)\n\n # Return relevant parameters in a dict \n p = dict()\n p['x'] = m_x\n p['y'] = m_y\n p['a'] = a\n p['b'] = b\n p['width'] = width\n p['length'] = length\n p['miss'] = miss\n p['r'] = r\n p['azwidth'] = azwidth\n return p\n","repo_name":"pflaumenmus/gammapy","sub_path":"gammapy/shower/hillas.py","file_name":"hillas.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"9"} +{"seq_id":"70379039975","text":"from pbase.ptext.pipeline import Pipeline\n\nimport pytest\n\n\nconvert_token1 = str.split\nexamples1 = [[\"hello world\", \"hello pbase\"], [\"I love pbase\"]]\nresults1 = [[[\"hello\", \"world\"], [\"hello\", \"pbase\"]], [[\"I\", \"love\", \"pbase\"]]]\n\nconvert_token2 = str.upper\nresults2 = [[\"HELLO WORLD\", \"HELLO PBASE\"], [\"I LOVE PBASE\"]]\n\nresults3 = [[[\"HELLO\", \"WORLD\"], [\"HELLO\", \"PBASE\"]], [[\"I\", \"LOVE\", \"PBASE\"]]]\n\n\n@pytest.mark.parametrize(\"convert_tokens, examples, results\",\n [(convert_token1, examples1, results1),\n (convert_token2, examples1, results2),\n ([convert_token1, convert_token2], 
examples1, results3)])\ndef test_pipeline(convert_tokens, examples, results):\n if isinstance(convert_tokens, list):\n pipeline = Pipeline()\n for convert_token in convert_tokens:\n pipeline.add_after(pipeline=convert_token)\n else:\n pipeline = Pipeline(convert_token=convert_tokens)\n processed = pipeline(examples)\n assert processed == results","repo_name":"Impavidity/pbase","sub_path":"infrastructure/src/test/python/pbase/ptext/test_pipeline.py","file_name":"test_pipeline.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"9"} +{"seq_id":"29945413299","text":"import unittest\nimport src\n\n\nclass TestBasicRoomFunction(unittest.TestCase):\n def setUp(self):\n src.quests.debugMessages = []\n\n def test_create(self):\n # test direct creation\n for roomType in src.rooms.roomMap.values():\n item = roomType()\n\n def test_adding(self):\n room = src.rooms.EmptyRoom()\n item = src.items.Coal()\n item.yPosition = 1\n item.xPosition = 1\n room.addItems([item])\n self.assertEqual(item in room.itemsOnFloor, True)\n self.assertEqual(item.xPosition, 1)\n self.assertEqual(item.yPosition, 1)\n char = src.characters.Character()\n room.addCharacter(char, 2, 1)\n self.assertEqual(room.characters, [char])\n self.assertEqual(char.xPosition, 2)\n self.assertEqual(char.yPosition, 1)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"MarxMustermann/OfMiceAndMechs","sub_path":"tests/test_rooms.py","file_name":"test_rooms.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"9"} +{"seq_id":"37764011277","text":"from django.shortcuts import render\nfrom . forms import UserForm\nfrom . models import User\n# Create your views here.\n\n\ndef create(request):\n if request.method == 'POST':\n form = UserForm(request.POST)\n if form.is_valid():\n reg = User(\n name=form.cleaned_data[\"name\"], \n department=form.cleaned_data[\"department\"], \n semester=form.cleaned_data[\"semester\"])\n reg.save()\n else:\n form = UserForm()\n return render(request, \"enroll/register.html\", {\"form\": form})\n","repo_name":"Ishfaqdev/Django-form","sub_path":"enroll/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"32625546461","text":"from typing import List\nfrom collections import Counter, defaultdict, deque\nfrom functools import lru_cache\nimport heapq\n\nclass Solution:\n def electricCarPlan(self, paths: List[List[int]], cnt: int, start: int, end: int, charge: List[int]) -> int:\n n = len(charge)\n dp = [[-1] * (cnt+1) for _ in range(n)]\n adj = [[] for _ in range(n)]\n for a, b, d in paths:\n adj[a].append((b, d))\n adj[b].append((a, d))\n visited = set()\n\n dp[start][0] = 0\n import heapq\n hp = []\n hp.append((0, start, 0))\n\n while hp:\n (pcost, u, c) = heapq.heappop(hp)\n if (u, c) in visited:\n continue\n visited.add((u, c))\n\n for v, d in adj[u]:\n for i in range(max(d-c, 0), cnt+1-c): # to charge somes.\n c2 = i + c - d\n cost = i * charge[u] + d + pcost\n if (dp[v][c2] == -1 or cost < dp[v][c2]) and (v, c2) not in visited:\n dp[v][c2] = cost\n heapq.heappush(hp, (cost, v, c2))\n\n ans = (1 << 30)\n for v in dp[end]:\n if v == -1: continue\n ans = min(ans, v)\n return ans\n\ncases = [\n ([[1,3,3],[3,2,1],[2,1,3],[0,1,4],[3,0,5]], 6, 1, 0, [2,10,4,1], 43),\n 
([[0,4,2],[4,3,5],[3,0,5],[0,1,5],[3,2,4],[1,2,8]], 8, 0, 2, [4,1,1,3,2], 38),\n]\n\nimport aatest_helper\naatest_helper.run_test_cases(Solution().electricCarPlan, cases)\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"dirtysalt/codes","sub_path":"misc/leetcode/lcp-35.py","file_name":"lcp-35.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"9"} +{"seq_id":"29377005542","text":"from django.db import models\nimport jwt\nfrom datetime import datetime, timedelta, date\nfrom django.conf import settings\nfrom django.contrib.auth.models import (\n AbstractBaseUser, BaseUserManager, PermissionsMixin\n)\nfrom django.db.models import Q\nimport logging\n\nfrom apps.ke_mixcloud_core.models import AbstractBase\n\nlogger = logging.getLogger(__name__)\n\n\nclass UserManager(BaseUserManager):\n\n def get_queryset(self):\n return super(UserManager, self).get_queryset().filter(deleted=False)\n\n def create_user(self, username, email, password=None, **kwargs):\n first_name = kwargs.get('first_name', None)\n last_name = kwargs.get('last_name', None)\n phone_number = kwargs.get('phone_number', None)\n\n createdate = kwargs.get('createdate', None)\n if createdate is None:\n createdate=date.today()\n txndate = kwargs.get('txndate', None)\n if txndate is None:\n txndate=datetime.now()\n\n\n approved = kwargs.get('approved', True)\n approved_by = kwargs.get('approved_by', None)\n approveddate = kwargs.get('approveddate', datetime.now())\n\n\n if first_name is None:\n raise TypeError('Users must have a first name.')\n\n if last_name is None:\n raise TypeError('Users must have a last name.')\n\n if username is None:\n raise TypeError('Users must have a username.')\n\n if email is None:\n raise TypeError('Users must have an email address.')\n\n if phone_number is None:\n raise TypeError('Users must have a phone number.')\n\n\n\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n first_name=first_name,\n last_name=last_name,\n phone_number=phone_number,\n createdate=createdate,\n txndate=txndate,\n\n approved=approved,\n approved_by=approved_by,\n approveddate=approveddate,\n\n )\n user.set_password(password)\n user.save()\n\n return user\n\n def create_superuser(self, username, email, first_name, last_name, phone_number, password=None):\n '''\n Set SYSTEM PERMISSION FOR THE SUPER USER\n '''\n\n\n if password is None:\n raise TypeError('Superusers must have a password.')\n user = self.create_user(username=username,\n email=email,\n password=password,\n first_name=first_name,\n last_name=last_name,\n phone_number=phone_number\n )\n user.is_superuser = True\n user.is_staff = True\n user.save()\n\n return user\n\n\nclass CustomUser(AbstractBaseUser, PermissionsMixin, AbstractBase):\n class Meta:\n verbose_name = 'User'\n verbose_name_plural = 'Users'\n ordering = ('-created_at', '-updated_at')\n\n\n username = models.CharField(db_index=True, max_length=255, unique=True)\n email = models.EmailField(db_index=True)\n is_active = models.BooleanField(default=True)\n is_staff = models.BooleanField(default=False)\n is_superuser = models.BooleanField(default=False)\n is_admin = models.BooleanField(default=False)\n\n first_name = models.CharField(max_length=255)\n last_name = models.CharField(max_length=255)\n phone_number = models.CharField(max_length=15)\n\n # DEFAULT FIELDS\n createdate = models.DateField(default=date.today, blank=True, null=True)\n txndate = models.DateTimeField(default=datetime.now)\n 
approved = models.BooleanField(default=False)\n approved_by = models.ForeignKey('authentication.CustomUser', models.DO_NOTHING, db_column='approved_by', blank=True, null=True)\n approveddate = models.DateTimeField(blank=True, null=True)\n\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = ['first_name', 'last_name', 'email', 'phone_number']\n\n objects = UserManager()\n\n def __str__(self):\n\n return self.username\n\n @property\n def get_full_name(self):\n\n return self.first_name + ' ' + self.last_name\n\n\n def get_short_name(self):\n return self.username\n\n @property\n def token(self):\n return self._generate_jwt_token()\n\n def _generate_jwt_token(self):\n dt = datetime.now() + timedelta(days=30)\n data = {\n 'id': self.id,\n 'email': self.email,\n 'username': self.username,\n 'exp': int(dt.timestamp())\n }\n token = jwt.encode(data, settings.SECRET_KEY, algorithm='HS256')\n\n return token.decode('utf-8')\n\n @classmethod\n def get_employees(cls):\n return CustomUser.objects.all()\n\n @classmethod\n def get_single_emp(cls, username):\n return CustomUser.objects.get(employee=username)\n\n def get_permissions(self):\n return self.role.permissionsmaps.all()\n\n def has_perm(self, perm, obj=None):\n if self.is_superuser:\n return True\n else:\n return False\n\n def has_perms(self, perm_list, obj=None):\n return all(self.has_perm(perm, obj) for perm in perm_list)\n\n\n","repo_name":"DennisMuchiri/django_fileserver","sub_path":"apps/authentication/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"39378482031","text":"from numpy import linspace\n\nbuffer = [0]\n\nspot = 0\nfor insert in linspace(1,2017,2017):\n spot += 304\n spot = spot%len(buffer) + 1\n buffer.insert(spot,int(insert))\n \n\nprint(f'Solution to part 1: {buffer[buffer.index(2017)+1]}')\n\nbuffer = [0, 0]\nbuffer_len = 1\nspot = 0\nfor insert in linspace(1,50000000,50000000):\n spot += 304\n spot = spot%buffer_len + 1\n if spot == 1:\n buffer[1] = int(insert)\n buffer_len += 1\n \n\nprint(f'Solution to part 2: {buffer[1]}')","repo_name":"ctallum/AdventOfCode2017","sub_path":"Day17/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"28868977597","text":"import sys\nimport pandas as pd\nimport numpy as np\nfrom sqlalchemy import create_engine\n\n\ndef load_data(messages_filepath, categories_filepath):\n \"\"\"\n This function loads and merge message and categories data\n arguments:\n messages_filepath: messages data filepath \n categories_filepath: categories data filepath\n return:\n (dataframe) : clean dataframe\n \n \"\"\"\n # messages data\n messages_df = pd.read_csv(messages_filepath)\n # categories data\n categories_df = pd.read_csv(categories_filepath)\n # merge message and categories dataframes\n merged_df = messages_df.merge(categories_df, how = 'outer', on = ['id'])\n \n return merged_df\n\n\ndef clean_data(df):\n \"\"\"\n Clean dataframe by splitting categories column into seperate category column and remove duplicates\n \n arguments:\n df (Dataframe): pandas dataframe to clean\n returns:\n df (pandas dataframe)\n \"\"\"\n # split and expand categories \n categories_ = df['categories'].str.split(';', expand = True)\n # all new column names\n column_names = categories_.iloc[0, :].apply(lambda x: x.split('-')[0])\n # update the column names of categories with 
column_names\n    categories_.columns = column_names\n    # convert category values to numeric based on category column values\n    for col in categories_.columns:\n        # convert each column to numeric\n        df[col] = pd.to_numeric(categories_[col].astype(str).apply(lambda x: x[-1]))\n\n    # drop the categories column from df\n    df.drop('categories', axis = 1, inplace = True)\n\n    # drop_duplicates\n    df = df.drop_duplicates()\n\n    return df\n\n\ndef save_data(df, database_filename):\n    \"\"\"\n    Saving data to given database_filename\n    arguments:\n        df (DataFrame) : dataframe to save\n        database_filename (path): path to save databasefile\n    \"\"\"\n    engine = create_engine('sqlite:///' + database_filename)\n\n    df.to_sql('Table_1', engine, index=False, if_exists='replace')\n\n\ndef main():\n    if len(sys.argv) == 4:\n\n        messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n        print('Loading data...\\n    MESSAGES: {}\\n    CATEGORIES: {}'\n              .format(messages_filepath, categories_filepath))\n        df = load_data(messages_filepath, categories_filepath)\n\n        print('Cleaning data...')\n        df = clean_data(df)\n\n        print('Saving data...\\n    DATABASE: {}'.format(database_filepath))\n        save_data(df, database_filepath)\n\n        print('Cleaned data saved to database!')\n\n    else:\n        print('Please provide the filepaths of the messages and categories '\\\n              'datasets as the first and second argument respectively, as '\\\n              'well as the filepath of the database to save the cleaned data '\\\n              'to as the third argument. \\n\\nExample: python process_data.py '\\\n              'disaster_messages.csv disaster_categories.csv '\\\n              'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n    main()","repo_name":"nick-choudhary/DisasterResponse","sub_path":"data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"11955177494","text":"from __future__ import annotations\nfrom typing import Any, Optional\n\n\nclass Node:\n    def __init__(self, data: Any):\n        self.data: Any = data\n        self.next: Optional[Node] = None\n\n    def __repr__(self):\n        return repr(self.data)\n\n\nclass SinglyLinkedList:\n    def __init__(self):\n        self.head: Optional[Node] = None\n        self.tail: Optional[Node] = None\n        self.size: int = 0\n\n    def append(self, node: Node) -> SinglyLinkedList:\n        if self.head is None:\n            self.head = node\n        else:\n            self.tail.next = node\n        self.tail = node\n        self.size += 1\n\n        return self\n\n    def remove(self, node: Node) -> None:\n        curr = self.head\n        prev = None\n\n        while curr is not None and curr != node:\n            prev = curr\n            curr = curr.next\n\n        if curr is None:\n            # node is not in the list; nothing to unlink\n            return\n\n        if prev is None:\n            # removing the head\n            self.head = curr.next\n        else:\n            prev.next = curr.next\n\n        if curr.next is None:\n            # removing the tail\n            self.tail = prev\n\n        self.size -= 1\n\n    def first(self) -> Optional[Node]:\n        return self.head\n\n    def last(self) -> Optional[Node]:\n        return self.tail\n\n    def search(self, data: Any) -> Optional[Node]:\n        curr = self.head\n        while curr is not None:\n            if curr.data == data:\n                return curr\n            curr = curr.next\n        return None\n\n    def is_empty(self) -> bool:\n        return self.size == 0\n","repo_name":"andre-dasilva/algorithms","sub_path":"python/algorithms/lists/singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
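A short usage sketch for the corrected remove method above. This driver is hypothetical (it is not part of the original repo) and assumes Node and SinglyLinkedList from the record are in scope; it exercises the head, middle, tail, and not-found paths:

lst = SinglyLinkedList()
a, b, c = Node(1), Node(2), Node(3)
lst.append(a).append(b).append(c)

lst.remove(b)                 # middle: a now links straight to c
assert lst.first() is a and lst.last() is c
lst.remove(c)                 # tail: the previous node becomes the new tail
assert lst.last() is a and lst.last().next is None
lst.remove(Node(99))          # not found: the list is left untouched
assert lst.size == 1
lst.remove(a)                 # head: the list becomes empty
assert lst.is_empty() and lst.first() is None

+{"seq_id":"13209134452","text":"# -*- coding: utf-8 -*-\n__author__ = 'Life'\n\nimport random\n\nfrom numbers import Number\nfrom 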
math import *\n\n\ndef evl(self, args, msg):\n #__import__('importlib').import_module('src.res.shorten_games').shorten.update({'1': '2'})\n try:\n if (msg.user_id == '85413884' or msg.user_id == '54602374') and args:\n res = eval(' '.join(args))\n\n # check how we want to display it\n if hasattr(res, '__iter__'):\n if type(res) != str:\n if all(type(x) == str for x in res):\n return res\n else:\n return '/w (sender) Not all elems of iterable are string'\n else:\n return res\n if isinstance(res, Number) or type(res) == bool:\n return str(res)\n\n except Exception as exc:\n return \"/w (sender) {}: {}\".format(exc.__class__.__name__, str(exc))\n","repo_name":"AdrenalineLife/yolo-octo-shame","sub_path":"src/lib/commands/evl.py","file_name":"evl.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"1326183185","text":"# n=int(input(\"enter num\"))\n# i=1\n# while i<=n:\n# t=input(\"enter str\")\n# for j in range (len(t)):\n# print(t[j][0])\n# i+=1\n\n\n# a=\"archana\"\n# print(len(a))\n# # l=a.split()\n# # i=0\n# # while ib:\n# print(a,\"is max\")\n# else:\n# print(b,\"is max\")\n\ns=\"wretrt\"\na=s.split()\nb=a[::-1]\nprint(b)","repo_name":"archana-singh-1/Webscriping","sub_path":"a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"21523628550","text":"# -*- coding: utf-8 -*-\n# Django settings for feindex project.\nfrom django.utils.translation import ugettext_lazy as _\nfrom os.path import join\nfrom project.settings import create_secret_key, PROJECT_PATH, PROJECT_NAME, \\\n PROJECT_DIRS\n\n#import sys\n#sys.path.append('/home/mark/Repos/feinx/feinx/apps/')\n#sys.path.append('/home/mark/Repos/feinx/feinx/contrib/')\n\n# Debug\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\n# Admin\nADMINS = (\n ('Administrator', 'admin@example.com'),\n)\nMANAGERS = ADMINS\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'Asia/Shanghai'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\n#LANGUAGE_CODE = 'en-us'\nLANGUAGE_CODE = 'zh-cn'\nLANGUAGES = (\n ('zh-cn', _('Simplified Chinese')),\n ('en-us', _('English')),\n)\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = False\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = join(PROJECT_DIRS, 'media')\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = join(PROJECT_DIRS, 'static')\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n join(PROJECT_DIRS, 'assets'),\n)\n\n# Set default theme, and change it in your own settings\nTHEME = 'default'\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n #'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\n# Make sure you have the privilieges to write in the data directory\nSECRET_KEY = create_secret_key(join(PROJECT_PATH, '../data', '.secret_key'))\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n #'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.request',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.contrib.messages.context_processors.messages',\n 'feincms.context_processors.add_page_if_missing',\n)\n\nROOT_URLCONF = 'project.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'project.wsgi.application'\n\nTEMPLATE_DIRS = (\n join(PROJECT_DIRS, 'templates/%s' % THEME),\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'django.contrib.admindocs',\n 'feincms',\n 'feincms.module.page',\n 'feincms.module.medialibrary',\n 'mptt',\n 'feinx',\n 'feinx.apps.forum',\n 'feinx.apps.bootloader',\n 'feinx.contrib.account',\n #'pagination',\n)\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n# Auth definition\n#AUTH_PROFILE_MODULE = 'contrib.profile.Profile'\n\n# Use new feincms reverse\nFEINCMS_REVERSE_MONKEY_PATCH = False\n\n# Feincms richtext editor\nFEINCMS_RICHTEXT_INIT_CONTEXT = {\n 'TINYMCE_JS_URL': '%slibs/tiny_mce/tiny_mce.js' % STATIC_URL,\n}\n\n# Profile backend to enable profile as default user model\n#AUTHENTICATION_BACKENDS = (\n# 'feinx.contrib.profile.auth_backends.ProfileModelBackend',\n#)\n\n# Custom user model. Default model is feinx's profile model.\n#CUSTOM_USER_MODEL = 'feinx.contrib.profile.Profile'\n\n\nTIME_ZONE_CHOICES = (\n ('-12.0', '(GMT -12:00) Eniwetok, Kwajalein'),\n ('-11.0', '(GMT -11:00) Midway Island, Samoa'),\n ('-10.0', '(GMT -10:00) Hawaii'),\n ('-9.0', '(GMT -9:00) Alaska'),\n ('-8.0', '(GMT -8:00) Pacific Time (US & Canada)'),\n ('-7.0', '(GMT -7:00) Mountain Time (US & Canada)'),\n ('-6.0', '(GMT -6:00) Central Time (US & Canada), Mexico City'),\n ('-5.0', '(GMT -5:00) Eastern Time (US & Canada), Bogota, Lima'),\n ('-4.0', '(GMT -4:00) Atlantic Time (Canada), Caracas, La Paz'),\n ('-3.5', '(GMT -3:30) Newfoundland'),\n ('-3.0', '(GMT -3:00) Brazil, Buenos Aires, Georgetown'),\n ('-2.0', '(GMT -2:00) Mid-Atlantic'),\n ('-1.0', '(GMT -1:00 hour) Azores, Cape Verde Islands'),\n ('0.0', '(GMT) Western Europe Time, London, Lisbon, Casablanca'),\n ('1.0', '(GMT +1:00 hour) Brussels, Copenhagen, Madrid, Paris'),\n ('2.0', '(GMT +2:00) Kaliningrad, South Africa'),\n ('3.0', '(GMT +3:00) Baghdad, Riyadh, Moscow, St. 
Petersburg'),\n ('3.5', '(GMT +3:30) Tehran'),\n ('4.0', '(GMT +4:00) Abu Dhabi, Muscat, Baku, Tbilisi'),\n ('4.5', '(GMT +4:30) Kabul'),\n ('5.0', '(GMT +5:00) Ekaterinburg, Islamabad, Karachi, Tashkent'),\n ('5.5', '(GMT +5:30) Bombay, Calcutta, Madras, New Delhi'),\n ('5.75', '(GMT +5:45) Kathmandu'),\n ('6.0', '(GMT +6:00) Almaty, Dhaka, Colombo'),\n ('7.0', '(GMT +7:00) Bangkok, Hanoi, Jakarta'),\n ('8.0', '(GMT +8:00) Beijing, Perth, Singapore, Hong Kong'),\n ('9.0', '(GMT +9:00) Tokyo, Seoul, Osaka, Sapporo, Yakutsk'),\n ('9.5', '(GMT +9:30) Adelaide, Darwin'),\n ('10.0', '(GMT +10:00) Eastern Australia, Guam, Vladivostok'),\n ('11.0', '(GMT +11:00) Magadan, Solomon Islands, New Caledonia'),\n ('12.0', '(GMT +12:00) Auckland, Wellington, Fiji, Kamchatka')\n)\n\n","repo_name":"indexofire/feinx","sub_path":"example/project/settings/settings_common.py","file_name":"settings_common.py","file_ext":"py","file_size_in_byte":7900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"19515731850","text":"#-*- encoding: utf-8 -*-\n\nimport time\nimport pandas as pd\n\ndef get_data(filename):\n \"\"\"从csv中获取数据\n :param filename:文件名\n :return:\n dta:series格式的时间序列\n check_value:需要检测的值\n \"\"\"\n data = pd.read_csv(filename)\n timestamp_list = []\n value_list = []\n for timestamp, value in zip(data['timestamp'], data['value']):\n a = time.localtime(timestamp)\n b = time.strftime(\"%Y-%m-%d %H:%M:%S\", a)\n timestamp_list.append(b)\n value_list.append(value)\n dta = pd.Series(value_list[:-1])\n dta = dta.fillna(dta.mean())\n dta.index = pd.Index(timestamp_list[:-1])\n dta.index = pd.DatetimeIndex(dta.index)\n # 最后一个点为检测点\n check_value = value_list[-1]\n return dta, check_value","repo_name":"jixinpu/aiopstools","sub_path":"aiopstools/anomaly_detection/get_timeseries_data.py","file_name":"get_timeseries_data.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":355,"dataset":"github-code","pt":"9"} +{"seq_id":"3600960247","text":"import luigi\nimport pandas as pd\nfrom .utils import STAGE_FORMAT\nfrom .task import MkDir, Parse, ExtractHttp, Load\n\n# Data-source level orchestrators.\n# Delegate endpoints / tables.\n\nclass KickFed(MkDir):\n\tdirname = luigi.Parameter(default='data')\n\tworkdir = luigi.Parameter(default='fed')\n\tdef requires(self):\n\t\tyield KickFedH15(**self.givedir)\n\n# Endpoint / table level orchestrators.\n# Arrange and kick off jobs.\n\nclass KickFedH15(MkDir):\n\tworkdir = luigi.Parameter(default='h15-selected-interest-rates')\n\tdef requires(self):\n\t\textract = ExtractFed(**self.givedir, urlpath='?rel=H15&series=bf17364827e38702b42a58cf8eaa3f78&lastobs=&from=&to=&filetype=csv&label=include&layout=seriescolumn&type=package')\n\t\tparse = ParseFedH15(**self.givedir, upstream=extract)\n\t\tyield Load(**self.givedir, upstream=parse)\n\n# Implement execution of job at leaf nodes.\n\nclass ExtractFed(ExtractHttp):\n\tdomain = luigi.Parameter(default=\"https://www.federalreserve.gov/datadownload/Output.aspx\")\n\nclass ParseFedH15(Parse):\n\tworkdir = luigi.Parameter(default='staging')\n\tdef run(self):\n\t\tsuper().run()\n\t\tdf = pd.read_csv(self.input().path, na_values=['ND'], skiprows=6, names=[\"date\", \"1_month\", \"3_month\", \"6_month\", \"1_year\", \"2_year\", \"3_year\", \"5_year\", \"7_year\", \"10_year\", \"20_year\", \"30_year\"])\n\t\tdf.to_csv(self.output().path, 
**STAGE_FORMAT)\n\n","repo_name":"hydra-lab/Operation-Pluto","sub_path":"pluto/fed.py","file_name":"fed.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"9"}
+{"seq_id":"415201161","text":"__all__ = ('fast_dump', 'fast_load')\nfrom b64utils import *\nfrom base64 import b64encode, b64decode\nfrom Crypto.Hash import MD5\n\ndef get_md5_msg(msg: str):\n h = MD5.new(data=msg.encode())\n return b64encode(h.digest()).decode() + ';' + msg\n\ndef unpack_md5_msg(msg: str):\n l = msg.split(';')\n assert len(l) == 2\n h = MD5.new(data=l[1].encode())\n assert l[0] == b64encode(h.digest()).decode()\n return l[1]\n\n# Dump a list of numbers.\ndef fast_dump(target):\n assert len(target) > 0\n\n result = []\n for x in target:\n assert isinstance(x, int)\n result.append(int_b64encode(x))\n\n return get_md5_msg(','.join(result))\n\n# Load a list of numbers dumped by fast_dump.\ndef fast_load(pack_msg):\n msg = unpack_md5_msg(pack_msg)\n assert len(msg) > 0\n l = msg.split(',')\n return [int_b64decode(x) for x in l]","repo_name":"ipid/yao-millionaires","sub_path":"fastSerialize.py","file_name":"fastSerialize.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"9"}
+{"seq_id":"25515679980","text":"import argparse\n\nimport matplotlib.pyplot as plt\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint\nfrom pytorch_lightning.loggers import WandbLogger\n\nfrom utils import (\n DataModule,\n LightningModel,\n Config,\n seed_everything,\n)\n\n\ndef train(CONFIG: Config):\n data_module = DataModule(CONFIG)\n data_module.setup()\n \n model = LightningModel(CONFIG=CONFIG)\n \n ckpt = ModelCheckpoint(\n save_top_k=1,\n monitor=\"val_f1_score\", # val f1\n mode=\"max\",\n dirpath=\".\\\\ckpt\",\n filename=\"ViT_base_384_v2_{epoch}\"\n )\n \n wandb_logger = WandbLogger(\n entity=\"bsh\",\n name=f\"{CONFIG.MODEL_NAME}_v2\",\n project=\"dacon_clf\", \n )\n \n trainer = pl.Trainer(\n max_epochs=CONFIG.EPOCHS,\n accelerator=\"gpu\",\n callbacks=ckpt,\n logger=wandb_logger\n )\n \n trainer.fit(\n model,\n data_module\n )\n \ndef test(CONFIG: Config):\n data_module = DataModule(CONFIG)\n data_module.setup()\n \n model = LightningModel.load_from_checkpoint(\".\\\\ckpt\\\\ViT_base_384_v2_epoch=6.ckpt\", CONFIG=CONFIG)\n \n trainer = pl.Trainer(\n accelerator=\"gpu\",\n )\n\n trainer.test(\n model,\n data_module\n )\n \n \ndef plot_cam(CONFIG: Config):\n data_module = DataModule(CONFIG)\n data_module.setup()\n \n #\n model = LightningModel.load_from_checkpoint(\".\\\\ckpt\\\\ViT_base_384_v2_epoch=6.ckpt\", CONFIG=CONFIG)\n \n trainer = pl.Trainer(\n precision=16,\n accelerator=\"gpu\",\n )\n \n trainer.predict(\n model,\n data_module\n )\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--seed', type=int, default=777)\n parser.add_argument('--epochs', type=int, default=30)\n parser.add_argument('--lr', type=float, default=1e-4)\n parser.add_argument('--img_resize', type=int, default=384)\n parser.add_argument('--num_workers', type=int, default=4)\n parser.add_argument('--batch_size', type=int, default=16)\n parser.add_argument('--mode', default='train')\n parser.add_argument('--model_name', default=\"ViT_base_384\")\n # parser.add_argument('--detail') \n args = parser.parse_args()\n \n CONFIG = Config(args=args)\n plt.rcParams[\"font.family\"] = \"MalGun Gothic\"\n \n # seed everything\n seed_everything(CONFIG.SEED)\n \n \n if CONFIG.MODE == 'train':\n train(CONFIG)\n elif CONFIG.MODE == 'test':\n test(CONFIG)\n elif CONFIG.MODE == 'predict':\n plot_cam(CONFIG)","repo_name":"sihyeong671/Dacon_papering_defect_clf","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"}
+{"seq_id":"71361625887","text":"#!/usr/bin/python3\nimport sys\ninitial_length={'A':1189000,'B':1456000}\nroot2=2**0.5\ninputs = sys.argv\nin_type_from = inputs[1][0]\nin_type_to = inputs[2][0]\nif (in_type_from != 'A' and in_type_from != 'B') or (in_type_to != 'A' and in_type_to != 'B'):\n sys.exit('error! A series or B series only!') \nelse:\n length_from = initial_length[in_type_from]*((1/root2)**int(inputs[1][1:]))\n length_to = initial_length[in_type_to]*((1/root2)**int(inputs[2][1:]))\n result = (length_to/length_from)*100\n print('{before} to {after}: {result}%'.format(before=inputs[1],after=inputs[2],result=result))\n","repo_name":"ark231/conv-size","sub_path":"conv-size.py","file_name":"conv-size.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"}
+{"seq_id":"72172304928","text":"N = int(input())\nS = input()\n\nprev = \"\"\nans = True\n\nfor s in S:\n if prev == \"\":\n prev = s\n continue\n if prev == s:\n ans = False\n break\n prev = s\n\nprint(\"Yes\") if ans else print(\"No\")","repo_name":"Oni-Men/atcoder","sub_path":"abc/293/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"}
+{"seq_id":"71336576929","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport math\n\nmatplotlib.style.use('seaborn')\n\ndata = np.genfromtxt(fname=\"../build/random_avg_error.txt\", # globalerror / relative_error / random_avg_error\n dtype=np.float32,\n delimiter=' ',\n skip_header=0)\n\nsteps = data[:,0]\nerrors = data[:,3:]\nnum = errors.shape[1]\n\nfor i in range(0, num):\n\tplt.semilogy( steps, errors[:10000,i])\nplt.xlabel('time step'), plt.ylabel('error')\nplt.legend( [\"Finite Difs\", \"CNN\", \"CNNnoB\", \"CNNsym\", \"TCN\", \"TCN Avg\", \"TCN No Res\"])\nplt.show() # \"Analytic\", \"FD\", ","repo_name":"Thanduriel/StableNN","sub_path":"evaluation/heat_global_error.py","file_name":"heat_global_error.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"}
+{"seq_id":"5149493677","text":"from datetime import datetime\nfrom datetime import date\n\nStudents = {}\nModules = {}\nUnits = dict([])\nclass student:\n def __init__(self):\n self.num = input(\"please enter the contact number: \")\n if self.num in Students:\n print(\"Dear student,welcome {} to Edyoda's digital university\".format(Students[self.num][\"full name\"]))\n self.student_menu()\n else:\n print(\"sorry, your details are not found\\n please ask the manager to add your details\")\n def student_menu(self):\n print(\"please choose one of the options\\n 1.Today sechudule\\n 2.view my modules\\n 3.update profile\\n 4.logout\")\n i = int(input())\n while i not in range(1,4):\n if i == 4:\n login()\n break\n i = int(input(\"please enter a valid number 1 or 2 or 3\"))\n else:\n if i == 1:\n self.view_module()\n self.student_menu()\n if i == 2:\n self.update_profile()\n 
self.student_menu()\n if i == 3:\n self.student_profile()\n self.student_menu()\n def student_profile(self):\n print(\"your! complete profile is : \")\n for i in Students[self.num]:\n if i != \"module\":\n print(\"{}:{}\".format(i,Students[self.num][i]))\n else:\n pass\n def update_profile(self):\n check = input(\"please enter the contact number of student: \")\n if check in Students:\n print(\"here are the details\")\n label = 1\n for i in Students[check]:\n print(\"{},{}\".format(label,i))\n label += 1\n val = input(\"please enter the name of details you want to update\")\n while (val not in Students[check]):\n print(\"this details not present for this student\\n\\n\")\n break\n else:\n print(\"select change name or change email\")\n n = int(input(\"1.change name \\n 2.change email\\n\"))\n while (n not in range(1,3)):\n n = int(input(\"please enter number either 1 or 2\"))\n else:\n if n == 1:\n name = input(\"enter the name do you want! \")\n Students[check][\"full name\"] = full_name\n print(\"you sucessfully changed name\")\n elif n == 2:\n email = input(\"enter e mail\")\n Students[check][\"email\"] = email\n print(\"you sucessfully change email \")\n print(\"Lets go back to main menu!\\n\\n\")\n \n def view_my_modules(self):\n check = input(\"please enter module name\\n\")\n if check in Modules:\n print(\"the details of the module is: \")\n for detail in Modules[check]:\n if type(Modules[check][detail]) != dict:\n print(\"{},{}\".format(detail,Modules[check][detail]))\n else:\n print(detail + \":\")\n for j in Modules[check][detail]:\n print(\"{},{}\".format(detail,Modules[check][detail][j]))\n else:\n print(\"sorry, the module details does not exist\\n\")\n print(\"let's go back to the main menu\\n\\n\")\n \nclass manager:\n global Students\n global Modules\n global Unit\n\n def __init__(self):\n self.Manager_menu()\n pass\n def Manager_menu(self):\n print(\"select your options,Dear Program Manager! \\n 1.Manage Modules \\n 2.Manage units \\n 3.Manage Students \\n 4.Logout\")\n i = int(input(\"enter your number\\n\"))\n while (i not in range(1,4)):\n if i == 4:\n exit()\n break\n else:\n print(\"please enter a valid number\\n\")\n i = int(input(\"please enter your option , make sure that the number between 1 to 5\"))\n else:\n if i == 1:\n self.Manage_modules()\n self.Manager_menu()\n elif i == 2:\n self.Manage_units()\n self.Manager_menu()\n elif i == 3:\n self.Manage_students()\n self.Manager_menu()\n def Manage_students(self):\n print(\"select your options,mange_students menu! \\n 1.create student\\n 2.view students\\n 3.update student\\n 4.delete student\\n 5.v_students\\n 6.exit\")\n i = int(input(\"enter the number\\n\"))\n while (i not in range(1,6)):\n if i == 6:\n exit()\n break\n else:\n print(\"enter a valid number, make sure that the number between 1 to 5\\n\")\n i = int(input(\"enter the number: \"))\n else:\n if i == 1:\n self.create_student()\n self.Manage_students()\n if i == 2:\n self.view_student()\n self.Manage_students()\n if i == 3:\n self.update_student()\n self.Manage_students()\n if i == 4:\n self.delete_student()\n self.Manage_students()\n if i == 5:\n self.v_students()\n self.Manage_students()\n def create_student(self):\n print(\"Okay, lets create a new student\")\n num = input(\"please enter contact number if student does not exist!\\n\")\n while num in Students:\n print(\"it seems the student already exist\\n\")\n break\n else:\n Students[num] = {}\n print(\" okay seems like the student does not exist! 
please enter the details\")\n full_name = input(\"please enter full name of student!:\\n\")\n Students[num][\"full name\"] = full_name\n age = input(\"please enter the age\\n\")\n while (age.isdigit() == False):\n age = input(\"enter age in numbers only\")\n Students[num][\"Age\"] = age\n gender = input(\"enter your gender\")\n while (gender not in [\"male\",\"MALE\",\"m\",\"M\",\"Female\",\"FEMALE\",\"F\",\"f\",\"others\",\"o\",\"O\"]):\n gender = input(\"please enter your gender m,f,o\\n\")\n Students[num][\"Gender\"] = gender\n\n Students[num][\"contactnumber\"] = num\n\n email = input(\"enter your email address!\\n\")\n Students[num][\"email\"] = email\n module = input(\"enter the course, what he/she want!\")\n Students[num][\"module\"] = module\n def view_student(self):\n check = input(\"please enter contact number of the student\\n\")\n if check in Students:\n print(\"the details of the student is: \")\n for detail in Students[check]:\n if type(Students[check][detail]) != dict:\n print(\"{},{}\".format(detail,Students[check][detail]))\n else:\n print(detail + \":\")\n for j in Students[check][detail]:\n print(\"{},{}\".format(detail,Students[check][detail][j]))\n \n else:\n print(\"sorry, the student details does not exist\\n\")\n print(\"let's go back to the main menu\\n\\n\")\n \n def v_students(self):\n for i,x in Students.items():\n print(i,x)\n \n\n def update_student(self):\n check = input(\"please enter the contact number of student: \")\n if check in Students:\n print(\"here are the details\")\n label = 1\n for i in Students[check]:\n print(\"{},{}\".format(label,i))\n label += 1\n val = input(\"please enter the name of details you want to update\")\n while (val not in Students[check]):\n print(\"this details not present for this student\\n\\n\")\n break\n else:\n if val == \"module\":\n print(\"select change module or revoke student\")\n n = int(input(\"1.change module \\n 2.revoke student\"))\n while (n not in range(1,3)):\n n = int(input(\"please enter number either 1 or 2\"))\n else:\n if n == 1:\n module = input(\"enter the course name do you want! \")\n Students[check][\"module\"] = module\n print(\"you sucessfully changed module\")\n elif n == 2:\n self.delete_student(check)\n print(\"student has been sucessfully deleted\")\n print(\"Lets go back to main menu!\\n\\n\")\n\n def delete_student(self):\n num = input(\"enter the contact number\")\n if num in Students:\n del Students[num]\n else:\n print(\"the student does not exist\")\n print(\"lets go back to main menu\")\n def Manage_modules(self):\n print(\"select your options! 
in modules \\n 1.create module \\n 2.list of modules\\n 3.show module\\n 4.delete module\\n 5.show detailed module\\n 6.exit\")\n i = int(input(\"enter the number\\n\"))\n while (i not in range(1,6)):\n if i == 6:\n exit()\n break\n else:\n print(\"enter a valid number, make sure that the number between 1 to 3\\n\")\n i = int(input(\"enter the number: \"))\n else:\n if i == 1:\n self.create_module()\n self.Manage_modules()\n if i == 2:\n self.view_module()\n self.Manage_modules()\n if i == 3:\n self.show_modules()\n self.Manage_modules()\n if i == 4:\n self.delete_module()\n self.Manage_modules()\n if i == 5:\n self.view_detailed_module()\n self.Manage_modules()\n def create_module(self):\n print(\"Okay, lets create a new module\")\n num = input(\"please enter module key does not exist!\\n\")\n while num in Modules:\n print(\"it seems the module already exist\\n\")\n break\n else:\n Modules[num] = {}\n print(\" okay seems like the module does not exist! please enter the details\")\n Module_name = input(\"please enter module name!:\\n\")\n Modules[num][\"module name\"] = Module_name\n \n Modules[num][\"id\"] = num\n start_date = input(\"enter the starting date of module \")\n Modules[num][\"startdate\"] = start_date\n end_date = input(\"enter the closing date: \")\n Modules[num][\"end date\"] = end_date\n units = input(\"enter the number of units\")\n Modules[num][\"units\"] = units\n date = input(\"please enter today's date\")\n Modules[num][\"date\"] = date\n i = [\"upcoming\",\"ongoing\",\"complete\"]\n if date < start_date:\n n = i[0]\n print(i[0])\n \n elif date <= end_date:\n n = i[1]\n print(i[1])\n \n else:\n n = i[2]\n print(i[2])\n status = n\n Modules[num][\"status\"] = status\n def view_module(self):\n check = input(\"please enter module name\\n\")\n if check in Modules:\n print(\"the details of the module is: \")\n for detail in Modules[check]:\n if type(Modules[check][detail]) != dict:\n print(\"{},{}\".format(detail,Modules[check][detail]))\n else:\n print(detail + \":\")\n for j in Modules[check][detail]:\n print(\"{},{}\".format(detail,Modules[check][detail][j]))\n else:\n print(\"sorry, the module details does not exist\\n\")\n print(\"let's go back to the main menu\\n\\n\")\n \n def show_modules(self):\n module_list = Modules.keys()\n print(\"choose modules\")\n temp = input(module_list)\n if temp not in module_list:\n print(\"please choose from available from above module list\")\n return show_module()\n else:\n return temp\n \n def delete_module(self):\n num = input(\"please enter contact number\")\n if num in Modules:\n del Modules[num]\n else:\n print(\"the module does not exist\")\n print(\"lets go back to main menu\")\n \n def Manage_units(self):\n print(\"select your options! in units \\n 1.create unit \\n 2. 
choose units\\n 3.update unit\\n 4.delete unit\\n 5.exit\")\n i = int(input(\"enter the number\\n\"))\n while (i not in range(1,5)):\n if i == 5:\n exit()\n break\n else:\n print(\"enter a valid number, make sure that the number between 1 to 5\\n\")\n i = int(input(\"enter the number: \"))\n else:\n if i == 1:\n self.create_unit()\n self.Manage_units()\n if i == 2:\n self.choose_unit()\n self.Manage_units()\n if i == 3:\n self.view_all_units()\n self.Manage_units()\n if i == 4:\n self.delete_unit()\n self.Manage_units()\n def create_unit(self):\n unit_list = list(Modules.keys())\n print(\"Okay, lets create a new unit\")\n num = input(\"please enter unit key does not exist!\\n\")\n while num in Units:\n print(\"it seems the unit already exist\\n\")\n break\n else:\n Units[num] = {}\n print(\" okay seems like the module does not exist! please enter the details\")\n unit_name = input(\"please enter module name!:\\n\")\n Units[num][\"unit name\"] = unit_name\n unit_type = input(\"the the type of unit\\n\")\n Units[num][\"type\"] = unit_type\n \n Units[num][\"id\"] = num\n start_date = input(\"enter the starting date of module\\n \")\n Units[num][\"startdate\"] = start_date\n end_date = input(\"enter the closing date: \\n\")\n Units[num][\"end date\"] = end_date\n \n def choose_unit():\n unitlist = (Units.keys())\n print(\"choose the unit\")\n tempn = input(unitlist)\n if tempn not in unitlist:\n print(\"please choose freom available units\")\n return choose_unit()\n else:\n return tempn\n def view_all_units():\n print(\"the list of units\")\n for i in Units:\n print(i)\n def delete_unit(self):\n num = input(\"enter the contact number\")\n if num in Units:\n del Units[num]\n else:\n print(\"the units does not exist\")\n print(\"lets go back to main menu\") \ndef login():\n print(\"welcome to Edyoda digital university\".center(80,\"#\"))\n i = int(input(\"please enter one of the following\\n 1.Students\\n 2.ProgramManager\\n 3.exit\"))\n while (i not in range(1,3)):\n if i == 3:\n break\n i = int(input(\"please enter a valid number either 1 or 2\"))\n else:\n if i == 1:\n obj = student()\n else:\n obj1 = manager()\nlogin()\n","repo_name":"Mdadilkhan1/practice","sub_path":"main.py/pro1.py","file_name":"pro1.py","file_ext":"py","file_size_in_byte":15364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"74202772767","text":"from tkinter import Text, font as f\r\nfrom tkinter import *\r\nfrom tkinter.messagebox import *#弹窗库\r\nfrom tkinter.filedialog import *\r\nimport sys,os\r\nimport hashlib\r\n\r\n\r\nif len(sys.argv)>1:\r\n file = sys.argv[1]\r\n\r\nfile = None\r\n\r\ndef opens(event=None):\r\n global file\r\n text.delete(1.0,\"end\")\r\n file = askopenfilename()\r\n if file:\r\n with open(file,'r') as f:\r\n text.insert('insert',f.read())\r\n win.title(file+'-Comcat Writer')\r\ndef newFile():\r\n global file\r\n text.delete(1.0,\"end\")\r\n file = None\r\n win.title(str(file)+'-Comcat Writer')\r\ndef delFile():\r\n file = askopenfilenames()\r\n if file:\r\n if askokcancel(\"system\",\"delet this files true?\"):\r\n for i in file:\r\n os.remove(i)\r\ndef save(event=None):\r\n global file\r\n if file == None:\r\n file = asksaveasfilename()\r\n if file:\r\n with open(file,'w') as f:\r\n f.write(getSig())\r\ndef getWin():\r\n global w,h\r\n if w != win.winfo_width() and h != win.winfo_height():\r\n w,h = win.winfo_width(),win.winfo_height()\r\n text.config(width=win.winfo_width(),height=win.winfo_height())\r\n 
win.after(1,getWin)\r\n\r\ndef move():\r\n text.edit_undo()\r\ndef getSig():\r\n contents = text.get(1.0, \"end\")\r\n return contents\r\ndef callback(event):\r\n text.edit_separator()\r\n\r\nwin = Tk()\r\nwin.geometry(\"1000x800\")\r\nwin.title(str(file)+'-Comcat Writer')\r\nw,h = win.winfo_width(),win.winfo_height()\r\n\r\ntext = Text(win,autoseparators=False, undo=True, maxundo=10)\r\ntext.bind('', callback)\r\ntext.bind_all('',save)\r\ntext.bind_all('',opens)\r\n#MENU\r\nmenu_all = Menu(win)\r\nfile_menu = Menu(menu_all,tearoff=0)\r\nrun_menu = Menu(menu_all,tearoff=0)\r\nedit_menu = Menu(menu_all,tearoff=0)\r\n\r\nopen_bt = Menu(file_menu,tearoff=0)\r\nnew_bt = Menu(file_menu,tearoff=0)\r\nsave_bt = Menu(file_menu,tearoff=0)\r\n\r\nfile_menu.add_command(label='save',command=save)\r\nfile_menu.add_command(label='open',command=opens)\r\nfile_menu.add_command(label='new',command=newFile)\r\nfile_menu.add_command(label='delet',command=delFile)\r\n\r\n\r\nback_bt = Menu(edit_menu,tearoff=0)\r\nedit_menu.add_command(label='back',command=move)\r\n\r\nmenu_all.add_cascade(label='File',menu=file_menu)\r\nmenu_all.add_cascade(label='Edit',menu=edit_menu)\r\nmenu_all.add_cascade(label='Run',menu=run_menu)\r\n#MENU END\r\n\r\nwin.config(menu=menu_all)\r\ntext.pack()\r\n\r\ngetWin()\r\n\r\nwin.mainloop()\r\n","repo_name":"PBCAT2022cat/comcat2023","sub_path":"write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"27898140281","text":"from advalg.graph_helpers import make_clique, make_cycle, make_path\nimport os\nfrom advalg.graph import Graph\nfrom typing import Callable\n\ndirname = os.path.dirname(__file__)\ngraph_path = os.path.join(dirname, 'data/vc_graph_small.txt')\n\ntests_fpt = [\n (\"10-path\", make_path(10), 5),\n (\"15-path\", make_path(15), 7),\n (\"10-cycle\", make_cycle(10), 5),\n (\"15-cycle\", make_cycle(15), 8),\n (\"24-cycle\", make_cycle(24), 12),\n (\"10-clique\", make_clique(10), 9),\n (\"15-clique\", make_clique(15), 14),\n (\"vc_graph_small\", Graph.from_file(graph_path), 12)\n]\n\ntests_sat = [\n (\"10-path\", make_path(10), 5),\n (\"15-path\", make_path(15), 7),\n (\"10-cycle\", make_cycle(10), 5),\n (\"15-cycle\", make_cycle(15), 8),\n (\"10-clique\", make_clique(10), 9),\n]\n\ndef run_tests(vc, cases):\n for name, g, size in cases:\n n = g.vertex_count()\n res = next(k for k in range(n+1) if vc(g, k))\n if res != size:\n print(f\"Test {name} failed. 
Expected {size} got {res}")\n else:\n print(f"Test {name} passed")\n\ndef test_fpt(vc: Callable[[Graph, int], bool]) -> None:\n \"\"\"Tests the FPT algorithm for vertex cover\"\"\"\n print(f"Testing: {vc.__name__}...")\n run_tests(vc, tests_fpt)\n\ndef test_sat(vc: Callable[[Graph, int], bool]) -> None:\n \"\"\"Tests the reduction from vertex cover to SAT\"\"\"\n print(f"Testing: {vc.__name__}...")\n run_tests(vc, tests_sat)\n","repo_name":"teaching-tool/practical-exercises","sub_path":"pythonLib/advalg/tests6.py","file_name":"tests6.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"}
+{"seq_id":"35292783496","text":"import sys\nfrom collections import deque\ninput=sys.stdin.readline\n\nn,m,t=map(int,input().split())\ngraph=[ list(map(int,input().split())) for _ in range(n)]\n\ndx=[1,-1,0,0]\ndy=[0,0,1,-1]\n\ndef bfs(x,y,w,z,time):\n q=deque()\n q.append((x,y,time))\n visited=[[0]*m for _ in range(n)]\n while q:\n x,y,time=q.popleft()\n for i in range(4):\n nx=x+dx[i]\n ny=y+dy[i]\n if 0<=nx bool:\n pyautogui.click(x=coord_x, y=coord_y)\n return True\n\n @staticmethod\n def get_screenshoot(monitor=monitor):\n with mss() as sct:\n img = nparray(sct.grab(monitor))\n return img\n\n @staticmethod\n def press(key, **kwargs):\n print(f'im pressing {key}')\n pyautogui.press(key, **kwargs)\n return True\n\n @staticmethod\n def alert(text: str, title: str, button: str, **kwargs):\n pyautogui.alert(text=text, title=title, button=button, **kwargs)\n return True\n\n\nclass CVImage(CVImageInterface):\n '''Class for working with images'''\n cv2 = CV2Engine\n gui = AutoGUI\n\n @classmethod\n def get_gray_screenshoot(cls, monitor=monitor):\n img = cls.gui.get_screenshoot(monitor)\n gray_monitor_img = cls.cv2.cvtColor(img)\n return gray_monitor_img\n\n @classmethod\n def read_gray_img(cls, img_path: str):\n image = cls.cv2.imread(img_path)\n gray_image = cls.cv2.cvtColor(image)\n return gray_image\n\n @classmethod\n def match_template(cls, image, template, threshold=default_threshold, **kwargs):\n res = cls.cv2.matchTemplate(image, template, **kwargs)\n loc = where(res >= threshold)\n return loc\n\n @staticmethod\n def get_img_center_from_loc(loc: ndarray, template_shape: Tuple[int, int]) -> Tuple:\n w, h = template_shape # Method called on an ndarray\n center_list = list()\n for x1, y1 in zip(*loc[::-1]):\n x2, y2 = (x1 + w, y1 + h)\n center = ((x1 + x2) / 2, (y1 + y2) / 2)\n center_list.append(center)\n\n avg_center = tuple(map(mean, zip(*center_list)))\n return avg_center\n\n @classmethod\n def print_image(cls, image, image_name=\"Image\"):\n cls.cv2.imshow(image_name, image)\n cls.cv2.waitKey(0)\n cls.cv2.destroyAllWindows()\n\n @staticmethod\n def locate_center_in_match_template(\n monitor_img: ndarray,\n full_src_btn_img: ndarray,\n threshold=default_threshold\n ) -> Tuple[int, int]:\n loc = CVImage.match_template(monitor_img, full_src_btn_img, threshold)\n center_img = CVImage.get_img_center_from_loc(loc, full_src_btn_img.shape[::-1])\n return center_img\n\n @classmethod\n async def match_template_and_click(\n cls,\n template_img_path: str,\n sleep_time: int,\n threshold=default_threshold,\n gray_monitor_img=None\n ) -> bool:\n '''Take a screenshot, find the element on the screen and click it'''\n if gray_monitor_img is None:\n gray_monitor_img = CVImage.get_gray_screenshoot()\n\n template_img = CVImage.read_gray_img(template_img_path)\n coords = cls.locate_center_in_match_template(gray_monitor_img, template_img, threshold)\n\n if coords:\n await cls.gui.mouse_click(*coords)\n await asyncio.sleep(sleep_time)\n return True\n\n return False\n\n\ndef convert_monitor_to_xy(monitor):\n x1 = monitor['left']\n y1 = monitor['top']\n x2 = monitor['left'] + monitor['width']\n y2 = monitor['top'] + monitor['height']\n return (x1, y1), (x2, y2)\n","repo_name":"dorsoleb/nu_pogodi_game_bot","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"88"}
+{"seq_id":"18827487312","text":"import platform\n\ndef read_infos_sys():\n \n uname = platform.uname()._asdict()\n if platform.system() == \"Linux\":\n retDict = dict(uname, **(platform.freedesktop_os_release()))\n \n elif platform.system() == \"Windows\":\n retDict = dict(uname, **({\"os_version\":platform.win32_ver()}))\n else:\n retDict = uname\n\n return retDict","repo_name":"panvin/livecampus-python-1","sub_path":"Exercice7/system/infos.py","file_name":"infos.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"}
+{"seq_id":"39192683697","text":"from tkinter import *\nfrom gen_data import gen_data\nfrom utils import clear_entry\nimport pandas as pd\n\ndef menu_widget(root):\n menu = Menu(root)\n root.config(menu=menu)\n\n subMenu = Menu(menu)\n menu.add_cascade(label=\"File\", menu=subMenu)\n return subMenu\n\ndef frame_widget(root):\n frame = Frame(root, bd=2, pady=10)\n frame.pack(fill=X)\n return frame\n\ndef label_widget(root):\n label_0 = Label(root, text=\"Compile & Generate Data\", font=\"helvatica 10 bold underline\")\n #label_1 = Label(root, text=\"Sources\", anchor=E)\n label_2 = Label(root, text=\"Save As...\", anchor=E)\n label_3 = Label(root, text=\"Search by Name\", font=\"helvatica 10 bold underline\", anchor=E)\n label_4 = Label(root, text=\"ID / Company Name\", anchor=E)\n #label_5 = Label(root, text=\"From\", anchor=E)\n return label_0, label_2, label_3, label_4\n\ndef entry_widget(root):\n fileEntry = Entry(root)\n #fileEntry.insert(0, \"data/scoring_A.csv, data/scoring_B.csv, data/scoring_C.csv\")\n saveEntry = Entry(root)\n saveEntry.insert(0, \"results\")\n searchEntry = Entry(root)\n #fromEntry = Entry(root)\n entries = [fileEntry, saveEntry, searchEntry]\n return entries\n\ndef radio_widget(root):\n options = [\"All\", \"Company info\", \"Sales\", \"Contact info\", \"Lead info\", \"Lead scores\"]\n value_inside = StringVar()\n value_inside.set(\"All\")\n mb = OptionMenu(root, value_inside, *options)\n return mb, value_inside\n\ndef grid_widget(widgets):\n widgets[0].grid(sticky=W, row=0, columnspan=2, padx=5)\n widgets[1].grid(sticky=E, row=1, padx=5)\n widgets[2].grid(sticky=E, row=2, padx=5)\n widgets[3].grid(sticky=W, row=5, columnspan=2) \n widgets[4].grid(sticky=E, row=6, padx=5)\n #widgets[5].grid(sticky=E, row=7, padx=5)\n widgets[5].grid(row=1, column=1, pady=5, padx=3)\n widgets[6].grid(row=2, column=1, pady=5, padx=3)\n widgets[7].grid(row=6, column=1, pady=5, padx=3)\n #widgets[9].grid(row=7, column=1, pady=5, padx=3)\n widgets[8].grid(sticky=E, row=3, columnspan=2)\n widgets[9].grid(sticky=E, row=7, columnspan=2)\n widgets[10].grid(sticky=E, row=7, column=0) 
\n","repo_name":"azfarjef/lead-scoring","sub_path":"seezhong/complile_sz/widget.py","file_name":"widget.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"39103507512","text":"import os\nfrom copy import deepcopy\nfrom enum import Enum\n\nimport pystac\nfrom pystac import STACError\nfrom pystac.stac_object import STACObject\nfrom pystac.layout import (BestPracticesLayoutStrategy, LayoutTemplate)\nfrom pystac.link import (Link, LinkType)\nfrom pystac.cache import ResolvedObjectCache\nfrom pystac.utils import (is_absolute_href, make_absolute_href)\n\n\nclass CatalogType(str, Enum):\n def __str__(self):\n return str(self.value)\n\n SELF_CONTAINED = 'SELF_CONTAINED'\n \"\"\"A 'self-contained catalog' is one that is designed for portability.\n Users may want to download a catalog from online and be able to use it on their\n local computer, so all links need to be relative.\n\n See:\n `The best practices documentation on self-contained catalogs `_\n \"\"\" # noqa E501\n\n ABSOLUTE_PUBLISHED = 'ABSOLUTE_PUBLISHED'\n \"\"\"\n Absolute Published Catalog is a catalog that uses absolute links for everything,\n both in the links objects and in the asset hrefs.\n\n See:\n `The best practices documentation on published catalogs `_\n \"\"\" # noqa E501\n\n RELATIVE_PUBLISHED = 'RELATIVE_PUBLISHED'\n \"\"\"\n Relative Published Catalog is a catalog that uses relative links for everything,\n but includes an absolute self link at the root catalog, to identify its online location.\n\n See:\n `The best practices documentation on published catalogs `_\n \"\"\" # noqa E501\n\n @classmethod\n def determine_type(cls, stac_json):\n \"\"\"Determines the catalog type based on a STAC JSON dict.\n\n Only applies to Catalogs or Collections\n\n Args:\n stac_json (dict): The STAC JSON dict to determine the catalog type\n\n Returns:\n str or None: The catalog type of the catalog or collection.\n Will return None if it cannot be determined.\n \"\"\"\n self_link = None\n relative = False\n for link in stac_json['links']:\n if link['rel'] == 'self':\n self_link = link\n else:\n relative |= not is_absolute_href(link['href'])\n\n if self_link:\n if relative:\n return cls.RELATIVE_PUBLISHED\n else:\n return cls.ABSOLUTE_PUBLISHED\n else:\n if relative:\n return cls.SELF_CONTAINED\n else:\n return None\n\n\nclass Catalog(STACObject):\n \"\"\"A PySTAC Catalog represents a STAC catalog in memory.\n\n A Catalog is a :class:`~pystac.STACObject` that may contain children,\n which are instances of :class:`~pystac.Catalog` or :class:`~pystac.Collection`,\n as well as :class:`~pystac.Item` s.\n\n Args:\n id (str): Identifier for the catalog. Must be unique within the STAC.\n description (str): Detailed multi-line description to fully explain the catalog.\n `CommonMark 0.28 syntax `_ MAY be used for rich text\n representation.\n title (str or None): Optional short descriptive one-line title for the catalog.\n stac_extensions (List[str]): Optional list of extensions the Catalog implements.\n href (str or None): Optional HREF for this catalog, which be set as the catalog's\n self link's HREF.\n catalog_type (str or None): Optional catalog type for this catalog. 
Must\n be one of the values in :class`~pystac.CatalogType`.\n\n Attributes:\n id (str): Identifier for the catalog.\n description (str): Detailed multi-line description to fully explain the catalog.\n title (str or None): Optional short descriptive one-line title for the catalog.\n stac_extensions (List[str] or None): Optional list of extensions the Catalog implements.\n extra_fields (dict or None): Extra fields that are part of the top-level JSON properties\n of the Catalog.\n links (List[Link]): A list of :class:`~pystac.Link` objects representing\n all links associated with this Catalog.\n catalog_type (str or None): The catalog type, or None if not known.\n \"\"\"\n\n STAC_OBJECT_TYPE = pystac.STACObjectType.CATALOG\n\n DEFAULT_FILE_NAME = \"catalog.json\"\n \"\"\"Default file name that will be given to this STAC object in a cononical format.\"\"\"\n def __init__(self,\n id,\n description,\n title=None,\n stac_extensions=None,\n extra_fields=None,\n href=None,\n catalog_type=None):\n super().__init__(stac_extensions)\n\n self.id = id\n self.description = description\n self.title = title\n if extra_fields is None:\n self.extra_fields = {}\n else:\n self.extra_fields = extra_fields\n\n self._resolved_objects = ResolvedObjectCache()\n\n self.add_link(Link.root(self))\n\n if href is not None:\n self.set_self_href(href)\n\n self.catalog_type = catalog_type\n\n self._resolved_objects.cache(self)\n\n def __repr__(self):\n return ''.format(self.id)\n\n def set_root(self, root, link_type=LinkType.ABSOLUTE):\n STACObject.set_root(self, root, link_type)\n if root is not None:\n root._resolved_objects = ResolvedObjectCache.merge(root._resolved_objects,\n self._resolved_objects)\n\n def add_child(self, child, title=None):\n \"\"\"Adds a link to a child :class:`~pystac.Catalog` or :class:`~pystac.Collection`.\n This method will set the child's parent to this object, and its root to\n this Catalog's root.\n\n Args:\n child (Catalog or Collection): The child to add.\n title (str): Optional title to give to the :class:`~pystac.Link`\n \"\"\"\n\n # Prevent typo confusion\n if isinstance(child, pystac.Item):\n raise STACError('Cannot add item as child. Use add_item instead.')\n\n child.set_root(self.get_root())\n child.set_parent(self)\n self.add_link(Link.child(child, title=title))\n\n def add_children(self, children):\n \"\"\"Adds links to multiple :class:`~pystac.Catalog` or `~pystac.Collection`s.\n This method will set each child's parent to this object, and their root to\n this Catalog's root.\n\n Args:\n children (Iterable[Catalog or Collection]): The children to add.\n \"\"\"\n for child in children:\n self.add_child(child)\n\n def add_item(self, item, title=None):\n \"\"\"Adds a link to an :class:`~pystac.Item`.\n This method will set the item's parent to this object, and its root to\n this Catalog's root.\n\n Args:\n item (Item): The item to add.\n title (str): Optional title to give to the :class:`~pystac.Link`\n \"\"\"\n\n # Prevent typo confusion\n if isinstance(item, pystac.Catalog):\n raise STACError('Cannot add catalog as item. 
Use add_child instead.')\n\n item.set_root(self.get_root())\n item.set_parent(self)\n self.add_link(Link.item(item, title=title))\n\n def add_items(self, items):\n \"\"\"Adds links to multiple :class:`~pystac.Item` s.\n This method will set each item's parent to this object, and their root to\n this Catalog's root.\n\n Args:\n items (Iterable[Item]): The items to add.\n \"\"\"\n for item in items:\n self.add_item(item)\n\n def get_child(self, id, recursive=False):\n \"\"\"Gets the child of this catalog with the given ID, if it exists.\n\n Args:\n id (str): The ID of the child to find.\n recursive (bool): If True, search this catalog and all children for the item;\n otherwise, only search the children of this catalog. Defaults to False.\n\n Return:\n Item or None: The item with the given ID, or None if not found.\n \"\"\"\n if not recursive:\n return next((c for c in self.get_children() if c.id == id), None)\n else:\n for root, _, _ in self.walk():\n child = root.get_child(id, recursive=False)\n if child is not None:\n return child\n return None\n\n def get_children(self):\n \"\"\"Return all children of this catalog.\n\n Return:\n Generator[Catalog or Collection]: Generator of children who's parent\n is this catalog.\n \"\"\"\n return self.get_stac_objects('child')\n\n def get_child_links(self):\n \"\"\"Return all child links of this catalog.\n\n Return:\n List[Link]: List of links of this catalog with ``rel == 'child'``\n \"\"\"\n return self.get_links('child')\n\n def clear_children(self):\n \"\"\"Removes all children from this catalog.\n\n Return:\n Catalog: Returns ``self``\n \"\"\"\n child_ids = [child.id for child in self.get_children()]\n for child_id in child_ids:\n self.remove_child(child_id)\n return self\n\n def remove_child(self, child_id):\n \"\"\"Removes an child from this catalog.\n\n Args:\n child_id (str): The ID of the child to remove.\n \"\"\"\n new_links = []\n root = self.get_root()\n for link in self.links:\n if link.rel != 'child':\n new_links.append(link)\n else:\n link.resolve_stac_object(root=root)\n if link.target.id != child_id:\n new_links.append(link)\n else:\n child = link.target\n child.set_parent(None)\n child.set_root(None)\n self.links = new_links\n\n def get_item(self, id, recursive=False):\n \"\"\"Returns an item with a given ID.\n\n Args:\n id (str): The ID of the item to find.\n recursive (bool): If True, search this catalog and all children for the item;\n otherwise, only search the items of this catalog. 
Defaults to False.\n\n Return:\n Item or None: The item with the given ID, or None if not found.\n \"\"\"\n if not recursive:\n return next((i for i in self.get_items() if i.id == id), None)\n else:\n for root, children, items in self.walk():\n item = root.get_item(id, recursive=False)\n if item is not None:\n return item\n return None\n\n def get_items(self):\n \"\"\"Return all items of this catalog.\n\n Return:\n Generator[Item]: Generator of items who's parent is this catalog.\n \"\"\"\n return self.get_stac_objects('item')\n\n def clear_items(self):\n \"\"\"Removes all items from this catalog.\n\n Return:\n Catalog: Returns ``self``\n \"\"\"\n for link in self.get_item_links():\n if link.is_resolved():\n item = link.target\n item.set_parent(None)\n item.set_root(None)\n\n self.links = [link for link in self.links if link.rel != 'item']\n return self\n\n def remove_item(self, item_id):\n \"\"\"Removes an item from this catalog.\n\n Args:\n item_id (str): The ID of the item to remove.\n \"\"\"\n new_links = []\n root = self.get_root()\n for link in self.links:\n if link.rel != 'item':\n new_links.append(link)\n else:\n link.resolve_stac_object(root=root)\n if link.target.id != item_id:\n new_links.append(link)\n else:\n item = link.target\n item.set_parent(None)\n item.set_root(None)\n self.links = new_links\n\n def get_all_items(self):\n \"\"\"Get all items from this catalog and all subcatalogs. Will traverse\n any subcatalogs recursively.\n\n Returns:\n Generator[Item]: All items that belong to this catalog, and all\n catalogs or collections connected to this catalog through\n child links.\n \"\"\"\n yield from self.get_items()\n for child in self.get_children():\n yield from child.get_all_items()\n\n def get_item_links(self):\n \"\"\"Return all item links of this catalog.\n\n Return:\n List[Link]: List of links of this catalog with ``rel == 'item'``\n \"\"\"\n return self.get_links('item')\n\n def to_dict(self, include_self_link=True):\n links = self.links\n if not include_self_link:\n links = filter(lambda l: l.rel != 'self', links)\n\n d = {\n 'id': self.id,\n 'stac_version': pystac.get_stac_version(),\n 'description': self.description,\n 'links': [link.to_dict() for link in links]\n }\n\n if self.stac_extensions is not None:\n d['stac_extensions'] = self.stac_extensions\n\n for key in self.extra_fields:\n d[key] = self.extra_fields[key]\n\n if self.title is not None:\n d['title'] = self.title\n\n return deepcopy(d)\n\n def clone(self):\n clone = Catalog(id=self.id,\n description=self.description,\n title=self.title,\n stac_extensions=self.stac_extensions,\n extra_fields=deepcopy(self.extra_fields),\n catalog_type=self.catalog_type)\n clone._resolved_objects.cache(clone)\n\n for link in self.links:\n if link.rel == 'root':\n # Catalog __init__ sets correct root to clone; don't reset\n # if the root link points to self\n root_is_self = link.is_resolved() and link.target is self\n if not root_is_self:\n clone.set_root(None)\n clone.add_link(link.clone())\n else:\n clone.add_link(link.clone())\n\n return clone\n\n def make_all_links_relative(self):\n \"\"\"Makes all the links of this catalog and all children and item\n to be relative, recursively\n \"\"\"\n super().make_links_relative()\n\n for child in self.get_children():\n child.make_all_links_relative()\n for item in self.get_items():\n item.make_links_relative()\n\n def make_all_links_absolute(self):\n \"\"\"Makes all the links of this catalog and all children and item\n to be absolute, recursively\n \"\"\"\n 
super().make_links_absolute()\n\n for child in self.get_children():\n child.make_all_links_absolute()\n for item in self.get_items():\n item.make_links_absolute()\n\n def make_all_asset_hrefs_relative(self):\n \"\"\"Makes all the HREFs of assets belonging to items in this catalog\n and all children to be relative, recursively.\n \"\"\"\n for _, _, items in self.walk():\n for item in items:\n item.make_asset_hrefs_relative()\n\n def make_all_asset_hrefs_absolute(self):\n \"\"\"Makes all the HREFs of assets belonging to items in this catalog\n and all children to be absolute, recursively.\n \"\"\"\n for _, _, items in self.walk():\n for item in items:\n item.make_asset_hrefs_absolute()\n\n def normalize_and_save(self, root_href, catalog_type, strategy=None):\n \"\"\"Normalizes link HREFs to the given root_href, and saves\n the catalog with the given catalog_type.\n\n This is a convenience method that simply calls :func:`Catalog.normalize_hrefs\n ` and :func:`Catalog.save `\n in sequence.\n\n Args:\n root_href (str): The absolute HREF that all links will be normalized against.\n catalog_type (str): The catalog type that dictates the structure of\n the catalog to save. Use a member of :class:`~pystac.CatalogType`.\n strategy (HrefLayoutStrategy): The layout strategy to use in setting the HREFS\n for this catalog. Defaults to :class:`~pystac.layout.BestPracticesLayoutStrategy`\n \"\"\"\n self.normalize_hrefs(root_href, strategy=strategy)\n self.save(catalog_type)\n\n def normalize_hrefs(self, root_href, strategy=None):\n \"\"\"Normalize HREFs will regenerate all link HREFs based on\n an absolute root_href and the canonical catalog layout as specified\n in the STAC specification's best practices.\n\n This method mutates the entire catalog tree.\n\n Args:\n root_href (str): The absolute HREF that all links will be normalized against.\n strategy (HrefLayoutStrategy): The layout strategy to use in setting the HREFS\n for this catalog. Defaults to :class:`~pystac.layout.BestPracticesLayoutStrategy`\n\n See:\n `STAC best practices document `_ for the canonical layout of a STAC.\n \"\"\" # noqa E501\n if strategy is None:\n strategy = BestPracticesLayoutStrategy()\n\n # Normalizing requires an absolute path\n if not is_absolute_href(root_href):\n root_href = make_absolute_href(root_href, os.getcwd(), start_is_dir=True)\n\n def process_item(item, _root_href):\n item.resolve_links()\n\n new_self_href = strategy.get_href(item, _root_href)\n\n def fn():\n item.set_self_href(new_self_href)\n\n return fn\n\n def process_catalog(cat, _root_href, is_root):\n setter_funcs = []\n\n cat.resolve_links()\n\n new_self_href = strategy.get_href(cat, _root_href, is_root)\n new_root = os.path.dirname(new_self_href)\n\n for item in cat.get_items():\n setter_funcs.append(process_item(item, new_root))\n\n for child in cat.get_children():\n setter_funcs.extend(process_catalog(child, new_root, is_root=False))\n\n def fn():\n cat.set_self_href(new_self_href)\n\n setter_funcs.append(fn)\n\n return setter_funcs\n\n # Collect functions that will actually mutate the objects.\n # Delay mutation as setting hrefs while walking the catalog\n # can result in bad links.\n setter_funcs = process_catalog(self, root_href, is_root=True)\n\n for fn in setter_funcs:\n fn()\n\n return self\n\n def generate_subcatalogs(self, template, defaults=None, parent_ids=None, **kwargs):\n \"\"\"Walks through the catalog and generates subcatalogs\n for items based on the template string. 
See :class:`~pystac.layout.LayoutTemplate`\n for details on the construction of template strings. This template string\n will be applied to the items, and subcatalogs will be created that separate\n and organize the items based on template values.\n\n Args:\n template (str): A template string that\n can be consumed by a :class:`~pystac.layout.LayoutTemplate`\n defaults (dict): Default values for the template variables\n that will be used if the property cannot be found on\n the item.\n parent_ids (List[str]): Optional list of the parent catalogs'\n identifiers. If the bottom-most subcatalags already match the\n template, no subcatalog is added.\n\n Returns:\n [catalog]: List of new catalogs created\n \"\"\"\n result = []\n parent_ids = parent_ids or list()\n parent_ids.append(self.id)\n for child in self.get_children():\n result.extend(\n child.generate_subcatalogs(template,\n defaults=defaults,\n parent_ids=parent_ids.copy()))\n\n layout_template = LayoutTemplate(template, defaults=defaults)\n\n items = list(self.get_items())\n for item in items:\n item_parts = layout_template.get_template_values(item)\n id_iter = reversed(parent_ids)\n if all(['{}'.format(id) == next(id_iter, None)\n for id in reversed(item_parts.values())]):\n # Skip items for which the sub-catalog structure already\n # matches the template. The list of parent IDs can include more\n # elements on the root side, so compare the reversed sequences.\n continue\n curr_parent = self\n for k, v in item_parts.items():\n subcat_id = '{}'.format(v)\n subcat = curr_parent.get_child(subcat_id)\n if subcat is None:\n subcat_desc = 'Catalog of items from {} with {} of {}'.format(\n curr_parent.id, k, v)\n subcat = pystac.Catalog(id=subcat_id, description=subcat_desc)\n curr_parent.add_child(subcat)\n result.append(subcat)\n curr_parent = subcat\n self.remove_item(item.id)\n curr_parent.add_item(item)\n\n return result\n\n def save(self, catalog_type=None):\n \"\"\"Save this catalog and all it's children/item to files determined by the object's\n self link HREF.\n\n Args:\n catalog_type (str): The catalog type that dictates the structure of\n the catalog to save. Use a member of :class:`~pystac.CatalogType`.\n If not supplied, the catalog_type of this catalog will be used.\n If that attribute is not set, an exception will be raised.\n\n Note:\n If the catalog type is ``CatalogType.ABSOLUTE_PUBLISHED``,\n all self links will be included, and link type will be set to ABSOLUTE.\n If the catalog type is ``CatalogType.RELATIVE_PUBLISHED``, this catalog's self\n link will be included, but no child catalog will have self links.\n Link types will be set to RELATIVE.\n If the catalog type is ``CatalogType.SELF_CONTAINED``, no self links will be\n included. 
Link types will be set to RELATIVE.\n\n Raises:\n ValueError: Raises if the catalog_type argument is not supplied and\n there is no catalog_type attribute on this catalog.\n \"\"\"\n catalog_type = catalog_type or self.catalog_type\n\n if catalog_type is None:\n raise ValueError('Must supply a catalog_type if one is not set on the catalog.')\n\n # Ensure relative vs absolute\n if catalog_type == CatalogType.ABSOLUTE_PUBLISHED:\n self.make_all_links_absolute()\n self.make_all_asset_hrefs_absolute()\n elif catalog_type in (CatalogType.SELF_CONTAINED, CatalogType.RELATIVE_PUBLISHED):\n self.make_all_links_relative()\n self.make_all_asset_hrefs_relative()\n else:\n raise ValueError(f'catalog_type is not a CatalogType: \"{catalog_type}\"')\n\n include_self_link = catalog_type in [\n CatalogType.ABSOLUTE_PUBLISHED, CatalogType.RELATIVE_PUBLISHED\n ]\n\n if catalog_type == CatalogType.RELATIVE_PUBLISHED:\n child_catalog_type = CatalogType.SELF_CONTAINED\n else:\n child_catalog_type = catalog_type\n\n items_include_self_link = catalog_type in [CatalogType.ABSOLUTE_PUBLISHED]\n\n for child_link in self.get_child_links():\n if child_link.is_resolved():\n child_link.target.save(catalog_type=child_catalog_type)\n\n for item_link in self.get_item_links():\n if item_link.is_resolved():\n item_link.target.save_object(include_self_link=items_include_self_link)\n\n self.save_object(include_self_link=include_self_link)\n\n self.catalog_type = catalog_type\n\n def walk(self):\n \"\"\"Walks through children and items of catalogs.\n\n For each catalog in the STAC's tree rooted at this catalog (including this catalog\n itself), it yields a 3-tuple (root, subcatalogs, items). The root in that\n 3-tuple refers to the current catalog being walked, the subcatalogs are any\n catalogs or collections for which the root is a parent, and items represents\n any items that have the root as a parent.\n\n This has similar functionality to Python's :func:`os.walk`.\n\n Returns:\n Generator[(Catalog, Generator[Catalog], Generator[Item])]: A generator that\n yields a 3-tuple (parent_catalog, children, items).\n \"\"\"\n children = self.get_children()\n items = self.get_items()\n\n yield (self, children, items)\n for child in self.get_children():\n yield from child.walk()\n\n def validate_all(self):\n \"\"\"Validates each catalog, collection contained within this catalog.\n\n Walks through the children and items of the catalog and validates each\n stac object.\n\n Raises:\n STACValidationError: Raises this error on any item that is invalid.\n Will raise on the first invalid stac object encountered.\n \"\"\"\n self.validate()\n for child in self.get_children():\n child.validate_all()\n for item in self.get_items():\n item.validate()\n\n def _object_links(self):\n return ['child', 'item'] + (pystac.STAC_EXTENSIONS.get_extended_object_links(self))\n\n def map_items(self, item_mapper):\n \"\"\"Creates a copy of a catalog, with each item passed through the\n item_mapper function.\n\n Args:\n item_mapper (Callable): A function that takes in an item, and returns either\n an item or list of items. 
The item that is passed into the item_mapper\n                is a copy, so the method can mutate it safely.\n\n        Returns:\n            Catalog: A full copy of this catalog, with items manipulated according\n                to the item_mapper function.\n        \"\"\"\n\n        new_cat = self.full_copy()\n\n        def process_catalog(catalog):\n            for child in catalog.get_children():\n                process_catalog(child)\n\n            item_links = []\n            for item_link in catalog.get_item_links():\n                item_link.resolve_stac_object(root=self.get_root())\n                mapped = item_mapper(item_link.target)\n                if mapped is None:\n                    raise Exception('item_mapper cannot return None.')\n                if type(mapped) is not list:\n                    item_link.target = mapped\n                    item_links.append(item_link)\n                else:\n                    for i in mapped:\n                        new_link = item_link.clone()\n                        new_link.target = i\n                        item_links.append(new_link)\n            catalog.clear_items()\n            catalog.add_links(item_links)\n\n        process_catalog(new_cat)\n        return new_cat\n\n    def map_assets(self, asset_mapper):\n        \"\"\"Creates a copy of a catalog, with each Asset for each Item passed\n        through the asset_mapper function.\n\n        Args:\n            asset_mapper (Callable): A function that takes in a key and an Asset, and returns\n                either an Asset, a (key, Asset), or a dictionary of Assets with unique keys.\n                The Asset that is passed into the asset_mapper is a copy, so the method can\n                mutate it safely.\n\n        Returns:\n            Catalog: A full copy of this catalog, with assets manipulated according\n                to the asset_mapper function.\n        \"\"\"\n        def apply_asset_mapper(tup):\n            k, v = tup\n            result = asset_mapper(k, v)\n            if result is None:\n                raise Exception('asset_mapper cannot return None.')\n            if isinstance(result, pystac.Asset):\n                return [(k, result)]\n            elif isinstance(result, tuple):\n                return [result]\n            else:\n                assets = list(result.items())\n                if len(assets) < 1:\n                    raise Exception('asset_mapper must return a non-empty list')\n                return assets\n\n        def item_mapper(item):\n            new_assets = [\n                x for result in map(apply_asset_mapper, item.assets.items()) for x in result\n            ]\n            item.assets = dict(new_assets)\n            return item\n\n        return self.map_items(item_mapper)\n\n    def describe(self, include_hrefs=False, _indent=0):\n        \"\"\"Prints out information about this Catalog and all contained\n        STACObjects.\n\n        Args:\n            include_hrefs (bool) - If True, print out each object's self link\n                HREF along with the object ID.\n        \"\"\"\n        s = '{}* {}'.format(' ' * _indent, self)\n        if include_hrefs:\n            s += ' {}'.format(self.get_self_href())\n        print(s)\n        for child in self.get_children():\n            child.describe(include_hrefs=include_hrefs, _indent=_indent + 4)\n        for item in self.get_items():\n            s = '{}* {}'.format(' ' * (_indent + 2), item)\n            if include_hrefs:\n                s += ' {}'.format(item.get_self_href())\n            print(s)\n\n    @classmethod\n    def from_dict(cls, d, href=None, root=None):\n        catalog_type = CatalogType.determine_type(d)\n\n        d = deepcopy(d)\n\n        id = d.pop('id')\n        description = d.pop('description')\n        title = d.pop('title', None)\n        stac_extensions = d.pop('stac_extensions', None)\n        links = d.pop('links')\n\n        d.pop('stac_version')\n\n        cat = Catalog(id=id,\n                      description=description,\n                      title=title,\n                      stac_extensions=stac_extensions,\n                      extra_fields=d,\n                      href=href,\n                      catalog_type=catalog_type)\n\n        for link in links:\n            if link['rel'] == 'root':\n                # Remove the link that's generated in Catalog's constructor.\n                cat.remove_links('root')\n\n            if link['rel'] != 'self' or href is None:\n                cat.add_link(Link.from_dict(link))\n\n        return 
cat\n","repo_name":"philvarner/pystac","sub_path":"pystac/catalog.py","file_name":"catalog.py","file_ext":"py","file_size_in_byte":30562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"88"} +{"seq_id":"9402432029","text":"#EXERCISE 5\n#now, our strategy is to load and read the .tsv dataset, extract only the columns we are interested in - locations, prices - and compute our results\nimport pandas as pd\n#we exploit pandas since it lets us select only those columns of the file we are interested in!\ndirectory = \"/home/valeriop/Scrivania/sapienza/DataMining/Homework1/\" #to be changed, depending on where you run the code!\nwith open(directory+\"dataset_kijiji.tsv\") as f:\n\tdf = pd.read_csv(f, sep='\\t', names=['links', 'titles', 'timestamps', 'locations', 'descriptions', 'prices'])\n\tlocations = df['locations'].to_dict()\n\tprices = df['prices'].to_dict()\n#now we merge them into a single dictionary\nimport locale\nlocale.setlocale(locale.LC_NUMERIC,\"it_IT.utf-8\") #BE CAREFUL: run locale -a command on your terminal and change this according to your environment!\nfor key in prices: #we want them to be float values\n\tif prices[key]!=\"Contatta l'utente\" and prices[key]!=\"prices\":\n\t\tprices[key]=locale.atof(prices[key][:-2])\n#now each euro price has been converted into a float number\nprint(locations)\nprint(prices)\noutput={}\n#now for each apartment we count the locations and sum all the prices for each location: 2617 announcements in dataset\nfor key in range(0, 2617): #this range should be changed according to the number of announcements we have in the dataset, if dataset also changes!\n\tif locations[key] not in output:\n\t\tif prices[key]!=\"Contatta l'utente\" and prices[key]!=\"prices\":\n\t\t\toutput[locations[key]]=(1, prices[key])\n\telif prices[key]!=\"Contatta l'utente\" and prices[key]!=\"prices\":\n\t\toutput[locations[key]]=(output[locations[key]][0]+1, output[locations[key]][1]+prices[key])\n\nprint(output)\n\nfor key in output:\n\toutput[key] = (output[key][0], output[key][1]/output[key][0])\n\nprint(\"\\nFINAL OUTCOME: {'LOCATION ': (NUMBER OF ANNOUNCEMENTS IN LOCATION, AVERAGE PRICE)}:\\n\")\n\nprint(output)\n","repo_name":"valgh/DataMiningHW","sub_path":"DM_Homework_1_1856471_Valerio_Trenta/exercise5_compare.py","file_name":"exercise5_compare.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"41854632380","text":"import numpy as np\nimport scipy.sparse as sparse\n\ndef compute_probabilities(X, theta, temp_parameter):\n \"\"\"\n Computes, for each data point X[i], the probability that X[i] is\n labeled as j for j = 0, 1, ..., k-1\n\n Args:\n X: (n, d) numpy array (n data points each with d features)\n theta: (k, d) numpy array where row j represents the parameters of the model for label j\n temp_parameter: the temperature parameter of softmax function\n Returns:\n h: (k, n) numpy array where each entry [i][j] is the probability that X[i] is labeled as j\n \"\"\"\n h = np.dot(theta, X.T)\n h = h / temp_parameter\n\n # get max value of each column\n c = h.max(axis=0)\n\n h = h - c\n\n h = np.exp(h)\n\n s = 1 / np.sum(h, axis=0)\n\n return s * h\n\n\ndef compute_cost_function(X, Y, theta, lambda_factor, temp_parameter):\n \"\"\"\n Computes the total cost for every data points\n\n Args:\n X: (n, d) numpy array (n data points each with d features)\n Y: (n, ) numpy array containing the labels (number from 0-9) for each data 
point\n        theta: (k, d) numpy array where row j represents the parameters of the model for label j\n        lambda_factor: the regularization constant\n        temp_parameter: the temperature parameter of softmax function\n    Returns:\n        c: the cost value\n    \"\"\"\n    h = compute_probabilities(X, theta, temp_parameter)\n\n    cost = 0\n    for i in range(X.shape[0]):\n        for j in range(theta.shape[0]):\n            if Y[i] == j:\n                cost += np.log(h[j, i])\n\n    cost = -cost / X.shape[0]\n\n    theta = np.power(theta, 2)\n\n    cost += lambda_factor / 2 * theta.sum()\n\n    return cost\n\n\ndef run_gradient_descent_iteration(X, Y, theta, alpha, lambda_factor, temp_parameter):\n    \"\"\"\n    Runs one step of batch gradient descent\n\n    Args:\n        X: (n, d) numpy array (n data points each with d features)\n        Y: (n, ) numpy array containing the labels (number from 0-9) for each data point\n        theta: (k, d) numpy array where row j represents the parameters of the model for label j\n        alpha: the learning rate\n        lambda_factor: the regularization constant\n        temp_parameter: the temperature parameter of softmax function\n    Returns:\n        theta: (k, d) numpy array that is the final value of parameter theta\n    \"\"\"\n    delta = sparse.coo_matrix(theta.shape).toarray()\n\n    h = compute_probabilities(X, theta, temp_parameter)\n\n    for j in range(delta.shape[0]):\n        y = Y\n        y = np.where(y != j, 0, 1)\n        p = y - h[j]\n\n        x = X.T * p\n        x = x.T\n        x = x.sum(axis=0)\n\n        grad = -x / (temp_parameter * X.shape[0]) + lambda_factor * theta[j]\n\n        delta[j] += grad\n\n    theta -= alpha * delta\n\n    return theta\n\n\ndef update_y(train_y, test_y):\n    \"\"\"\n    Changes the old digit labels for the training and test set to the new (mod 3) labels\n\n    Args:\n        train_y: 1D array containing labels of the training set\n        test_y: 1D array containing labels of the test set\n    Returns:\n        train_y_mod3: 1D array containing new labels (0-2) of the training set\n        test_y_mod3: 1D array containing new labels (0-2) of the test set\n    \"\"\"\n    train_y_mod3 = np.mod(train_y, 3)\n    test_y_mod3 = np.mod(test_y, 3)\n\n    return train_y_mod3, test_y_mod3\n\n\ndef softmax_error_mod3(X, Y, theta, temp_parameter):\n    \"\"\"\n    Computes the error on the new (mod 3) labels when the classifier predicts the digit\n\n    Args:\n        X: 2D numpy array representing data points that need to be classified\n        Y: 1D numpy array representing the labels (0-2) of data points\n        theta: 2D numpy array where row j represents the parameters of the model for label j\n        temp_parameter: temperature parameter of softmax function\n    Returns:\n        test error\n    \"\"\"\n    pred = predict(X, theta, temp_parameter)\n    pred = np.mod(pred, 3)\n\n    return 1 - np.mean(pred == Y)\n\ndef augment_feature_vector(X):\n    \"\"\"\n    Adds a feature with value 1 at the beginning of each data point\n\n    Args:\n        X: 2D numpy array representing data points\n    Returns:\n        X with an added feature for each data point\n    \"\"\"\n    column_of_ones = np.zeros([len(X), 1]) + 1\n\n    return np.hstack((column_of_ones, X))\n\n\ndef softmax_regression(X, Y, temp_parameter, alpha, lambda_factor, k, num_iterations):\n    \"\"\"\n    Runs batch gradient descent for a specified number of iterations on a dataset with theta\n    initialized to the all-zeros array.\n\n    Args:\n        X: (n, d) numpy array (n data points each with d features)\n        Y: (n, ) numpy array containing the labels (number from 0-9) for each data point\n        temp_parameter: the temperature parameter of softmax function\n        alpha: the learning rate\n        lambda_factor: the regularization constant\n        k: number of labels\n        num_iterations: number of iterations to run gradient descent\n    Returns:\n        theta: (k, d) 
numpy array that is the final value of parameter theta\n    \"\"\"\n    X = augment_feature_vector(X)\n    theta = np.zeros([k, X.shape[1]])\n    for i in range(num_iterations):\n        theta = run_gradient_descent_iteration(X, Y, theta, alpha, lambda_factor, temp_parameter)\n\n    return theta\n\n\ndef predict(X, theta, temp_parameter):\n    \"\"\"\n    Classifies the given dataset\n\n    Args:\n        X: 2D numpy array representing data points that need to be classified\n        theta: numpy array where row j represents the parameters of the model for label j\n        temp_parameter: temperature parameter of softmax function\n    Returns:\n        Y: 1D numpy array containing the predicted result\n    \"\"\"\n    X = augment_feature_vector(X)\n    probabilities = compute_probabilities(X, theta, temp_parameter)\n\n    return np.argmax(probabilities, axis=0)\n\n\ndef softmax_error(X, Y, theta, temp_parameter):\n    \"\"\"\n    Calculates error on test dataset\n\n    Args:\n        X: 2D numpy array representing data points that need to be classified\n        Y: 1D numpy array representing the true labels of data points\n        theta: numpy array where row j represents the parameters of the model for label j\n        temp_parameter: temperature parameter of softmax function\n    Returns:\n        test error\n    \"\"\"\n    pred = predict(X, theta, temp_parameter)\n\n    return 1 - np.mean(pred == Y)\n","repo_name":"myntvn/machine-learning","sub_path":"digit-recognition/softmax.py","file_name":"softmax.py","file_ext":"py","file_size_in_byte":6290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"18183759491","text":"from PyQt5.Qt import *\nfrom PyQt5.QtWidgets import QTableView, QAbstractItemView, QHeaderView, QListView\n\nfrom .tools import getTableContentFitWidth\nfrom .models import WorkersModel, DepartmentModel\n\nclass InfoTableView(QTableView):\n    def __init__(self, model=None):\n        QTableView.__init__(self)\n        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n        self.setSelectionBehavior(QAbstractItemView.SelectRows)\n        self.horizontalHeader().setSectionResizeMode(QHeaderView.Fixed)\n        self.verticalHeader().setSectionResizeMode(QHeaderView.Fixed)\n        if model is not None:\n            self.setModel(model)\n            self.setFitSize()\n\n    def setFitSize(self):\n        self.resizeColumnsToContents()\n        self.resizeRowsToContents()\n        self.setFixedWidth(getTableContentFitWidth(self, self.model().columnCount()))\n\n\nclass InfoListView(QListView):\n    def __init__(self, model):\n        QListView.__init__(self)\n        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n        self.setModel(model)\n","repo_name":"ilev4ik/schedule-simulation","sub_path":"lib/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"70672214689","text":"\"\"\"\nScript for parsing logs\n\"\"\"\n\nimport sys\nimport os\nsys.path.append(os.path.abspath('../utility'))\n\nimport json\nimport argparse\n\nfrom const import *\n\ndef parse_log(log_file):\n\n\tf = open(log_file, 'r')\n\n\tfor row in f:\n\t\tdata = json.loads(row)\n\n\t\tif data[TYPE] == STATISTIC and data[NODE_ID] == 2:\n\t\t\tprint(data[PAYLOAD])\n\n\tf.close()\n\nif __name__ == '__main__':\n\t\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"-f\",\"--log_file\", type=str, help=\"Path to the log file\", required=True )\n\targs = 
parser.parse_args()\n\n\tparse_log(args.log_file)","repo_name":"govindlahoti/TreeNN","sub_path":"logs/log_parser.py","file_name":"log_parser.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"} +{"seq_id":"15054485248","text":"class simulation:\n def __init__(\n self,\n opt,\n command_line,\n use_script,\n sim_script=None,\n top_entity=None,\n run_time=None,\n ):\n self.opt = opt\n self.command_line = command_line\n self.use_script = use_script\n self.sim_script = sim_script\n self.top_entity = top_entity\n self.run_time = run_time\n if self.use_script == True:\n if self.sim_script == None:\n print(\"No path for simulation script!\")\n exit()\n else:\n if self.run_time != None or self.top_entity != None:\n print(\n \"!!!Warning!!! Run time and top entity should be defined inside the simulation script.\\nValues defined in the configuration file will be ignored!\"\n )\n else:\n if self.top_entity == None:\n print(\"The top entity name is not defined!\")\n exit()\n\n def __str__(self):\n return f\"Simulation settings( optimizations = {self.opt}, command line = {self.command_line}, use script = {self.use_script}, sim script path = {self.sim_script}, top entity = {self.top_entity}, run time = {self.run_time})\"\n","repo_name":"StathisDi/Powershell_scripts","sub_path":"vsim/python/simulation_class.py","file_name":"simulation_class.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"1634091319","text":"import pygame\nfrom pygame.locals import *\nfrom sys import exit\nimport random\n\n\n#Definições Globais\nLTela = 640\nATelaT = 640\nATela = 480\nVERDE= (0, 100, 0)\nBRANCO = (255, 255, 255)\nVERMELHO = (255, 0, 0)\nPRETO = (0, 0, 0)\nfps = 10\nrelogio = pygame.time.Clock()\npontos = 0\nXc = 0\nYc = 0\n\npygame.font.init()\npygame.mixer.init()\nfonte = pygame.font.get_default_font()\nfontepontos = pygame.font.SysFont(fonte, 40, bold = True)\nfonteveloc = pygame.font.SysFont(fonte, 40, bold = True)\nfontegameover = pygame.font.SysFont(fonte, 60, bold = True)\nfontecontinuar = pygame.font.SysFont(fonte, 30, bold = False)\nsomcolide = pygame.mixer.Sound('Plank.ogg')\nsomveloc = pygame.mixer.Sound('bing.ogg')\n\n#Definições da Cobrinha\nSt = 20 #Snake tamanho\nSx = random.randrange(0, LTela, St) #Snake x\nSy = random.randrange(0, ATela, St) #Snake y\nSc = VERDE #Snake Cor\nSv = 20 #Snake velocidade\nSd = 0 #Snake direção\nSh = [] #Snake head\nSb = [] #Snake body\n\n#Definições da comida\nFt = 20 #Food tamanho\nFx = random.randrange(0, LTela, Ft) #Food x\nFy = random.randrange(0, ATela, Ft) #Food y\nFc = VERMELHO #Food cor\n\npygame.init()\ntela = pygame.display.set_mode((LTela, ATelaT))\npygame.display.set_caption('Snake')\nXc = Sv\n\nwhile True:\n\trelogio.tick(fps)\n\ttela.fill(BRANCO)\n\tTpontos = f\"Pontuação: {pontos}\"\n\tTvelocidade = f\"Velocidade: {fps}\"\n\tTgameover = \"Game Over\"\n\tTcontinuar = \"Tecle s para continuar ou qualquer outra para sair\"\n\ttexto = fontepontos.render(Tpontos, 1, PRETO)\n\ttexto2 = fonteveloc.render(Tvelocidade, 1, PRETO)\n\ttela.blit(texto, (20, 500))\n\ttela.blit(texto2, (20, 550))\n\t\n\tfor event in pygame.event.get():\n\t\tif event.type == QUIT:\n\t\t\tpygame.quit()\n\t\t\texit()\n\t\t\t\t\n\t\tif event.type == KEYDOWN:\n\t\t\tif event.key == K_UP:\n\t\t\t\tif Yc == Sv:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tYc = -Sv \t\t\n\t\t\t\t\tXc = 0\n\n\t\t\tif event.key == 
K_DOWN:\n\t\t\t\tif Yc == -Sv:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tYc = Sv \t\t\n\t\t\t\t\tXc = 0\n\t\t\t\n\t\t\tif event.key == K_RIGHT:\n\t\t\t\tif Xc == -Sv:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tXc = Sv \t\t\n\t\t\t\t\tYc = 0\n\t\t\t\t\n\t\t\tif event.key == K_LEFT:\n\t\t\t\tif Xc == Sv:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tXc = -Sv \t\t\n\t\t\t\t\tYc = 0\n\t\t\t\t\t\n\tSx = Sx + Xc\n\tSy = Sy + Yc\n\t\n\tif Sx > LTela:\n\t\tSx=0\n\tif Sx < 0:\n\t\tSx=LTela\n\tif Sy > ATela:\n\t\tSy=0\n\tif Sy < 0:\n\t\tSy=ATela\n\t\t\n\tif Sx == Fx and Sy == Fy:\n\t\tFx = random.randrange(0, LTela, Ft)\n\t\tFy = random.randrange(0, ATela, Ft)\n\t\tpygame.draw.rect(tela,Fc,(Fx,Fy,Ft,Ft))\n\t\tsomcolide.play()\n\t\tpontos += 1\n\t\tif pontos % 10 == 0:\n\t\t\tsomveloc.play()\n\t\t\tfps += 1\n\n\t\t\n\tSh = []\n\tSh.append(Sx)\n\tSh.append(Sy)\n\tSb.append(Sh)\n\t\n\tif Sb.count(Sh) > 1:\n\t\t\n\t\tSh = []\n\t\t\n\t\twhile Sh == []:\n\t\t\ttexto = fontepontos.render(Tgameover, 1, VERMELHO)\n\t\t\ttela.blit(texto, (240, 210))\n\t\t\ttexto2 = fontecontinuar.render(Tcontinuar, 1, VERDE)\n\t\t\ttela.blit(texto2, (70, 250))\n\t\t\t\t\t\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == QUIT:\n\t\t\t\t\tpygame.quit()\n\t\t\t\t\texit()\n\t\t\t\tif event.type == KEYDOWN:\n\t\t\t\t\tif event.key == K_s:\n\t\t\t\t\t\tpontos=0\n\t\t\t\t\t\tSh.append(Sx)\n\t\t\t\t\t\tSh.append(Sy)\n\t\t\t\t\t\tSb=[]\n\t\t\t\t\telse:\n\t\t\t\t\t\tpygame.quit()\n\t\t\t\t\t\texit()\n\t\t\tpygame.display.update()\n\t\n\t\n\t\t\n\t\n\tpygame.draw.rect(tela, Sc, (Sh[0], Sh[1], St, St))\n\t\t\n\tfor i in Sb:\n\t\tpygame.draw.rect(tela, Sc, (i[0], i[1], St, St))\n\t\t\n\tpygame.draw.rect(tela, Fc, (Fx,Fy,Ft,Ft))\n\t\t\t\n\tif len(Sb) > pontos:\n\t\tdel Sb[0]\n\t\n\tpygame.display.update()\n\t\n","repo_name":"MendesCJ/Snake","sub_path":"Snake.py","file_name":"Snake.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"71153093409","text":"#Definition of a function - specifying what the procedure is, what it does and how so that it can be later called/used (maybe multiple times)\n\n#Parameter - data given into/injected into your function\n#Default Parameter - assumed if nothing is passed as argument\ndef t_area(b=1,h=1):\n area = 0.5*h*b\n return area\n\n\n\n#Call to the function - make your program execute the function\ntotal = t_area() + t_area(h=5) + t_area(10,18)\nprint(f\"Total area of 3 triangles is {total}\")\n\nheight = float(input(\"Enter Height: \"))\nbase = float(input(\"Enter Base: \"))\nt_area(height, base)","repo_name":"p-jab/QHO426_June23","sub_path":"demos/W3_funcs.py","file_name":"W3_funcs.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"88"} +{"seq_id":"13370499447","text":"soma = 0\nvelho = 0\nmedia_m = 0\nfor i in range(0, 4):\n nome = input('Insira o nome: ')\n idade = int(input('Insira a idade: '))\n sexo = input('Informe o sexo(com \"m\" ou \"f\"): ')\n media_m = media_m + idade\n if sexo == 'm':\n if velho < idade:\n velho = idade\n aux = nome\n if sexo == 'f' and idade < 20:\n soma = soma + 1\nmedia = media_m // 4\nprint(f'A media de idade é {media}')\nprint(f'O homem mais velho é o {aux}')\nprint(f'Tem {soma} mulheres com menos de 20 
anos')\n","repo_name":"S-Linhares/python_testes","sub_path":"estudos_guanabara/desafio56.py","file_name":"desafio56.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"32858884948","text":"# Interview cake\n\nfrom linked_list import Linked_List\ndef check_linked_list_cycle(first_node):\n\t# \"\"\"Returns True if the linked list is a circular linked list, else\n\t# it returns False. \n\t# >>> check_linked_list_cycle(first_node)\n\t# False\n\t# \"\"\"\n\tll = Linked_List()\n\tfirst_node = ll.first_node()\n\tslow_runner = first_node\n\tfast_runner = first_node\n\n\twhile fast_runner != None and fast_runner.next != None:\n\t\tslow_runner = slow_runner.next\n\t\tfast_runner = fast_runner.next.next\n\n\t\tif fast_runner == slow_runner:\n\t\t\treturn True\n\treturn False\n\nif __name__ == \"__main__\":\n # import doctest\n # doctest.testmod()\n check_linked_list_cycle(9)","repo_name":"jkzilla/Algorithm_Fun","sub_path":"linked_list_cycles.py","file_name":"linked_list_cycles.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"71541847646","text":"#!/usr/bin/env python\n# Example analysis script, commented for clarity\n\nimport setupSUSY\n\n# Import core framework library and the onelepton library\nfrom libFrameworkSUSY import *\nfrom libWPol import *\n\n# Standard classes for running and Analysis\n# PSet is similar to the CMSSW PSet\n# Analysis wraps the manager class for our convenience\nfrom icf.core import PSet,Analysis\n\n# This is the default configuration\nfrom icf.config import defaultConfig\n\n# import default configuration\nconf=defaultConfig\n\n# We can override the configuration we imported\n# Here, we lower our Jet Et Cut to 20 GeV\n\nconf.Common.Jets.EtCut=20.0\nconf.Common.Jets.EtaCut=5.0\nconf.Common.Jets.EMFracCut=0.95\nconf.Common.Muons.EtaCut=2.1\nconf.Common.Muons.PtCut=0.0 #SET THIS 20.0\nconf.Common.Muons.TrkIsoCut=-1.0 #i.e. 
we are ignoring it\nconf.Common.Muons.CombIsoCut=0.1\nconf.Common.Electrons.PtCut=15.0 #SET THIS 15.0\nconf.Common.Electrons.EtaCut=3.0\nconf.Common.Electrons.TrkIsoCut=1.0\nconf.Common.Photons.EtCut=25.0\nconf.Common.Photons.IDReq=3\nconf.Common.ApplyXCleaning=True\n\n# -----------------------------------------------------------------------------\n# Cross-cleaning settings\n\nconf.XCleaning.Verbose=False\nconf.XCleaning.MuonJet=True\nconf.XCleaning.ElectronJet=True\nconf.XCleaning.PhotonJet=False\nconf.XCleaning.ResolveConflicts=False\n#conf.XCleaning.Jets.EtCut=0.\n#conf.XCleaning.Jets.EtaCut=1000.0\n#conf.XCleaning.Muons.PtCut=0.\n#conf.XCleaning.Muons.EtaCut=1000.\n#conf.XCleaning.Muons.TrkIsoCut=-1.\n#conf.XCleaning.Muons.CombIsoCut=6.\n#conf.XCleaning.Electrons.PtCut=0.0\n#conf.XCleaning.Electrons.EtaCut=1000.0\n#conf.XCleaning.Electrons.TrkIsoCut=6.0\n#conf.XCleaning.Electrons.CombIsoCut=0.2\n#conf.XCleaning.Photons.EtCut=0.0\n#conf.XCleaning.Photons.EtaCut=1000.0\n#conf.XCleaning.Photons.TrkIsoCut=9.\n#conf.XCleaning.Photons.AllIsoCut=0.2\n#conf.XCleaning.Photons.IDReq=2\n\n# Here we define a sample to use\n# The path and the filename are separate so you can easily move to using\n# local samples.\nrealdata=PSet(\n Name=\"realdatatest\",\n File=\"~/jaddisk2/realdata_minbias_goodcollv9_16.root\",\n Weight=1.0,\n Format=(\"ICF\",2),\n FirstEntry=0,\n LastEntry=0 #use 10 events for test of CS frame stuff\n)\n\nwjets=PSet(\n\tName=\"WJets-lite_madgraph_50_70_90_inf_1pb-1_mu20_21\",\n File=\"~/jaddisk3/ICFv2-lite/WJets_madgraph_lite.root\",\n CrossSection=241.70, #normalised to 10 pb^-1 (7TeV) = 2417.0\n Format=(\"ICF\",2),\n FirstEntry=0,\n LastEntry=0 #use 10 events for test of CS frame stuff\n)\n\nzjets=PSet(\n\tName=\"ZJets-lite_madgraph_50_70_90_inf_1pb-1_mu20_21_WSelection\",\n File=\"~/jaddisk3/ICFv2-lite/ZJets_madgraph_lite.root\",\n CrossSection=24.0, #normalised to 10 pb^-1 (7TeV) = 240.0\n Format=(\"ICF\",2),\n FirstEntry=0,\n LastEntry=0 #use 10 events for test of CS frame stuff\n)\n\nwmupythia=PSet(\n Name=\"WmuJets-lite_pythia_test\",\n File=\"~/jaddisk2/Wmunu_pythia_lite.root\",\n CrossSection=2417.0, #normalised to 10 pb^-1 (7TeV) = 2417.0\n Format=(\"ICF\",2),\n FirstEntry=0,\n LastEntry=0 #use 10 events for test of CS frame stuff\n)\n\nqcdjets=PSet(\n Name=\"QCDJets_Pythia80_50_75_100_WSelection\",\n File=\"~/jaddisk3/SUSYICFv2Ntuples/QCDJets_Pythia_80.root\",\n CrossSection=92380.0,\n Format=(\"ICF\",2),\n FirstEntry=0,\n LastEntry=0\n)\n\nqcdmu3050=PSet(\n\tName=\"QCDMu-lite_Pythia30to50_50_70_90_inf_1pb-1_mu20_21_WSelection\",\n\tFile=\"~/jaddisk2/QCD_pythia_30to50mu_lite.root\",\n\tCrossSection=6543.6, #normalised to 1pb-1 = 532000 * 0.0123\n\tFormat=(\"ICF\",2),\n\tFirstEntry=0,\n\tLastEntry=0 #total events = 6881160\n)\n\nqcdmu5080=PSet(\n Name=\"QCDMu-lite_Pythia50to80_50_70_90_inf_1pb-1_mu20_21_WSelection\",\n File=\"~/jaddisk2/QCD_pythia_50to80mu_lite.root\",\n CrossSection=1388.46, #normalised to 1pb-1 = 63400 * 0.0219\n Format=(\"ICF\",2),\n FirstEntry=0,\n LastEntry=0 #total events = 2307049\n)\n\nqcdmu80120=PSet(\n Name=\"QCDMu-lite_Pythia80to120_50_70_90_inf_1pb-1_mu20_21_WSelection\",\n File=\"~/jaddisk2/QCD_pythia_80to120mu_lite.root\",\n CrossSection=290.45, #normalised to 1pb-1 = 7850 * 0.037\n Format=(\"ICF\",2),\n FirstEntry=0,\n LastEntry=0 #total events = 516248\n)\n\nqcdmu120170=PSet(\n Name=\"QCDMu-lite_Pythia120to170_50_70_90_inf_1pb-1_mu20_21_WSelection\",\n File=\"~/jaddisk2/QCD_pythia_120to170mu_lite.root\",\n CrossSection=60.95, #normalised 
to 1pb-1 = 1150 * 0.053\n Format=(\"ICF\",2),\n FirstEntry=0,\n LastEntry=0\n)\n\nqcdmu170Inf=PSet(\n Name=\"QCDMu-lite_Pythia170toInf_50_70_90_inf_1pb-1_mu20_21_WSelection\",\n File=\"~/jaddisk2/QCD_pythia_170toInfmu_lite.root\",\n CrossSection=16.12, #normalised to 1pb-1 = 256 * 0.063\n Format=(\"ICF\",2),\n FirstEntry=0,\n LastEntry=0\n)\n\n# Create the analysis\na=Analysis(\"GenW\")\nb=Analysis(\"RecoW\")\nc=Analysis(\"Print\")\nd=Analysis(\"CSFrame\")\ne=Analysis(\"Reweight\")\nf=Analysis(\"GenW_CS\")\ng=Analysis(\"Reweight_CS\")\n\n# With the V2 code you can add linear sets of operations as before\n# Here we will use the Tree class to have branching operations\n# Create a Tree called Main\ntree = Tree(\"Main\")\ntreeb = Tree(\"Mainb\")\ntreec = Tree(\"Mainc\")\ntreed = Tree(\"Maind\")\ntreee = Tree(\"Maine\")\ntreef = Tree(\"Mainf\")\ntreeg = Tree(\"Maing\")\n# Initialise our operations\n\n# for debug info\nPrintDebug = Print_DebugInfo(\"\")\n\n#this is for the generator level plots\nCheckForW = MC_GenWExists()\nCheckForWMuon = MC_GenWMuonExists()\nCheckForWLepton = MC_GenWLeptonExists()\nWPTCut = MC_GenWPTCut(100.0)\nWPTCut50to75 = MC_GenWPTCut2(50.0, 75.0)\nWPTCut75to100 = MC_GenWPTCut2(75.0, 100.0)\nWPTCut0to50 = MC_GenWPTCut2(0.0,50.0)\nWPTCut50to100 = MC_GenWPTCut2(50.0,100.0)\nWPTCut0to100 = MC_GenWPTCut2(0.0,100.0)\nWPTCut100to200 = MC_GenWPTCut2(100.0,200.0)\nWPTCut90toinf = MC_GenWPTCut(90.0)\nWPTCut100toinf = MC_GenWPTCut(100.0)\nWPTCut200toinf = MC_GenWPTCut(200.0)\nWYCut0to225 = MC_GenWYCut(0.0,2.25)\nWY0Cutbin0 = MC_GenWYCut(0.0,0.5)\nWY0Cutbin1 = MC_GenWYCut(0.5,1.0)\nWY0Cutbin2 = MC_GenWYCut(1.0,1.5)\nWY0Cutbin3 = MC_GenWYCut(1.5,2.0)\nWY0Cutbin4 = MC_GenWYCut(2.0,2.5)\nWY0Cutbin5 = MC_GenWYCut(2.5,3.0)\nWY0Cutbin6 = MC_GenWYCut2(3.0)\nWY1Cutbin0 = MC_GenWYCut(0.0,0.5)\nWY1Cutbin1 = MC_GenWYCut(0.5,1.0)\nWY1Cutbin2 = MC_GenWYCut(1.0,1.5)\nWY1Cutbin3 = MC_GenWYCut(1.5,2.0)\nWY1Cutbin4 = MC_GenWYCut(2.0,2.5)\nWY1Cutbin5 = MC_GenWYCut(2.5,3.0)\nWY1Cutbin6 = MC_GenWYCut2(3.0)\nWY2Cutbin0 = MC_GenWYCut(0.0,0.5)\nWY2Cutbin1 = MC_GenWYCut(0.5,1.0)\nWY2Cutbin2 = MC_GenWYCut(1.0,1.5)\nWY2Cutbin3 = MC_GenWYCut(1.5,2.0)\nWY2Cutbin4 = MC_GenWYCut(2.0,2.5)\nWY2Cutbin5 = MC_GenWYCut(2.5,3.0)\nWY2Cutbin6 = MC_GenWYCut2(3.0)\n\nPlotWStuff0to100 = MC_WPlots(\"MC_WPlots_0to100\",\"W\")\nPlotWStuff0to50 = MC_WPlots(\"MC_WPlots_0to50\",\"W\")\nPlotWStuff100to200 = MC_WPlots(\"MC_WPlots_100to200\",\"W\")\nPlotWStuff200toinf = MC_WPlots(\"MC_WPlots_200toinf\",\"W\")\nPlotWStuff0toinf = MC_WPlots(\"MC_WPlots_0toinf\",\"W\")\nPlotWStuff50to75 = MC_WPlots(\"MC_WPlots_50to75\",\"W\")\nPlotWStuff75to100 = MC_WPlots(\"MC_WPlots_75to100\",\"W\")\nPlotWStuff100toinf = MC_WPlots(\"MC_WPlots_100toinf\",\"W\")\nPlotWStuff90toinfY225 = MC_WPlots(\"MC_WPlots_90toinf_Y225\",\"W\")\n#by PxYy we mean Pt bin x and Y bin y\nPlotWStuffP0Y0 = MC_WPlots(\"MC_WPlots_P0Y0\",\"Z\")\nPlotWStuffP0Y1 = MC_WPlots(\"MC_WPlots_P0Y1\",\"Z\")\nPlotWStuffP0Y2 = MC_WPlots(\"MC_WPlots_P0Y2\",\"Z\")\nPlotWStuffP0Y3 = MC_WPlots(\"MC_WPlots_P0Y3\",\"Z\")\nPlotWStuffP0Y4 = MC_WPlots(\"MC_WPlots_P0Y4\",\"Z\")\nPlotWStuffP0Y5 = MC_WPlots(\"MC_WPlots_P0Y5\",\"Z\")\nPlotWStuffP0Y6 = MC_WPlots(\"MC_WPlots_P0Y6\",\"Z\")\n\nPlotWStuffP1Y0 = MC_WPlots(\"MC_WPlots_P1Y0\",\"Z\")\nPlotWStuffP1Y1 = MC_WPlots(\"MC_WPlots_P1Y1\",\"Z\")\nPlotWStuffP1Y2 = MC_WPlots(\"MC_WPlots_P1Y2\",\"Z\")\nPlotWStuffP1Y3 = MC_WPlots(\"MC_WPlots_P1Y3\",\"Z\")\nPlotWStuffP1Y4 = MC_WPlots(\"MC_WPlots_P1Y4\",\"Z\")\nPlotWStuffP1Y5 = 
MC_WPlots(\"MC_WPlots_P1Y5\",\"Z\")\nPlotWStuffP1Y6 = MC_WPlots(\"MC_WPlots_P1Y6\",\"Z\")\n\nPlotWStuffP2Y0 = MC_WPlots(\"MC_WPlots_P2Y0\",\"Z\")\nPlotWStuffP2Y1 = MC_WPlots(\"MC_WPlots_P2Y1\",\"Z\")\nPlotWStuffP2Y2 = MC_WPlots(\"MC_WPlots_P2Y2\",\"Z\")\nPlotWStuffP2Y3 = MC_WPlots(\"MC_WPlots_P2Y3\",\"Z\")\nPlotWStuffP2Y4 = MC_WPlots(\"MC_WPlots_P2Y4\",\"Z\")\nPlotWStuffP2Y5 = MC_WPlots(\"MC_WPlots_P2Y5\",\"Z\")\nPlotWStuffP2Y6 = MC_WPlots(\"MC_WPlots_P2Y6\",\"Z\")\n\n#by PxYy we mean Pt bin x and Y bin y\n#setup for the CS frame stuff\nPlotCSWStuffP0Y0 = MC_CSFramePlots(\"MC_CSFramePlots_P0Y0\",\"W\")\nPlotCSWStuffP0Y1 = MC_CSFramePlots(\"MC_CSFramePlots_P0Y1\",\"W\")\nPlotCSWStuffP0Y2 = MC_CSFramePlots(\"MC_CSFramePlots_P0Y2\",\"W\")\nPlotCSWStuffP0Y3 = MC_CSFramePlots(\"MC_CSFramePlots_P0Y3\",\"W\")\nPlotCSWStuffP0Y4 = MC_CSFramePlots(\"MC_CSFramePlots_P0Y4\",\"W\")\nPlotCSWStuffP0Y5 = MC_CSFramePlots(\"MC_CSFramePlots_P0Y5\",\"W\")\nPlotCSWStuffP0Y6 = MC_CSFramePlots(\"MC_CSFramePlots_P0Y6\",\"W\")\n\nPlotCSWStuffP1Y0 = MC_CSFramePlots(\"MC_CSFramePlots_P1Y0\",\"W\")\nPlotCSWStuffP1Y1 = MC_CSFramePlots(\"MC_CSFramePlots_P1Y1\",\"W\")\nPlotCSWStuffP1Y2 = MC_CSFramePlots(\"MC_CSFramePlots_P1Y2\",\"W\")\nPlotCSWStuffP1Y3 = MC_CSFramePlots(\"MC_CSFramePlots_P1Y3\",\"W\")\nPlotCSWStuffP1Y4 = MC_CSFramePlots(\"MC_CSFramePlots_P1Y4\",\"W\")\nPlotCSWStuffP1Y5 = MC_CSFramePlots(\"MC_CSFramePlots_P1Y5\",\"W\")\nPlotCSWStuffP1Y6 = MC_CSFramePlots(\"MC_CSFramePlots_P1Y6\",\"W\")\n\nPlotCSWStuffP2Y0 = MC_CSFramePlots(\"MC_CSFramePlots_P2Y0\",\"W\")\nPlotCSWStuffP2Y1 = MC_CSFramePlots(\"MC_CSFramePlots_P2Y1\",\"W\")\nPlotCSWStuffP2Y2 = MC_CSFramePlots(\"MC_CSFramePlots_P2Y2\",\"W\")\nPlotCSWStuffP2Y3 = MC_CSFramePlots(\"MC_CSFramePlots_P2Y3\",\"W\")\nPlotCSWStuffP2Y4 = MC_CSFramePlots(\"MC_CSFramePlots_P2Y4\",\"W\")\nPlotCSWStuffP2Y5 = MC_CSFramePlots(\"MC_CSFramePlots_P2Y5\",\"W\")\nPlotCSWStuffP2Y6 = MC_CSFramePlots(\"MC_CSFramePlots_P2Y6\",\"W\")\n\n\nPlotCSFrame = MC_CSFramePlots(\"MC_CSFramePlots\",\"W\")\nWPTCut15to25 = MC_GenWPTCut2(15.0,25.0)\nPlotCSFrame15to25 = MC_CSFramePlots(\"MC_CSFramePlots_15to25\",\"W\")\nWPTCut25to35 = MC_GenWPTCut2(25.0,35.0)\nPlotCSFrame25to35 = MC_CSFramePlots(\"MC_CSFramePlots_25to35\",\"W\")\nWPTCut35to65 = MC_GenWPTCut2(35.0,65.0)\nPlotCSFrame35to65 = MC_CSFramePlots(\"MC_CSFramePlots_35to65\",\"W\")\nWPTCut65to105 = MC_GenWPTCut2(65.0,105.0)\nPlotCSFrame65to105 = MC_CSFramePlots(\"MC_CSFramePlots_65to105\",\"W\")\nPlotCSFrame0to50 = MC_CSFramePlots(\"MC_CSFramePlots_0to50\",\"W\")\nPlotCSFrame50to75 = MC_CSFramePlots(\"MC_CSFramePlots_50to75\",\"W\")\nPlotCSFrame75to100 = MC_CSFramePlots(\"MC_CSFramePlots_75to100\",\"W\")\nPlotCSFrame100toinf = MC_CSFramePlots(\"MC_CSFramePlots_100toinf\",\"W\")\nPlotCSFrame0toinf = MC_CSFramePlots(\"MC_CSFramePlots_0toinf\",\"Z\")\nPlotCSFrameRECO50to75 = RECO_CSFramePlots(\"RECO_CSFramePlots_50to75\",\"W\",True)\nPlotCSFrameRECO75to100 = RECO_CSFramePlots(\"RECO_CSFramePlots_75to100\",\"W\",True)\nPlotCSFrameRECO100toinf = RECO_CSFramePlots(\"RECO_CSFramePlots_100toinf\",\"W\",True)\nPlotCSFrameRECO50toinf = RECO_CSFramePlots(\"RECO_CSFramePlots_50toinf\",\"W\",True)\nPlotCSFrameRECO70toinf = RECO_CSFramePlots(\"RECO_CSFramePlots_70toinf\",\"W\",True)\nPlotCSFrameRECO90toinf = RECO_CSFramePlots(\"RECO_CSFramePlots_90toinf\",\"W\",True)\n\n#this is for the reconstruction level plots\n# Trigger Cut\n#TrigCut = OP_TriggerCut(10)\n# Lep comp.\nNumLeptons = OP_NumComLeptons(\"==\", 1)\nNumLeptons2 = OP_NumComLeptons(\"==\", 
2)\n# Mu comp.\nNumMuons = OP_NumComMuons(\"==\", 1)\nNumMuons2 = OP_NumComMuons(\"==\", 2)\n# Photon comp.\nNumPh = OP_NumComPhotons(\"==\", 0)\n# Good Jets Cut\nNumGoodJets = OP_NumComJets(\"<\", 4)\n# Odd cuts\nOddEli = OP_OddElectron()\nOddMui = OP_OddMuon()\nOddJeti = OP_OddJet()\nOddPhoti = OP_OddPhoton()\n# Bad muon in jet\nBadMui = OP_BadMuonInJet()\n# Photon killed jet\naPhotkilledJet = OP_PhotonKilledJet()\n# Minimum DeltaR(Muon,Jet) cut\nminDRMuonJetCut = RECO_MuonJetDRCut(0.5)\n# CommonMHT Cut\nCommMHTCut = RECO_CommonMHTCut(200.)\nCommMHTCut100 = RECO_CommonMHTCut(100.)\nCommMHTCut80 = RECO_CommonMHTCut(80.)\nCommMHTCut75 = RECO_CommonMHTCut(75.)\nCommMHTCut50 = RECO_CommonMHTCut(50.)\nCommMHTCut70 = RECO_CommonMHTCut(70.)\nCommMHTCut90 = RECO_CommonMHTCut(90.)\nPFMETCut50 = RECO_PFMETCut(50.)\nPFMETCut70 = RECO_PFMETCut(70.)\nPFMETCut90 = RECO_PFMETCut(90.)\n# PlotRECOPol stuff NoHTCut\n#PlotRecoPolStuffnoHtCut = RECO_PolPlots(\"RECO_PolPlotsNoHtCut\", \"W\", True)\n#PlotRecoPolStuffnoMHtCut = RECO_PolPlots(\"RECO_PolPlotsNoMHtCut\", \"W\", True)\n# CommonHT Cut\n#CommHTCut = RECO_CommonHTCut(350.)\n# PlotRECOPol stuff\nPlotRECO = RECO_PolPlots(\"RECO_PolPlots\", \"W\", False)\n\nZmassCut = RECO_2ndMuonMass(25.0, 91.2)\nCommMHTCut50to75 = RECO_CommonMHTCut2(50.0,75.0)\nCommMHTCut75to100 = RECO_CommonMHTCut2(75.0,100.0)\nMuMuPtCut50to75 = RECO_MuMuPtCut2(50.0,75.0)\nMuMuPtCut75to100 = RECO_MuMuPtCut2(75.0,100.0)\nMuMuPtCut100 = RECO_MuMuPtCut(100.0)\nPlotRECO50to75 = RECO_PolPlots(\"RECO_PolPlots_50to75\", \"W\", True)\nPlotRECO75to100 = RECO_PolPlots(\"RECO_PolPlots_75to100\", \"W\", True)\nPlotRECO100toinf = RECO_PolPlots(\"RECO_PolPlots_100toinf\", \"W\", True)\nPlotRECO50toinf = RECO_PolPlots(\"RECO_PolPlots_50toinf\", \"W\", True)\nPlotRECO70toinf = RECO_PolPlots(\"RECO_PolPlots_70toinf\", \"W\", True)\nPlotRECO75toinf = RECO_PolPlots(\"RECO_PolPlots_75toinf\", \"W\", True)\nPlotRECO90toinf = RECO_PolPlots(\"RECO_PolPlots_90toinf\", \"W\", True)\n\n#this is for print/debug info\nPrintMCParticleInfo = MC_PrintGenParticleInfo(\"FULL\")\n\n#for the Z-boson analysis\nCheckForZ = MC_GenZExists()\n\n# Add the tree to our analysis\na+=tree\ntree.Attach(CheckForW)\n#tree.TAttach(CheckForW, CheckForWLepton)\ntree.TAttach(CheckForW, CheckForWMuon)\n#tree.TAttach(CheckForWLepton, WPTCut100toinf)\ntree.TAttach(CheckForWMuon, WPTCut100toinf)\ntree.TAttach(WPTCut100toinf, PlotWStuff100toinf)\n\n#tree.TAttach(CheckForWLepton, WPTCut90toinf)\n#tree.TAttach(WPTCut90toinf, WYCut0to225)\n#tree.TAttach(WYCut0to225, PlotWStuff90toinfY225)\n\n#tree.TAttach(CheckForW,CheckForWMuon)\n#tree.TAttach(CheckForWMuon, PlotWStuff0toinf)\n\n#tree.TAttach(CheckForWMuon,WPTCut0to100)\n#tree.TAttach(WPTCut0to100,PlotWStuff0to100)\n#tree.TAttach(CheckForWMuon,WPTCut100to200)\n#tree.TAttach(WPTCut100to200,PlotWStuff100to200)\n#tree.TAttach(CheckForWMuon,WPTCut200toinf)\n#tree.TAttach(WPTCut200toinf,PlotWStuff200toinf)\n\n#tree.TAttach(CheckForWMuon,WPTCut)\n#tree.TAttach(WPTCut,PlotWStuff100toinf)\n\n#tree.TAttach(CheckForWMuon, WYCut0top2)\n#tree.TAttach(WYCut0top2, PlotWStuffY0top2)\n#tree.TAttach(CheckForWMuon, WYCut2to2p2)\n#tree.TAttach(WYCut2to2p2, PlotWStuffY2to2p2)\n\nb+=treeb\ntreeb.Attach(NumLeptons)#use this as the starting point when running over file with no generator bosons...\n#the recoW cut flow:\n#treeb.Attach(CheckForW)\n#treeb.TAttach(CheckForW, NumLeptons)\ntreeb.TAttach(NumLeptons, NumMuons)\ntreeb.TAttach(NumMuons, NumPh)\ntreeb.TAttach(NumPh, NumGoodJets)\ntreeb.TAttach(NumGoodJets, 
OddJeti)\ntreeb.TAttach(OddJeti, minDRMuonJetCut)\ntreeb.TAttach(minDRMuonJetCut, ZmassCut)\ntreeb.TAttach(ZmassCut, PlotRECO)\n#treeb.TAttach(ZmassCut, PFMETCut50)\n#treeb.TAttach(PFMETCut50, PlotCSFrameRECO50toinf)\n#treeb.TAttach(PFMETCut50, PlotRECO50toinf)\n#treeb.TAttach(ZmassCut, PFMETCut70)\n#treeb.TAttach(PFMETCut70, PlotCSFrameRECO70toinf)\n#treeb.TAttach(PFMETCut70, PlotRECO70toinf)\n#treeb.TAttach(ZmassCut, PFMETCut90)\n#treeb.TAttach(PFMETCut90, PlotCSFrameRECO90toinf)\n#treeb.TAttach(PFMETCut90, PlotRECO90toinf)\n\n#treeb.TAttach(ZmassCut, CommMHTCut50to75)\n#treeb.TAttach(CommMHTCut50to75, PlotCSFrameRECO50to75)\n#treeb.TAttach(CommMHTCut50to75, PlotRECO50to75)\n#treeb.TAttach(ZmassCut, CommMHTCut75to100)\n#treeb.TAttach(CommMHTCut75to100, PlotCSFrameRECO75to100)\n#treeb.TAttach(CommMHTCut75to100, PlotRECO75to100)\n#treeb.TAttach(ZmassCut, CommMHTCut100)\n#treeb.TAttach(CommMHTCut100, PlotCSFrameRECO100toinf)\n#treeb.TAttach(CommMHTCut100, PlotRECO100toinf)\n#the recoZ cut flow:\n#treeb.Attach(CheckForZ)\n#treeb.TAttach(CheckForZ, NumLeptons2)\n#treeb.TAttach(NumLeptons2, NumMuons2)\n#treeb.TAttach(NumMuons2, NumPh)\n#treeb.TAttach(NumPh, NumGoodJets)\n#treeb.TAttach(NumGoodJets, OddJeti)\n#treeb.TAttach(OddJeti, MuMuPtCut50to75)\n#treeb.TAttach(MuMuPtCut50to75, PlotCSFrameRECO50to75)\n#treeb.TAttach(MuMuPtCut50to75, PlotRECO50to75)\n#treeb.TAttach(OddJeti, MuMuPtCut75to100)\n#treeb.TAttach(MuMuPtCut75to100, PlotCSFrameRECO75to100)\n#treeb.TAttach(MuMuPtCut75to100, PlotRECO75to100)\n#treeb.TAttach(OddJeti,MuMuPtCut100)\n#treeb.TAttach(MuMuPtCut100, PlotCSFrameRECO100toinf)\n#treeb.TAttach(MuMuPtCut100, PlotRECO100toinf)\n\n#treeb.TAttach(ZmassCut, CommMHTCut50to75)\n#treeb.TAttach(CommMHTCut50to75, Plot50to75)\n#treeb.TAttach(ZmassCut, CommMHTCut75to100)\n#treeb.TAttach(CommMHTCut75to100, Plot75to100)\n#treeb.TAttach(ZmassCut, CommMHTCut100)\n#treeb.TAttach(CommMHTCut100, Plot100toinf)\n#treeb.TAttach(NumPh, OddEli)\n#treeb.TAttach(OddEli, OddMui)\n#treeb.TAttach(OddMui, OddJeti)\n#treeb.TAttach(OddJeti, OddPhoti)\n#treeb.TAttach(OddPhoti, BadMui)\n#treeb.TAttach(NumPh, BadMui)\n#treeb.TAttach(BadMui, aPhotkilledJet)\n#treeb.TAttach(aPhotkilledJet, minDRMuonJetCut)\n#treeb.TAttach(minDRMuonJetCut, PlotRecoPolStuffnoMHtCut)\n#treeb.TAttach(PlotRecoPolStuffnoMHtCut, CommMHTCut100)\n#treeb.TAttach(CommMHTCut100, PlotRecoPolStuffnoHtCut)\n\nc+=treec\ntreec.Attach(PrintMCParticleInfo)\n#treec.Attach(PrintDebug)\n\nd+=treed\ntreed.Attach(CheckForZ)\n#treed.Attach(CheckForW)\n #treed.TAttach(CheckForW, CheckForWMuon)\n#treed.TAttach(CheckForW, CheckForWLepton)\n\n#treed.TAttach(CheckForWMuon, WPTCut15to25)\n#treed.TAttach(WPTCut15to25, PlotCSFrame15to25)\n#treed.TAttach(CheckForWMuon, WPTCut25to35)\n#treed.TAttach(WPTCut25to35, PlotCSFrame25to35)\n#treed.TAttach(CheckForWMuon, WPTCut35to65)\n#treed.TAttach(WPTCut35to65, PlotCSFrame35to65)\n#treed.TAttach(CheckForWMuon, WPTCut65to105)\n#treed.TAttach(WPTCut65to105, PlotCSFrame65to105)\n\n#treed.TAttach(CheckForWMuon, WPTCut100toinf)\n#treed.TAttach(WPTCut100toinf, PlotCSFrame100toinf)\n\n#treed.TAttach(CheckForW, PlotCSFrame0toinf)\ntreed.TAttach(CheckForZ, PlotCSFrame0toinf)\n\ne+=treee\n#treee.Attach(CheckForW)\ntreee.Attach(CheckForZ)\n#treee.TAttach(CheckForW, CheckForWLepton)\n\n#treee.TAttach(CheckForW, WPTCut0to50)\ntreee.TAttach(CheckForZ, WPTCut0to50)\ntreee.TAttach(WPTCut0to50, WY0Cutbin0)\ntreee.TAttach(WY0Cutbin0, PlotWStuffP0Y0)\ntreee.TAttach(WPTCut0to50, WY0Cutbin1)\ntreee.TAttach(WY0Cutbin1, 
PlotWStuffP0Y1)\ntreee.TAttach(WPTCut0to50, WY0Cutbin2)\ntreee.TAttach(WY0Cutbin2, PlotWStuffP0Y2)\ntreee.TAttach(WPTCut0to50, WY0Cutbin3)\ntreee.TAttach(WY0Cutbin3, PlotWStuffP0Y3)\ntreee.TAttach(WPTCut0to50, WY0Cutbin4)\ntreee.TAttach(WY0Cutbin4, PlotWStuffP0Y4)\ntreee.TAttach(WPTCut0to50, WY0Cutbin5)\ntreee.TAttach(WY0Cutbin5, PlotWStuffP0Y5)\ntreee.TAttach(WPTCut0to50, WY0Cutbin6)\ntreee.TAttach(WY0Cutbin6, PlotWStuffP0Y6)\n\n#treee.TAttach(CheckForW, WPTCut50to100)\ntreee.TAttach(CheckForZ, WPTCut50to100)\ntreee.TAttach(WPTCut50to100, WY1Cutbin0)\ntreee.TAttach(WY1Cutbin0, PlotWStuffP1Y0)\ntreee.TAttach(WPTCut50to100, WY1Cutbin1)\ntreee.TAttach(WY1Cutbin1, PlotWStuffP1Y1)\ntreee.TAttach(WPTCut50to100, WY1Cutbin2)\ntreee.TAttach(WY1Cutbin2, PlotWStuffP1Y2)\ntreee.TAttach(WPTCut50to100, WY1Cutbin3)\ntreee.TAttach(WY1Cutbin3, PlotWStuffP1Y3)\ntreee.TAttach(WPTCut50to100, WY1Cutbin4)\ntreee.TAttach(WY1Cutbin4, PlotWStuffP1Y4)\ntreee.TAttach(WPTCut50to100, WY1Cutbin5)\ntreee.TAttach(WY1Cutbin5, PlotWStuffP1Y5)\ntreee.TAttach(WPTCut50to100, WY1Cutbin6)\ntreee.TAttach(WY1Cutbin6, PlotWStuffP1Y6)\n\n#treee.TAttach(CheckForW, WPTCut100toinf)\ntreee.TAttach(CheckForZ, WPTCut100toinf)\ntreee.TAttach(WPTCut100toinf, WY2Cutbin0)\ntreee.TAttach(WY2Cutbin0, PlotWStuffP2Y0)\ntreee.TAttach(WPTCut100toinf, WY2Cutbin1)\ntreee.TAttach(WY2Cutbin1, PlotWStuffP2Y1)\ntreee.TAttach(WPTCut100toinf, WY2Cutbin2)\ntreee.TAttach(WY2Cutbin2, PlotWStuffP2Y2)\ntreee.TAttach(WPTCut100toinf, WY2Cutbin3)\ntreee.TAttach(WY2Cutbin3, PlotWStuffP2Y3)\ntreee.TAttach(WPTCut100toinf, WY2Cutbin4)\ntreee.TAttach(WY2Cutbin4, PlotWStuffP2Y4)\ntreee.TAttach(WPTCut100toinf, WY2Cutbin5)\ntreee.TAttach(WY2Cutbin5, PlotWStuffP2Y5)\ntreee.TAttach(WPTCut100toinf, WY2Cutbin6)\ntreee.TAttach(WY2Cutbin6, PlotWStuffP2Y6)\n\ng+=treeg\ntreeg.Attach(CheckForW)\n#treeg.TAttach(CheckForW, CheckForWLepton)\n#treeg.Attach(CheckForZ)\n\ntreeg.TAttach(CheckForW, WPTCut0to50)\n#treeg.TAttach(CheckForZ, WPTCut0to50)\ntreeg.TAttach(WPTCut0to50, WY0Cutbin0)\ntreeg.TAttach(WY0Cutbin0, PlotCSWStuffP0Y0)\ntreeg.TAttach(WPTCut0to50, WY0Cutbin1)\ntreeg.TAttach(WY0Cutbin1, PlotCSWStuffP0Y1)\ntreeg.TAttach(WPTCut0to50, WY0Cutbin2)\ntreeg.TAttach(WY0Cutbin2, PlotCSWStuffP0Y2)\ntreeg.TAttach(WPTCut0to50, WY0Cutbin3)\ntreeg.TAttach(WY0Cutbin3, PlotCSWStuffP0Y3)\ntreeg.TAttach(WPTCut0to50, WY0Cutbin4)\ntreeg.TAttach(WY0Cutbin4, PlotCSWStuffP0Y4)\ntreeg.TAttach(WPTCut0to50, WY0Cutbin5)\ntreeg.TAttach(WY0Cutbin5, PlotCSWStuffP0Y5)\ntreeg.TAttach(WPTCut0to50, WY0Cutbin6)\ntreeg.TAttach(WY0Cutbin6, PlotCSWStuffP0Y6)\n\ntreeg.TAttach(CheckForW, WPTCut50to100)\n#treeg.TAttach(CheckForZ, WPTCut50to100)\ntreeg.TAttach(WPTCut50to100, WY1Cutbin0)\ntreeg.TAttach(WY1Cutbin0, PlotCSWStuffP1Y0)\ntreeg.TAttach(WPTCut50to100, WY1Cutbin1)\ntreeg.TAttach(WY1Cutbin1, PlotCSWStuffP1Y1)\ntreeg.TAttach(WPTCut50to100, WY1Cutbin2)\ntreeg.TAttach(WY1Cutbin2, PlotCSWStuffP1Y2)\ntreeg.TAttach(WPTCut50to100, WY1Cutbin3)\ntreeg.TAttach(WY1Cutbin3, PlotCSWStuffP1Y3)\ntreeg.TAttach(WPTCut50to100, WY1Cutbin4)\ntreeg.TAttach(WY1Cutbin4, PlotCSWStuffP1Y4)\ntreeg.TAttach(WPTCut50to100, WY1Cutbin5)\ntreeg.TAttach(WY1Cutbin5, PlotCSWStuffP1Y5)\ntreeg.TAttach(WPTCut50to100, WY1Cutbin6)\ntreeg.TAttach(WY1Cutbin6, PlotCSWStuffP1Y6)\n\ntreeg.TAttach(CheckForW, WPTCut100toinf)\n#treeg.TAttach(CheckForZ, WPTCut100toinf)\ntreeg.TAttach(WPTCut100toinf, WY2Cutbin0)\ntreeg.TAttach(WY2Cutbin0, PlotCSWStuffP2Y0)\ntreeg.TAttach(WPTCut100toinf, WY2Cutbin1)\ntreeg.TAttach(WY2Cutbin1, 
PlotCSWStuffP2Y1)\ntreeg.TAttach(WPTCut100toinf, WY2Cutbin2)\ntreeg.TAttach(WY2Cutbin2, PlotCSWStuffP2Y2)\ntreeg.TAttach(WPTCut100toinf, WY2Cutbin3)\ntreeg.TAttach(WY2Cutbin3, PlotCSWStuffP2Y3)\ntreeg.TAttach(WPTCut100toinf, WY2Cutbin4)\ntreeg.TAttach(WY2Cutbin4, PlotCSWStuffP2Y4)\ntreeg.TAttach(WPTCut100toinf, WY2Cutbin5)\ntreeg.TAttach(WY2Cutbin5, PlotCSWStuffP2Y5)\ntreeg.TAttach(WPTCut100toinf, WY2Cutbin6)\ntreeg.TAttach(WY2Cutbin6, PlotCSWStuffP2Y6)\n\nf+=treef\ntreef.Attach(CheckForW)\n#treef.TAttach(CheckForW, CheckForWLepton)\n#treef.Attach(CheckForZ)\n\ntreef.TAttach(CheckForW, WPTCut0to50)\n#treef.TAttach(CheckForZ, WPTCut0to50)\ntreef.TAttach(WPTCut0to50, PlotWStuff0to50)\ntreef.TAttach(WPTCut0to50, PlotCSFrame0to50)\n\ntreef.TAttach(CheckForW, WPTCut50to75)\n#treef.TAttach(CheckForZ, WPTCut50to75)\ntreef.TAttach(WPTCut50to75, PlotWStuff50to75)\ntreef.TAttach(WPTCut50to75, PlotCSFrame50to75)\n\ntreef.TAttach(CheckForW, WPTCut75to100)\n#treef.TAttach(CheckForZ, WPTCut75to100)\ntreef.TAttach(WPTCut75to100, PlotWStuff75to100)\ntreef.TAttach(WPTCut75to100, PlotCSFrame75to100)\n\ntreef.TAttach(CheckForW, WPTCut100toinf)\n#treef.TAttach(CheckForZ, WPTCut100toinf)\ntreef.TAttach(WPTCut100toinf, PlotWStuff100toinf)\ntreef.TAttach(WPTCut100toinf, PlotCSFrame100toinf)\n\n# Define the list of samples to run over\nw=[wjets]\nz=[zjets]\nq=[qcdjets]\nw2=[wmupythia]\nqmu=[qcdmu3050, qcdmu5080, qcdmu80120]#, qcdmu120170, qcdmu170Inf]\ndata=[realdata]\n#a.Run(\"results\",conf,z)\nb.Run(\"results\",conf,z)\n#c.Run(\"results\",conf,z)\n#d.Run(\"results\",conf,z)\n#e.Run(\"results\",conf,z)\n#f.Run(\"results\",conf,w2)\n#g.Run(\"results\",conf,w)\n","repo_name":"brynmathias/AnalysisV2","sub_path":"WPol2/scripts/old_scripts/WPol_ICF2.py","file_name":"WPol_ICF2.py","file_ext":"py","file_size_in_byte":23010,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"88"} +{"seq_id":"7904613131","text":"try: \n from .messages import HABITATS\nexcept ImportError:\n from messages import HABITATS\n\n\ndef main():\n print('Please enter the number of the habitat '\n 'you would like to view:')\n print('enter \"exit\" to exit the Zoo Keeper.')\n while True:\n command = input()\n if command == 'exit':\n break\n try:\n habitat_id = int(command)\n except ValueError:\n print('habitat id must be a number, please try again.')\n else:\n if 0 <= habitat_id < len(HABITATS):\n print(HABITATS[habitat_id])\n else:\n print('habitat id not found, please try again')\n print('See you later!')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"c-c-k/zzz--memento","sub_path":"project_stubs/python/zookeeper/zookeeper/zookeeper.py","file_name":"zookeeper.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"88"} +{"seq_id":"19270207519","text":"from pymanopt.solvers.solver import Solver\nimport time\nfrom copy import deepcopy\nfrom pymanopt.solvers.linesearch import LineSearchBackTracking\n\"\"\"\nAdapted from the Pymanopt toolbox\n\nCopyright (c) 2015-2016, Pymanopt Developers.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the 
documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of pymanopt nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n\nclass SteepestDescent(Solver):\n \"\"\"\n Steepest descent (gradient descent) algorithm based on\n steepestdescent.m from the manopt MATLAB package.\n \"\"\"\n\n def __init__(self, linesearch=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if linesearch is None:\n self._linesearch = LineSearchBackTracking()\n else:\n self._linesearch = linesearch\n self.linesearch = None\n\n # Function to solve optimisation problem using steepest descent.\n def solve(self, problem, x=None, reuselinesearch=False):\n \"\"\"\n Perform optimization using gradient descent with linesearch.\n This method first computes the gradient (derivative) of obj\n w.r.t. arg, and then optimizes by moving in the direction of\n steepest descent (which is the opposite direction to the gradient).\n Arguments:\n - problem\n Pymanopt problem setup using the Problem class, this must\n have a .manifold attribute specifying the manifold to optimize\n over, as well as a cost and enough information to compute\n the gradient of that cost.\n - x=None\n Optional parameter. Starting point on the manifold. If none\n then a starting point will be randomly generated.\n - reuselinesearch=False\n Whether to reuse the previous linesearch object. Allows to\n use information from a previous solve run.\n Returns:\n - x\n Local minimum of obj, or if algorithm terminated before\n convergence x will be the point at which it terminated.\n \"\"\"\n man = problem.manifold\n verbosity = problem.verbosity\n objective = problem.cost\n gradient = problem.grad\n\n if not reuselinesearch or self.linesearch is None:\n #print(\"copying old linesearch...\")\n self.linesearch = deepcopy(self._linesearch)\n linesearch = self.linesearch\n print(\"oldalpha is:\")\n print(linesearch._oldalpha)\n #rint(linesearch._oldf0)\n\n # If no starting point is specified, generate one at random.\n if x is None:\n x = man.rand()\n\n # Initialize iteration counter and timer\n iter = 0\n time0 = time.time()\n\n if verbosity >= 2:\n print(\" iter\\t\\t cost val\\t grad. 
norm\")\n\n self._start_optlog(extraiterfields=['gradnorm'],\n solverparams={'linesearcher': linesearch})\n\n while True:\n # Calculate new cost, grad and gradnorm\n print(\"entering descent loop...\")\n cost = objective(x)\n grad = gradient(x)\n gradnorm = man.norm(x, grad)\n iter = iter + 1\n\n if verbosity >= 2:\n print(\"%5d\\t%+.16e\\t%.8e\" % (iter, cost, gradnorm))\n\n if self._logverbosity >= 2:\n self._append_optlog(iter, x, cost, gradnorm=gradnorm)\n\n # Descent direction is minus the gradient\n desc_dir = -grad\n #print(\"descent direction:\")\n #print(desc_dir)\n\n # Perform line-search\n stepsize, x = linesearch.search(objective, man, x, desc_dir,\n cost, -gradnorm**2)\n\n stop_reason = self._check_stopping_criterion(\n time0, stepsize=stepsize, gradnorm=gradnorm, iter=iter)\n\n if stop_reason:\n if verbosity >= 1:\n print(stop_reason)\n print('')\n break\n\n if self._logverbosity <= 0:\n return x\n else:\n self._stop_optlog(x, objective(x), stop_reason, time0,\n stepsize=stepsize, gradnorm=gradnorm,\n iter=iter)\n return x, self._optlog\n","repo_name":"froec/BQonRDM","sub_path":"core/steepest_descent.py","file_name":"steepest_descent.py","file_ext":"py","file_size_in_byte":5594,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"88"} +{"seq_id":"2416684881","text":"import datetime as dt\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nimport indicators as ind\nimport marketsimcode as sim\n\n\ndef check_holdings(hold):\n try:\n if hold > 1000 or hold < -1000:\n return False\n else:\n return True\n except Exception as check_holdings_exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n print(f\"Exception in 'check_holdings'\", check_holdings_exception)\n\n\ndef determine_buy_sell_amount(holdings, buy_or_sell):\n try:\n if buy_or_sell == \"BUY\":\n if holdings == 0:\n return 1000\n elif holdings == -1000:\n return 2000\n else:\n return 0\n elif buy_or_sell == \"SELL\":\n if holdings == 0:\n return 1000\n elif holdings == 1000:\n return 2000\n else:\n return 0\n else:\n return 0\n except Exception as determine_buy_sell_amount_exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n print(f\"Exception in 'determine_buy_sell_amount'\", determine_buy_sell_amount_exception)\n\n\ndef find_optimal(stock_DF, symbol):\n try:\n total_holdings = 0\n columns = [\"Date\", \"Symbol\", \"Order\", \"Shares\"]\n order_df = pd.DataFrame(columns=columns)\n temp_DF = pd.DataFrame(index=stock_DF.index)\n temp_DF[\"Date\"] = stock_DF.index.values\n temp_DF[\"Adj_Close\"] = stock_DF[\"Adj Close\"]\n temp_DF[\"Next_Day_Adjusted_Close\"] = stock_DF[\"Adj Close\"].shift(-1)\n temp_DF[\"Diff\"] = temp_DF[\"Next_Day_Adjusted_Close\"] - temp_DF[\"Adj_Close\"]\n temp_DF[\"Order\"] = \"\"\n temp_DF[\"Position\"] = 0\n temp_DF.loc[temp_DF[\"Diff\"] > 0, \"Order\"] = \"BUY\"\n temp_DF.loc[temp_DF[\"Diff\"] < 0, \"Order\"] = \"SELL\"\n for i in range(temp_DF.shape[0]):\n if temp_DF.iloc[i][\"Order\"] == \"SELL\":\n if check_holdings(total_holdings):\n amount = determine_buy_sell_amount(total_holdings, \"SELL\")\n result = {\"Date\": temp_DF.iloc[i][\"Date\"], \"Symbol\": symbol,\n \"Order\": temp_DF.iloc[i][\"Order\"], \"Shares\": amount}\n order_df = order_df.append(result, ignore_index=True)\n 
total_holdings -= amount\n temp_DF.loc[temp_DF.iloc[i][\"Date\"], \"Position\"] = total_holdings\n else:\n continue\n elif temp_DF.iloc[i][\"Order\"] == \"BUY\":\n if check_holdings(total_holdings):\n amount = determine_buy_sell_amount(total_holdings, \"BUY\")\n result = {\"Date\": temp_DF.iloc[i][\"Date\"], \"Symbol\": symbol,\n \"Order\": temp_DF.iloc[i][\"Order\"], \"Shares\": amount}\n order_df = order_df.append(result, ignore_index=True)\n total_holdings += amount\n temp_DF.loc[temp_DF.iloc[i][\"Date\"], \"Position\"] = total_holdings\n else:\n continue\n return order_df\n except Exception as find_optimal_exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n print(f\"Exception in 'find_optimal'\", find_optimal_exception)\n\n\ndef testPolicy(symbol=\"JPM\", sd=dt.datetime(2008, 1, 1), ed=dt.datetime(2009, 12, 31), sv=100000,\n window=14, extra=\"Figure_\"):\n try:\n baseline_results = sim.get_baseline(symbol=symbol, sd=sd, ed=ed, sv=sv)\n plt.close(\"all\")\n all_data = ind.get_all_stock_data(symbol=symbol, sd=sd, ed=ed)\n results = ind.calculate_bollinger(ind.calculate_obv(all_data), window=window)\n results = ind.calculate_macd(ind.calculate_vortex(results, window=window))\n optimal_orders = find_optimal(results, symbol=symbol)\n optimal_prices_DF = sim.get_prices(symbols=[symbol], start_date=sd, end_date=ed)\n optimal_prices_DF = optimal_prices_DF[[symbol]] # remove SPY\n optimal_prices_DF[\"Cash\"] = np.ones(shape=(optimal_prices_DF.shape[0]))\n optimal_portval = sim.compute_portvals(orders_DF=optimal_orders, prices_DF=optimal_prices_DF,\n start_val=sv, commission=0, impact=0)\n fig, (ax1) = plt.subplots(1, 1, figsize=(10, 6))\n ax1.plot(baseline_results[\"PortVals\"] / baseline_results[\"PortVals\"][0],\n label=\"Baseline\", linewidth=1.15, color=\"tab:green\")\n ax1.plot(optimal_portval[\"PortVals\"] / optimal_portval[\"PortVals\"][0],\n label=\"Optimal\", linewidth=1.15, color=\"tab:red\")\n ax1.set_title(f\"Theoretically Optimal Strategy\\n{symbol}\", fontsize=15, weight='bold')\n ax1.set_ylabel(\"Performance\", fontsize=15, weight='heavy')\n ax1.legend(loc=\"best\", markerscale=1.1, frameon=True,\n edgecolor=\"black\", fancybox=True, shadow=True)\n ax1.set_xlabel(\"Trading Date\", fontsize=12, weight='heavy')\n plt.setp(ax1.get_xticklabels(), rotation=30, ha=\"right\",\n rotation_mode=\"anchor\")\n plt.tight_layout()\n plt.savefig(f\"{os.getcwd()}/{symbol}_Theoretically_Optimal_Strategy_better.png\")\n a = 8\n base_cumulative_returns = sim.get_cumulative_returns(baseline_results[\"PortVals\"])\n base_daily_returns = sim.get_daily_returns(baseline_results[\"PortVals\"])\n\n optimal_cumulative_returns = sim.get_cumulative_returns(optimal_portval[\"PortVals\"] )\n optimal_daily_returns = sim.get_daily_returns(optimal_portval[\"PortVals\"] )\n print(\"Baseline Statistics\")\n print(f\"\\tCumulative Return: {base_cumulative_returns}\")\n print(f\"\\tStandard Deviation of Daily Returns: {base_daily_returns.std()}\")\n print(f\"\\tMean of Daily Returns: {base_daily_returns.mean()}\")\n\n print(\"Theoretically Optimal Strategy Statistics\")\n print(f\"\\tCumulative Return: {optimal_cumulative_returns}\")\n print(f\"\\tStandard Deviation of Daily Returns: {optimal_daily_returns.std()}\")\n print(f\"\\tMean of Daily Returns: {optimal_daily_returns.mean()}\")\n return\n except Exception as testPolicy_exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = 
os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n print(f\"Exception in 'testPolicy'\", testPolicy_exception)\n\n\ndef author():\n \"\"\"\n :return: The GT username of the student\n :rtype: str\n \"\"\"\n return \"jadams334\" # Change this to your user ID\n\n\nif __name__ == '__main__':\n print()\n","repo_name":"repository101/Georgia-Tech","sub_path":"CS 7646 - Machine Learning for Trading/Project 6 - Indicator Evaluation/TheoreticallyOptimalStrategy.py","file_name":"TheoreticallyOptimalStrategy.py","file_ext":"py","file_size_in_byte":6968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"33604308234","text":"from instrument import Instrument, NullInstrument\nimport struct\nimport sys\n\nGENMIDI_HEADER = \"#OPL_II#\"\nNUM_INSTRUMENTS = 175\nINSTR_DATA_LEN = 36\nINSTR_NAME_LEN = 32\n\nFLAG_FIXED_PITCH = 0x0001\nFLAG_TWO_VOICE = 0x0004\n\nKSL_MASK = 0xc0\nVOLUME_MASK = 0x3f\n\n# Order of fields in GENMIDI data structures.\n\nGENMIDI_FIELDS = [\n\t\"m_am_vibrato_eg\",\n\t\"m_attack_decay\",\n\t\"m_sustain_release\",\n\t\"m_waveform\",\n\t\"m_ksl\",\n\t\"m_volume\",\n\t\"feedback_fm\",\n\t\"c_am_vibrato_eg\",\n\t\"c_attack_decay\",\n\t\"c_sustain_release\",\n\t\"c_waveform\",\n\t\"c_ksl\",\n\t\"c_volume\",\n\t\"null\",\n\t\"note_offset\"\n]\n\n# Encode a single voice of an instrument to binary.\n\ndef encode_voice(data, offset):\n\tresult = dict(data)\n\n\tresult[\"m_ksl\"] = data[\"m_ksl_volume\"] & KSL_MASK\n\tresult[\"m_volume\"] = data[\"m_ksl_volume\"] & VOLUME_MASK\n\tresult[\"c_ksl\"] = data[\"c_ksl_volume\"] & KSL_MASK\n\tresult[\"c_volume\"] = data[\"c_ksl_volume\"] & VOLUME_MASK\n\n\tresult[\"null\"] = 0\n\tresult[\"note_offset\"] = offset\n\n\treturn struct.pack(\" bool:\n return False\n\n def display_state(self):\n self.interface.show()\n\n def update_state(self, move):\n self.player.move(move)\n self.update_board()\n\n def play(self):\n # game loop - infinite\n while self.playing:\n self.display_state()\n move = self.interface.get_user_input()\n self.update_state(move)\n\n\ng = Game()\ng.play()\n","repo_name":"kevinelong/intro_to_python","sub_path":"video_games.py","file_name":"video_games.py","file_ext":"py","file_size_in_byte":5879,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"88"} +{"seq_id":"43284652521","text":"from sys import stderr\nfrom re import search, IGNORECASE\nfrom urllib.parse import quote_plus\nfrom genefab._util import get_json, date2stamp\nfrom genefab._util import FFIELD_ALIASES, FFIELD_VALUES, API_ROOT, GENELAB_ROOT\nfrom genefab._util import DELIM_DEFAULT, STORAGE_PREFIX\nfrom genefab._exceptions import GeneLabJSONException\nfrom genefab._assay import AssayDispatcher\nfrom pandas import DataFrame, concat\nfrom os.path import join\n\n\nclass GeneLabDataSet():\n \"\"\"Stores GLDS metadata associated with an accession number\"\"\"\n accession, assays, storage = None, None, None\n verbose = False\n\n def __init__(self, accession, verbose=False, storage_prefix=STORAGE_PREFIX, index_by=\"Sample Name\", name_delim=DELIM_DEFAULT):\n \"\"\"Request JSON representation of ISA metadata and store fields\"\"\"\n self.accession = accession\n self.verbose = verbose\n self.storage = join(storage_prefix, accession)\n data_json = get_json(\n \"{}/data/study/data/{}/\".format(API_ROOT, accession), self.verbose\n )\n if len(data_json) == 0:\n raise GeneLabJSONException(\"Invalid JSON (GLDS does not exist?)\")\n if 
len(data_json) > 1:\n raise GeneLabJSONException(\"Invalid JSON, too many sections\")\n self._json = data_json[0]\n try:\n self.internal_id = self._json[\"_id\"]\n self.metadata_id = self._json[\"metadata_id\"]\n if len(self._json[\"foreignFields\"]) != 1:\n raise NotImplementedError(\"Multiple foreignFields\")\n self._isa2json = self._json[\"foreignFields\"][0][\"isa2json\"]\n self._info = self._isa2json[\"additionalInformation\"]\n for field in \"description\", \"samples\", \"ontologies\", \"organisms\":\n setattr(self, field, self._info[field])\n except KeyError:\n error_message = \"Malformed JSON ({})\".format(self.accession)\n raise GeneLabJSONException(error_message)\n self.assays = AssayDispatcher(\n parent=self, json=self._info[\"assays\"], storage_prefix=self.storage,\n name_delim=name_delim, glds_file_urls=self.get_files_info(\"urls\"),\n index_by=index_by, glds_file_dates=self.get_files_info(\"dates\")\n )\n\n @property\n def factors(self):\n \"\"\"List factors\"\"\"\n return [fi[\"factor\"] for fi in self.description[\"factors\"]]\n\n @property\n def summary_dataframe(self):\n \"\"\"List factors, assay names and types\"\"\"\n assays_df = self.assays.summary_dataframe.copy()\n assays_df[\"type\"] = \"assay\"\n factors_df = DataFrame(\n columns=[\"type\", \"name\", \"factors\"],\n data=[\n [\"dataset\", self.accession, factor]\n for factor in self.factors\n ]\n )\n return concat([factors_df, assays_df], axis=0, sort=False)\n\n def get_files_info(self, kind=\"urls\"):\n \"\"\"Get filenames and associated URLs\"\"\"\n if self.accession is None:\n raise ValueError(\"Uninitialized GLDS instance\")\n elif kind == \"urls\":\n getter_url = \"{}/data/glds/files/{}\"\n acc_nr = search(r'\\d+$', self.accession).group()\n files_json = get_json(\n getter_url.format(API_ROOT, acc_nr), self.verbose\n )\n try:\n filedata = files_json[\"studies\"][self.accession][\"study_files\"]\n except KeyError:\n raise GeneLabJSONException(\"Malformed JSON\")\n return {\n fd[\"file_name\"]: GENELAB_ROOT+fd[\"remote_url\"]\n for fd in filedata\n }\n elif kind == \"dates\":\n getter_url = \"{}/data/study/filelistings/{}\"\n filedata = get_json(\n getter_url.format(API_ROOT, self.internal_id), self.verbose\n )\n return {fd[\"file_name\"]: date2stamp(fd) for fd in filedata}\n else:\n raise ValueError(\"Unrecognized parameter: '{}'\".format(kind))\n\n\ndef get_ffield_matches(verbose=False, **ffield_kwargs):\n \"\"\"Expand passed regexes to all matching ffield values\"\"\"\n for ffield_alias, ffregex in ffield_kwargs.items():\n if verbose:\n print(\"looking up\", ffield_alias, end=\"(s): \", file=stderr)\n if ffield_alias in FFIELD_ALIASES:\n ffield = FFIELD_ALIASES[ffield_alias]\n else:\n raise IndexError(\"Unrecognized field: \" + ffield_alias)\n for ffvalue in FFIELD_VALUES[ffield]:\n if search(ffregex, ffvalue, IGNORECASE):\n if verbose:\n print('\"{}\"'.format(ffvalue), end=\", \", file=stderr)\n yield ffield, ffvalue\n if verbose:\n print(\"\\b\", file=stderr)\n\n\ndef get_datasets(maxcount=\"25\", storage=STORAGE_PREFIX, verbose=False, onerror=\"warn\", **ffield_kwargs):\n \"\"\"Match passed regexes and combine into search URL, get JSON and parse for accessions\"\"\"\n url_lead_components = [\n API_ROOT+\"/data/search/?term=GLDS\", \"type=cgene\", \"size=\"+str(maxcount)\n ]\n url_ffield_components = [\n \"ffield={}&fvalue={}\".format(ffield, quote_plus(ffvalue))\n for ffield, ffvalue\n in get_ffield_matches(verbose=verbose, **ffield_kwargs)\n ]\n url = \"&\".join(url_lead_components + 
url_ffield_components)\n    try:\n        json = get_json(url, verbose=verbose)[\"hits\"][\"hits\"]\n    except:\n        raise GeneLabJSONException(\"Unrecognized JSON structure\")\n    datasets = []\n    for hit in json:\n        try:\n            datasets.append(\n                GeneLabDataSet(\n                    hit[\"_id\"], storage_prefix=storage, verbose=verbose\n                )\n            )\n        except Exception as e:\n            if onerror == \"ignore\":\n                pass\n            elif onerror == \"warn\":\n                msgmask = \"Warning: Could not process {} due to error:\"\n                print(msgmask.format(hit[\"_id\"]), e, file=stderr)\n            else:\n                raise\n    return datasets\n","repo_name":"Tubbz-alt/genefab","sub_path":"genefab/_dataset.py","file_name":"_dataset.py","file_ext":"py","file_size_in_byte":5982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"13255114592","text":"\"\"\" fspsParameters.py -- A file to help determine the correct FSPS parameters to use.\n\nBenjamin Rose\nbrose3@nd.edu\nbenjamin.rose@me.com\nUniversity of Notre Dame\n2017-01-20\nPython 3\n\"\"\"\nimport numpy as np\nimport fsps\n\ndef test_compute_vega_mags():\n    \"\"\"\n    Does explicitly turning off `compute_vega_mags` matter in the output of fsps?\n\n    # Parameters\n\n    sp : fsps.StellarPopulation\n        An instance of the FSPS StellarPopulation class. \n    \"\"\"\n    filters = ['sdss_u', 'sdss_g', 'sdss_r', 'sdss_i', 'sdss_z']\n    age = 13.7\n\n    #somewhere someone said to only have one instance of \n    #`fsps.StellarPopulation()` but to test the defaults I need a clean \n    #instance.\n    sp = fsps.StellarPopulation()\n\n    # test default (should be `False`)\n    mags1 = sp.get_mags(tage=age, bands=filters)\n\n    #test like what many do (set to `False`)\n    #We have to reinitialize, because `compute_vega_mags` can only be changed \n    #at initiation.\n    sp = fsps.StellarPopulation(compute_vega_mags=False)\n    mags2 = sp.get_mags(tage=age, bands=filters)\n\n    #compare to the millimag (ish)\n    #could use `np.allclose` if I want to drop this down to a single boolean\n    return np.isclose(mags1, mags2)\n\ndef test_add_neb_emission():\n    \"\"\"\n    This function shows the difference made by turning on nebular emissions. 
We \n    expect that for young stars we can get a *** r-i slope with nebular \n    emissions on, but every other case (old-with/without, young/without) does \n    not.\n    \"\"\"\n    # set up basics\n    sdss_bands = ['sdss_u', 'sdss_g', 'sdss_r', 'sdss_i', 'sdss_z']\n    sp = fsps.StellarPopulation(zcontinuous=2, logzsol=0, dust1=1.0, dust2=0.5,\n                                cloudy_dust=True, sfh=4)\n\n    # create SED for old stellar population-without emission\n    sp.params['tau'] = 10.0\n    sp.params['sf_start'] = 6.4\n    #set age to a redshift of 0.2\n    oldwo = sp.get_mags(tage=11.4, redshift=0.2, bands=sdss_bands)\n\n    # create SED for old stellar population-with emission\n    sp.params['add_neb_emission'] = True\n    oldw = sp.get_mags(tage=11.4, redshift=0.2, bands=sdss_bands)\n\n    # create SED for young stellar population-without emission\n    sp.params['add_neb_emission'] = False\n    # sp.params['tau'] = 10.0\n    sp.params['sf_start'] = 11\n    youngwo = sp.get_mags(tage=11.4, redshift=0.2, bands=sdss_bands)\n\n    # create SED for young stellar population-with emission\n    sp.params['add_neb_emission'] = True\n    youngw = sp.get_mags(tage=11.4, redshift=0.2, bands=sdss_bands)\n\n    # create SED for youngest stellar population-without emission\n    sp.params['add_neb_emission'] = False\n    # sp.params['tau'] = 10.0\n    sp.params['sf_start'] = 11.35\n    youngestwo = sp.get_mags(tage=11.4, redshift=0.2, bands=sdss_bands)\n\n    # create SED for youngest stellar population-with emission\n    sp.params['add_neb_emission'] = True\n    youngestw = sp.get_mags(tage=11.4, redshift=0.2, bands=sdss_bands)\n\n    #scale them so they all have the same r-band mag (of 21 \"mags\")\n    #this simulates the best fit that determines the total stellar mass.\n    def correctSED(SED):\n        delta = 21 - SED[2]\n        return SED + delta\n    oldwo = correctSED(oldwo)\n    oldw = correctSED(oldw)\n    youngwo = correctSED(youngwo)\n    youngw = correctSED(youngw)\n    youngestwo = correctSED(youngestwo)\n    youngestw = correctSED(youngestw)\n\n    # plot \n    import matplotlib.pyplot as plt\n    import seaborn as sns\n    fig = plt.figure('neb emission test')\n    ax = fig.add_subplot(111)\n    # x = [0,1,2,3,4]\n    x = [3551, 4686, 6166, 7480, 8932]\n    plt.plot(x, oldwo, label='5 Gyr - no emission')\n    plt.plot(x, oldw, '--', label='5 Gyr - emission')\n    plt.plot(x, youngwo, '-', label='400 Myr - no emission')\n    plt.plot(x, youngw, '-.', label='400 Myr - emission')\n    plt.plot(x, youngestwo, '--', label='50 Myr - no emission')\n    plt.plot(x, youngestw, ':', label='50 Myr - emission')\n    plt.xticks(x)\n    ax.set_xticklabels(['u','g','r','i','z'])\n    # ax.set_yticklabels(['u','g','r','i','z'])\n    plt.ylabel('theoretical magnitudes')\n    plt.gca().invert_yaxis()\n    plt.legend(loc=4)\n    plt.savefig('figures/add_neb_emission.pdf')\n    plt.show()\n\n    print(oldwo)\n    print(oldw)\n    print(youngwo)\n    print(youngw)\n    print(youngestwo)\n    print(youngestw)\n    return None\n\ndef test_neg_ri_color(NebEmission=False):\n    \"\"\"\n    Tried to see if the r-i color was age only or just nebular emission.\n    Saves a figure to either `figures/oldVSyoung.pdf` or `figures/oldVSyoung_withNebEmission.pdf` depending on the input parameter.\n\n    Was helpful for determining if negative r-i color was from age alone or \n    needed `add_neb_emission`.\n\n    Parameters \n    ----------\n    NebEmission : bool\n        Determines if it outputs a figure of varying ages with or without emission turned on. 
\n \"\"\"\n sdss_bands = ['sdss_u', 'sdss_g', 'sdss_r', 'sdss_i', 'sdss_z']\n sp = fsps.StellarPopulation(zcontinuous=2, logzsol=0, dust1=1.0, dust2=0.5,\n cloudy_dust=True, sfh=4)\n if NebEmission:\n sp.params['add_neb_emission'] = True\n\n # create SED for oldest stellar\n sp.params['tau'] = 0.5\n sp.params['sf_start'] = 1\n #set age to a redshift of 0.2\n oldest = sp.get_mags(tage=11.4, redshift=0.2, bands=sdss_bands)\n\n # create SED for old stellar\n sp.params['tau'] = 2\n sp.params['sf_start'] = 4\n #set age to a redshift of 0.2\n old = sp.get_mags(tage=11.4, redshift=0.2, bands=sdss_bands)\n\n # create SED for young stellar\n sp.params['tau'] = 5\n sp.params['sf_start'] = 7\n #set age to a redshift of 0.2\n young = sp.get_mags(tage=11.4, redshift=0.2, bands=sdss_bands)\n\n # create SED for younger stellar\n sp.params['tau'] = 10\n sp.params['sf_start'] = 11\n #set age to a redshift of 0.2\n younger = sp.get_mags(tage=11.4, redshift=0.2, bands=sdss_bands)\n\n # create SED for youngest stellar\n sp.params['tau'] = 10\n sp.params['sf_start'] = 11.35\n #set age to a redshift of 0.2\n youngest = sp.get_mags(tage=11.4, redshift=0.2, bands=sdss_bands)\n\n def correctSED(SED):\n delta = 45 - SED[2]\n return SED + delta\n oldest = correctSED(oldest)\n old = correctSED(old)\n young = correctSED(young)\n younger = correctSED(younger)\n youngest = correctSED(youngest)\n\n # plot \n import matplotlib.pyplot as plt\n import seaborn as sns\n fig = plt.figure('neb emission test')\n ax = fig.add_subplot(111)\n # x = [0,1,2,3,4]\n x = [3551, 4686, 6166, 7480, 8932]\n plt.plot(x, oldest, label='oldest - 10.5 Gyr')\n plt.plot(x, old, '-.', label='old - 7.4 Gyr')\n plt.plot(x, young, ':', label='young - 4.4 Gyr')\n plt.plot(x, younger, ':', label='younger - 400 Myr')\n plt.plot(x, youngest, '--', label='youngest - 50 Myr')\n plt.xticks(x)\n ax.set_xticklabels(['u','g','r','i','z'])\n plt.gca().invert_yaxis()\n plt.legend(loc=4)\n if NebEmission:\n plt.savefig('figures/oldVSyoung_withNebEmission.pdf')\n else:\n plt.savefig('figures/oldVSyoung.pdf')\n plt.show()","repo_name":"benjaminrose/MC-Age","sub_path":"fspsParameters.py","file_name":"fspsParameters.py","file_ext":"py","file_size_in_byte":7009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"} +{"seq_id":"1002455176","text":"from src.utils import *\n\ndef answer_test(args):\n \"\"\"\n Answer a test from a file with the following format:\n