code
stringlengths
22
1.05M
apis
listlengths
1
3.31k
extract_api
stringlengths
75
3.25M
# coding=utf-8 import tensorflow as tf v = tf.Variable(0, dtype=tf.float32, name='v3') # 在没有声明滑动平均模型时只有一个变量v,所以下面的语句只会输出v:0 for variables in tf.global_variables(): print(variables.name) ema = tf.train.ExponentialMovingAverage(0.99) # 加入命名空间中 maintain_averages_op = ema.apply(tf.global_variables()) # 在申明滑动平均模型之后,TensorFlow会自动生成一个影子变量 # v/ExponentialMovingAverage。于是下面的语句输出 # v:0 和 v/ExponentialMovingAverage:0 for variables in tf.global_variables(): print(variables.name) saver = tf.train.Saver() with tf.Session() as sess: init_op = tf.global_variables_initializer() sess.run(init_op) sess.run(tf.assign(v, 10)) sess.run(maintain_averages_op) # 保存时候会将v0, v/ExponentialMovingAverage:0 这两个变量保存下来 saver.save(sess, "Saved_model/model2.ckpt") print(sess.run([v, ema.average(v)]))
[ "tensorflow.Variable", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.global_variables", "tensorflow.global_variables_initializer", "tensorflow.assign", "tensorflow.train.ExponentialMovingAverage" ]
[((44, 87), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'dtype': 'tf.float32', 'name': '"""v3"""'}), "(0, dtype=tf.float32, name='v3')\n", (55, 87), True, 'import tensorflow as tf\n'), ((143, 164), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (162, 164), True, 'import tensorflow as tf\n'), ((199, 238), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', (['(0.99)'], {}), '(0.99)\n', (232, 238), True, 'import tensorflow as tf\n'), ((434, 455), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (453, 455), True, 'import tensorflow as tf\n'), ((492, 508), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (506, 508), True, 'import tensorflow as tf\n'), ((282, 303), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (301, 303), True, 'import tensorflow as tf\n'), ((514, 526), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (524, 526), True, 'import tensorflow as tf\n'), ((550, 583), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (581, 583), True, 'import tensorflow as tf\n'), ((619, 635), 'tensorflow.assign', 'tf.assign', (['v', '(10)'], {}), '(v, 10)\n', (628, 635), True, 'import tensorflow as tf\n')]
""" validataclass Copyright (c) 2021, binary butterfly GmbH and contributors Use of this source code is governed by an MIT-style license that can be found in the LICENSE file. """ from datetime import date from typing import Any from .string_validator import StringValidator from validataclass.exceptions import InvalidDateError __all__ = [ 'DateValidator', ] class DateValidator(StringValidator): """ Validator that parses date strings in "YYYY-MM-DD" format (e.g. "2021-01-31") to `datetime.date` objects. Currently no parameters are supported. Examples: ``` DateValidator() ``` See also: `TimeValidator`, `DateTimeValidator` Valid input: Valid dates in YYYY-MM-DD format as `str` Output: `datetime.date` """ def __init__(self): """ Create a `DateValidator`. No parameters. """ # Initialize StringValidator without any parameters super().__init__() def validate(self, input_data: Any) -> date: """ Validate input as a valid date string and convert it to a `datetime.date` object. """ # First, validate input data as string date_string = super().validate(input_data) # Try to create date object from string (only accepts "YYYY-MM-DD") try: date_obj = date.fromisoformat(date_string) except ValueError: raise InvalidDateError() return date_obj
[ "validataclass.exceptions.InvalidDateError", "datetime.date.fromisoformat" ]
[((1329, 1360), 'datetime.date.fromisoformat', 'date.fromisoformat', (['date_string'], {}), '(date_string)\n', (1347, 1360), False, 'from datetime import date\n'), ((1406, 1424), 'validataclass.exceptions.InvalidDateError', 'InvalidDateError', ([], {}), '()\n', (1422, 1424), False, 'from validataclass.exceptions import InvalidDateError\n')]
#!/usr/bin/env python # coding: utf-8 from __future__ import print_function from __future__ import absolute_import import os import re import shutil import stat import sys import tct from os.path import exists as ospe, join as ospj from tct import deepget params = tct.readjson(sys.argv[1]) binabspath = sys.argv[2] facts = tct.readjson(params['factsfile']) milestones = tct.readjson(params['milestonesfile']) reason = '' resultfile = params['resultfile'] result = tct.readjson(resultfile) loglist = result['loglist'] = result.get('loglist', []) toolname = params['toolname'] toolname_pure = params['toolname_pure'] workdir = params['workdir'] exitcode = CONTINUE = 0 # ================================================== # Make a copy of milestones for later inspection? # -------------------------------------------------- if 0 or milestones.get('debug_always_make_milestones_snapshot'): tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1]) # ================================================== # Helper functions # -------------------------------------------------- def lookup(D, *keys, **kwdargs): result = deepget(D, *keys, **kwdargs) loglist.append((keys, result)) return result # ================================================== # define # -------------------------------------------------- copied_latex_resources = [] run_latex_make_sh_file = None xeq_name_cnt = 0 # ================================================== # Check params # -------------------------------------------------- if exitcode == CONTINUE: loglist.append('CHECK PARAMS') make_latex = lookup(milestones, 'make_latex', default=None) if not make_latex: CONTINUE == -2 reason = 'Nothing to do' if exitcode == CONTINUE: build_latex = lookup(milestones, 'build_latex', default=None) builder_latex_folder = lookup(milestones, 'builder_latex_folder', default=None) latex_contrib_typo3_folder = lookup(milestones, 'latex_contrib_typo3_folder', default=None) if not (1 and build_latex and builder_latex_folder 
and latex_contrib_typo3_folder): CONTINUE = -2 reason = 'Bad params or nothing to do' if exitcode == CONTINUE: loglist.append('PARAMS are ok') else: loglist.append('Bad PARAMS or nothing to do') # ================================================== # work # -------------------------------------------------- if exitcode == CONTINUE: if not os.path.isdir(latex_contrib_typo3_folder): exitcode = 22 reason = 'Folder does not exist' if exitcode == CONTINUE: foldername = os.path.split(latex_contrib_typo3_folder)[1] destpath = ospj(builder_latex_folder, foldername) shutil.copytree(latex_contrib_typo3_folder, destpath) if exitcode == CONTINUE: run_latex_make_sh_file = ospj(builder_latex_folder, 'run-make.sh') f2text = ( "#!/bin/bash\n" "\n" "# This is run-make.sh\n" "\n" 'scriptdir=$( cd $(dirname "$0") ; pwd -P )' "\n" "# cd to this dir\n" "pushd \"$scriptdir\" >/dev/null\n" "\n" "# set environment var pointing to the folder and run make\n" "TEXINPUTS=::texmf_typo3 make\n" "\n" "popd >/dev/null\n" "\n" ) with open(run_latex_make_sh_file, 'w') as f2: f2.write(f2text) file_permissions = (os.stat(run_latex_make_sh_file).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) os.chmod(run_latex_make_sh_file, file_permissions) if exitcode == CONTINUE: makefile_path = ospj(builder_latex_folder, 'Makefile') makefile_original_path = makefile_path + '.original' if ospe(makefile_path) and not ospe(makefile_original_path): shutil.copy2(makefile_path, makefile_original_path) with open(makefile_path, 'rb') as f1: data = f1.read() data, cnt = re.subn("LATEXMKOPTS[ ]*=[ ]*\n", "\n\n\n\nLATEXMKOPTS = -interaction=nonstopmode\n\n\n\n\n", data) if cnt: with open(makefile_path, 'wb') as f2: f2.write(data) # ================================================== # Set MILESTONE # -------------------------------------------------- if copied_latex_resources: result['MILESTONES'].append({'copied_latex_resources': copied_latex_resources}) if run_latex_make_sh_file: 
result['MILESTONES'].append({'run_latex_make_sh_file': run_latex_make_sh_file}) # ================================================== # save result # -------------------------------------------------- tct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason) # ================================================== # Return with proper exitcode # -------------------------------------------------- sys.exit(exitcode)
[ "tct.make_snapshot_of_milestones", "os.path.exists", "tct.deepget", "shutil.copy2", "os.path.join", "os.chmod", "shutil.copytree", "tct.readjson", "os.path.split", "os.path.isdir", "tct.save_the_result", "sys.exit", "os.stat", "re.subn" ]
[((269, 294), 'tct.readjson', 'tct.readjson', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (281, 294), False, 'import tct\n'), ((328, 361), 'tct.readjson', 'tct.readjson', (["params['factsfile']"], {}), "(params['factsfile'])\n", (340, 361), False, 'import tct\n'), ((375, 413), 'tct.readjson', 'tct.readjson', (["params['milestonesfile']"], {}), "(params['milestonesfile'])\n", (387, 413), False, 'import tct\n'), ((469, 493), 'tct.readjson', 'tct.readjson', (['resultfile'], {}), '(resultfile)\n', (481, 493), False, 'import tct\n'), ((4819, 4917), 'tct.save_the_result', 'tct.save_the_result', (['result', 'resultfile', 'params', 'facts', 'milestones', 'exitcode', 'CONTINUE', 'reason'], {}), '(result, resultfile, params, facts, milestones, exitcode,\n CONTINUE, reason)\n', (4838, 4917), False, 'import tct\n'), ((5053, 5071), 'sys.exit', 'sys.exit', (['exitcode'], {}), '(exitcode)\n', (5061, 5071), False, 'import sys\n'), ((900, 970), 'tct.make_snapshot_of_milestones', 'tct.make_snapshot_of_milestones', (["params['milestonesfile']", 'sys.argv[1]'], {}), "(params['milestonesfile'], sys.argv[1])\n", (931, 970), False, 'import tct\n'), ((1145, 1173), 'tct.deepget', 'deepget', (['D', '*keys'], {}), '(D, *keys, **kwdargs)\n', (1152, 1173), False, 'from tct import deepget\n'), ((2772, 2810), 'os.path.join', 'ospj', (['builder_latex_folder', 'foldername'], {}), '(builder_latex_folder, foldername)\n', (2776, 2810), True, 'from os.path import exists as ospe, join as ospj\n'), ((2815, 2868), 'shutil.copytree', 'shutil.copytree', (['latex_contrib_typo3_folder', 'destpath'], {}), '(latex_contrib_typo3_folder, destpath)\n', (2830, 2868), False, 'import shutil\n'), ((2924, 2965), 'os.path.join', 'ospj', (['builder_latex_folder', '"""run-make.sh"""'], {}), "(builder_latex_folder, 'run-make.sh')\n", (2928, 2965), True, 'from os.path import exists as ospe, join as ospj\n'), ((3696, 3746), 'os.chmod', 'os.chmod', (['run_latex_make_sh_file', 'file_permissions'], {}), '(run_latex_make_sh_file, 
file_permissions)\n', (3704, 3746), False, 'import os\n'), ((3793, 3831), 'os.path.join', 'ospj', (['builder_latex_folder', '"""Makefile"""'], {}), "(builder_latex_folder, 'Makefile')\n", (3797, 3831), True, 'from os.path import exists as ospe, join as ospj\n'), ((4097, 4195), 're.subn', 're.subn', (['"""LATEXMKOPTS[ ]*=[ ]*\n"""', '"""\n\n\n\nLATEXMKOPTS = -interaction=nonstopmode\n\n\n\n\n"""', 'data'], {}), '(\'LATEXMKOPTS[ ]*=[ ]*\\n\',\n """\n\n\n\nLATEXMKOPTS = -interaction=nonstopmode\n\n\n\n\n""", data)\n', (4104, 4195), False, 'import re\n'), ((2563, 2604), 'os.path.isdir', 'os.path.isdir', (['latex_contrib_typo3_folder'], {}), '(latex_contrib_typo3_folder)\n', (2576, 2604), False, 'import os\n'), ((2712, 2753), 'os.path.split', 'os.path.split', (['latex_contrib_typo3_folder'], {}), '(latex_contrib_typo3_folder)\n', (2725, 2753), False, 'import os\n'), ((3896, 3915), 'os.path.exists', 'ospe', (['makefile_path'], {}), '(makefile_path)\n', (3900, 3915), True, 'from os.path import exists as ospe, join as ospj\n'), ((3962, 4013), 'shutil.copy2', 'shutil.copy2', (['makefile_path', 'makefile_original_path'], {}), '(makefile_path, makefile_original_path)\n', (3974, 4013), False, 'import shutil\n'), ((3924, 3952), 'os.path.exists', 'ospe', (['makefile_original_path'], {}), '(makefile_original_path)\n', (3928, 3952), True, 'from os.path import exists as ospe, join as ospj\n'), ((3490, 3521), 'os.stat', 'os.stat', (['run_latex_make_sh_file'], {}), '(run_latex_make_sh_file)\n', (3497, 3521), False, 'import os\n')]
import logging import random import time import src.support.outbound_routing as ob from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal import src.support.settings as config # This file containst the functions that perform transaction-specific # calls, building the needed requests and sending them to the aries agents CRED_NAMES = [ "payment_agreement", "payment_credential", "package_cred", "received_package", ] ##User #User -> Vendor def send_payment_agreement_proposal(product_id): if not config.agent_data.product_id: config.agent_data.update_product_id(product_id) proposal = { "@type": "did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview", "attributes": [ { "name": "product_id", "value": product_id }, ] } offer_json = build_credential_proposal( config.agent_data.current_connection, comment="request for payment agreement credential", schema_name="payment agreement", prop_schema=proposal ) resp = ob.send_cred_proposal(offer_json) return resp def send_payment_agreement_cred_offer(conn_id, creddef_id, product_id, value=None, endpoint="placeholder_endpoint"): logging.debug("Issue credential to user") print("value is : ", value, " product_id is: ", product_id) builder = build_cred(creddef_id) builder.with_attribute({"payment_endpoint": endpoint}) \ .with_attribute({"timestamp": str(int(time.time()))}) \ .with_attribute({"amount": value}) \ .with_attribute({"product_id": product_id}) \ .with_type("did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview") \ .with_conn_id(conn_id) offer_req = builder.build_offer("purchase request") config.agent_data.previews[creddef_id] = builder.build_preview() return ob.send_cred_offer(offer_req) #User -> Bank def propose_proof_of_payment_agreement(connection_id, cred_def_id): proposal = build_proof_proposal( "proof_of_payment_agreement" ).withAttribute( "payment_endpoint", cred_def_id, ).withAttribute( "amount", 
cred_def_id ).withAttribute( "timestamp", cred_def_id ).build(connection_id, comment="proof of payment agreement") return ob.send_proof_proposal(proposal) #User -> Vendor def propose_proof_of_payment(connection_id, cred_def_id=None): proposal = build_proof_proposal( "proof_of_payment" ).withAttribute( "transaction_no", cred_def_id, ).withAttribute( "timestamp", cred_def_id ).build(connection_id, comment="wanna prove payhment") return ob.send_proof_proposal(proposal) def refuse_payment_agreement(conn_id, creddef_id): #todo: return a problem report if vendor cant/wont sell return None def request_proof_of_payment_agreement(creddef_id = None): if not creddef_id: return {"error": "no creddef id"} builder = build_proof_request(name="proof of payment agreement", version="1.0") req = builder.withAttribute( "payment_endpoint", restrictions=[{"cred_def_id": creddef_id}] ).withAttribute( "timestamp", restrictions=[{"cred_def_id": creddef_id}] ).withAttribute( "amount", restrictions=[{"cred_def_id": creddef_id}] ).with_conn_id(config.agent_data.current_connection).build() return ob.send_proof_request(req) #### Stage 2: Payment; #Bank -> User def send_payment_cred_offer(conn_id, creddef_id): transaction_no = gen_transaction_id() config.agent_data.transaction_no = transaction_no logging.debug("Issue credential to user") builder = build_cred(creddef_id) builder.with_attribute({"transaction_no": transaction_no}) \ .with_attribute({"timestamp": str(int(time.time()))}) \ .with_type("did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview") \ .with_conn_id(conn_id) offer_req = builder.build_offer("payment credential issuance") config.agent_data.previews[creddef_id] = builder.build_preview() return ob.send_cred_offer(offer_req) #stage 3: proving payment #Vendor -> User def request_proof_of_payment(creddef_id = None, presex_id=None): if not creddef_id: if not config.agent_data.payment_creddef: return {"error": "no creddef id"} else: creddef_id = 
config.agent_data.payment_creddef builder = build_proof_request(name="proof of payment", version="1.0") req = builder.withAttribute( "transaction_no", restrictions=[{"cred_def_id": creddef_id}] ).withAttribute( "timestamp", restrictions=[{"cred_def_id": creddef_id}] ).with_conn_id(config.agent_data.current_connection).build() return ob.send_proof_request(req, presex_id) ##### PROOF PACKAGE AT SHIPPING SERVICE ###### #Vendor -> User def propose_proof_of_dispatch(connection_id, cred_def_id): proposal = build_proof_proposal( "proof_of_dispatch" ).withAttribute( "package_no", cred_def_id, ).withAttribute( "timestamp", cred_def_id ).build(connection_id, comment="Package is at shipping service") return ob.send_proof_proposal(proposal) #User -> Vendor def request_proof_of_dispatch(creddef_id = None, presex_id=None): if not creddef_id: if not config.payment_creddef: return {"error": "no creddef id"} else: creddef_id = config.agent_data.payment_creddef builder = build_proof_request(name="proof of dispatch", version="1.0") req = builder.withAttribute( "timestamp", restrictions=[{"cred_def_id": creddef_id}] ).withAttribute( "package_no", restrictions=[{"cred_def_id": creddef_id}] ).with_conn_id(config.agent_data.current_connection).build() return ob.send_proof_request(req, presex_id) ############################################## ####END Stage 2 ####START Stage 3: Package ownership #Vendor -> user def send_package_cred_offer(conn_id, creddef_id): logging.debug("Issue credential to user") package_no = gen_package_no() config.agent_data.update_package_no(package_no) builder = build_cred(creddef_id) builder.with_attribute({"package_no": package_no}) \ .with_attribute({"timestamp": str(int(time.time()))}) \ .with_attribute({"status": "dispatched_to_shipping_service"}) \ .with_type("did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview") \ .with_conn_id(conn_id) offer_req = builder.build_offer("package credential issuance") 
config.agent_data.previews[creddef_id] = builder.build_preview() return ob.send_cred_offer(offer_req) #User -> Shipper # todo self attest address def propose_proof_of_ownership(conn_id, creddef_id): builder = build_proof_proposal("proof of package ownership") req = builder.withAttribute( "package_no", cred_def_id=creddef_id ).withAttribute( "timestamp", cred_def_id=creddef_id ).withAttribute( "shipping_address", ).build(conn_id, comment="proof of package ownership") return ob.send_proof_proposal(req) #Shipper -> User def request_proof_of_ownership(creddef_id): builder = build_proof_request(name="proof of package ownership", version="1.0") req = builder.withAttribute( "package_no", restrictions=[{"cred_def_id": creddef_id}] ).withAttribute( "timestamp", restrictions=[{"cred_def_id": creddef_id}] ).withAttribute( "shipping_address", ).with_conn_id(config.agent_data.current_connection).build() return ob.send_proof_request(req) ####END Stage 3 ####START Stage 4: receipt of package #Shipper -> Vendor def send_package_receipt_cred_offer(conn_id, creddef_id, package_no): logging.debug("Issue receipt credential to vendor") builder = build_cred(creddef_id) builder.with_attribute({"package_no": package_no}) \ .with_attribute({"timestamp": str(int(time.time()))}) \ .with_type("did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview") \ .with_conn_id(conn_id) offer_req = builder.build_offer("package-receipt credential issuance") config.agent_data.previews[creddef_id] = builder.build_preview() return ob.send_cred_offer(offer_req) #User -> Vendor def request_proof_of_receipt(): builder = build_proof_request(name="proof of shipped package", version="1.0") req = builder.withAttribute( "package_no", restrictions=[{"issuer_did":config.agent_data.shipper_did}] ).withAttribute( "timestamp", restrictions=[{"issuer_did":config.agent_data.shipper_did}] ).withAttribute( "status", restrictions=[{"issuer_did": config.agent_data.shipper_did}] 
).with_conn_id(config.agent_data.current_connection).build() return ob.send_proof_request(req) #Vendor -> User def propose_proof_of_package_status(connection_id, cred_def_id=None): proposal = build_proof_proposal( "proof_of_package_status" ).withAttribute( "package_no", cred_def_id=cred_def_id, ).withAttribute( "timestamp", cred_def_id=cred_def_id, ).build(connection_id, comment="Package is at shipping service") return ob.send_proof_proposal(proposal) ##helper def register_schema(name, version, attrs, revocation=False): schema = build_schema(name, version, attrs) resp = ob.register_schema(schema) id = resp["schema_id"] creddef = {"schema_id": id, "support_revocation": revocation} resp = ob.register_creddef(creddef) creddef_id = resp["credential_definition_id"] config.agent_data.creddef_id = creddef_id return id, creddef_id ## need a way of keeping track who is for what def get_agreement_creddefid(): credentials = ob.get_credentials() res = credentials["results"] print("results of payment credf: ", res) payment_creds = [x for x in res if "payment_agreement" in x["schema_id"]] print("payment creds", res) if payment_creds: return payment_creds[0]["cred_def_id"] else: return None def get_creddefid(schema_name): credentials = ob.get_credentials() res = credentials["results"] print("results of payment credf: ", res) payment_creds = [x for x in res if schema_name in x["schema_id"]] print("payment creds", res) if payment_creds: return payment_creds[0]["cred_def_id"] def get_payment_creddefid(): credentials = ob.get_credentials() res = credentials["results"] payment_creds = [x for x in res if "payment_credential" in x["schema_id"]] if payment_creds: return payment_creds[0]["cred_def_id"] def get_package_creddefid(): credentials = ob.get_credentials() res = credentials["results"] package_creds = [x for x in res if "package_cred" in x["schema_id"]] if package_creds: return package_creds[0]["cred_def_id"] def register_payment_agreement_schema(url): schema_name = 
"payment_agreement" schema = { "schema_name": schema_name, "schema_version": "1.0", "attributes": ["amount", "timestamp", "payment_endpoint", "product_id"] } response = ob.post(url + "/schemas", data=schema) id = response["schema_id"] creddef = {"schema_id": id, "support_revocation": False} resp = ob.register_creddef(creddef) if resp: config.agent_data.creddef_id = resp["credential_definition_id"] config.agent_data.creddefs[schema_name] = resp["credential_definition_id"] logging.debug(f"Registered schema with id: %s, and creddef_id: %s", id, resp["credential_definition_id"]) return id, resp["credential_definition_id"] #schema reg def register_payment_schema(url): schema = { "schema_name": "payment_credential", "schema_version": "1.0", "attributes": ["transaction_no", "timestamp"] } response = ob.post(url + "/schemas", data=schema) id = response["schema_id"] creddef = {"schema_id": id, "support_revocation": False} resp = ob.register_creddef(creddef) if resp: print(resp) config.agent_data.creddef_id = resp["credential_definition_id"] config.agent_data.payment_creddef = resp["credential_definition_id"] logging.debug(f"Registered schema with id: %s, and creddef_id: %s", id, resp["credential_definition_id"]) return id, resp["credential_definition_id"] def register_package_schema(url): schema_name = "package_cred" schema = { "schema_name": schema_name, "schema_version": "1.0", "attributes": ["package_no", "timestamp", "status"] } response = ob.post(url + "/schemas", data=schema) id = response["schema_id"] creddef = {"schema_id":id, "support_revocation": False} resp = ob.register_creddef(creddef) if resp: config.agent_data.creddef_id = resp["credential_definition_id"] config.agent_data.creddefs[schema_name] = resp["credential_definition_id"] logging.debug(f"Registered schema with id: %s, and creddef_id: %s", id, resp["credential_definition_id"]) return id, resp["credential_definition_id"] def register_receipt_schema(url): schema_name = "received_package" schema = { 
"schema_name": schema_name, "schema_version": "1.0", "attributes": ["package_no", "timestamp"] } response = ob.post(url + "/schemas", data=schema) id = response["schema_id"] creddef = {"schema_id": id, "support_revocation": False} resp = ob.register_creddef(creddef) if resp: config.agent_data.creddef_id = resp["credential_definition_id"] config.agent_data.creddefs[schema_name] = resp["credential_definition_id"] logging.debug(f"Registered schema with id: %s, and creddef_id: %s", id, resp["credential_definition_id"]) return id, resp["credential_definition_id"] def get_schema_name(creddef): resp = ob.get_creddef(creddef) if not resp: return False schema_id = resp["credential_definition"]["schemaId"] resp = ob.get_schema(schema_id) if not resp: return False return resp["schema"]["name"] #####VALIDATORS##### def is_credential_stored(name): credentials = ob.get_credentials() res = credentials["results"] matching_creds = [x for x in res if name in x["schema_id"]] if not matching_creds: return False return True def is_proof_validated(schema_name, proof_name=None, ex_id=None): proof_records = ob.get_pres_ex_records() results = proof_records["results"] if results: for result in results: if "verified" in result: if result["verified"] == "true": attrs = result["presentation_request"]["requested_attributes"] for attr in attrs: for attrname in attrs[attr]: if attrname == "restrictions": restrictions = attrs[attr][attrname] for restriction in restrictions: if "cred_def_id" in restriction: name = get_schema_name(restriction["cred_def_id"]) if name == schema_name: return True return False def get_proof_validated(schema_name, proof_name=None, ex_id=None): proof_records = ob.get_pres_ex_records() results = proof_records["results"] if results: for result in results: if "verified" in result: if result["verified"] == "true": attrs = result["presentation_request"]["requested_attributes"] for attr in attrs: for attrname in attrs[attr]: if attrname == "restrictions": restrictions = 
attrs[attr][attrname] for restriction in restrictions: if "cred_def_id" in restriction: name = get_schema_name(restriction["cred_def_id"]) if name == schema_name: return True return False # Helper function # returns True if a schema of @schema_name is stored def have_receieved_proof_proposal(schema_name=None): proof_records = ob.get_pres_ex_records() results = proof_records["results"] if results: for result in results: state = result["state"] if state == "proposal_received": proposal = result["presentation_proposal_dict"]["presentation_proposal"] attrs = proposal["attributes"] for attr in attrs: if "cred_def_id" in attr: if get_schema_name(attr["cred_def_id"]) == schema_name: return True return False # Helper funciton. # Returns value string if a credential attribute of name = @name # is present in the given aries issue-credential message def get_cred_attr_value(name, offer): attributes = offer["credential_proposal_dict"]["credential_proposal"]["attributes"] for attr in attributes: if attr["name"] == name: return attr["value"] return False def get_cred_attrs(offer): return offer["credential_proposal_dict"]["credential_proposal"]["attributes"] def gen_package_no(n=7): range_start = 10**(n-1) range_end = (10**n)-1 return str(random.randint(range_start, range_end)) def gen_product_id(n=4): range_start = 10**(n-1) range_end = (10**n)-1 return str(random.randint(range_start, range_end)) def gen_transaction_id(n=5): range_start = 10**(n-1) range_end = (10**n)-1 return "t_id_" + str(random.randint(range_start, range_end)) def parse_payment_endpoint(data): get_cred_attr_value("", data)
[ "src.support.creds.build_proof_request", "logging.debug", "src.support.outbound_routing.get_pres_ex_records", "src.support.creds.build_credential_proposal", "src.support.settings.agent_data.update_package_no", "src.support.outbound_routing.send_proof_proposal", "src.support.outbound_routing.get_schema",...
[((949, 1129), 'src.support.creds.build_credential_proposal', 'build_credential_proposal', (['config.agent_data.current_connection'], {'comment': '"""request for payment agreement credential"""', 'schema_name': '"""payment agreement"""', 'prop_schema': 'proposal'}), "(config.agent_data.current_connection, comment=\n 'request for payment agreement credential', schema_name=\n 'payment agreement', prop_schema=proposal)\n", (974, 1129), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal\n'), ((1170, 1203), 'src.support.outbound_routing.send_cred_proposal', 'ob.send_cred_proposal', (['offer_json'], {}), '(offer_json)\n', (1191, 1203), True, 'import src.support.outbound_routing as ob\n'), ((1342, 1383), 'logging.debug', 'logging.debug', (['"""Issue credential to user"""'], {}), "('Issue credential to user')\n", (1355, 1383), False, 'import logging\n'), ((1463, 1485), 'src.support.creds.build_cred', 'build_cred', (['creddef_id'], {}), '(creddef_id)\n', (1473, 1485), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal\n'), ((1978, 2007), 'src.support.outbound_routing.send_cred_offer', 'ob.send_cred_offer', (['offer_req'], {}), '(offer_req)\n', (1996, 2007), True, 'import src.support.outbound_routing as ob\n'), ((2432, 2464), 'src.support.outbound_routing.send_proof_proposal', 'ob.send_proof_proposal', (['proposal'], {}), '(proposal)\n', (2454, 2464), True, 'import src.support.outbound_routing as ob\n'), ((2809, 2841), 'src.support.outbound_routing.send_proof_proposal', 'ob.send_proof_proposal', (['proposal'], {}), '(proposal)\n', (2831, 2841), True, 'import src.support.outbound_routing as ob\n'), ((3111, 3180), 'src.support.creds.build_proof_request', 'build_proof_request', ([], {'name': '"""proof of payment agreement"""', 'version': '"""1.0"""'}), "(name='proof of payment agreement', version='1.0')\n", (3130, 
3180), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal\n'), ((3552, 3578), 'src.support.outbound_routing.send_proof_request', 'ob.send_proof_request', (['req'], {}), '(req)\n', (3573, 3578), True, 'import src.support.outbound_routing as ob\n'), ((3769, 3810), 'logging.debug', 'logging.debug', (['"""Issue credential to user"""'], {}), "('Issue credential to user')\n", (3782, 3810), False, 'import logging\n'), ((3825, 3847), 'src.support.creds.build_cred', 'build_cred', (['creddef_id'], {}), '(creddef_id)\n', (3835, 3847), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal\n'), ((4256, 4285), 'src.support.outbound_routing.send_cred_offer', 'ob.send_cred_offer', (['offer_req'], {}), '(offer_req)\n', (4274, 4285), True, 'import src.support.outbound_routing as ob\n'), ((4604, 4663), 'src.support.creds.build_proof_request', 'build_proof_request', ([], {'name': '"""proof of payment"""', 'version': '"""1.0"""'}), "(name='proof of payment', version='1.0')\n", (4623, 4663), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal\n'), ((4943, 4980), 'src.support.outbound_routing.send_proof_request', 'ob.send_proof_request', (['req', 'presex_id'], {}), '(req, presex_id)\n', (4964, 4980), True, 'import src.support.outbound_routing as ob\n'), ((5377, 5409), 'src.support.outbound_routing.send_proof_proposal', 'ob.send_proof_proposal', (['proposal'], {}), '(proposal)\n', (5399, 5409), True, 'import src.support.outbound_routing as ob\n'), ((5690, 5750), 'src.support.creds.build_proof_request', 'build_proof_request', ([], {'name': '"""proof of dispatch"""', 'version': '"""1.0"""'}), "(name='proof of dispatch', version='1.0')\n", (5709, 5750), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, 
build_credential_proposal, build_proof_proposal\n'), ((6026, 6063), 'src.support.outbound_routing.send_proof_request', 'ob.send_proof_request', (['req', 'presex_id'], {}), '(req, presex_id)\n', (6047, 6063), True, 'import src.support.outbound_routing as ob\n'), ((6238, 6279), 'logging.debug', 'logging.debug', (['"""Issue credential to user"""'], {}), "('Issue credential to user')\n", (6251, 6279), False, 'import logging\n'), ((6319, 6366), 'src.support.settings.agent_data.update_package_no', 'config.agent_data.update_package_no', (['package_no'], {}), '(package_no)\n', (6354, 6366), True, 'import src.support.settings as config\n'), ((6381, 6403), 'src.support.creds.build_cred', 'build_cred', (['creddef_id'], {}), '(creddef_id)\n', (6391, 6403), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal\n'), ((6877, 6906), 'src.support.outbound_routing.send_cred_offer', 'ob.send_cred_offer', (['offer_req'], {}), '(offer_req)\n', (6895, 6906), True, 'import src.support.outbound_routing as ob\n'), ((7019, 7069), 'src.support.creds.build_proof_proposal', 'build_proof_proposal', (['"""proof of package ownership"""'], {}), "('proof of package ownership')\n", (7039, 7069), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal\n'), ((7348, 7375), 'src.support.outbound_routing.send_proof_proposal', 'ob.send_proof_proposal', (['req'], {}), '(req)\n', (7370, 7375), True, 'import src.support.outbound_routing as ob\n'), ((7453, 7522), 'src.support.creds.build_proof_request', 'build_proof_request', ([], {'name': '"""proof of package ownership"""', 'version': '"""1.0"""'}), "(name='proof of package ownership', version='1.0')\n", (7472, 7522), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal\n'), ((7847, 7873), 
'src.support.outbound_routing.send_proof_request', 'ob.send_proof_request', (['req'], {}), '(req)\n', (7868, 7873), True, 'import src.support.outbound_routing as ob\n'), ((8022, 8073), 'logging.debug', 'logging.debug', (['"""Issue receipt credential to vendor"""'], {}), "('Issue receipt credential to vendor')\n", (8035, 8073), False, 'import logging\n'), ((8088, 8110), 'src.support.creds.build_cred', 'build_cred', (['creddef_id'], {}), '(creddef_id)\n', (8098, 8110), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal\n'), ((8519, 8548), 'src.support.outbound_routing.send_cred_offer', 'ob.send_cred_offer', (['offer_req'], {}), '(offer_req)\n', (8537, 8548), True, 'import src.support.outbound_routing as ob\n'), ((8612, 8679), 'src.support.creds.build_proof_request', 'build_proof_request', ([], {'name': '"""proof of shipped package"""', 'version': '"""1.0"""'}), "(name='proof of shipped package', version='1.0')\n", (8631, 8679), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal\n'), ((9097, 9123), 'src.support.outbound_routing.send_proof_request', 'ob.send_proof_request', (['req'], {}), '(req)\n', (9118, 9123), True, 'import src.support.outbound_routing as ob\n'), ((9513, 9545), 'src.support.outbound_routing.send_proof_proposal', 'ob.send_proof_proposal', (['proposal'], {}), '(proposal)\n', (9535, 9545), True, 'import src.support.outbound_routing as ob\n'), ((9630, 9664), 'src.support.creds.build_schema', 'build_schema', (['name', 'version', 'attrs'], {}), '(name, version, attrs)\n', (9642, 9664), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal\n'), ((9676, 9702), 'src.support.outbound_routing.register_schema', 'ob.register_schema', (['schema'], {}), '(schema)\n', (9694, 9702), True, 'import src.support.outbound_routing as 
ob\n'), ((9807, 9835), 'src.support.outbound_routing.register_creddef', 'ob.register_creddef', (['creddef'], {}), '(creddef)\n', (9826, 9835), True, 'import src.support.outbound_routing as ob\n'), ((10056, 10076), 'src.support.outbound_routing.get_credentials', 'ob.get_credentials', ([], {}), '()\n', (10074, 10076), True, 'import src.support.outbound_routing as ob\n'), ((10415, 10435), 'src.support.outbound_routing.get_credentials', 'ob.get_credentials', ([], {}), '()\n', (10433, 10435), True, 'import src.support.outbound_routing as ob\n'), ((10734, 10754), 'src.support.outbound_routing.get_credentials', 'ob.get_credentials', ([], {}), '()\n', (10752, 10754), True, 'import src.support.outbound_routing as ob\n'), ((10984, 11004), 'src.support.outbound_routing.get_credentials', 'ob.get_credentials', ([], {}), '()\n', (11002, 11004), True, 'import src.support.outbound_routing as ob\n'), ((11448, 11486), 'src.support.outbound_routing.post', 'ob.post', (["(url + '/schemas')"], {'data': 'schema'}), "(url + '/schemas', data=schema)\n", (11455, 11486), True, 'import src.support.outbound_routing as ob\n'), ((11590, 11618), 'src.support.outbound_routing.register_creddef', 'ob.register_creddef', (['creddef'], {}), '(creddef)\n', (11609, 11618), True, 'import src.support.outbound_routing as ob\n'), ((12168, 12206), 'src.support.outbound_routing.post', 'ob.post', (["(url + '/schemas')"], {'data': 'schema'}), "(url + '/schemas', data=schema)\n", (12175, 12206), True, 'import src.support.outbound_routing as ob\n'), ((12310, 12338), 'src.support.outbound_routing.register_creddef', 'ob.register_creddef', (['creddef'], {}), '(creddef)\n', (12329, 12338), True, 'import src.support.outbound_routing as ob\n'), ((12921, 12959), 'src.support.outbound_routing.post', 'ob.post', (["(url + '/schemas')"], {'data': 'schema'}), "(url + '/schemas', data=schema)\n", (12928, 12959), True, 'import src.support.outbound_routing as ob\n'), ((13063, 13091), 
'src.support.outbound_routing.register_creddef', 'ob.register_creddef', (['creddef'], {}), '(creddef)\n', (13082, 13091), True, 'import src.support.outbound_routing as ob\n'), ((13654, 13692), 'src.support.outbound_routing.post', 'ob.post', (["(url + '/schemas')"], {'data': 'schema'}), "(url + '/schemas', data=schema)\n", (13661, 13692), True, 'import src.support.outbound_routing as ob\n'), ((13797, 13825), 'src.support.outbound_routing.register_creddef', 'ob.register_creddef', (['creddef'], {}), '(creddef)\n', (13816, 13825), True, 'import src.support.outbound_routing as ob\n'), ((14202, 14225), 'src.support.outbound_routing.get_creddef', 'ob.get_creddef', (['creddef'], {}), '(creddef)\n', (14216, 14225), True, 'import src.support.outbound_routing as ob\n'), ((14334, 14358), 'src.support.outbound_routing.get_schema', 'ob.get_schema', (['schema_id'], {}), '(schema_id)\n', (14347, 14358), True, 'import src.support.outbound_routing as ob\n'), ((14504, 14524), 'src.support.outbound_routing.get_credentials', 'ob.get_credentials', ([], {}), '()\n', (14522, 14524), True, 'import src.support.outbound_routing as ob\n'), ((14773, 14797), 'src.support.outbound_routing.get_pres_ex_records', 'ob.get_pres_ex_records', ([], {}), '()\n', (14795, 14797), True, 'import src.support.outbound_routing as ob\n'), ((15723, 15747), 'src.support.outbound_routing.get_pres_ex_records', 'ob.get_pres_ex_records', ([], {}), '()\n', (15745, 15747), True, 'import src.support.outbound_routing as ob\n'), ((16730, 16754), 'src.support.outbound_routing.get_pres_ex_records', 'ob.get_pres_ex_records', ([], {}), '()\n', (16752, 16754), True, 'import src.support.outbound_routing as ob\n'), ((629, 676), 'src.support.settings.agent_data.update_product_id', 'config.agent_data.update_product_id', (['product_id'], {}), '(product_id)\n', (664, 676), True, 'import src.support.settings as config\n'), ((11795, 11904), 'logging.debug', 'logging.debug', (['f"""Registered schema with id: %s, and creddef_id: %s"""', 
'id', "resp['credential_definition_id']"], {}), "(f'Registered schema with id: %s, and creddef_id: %s', id,\n resp['credential_definition_id'])\n", (11808, 11904), False, 'import logging\n'), ((12529, 12638), 'logging.debug', 'logging.debug', (['f"""Registered schema with id: %s, and creddef_id: %s"""', 'id', "resp['credential_definition_id']"], {}), "(f'Registered schema with id: %s, and creddef_id: %s', id,\n resp['credential_definition_id'])\n", (12542, 12638), False, 'import logging\n'), ((13268, 13377), 'logging.debug', 'logging.debug', (['f"""Registered schema with id: %s, and creddef_id: %s"""', 'id', "resp['credential_definition_id']"], {}), "(f'Registered schema with id: %s, and creddef_id: %s', id,\n resp['credential_definition_id'])\n", (13281, 13377), False, 'import logging\n'), ((14002, 14111), 'logging.debug', 'logging.debug', (['f"""Registered schema with id: %s, and creddef_id: %s"""', 'id', "resp['credential_definition_id']"], {}), "(f'Registered schema with id: %s, and creddef_id: %s', id,\n resp['credential_definition_id'])\n", (14015, 14111), False, 'import logging\n'), ((17863, 17901), 'random.randint', 'random.randint', (['range_start', 'range_end'], {}), '(range_start, range_end)\n', (17877, 17901), False, 'import random\n'), ((17998, 18036), 'random.randint', 'random.randint', (['range_start', 'range_end'], {}), '(range_start, range_end)\n', (18012, 18036), False, 'import random\n'), ((18147, 18185), 'random.randint', 'random.randint', (['range_start', 'range_end'], {}), '(range_start, range_end)\n', (18161, 18185), False, 'import random\n'), ((2560, 2600), 'src.support.creds.build_proof_proposal', 'build_proof_proposal', (['"""proof_of_payment"""'], {}), "('proof_of_payment')\n", (2580, 2600), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal\n'), ((5121, 5162), 'src.support.creds.build_proof_proposal', 'build_proof_proposal', (['"""proof_of_dispatch"""'], 
{}), "('proof_of_dispatch')\n", (5141, 5162), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal\n'), ((9226, 9273), 'src.support.creds.build_proof_proposal', 'build_proof_proposal', (['"""proof_of_package_status"""'], {}), "('proof_of_package_status')\n", (9246, 9273), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal\n'), ((2106, 2156), 'src.support.creds.build_proof_proposal', 'build_proof_proposal', (['"""proof_of_payment_agreement"""'], {}), "('proof_of_payment_agreement')\n", (2126, 2156), False, 'from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal\n'), ((3959, 3970), 'time.time', 'time.time', ([], {}), '()\n', (3968, 3970), False, 'import time\n'), ((8214, 8225), 'time.time', 'time.time', ([], {}), '()\n', (8223, 8225), False, 'import time\n'), ((6508, 6519), 'time.time', 'time.time', ([], {}), '()\n', (6517, 6519), False, 'import time\n'), ((1593, 1604), 'time.time', 'time.time', ([], {}), '()\n', (1602, 1604), False, 'import time\n')]
# # To run locally, execute: # # spark-submit --master local[2] wide_and_deep_example.py # S3_ROOT_DIR = 's3://{YOUR_S3_BUCKET}/{YOUR_S3_PATH}/' batch_size = 100 worker_count = 1 server_count = 1 import metaspore as ms spark = ms.spark.get_session(batch_size=batch_size, worker_count=worker_count, server_count=server_count, ) sc = spark.sparkContext with spark: module = ms.nn.WideAndDeepModule( wide_column_name_path=S3_ROOT_DIR + 'demo/schema/column_name_demo.txt', wide_combine_schema_path=S3_ROOT_DIR + 'demo/schema/combine_schema_demo.txt', deep_sparse_column_name_path=S3_ROOT_DIR + 'demo/schema/column_name_demo.txt', deep_sparse_combine_schema_path=S3_ROOT_DIR + 'demo/schema/combine_schema_demo.txt', ) model_out_path = S3_ROOT_DIR + 'demo/output/dev/model_out/' estimator = ms.PyTorchEstimator(module=module, worker_count=worker_count, server_count=server_count, model_out_path=model_out_path, input_label_column_index=0) train_dataset_path = S3_ROOT_DIR + 'demo/data/train/day_0_0.001_train.csv' train_dataset = ms.input.read_s3_csv(spark, train_dataset_path, delimiter='\t') model = estimator.fit(train_dataset) test_dataset_path = S3_ROOT_DIR + 'demo/data/test/day_0_0.001_test.csv' test_dataset = ms.input.read_s3_csv(spark, test_dataset_path, delimiter='\t') result = model.transform(test_dataset) result.show(5) import pyspark evaluator = pyspark.ml.evaluation.BinaryClassificationEvaluator() test_auc = evaluator.evaluate(result) print('test_auc: %g' % test_auc)
[ "metaspore.spark.get_session", "pyspark.ml.evaluation.BinaryClassificationEvaluator", "metaspore.nn.WideAndDeepModule", "metaspore.PyTorchEstimator", "metaspore.input.read_s3_csv" ]
[((232, 333), 'metaspore.spark.get_session', 'ms.spark.get_session', ([], {'batch_size': 'batch_size', 'worker_count': 'worker_count', 'server_count': 'server_count'}), '(batch_size=batch_size, worker_count=worker_count,\n server_count=server_count)\n', (252, 333), True, 'import metaspore as ms\n'), ((468, 827), 'metaspore.nn.WideAndDeepModule', 'ms.nn.WideAndDeepModule', ([], {'wide_column_name_path': "(S3_ROOT_DIR + 'demo/schema/column_name_demo.txt')", 'wide_combine_schema_path': "(S3_ROOT_DIR + 'demo/schema/combine_schema_demo.txt')", 'deep_sparse_column_name_path': "(S3_ROOT_DIR + 'demo/schema/column_name_demo.txt')", 'deep_sparse_combine_schema_path': "(S3_ROOT_DIR + 'demo/schema/combine_schema_demo.txt')"}), "(wide_column_name_path=S3_ROOT_DIR +\n 'demo/schema/column_name_demo.txt', wide_combine_schema_path=\n S3_ROOT_DIR + 'demo/schema/combine_schema_demo.txt',\n deep_sparse_column_name_path=S3_ROOT_DIR +\n 'demo/schema/column_name_demo.txt', deep_sparse_combine_schema_path=\n S3_ROOT_DIR + 'demo/schema/combine_schema_demo.txt')\n", (491, 827), True, 'import metaspore as ms\n'), ((926, 1078), 'metaspore.PyTorchEstimator', 'ms.PyTorchEstimator', ([], {'module': 'module', 'worker_count': 'worker_count', 'server_count': 'server_count', 'model_out_path': 'model_out_path', 'input_label_column_index': '(0)'}), '(module=module, worker_count=worker_count, server_count=\n server_count, model_out_path=model_out_path, input_label_column_index=0)\n', (945, 1078), True, 'import metaspore as ms\n'), ((1318, 1381), 'metaspore.input.read_s3_csv', 'ms.input.read_s3_csv', (['spark', 'train_dataset_path'], {'delimiter': '"""\t"""'}), "(spark, train_dataset_path, delimiter='\\t')\n", (1338, 1381), True, 'import metaspore as ms\n'), ((1519, 1581), 'metaspore.input.read_s3_csv', 'ms.input.read_s3_csv', (['spark', 'test_dataset_path'], {'delimiter': '"""\t"""'}), "(spark, test_dataset_path, delimiter='\\t')\n", (1539, 1581), True, 'import metaspore as ms\n'), ((1680, 1733), 
'pyspark.ml.evaluation.BinaryClassificationEvaluator', 'pyspark.ml.evaluation.BinaryClassificationEvaluator', ([], {}), '()\n', (1731, 1733), False, 'import pyspark\n')]
# dockerpty. # # Copyright 2014 <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation, exec_create def start(client, container, interactive=True, stdout=None, stderr=None, stdin=None, logs=None): """ Present the PTY of the container inside the current process. This is just a wrapper for PseudoTerminal(client, container).start() """ operation = RunOperation(client, container, interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin, logs=logs) PseudoTerminal(client, operation).start() def exec_command( client, container, command, interactive=True, stdout=None, stderr=None, stdin=None): """ Run provided command via exec API in provided container. This is just a wrapper for PseudoTerminal(client, container).exec_command() """ exec_id = exec_create(client, container, command, interactive=interactive) operation = ExecOperation(client, exec_id, interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin) PseudoTerminal(client, operation).start() def start_exec(client, exec_id, interactive=True, stdout=None, stderr=None, stdin=None): operation = ExecOperation(client, exec_id, interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin) PseudoTerminal(client, operation).start()
[ "dockerpty.pty.RunOperation", "dockerpty.pty.ExecOperation", "dockerpty.pty.PseudoTerminal", "dockerpty.pty.exec_create" ]
[((950, 1064), 'dockerpty.pty.RunOperation', 'RunOperation', (['client', 'container'], {'interactive': 'interactive', 'stdout': 'stdout', 'stderr': 'stderr', 'stdin': 'stdin', 'logs': 'logs'}), '(client, container, interactive=interactive, stdout=stdout,\n stderr=stderr, stdin=stdin, logs=logs)\n', (962, 1064), False, 'from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation, exec_create\n'), ((1422, 1486), 'dockerpty.pty.exec_create', 'exec_create', (['client', 'container', 'command'], {'interactive': 'interactive'}), '(client, container, command, interactive=interactive)\n', (1433, 1486), False, 'from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation, exec_create\n'), ((1504, 1606), 'dockerpty.pty.ExecOperation', 'ExecOperation', (['client', 'exec_id'], {'interactive': 'interactive', 'stdout': 'stdout', 'stderr': 'stderr', 'stdin': 'stdin'}), '(client, exec_id, interactive=interactive, stdout=stdout,\n stderr=stderr, stdin=stdin)\n', (1517, 1606), False, 'from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation, exec_create\n'), ((1786, 1888), 'dockerpty.pty.ExecOperation', 'ExecOperation', (['client', 'exec_id'], {'interactive': 'interactive', 'stdout': 'stdout', 'stderr': 'stderr', 'stdin': 'stdin'}), '(client, exec_id, interactive=interactive, stdout=stdout,\n stderr=stderr, stdin=stdin)\n', (1799, 1888), False, 'from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation, exec_create\n'), ((1095, 1128), 'dockerpty.pty.PseudoTerminal', 'PseudoTerminal', (['client', 'operation'], {}), '(client, operation)\n', (1109, 1128), False, 'from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation, exec_create\n'), ((1637, 1670), 'dockerpty.pty.PseudoTerminal', 'PseudoTerminal', (['client', 'operation'], {}), '(client, operation)\n', (1651, 1670), False, 'from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation, exec_create\n'), ((1919, 1952), 'dockerpty.pty.PseudoTerminal', 'PseudoTerminal', 
(['client', 'operation'], {}), '(client, operation)\n', (1933, 1952), False, 'from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation, exec_create\n')]
import numpy as np from astroquery.hitran import Hitran from astropy import units as un from astropy.constants import c, k_B, h, u def calc_solid_angle(radius,distance): ''' Convenience function to calculate solid angle from radius and distance, assuming a disk shape. Parameters ---------- radius : float radius value in AU distance : float distance value in parsec Returns ---------- solid angle : float solid angle in steradians ''' return np.pi*radius**2./(distance*206265.)**2. def calc_radius(solid_angle,distance): ''' Convenience function to calculate disk radius from solid angle and distance, assuming a disk shape. Parameters ---------- solid_angle : float solid angle value in radians distance : float distance value in parsec Returns ---------- radius : float disk radius in AU ''' return (distance*206265)*np.sqrt(solid_angle/np.pi) def get_molmass(molecule_name,isotopologue_number=1): ''' For a given input molecular formula, return the corresponding molecular mass, in amu Parameters ---------- molecular_formula : str The string describing the molecule. isotopologue_number : int, optional The isotopologue number, from most to least common. 
Returns ------- mu : float Molecular mass in amu ''' mol_isot_code=molecule_name+'_'+str(isotopologue_number) #https://hitran.org/docs/iso-meta/ mass = { 'H2O_1':18.010565, 'H2O_2':20.014811, 'H2O_3':19.01478, 'H2O_4':19.01674, 'H2O_5':21.020985, 'H2O_6':20.020956, 'H2O_7':20.022915, 'CO2_1':43.98983,'CO2_2':44.993185,'CO2_3':45.994076,'CO2_4':44.994045, 'CO2_5':46.997431,'CO2_6':45.9974,'CO2_7':47.998322,'CO2_8':46.998291, 'CO2_9':45.998262,'CO2_10':49.001675,'CO2_11':48.001646,'CO2_12':47.0016182378, 'O3_1':47.984745,'O3_2':49.988991,'O3_3':49.988991,'O3_4':48.98896,'O3_5':48.98896, 'N2O_1':44.001062,'N2O_2':44.998096,'N2O_3':44.998096,'N2O_4':46.005308,'N2O_5':45.005278, 'CO_1':27.994915,'CO_2':28.99827,'CO_3':29.999161,'CO_4':28.99913,'CO_5':31.002516,'CO_6':30.002485, 'CH4_1':16.0313,'CH4_2':17.034655,'CH4_3':17.037475,'CH4_4':18.04083, 'O2_1':31.98983,'O2_2':33.994076,'O2_3':32.994045, 'NO_1':29.997989,'NO_2':30.995023,'NO_3':32.002234, 'SO2_1':63.961901,'SO2_2':65.957695, 'NO2_1':45.992904,'NO2_2':46.989938, 'NH3_1':17.026549,'NH3_2':18.023583, 'HNO3_1':62.995644,'HNO3_2':63.99268, 'OH_1':17.00274,'OH_2':19.006986,'OH_3':18.008915, 'HF_1':20.006229,'HF_2':21.012404, 'HCl_1':35.976678,'HCl_2':37.973729,'HCl_3':36.982853,'HCl_4':38.979904, 'HBr_1':79.92616,'HBr_2':81.924115,'HBr_3':80.932336,'HBr_4':82.930289, 'HI_1':127.912297,'HI_2':128.918472, 'ClO_1':50.963768,'ClO_2':52.960819, 'OCS_1':59.966986,'OCS_2':61.96278,'OCS_3':60.970341,'OCS_4':60.966371,'OCS_5':61.971231, 'OCS_6':62.966136, 'H2CO_1':30.010565,'H2CO_2':31.01392,'H2CO_3':32.014811, 'HOCl_1':51.971593,'HOCl_2':53.968644, 'N2_1':28.006148,'N2_2':29.003182, 'HCN_1':27.010899,'HCN_2':28.014254,'HCN_3':28.007933, 'CH3Cl_1':49.992328,'CH3CL_2':51.989379, 'H2O2_1':34.00548, 'C2H2_1':26.01565,'C2H2_2':27.019005,'C2H2_3':27.021825, 'C2H6_1':30.04695,'C2H6_2':31.050305, 'PH3_1':33.997238, 'COF2_1':65.991722,'COF2_2':66.995083, 'SF6_1':145.962492, 'H2S_1':33.987721,'H2S_2':35.983515,'H2S_3':34.987105, 
'HCOOH_1':46.00548, 'HO2_1':32.997655, 'O_1':15.994915, 'ClONO2_1':96.956672,'ClONO2_2':98.953723, 'NO+_1':29.997989, 'HOBr_1':95.921076,'HOBr_2':97.919027, 'C2H4_1':28.0313,'C2H4_2':29.034655, 'CH3OH_1':32.026215, 'CH3Br_1':93.941811,'CH3Br_2':95.939764, 'CH3CN_1':41.026549, 'CF4_1':87.993616, 'C4H2_1':50.01565, 'HC3N_1':51.010899, 'H2_1':2.01565,'H2_2':3.021825, 'CS_1':43.971036,'CS_2':45.966787,'CS_3':44.974368,'CS_4':44.970399, 'SO3_1':79.95682, 'C2N2_1':52.006148, 'COCl2_1':97.9326199796,'COCl2_2':99.9296698896, 'CS2_1':75.94414,'CS2_2':77.93994,'CS2_3':76.943256,'CS2_4':76.947495} return mass[mol_isot_code]
[ "numpy.sqrt" ]
[((945, 973), 'numpy.sqrt', 'np.sqrt', (['(solid_angle / np.pi)'], {}), '(solid_angle / np.pi)\n', (952, 973), True, 'import numpy as np\n')]
from random import shuffle#自带洗牌方法 from copy import deepcopy class Solution(object): def __init__(self, nums): """ :type nums: List[int] :type size: int """ self.nums=nums def reset(self): """ Resets the array to its original configuration and return it. :rtype: List[int] """ return self.nums def shuffle(self): """ Returns a random shuffling of the array. :rtype: List[int] """ nums=deepcopy(self.nums)#导入deepcopy来拷贝引用对象,不然直接用的话会影响reset的输出 shuffle(nums) return nums
[ "random.shuffle", "copy.deepcopy" ]
[((537, 556), 'copy.deepcopy', 'deepcopy', (['self.nums'], {}), '(self.nums)\n', (545, 556), False, 'from copy import deepcopy\n'), ((602, 615), 'random.shuffle', 'shuffle', (['nums'], {}), '(nums)\n', (609, 615), False, 'from random import shuffle\n')]
# Copyright (c) Facebook, Inc. and its affiliates. import numpy as np from termcolor import colored import logging import torch.nn as nn import torch.utils.data log = logging.getLogger(__name__) import torch import numpy as np import math class Dataset(torch.utils.data.Dataset): def __init__(self, x, y): self.dataset = [ (torch.FloatTensor(x[i]), torch.FloatTensor(y[i])) for i in range(len(x)) ] def __len__(self): return len(self.dataset) def __getitem__(self, idx): return self.dataset[idx] class Dynamics(nn.Module): def __init__(self,env): super(Dynamics, self).__init__() self.env=env self.dt = env.dt self.model_cfg = {} self.model_cfg['device'] = 'cpu' self.model_cfg['hidden_size'] = [100, 30] self.model_cfg['batch_size'] = 128 self.model_cfg['epochs'] = 500 self.model_cfg['display_epoch'] = 50 self.model_cfg['learning_rate'] = 0.001 self.model_cfg['ensemble_size'] = 3 self.model_cfg['state_dim'] = env.state_dim self.model_cfg['action_dim'] = env.action_dim self.model_cfg['output_dim'] = env.pos_dim self.ensemble = EnsembleProbabilisticModel(self.model_cfg) self.data_X = [] self.data_Y = [] self.norm_in = torch.Tensor(np.expand_dims(np.array([1.0,1.0,8.0,8.0,1.0,1.0]),axis=0)) def train(self,states,actions): inputs = (torch.cat((states[:-1],actions),dim=1)/self.norm_in).detach().numpy() outputs = (states[1:,self.env.pos_dim:] - states[:-1,self.env.pos_dim:]).detach().numpy() self.data_X+=list(inputs) self.data_Y+=list(outputs) training_dataset = {} training_dataset['X'] = np.array(self.data_X) training_dataset['Y'] = np.array(self.data_Y) #self.ensemble = EnsembleProbabilisticModel(self.model_cfg) self.ensemble.train_model(training_dataset, training_dataset, 0.0) def step_model(self,state,action): input_x = torch.cat((state,action),dim=0)/self.norm_in pred_acc = self.ensemble.forward(input_x)[0].squeeze() #numerically integrate predicted acceleration to velocity and position pred_vel = state[self.env.pos_dim:]+pred_acc pred_pos = state[:self.env.pos_dim] + 
pred_vel*self.dt pred_pos = torch.clamp(pred_pos, min=-3.0, max=3.0) pred_vel = torch.clamp(pred_vel, min=-4.0, max=4.0) next_state = torch.cat((pred_pos.squeeze(),pred_vel.squeeze()),dim=0) return next_state.squeeze() # I did not make this inherit from nn.Module, because our GP implementation is not torch based class AbstractModel(object): # def forward(self, x): # raise NotImplementedError("Subclass must implement") def train_model(self, training_dataset, testing_dataset, training_params): raise NotImplementedError("Subclass must implement") # function that (if necessary) converts between numpy input x and torch, and returns a prediction in numpy def predict_np(self, x): raise NotImplementedError("Subclass must implement") def get_input_size(self): raise NotImplementedError("Subclass must implement") def get_output_size(self): raise NotImplementedError("Subclass must implement") def get_hyperparameters(self): return None class Dataset(torch.utils.data.Dataset): def __init__(self, x, y): self.dataset = [ (torch.FloatTensor(x[i]), torch.FloatTensor(y[i])) for i in range(len(x)) ] def __len__(self): return len(self.dataset) def __getitem__(self, idx): return self.dataset[idx] # creates K datasets out of X and Y # if N is the total number of data points, then this function splits it in to K subsets. and each dataset contains K-1 # subsets. # so let's say K=5. We create 5 subsets. # Each datasets contains 4 out of the 5 datasets, by leaving out one of the K subsets. 
def split_to_subsets(X, Y, K): if K == 1: # for 1 split, do not resshuffle dataset return [Dataset(X, Y)] n_data = len(X) chunk_sz = int(math.ceil(n_data / K)) all_idx = np.random.permutation(n_data) datasets = [] # each dataset contains for i in range(K): start_idx = i * (chunk_sz) end_idx = min(start_idx + chunk_sz, n_data) dataset_idx = np.delete(all_idx, range(start_idx, end_idx), axis=0) X_subset = [X[idx] for idx in dataset_idx] Y_subset = [Y[idx] for idx in dataset_idx] datasets.append(Dataset(X_subset, Y_subset)) return datasets class NLLLoss(torch.nn.modules.loss._Loss): """ Specialized NLL loss used to predict both mean (the actual function) and the variance of the input data. """ def __init__(self, size_average=None, reduce=None, reduction="mean"): super(NLLLoss, self).__init__(size_average, reduce, reduction) def forward(self, net_output, target): assert net_output.dim() == 3 assert net_output.size(0) == 2 mean = net_output[0] var = net_output[1] reduction = "mean" ret = 0.5 * torch.log(var) + 0.5 * ((mean - target) ** 2) / var # ret = 0.5 * ((mean - target) ** 2) if reduction != "none": ret = torch.mean(ret) if reduction == "mean" else torch.sum(ret) return ret class EnsembleProbabilisticModel(AbstractModel): def __init__(self, model_cfg): super(EnsembleProbabilisticModel, self).__init__() self.input_dimension = model_cfg['state_dim'] + model_cfg['action_dim'] # predicting velocity only (second half of state space) assert model_cfg['state_dim'] % 2 == 0 self.output_dimension = model_cfg['state_dim'] // 2 if model_cfg['device'] == "gpu": self.device = model_cfg['gpu_name'] else: self.device = "cpu" self.ensemble_size = model_cfg['ensemble_size'] self.model_cfg = model_cfg self.reset() def reset(self): self.models = [PModel(self.model_cfg) for _ in range(self.ensemble_size)] def forward(self, x): x = torch.Tensor(x) means = [] variances = [] for eid in range(self.ensemble_size): mean_and_var = self.models[eid](x) means.append(mean_and_var[0]) 
variances.append(mean_and_var[1]) mean = sum(means) / len(means) dum = torch.zeros_like(variances[0]) for i in range(len(means)): dum_var2 = variances[i] dum_mean2 = means[i] * means[i] dum += dum_var2 + dum_mean2 var = (dum / len(means)) - (mean * mean) # Clipping the variance to a minimum of 1e-3, we can interpret this as saying weexpect a minimum # level of noise # the clipping here is probably not necessary anymore because we're now clipping at the individual model level var = var.clamp_min(1e-3) return torch.stack((mean, var)) def predict_np(self, x_np): x = torch.Tensor(x_np) pred = self.forward(x).detach().cpu().numpy() return pred[0].squeeze(), pred[1].squeeze() def train_model(self, training_dataset, testing_dataset, training_params): X = training_dataset["X"] Y = training_dataset["Y"] datasets = split_to_subsets(X, Y, self.ensemble_size) for m in range(self.ensemble_size): print(colored("training model={}".format(m), "green")) self.models[m].train_model(datasets[m]) def get_gradient(self, x_np): x = torch.Tensor(x_np).requires_grad_() output_mean, _ = self.forward(x) gradients = [] # get gradients of ENN with respect to x and u for output_dim in range(self.output_dimension): grads = torch.autograd.grad( output_mean[0, output_dim], x, create_graph=True )[0].data gradients.append(grads.detach().cpu().numpy()[0, :]) return np.array(gradients).reshape( [self.output_dimension, self.input_dimension] ) def get_input_size(self): return self.input_dimension def get_output_size(self): return self.output_dimension def get_hyper_params(self): return None class PModel(nn.Module): """ Probabilistic network Output a 3d tensor: d0 : always 2, first element is mean and second element is variance d1 : batch size d2 : output size (number of dimensions in the output of the modeled function) """ def __init__(self, config): super(PModel, self).__init__() if config["device"] == "gpu": self.device = config["gpu_name"] else: self.device = "cpu" self.input_sz = config['state_dim'] 
+ config['action_dim'] self.output_sz = config['output_dim'] self.learning_rate = config["learning_rate"] self.display_epoch = config["display_epoch"] self.epochs = config["epochs"] w = config["hidden_size"] self.layers = nn.Sequential( nn.Linear(self.input_sz, w[0]), nn.Tanh(), nn.Linear(w[0], w[1]), nn.Tanh(), ) self.mean = nn.Linear(w[1], self.output_sz) self.var = nn.Sequential(nn.Linear(w[1], self.output_sz), nn.Softplus()) self.to(self.device) def forward(self, x): x = x.to(device=self.device) assert x.dim() == 2, "Expected 2 dimensional input, got {}".format(x.dim()) assert x.size(1) == self.input_sz y = self.layers(x) mean_p = self.mean(y) var_p = self.var(y) # Clipping the variance to a minimum of 1e-3, we can interpret this as saying weexpect a minimum # level of noise var_p = var_p.clamp_min(1e-3) return torch.stack((mean_p, var_p)) def predict_np(self, x_np): x = torch.Tensor(x_np) pred = self.forward(x).detach().cpu().numpy() return pred[0].squeeze(), pred[1].squeeze() def train_model(self, training_data): train_loader = torch.utils.data.DataLoader( training_data, batch_size=64, num_workers=0 ) optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate) loss_fn = NLLLoss() for epoch in range(self.epochs): losses = [] for batch, (data, target) in enumerate( train_loader, 1 ): # This is the training loader x = data.type(torch.FloatTensor).to(device=self.device) y = target.type(torch.FloatTensor).to(device=self.device) if x.dim() == 1: x = x.unsqueeze(0).t() if y.dim() == 1: y = y.unsqueeze(0).t() py = self.forward(x) loss = loss_fn(py, y) optimizer.zero_grad() loss.backward() optimizer.step() losses.append(loss.item()) if epoch % self.display_epoch == 0: print( colored( "epoch={}, loss={}".format(epoch, np.mean(losses)), "yellow" ) )
[ "logging.getLogger", "torch.nn.Tanh", "numpy.array", "torch.sum", "numpy.mean", "torch.mean", "torch.zeros_like", "numpy.random.permutation", "torch.Tensor", "torch.autograd.grad", "torch.cat", "torch.clamp", "torch.nn.Softplus", "math.ceil", "torch.log", "torch.stack", "torch.nn.Lin...
[((169, 196), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (186, 196), False, 'import logging\n'), ((4210, 4239), 'numpy.random.permutation', 'np.random.permutation', (['n_data'], {}), '(n_data)\n', (4231, 4239), True, 'import numpy as np\n'), ((1774, 1795), 'numpy.array', 'np.array', (['self.data_X'], {}), '(self.data_X)\n', (1782, 1795), True, 'import numpy as np\n'), ((1828, 1849), 'numpy.array', 'np.array', (['self.data_Y'], {}), '(self.data_Y)\n', (1836, 1849), True, 'import numpy as np\n'), ((2376, 2416), 'torch.clamp', 'torch.clamp', (['pred_pos'], {'min': '(-3.0)', 'max': '(3.0)'}), '(pred_pos, min=-3.0, max=3.0)\n', (2387, 2416), False, 'import torch\n'), ((2436, 2476), 'torch.clamp', 'torch.clamp', (['pred_vel'], {'min': '(-4.0)', 'max': '(4.0)'}), '(pred_vel, min=-4.0, max=4.0)\n', (2447, 2476), False, 'import torch\n'), ((4173, 4194), 'math.ceil', 'math.ceil', (['(n_data / K)'], {}), '(n_data / K)\n', (4182, 4194), False, 'import math\n'), ((6203, 6218), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (6215, 6218), False, 'import torch\n'), ((6496, 6526), 'torch.zeros_like', 'torch.zeros_like', (['variances[0]'], {}), '(variances[0])\n', (6512, 6526), False, 'import torch\n'), ((7031, 7055), 'torch.stack', 'torch.stack', (['(mean, var)'], {}), '((mean, var))\n', (7042, 7055), False, 'import torch\n'), ((7101, 7119), 'torch.Tensor', 'torch.Tensor', (['x_np'], {}), '(x_np)\n', (7113, 7119), False, 'import torch\n'), ((9311, 9342), 'torch.nn.Linear', 'nn.Linear', (['w[1]', 'self.output_sz'], {}), '(w[1], self.output_sz)\n', (9320, 9342), True, 'import torch.nn as nn\n'), ((9911, 9939), 'torch.stack', 'torch.stack', (['(mean_p, var_p)'], {}), '((mean_p, var_p))\n', (9922, 9939), False, 'import torch\n'), ((9985, 10003), 'torch.Tensor', 'torch.Tensor', (['x_np'], {}), '(x_np)\n', (9997, 10003), False, 'import torch\n'), ((10176, 10248), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['training_data'], 
{'batch_size': '(64)', 'num_workers': '(0)'}), '(training_data, batch_size=64, num_workers=0)\n', (10203, 10248), False, 'import torch\n'), ((2052, 2085), 'torch.cat', 'torch.cat', (['(state, action)'], {'dim': '(0)'}), '((state, action), dim=0)\n', (2061, 2085), False, 'import torch\n'), ((9167, 9197), 'torch.nn.Linear', 'nn.Linear', (['self.input_sz', 'w[0]'], {}), '(self.input_sz, w[0])\n', (9176, 9197), True, 'import torch.nn as nn\n'), ((9211, 9220), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (9218, 9220), True, 'import torch.nn as nn\n'), ((9234, 9255), 'torch.nn.Linear', 'nn.Linear', (['w[0]', 'w[1]'], {}), '(w[0], w[1])\n', (9243, 9255), True, 'import torch.nn as nn\n'), ((9269, 9278), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (9276, 9278), True, 'import torch.nn as nn\n'), ((9376, 9407), 'torch.nn.Linear', 'nn.Linear', (['w[1]', 'self.output_sz'], {}), '(w[1], self.output_sz)\n', (9385, 9407), True, 'import torch.nn as nn\n'), ((9409, 9422), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (9420, 9422), True, 'import torch.nn as nn\n'), ((353, 376), 'torch.FloatTensor', 'torch.FloatTensor', (['x[i]'], {}), '(x[i])\n', (370, 376), False, 'import torch\n'), ((378, 401), 'torch.FloatTensor', 'torch.FloatTensor', (['y[i]'], {}), '(y[i])\n', (395, 401), False, 'import torch\n'), ((1370, 1410), 'numpy.array', 'np.array', (['[1.0, 1.0, 8.0, 8.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 8.0, 8.0, 1.0, 1.0])\n', (1378, 1410), True, 'import numpy as np\n'), ((3505, 3528), 'torch.FloatTensor', 'torch.FloatTensor', (['x[i]'], {}), '(x[i])\n', (3522, 3528), False, 'import torch\n'), ((3530, 3553), 'torch.FloatTensor', 'torch.FloatTensor', (['y[i]'], {}), '(y[i])\n', (3547, 3553), False, 'import torch\n'), ((5190, 5204), 'torch.log', 'torch.log', (['var'], {}), '(var)\n', (5199, 5204), False, 'import torch\n'), ((5338, 5353), 'torch.mean', 'torch.mean', (['ret'], {}), '(ret)\n', (5348, 5353), False, 'import torch\n'), ((5382, 5396), 'torch.sum', 'torch.sum', (['ret'], 
{}), '(ret)\n', (5391, 5396), False, 'import torch\n'), ((7649, 7667), 'torch.Tensor', 'torch.Tensor', (['x_np'], {}), '(x_np)\n', (7661, 7667), False, 'import torch\n'), ((8069, 8088), 'numpy.array', 'np.array', (['gradients'], {}), '(gradients)\n', (8077, 8088), True, 'import numpy as np\n'), ((7880, 7949), 'torch.autograd.grad', 'torch.autograd.grad', (['output_mean[0, output_dim]', 'x'], {'create_graph': '(True)'}), '(output_mean[0, output_dim], x, create_graph=True)\n', (7899, 7949), False, 'import torch\n'), ((1473, 1513), 'torch.cat', 'torch.cat', (['(states[:-1], actions)'], {'dim': '(1)'}), '((states[:-1], actions), dim=1)\n', (1482, 1513), False, 'import torch\n'), ((11253, 11268), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (11260, 11268), True, 'import numpy as np\n')]
import unittest import os import sys import argparse import numpy as np import audacity as aud print('Module file:') print(aud.__file__) SCRIPT_DIR = os.path.split(os.path.realpath(__file__))[0] PACKAGE_DIR = os.path.realpath(os.path.join(SCRIPT_DIR,'..')) DATA_DIR = os.path.join(PACKAGE_DIR, 'data') TEST_FILE_1 = os.path.join(DATA_DIR, 'test-1.aup') class testReader(unittest.TestCase): TEST_FILE = TEST_FILE_1 def test_read_data_is_2d(self): filename = self.TEST_FILE print('Audio file:') print(filename) au = aud.Aup(filename) data = au.get_data() assert len(data.shape) == 2 def test_read_channels_have_same_length(self): filename = self.TEST_FILE au = aud.Aup(filename) data = au.get_data() for ii in range(au.nchannels-1): assert len(data[ii]) == len(data[ii+1]) def test_nsample_getter_same_as_data(self): filename = self.TEST_FILE au = aud.Aup(filename) lens = au.get_channel_nsamples() for ii, ll in enumerate(lens): self.assertEqual(len(au.get_channel_data(ii)), ll) def test_single_file_len_is_right(self): filename = self.TEST_FILE au = aud.Aup(filename) chno = 0 au.open(chno) for f, data in zip(au.files[chno], au.read()): self.assertEqual(f[2]-f[1], len(data)/4) def main(): global test_file parser = argparse.ArgumentParser() parser.add_argument('--input', default='') parser.add_argument('unittest_args', nargs='*') args = parser.parse_args() # TODO: Go do something with args.input and args.filename # Now set the sys.argv to the unittest_args (leaving sys.argv[0] alone) sys.argv[1:] = args.unittest_args if args.input: print('Changing audio file to '+args.input) testReader.TEST_FILE = args.input unittest.main() if __name__ == '__main__': main()
[ "audacity.Aup", "argparse.ArgumentParser", "os.path.join", "os.path.realpath", "unittest.main" ]
[((271, 304), 'os.path.join', 'os.path.join', (['PACKAGE_DIR', '"""data"""'], {}), "(PACKAGE_DIR, 'data')\n", (283, 304), False, 'import os\n'), ((320, 356), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""test-1.aup"""'], {}), "(DATA_DIR, 'test-1.aup')\n", (332, 356), False, 'import os\n'), ((229, 259), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', '""".."""'], {}), "(SCRIPT_DIR, '..')\n", (241, 259), False, 'import os\n'), ((1447, 1472), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1470, 1472), False, 'import argparse\n'), ((1900, 1915), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1913, 1915), False, 'import unittest\n'), ((167, 193), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (183, 193), False, 'import os\n'), ((561, 578), 'audacity.Aup', 'aud.Aup', (['filename'], {}), '(filename)\n', (568, 578), True, 'import audacity as aud\n'), ((743, 760), 'audacity.Aup', 'aud.Aup', (['filename'], {}), '(filename)\n', (750, 760), True, 'import audacity as aud\n'), ((979, 996), 'audacity.Aup', 'aud.Aup', (['filename'], {}), '(filename)\n', (986, 996), True, 'import audacity as aud\n'), ((1233, 1250), 'audacity.Aup', 'aud.Aup', (['filename'], {}), '(filename)\n', (1240, 1250), True, 'import audacity as aud\n')]
# -*- coding: utf-8 -*- """ # @Time : 24/10/18 2:40 PM # @Author : <NAME> # @FileName: plot_result.py # @Software: PyCharm # @Github : https://github.com/hzm2016 """ import collections import matplotlib.pyplot as plt import numpy as np import pickle import copy as cp from baselines.deepq.assembly.src.value_functions import * """=================================Plot result=====================================""" # YLABEL = ['$F_x(N)$', '$F_y(N)$', '$F_z(N)$', '$M_x(Nm)$', '$M_y(Nm)$', '$M_z(Nm)$'] YLABEL = ['$F_x$(N)', '$F_y$(N)', '$F_z$(N)', '$M_x$(Nm)', '$M_y$(Nm)', '$M_z$(Nm)'] Title = ["X axis force", "Y axis force", "Z axis force", "X axis moment", "Y axis moment", "Z axis moment"] High = np.array([40, 40, 0, 5, 5, 5, 542, -36, 188, 5, 5, 5]) Low = np.array([-40, -40, -40, -5, -5, -5, 538, -42, 192, -5, -5, -5]) scale = np.array([40, 40, 40, 5, 5, 5]) """=================================================================================""" plt.rcParams['font.sans-serif']=['SimHei'] plt.rcParams['axes.unicode_minus']=False def plot(result_path): plt.figure(figsize=(15, 15), dpi=100) plt.title('Search Result') prediction_result = np.load(result_path) for i in range(len(prediction_result)): for j in range(6): line = prediction_result[:, j] # plt.subplot(2, 3, j+1) plt.plot(line) plt.ylabel(YLABEL[j]) plt.xlabel('steps') plt.legend(YLABEL) plt.show() def plot_force_and_moment(path_2, path_3): V_force = np.load(path_2) V_state = np.load(path_3) plt.figure(figsize=(15, 10), dpi=100) plt.title("Search Result of Force", fontsize=20) plt.plot(V_force[:100]) plt.xlabel("Steps", fontsize=20) plt.ylabel("F(N)", fontsize=20) plt.legend(labels=['Fx', 'Fy', 'Fz', 'Mx', 'My', 'Mz'], loc='best', fontsize=20) plt.xticks(fontsize=15) plt.yticks(fontsize=15) plt.figure(figsize=(15, 10), dpi=100) plt.title("Search Result of State", fontsize=20) plt.plot(V_state[:100] - [539.88427, -38.68679, 190.03184, 179.88444, 1.30539, 0.21414]) plt.xlabel("Steps", fontsize=20) 
plt.ylabel("Coordinate", fontsize=20) plt.legend(labels=['x', 'y', 'z', 'rx', 'ry', 'rz'], loc='best', fontsize=20) plt.xticks(fontsize=15) plt.yticks(fontsize=15) plt.show() def plot_reward(reward_path): reward = np.load(reward_path) print(reward[0]) plt.figure(figsize=(15, 15), dpi=100) plt.title('Episode Reward') plt.plot(np.arange(len(reward) - 1), np.array(reward[1:])) plt.ylabel('Episode Reward') plt.xlabel('Episodes') plt.show() def plot_raw_data(path_1): data = np.load(path_1) force_m = np.zeros((len(data), 12)) plt.figure(figsize=(20, 20), dpi=100) plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0) plt.subplots_adjust(left=0.065, bottom=0.1, right=0.995, top=0.9, wspace=0.2, hspace=0.2) plt.title("True Data") for j in range(len(data)): force_m[j] = data[j, 0] k = -1 for i in range(len(data)): if data[i, 1] == 0: print("===========================================") line = force_m[k+1:i+1] print(line) k = i for j in range(6): plt.subplot(2, 3, j + 1) plt.plot(line[:, j]) # plt.plot(line[:, 0]) if j == 1: plt.ylabel(YLABEL[j], fontsize=17.5) plt.xlabel('steps', fontsize=20) plt.xticks(fontsize=15) plt.yticks(fontsize=15) else: plt.ylabel(YLABEL[j], fontsize=20) plt.xlabel('steps', fontsize=20) plt.xticks(fontsize=15) plt.yticks(fontsize=15) i += 1 def plot_continuous_data(path): raw_data = np.load(path) plt.figure(figsize=(20, 15)) plt.title('Episode Reward') plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0) plt.subplots_adjust(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23, hspace=0.22) # plt.subplots_adjust(left=0.065, bottom=0.1, right=0.995, top=0.9, wspace=0.2, hspace=0.2) data = np.zeros((len(raw_data), 12)) for j in range(len(raw_data)): data[j] = raw_data[j, 0] for j in range(6): plt.subplot(2, 3, j + 1) plt.plot(data[:, j]*scale[j], linewidth=2.5) # plt.ylabel(YLABEL[j], fontsize=18) if j>2: plt.xlabel('steps', fontsize=30) plt.title(YLABEL[j], fontsize=30) plt.xticks(fontsize=25) plt.yticks(fontsize=25) # plt.subplots_adjust(left=0.1, bottom=0.1, 
right=0.9, top=0.9, wspace=0.4, hspace=0.2) plt.savefig('raw_data.pdf') plt.show() def compute_true_return(path): raw_data = np.load(path) # print(raw_data) clock = 0 G = 0. past_gammas = [] past_cumulants = [] all_G = [] for i in range(len(raw_data)): observation, action, done, action_probability = raw_data[i] if done == False: gamma = 0.99 else: gamma = 0. past_gammas.append(gamma) past_cumulants.append(1) if done == False: clock += 1 G = 0 all_G.append(cp.deepcopy(G)) else: print('clock', clock) for j in reversed(range(0, clock + 1)): G *= past_gammas[j] G += past_cumulants[j] all_G.append(cp.deepcopy(G)) clock = 0 past_cumulants = [] past_gammas = [] print(len(raw_data)) plt.figure(figsize=(20, 15)) plt.plot(all_G[300:400]) plt.show() return all_G # Plot the true prediction and true value def plot_different_gamma_data(path): f = open(path, 'rb') titles = ['$\gamma = 0.4$', '$\gamma = 0.8$', '$\gamma = 0.96$', '$\gamma =1.0$'] # true_data = compute_true_return('prediction_result_different_gamma.npy') # f = open('../data/learning_result_policy', 'rb') # plot_value_functions = ['Move down Fy', 'Move down Fx', 'Move down Fz', 'Move down Mx', 'Move down My', 'Move down Mz'] plot_value_functions = ['Move down step', 'Move down step 2', 'Move down step 3', 'Move down step 4'] # plot_value_functions = ['Move down Fx', 'Move down Fx 1', 'Move down Fx 2', 'Move down Fx 3'] raw_data = pickle.load(f) plt.figure(figsize=(20, 15)) plt.tight_layout(pad=3, w_pad=1., h_pad=0.5) plt.subplots_adjust(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23, hspace=0.23) # legend = sorted([key for key in plot_value_functions.keys()]) # print(legend) # print(value_functions.keys()) for j, key in enumerate(plot_value_functions): plt.subplot(2, 2, j + 1) # print(list(raw_data[('GTD(1)', 'Hindsight Error')][key])) # plt.plot(np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[:], linewidth=2.5) # plt.plot(true_data[300:]) plt.plot(np.array(raw_data[('GTD(0)', 'UDE')][key])[600:], 
linewidth=2.75) # print('true value', np.array(raw_data[('GTD(0)', 'UDE')][key])[300:400]) # plt.plot(np.array(raw_data[('GTD(0)', 'TD Error')][key])[600:], linewidth=2.5) # print('old prediction', np.array(raw_data[('GTD(0)', 'TD Error')][key])[300:400]) plt.plot(np.array(raw_data[('GTD(0)', 'Prediction')][key])[600:], linewidth=2.75) # plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[300:] - np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[300:], linewidth=2.5) # plt.legend('True value', 'Prediction value') plt.title(titles[j], fontsize=30) if j > 1: plt.xlabel('steps', fontsize=30) plt.ylabel('Number of steps', fontsize=30) plt.xticks(fontsize=25) plt.yticks(fontsize=25) # plt.savefig('different_gamma.pdf') plt.show() # Plot the true prediction and true value def chinese_plot_different_gamma_data(path): f = open(path, 'rb') titles = ['$\gamma = 0.4$', '$\gamma = 0.8$', '$\gamma = 0.96$', '$\gamma =1.0$'] # true_data = compute_true_return('prediction_result_different_gamma.npy') # f = open('../data/learning_result_policy', 'rb') # plot_value_functions = ['Move down Fy', 'Move down Fx', 'Move down Fz', 'Move down Mx', 'Move down My', 'Move down Mz'] plot_value_functions = ['Move down step', 'Move down step 2', 'Move down step 3', 'Move down step 4'] # plot_value_functions = ['Move down Fx', 'Move down Fx 1', 'Move down Fx 2', 'Move down Fx 3'] raw_data = pickle.load(f) plt.figure(figsize=(20, 12), dpi=1000) plt.tight_layout(pad=3, w_pad=1., h_pad=0.5) plt.subplots_adjust(left=0.08, bottom=0.12, right=0.98, top=0.95, wspace=0.23, hspace=0.33) # legend = sorted([key for key in plot_value_functions.keys()]) # print(legend) # print(value_functions.keys()) for j, key in enumerate(plot_value_functions): plt.subplot(2, 2, j + 1) # print(list(raw_data[('GTD(1)', 'Hindsight Error')][key])) # plt.plot(np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[:], linewidth=2.5) # plt.plot(true_data[300:]) plt.plot(np.array(raw_data[('GTD(0)', 
'UDE')][key])[600:], linewidth=2.75) # print('true value', np.array(raw_data[('GTD(0)', 'UDE')][key])[300:400]) # plt.plot(np.array(raw_data[('GTD(0)', 'TD Error')][key])[600:], linewidth=2.5) # print('old prediction', np.array(raw_data[('GTD(0)', 'TD Error')][key])[300:400]) plt.plot(np.array(raw_data[('GTD(0)', 'Prediction')][key])[600:], linewidth=2.75) # plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[300:] - np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[300:], linewidth=2.5) # plt.legend('True value', 'Prediction value') plt.title(titles[j], fontsize=36) if j > 1: plt.xlabel('搜索步数', fontsize=36) plt.ylabel('预测周期', fontsize=36) plt.xticks([0, 50, 100, 150, 200], fontsize=36) plt.yticks(fontsize=36) plt.savefig('./figure/pdf/chinese_different_gamma.pdf') # plt.show() def chinese_plot_compare_raw_data(path1, path2): raw_data = np.load(path1) raw_data_1 = np.load(path2) plt.figure(figsize=(20, 12), dpi=1000) plt.title('Episode Reward') plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0) plt.subplots_adjust(left=0.08, bottom=0.08, right=0.98, top=0.95, wspace=0.33, hspace=0.15) data = np.zeros((len(raw_data), 12)) for j in range(len(raw_data)): data[j] = raw_data[j, 0] data_1 = np.zeros((len(raw_data_1), 12)) for j in range(len(raw_data_1)): data_1[j] = raw_data_1[j, 0] for j in range(6): plt.subplot(2, 3, j + 1) plt.plot(data[:100, j], linewidth=2.5, color='r', linestyle='--') plt.plot(data_1[:100, j], linewidth=2.5, color='b') # plt.ylabel(YLABEL[j], fontsize=18) if j>2: plt.xlabel('搜索步数', fontsize=38) plt.title(YLABEL[j], fontsize=38) plt.xticks(fontsize=38) plt.yticks(fontsize=38) # plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.4, hspace=0.2) plt.savefig('./figure/pdf/chinese_raw_data.pdf') # plt.show() # Plot the true prediction and true value def chinese_plot_different_policy_data(path, name): f = open(path, 'rb') # true_data = compute_true_return('prediction_result_different_gamma.npy') # f = 
open('../data/learning_result_policy', 'rb') plot_value_functions = ['Move down Fx', 'Move down Fy', 'Move down Fz', 'Move down Mx', 'Move down My', 'Move down Mz'] # plot_value_functions = ['Move down step', 'Move down step 2', 'Move down step 3', 'Move down step 4'] # plot_value_functions = ['Move down Fx', 'Move down Fx 1', 'Move down Fx 2', 'Move down Fx 3'] raw_data = pickle.load(f) plt.figure(figsize=(20, 12), dpi=1000) plt.title('Episode Reward') plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0) plt.subplots_adjust(left=0.1, bottom=0.1, right=0.98, top=0.95, wspace=0.33, hspace=0.25) # plt.subplots_adjust(left=0.1, bottom=0.12, right=0.98, top=0.94, wspace=0.23, hspace=0.33) # legend = sorted([key for key in plot_value_functions.keys()]) # print(legend) # print(value_functions.keys()) for j, key in enumerate(plot_value_functions): plt.subplot(2, 3, j + 1) # print(list(raw_data[('GTD(1)', 'Hindsight Error')][key])) # plt.plot(np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[400:]*scale[j], linewidth=2.5) # plt.plot(true_data[300:]) plt.plot(np.array(raw_data[('GTD(1)', 'UDE')][key])[1000:]*scale[j], linewidth=2.5) # print('true value', np.array(raw_data[('GTD(0)', 'UDE')][key])[300:400]) # plt.plot(np.array(raw_data[('GTD(0)', 'TD Error')][key])[600:], linewidth=2.5, color='r') # print('old prediction', np.array(raw_data[('GTD(0)', 'TD Error')][key])[300:400]) plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[1000:]*scale[j], linewidth=2.5) # plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[300:] - np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[300:], linewidth=2.5) # plt.legend('True value', 'Prediction value') plt.title(YLABEL[j], fontsize=38) if j > 2: plt.xlabel('搜索步数', fontsize=38) plt.xticks([0, 50, 100, 150, 200], fontsize=38) plt.yticks(fontsize=38) plt.savefig('./figure/pdf/chinese_' + name +'.pdf') # plt.show() # Plot the true prediction and true value def plot_different_policy_data(path): f = open(path, 'rb') # 
true_data = compute_true_return('prediction_result_different_gamma.npy') # f = open('../data/learning_result_policy', 'rb') plot_value_functions = ['Move down Fx', 'Move down Fy', 'Move down Fz', 'Move down Mx', 'Move down My', 'Move down Mz'] # plot_value_functions = ['Move down step', 'Move down step 2', 'Move down step 3', 'Move down step 4'] # plot_value_functions = ['Move down Fx', 'Move down Fx 1', 'Move down Fx 2', 'Move down Fx 3'] raw_data = pickle.load(f) plt.figure(figsize=(20, 12), dpi=1000) plt.title('Episode Reward') plt.tight_layout(pad=3, w_pad=1.0, h_pad=1.0) plt.subplots_adjust(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23, hspace=0.23) # legend = sorted([key for key in plot_value_functions.keys()]) # print(legend) # print(value_functions.keys()) for j, key in enumerate(plot_value_functions): plt.subplot(2, 3, j + 1) # print(list(raw_data[('GTD(1)', 'Hindsight Error')][key])) # plt.plot(np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[400:]*scale[j], linewidth=2.5) # plt.plot(true_data[300:]) plt.plot(np.array(raw_data[('GTD(1)', 'UDE')][key])[1000:]*scale[j], linewidth=2.5) # print('true value', np.array(raw_data[('GTD(0)', 'UDE')][key])[300:400]) # plt.plot(np.array(raw_data[('GTD(0)', 'TD Error')][key])[600:], linewidth=2.5, color='r') # print('old prediction', np.array(raw_data[('GTD(0)', 'TD Error')][key])[300:400]) plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[1000:]*scale[j], linewidth=2.5) # plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[300:] - np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[300:], linewidth=2.5) # plt.legend('True value', 'Prediction value') plt.title(YLABEL[j], fontsize=30) if j > 2: plt.xlabel('steps', fontsize=30) plt.xticks([0, 50, 100, 150, 200], fontsize=25) plt.yticks(fontsize=25) plt.savefig('./figure/pdf/chinese_different_policies_b.pdf') # plt.show() if __name__ == "__main__": # force = np.load('./search_force.npy') # state = np.load('./search_state.npy') # 
print(np.max(force, axis=0)) # print(np.min(force, axis=0)) # print(np.max(state, axis=0)) # print(np.min(state, axis=0)) # plot('./search_state.npy') # plot('./search_force.npy') # plot_reward('./episode_rewards.npy') # data = np.load('prediction_result.npy') # print(data[:, 2]) # plot_continuous_data('prediction_result_different_gamma_six_force.npy') # f = open('../data/learning_result', 'rb') # y = pickle.load(f) # data = y[('GTD(1)', 'Hindsight Error')]['Move down Fz'] # print(data) # plt.figure(figsize=(15, 15), dpi=100) # plt.title('Search Result') # # plt.plot(data) # plt.ylabel(YLABEL[0]) # plt.xlabel('steps') # plt.legend(YLABEL) # plt.show() # compute_true_return('prediction_result_different_gamma.npy') # plot_true_data('learning_result_six_force_gamma_0.9') # plot_true_data('learning_result_different_gamma') # plot_different_gamma_data('learning_result_different_policy') """=============================== plot different policy ===================================== """ # plot_different_policy_data('learning_result_six_force_gamma_0.9') # chinese_plot_different_policy_data('learning_result_six_force_gamma_0.9') # plot_different_policy_data('learning_result_different_policy_new_3') chinese_plot_different_policy_data('learning_result_different_policy_new_3', 'off_policy_3') # chinese_plot_different_policy_data('learning_result_different_policy') # chinese_plot_different_policy_data('learning_result_different_policy') """=============================== plot different gamma ======================================== """ # plot_different_gamma_data('learning_result_different_gamma_new') # chinese_plot_different_gamma_data('learning_result_different_gamma_new')
[ "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "pickle.load", "numpy.array", "matplotlib.pyplot.figure", "matplotlib.pyplot.yticks", "matplotlib.pyplot.tight_layout", ...
[((719, 773), 'numpy.array', 'np.array', (['[40, 40, 0, 5, 5, 5, 542, -36, 188, 5, 5, 5]'], {}), '([40, 40, 0, 5, 5, 5, 542, -36, 188, 5, 5, 5])\n', (727, 773), True, 'import numpy as np\n'), ((780, 844), 'numpy.array', 'np.array', (['[-40, -40, -40, -5, -5, -5, 538, -42, 192, -5, -5, -5]'], {}), '([-40, -40, -40, -5, -5, -5, 538, -42, 192, -5, -5, -5])\n', (788, 844), True, 'import numpy as np\n'), ((853, 884), 'numpy.array', 'np.array', (['[40, 40, 40, 5, 5, 5]'], {}), '([40, 40, 40, 5, 5, 5])\n', (861, 884), True, 'import numpy as np\n'), ((1086, 1123), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)', 'dpi': '(100)'}), '(figsize=(15, 15), dpi=100)\n', (1096, 1123), True, 'import matplotlib.pyplot as plt\n'), ((1128, 1154), 'matplotlib.pyplot.title', 'plt.title', (['"""Search Result"""'], {}), "('Search Result')\n", (1137, 1154), True, 'import matplotlib.pyplot as plt\n'), ((1179, 1199), 'numpy.load', 'np.load', (['result_path'], {}), '(result_path)\n', (1186, 1199), True, 'import numpy as np\n'), ((1479, 1489), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1487, 1489), True, 'import matplotlib.pyplot as plt\n'), ((1550, 1565), 'numpy.load', 'np.load', (['path_2'], {}), '(path_2)\n', (1557, 1565), True, 'import numpy as np\n'), ((1580, 1595), 'numpy.load', 'np.load', (['path_3'], {}), '(path_3)\n', (1587, 1595), True, 'import numpy as np\n'), ((1601, 1638), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)', 'dpi': '(100)'}), '(figsize=(15, 10), dpi=100)\n', (1611, 1638), True, 'import matplotlib.pyplot as plt\n'), ((1643, 1691), 'matplotlib.pyplot.title', 'plt.title', (['"""Search Result of Force"""'], {'fontsize': '(20)'}), "('Search Result of Force', fontsize=20)\n", (1652, 1691), True, 'import matplotlib.pyplot as plt\n'), ((1696, 1719), 'matplotlib.pyplot.plot', 'plt.plot', (['V_force[:100]'], {}), '(V_force[:100])\n', (1704, 1719), True, 'import matplotlib.pyplot as plt\n'), ((1724, 1756), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Steps"""'], {'fontsize': '(20)'}), "('Steps', fontsize=20)\n", (1734, 1756), True, 'import matplotlib.pyplot as plt\n'), ((1761, 1792), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F(N)"""'], {'fontsize': '(20)'}), "('F(N)', fontsize=20)\n", (1771, 1792), True, 'import matplotlib.pyplot as plt\n'), ((1797, 1882), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labels': "['Fx', 'Fy', 'Fz', 'Mx', 'My', 'Mz']", 'loc': '"""best"""', 'fontsize': '(20)'}), "(labels=['Fx', 'Fy', 'Fz', 'Mx', 'My', 'Mz'], loc='best', fontsize=20\n )\n", (1807, 1882), True, 'import matplotlib.pyplot as plt\n'), ((1882, 1905), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (1892, 1905), True, 'import matplotlib.pyplot as plt\n'), ((1910, 1933), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (1920, 1933), True, 'import matplotlib.pyplot as plt\n'), ((1939, 1976), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)', 'dpi': '(100)'}), '(figsize=(15, 10), dpi=100)\n', (1949, 1976), True, 'import matplotlib.pyplot as plt\n'), ((1981, 2029), 'matplotlib.pyplot.title', 'plt.title', (['"""Search Result of State"""'], {'fontsize': '(20)'}), "('Search Result of State', fontsize=20)\n", (1990, 2029), True, 'import matplotlib.pyplot as plt\n'), ((2034, 2127), 'matplotlib.pyplot.plot', 'plt.plot', (['(V_state[:100] - [539.88427, -38.68679, 190.03184, 179.88444, 1.30539, 0.21414]\n )'], {}), '(V_state[:100] - [539.88427, -38.68679, 190.03184, 179.88444, \n 1.30539, 0.21414])\n', (2042, 2127), True, 'import matplotlib.pyplot as plt\n'), ((2127, 2159), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Steps"""'], {'fontsize': '(20)'}), "('Steps', fontsize=20)\n", (2137, 2159), True, 'import matplotlib.pyplot as plt\n'), ((2164, 2201), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Coordinate"""'], {'fontsize': '(20)'}), "('Coordinate', fontsize=20)\n", 
(2174, 2201), True, 'import matplotlib.pyplot as plt\n'), ((2206, 2283), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labels': "['x', 'y', 'z', 'rx', 'ry', 'rz']", 'loc': '"""best"""', 'fontsize': '(20)'}), "(labels=['x', 'y', 'z', 'rx', 'ry', 'rz'], loc='best', fontsize=20)\n", (2216, 2283), True, 'import matplotlib.pyplot as plt\n'), ((2288, 2311), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (2298, 2311), True, 'import matplotlib.pyplot as plt\n'), ((2316, 2339), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (2326, 2339), True, 'import matplotlib.pyplot as plt\n'), ((2345, 2355), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2353, 2355), True, 'import matplotlib.pyplot as plt\n'), ((2401, 2421), 'numpy.load', 'np.load', (['reward_path'], {}), '(reward_path)\n', (2408, 2421), True, 'import numpy as np\n'), ((2447, 2484), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)', 'dpi': '(100)'}), '(figsize=(15, 15), dpi=100)\n', (2457, 2484), True, 'import matplotlib.pyplot as plt\n'), ((2489, 2516), 'matplotlib.pyplot.title', 'plt.title', (['"""Episode Reward"""'], {}), "('Episode Reward')\n", (2498, 2516), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2612), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Episode Reward"""'], {}), "('Episode Reward')\n", (2594, 2612), True, 'import matplotlib.pyplot as plt\n'), ((2617, 2639), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episodes"""'], {}), "('Episodes')\n", (2627, 2639), True, 'import matplotlib.pyplot as plt\n'), ((2644, 2654), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2652, 2654), True, 'import matplotlib.pyplot as plt\n'), ((2695, 2710), 'numpy.load', 'np.load', (['path_1'], {}), '(path_1)\n', (2702, 2710), True, 'import numpy as np\n'), ((2756, 2793), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)', 'dpi': '(100)'}), '(figsize=(20, 20), dpi=100)\n', 
(2766, 2793), True, 'import matplotlib.pyplot as plt\n'), ((2798, 2843), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(3)', 'w_pad': '(0.5)', 'h_pad': '(1.0)'}), '(pad=3, w_pad=0.5, h_pad=1.0)\n', (2814, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2848, 2942), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.065)', 'bottom': '(0.1)', 'right': '(0.995)', 'top': '(0.9)', 'wspace': '(0.2)', 'hspace': '(0.2)'}), '(left=0.065, bottom=0.1, right=0.995, top=0.9, wspace=\n 0.2, hspace=0.2)\n', (2867, 2942), True, 'import matplotlib.pyplot as plt\n'), ((2942, 2964), 'matplotlib.pyplot.title', 'plt.title', (['"""True Data"""'], {}), "('True Data')\n", (2951, 2964), True, 'import matplotlib.pyplot as plt\n'), ((3897, 3910), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (3904, 3910), True, 'import numpy as np\n'), ((3915, 3943), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (3925, 3943), True, 'import matplotlib.pyplot as plt\n'), ((3948, 3975), 'matplotlib.pyplot.title', 'plt.title', (['"""Episode Reward"""'], {}), "('Episode Reward')\n", (3957, 3975), True, 'import matplotlib.pyplot as plt\n'), ((3980, 4025), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(3)', 'w_pad': '(0.5)', 'h_pad': '(1.0)'}), '(pad=3, w_pad=0.5, h_pad=1.0)\n', (3996, 4025), True, 'import matplotlib.pyplot as plt\n'), ((4030, 4123), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'bottom': '(0.15)', 'right': '(0.98)', 'top': '(0.9)', 'wspace': '(0.23)', 'hspace': '(0.22)'}), '(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23,\n hspace=0.22)\n', (4049, 4123), True, 'import matplotlib.pyplot as plt\n'), ((4742, 4769), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""raw_data.pdf"""'], {}), "('raw_data.pdf')\n", (4753, 4769), True, 'import matplotlib.pyplot as plt\n'), ((4774, 4784), 'matplotlib.pyplot.show', 'plt.show', ([], 
{}), '()\n', (4782, 4784), True, 'import matplotlib.pyplot as plt\n'), ((4833, 4846), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (4840, 4846), True, 'import numpy as np\n'), ((5652, 5680), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (5662, 5680), True, 'import matplotlib.pyplot as plt\n'), ((5685, 5709), 'matplotlib.pyplot.plot', 'plt.plot', (['all_G[300:400]'], {}), '(all_G[300:400])\n', (5693, 5709), True, 'import matplotlib.pyplot as plt\n'), ((5714, 5724), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5722, 5724), True, 'import matplotlib.pyplot as plt\n'), ((6415, 6429), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6426, 6429), False, 'import pickle\n'), ((6434, 6462), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (6444, 6462), True, 'import matplotlib.pyplot as plt\n'), ((6467, 6512), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(3)', 'w_pad': '(1.0)', 'h_pad': '(0.5)'}), '(pad=3, w_pad=1.0, h_pad=0.5)\n', (6483, 6512), True, 'import matplotlib.pyplot as plt\n'), ((6516, 6609), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'bottom': '(0.15)', 'right': '(0.98)', 'top': '(0.9)', 'wspace': '(0.23)', 'hspace': '(0.23)'}), '(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23,\n hspace=0.23)\n', (6535, 6609), True, 'import matplotlib.pyplot as plt\n'), ((7922, 7932), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7930, 7932), True, 'import matplotlib.pyplot as plt\n'), ((8614, 8628), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8625, 8628), False, 'import pickle\n'), ((8633, 8671), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 12)', 'dpi': '(1000)'}), '(figsize=(20, 12), dpi=1000)\n', (8643, 8671), True, 'import matplotlib.pyplot as plt\n'), ((8676, 8721), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(3)', 
'w_pad': '(1.0)', 'h_pad': '(0.5)'}), '(pad=3, w_pad=1.0, h_pad=0.5)\n', (8692, 8721), True, 'import matplotlib.pyplot as plt\n'), ((8725, 8821), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.08)', 'bottom': '(0.12)', 'right': '(0.98)', 'top': '(0.95)', 'wspace': '(0.23)', 'hspace': '(0.33)'}), '(left=0.08, bottom=0.12, right=0.98, top=0.95, wspace=\n 0.23, hspace=0.33)\n', (8744, 8821), True, 'import matplotlib.pyplot as plt\n'), ((10108, 10163), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./figure/pdf/chinese_different_gamma.pdf"""'], {}), "('./figure/pdf/chinese_different_gamma.pdf')\n", (10119, 10163), True, 'import matplotlib.pyplot as plt\n'), ((10247, 10261), 'numpy.load', 'np.load', (['path1'], {}), '(path1)\n', (10254, 10261), True, 'import numpy as np\n'), ((10279, 10293), 'numpy.load', 'np.load', (['path2'], {}), '(path2)\n', (10286, 10293), True, 'import numpy as np\n'), ((10298, 10336), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 12)', 'dpi': '(1000)'}), '(figsize=(20, 12), dpi=1000)\n', (10308, 10336), True, 'import matplotlib.pyplot as plt\n'), ((10341, 10368), 'matplotlib.pyplot.title', 'plt.title', (['"""Episode Reward"""'], {}), "('Episode Reward')\n", (10350, 10368), True, 'import matplotlib.pyplot as plt\n'), ((10373, 10418), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(3)', 'w_pad': '(0.5)', 'h_pad': '(1.0)'}), '(pad=3, w_pad=0.5, h_pad=1.0)\n', (10389, 10418), True, 'import matplotlib.pyplot as plt\n'), ((10423, 10519), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.08)', 'bottom': '(0.08)', 'right': '(0.98)', 'top': '(0.95)', 'wspace': '(0.33)', 'hspace': '(0.15)'}), '(left=0.08, bottom=0.08, right=0.98, top=0.95, wspace=\n 0.33, hspace=0.15)\n', (10442, 10519), True, 'import matplotlib.pyplot as plt\n'), ((11243, 11291), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./figure/pdf/chinese_raw_data.pdf"""'], {}), 
"('./figure/pdf/chinese_raw_data.pdf')\n", (11254, 11291), True, 'import matplotlib.pyplot as plt\n'), ((11912, 11926), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (11923, 11926), False, 'import pickle\n'), ((11931, 11969), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 12)', 'dpi': '(1000)'}), '(figsize=(20, 12), dpi=1000)\n', (11941, 11969), True, 'import matplotlib.pyplot as plt\n'), ((11974, 12001), 'matplotlib.pyplot.title', 'plt.title', (['"""Episode Reward"""'], {}), "('Episode Reward')\n", (11983, 12001), True, 'import matplotlib.pyplot as plt\n'), ((12006, 12051), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(3)', 'w_pad': '(0.5)', 'h_pad': '(1.0)'}), '(pad=3, w_pad=0.5, h_pad=1.0)\n', (12022, 12051), True, 'import matplotlib.pyplot as plt\n'), ((12056, 12149), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'bottom': '(0.1)', 'right': '(0.98)', 'top': '(0.95)', 'wspace': '(0.33)', 'hspace': '(0.25)'}), '(left=0.1, bottom=0.1, right=0.98, top=0.95, wspace=0.33,\n hspace=0.25)\n', (12075, 12149), True, 'import matplotlib.pyplot as plt\n'), ((13533, 13585), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./figure/pdf/chinese_' + name + '.pdf')"], {}), "('./figure/pdf/chinese_' + name + '.pdf')\n", (13544, 13585), True, 'import matplotlib.pyplot as plt\n'), ((14190, 14204), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (14201, 14204), False, 'import pickle\n'), ((14209, 14247), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 12)', 'dpi': '(1000)'}), '(figsize=(20, 12), dpi=1000)\n', (14219, 14247), True, 'import matplotlib.pyplot as plt\n'), ((14252, 14279), 'matplotlib.pyplot.title', 'plt.title', (['"""Episode Reward"""'], {}), "('Episode Reward')\n", (14261, 14279), True, 'import matplotlib.pyplot as plt\n'), ((14284, 14329), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(3)', 'w_pad': '(1.0)', 'h_pad': '(1.0)'}), '(pad=3, 
w_pad=1.0, h_pad=1.0)\n', (14300, 14329), True, 'import matplotlib.pyplot as plt\n'), ((14334, 14427), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'bottom': '(0.15)', 'right': '(0.98)', 'top': '(0.9)', 'wspace': '(0.23)', 'hspace': '(0.23)'}), '(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23,\n hspace=0.23)\n', (14353, 14427), True, 'import matplotlib.pyplot as plt\n'), ((15716, 15776), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./figure/pdf/chinese_different_policies_b.pdf"""'], {}), "('./figure/pdf/chinese_different_policies_b.pdf')\n", (15727, 15776), True, 'import matplotlib.pyplot as plt\n'), ((2558, 2578), 'numpy.array', 'np.array', (['reward[1:]'], {}), '(reward[1:])\n', (2566, 2578), True, 'import numpy as np\n'), ((4356, 4380), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(j + 1)'], {}), '(2, 3, j + 1)\n', (4367, 4380), True, 'import matplotlib.pyplot as plt\n'), ((4389, 4435), 'matplotlib.pyplot.plot', 'plt.plot', (['(data[:, j] * scale[j])'], {'linewidth': '(2.5)'}), '(data[:, j] * scale[j], linewidth=2.5)\n', (4397, 4435), True, 'import matplotlib.pyplot as plt\n'), ((4548, 4581), 'matplotlib.pyplot.title', 'plt.title', (['YLABEL[j]'], {'fontsize': '(30)'}), '(YLABEL[j], fontsize=30)\n', (4557, 4581), True, 'import matplotlib.pyplot as plt\n'), ((4590, 4613), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(25)'}), '(fontsize=25)\n', (4600, 4613), True, 'import matplotlib.pyplot as plt\n'), ((4622, 4645), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(25)'}), '(fontsize=25)\n', (4632, 4645), True, 'import matplotlib.pyplot as plt\n'), ((6789, 6813), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(j + 1)'], {}), '(2, 2, j + 1)\n', (6800, 6813), True, 'import matplotlib.pyplot as plt\n'), ((7665, 7698), 'matplotlib.pyplot.title', 'plt.title', (['titles[j]'], {'fontsize': '(30)'}), '(titles[j], fontsize=30)\n', (7674, 7698), True, 'import 
matplotlib.pyplot as plt\n'), ((7770, 7812), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of steps"""'], {'fontsize': '(30)'}), "('Number of steps', fontsize=30)\n", (7780, 7812), True, 'import matplotlib.pyplot as plt\n'), ((7821, 7844), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(25)'}), '(fontsize=25)\n', (7831, 7844), True, 'import matplotlib.pyplot as plt\n'), ((7853, 7876), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(25)'}), '(fontsize=25)\n', (7863, 7876), True, 'import matplotlib.pyplot as plt\n'), ((9001, 9025), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(j + 1)'], {}), '(2, 2, j + 1)\n', (9012, 9025), True, 'import matplotlib.pyplot as plt\n'), ((9878, 9911), 'matplotlib.pyplot.title', 'plt.title', (['titles[j]'], {'fontsize': '(36)'}), '(titles[j], fontsize=36)\n', (9887, 9911), True, 'import matplotlib.pyplot as plt\n'), ((9983, 10014), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""预测周期"""'], {'fontsize': '(36)'}), "('预测周期', fontsize=36)\n", (9993, 10014), True, 'import matplotlib.pyplot as plt\n'), ((10023, 10070), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 50, 100, 150, 200]'], {'fontsize': '(36)'}), '([0, 50, 100, 150, 200], fontsize=36)\n', (10033, 10070), True, 'import matplotlib.pyplot as plt\n'), ((10079, 10102), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(36)'}), '(fontsize=36)\n', (10089, 10102), True, 'import matplotlib.pyplot as plt\n'), ((10776, 10800), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(j + 1)'], {}), '(2, 3, j + 1)\n', (10787, 10800), True, 'import matplotlib.pyplot as plt\n'), ((10809, 10874), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:100, j]'], {'linewidth': '(2.5)', 'color': '"""r"""', 'linestyle': '"""--"""'}), "(data[:100, j], linewidth=2.5, color='r', linestyle='--')\n", (10817, 10874), True, 'import matplotlib.pyplot as plt\n'), ((10883, 10934), 'matplotlib.pyplot.plot', 'plt.plot', (['data_1[:100, j]'], 
{'linewidth': '(2.5)', 'color': '"""b"""'}), "(data_1[:100, j], linewidth=2.5, color='b')\n", (10891, 10934), True, 'import matplotlib.pyplot as plt\n'), ((11048, 11081), 'matplotlib.pyplot.title', 'plt.title', (['YLABEL[j]'], {'fontsize': '(38)'}), '(YLABEL[j], fontsize=38)\n', (11057, 11081), True, 'import matplotlib.pyplot as plt\n'), ((11090, 11113), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(38)'}), '(fontsize=38)\n', (11100, 11113), True, 'import matplotlib.pyplot as plt\n'), ((11122, 11145), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(38)'}), '(fontsize=38)\n', (11132, 11145), True, 'import matplotlib.pyplot as plt\n'), ((12427, 12451), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(j + 1)'], {}), '(2, 3, j + 1)\n', (12438, 12451), True, 'import matplotlib.pyplot as plt\n'), ((13344, 13377), 'matplotlib.pyplot.title', 'plt.title', (['YLABEL[j]'], {'fontsize': '(38)'}), '(YLABEL[j], fontsize=38)\n', (13353, 13377), True, 'import matplotlib.pyplot as plt\n'), ((13448, 13495), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 50, 100, 150, 200]'], {'fontsize': '(38)'}), '([0, 50, 100, 150, 200], fontsize=38)\n', (13458, 13495), True, 'import matplotlib.pyplot as plt\n'), ((13504, 13527), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(38)'}), '(fontsize=38)\n', (13514, 13527), True, 'import matplotlib.pyplot as plt\n'), ((14608, 14632), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(j + 1)'], {}), '(2, 3, j + 1)\n', (14619, 14632), True, 'import matplotlib.pyplot as plt\n'), ((15526, 15559), 'matplotlib.pyplot.title', 'plt.title', (['YLABEL[j]'], {'fontsize': '(30)'}), '(YLABEL[j], fontsize=30)\n', (15535, 15559), True, 'import matplotlib.pyplot as plt\n'), ((15631, 15678), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 50, 100, 150, 200]'], {'fontsize': '(25)'}), '([0, 50, 100, 150, 200], fontsize=25)\n', (15641, 15678), True, 'import matplotlib.pyplot as plt\n'), ((15687, 
15710), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(25)'}), '(fontsize=25)\n', (15697, 15710), True, 'import matplotlib.pyplot as plt\n'), ((1363, 1377), 'matplotlib.pyplot.plot', 'plt.plot', (['line'], {}), '(line)\n', (1371, 1377), True, 'import matplotlib.pyplot as plt\n'), ((1390, 1411), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['YLABEL[j]'], {}), '(YLABEL[j])\n', (1400, 1411), True, 'import matplotlib.pyplot as plt\n'), ((1424, 1443), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {}), "('steps')\n", (1434, 1443), True, 'import matplotlib.pyplot as plt\n'), ((1456, 1474), 'matplotlib.pyplot.legend', 'plt.legend', (['YLABEL'], {}), '(YLABEL)\n', (1466, 1474), True, 'import matplotlib.pyplot as plt\n'), ((4507, 4539), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {'fontsize': '(30)'}), "('steps', fontsize=30)\n", (4517, 4539), True, 'import matplotlib.pyplot as plt\n'), ((7729, 7761), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {'fontsize': '(30)'}), "('steps', fontsize=30)\n", (7739, 7761), True, 'import matplotlib.pyplot as plt\n'), ((9943, 9974), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""搜索步数"""'], {'fontsize': '(36)'}), "('搜索步数', fontsize=36)\n", (9953, 9974), True, 'import matplotlib.pyplot as plt\n'), ((11008, 11039), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""搜索步数"""'], {'fontsize': '(38)'}), "('搜索步数', fontsize=38)\n", (11018, 11039), True, 'import matplotlib.pyplot as plt\n'), ((13408, 13439), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""搜索步数"""'], {'fontsize': '(38)'}), "('搜索步数', fontsize=38)\n", (13418, 13439), True, 'import matplotlib.pyplot as plt\n'), ((15590, 15622), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {'fontsize': '(30)'}), "('steps', fontsize=30)\n", (15600, 15622), True, 'import matplotlib.pyplot as plt\n'), ((3288, 3312), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(j + 1)'], {}), '(2, 3, j + 1)\n', (3299, 3312), True, 'import 
matplotlib.pyplot as plt\n'), ((3329, 3349), 'matplotlib.pyplot.plot', 'plt.plot', (['line[:, j]'], {}), '(line[:, j])\n', (3337, 3349), True, 'import matplotlib.pyplot as plt\n'), ((5307, 5321), 'copy.deepcopy', 'cp.deepcopy', (['G'], {}), '(G)\n', (5318, 5321), True, 'import copy as cp\n'), ((5523, 5537), 'copy.deepcopy', 'cp.deepcopy', (['G'], {}), '(G)\n', (5534, 5537), True, 'import copy as cp\n'), ((7028, 7068), 'numpy.array', 'np.array', (["raw_data['GTD(0)', 'UDE'][key]"], {}), "(raw_data['GTD(0)', 'UDE'][key])\n", (7036, 7068), True, 'import numpy as np\n'), ((7375, 7422), 'numpy.array', 'np.array', (["raw_data['GTD(0)', 'Prediction'][key]"], {}), "(raw_data['GTD(0)', 'Prediction'][key])\n", (7383, 7422), True, 'import numpy as np\n'), ((9240, 9280), 'numpy.array', 'np.array', (["raw_data['GTD(0)', 'UDE'][key]"], {}), "(raw_data['GTD(0)', 'UDE'][key])\n", (9248, 9280), True, 'import numpy as np\n'), ((9587, 9634), 'numpy.array', 'np.array', (["raw_data['GTD(0)', 'Prediction'][key]"], {}), "(raw_data['GTD(0)', 'Prediction'][key])\n", (9595, 9634), True, 'import numpy as np\n'), ((3437, 3473), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['YLABEL[j]'], {'fontsize': '(17.5)'}), '(YLABEL[j], fontsize=17.5)\n', (3447, 3473), True, 'import matplotlib.pyplot as plt\n'), ((3494, 3526), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {'fontsize': '(20)'}), "('steps', fontsize=20)\n", (3504, 3526), True, 'import matplotlib.pyplot as plt\n'), ((3547, 3570), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (3557, 3570), True, 'import matplotlib.pyplot as plt\n'), ((3591, 3614), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (3601, 3614), True, 'import matplotlib.pyplot as plt\n'), ((3657, 3691), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['YLABEL[j]'], {'fontsize': '(20)'}), '(YLABEL[j], fontsize=20)\n', (3667, 3691), True, 'import matplotlib.pyplot as plt\n'), ((3712, 3744), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {'fontsize': '(20)'}), "('steps', fontsize=20)\n", (3722, 3744), True, 'import matplotlib.pyplot as plt\n'), ((3765, 3788), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (3775, 3788), True, 'import matplotlib.pyplot as plt\n'), ((3809, 3832), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (3819, 3832), True, 'import matplotlib.pyplot as plt\n'), ((12678, 12718), 'numpy.array', 'np.array', (["raw_data['GTD(1)', 'UDE'][key]"], {}), "(raw_data['GTD(1)', 'UDE'][key])\n", (12686, 12718), True, 'import numpy as np\n'), ((13045, 13092), 'numpy.array', 'np.array', (["raw_data['GTD(1)', 'Prediction'][key]"], {}), "(raw_data['GTD(1)', 'Prediction'][key])\n", (13053, 13092), True, 'import numpy as np\n'), ((14859, 14899), 'numpy.array', 'np.array', (["raw_data['GTD(1)', 'UDE'][key]"], {}), "(raw_data['GTD(1)', 'UDE'][key])\n", (14867, 14899), True, 'import numpy as np\n'), ((15226, 15273), 'numpy.array', 'np.array', (["raw_data['GTD(1)', 'Prediction'][key]"], {}), "(raw_data['GTD(1)', 'Prediction'][key])\n", (15234, 15273), True, 'import numpy as np\n')]
from repacolors import ColorScale from .colorbrewer import PALETTES as CBPALETTES PALETTES = { "ryb": ["#fe2713", "#fd5307", "#fb9900", "#fabc00", "#fefe34", "#d1e92c", "#66b032", "#0492ce", "#0347fe", "#3e01a4", "#8600af", "#a7194b"], "rybw3": ["#FE2712", "#FC600A", "#FB9902", "#FCCC1A", "#FEFE33", "#B2D732", "#66B032", "#347C98", "#0247FE", "#4424D6", "#8601AF", "#C21460"], **CBPALETTES } def get_palette(name: str): if name.lower() not in PALETTES: raise KeyError(f"'{name}' palette not found") return PALETTES[name.lower()] def get_scale(name: str, *args, **kwargs) -> ColorScale: kwargs["name"] = name return ColorScale(get_palette(name), *args, **kwargs) def demo(width: int = 80): for name, colors in PALETTES.items(): s = ColorScale(colors) print(f"{name:12s}", end="") s.print(width=width, height=2, border=0)
[ "repacolors.ColorScale" ]
[((790, 808), 'repacolors.ColorScale', 'ColorScale', (['colors'], {}), '(colors)\n', (800, 808), False, 'from repacolors import ColorScale\n')]
# Copyright (c) 2019 <NAME> <<EMAIL>> # See the COPYRIGHT file for more information import subprocess import time def ping(guest_ip): out = subprocess.run(['ping', '-c 1', guest_ip], capture_output=True) return out.returncode == 0 def nmap_ssh(guest_ip): out = subprocess.run(['nmap', guest_ip, '-PN', '-p ssh'], capture_output=True) return out.returncode == 0 and b'open' in out.stdout def read_file(file_name): with open(file_name, 'r') as file: return file.read() def generate_mac_ip(guest_id): # TODO support more hex_id = hex(guest_id)[2:] mac = 'aa:bb:cc:dd:ee:' + hex_id.zfill(2) ip = '192.168.150.' + str(guest_id) return mac, ip def now(): return time.time()
[ "subprocess.run", "time.time" ]
[((147, 210), 'subprocess.run', 'subprocess.run', (["['ping', '-c 1', guest_ip]"], {'capture_output': '(True)'}), "(['ping', '-c 1', guest_ip], capture_output=True)\n", (161, 210), False, 'import subprocess\n'), ((278, 350), 'subprocess.run', 'subprocess.run', (["['nmap', guest_ip, '-PN', '-p ssh']"], {'capture_output': '(True)'}), "(['nmap', guest_ip, '-PN', '-p ssh'], capture_output=True)\n", (292, 350), False, 'import subprocess\n'), ((720, 731), 'time.time', 'time.time', ([], {}), '()\n', (729, 731), False, 'import time\n')]
import json from urllib.parse import urljoin from django.conf import settings import requests def get_ticket_endpoint(): return urljoin(settings.ZENDESK_BASE_URL, '/api/v2/tickets.json') def zendesk_auth(): return ( '{username}/token'.format(username=settings.ZENDESK_API_USERNAME), settings.ZENDESK_API_TOKEN ) def create_ticket(subject, tags, ticket_body, requester_email=None, custom_fields=None): """ Create a new Zendesk ticket """ payload = {'ticket': { 'subject': subject, 'comment': { 'body': ticket_body }, 'group_id': settings.ZENDESK_GROUP_ID, 'tags': tags, 'custom_fields': list(custom_fields or ()), }} if requester_email: payload['ticket']['requester'] = { 'name': 'Sender: %s' % requester_email.split('@')[0], 'email': requester_email, } else: payload['ticket']['requester_id'] = settings.ZENDESK_REQUESTER_ID requests.post( get_ticket_endpoint(), data=json.dumps(payload), auth=zendesk_auth(), headers={'content-type': 'application/json'}).raise_for_status()
[ "json.dumps", "urllib.parse.urljoin" ]
[((135, 193), 'urllib.parse.urljoin', 'urljoin', (['settings.ZENDESK_BASE_URL', '"""/api/v2/tickets.json"""'], {}), "(settings.ZENDESK_BASE_URL, '/api/v2/tickets.json')\n", (142, 193), False, 'from urllib.parse import urljoin\n'), ((1054, 1073), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (1064, 1073), False, 'import json\n')]
from django.conf import settings from rest_framework.routers import DefaultRouter, SimpleRouter from message_service.mailing.api.views import ( ClientsViewSet, MailingViewSet, MessageViewSet, ) from message_service.users.api.views import UserViewSet if settings.DEBUG: router = DefaultRouter() else: router = SimpleRouter() router.register("users", UserViewSet) router.register("clients", ClientsViewSet, basename="clients") router.register("mailing", MailingViewSet, basename="mailing-list") router.register("messages", MessageViewSet, basename="message") app_name = "api" urlpatterns = router.urls
[ "rest_framework.routers.SimpleRouter", "rest_framework.routers.DefaultRouter" ]
[((296, 311), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (309, 311), False, 'from rest_framework.routers import DefaultRouter, SimpleRouter\n'), ((331, 345), 'rest_framework.routers.SimpleRouter', 'SimpleRouter', ([], {}), '()\n', (343, 345), False, 'from rest_framework.routers import DefaultRouter, SimpleRouter\n')]
#! /usr/bin/env python from math import factorial import numpy as np # test passed def generate_poly(max_exponent,max_diff,symbol): f=np.zeros((max_diff+1, max_exponent+1), dtype=float) for k in range(max_diff+1): for i in range(max_exponent+1): if (i - k) >= 0: f[k,i] = factorial(i)*symbol**(i-k)/factorial(i-k) else: f[k,i] = 0 return f
[ "math.factorial", "numpy.zeros" ]
[((137, 192), 'numpy.zeros', 'np.zeros', (['(max_diff + 1, max_exponent + 1)'], {'dtype': 'float'}), '((max_diff + 1, max_exponent + 1), dtype=float)\n', (145, 192), True, 'import numpy as np\n'), ((315, 331), 'math.factorial', 'factorial', (['(i - k)'], {}), '(i - k)\n', (324, 331), False, 'from math import factorial\n'), ((288, 300), 'math.factorial', 'factorial', (['i'], {}), '(i)\n', (297, 300), False, 'from math import factorial\n')]
# --- # jupyter: # jupytext: # formats: ipynb,py:light # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.11.0 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import torch def train(model: nn.Module, optimizer: Optimizer, loss: nn.Module, train_loader: DataLoader, valid_loader: DataLoader = None, epochs: int = 100, gpu: int = None, score: list = None, scheduler=None, make_sigmoid=False, make_softmax=False) -> tuple: """ :param model: torch ML model :param optimizer: torch optimizer algorithm :param loss: loss function :param train_loader: training set :param valid_loader: validation set :param epochs: number of epochs :param gpu: gpu number :param scheduler: Learning Rate scheduler :return: train accuracy, train loss, validation accuracy, validation loss """ # GPU if gpu is not None: model = model.cuda(gpu) epochs_train_loss = [] epochs_valid_loss = [] for ep in range(epochs): model.training = True all_losses = [] all_predictions = [] all_targets = [] for i, (inputs, targets) in enumerate(train_loader): # GPU if gpu is not None: inputs = inputs.cuda(gpu) targets = targets.float().cuda(gpu) predictions = model(inputs).squeeze() err = loss(predictions, targets) # Machine is learning err.backward() optimizer.step() optimizer.zero_grad() # Clean GPU if gpu is not None: err = err.detach().cpu() inputs = inputs.cpu() targets = targets.cpu() predictions = predictions.cpu() torch.cuda.empty_cache() all_losses.append(err) if make_sigmoid: labels = (F.sigmoid(predictions) >= 0.5) * 1 elif make_softmax: labels = (F.softmax(predictions) >= 0.5) * 1 else: labels = predictions all_predictions.append(labels) all_targets.append(targets) print( f'\rBatch : {i + 1} / {len(train_loader)} - Loss : {err:.2e}', end='') all_predictions = torch.vstack(all_predictions) all_targets = torch.vstack(all_targets) train_loss = np.vstack(all_losses).mean() # Historic epochs_train_loss.append(train_loss) if 
scheduler is not None: scheduler.step() # Validation step if valid_loader is not None: valid_loss = valid(model, loss, valid_loader, gpu) # Historic epochs_valid_loss.append(valid_loss) print( f'\rEpoch : {ep + 1} - Train Loss : {train_loss:.2e} - ' f'- Valid Loss : {valid_loss:.2e}') else: # Display epoch information print(f'\rEpoch : {ep + 1} - Train Loss : {train_loss:.2e}') if valid_loader is not None: return epochs_train_loss, epochs_valid_loss return epochs_train_loss a = torch.randn((15, 1)) b = torch.randn((9, 1)) torch.vstack((a, b)).shape
[ "torch.vstack", "torch.cuda.empty_cache", "torch.randn" ]
[((3316, 3336), 'torch.randn', 'torch.randn', (['(15, 1)'], {}), '((15, 1))\n', (3327, 3336), False, 'import torch\n'), ((3341, 3360), 'torch.randn', 'torch.randn', (['(9, 1)'], {}), '((9, 1))\n', (3352, 3360), False, 'import torch\n'), ((3362, 3382), 'torch.vstack', 'torch.vstack', (['(a, b)'], {}), '((a, b))\n', (3374, 3382), False, 'import torch\n'), ((2466, 2495), 'torch.vstack', 'torch.vstack', (['all_predictions'], {}), '(all_predictions)\n', (2478, 2495), False, 'import torch\n'), ((2518, 2543), 'torch.vstack', 'torch.vstack', (['all_targets'], {}), '(all_targets)\n', (2530, 2543), False, 'import torch\n'), ((1897, 1921), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1919, 1921), False, 'import torch\n')]
import sqlite3 import pandas as pd import re import random from bs4 import BeautifulSoup class Process: SEQ_LENGTH = 40 sql_transaction = [] dataset = [] cursor_train = None cursor_validation = None cursor_test = None # I know in advance that there are 199819620 rows NUM_ROWS = 199819620 train_size = None val_size = None test_size = None counter = 0 def __init__(self, path, database_dir, split=(0.8, 0.1, 0.1), SEQ_LENGTH=40): self.SEQ_LENGTH = SEQ_LENGTH # Connecting to the train database connection_train = sqlite3.connect(database_dir + "/sequence_train.db") c = connection_train.cursor() self.cursor_train = c # Connecting to the validation database connection_validation = sqlite3.connect(database_dir + "/sequence_val.db") c = connection_validation.cursor() self.cursor_validation = c # Connecting to the test database connection_test = sqlite3.connect(database_dir + "/sequence_test.db") c = connection_test.cursor() self.cursor_test = c train, val, test = split if (train + val + test) != 1.0: raise ValueError('Invalid split data') self.train_size = int(self.NUM_ROWS * train) self.val_size = int(self.NUM_ROWS * val) self.test_size = int(self.NUM_ROWS * test) print('--Reading the dataset--') # Reading the dataset data = pd.read_csv(path, sep='\t', error_bad_lines=False) # Filtering it data = data[data['verified_purchase'] == 'Y'] # Selecting reviews with review length > SEQ_LENGTH data = data[data['review_body'].str.len() > SEQ_LENGTH] # Selecting review_body column data = data[['review_body']] # Dropping empty rows data = data.dropna() # Shuffling the data data = data.sample(frac=1) data = data.values self.dataset = data def create_table(self): self.cursor_train.execute("CREATE TABLE IF NOT EXISTS reviews(review TEXT, next TEXT);") self.cursor_validation.execute("CREATE TABLE IF NOT EXISTS reviews(review TEXT, next TEXT);") self.cursor_test.execute("CREATE TABLE IF NOT EXISTS reviews(review TEXT, next TEXT);") def transaction_bldr(self, sql, db): 
self.sql_transaction.append(sql) if len(self.sql_transaction) > 1000: random.shuffle(self.sql_transaction) if db == 'train': self.cursor_train.execute('BEGIN TRANSACTION') for s in self.sql_transaction: try: self.cursor_train.execute(s) except Exception as ex: print('Transaction fail ', ex) print('SQL ', s) self.cursor_train.execute('commit') self.sql_transaction = [] elif db == 'val': self.cursor_validation.execute('BEGIN TRANSACTION') for s in self.sql_transaction: try: self.cursor_validation.execute(s) except Exception as ex: print('Transaction fail ', ex) print('SQL ', s) self.cursor_validation.execute('commit') self.sql_transaction = [] elif db == 'test': self.cursor_test.execute('BEGIN TRANSACTION') for s in self.sql_transaction: try: self.cursor_test.execute(s) except Exception as ex: print('Transaction fail ', ex) print('SQL ', s) self.cursor_test.execute('commit') self.sql_transaction = [] def insertData(self, sequence, nxt, db): try: sql = "INSERT INTO reviews(review, next) VALUES('{}', '{}');".format(sequence, nxt) self.transaction_bldr(sql, db) except Exception as e: print('Something went wrong when inserting the data into database, ',str(e)) def deEmojify(self,inputString): return inputString.encode('ascii', 'ignore').decode('ascii') def clean_review(self,review): # Changing to lowercase review = self.deEmojify(review.lower()) # Changing he'll to he will review = re.sub(r"i'm", "i am", review) review = re.sub(r"aren't", "are not", review) review = re.sub(r"couldn't", "counld not", review) review = re.sub(r"didn't", "did not", review) review = re.sub(r"doesn't", "does not", review) review = re.sub(r"don't", "do not", review) review = re.sub(r"hadn't", "had not", review) review = re.sub(r"hasn't", "has not", review) review = re.sub(r"haven't", "have not", review) review = re.sub(r"isn't", "is not", review) review = re.sub(r"it't", "had not", review) review = re.sub(r"hadn't", "had not", review) review = re.sub(r"won't", "will not", review) review = 
re.sub(r"can't", "cannot", review) review = re.sub(r"mightn't", "might not", review) review = re.sub(r"mustn't", "must not", review) review = re.sub(r"needn't", "need not", review) review = re.sub(r"shouldn't", "should not", review) review = re.sub(r"wasn't", "was not", review) review = re.sub(r"weren't", "were not", review) review = re.sub(r"won't", "will not", review) review = re.sub(r"wouldn't", "would not", review) review = re.sub(r"\'s", " is", review) review = re.sub(r"\'ll", " will", review) review = re.sub(r"\'ve", " have", review) review = re.sub(r"\'re", " are", review) review = re.sub(r"\'d", " would", review) review = re.sub(r"'", " ", review) review = re.sub(r'"', " ", review) # Removing links and other stuffs from string review = re.sub(r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))''', '', review, flags=re.MULTILINE) review = BeautifulSoup(review, "lxml").text return review def process(self): for index, review in enumerate(self.dataset): if index % 1000 == 0: print('--Preprocessing {}th review--'.format(index+1)) review = self.clean_review(review[0]) for k in range(len(review) - self.SEQ_LENGTH): # Seleting the sequence seq = review[k:self.SEQ_LENGTH + k] nxt = review[self.SEQ_LENGTH + k] if self.counter < self.train_size: self.insertData(seq, nxt, 'train') elif self.counter < self.train_size + self.val_size: self.insertData(seq, nxt, 'val') elif self.counter < self.train_size + self.val_size + self.test_size: self.insertData(seq, nxt, 'test') self.counter += 1 process = Process('dataset/02.tsv', 'dataset', (.8, .1, .1), 40) process.create_table() process.process()
[ "random.shuffle", "sqlite3.connect", "pandas.read_csv", "bs4.BeautifulSoup", "re.sub" ]
[((610, 662), 'sqlite3.connect', 'sqlite3.connect', (["(database_dir + '/sequence_train.db')"], {}), "(database_dir + '/sequence_train.db')\n", (625, 662), False, 'import sqlite3\n'), ((829, 879), 'sqlite3.connect', 'sqlite3.connect', (["(database_dir + '/sequence_val.db')"], {}), "(database_dir + '/sequence_val.db')\n", (844, 879), False, 'import sqlite3\n'), ((1044, 1095), 'sqlite3.connect', 'sqlite3.connect', (["(database_dir + '/sequence_test.db')"], {}), "(database_dir + '/sequence_test.db')\n", (1059, 1095), False, 'import sqlite3\n'), ((1561, 1611), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': '"""\t"""', 'error_bad_lines': '(False)'}), "(path, sep='\\t', error_bad_lines=False)\n", (1572, 1611), True, 'import pandas as pd\n'), ((4661, 4690), 're.sub', 're.sub', (['"""i\'m"""', '"""i am"""', 'review'], {}), '("i\'m", \'i am\', review)\n', (4667, 4690), False, 'import re\n'), ((4709, 4744), 're.sub', 're.sub', (['"""aren\'t"""', '"""are not"""', 'review'], {}), '("aren\'t", \'are not\', review)\n', (4715, 4744), False, 'import re\n'), ((4763, 4803), 're.sub', 're.sub', (['"""couldn\'t"""', '"""counld not"""', 'review'], {}), '("couldn\'t", \'counld not\', review)\n', (4769, 4803), False, 'import re\n'), ((4822, 4857), 're.sub', 're.sub', (['"""didn\'t"""', '"""did not"""', 'review'], {}), '("didn\'t", \'did not\', review)\n', (4828, 4857), False, 'import re\n'), ((4876, 4913), 're.sub', 're.sub', (['"""doesn\'t"""', '"""does not"""', 'review'], {}), '("doesn\'t", \'does not\', review)\n', (4882, 4913), False, 'import re\n'), ((4932, 4965), 're.sub', 're.sub', (['"""don\'t"""', '"""do not"""', 'review'], {}), '("don\'t", \'do not\', review)\n', (4938, 4965), False, 'import re\n'), ((4984, 5019), 're.sub', 're.sub', (['"""hadn\'t"""', '"""had not"""', 'review'], {}), '("hadn\'t", \'had not\', review)\n', (4990, 5019), False, 'import re\n'), ((5038, 5073), 're.sub', 're.sub', (['"""hasn\'t"""', '"""has not"""', 'review'], {}), '("hasn\'t", \'has not\', 
review)\n', (5044, 5073), False, 'import re\n'), ((5092, 5129), 're.sub', 're.sub', (['"""haven\'t"""', '"""have not"""', 'review'], {}), '("haven\'t", \'have not\', review)\n', (5098, 5129), False, 'import re\n'), ((5148, 5181), 're.sub', 're.sub', (['"""isn\'t"""', '"""is not"""', 'review'], {}), '("isn\'t", \'is not\', review)\n', (5154, 5181), False, 'import re\n'), ((5200, 5233), 're.sub', 're.sub', (['"""it\'t"""', '"""had not"""', 'review'], {}), '("it\'t", \'had not\', review)\n', (5206, 5233), False, 'import re\n'), ((5252, 5287), 're.sub', 're.sub', (['"""hadn\'t"""', '"""had not"""', 'review'], {}), '("hadn\'t", \'had not\', review)\n', (5258, 5287), False, 'import re\n'), ((5306, 5341), 're.sub', 're.sub', (['"""won\'t"""', '"""will not"""', 'review'], {}), '("won\'t", \'will not\', review)\n', (5312, 5341), False, 'import re\n'), ((5360, 5393), 're.sub', 're.sub', (['"""can\'t"""', '"""cannot"""', 'review'], {}), '("can\'t", \'cannot\', review)\n', (5366, 5393), False, 'import re\n'), ((5412, 5451), 're.sub', 're.sub', (['"""mightn\'t"""', '"""might not"""', 'review'], {}), '("mightn\'t", \'might not\', review)\n', (5418, 5451), False, 'import re\n'), ((5470, 5507), 're.sub', 're.sub', (['"""mustn\'t"""', '"""must not"""', 'review'], {}), '("mustn\'t", \'must not\', review)\n', (5476, 5507), False, 'import re\n'), ((5526, 5563), 're.sub', 're.sub', (['"""needn\'t"""', '"""need not"""', 'review'], {}), '("needn\'t", \'need not\', review)\n', (5532, 5563), False, 'import re\n'), ((5582, 5623), 're.sub', 're.sub', (['"""shouldn\'t"""', '"""should not"""', 'review'], {}), '("shouldn\'t", \'should not\', review)\n', (5588, 5623), False, 'import re\n'), ((5642, 5677), 're.sub', 're.sub', (['"""wasn\'t"""', '"""was not"""', 'review'], {}), '("wasn\'t", \'was not\', review)\n', (5648, 5677), False, 'import re\n'), ((5696, 5733), 're.sub', 're.sub', (['"""weren\'t"""', '"""were not"""', 'review'], {}), '("weren\'t", \'were not\', review)\n', (5702, 5733), 
False, 'import re\n'), ((5752, 5787), 're.sub', 're.sub', (['"""won\'t"""', '"""will not"""', 'review'], {}), '("won\'t", \'will not\', review)\n', (5758, 5787), False, 'import re\n'), ((5806, 5845), 're.sub', 're.sub', (['"""wouldn\'t"""', '"""would not"""', 'review'], {}), '("wouldn\'t", \'would not\', review)\n', (5812, 5845), False, 'import re\n'), ((5873, 5902), 're.sub', 're.sub', (['"""\\\\\'s"""', '""" is"""', 'review'], {}), '("\\\\\'s", \' is\', review)\n', (5879, 5902), False, 'import re\n'), ((5920, 5952), 're.sub', 're.sub', (['"""\\\\\'ll"""', '""" will"""', 'review'], {}), '("\\\\\'ll", \' will\', review)\n', (5926, 5952), False, 'import re\n'), ((5970, 6002), 're.sub', 're.sub', (['"""\\\\\'ve"""', '""" have"""', 'review'], {}), '("\\\\\'ve", \' have\', review)\n', (5976, 6002), False, 'import re\n'), ((6020, 6051), 're.sub', 're.sub', (['"""\\\\\'re"""', '""" are"""', 'review'], {}), '("\\\\\'re", \' are\', review)\n', (6026, 6051), False, 'import re\n'), ((6069, 6101), 're.sub', 're.sub', (['"""\\\\\'d"""', '""" would"""', 'review'], {}), '("\\\\\'d", \' would\', review)\n', (6075, 6101), False, 'import re\n'), ((6128, 6152), 're.sub', 're.sub', (['"""\'"""', '""" """', 'review'], {}), '("\'", \' \', review)\n', (6134, 6152), False, 'import re\n'), ((6171, 6195), 're.sub', 're.sub', (['"""\\""""', '""" """', 'review'], {}), '(\'"\', \' \', review)\n', (6177, 6195), False, 'import re\n'), ((6277, 6530), 're.sub', 're.sub', (['"""(?i)\\\\b((?:https?://|www\\\\d{0,3}[.]|[a-z0-9.\\\\-]+[.][a-z]{2,4}/)(?:[^\\\\s()<>]+|\\\\(([^\\\\s()<>]+|(\\\\([^\\\\s()<>]+\\\\)))*\\\\))+(?:\\\\(([^\\\\s()<>]+|(\\\\([^\\\\s()<>]+\\\\)))*\\\\)|[^\\\\s`!()\\\\[\\\\]{};:\'".,<>?«»“”‘’]))"""', '""""""', 'review'], {'flags': 're.MULTILINE'}), '(\n 
\'(?i)\\\\b((?:https?://|www\\\\d{0,3}[.]|[a-z0-9.\\\\-]+[.][a-z]{2,4}/)(?:[^\\\\s()<>]+|\\\\(([^\\\\s()<>]+|(\\\\([^\\\\s()<>]+\\\\)))*\\\\))+(?:\\\\(([^\\\\s()<>]+|(\\\\([^\\\\s()<>]+\\\\)))*\\\\)|[^\\\\s`!()\\\\[\\\\]{};:\\\'".,<>?«»“”‘’]))\'\n , \'\', review, flags=re.MULTILINE)\n', (6283, 6530), False, 'import re\n'), ((2615, 2651), 'random.shuffle', 'random.shuffle', (['self.sql_transaction'], {}), '(self.sql_transaction)\n', (2629, 2651), False, 'import random\n'), ((6532, 6561), 'bs4.BeautifulSoup', 'BeautifulSoup', (['review', '"""lxml"""'], {}), "(review, 'lxml')\n", (6545, 6561), False, 'from bs4 import BeautifulSoup\n')]
from django.conf.urls import url

from . import views

# URL routing table for this app. Django tries patterns top to bottom, so
# order matters: the empty pattern at the end acts as a catch-all and must
# stay last.
# NOTE(review): these regexes are unanchored (no ^ or $), so e.g. r'login'
# matches any path *containing* "login" — confirm this is intentional.
urlpatterns = [
    #url(r'^view/(?P<pk>[0-9]+)', views.ArticleDetailView.as_view(), name = "detail"),
    #url(r'', views.ArticleIndexView.as_view(), name = "index"),
    url(r'login', views.Login.as_view()),
    url(r'logout', views.Logout.as_view()),
    url(r'register', views.Register.as_view()),
    # Capture group passes the token text as a positional arg to the view.
    url(r'token/(.+)', views.Token.as_view()),
    # Catch-all. NOTE(review): views.Panel is passed directly rather than
    # via .as_view() — presumably a function-based view; verify.
    url(r'', views.Panel),
]
[ "django.conf.urls.url" ]
[((444, 464), 'django.conf.urls.url', 'url', (['""""""', 'views.Panel'], {}), "('', views.Panel)\n", (447, 464), False, 'from django.conf.urls import url\n')]
""" Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution. SPDX-License-Identifier: Apache-2.0 OR MIT """ # setup path import azlmbr.legacy.general as general import azlmbr.bus as bus import azlmbr.editor as editor import azlmbr.entity import azlmbr.object import azlmbr.math import azlmbr.whitebox.api as api from azlmbr.entity import EntityId # get Component Type for WhiteBoxMesh whiteBoxMeshComponentTypeId = get_white_box_component_type() # use old White Box entity to hold White Box component if it exists, otherwise use a new one newEntityId = None oldEntityId = general.find_editor_entity('WhiteBox') if oldEntityId.IsValid(): whiteBoxMeshComponentExists = editor.EditorComponentAPIBus(bus.Broadcast, 'HasComponentOfType', oldEntityId, whiteBoxMeshComponentTypeId) if (whiteBoxMeshComponentExists): oldwhiteBoxMeshComponent = editor.EditorComponentAPIBus(bus.Broadcast, 'GetComponentOfType', oldEntityId, whiteBoxMeshComponentTypeId) editor.EditorComponentAPIBus(bus.Broadcast, 'RemoveComponents', [oldwhiteBoxMeshComponent.GetValue()]) newEntityId = oldEntityId else: newEntityId = editor.ToolsApplicationRequestBus(bus.Broadcast, 'CreateNewEntity', EntityId()) editor.EditorEntityAPIBus(bus.Event, 'SetName', newEntityId, "WhiteBox") # add whiteBoxMeshComponent to entity and enable whiteBoxMeshComponentOutcome = editor.EditorComponentAPIBus(bus.Broadcast, 'AddComponentsOfType', newEntityId, [whiteBoxMeshComponentTypeId]) if (whiteBoxMeshComponentOutcome.IsSuccess()): print("White Box Component added to entity.") whiteBoxMeshComponents = whiteBoxMeshComponentOutcome.GetValue() whiteBoxMeshComponent = whiteBoxMeshComponents[0] editor.EditorComponentAPIBus(bus.Broadcast, 'EnableComponents', whiteBoxMeshComponents) isComponentEnabled = editor.EditorComponentAPIBus(bus.Broadcast, 'IsComponentEnabled', whiteBoxMeshComponent) if (isComponentEnabled): print("Enabled Mesh component.") 
whiteBoxMesh = azlmbr.whitebox.request.bus.EditorWhiteBoxComponentRequestBus(bus.Event, 'GetWhiteBoxMeshHandle', whiteBoxMeshComponent) # translate append (extrude) a polygon if (len(sys.argv) >= 2 and float(sys.argv[2]) != 0.0): # create face handle from user input (argv[1]) faceHandle = azlmbr.object.construct('FaceHandle', int(sys.argv[1])) # find the polygon handle that corresponds to the given face facePolygonHandle = whiteBoxMesh.FacePolygonHandle(faceHandle) # translate append (extrude) the polygon by a distance specified by the user (argv[2]) whiteBoxMesh.TranslatePolygonAppend(facePolygonHandle, float(sys.argv[2])) # recalculate uvs as mesh will have changed whiteBoxMesh.CalculatePlanarUVs() # notify the white box component the mesh has changed to force it to rebuild the render mesh azlmbr.whitebox.notification.bus.EditorWhiteBoxComponentNotificationBus(bus.Event, 'OnWhiteBoxMeshModified', whiteBoxMeshComponent)
[ "azlmbr.entity.EntityId", "azlmbr.editor.EditorEntityAPIBus", "azlmbr.editor.EditorComponentAPIBus", "azlmbr.legacy.general.find_editor_entity" ]
[((675, 713), 'azlmbr.legacy.general.find_editor_entity', 'general.find_editor_entity', (['"""WhiteBox"""'], {}), "('WhiteBox')\n", (701, 713), True, 'import azlmbr.legacy.general as general\n'), ((1471, 1585), 'azlmbr.editor.EditorComponentAPIBus', 'editor.EditorComponentAPIBus', (['bus.Broadcast', '"""AddComponentsOfType"""', 'newEntityId', '[whiteBoxMeshComponentTypeId]'], {}), "(bus.Broadcast, 'AddComponentsOfType',\n newEntityId, [whiteBoxMeshComponentTypeId])\n", (1499, 1585), True, 'import azlmbr.editor as editor\n'), ((1797, 1888), 'azlmbr.editor.EditorComponentAPIBus', 'editor.EditorComponentAPIBus', (['bus.Broadcast', '"""EnableComponents"""', 'whiteBoxMeshComponents'], {}), "(bus.Broadcast, 'EnableComponents',\n whiteBoxMeshComponents)\n", (1825, 1888), True, 'import azlmbr.editor as editor\n'), ((1907, 1999), 'azlmbr.editor.EditorComponentAPIBus', 'editor.EditorComponentAPIBus', (['bus.Broadcast', '"""IsComponentEnabled"""', 'whiteBoxMeshComponent'], {}), "(bus.Broadcast, 'IsComponentEnabled',\n whiteBoxMeshComponent)\n", (1935, 1999), True, 'import azlmbr.editor as editor\n'), ((775, 886), 'azlmbr.editor.EditorComponentAPIBus', 'editor.EditorComponentAPIBus', (['bus.Broadcast', '"""HasComponentOfType"""', 'oldEntityId', 'whiteBoxMeshComponentTypeId'], {}), "(bus.Broadcast, 'HasComponentOfType',\n oldEntityId, whiteBoxMeshComponentTypeId)\n", (803, 886), True, 'import azlmbr.editor as editor\n'), ((1317, 1389), 'azlmbr.editor.EditorEntityAPIBus', 'editor.EditorEntityAPIBus', (['bus.Event', '"""SetName"""', 'newEntityId', '"""WhiteBox"""'], {}), "(bus.Event, 'SetName', newEntityId, 'WhiteBox')\n", (1342, 1389), True, 'import azlmbr.editor as editor\n'), ((956, 1067), 'azlmbr.editor.EditorComponentAPIBus', 'editor.EditorComponentAPIBus', (['bus.Broadcast', '"""GetComponentOfType"""', 'oldEntityId', 'whiteBoxMeshComponentTypeId'], {}), "(bus.Broadcast, 'GetComponentOfType',\n oldEntityId, whiteBoxMeshComponentTypeId)\n", (984, 1067), True, 'import 
azlmbr.editor as editor\n'), ((1301, 1311), 'azlmbr.entity.EntityId', 'EntityId', ([], {}), '()\n', (1309, 1311), False, 'from azlmbr.entity import EntityId\n')]
from dask.distributed import Client
import dask.dataframe as dd
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from IPython.display import display, HTML
from sklearn.cluster import KMeans
import plotly
import plotly.graph_objs as go
import plotly.io as pio
from functools import partial
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, recall_score


def make_groundtruth_figures(data_folder, update_figs=False, no_labels=False):
    """Render 2D (matplotlib) and 3D (plotly) TSNE plots of app embeddings.

    Reads ``features.csv`` from *data_folder* (indexed by app), labels each
    app, and writes ``2D-plot.png`` and ``3D-plot.html`` back to
    *data_folder*. With ``update_figs`` the 3D plot is also copied into
    ``docs/_includes`` for the project site.

    NOTE(review): assumes features.csv contains 'uid' plus embedding columns
    and that all_apps.csv has 'category' and 'app_dir' columns — confirm
    against the feature-extraction step.
    """
    vectors = pd.read_csv(os.path.join(data_folder, 'features.csv'), index_col='app')
    if no_labels: # mostly for testing
        all_apps = vectors.assign(
            label=['app', 'app'],
            category=['app', 'app']
        )
    else:
        all_apps = pd.read_csv("data/out/all-apps/all_apps.csv", index_col='app')
        # Malware family name is the 6th path component of the app directory.
        all_apps['label'] = all_apps[all_apps.category=='malware'].app_dir.str.split('/').apply(lambda list: list[5])
        # Keep the nine most common families; lump the rest together so the
        # legend stays readable.
        top_9_malware = all_apps.label.value_counts().sort_values(ascending=False)[:9]
        top_9_min = top_9_malware.min()
        other_mal_map = {key: "Other malware" for key, value in all_apps.label.value_counts().items() if value <= top_9_min}
        # other_mal_map = {key: key for key, value in all_apps.label.value_counts().items() if value <= 200}
        all_apps.label = all_apps.label.map(other_mal_map).fillna(all_apps.label)
        # Benign apps have no family label; fall back to their category.
        all_apps.label.fillna(all_apps.category, inplace=True)

    vectors = vectors.assign(
        label=all_apps.label,
        category=all_apps.category
    )
    labels = vectors.label

    # Retrieve node embeddings and corresponding subjects
    node_ids = list(vectors.uid)  # list of node IDs
    node_embeddings = vectors.drop(columns=['uid', 'category', 'label'])
    node_targets = labels

    transform = TSNE  # Dimensionality reduction transformer

    # 2D plot -- matplotlib
    print('Making 2D plot...')
    plt.rcParams.update({'font.size': 14})
    trans = transform(n_components=2)
    node_embeddings_2d = trans.fit_transform(node_embeddings)
    # Map each distinct label to an integer so it can index a colormap.
    label_map = {l: i for i, l in enumerate(np.unique(node_targets))}
    node_colours = [label_map[target] for target in node_targets]
    plt.figure(figsize=(10, 8))
    plt.axes().set(aspect="equal")
    scatter = plt.scatter(
        node_embeddings_2d[:, 0],
        node_embeddings_2d[:, 1],
        c=node_colours, cmap='tab20', alpha=1, s=5
    )
    plt.title("2D {} visualization of node embeddings".format(transform.__name__))
    legend1 = plt.legend(scatter.legend_elements()[0],
                         pd.Series(label_map.keys()).str.replace('-', ' ').str.title(),
                         loc='center left', bbox_to_anchor=(1, 0.5), title="App Type", markerscale=1.5)
    # order labels (https://stackoverflow.com/a/46160465/13710014)
    # handles, g_labels = plt.gca().get_legend_handles_labels()
    # print(handles, labels)
    # if not no_labels:
    #     order = ['Popular Apps', 'Random Apps']
    #     order += list(top_9_malware.index)
    #     plt.legend([handles[idx] for idx in order],[labels[idx] for idx in order])
    plt.savefig(os.path.join(data_folder, '2D-plot.png'), bbox_inches='tight')

    # 3D plot - using plotly
    print('Making 3D plot...')
    trans3d = transform(n_components=3)
    node_embeddings_3d = trans3d.fit_transform(node_embeddings)
    data_3d = pd.DataFrame(node_embeddings_3d, index=vectors.index)
    data_3d['malware'] = vectors['category']=='malware'
    data_3d['type'] = vectors.label
    # One row per (malware flag, label) pair; 'num' is the colormap index.
    type_chart = data_3d[['malware', 'type']].drop_duplicates()
    type_chart['num'] = type_chart.type.map(label_map)

    # Hide all axes/gridlines so only the point cloud is visible.
    layout = go.Layout(
        title="Interactive 3D TNSE representation of node embeddings",
        margin={'l': 0, 'r': 0, 'b': 0, 't': 30},
        legend=dict(y=0.5, itemsizing='constant'),
        scene={
            'xaxis': {
                'showspikes': False,
                'showgrid': False,
                'zeroline': False,
                'visible': False
            },
            'yaxis': {
                'showspikes': False,
                'showgrid': False,
                'zeroline': False,
                'visible': False
            },
            'zaxis': {
                'showspikes': False,
                'showgrid': False,
                'zeroline': False,
                'visible': False
            }
        }
    )
    fig = go.Figure(layout=layout)
    # add invisible bounding trace to keep axes' scale constant
    fig.add_trace(
        go.Scatter3d(
            x=[data_3d[0].min(), data_3d[0].max()],
            y=[data_3d[1].min(), data_3d[1].max()],
            z=[data_3d[2].min(), data_3d[2].max()],
            mode='markers',
            marker={
                'color':'rgba(0,0,0,0)',
                'opacity': 0,
            },
            showlegend=False
        )
    )
    # One trace per label, highest colormap index first, so legend order is
    # stable; malware is drawn as circles, unlabeled apps as crosses.
    for index, row in type_chart.sort_values('num', ascending=False).iterrows():
        if row['malware']:
            symbol = 'circle'
            group='Malware'
            size = 2
        else:
            symbol = 'x'
            group='Unlabeled'
            size = 1.5
        name = f"{group}, {row['type'].replace('-', ' ').title()}"
        if row['type']=='Other malware':
            name=row['type']
        df = data_3d[data_3d.type==row['type']]
        # Convert the 0..1 RGB floats from the colormap to a CSS rgb() tuple.
        rbg = tuple([255*val for val in cm.tab20(row['num'])[:3]])
        color = f"rgb{rbg}"
        trace = go.Scatter3d(
            name=name,
            x=df[0],
            y=df[1],
            z=df[2],
            customdata=list(df.index),
            hovertemplate=
                "<b>%{customdata}</b><br>" +
                f"{name}" +
                "<extra></extra>",
            mode='markers',
            marker={
                'size': size,
                'opacity': 1,
                'color': color,
                'symbol': symbol,
            },
        )
        fig.add_trace(trace)
    # Save the plot.
    pio.write_html(fig, file=os.path.join(data_folder, '3D-plot.html'), auto_open=True)
    if update_figs:
        pio.write_html(fig, file=os.path.join('docs', '_includes', '3D-plot.html'), auto_open=True)


def compute_model_performance_statistics(pred, true):
    '''
    Returns a series with the f1-score, accuracy, recall, and
    confusion counts (TP, TN, FP, FN).
    '''
    # confusion_matrix returns [[TN, FP], [FN, TP]] for binary labels.
    TN, FP, FN, TP = confusion_matrix(true, pred).ravel()
    return pd.Series({
        'ACC': accuracy_score(true, pred),
        'TPR': recall_score(true, pred),
        'F1': f1_score(true, pred),
        'TP': TP,
        'TN': TN,
        'FP': FP,
        'FN': FN
    })


def create_performance_table(m2v_results_path, hindroid_results_path, outpath=None):
    """Combine m2vDroid and HinDroid predictions into one metrics table.

    Each column of predictions becomes one row of metrics (via
    compute_model_performance_statistics). Optionally writes the table to
    *outpath*.

    NOTE(review): the 'true' column is dropped from the m2vDroid frame and
    then read back after joining — so the HinDroid CSV is expected to supply
    'true'; confirm against its writer.
    """
    results = pd.read_csv(m2v_results_path, index_col='app', usecols=['app', 'm2vDroid', 'true'])
    if 'true' in results.columns:
        results = results.drop(columns=['true'])
    results = results.join(pd.read_csv(hindroid_results_path, index_col='app'))
    y_true = results.true
    # Score every prediction column against the shared ground truth.
    table = results.drop(columns=['true']).apply(partial(compute_model_performance_statistics, true=y_true)).T
    table = table.astype({col: int for col in ['TP', 'TN', 'FP', 'FN']})
    if outpath is not None:
        table.to_csv(outpath)
    return table


def generate_analysis(data_path, jobs={}):
    "Generates plots, aggregates, and statistical analysis on app data located in `data_path`"
    # NOTE(review): mutable default argument `jobs={}` — harmless here since
    # it is only read, but prefer `jobs=None` with a None-check.
    # load data
    # app_data_path = os.path.join(data_path, 'app_data.csv')
    # app_data = dd.read_csv(app_data_path)
    # os.makedirs(out_folder, exist_ok=True)
    if "plots" in jobs:
        make_groundtruth_figures(data_path, **jobs['plots'])
[ "sklearn.metrics.f1_score", "numpy.unique", "pandas.read_csv", "os.path.join", "sklearn.metrics.recall_score", "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.figure", "matplotlib.pyplot.axes", "functools.partial", "matplotlib.pyplot.scatter", "matplotlib.cm.tab20", "pandas.DataFrame",...
[((2021, 2059), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 14}"], {}), "({'font.size': 14})\n", (2040, 2059), True, 'import matplotlib.pyplot as plt\n'), ((2306, 2333), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (2316, 2333), True, 'import matplotlib.pyplot as plt\n'), ((2383, 2495), 'matplotlib.pyplot.scatter', 'plt.scatter', (['node_embeddings_2d[:, 0]', 'node_embeddings_2d[:, 1]'], {'c': 'node_colours', 'cmap': '"""tab20"""', 'alpha': '(1)', 's': '(5)'}), "(node_embeddings_2d[:, 0], node_embeddings_2d[:, 1], c=\n node_colours, cmap='tab20', alpha=1, s=5)\n", (2394, 2495), True, 'import matplotlib.pyplot as plt\n'), ((3486, 3539), 'pandas.DataFrame', 'pd.DataFrame', (['node_embeddings_3d'], {'index': 'vectors.index'}), '(node_embeddings_3d, index=vectors.index)\n', (3498, 3539), True, 'import pandas as pd\n'), ((4530, 4554), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'layout': 'layout'}), '(layout=layout)\n', (4539, 4554), True, 'import plotly.graph_objs as go\n'), ((6864, 6951), 'pandas.read_csv', 'pd.read_csv', (['m2v_results_path'], {'index_col': '"""app"""', 'usecols': "['app', 'm2vDroid', 'true']"}), "(m2v_results_path, index_col='app', usecols=['app', 'm2vDroid',\n 'true'])\n", (6875, 6951), True, 'import pandas as pd\n'), ((612, 653), 'os.path.join', 'os.path.join', (['data_folder', '"""features.csv"""'], {}), "(data_folder, 'features.csv')\n", (624, 653), False, 'import os\n'), ((860, 922), 'pandas.read_csv', 'pd.read_csv', (['"""data/out/all-apps/all_apps.csv"""'], {'index_col': '"""app"""'}), "('data/out/all-apps/all_apps.csv', index_col='app')\n", (871, 922), True, 'import pandas as pd\n'), ((3240, 3280), 'os.path.join', 'os.path.join', (['data_folder', '"""2D-plot.png"""'], {}), "(data_folder, '2D-plot.png')\n", (3252, 3280), False, 'import os\n'), ((7058, 7109), 'pandas.read_csv', 'pd.read_csv', (['hindroid_results_path'], {'index_col': '"""app"""'}), 
"(hindroid_results_path, index_col='app')\n", (7069, 7109), True, 'import pandas as pd\n'), ((2338, 2348), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (2346, 2348), True, 'import matplotlib.pyplot as plt\n'), ((6120, 6161), 'os.path.join', 'os.path.join', (['data_folder', '"""3D-plot.html"""'], {}), "(data_folder, '3D-plot.html')\n", (6132, 6161), False, 'import os\n'), ((6497, 6525), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['true', 'pred'], {}), '(true, pred)\n', (6513, 6525), False, 'from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, recall_score\n'), ((6572, 6598), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['true', 'pred'], {}), '(true, pred)\n', (6586, 6598), False, 'from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, recall_score\n'), ((6615, 6639), 'sklearn.metrics.recall_score', 'recall_score', (['true', 'pred'], {}), '(true, pred)\n', (6627, 6639), False, 'from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, recall_score\n'), ((6655, 6675), 'sklearn.metrics.f1_score', 'f1_score', (['true', 'pred'], {}), '(true, pred)\n', (6663, 6675), False, 'from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, recall_score\n'), ((7186, 7244), 'functools.partial', 'partial', (['compute_model_performance_statistics'], {'true': 'y_true'}), '(compute_model_performance_statistics, true=y_true)\n', (7193, 7244), False, 'from functools import partial\n'), ((2209, 2232), 'numpy.unique', 'np.unique', (['node_targets'], {}), '(node_targets)\n', (2218, 2232), True, 'import numpy as np\n'), ((6237, 6286), 'os.path.join', 'os.path.join', (['"""docs"""', '"""_includes"""', '"""3D-plot.html"""'], {}), "('docs', '_includes', '3D-plot.html')\n", (6249, 6286), False, 'import os\n'), ((5505, 5525), 'matplotlib.cm.tab20', 'cm.tab20', (["row['num']"], {}), "(row['num'])\n", (5513, 5525), True, 'import matplotlib.cm as cm\n')]
#!/usr/bin/env python3
# encoding: utf-8

import torch.nn.functional as F

from rls.algorithms.single.dqn import DQN
from rls.common.decorator import iton
from rls.utils.torch_utils import n_step_return


class DDQN(DQN):
    """
    Double DQN, https://arxiv.org/abs/1509.06461
    Double DQN + LSTM, https://arxiv.org/abs/1908.06040

    Decouples action *selection* (online network) from action *evaluation*
    (target network) to reduce the over-estimation bias of vanilla DQN.
    """
    policy_mode = 'off-policy'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @iton
    def _train(self, BATCH):
        """One gradient step on a batch of trajectories.

        Returns the per-step TD error (e.g. for prioritized-replay weight
        updates) and a dict of scalar training summaries.
        """
        # Online-network Q-values for current and next observations.
        q = self.q_net(BATCH.obs, begin_mask=BATCH.begin_mask)  # [T, B, A]
        q_next = self.q_net(BATCH.obs_, begin_mask=BATCH.begin_mask)  # [T, B, A]
        # Target-network Q-values for the next observations.
        q_target_next = self.q_net.t(BATCH.obs_, begin_mask=BATCH.begin_mask)  # [T, B, A]
        # Double-DQN: pick the next action with the online network...
        next_max_action = q_next.argmax(-1)  # [T, B]
        # BUGFIX: the previous `.squeeze()` (no dim argument) removed *all*
        # size-1 dimensions; with B == 1 the one-hot became [T, A] and the
        # product below broadcast to [T, T, A], corrupting the target.
        # `argmax(-1)` already yields the [T, B] layout one_hot expects.
        next_max_action_one_hot = F.one_hot(next_max_action, self.a_dim).float()  # [T, B, A]
        q_eval = (q * BATCH.action).sum(-1, keepdim=True)  # [T, B, 1]
        # ...but evaluate it with the target network.
        q_target_next_max = (q_target_next * next_max_action_one_hot).sum(-1, keepdim=True)  # [T, B, 1]
        q_target = n_step_return(BATCH.reward,
                                  self.gamma,
                                  BATCH.done,
                                  q_target_next_max,
                                  BATCH.begin_mask).detach()  # [T, B, 1]
        td_error = q_target - q_eval  # [T, B, 1]
        # Importance-sampling weights ('isw') default to 1 when absent.
        q_loss = (td_error.square() * BATCH.get('isw', 1.0)).mean()  # 1
        self.oplr.optimize(q_loss)
        return td_error, {
            'LEARNING_RATE/lr': self.oplr.lr,
            'LOSS/loss': q_loss,
            'Statistics/q_max': q_eval.max(),
            'Statistics/q_min': q_eval.min(),
            'Statistics/q_mean': q_eval.mean()
        }
[ "rls.utils.torch_utils.n_step_return" ]
[((1115, 1207), 'rls.utils.torch_utils.n_step_return', 'n_step_return', (['BATCH.reward', 'self.gamma', 'BATCH.done', 'q_target_next_max', 'BATCH.begin_mask'], {}), '(BATCH.reward, self.gamma, BATCH.done, q_target_next_max,\n BATCH.begin_mask)\n', (1128, 1207), False, 'from rls.utils.torch_utils import n_step_return\n')]
#!/usr/bin/python3
"""Download the HIFLD hospitals dataset to ``hifld_hospital.csv``."""
import subprocess

# ArcGIS open-data CSV export of the HIFLD hospitals layer.
DATASET_URL = (
    'https://opendata.arcgis.com/datasets/'
    '6ac5e325468c4cb9b905f1728d6fbf0f_0.csv'
)

# Use an argument list (no shell re-parsing) instead of os.system, and
# check=True so a failed download raises instead of passing silently.
subprocess.run(
    ['wget', DATASET_URL, '-O', 'hifld_hospital.csv'],
    check=True,
)
[ "os.system" ]
[((29, 154), 'os.system', 'os.system', (['"""wget https://opendata.arcgis.com/datasets/6ac5e325468c4cb9b905f1728d6fbf0f_0.csv -O hifld_hospital.csv"""'], {}), "(\n 'wget https://opendata.arcgis.com/datasets/6ac5e325468c4cb9b905f1728d6fbf0f_0.csv -O hifld_hospital.csv'\n )\n", (38, 154), False, 'import os\n')]
import argparse
import subprocess
import sys
import logging

logger = logging.getLogger("helper")


def azcli(command):
    """Run *command* as a subprocess and return its stdout bytes.

    *command* is an argument list (e.g. ``["az", "group", "list"]``).
    stdout is logged at DEBUG level; on a non-zero exit status the stderr
    text is logged as an error and the process exits with that status.
    """
    # subprocess.run replaces the manual Popen/communicate dance and still
    # captures both streams.
    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    logger.debug(result.stdout.decode("utf-8"))
    if result.returncode != 0:
        logger.error(result.stderr.decode("utf-8"))
        sys.exit(result.returncode)
    return result.stdout
[ "logging.getLogger", "subprocess.Popen", "sys.exit" ]
[((70, 97), 'logging.getLogger', 'logging.getLogger', (['"""helper"""'], {}), "('helper')\n", (87, 97), False, 'import logging\n'), ((133, 206), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (149, 206), False, 'import subprocess\n'), ((410, 429), 'sys.exit', 'sys.exit', (['exit_code'], {}), '(exit_code)\n', (418, 429), False, 'import sys\n')]
from Vertex import Vertex
import pygame
from Colours import Colours


class Grid:
    """Builds, draws, and hit-tests a square grid of Vertex cells."""

    def createGrid(self, rows, width):
        """Return a rows x rows matrix of Vertex objects.

        Each cell is `width // rows` pixels square.
        """
        grid = []
        space = width // rows  # pixel size of one cell
        for x in range(rows):
            grid.append([])
            for i in range(rows):
                vertex = Vertex(space, rows, x, i)
                grid[x].append(vertex)
        return grid

    def generateGrid(self, width, rows, window):
        """Draw the horizontal and vertical grid lines onto *window*."""
        space = width // rows
        # FIX: draw each set of lines exactly once. Previously the
        # vertical-line loop was nested inside the horizontal one, redrawing
        # every vertical line `rows` times — identical pixels, O(rows^2)
        # draw calls.
        for x in range(rows):
            pygame.draw.line(window, Colours.BLACK, (0, x * space), (width, x * space))
        for i in range(rows):
            pygame.draw.line(window, Colours.BLACK, (i * space, 0), (i * space, width))

    def colour(self, window, grid, rows, width):
        """Repaint every vertex, overlay the grid lines, and flip the display."""
        window.fill(Colours.WHITE)
        for row in grid:
            for vertex in row:
                vertex.createVertex(window)
        self.generateGrid(width, rows, window)
        pygame.display.update()

    def getClicked(self, position, rows, width):
        """Map a pixel *position* (x, y) to its (row, column) cell indices."""
        space = width // rows
        y, x = position
        column = x // space
        row = y // space
        return row, column
[ "pygame.display.update", "Vertex.Vertex", "pygame.draw.line" ]
[((964, 987), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (985, 987), False, 'import pygame\n'), ((510, 585), 'pygame.draw.line', 'pygame.draw.line', (['window', 'Colours.BLACK', '(0, x * space)', '(width, x * space)'], {}), '(window, Colours.BLACK, (0, x * space), (width, x * space))\n', (526, 585), False, 'import pygame\n'), ((296, 321), 'Vertex.Vertex', 'Vertex', (['space', 'rows', 'x', 'i'], {}), '(space, rows, x, i)\n', (302, 321), False, 'from Vertex import Vertex\n'), ((638, 713), 'pygame.draw.line', 'pygame.draw.line', (['window', 'Colours.BLACK', '(i * space, 0)', '(i * space, width)'], {}), '(window, Colours.BLACK, (i * space, 0), (i * space, width))\n', (654, 713), False, 'import pygame\n')]
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # (C) British Crown copyright. The Met Office. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
"""Unit tests for ShowerConditionProbability plugin""" from typing import Dict, List, Tuple, Union import numpy as np import pytest from iris.cube import CubeList from numpy import ndarray from improver.metadata.constants import FLOAT_DTYPE from improver.precipitation_type.shower_condition_probability import ( ShowerConditionProbability, ) from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube ATTRIBUTES = { "institution": "Met Office", "mosg__model_configuration": "gl_ens", "source": "Met Office Unified Model", "title": "MOGREPS-G Forecast on UK 2 km Standard Grid", } EXPECTED_ATTRIBUTES = { "institution": "Met Office", "source": "Met Office Unified Model", "title": "Post-Processed MOGREPS-G Forecast on UK 2 km Standard Grid", } MODEL_ID_ATTR_ATTRIBUTES = EXPECTED_ATTRIBUTES.copy() MODEL_ID_ATTR_ATTRIBUTES.update({"mosg__model_configuration": "gl_ens"}) @pytest.fixture(name="test_cubes") def cube_fixture(cube_properties: Tuple[Dict[str, Dict[str, Union[List, ndarray]]]]): """Create a test cube""" cubes = CubeList() for name, values in cube_properties.items(): cubes.append( set_up_variable_cube( values["data"], name=name, units=1, realizations=values["realizations"], attributes=ATTRIBUTES, ) ) return cubes @pytest.mark.parametrize( "cube_properties, kwargs, expected", ( # Simple case with one realization, cloud dominates returned # probabilities (i.e. clear skies). ( { "low_and_medium_type_cloud_area_fraction": { "data": np.zeros((2, 2)).astype(FLOAT_DTYPE), "realizations": [0], }, "convective_ratio": { "data": np.zeros((2, 2)).astype(FLOAT_DTYPE), "realizations": [0], }, }, # Other plugin kwargs {"cloud_threshold": 0.5, "convection_threshold": 0.5}, # Expected result (np.ones((2, 2)).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES), ), # As above, but using the model_id_attr keyword to preserve the model # information. 
( { "low_and_medium_type_cloud_area_fraction": { "data": np.zeros((2, 2)).astype(FLOAT_DTYPE), "realizations": [0], }, "convective_ratio": { "data": np.zeros((2, 2)).astype(FLOAT_DTYPE), "realizations": [0], }, }, # Other plugin kwargs { "model_id_attr": "mosg__model_configuration", "cloud_threshold": 0.5, "convection_threshold": 0.5, }, # Expected result (np.ones((2, 2)).astype(FLOAT_DTYPE), MODEL_ID_ATTR_ATTRIBUTES), ), # Simple case with one realization, convection dominates returned # probabilities. ( { "low_and_medium_type_cloud_area_fraction": { "data": np.ones((2, 2)).astype(FLOAT_DTYPE), "realizations": [0], }, "convective_ratio": { "data": np.ones((2, 2)).astype(FLOAT_DTYPE), "realizations": [0], }, }, # Other plugin kwargs {"cloud_threshold": 0.5, "convection_threshold": 0.5}, # Expected result (np.ones((2, 2)).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES), ), # As above, but the convective_ratio includes masked values. This test # checks that they are ignored in setting the resulting probabilities # and that the output is not masked. One resulting value differs to the # above, corresponding to the masked point. ( { "low_and_medium_type_cloud_area_fraction": { "data": np.ones((2, 2)).astype(FLOAT_DTYPE), "realizations": [0], }, "convective_ratio": { "data": np.ma.masked_array( np.ones((2, 2)).astype(FLOAT_DTYPE), mask=np.array([[0, 0], [0, 1]]), ), "realizations": [0], }, }, # Other plugin kwargs {"cloud_threshold": 0.5, "convection_threshold": 0.5}, # Expected result (np.array([[1, 1], [1, 0]]).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES), ), # Multi-realization case with a range of probabilities returned due # to variable cloud. 
( { "low_and_medium_type_cloud_area_fraction": { "data": np.array( [[[0.4, 0.6], [0.4, 0.6]], [[0.6, 0.6], [0.6, 0.6]]] ).astype(FLOAT_DTYPE), "realizations": [0, 1], }, "convective_ratio": { "data": np.zeros((2, 2, 2)).astype(FLOAT_DTYPE), "realizations": [0, 1], }, }, # Other plugin kwargs {"cloud_threshold": 0.5, "convection_threshold": 0.5}, # Expected result (np.array([[0.5, 0], [0.5, 0]]).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES), ), # Same as above, but with different threshold values applied. # Cloud =< 0.7, which will result in probabilities all equal to 1. ( { "low_and_medium_type_cloud_area_fraction": { "data": np.array( [[[0.4, 0.6], [0.4, 0.6]], [[0.6, 0.6], [0.6, 0.6]]] ).astype(FLOAT_DTYPE), "realizations": [0, 1], }, "convective_ratio": { "data": np.zeros((2, 2, 2)).astype(FLOAT_DTYPE), "realizations": [0, 1], }, }, # Other plugin kwargs {"cloud_threshold": 0.7, "convection_threshold": 0.5}, # Expected result (np.ones((2, 2)).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES), ), # Multi-realization case with cloud and convection both providing a # showery probability of 1. 
( { "low_and_medium_type_cloud_area_fraction": { "data": np.array([[[0, 1], [1, 1]], [[0, 1], [1, 1]]]).astype( FLOAT_DTYPE ), "realizations": [0, 1], }, "convective_ratio": { "data": np.array([[[0, 0], [0, 1]], [[0, 0], [0, 1]]]).astype( FLOAT_DTYPE ), "realizations": [0, 1], }, }, # Other plugin kwargs {"cloud_threshold": 0.5, "convection_threshold": 0.5}, # Expected result (np.array([[1, 0], [0, 1]]).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES), ), ), ) def test_scenarios(test_cubes, kwargs, expected): """Test output type and metadata""" expected_shape = test_cubes[0].shape[-2:] result = ShowerConditionProbability(**kwargs)(test_cubes) assert result.name() == "probability_of_shower_condition_above_threshold" assert result.units == "1" assert result.shape == expected_shape assert result.data.dtype == FLOAT_DTYPE assert (result.data == expected[0]).all() assert result.attributes == expected[1] assert result.coord(var_name="threshold").name() == "shower_condition" assert result.coord(var_name="threshold").points == 1.0 def test_incorrect_inputs_exception(): """Tests that the expected exception is raised for incorrectly named input cubes.""" temperature = set_up_variable_cube(np.ones((2, 2)).astype(FLOAT_DTYPE)) expected = ( "A cloud area fraction and convective ratio are required, " f"but the inputs were: {temperature.name()}, {temperature.name()}" ) with pytest.raises(ValueError, match=expected): ShowerConditionProbability()(CubeList([temperature, temperature])) def test_mismatched_shape_exception(): """Tests that the expected exception is raised for cloud and convection cubes of different shapes.""" cloud = set_up_variable_cube( np.ones((2, 2)).astype(FLOAT_DTYPE), name="low_and_medium_type_cloud_area_fraction", ) convection = set_up_variable_cube( np.ones((3, 3)).astype(FLOAT_DTYPE), name="convective_ratio" ) expected = ( "The cloud area fraction and convective ratio cubes are not the same " "shape and cannot be combined to generate a shower probability" ) with 
pytest.raises(ValueError, match=expected): ShowerConditionProbability()(CubeList([cloud, convection]))
[ "iris.cube.CubeList", "numpy.ones", "improver.synthetic_data.set_up_test_cubes.set_up_variable_cube", "improver.precipitation_type.shower_condition_probability.ShowerConditionProbability", "numpy.array", "numpy.zeros", "pytest.raises", "pytest.fixture" ]
[((2584, 2617), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""test_cubes"""'}), "(name='test_cubes')\n", (2598, 2617), False, 'import pytest\n'), ((2745, 2755), 'iris.cube.CubeList', 'CubeList', ([], {}), '()\n', (2753, 2755), False, 'from iris.cube import CubeList\n'), ((9135, 9171), 'improver.precipitation_type.shower_condition_probability.ShowerConditionProbability', 'ShowerConditionProbability', ([], {}), '(**kwargs)\n', (9161, 9171), False, 'from improver.precipitation_type.shower_condition_probability import ShowerConditionProbability\n'), ((9991, 10032), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'expected'}), '(ValueError, match=expected)\n', (10004, 10032), False, 'import pytest\n'), ((10700, 10741), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'expected'}), '(ValueError, match=expected)\n', (10713, 10741), False, 'import pytest\n'), ((2839, 2960), 'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', (["values['data']"], {'name': 'name', 'units': '(1)', 'realizations': "values['realizations']", 'attributes': 'ATTRIBUTES'}), "(values['data'], name=name, units=1, realizations=\n values['realizations'], attributes=ATTRIBUTES)\n", (2859, 2960), False, 'from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube\n'), ((10042, 10070), 'improver.precipitation_type.shower_condition_probability.ShowerConditionProbability', 'ShowerConditionProbability', ([], {}), '()\n', (10068, 10070), False, 'from improver.precipitation_type.shower_condition_probability import ShowerConditionProbability\n'), ((10071, 10107), 'iris.cube.CubeList', 'CubeList', (['[temperature, temperature]'], {}), '([temperature, temperature])\n', (10079, 10107), False, 'from iris.cube import CubeList\n'), ((10751, 10779), 'improver.precipitation_type.shower_condition_probability.ShowerConditionProbability', 'ShowerConditionProbability', ([], {}), '()\n', (10777, 10779), False, 'from 
improver.precipitation_type.shower_condition_probability import ShowerConditionProbability\n'), ((10780, 10809), 'iris.cube.CubeList', 'CubeList', (['[cloud, convection]'], {}), '([cloud, convection])\n', (10788, 10809), False, 'from iris.cube import CubeList\n'), ((9778, 9793), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (9785, 9793), True, 'import numpy as np\n'), ((10302, 10317), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (10309, 10317), True, 'import numpy as np\n'), ((10448, 10463), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (10455, 10463), True, 'import numpy as np\n'), ((3800, 3815), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (3807, 3815), True, 'import numpy as np\n'), ((4614, 4629), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (4621, 4629), True, 'import numpy as np\n'), ((5320, 5335), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (5327, 5335), True, 'import numpy as np\n'), ((6335, 6361), 'numpy.array', 'np.array', (['[[1, 1], [1, 0]]'], {}), '([[1, 1], [1, 0]])\n', (6343, 6361), True, 'import numpy as np\n'), ((7156, 7186), 'numpy.array', 'np.array', (['[[0.5, 0], [0.5, 0]]'], {}), '([[0.5, 0], [0.5, 0]])\n', (7164, 7186), True, 'import numpy as np\n'), ((8021, 8036), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (8028, 8036), True, 'import numpy as np\n'), ((8895, 8921), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (8903, 8921), True, 'import numpy as np\n'), ((3379, 3395), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (3387, 3395), True, 'import numpy as np\n'), ((3543, 3559), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (3551, 3559), True, 'import numpy as np\n'), ((4084, 4100), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (4092, 4100), True, 'import numpy as np\n'), ((4248, 4264), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (4256, 4264), True, 'import numpy as np\n'), 
((4901, 4916), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (4908, 4916), True, 'import numpy as np\n'), ((5064, 5079), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (5071, 5079), True, 'import numpy as np\n'), ((5792, 5807), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (5799, 5807), True, 'import numpy as np\n'), ((6065, 6091), 'numpy.array', 'np.array', (['[[0, 0], [0, 1]]'], {}), '([[0, 0], [0, 1]])\n', (6073, 6091), True, 'import numpy as np\n'), ((6634, 6696), 'numpy.array', 'np.array', (['[[[0.4, 0.6], [0.4, 0.6]], [[0.6, 0.6], [0.6, 0.6]]]'], {}), '([[[0.4, 0.6], [0.4, 0.6]], [[0.6, 0.6], [0.6, 0.6]]])\n', (6642, 6696), True, 'import numpy as np\n'), ((6893, 6912), 'numpy.zeros', 'np.zeros', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (6901, 6912), True, 'import numpy as np\n'), ((7499, 7561), 'numpy.array', 'np.array', (['[[[0.4, 0.6], [0.4, 0.6]], [[0.6, 0.6], [0.6, 0.6]]]'], {}), '([[[0.4, 0.6], [0.4, 0.6]], [[0.6, 0.6], [0.6, 0.6]]])\n', (7507, 7561), True, 'import numpy as np\n'), ((7758, 7777), 'numpy.zeros', 'np.zeros', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (7766, 7777), True, 'import numpy as np\n'), ((8316, 8362), 'numpy.array', 'np.array', (['[[[0, 1], [1, 1]], [[0, 1], [1, 1]]]'], {}), '([[[0, 1], [1, 1]], [[0, 1], [1, 1]]])\n', (8324, 8362), True, 'import numpy as np\n'), ((8559, 8605), 'numpy.array', 'np.array', (['[[[0, 0], [0, 1]], [[0, 0], [0, 1]]]'], {}), '([[[0, 0], [0, 1]], [[0, 0], [0, 1]]])\n', (8567, 8605), True, 'import numpy as np\n'), ((5999, 6014), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (6006, 6014), True, 'import numpy as np\n')]
# pylint: disable=no-member, too-many-locals, no-self-use """Vessels File Upload """ import time from flask import request # from library.couch_database import CouchDatabase from library.postgresql_queries import PostgreSQL from library.couch_queries import Queries from library.common import Common from library.aws_s3 import AwsS3 class Upload(Common): """Class for Vessels""" # INITIALIZE def __init__(self): """The Constructor for Vessel Upload class""" self.couch_query = Queries() self.postgres = PostgreSQL() self.aws3 = AwsS3() super(Upload, self).__init__() # GET VESSEL FUNCTION def file_upload(self): """ This API is for Uploading Vessel File --- tags: - Vessel produces: - application/json parameters: - name: token in: header description: Token required: true type: string - name: userid in: header description: User ID required: true type: string - name: vessel_id in: query description: Vessel ID required: true type: string responses: 500: description: Error 200: description: Vessel File Upload """ # INIT DATA data = {} # VESSEL ID vessel_id = request.args.get('vessel_id') # # GET DATA token = request.headers.get('token') userid = request.headers.get('userid') # CHECK TOKEN token_validation = self.validate_token(token, userid) if not token_validation: data["alert"] = "Invalid Token" data['status'] = 'Failed' # RETURN ALERT return self.return_data(data) # RH_<VesselIMO>_<ImageID> parameters = self.couch_query.get_complete_values( vessel_id, "PARAMETERS" ) # VESSEL IMO vessel_imo = parameters['PARAMETERS']['INFO']['IMO'] file_upload = [] filenames = request.files.getlist('upfile') for filename in filenames: try: file_name = filename.filename # ext = file_name.split(".")[-1] # if not self.allowed_file_type(file_name): # data["alert"] = "File Type Not Allowed!" # data['status'] = 'Failed' # return self.return_data(data) except ImportError: data["alert"] = "No image!" 
data['status'] = 'Failed' # RETURN ALERT return self.return_data(data) file_name = self.rename_file(vessel_id, file_name) vimg_data = {} vimg_data['vessel_id'] = vessel_id vimg_data['vessel_imo'] = vessel_imo vimg_data['file_name'] = file_name vimg_data['status'] = "active" vimg_data['created_on'] = time.time() # ADD FILE TO VESSEL FILE TABLE self.postgres.insert('vessel_file', vimg_data, 'vessel_file_id') # FILE NAME # file_name_upload = str(vessel_file_id) + "." + ext # upload_file = 'VesselFiles/' + "RH_" + vessel_imo + "_" + file_name_upload upload_file = 'VesselFiles/' + vessel_imo +"/" + file_name body = request.files['upfile'] # SAVE TO S3 url = "" if self.aws3.save_file(upload_file, body): url = self.aws3.get_url(upload_file) file_upload.append({ "filename": file_name, "url": url }) data["status"] = "ok" data["data"] = file_upload # RETURN return self.return_data(data) def allowed_file_type(self, filename): """ Check Allowed File Extension """ allowed_extensions = set(['txt', 'pdf']) return '.' in filename and filename.rsplit('.', 1)[1].lower() in allowed_extensions def rename_file(self, vessel_id, filename): """ Rename File """ sql_str = "SELECT * FROM vessel_file" sql_str += " WHERE vessel_id='{0}'".format(vessel_id) sql_str += " AND file_name='{0}'".format(filename) vessel_file = self.postgres.query_fetch_one(sql_str) if vessel_file: new_name = self.file_replace(vessel_file['file_name']) return self.rename_file(vessel_id, new_name) return filename
[ "flask.request.args.get", "flask.request.files.getlist", "library.couch_queries.Queries", "library.aws_s3.AwsS3", "library.postgresql_queries.PostgreSQL", "time.time", "flask.request.headers.get" ]
[((523, 532), 'library.couch_queries.Queries', 'Queries', ([], {}), '()\n', (530, 532), False, 'from library.couch_queries import Queries\n'), ((558, 570), 'library.postgresql_queries.PostgreSQL', 'PostgreSQL', ([], {}), '()\n', (568, 570), False, 'from library.postgresql_queries import PostgreSQL\n'), ((592, 599), 'library.aws_s3.AwsS3', 'AwsS3', ([], {}), '()\n', (597, 599), False, 'from library.aws_s3 import AwsS3\n'), ((1518, 1547), 'flask.request.args.get', 'request.args.get', (['"""vessel_id"""'], {}), "('vessel_id')\n", (1534, 1547), False, 'from flask import request\n'), ((1589, 1617), 'flask.request.headers.get', 'request.headers.get', (['"""token"""'], {}), "('token')\n", (1608, 1617), False, 'from flask import request\n'), ((1636, 1665), 'flask.request.headers.get', 'request.headers.get', (['"""userid"""'], {}), "('userid')\n", (1655, 1665), False, 'from flask import request\n'), ((2241, 2272), 'flask.request.files.getlist', 'request.files.getlist', (['"""upfile"""'], {}), "('upfile')\n", (2262, 2272), False, 'from flask import request\n'), ((3190, 3201), 'time.time', 'time.time', ([], {}), '()\n', (3199, 3201), False, 'import time\n')]
import tensorflow as tf
import numpy as np
from tqdm.notebook import tqdm


class System():
    """Bundle of the components of a variational Monte Carlo simulation.

    Registers itself on the supplied Ansatz and Sampler so they can reach
    back into system-wide settings; both must therefore be provided despite
    the ``None`` defaults (assignment below raises AttributeError otherwise).
    """

    def __init__(self,
                 num_part,
                 dim,
                 Ansatz=None,
                 External=None,
                 Internal=None,
                 Sampler=None
                 ):
        # number of particles and spatial dimensions
        self.num_part = num_part
        self.dim = dim
        self.Ansatz = Ansatz
        self.External = External
        self.Internal = Internal
        self.Sampler = Sampler

        # back-references for the wavefunction and sampler
        self.Ansatz.system = self
        self.Sampler.system = self


class Metropolis():
    """Vectorized Metropolis sampler for a batch of walkers."""

    def __init__(self, step_length, steps):
        # step_length: proposal step size; steps: number of Metropolis sweeps
        self.step_length = step_length
        self.steps = steps

    def __call__(self, batch_size):
        """Run ``self.steps`` Metropolis updates for ``batch_size`` walkers.

        Returns the final walker positions (tensor of shape
        (batch_size, dim)) and the total number of accepted moves.
        """
        total_accepted = 0
        dim = self.system.dim

        # initial position for walkers, uniform in [-2, 2)
        x_old = tf.random.uniform(
            (batch_size, dim), minval=-2, maxval=2, dtype=tf.dtypes.float64)
        psi_old = self.system.Ansatz(x_old).numpy()

        # thermalizing steps
        for i in range(self.steps):
            x_new = x_old + self.step_length * \
                tf.random.uniform((batch_size, dim), minval=-1,
                                  maxval=1, dtype=tf.dtypes.float64)
            psi_new = self.system.Ansatz(x_new).numpy()
            U = np.random.uniform(0, 1, (batch_size, 1))

            # vectorized acceptance criterion: accept when |psi_new/psi_old|^2 > U
            mask = ((psi_new / psi_old)**2 > U)[:, 0]
            x_old = x_old.numpy()
            x_new = x_new.numpy()

            # update walkers (accepted moves overwrite position and amplitude)
            x_old[mask] = x_new[mask]
            psi_old[mask] = psi_new[mask]
            x_old = tf.convert_to_tensor(x_old, dtype=tf.dtypes.float64)
            total_accepted += np.sum(mask)

        return x_old, total_accepted


class HarmonicOsc():
    """Harmonic-oscillator external potential V = 0.5 * omega^2 * sum(x^2)."""

    def __init__(self, omega):
        self.omega = omega

    def __call__(self, x):
        # sum of squared coordinates per sample, reshaped to a column vector
        V = 0.5 * self.omega**2 * \
            tf.reshape(tf.reduce_sum(x**2, axis=1), (-1, 1))
        return V


class Coulomb():
    """Smoothed pairwise interaction alpha / sqrt(r12^2 + beta^2)."""

    def __init__(self, alpha, beta):
        # alpha: interaction strength; beta: softening that avoids the
        # singularity at r12 = 0
        self.alpha = alpha
        self.beta = beta

    def __call__(self, x, num_part, dim):
        V = 0
        # sum over unique particle pairs (i, j) with j < i; each particle's
        # coordinates occupy a contiguous slice of width ``dim``
        for i in range(num_part):
            for j in range(i):
                r12 = tf.norm(x[:, i * dim:(i + 1) * dim] -
                              x[:, j * dim:(j + 1) * dim], axis=1)
                r12 = tf.reshape(r12, (-1, 1))
                V += self.alpha / tf.math.sqrt(r12**2 + self.beta**2)
        return V


def oneBodyDensity(pos, bins, mode="radial"):
    """Histogram walker positions into a one-body density estimate.

    pos  : array of walker positions (rows are samples).
    bins : uniformly spaced bin edges; bins[0] is the lower bound.
    mode : 'radial1D'/'radial2D'/'radial3D' (radial densities with the
           matching shell-volume normalization), '1D' (per-coordinate
           histogram) or '2D' (2-D Cartesian histogram).

    NOTE(review): the default ``mode="radial"`` matches no branch, so the
    function falls through and returns None -- confirm callers always pass
    an explicit mode.
    NOTE(review): the bare ``except`` around each bin update silently drops
    out-of-range samples; a value below bins[0] yields a negative index that
    wraps to the end of the array instead of raising -- verify this is
    acceptable for the chosen bin range.
    """
    if mode == "radial1D":
        density = np.zeros(bins.shape[0])
        r_min = bins[0]
        dr = bins[1] - bins[0]
        rPos = np.linalg.norm(pos, axis=1)
        for r in tqdm(rPos):
            try:
                density[int((r - r_min) // dr)] += 1 / dr
            except:
                pass
        return density

    if mode == "radial2D":
        density = np.zeros(bins.shape[0])
        r_min = bins[0]
        dr = bins[1] - bins[0]
        rPos = np.linalg.norm(pos, axis=1)
        for r in tqdm(rPos):
            try:
                # normalize by the area of the annulus at radius r
                density[int((r - r_min) // dr)] += 1 / (2 * np.pi * dr * r)
            except:
                pass
        return density

    if mode == "radial3D":
        density = np.zeros(bins.shape[0])
        r_min = bins[0]
        dr = bins[1] - bins[0]
        rPos = np.linalg.norm(pos, axis=1)
        for r in tqdm(rPos):
            try:
                # normalize by the volume of the spherical shell at radius r
                density[int((r - r_min) // dr)] += 1 / (4 * np.pi * dr * r**2)
            except:
                pass
        return density

    if mode == "1D":
        density = np.zeros(bins.shape[0])
        x_min = bins[0]
        dx = bins[1] - bins[0]
        for x in tqdm(pos):
            try:
                density[int((x - x_min) // dx)] += 1
            except:
                pass
        return density / dx

    if mode == "2D":
        density = np.zeros((bins.shape[0], bins.shape[0]))
        y_min = x_min = bins[0]
        dy = dx = bins[1] - bins[0]
        for x, y in tqdm(pos):
            try:
                density[int((x - x_min) // dx), int((y - y_min) // dy)] += 1
            except:
                pass
        # normalized by sample count rather than bin area
        return density / pos.shape[0]
[ "tensorflow.random.uniform", "tensorflow.reshape", "tensorflow.reduce_sum", "tensorflow.math.sqrt", "numpy.linalg.norm", "numpy.sum", "numpy.zeros", "numpy.random.uniform", "tensorflow.convert_to_tensor", "tqdm.notebook.tqdm", "tensorflow.norm" ]
[((842, 929), 'tensorflow.random.uniform', 'tf.random.uniform', (['(batch_size, dim)'], {'minval': '(-2)', 'maxval': '(2)', 'dtype': 'tf.dtypes.float64'}), '((batch_size, dim), minval=-2, maxval=2, dtype=tf.dtypes.\n float64)\n', (859, 929), True, 'import tensorflow as tf\n'), ((2594, 2617), 'numpy.zeros', 'np.zeros', (['bins.shape[0]'], {}), '(bins.shape[0])\n', (2602, 2617), True, 'import numpy as np\n'), ((2688, 2715), 'numpy.linalg.norm', 'np.linalg.norm', (['pos'], {'axis': '(1)'}), '(pos, axis=1)\n', (2702, 2715), True, 'import numpy as np\n'), ((2733, 2743), 'tqdm.notebook.tqdm', 'tqdm', (['rPos'], {}), '(rPos)\n', (2737, 2743), False, 'from tqdm.notebook import tqdm\n'), ((2931, 2954), 'numpy.zeros', 'np.zeros', (['bins.shape[0]'], {}), '(bins.shape[0])\n', (2939, 2954), True, 'import numpy as np\n'), ((3025, 3052), 'numpy.linalg.norm', 'np.linalg.norm', (['pos'], {'axis': '(1)'}), '(pos, axis=1)\n', (3039, 3052), True, 'import numpy as np\n'), ((3070, 3080), 'tqdm.notebook.tqdm', 'tqdm', (['rPos'], {}), '(rPos)\n', (3074, 3080), False, 'from tqdm.notebook import tqdm\n'), ((3286, 3309), 'numpy.zeros', 'np.zeros', (['bins.shape[0]'], {}), '(bins.shape[0])\n', (3294, 3309), True, 'import numpy as np\n'), ((3380, 3407), 'numpy.linalg.norm', 'np.linalg.norm', (['pos'], {'axis': '(1)'}), '(pos, axis=1)\n', (3394, 3407), True, 'import numpy as np\n'), ((3425, 3435), 'tqdm.notebook.tqdm', 'tqdm', (['rPos'], {}), '(rPos)\n', (3429, 3435), False, 'from tqdm.notebook import tqdm\n'), ((3638, 3661), 'numpy.zeros', 'np.zeros', (['bins.shape[0]'], {}), '(bins.shape[0])\n', (3646, 3661), True, 'import numpy as np\n'), ((3734, 3743), 'tqdm.notebook.tqdm', 'tqdm', (['pos'], {}), '(pos)\n', (3738, 3743), False, 'from tqdm.notebook import tqdm\n'), ((3925, 3965), 'numpy.zeros', 'np.zeros', (['(bins.shape[0], bins.shape[0])'], {}), '((bins.shape[0], bins.shape[0]))\n', (3933, 3965), True, 'import numpy as np\n'), ((4054, 4063), 'tqdm.notebook.tqdm', 'tqdm', (['pos'], {}), 
'(pos)\n', (4058, 4063), False, 'from tqdm.notebook import tqdm\n'), ((1311, 1351), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(batch_size, 1)'], {}), '(0, 1, (batch_size, 1))\n', (1328, 1351), True, 'import numpy as np\n'), ((1651, 1703), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x_old'], {'dtype': 'tf.dtypes.float64'}), '(x_old, dtype=tf.dtypes.float64)\n', (1671, 1703), True, 'import tensorflow as tf\n'), ((1734, 1746), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (1740, 1746), True, 'import numpy as np\n'), ((1953, 1982), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(x ** 2)'], {'axis': '(1)'}), '(x ** 2, axis=1)\n', (1966, 1982), True, 'import tensorflow as tf\n'), ((2260, 2334), 'tensorflow.norm', 'tf.norm', (['(x[:, i * dim:(i + 1) * dim] - x[:, j * dim:(j + 1) * dim])'], {'axis': '(1)'}), '(x[:, i * dim:(i + 1) * dim] - x[:, j * dim:(j + 1) * dim], axis=1)\n', (2267, 2334), True, 'import tensorflow as tf\n'), ((2387, 2411), 'tensorflow.reshape', 'tf.reshape', (['r12', '(-1, 1)'], {}), '(r12, (-1, 1))\n', (2397, 2411), True, 'import tensorflow as tf\n'), ((1121, 1208), 'tensorflow.random.uniform', 'tf.random.uniform', (['(batch_size, dim)'], {'minval': '(-1)', 'maxval': '(1)', 'dtype': 'tf.dtypes.float64'}), '((batch_size, dim), minval=-1, maxval=1, dtype=tf.dtypes.\n float64)\n', (1138, 1208), True, 'import tensorflow as tf\n'), ((2446, 2485), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['(r12 ** 2 + self.beta ** 2)'], {}), '(r12 ** 2 + self.beta ** 2)\n', (2458, 2485), True, 'import tensorflow as tf\n')]
from setuptools import setup

# Package metadata collected in one place, then passed to setup().
_METADATA = {
    'name': 'torch-dimcheck',
    'version': '0.0.1',
    'description': 'Dimensionality annotations for tensor parameters and return values',
    'packages': ['torch_dimcheck'],
    'author': '<NAME>',
    'author_email': '<EMAIL>',
}

setup(**_METADATA)
[ "setuptools.setup" ]
[((30, 236), 'setuptools.setup', 'setup', ([], {'name': '"""torch-dimcheck"""', 'version': '"""0.0.1"""', 'description': '"""Dimensionality annotations for tensor parameters and return values"""', 'packages': "['torch_dimcheck']", 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""'}), "(name='torch-dimcheck', version='0.0.1', description=\n 'Dimensionality annotations for tensor parameters and return values',\n packages=['torch_dimcheck'], author='<NAME>', author_email='<EMAIL>')\n", (35, 236), False, 'from setuptools import setup\n')]
#!/usr/bin/env python
from os import (
    path as os_path,
    mkdir as os_mkdir,
    getcwd
)
from argparse import ArgumentParser
from logging import (
    Logger,
    getLogger
)
from glob import glob
from typing import (
    Dict,
)
from colored import fg, bg, attr
from brs_utils import (
    create_logger
)
from .RetroPath2 import (
    set_vars,
    retropath2
)
from .Args import (
    build_args_parser
)
from ._version import __version__

# Map of symbolic result codes (as returned by retropath2/check_results)
# to process exit codes.
__ERROR_CODES__ = {
    0: 0,
    'NoError': 0,
    'SrcInSink': 1,
    'FileNotFound': 2,
    'OSError': 3,
    'NoSolution': 4,
    'TimeLimit': 5,
    'InChI': 6
}


def print_conf(
    kvars: Dict,
    prog: str,
    logger: Logger = getLogger(__name__)
) -> None:
    """
    Print configuration.

    Parameters
    ----------
    kvars : Dict
        Dictionary with KNIME variables to print ('kexec', 'workflow', ...).
    prog : str
        Program name shown in the header.
    logger : Logger
        The logger object.

    Returns
    -------
    None

    """
    # print ('%s%s Configuration %s' % (fg('magenta'), attr('bold'), attr('reset')))
    print('{fg}{attr1}Configuration {attr2}'.format(fg=fg('cyan'), attr1=attr('bold'), attr2=attr('reset')))
    print('{fg}'.format(fg=fg('cyan')), end='')
    print(' + ' + prog)
    print(' |--version: '+__version__)
    print(' + KNIME')
    print(' |--path: '+kvars['kexec'])
    # logger.info(' - version: '+kvars['kver'])
    print(' + RetroPath2.0 workflow')
    print(' |--path: '+kvars['workflow'])
    # logger.info(' - version: r20210127')
    print('')
    # reset terminal colours
    print ('{attr}'.format(attr=attr('reset')), end='')


def _cli():
    """Command-line entry point: parse args, run RetroPath2.0, return an exit code."""
    parser = build_args_parser()
    args = parse_and_check_args(parser)

    if args.log.lower() in ['silent', 'quiet'] or args.silent:
        args.log = 'CRITICAL'

    # Store KNIME vars into a dictionary
    kvars = set_vars(
        args.kexec,
        args.kver,
        args.kpkg_install,
        args.kwf
    )

    # Print out configuration
    if not args.silent and args.log.lower() not in ['critical', 'error']:
        print_conf(kvars, prog = parser.prog)

    # Create logger
    logger = create_logger(parser.prog, args.log)

    logger.debug('args: ' + str(args))
    logger.debug('kvars: ' + str(kvars))

    r_code, result_files = retropath2(
        sink_file=args.sink_file,
        source_file=args.source_file,
        rules_file=args.rules_file,
        outdir=args.outdir,
        kvars=kvars,
        max_steps=args.max_steps,
        topx=args.topx,
        dmin=args.dmin,
        dmax=args.dmax,
        mwmax_source=args.mwmax_source,
        mwmax_cof=args.mwmax_cof,
        timeout=args.timeout,
        logger=logger
    )
    print(r_code)
    if r_code == 'OK' or r_code == 'TimeLimit':
        logger.info('{attr1}Results{attr2}'.format(attr1=attr('bold'), attr2=attr('reset')))
        logger.info(' |- Checking... ')
        # r_code becomes 0 on success or 'NoSolution' if no scope file exists
        r_code = check_results(result_files, logger)
        logger.info(' |--path: '+args.outdir)
    else:
        logger.error('Exiting...')

    return __ERROR_CODES__[r_code]


def check_results(
    result_files: Dict,
    logger: Logger = getLogger(__name__)
) -> int:
    """Return 0 if a solution exists in result_files['outdir'], 'NoSolution' otherwise."""
    # Check if any result has been found
    r_code = check_scope(result_files['outdir'], logger)
    if r_code == -1:
        r_code = 'NoSolution'

    return r_code


def check_scope(
    outdir: str,
    logger: Logger = getLogger(__name__)
) -> int:
    """
    Check if result is present in outdir.

    Parameters
    ----------
    outdir : str
        The folder where results have been written.
    logger : Logger
        The logger object.

    Returns
    -------
    int
        0 if at least one '*_scope.csv' file exists, -1 otherwise.

    """
    # Scope files sorted by modification time (oldest first)
    csv_scopes = sorted(
        glob(os_path.join(outdir, '*_scope.csv')),
        key=lambda scope: os_path.getmtime(scope)
    )
    if csv_scopes == []:
        logger.warning(' Warning: No solution has been found')
        return -1
    return 0


def parse_and_check_args(
    parser: ArgumentParser
) -> None:
    """Parse CLI args, validate cross-option constraints, prepare outdir and source file."""
    args = parser.parse_args()

    if args.kver is None and args.kpkg_install and args.kexec is not None:
        parser.error("--kexec requires --kver.")

    # Create outdir if does not exist
    if not os_path.exists(args.outdir):
        os_mkdir(args.outdir)

    if args.source_file is not None:
        # An explicit source file excludes the inline name/InChI options
        if args.source_name is not None:
            parser.error("--source_name is not compliant with --source_file.")
        if args.source_inchi is not None:
            parser.error("--source_inchi is not compliant with --source_file.")
    else:
        if args.source_inchi is None:
            parser.error("--source_inchi is mandatory.")
        if args.source_name is None or args.source_name == '':
            args.source_name = 'target'
        # Create temporary source file
        args.source_file = os_path.join(args.outdir, 'source.csv')
        with open(args.source_file, 'w') as temp_f:
            temp_f.write('Name,InChI\n')
            temp_f.write('"%s","%s"' % (args.source_name, args.source_inchi.strip()))

    return args


if __name__ == '__main__':
    _cli()
[ "logging.getLogger", "os.path.exists", "colored.fg", "os.path.join", "os.mkdir", "colored.attr", "os.path.getmtime", "brs_utils.create_logger" ]
[((729, 748), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (738, 748), False, 'from logging import Logger, getLogger\n'), ((2136, 2172), 'brs_utils.create_logger', 'create_logger', (['parser.prog', 'args.log'], {}), '(parser.prog, args.log)\n', (2149, 2172), False, 'from brs_utils import create_logger\n'), ((3083, 3102), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (3092, 3102), False, 'from logging import Logger, getLogger\n'), ((3343, 3362), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (3352, 3362), False, 'from logging import Logger, getLogger\n'), ((4161, 4188), 'os.path.exists', 'os_path.exists', (['args.outdir'], {}), '(args.outdir)\n', (4175, 4188), True, 'from os import path as os_path, mkdir as os_mkdir, getcwd\n'), ((4198, 4219), 'os.mkdir', 'os_mkdir', (['args.outdir'], {}), '(args.outdir)\n', (4206, 4219), True, 'from os import path as os_path, mkdir as os_mkdir, getcwd\n'), ((4774, 4813), 'os.path.join', 'os_path.join', (['args.outdir', '"""source.csv"""'], {}), "(args.outdir, 'source.csv')\n", (4786, 4813), True, 'from os import path as os_path, mkdir as os_mkdir, getcwd\n'), ((3663, 3698), 'os.path.join', 'os_path.join', (['outdir', '"""*_scope.csv"""'], {}), "(outdir, '*_scope.csv')\n", (3675, 3698), True, 'from os import path as os_path, mkdir as os_mkdir, getcwd\n'), ((1128, 1138), 'colored.fg', 'fg', (['"""cyan"""'], {}), "('cyan')\n", (1130, 1138), False, 'from colored import fg, bg, attr\n'), ((1146, 1158), 'colored.attr', 'attr', (['"""bold"""'], {}), "('bold')\n", (1150, 1158), False, 'from colored import fg, bg, attr\n'), ((1166, 1179), 'colored.attr', 'attr', (['"""reset"""'], {}), "('reset')\n", (1170, 1179), False, 'from colored import fg, bg, attr\n'), ((1209, 1219), 'colored.fg', 'fg', (['"""cyan"""'], {}), "('cyan')\n", (1211, 1219), False, 'from colored import fg, bg, attr\n'), ((1589, 1602), 'colored.attr', 'attr', (['"""reset"""'], {}), "('reset')\n", (1593, 
1602), False, 'from colored import fg, bg, attr\n'), ((3727, 3750), 'os.path.getmtime', 'os_path.getmtime', (['scope'], {}), '(scope)\n', (3743, 3750), True, 'from os import path as os_path, mkdir as os_mkdir, getcwd\n'), ((2757, 2769), 'colored.attr', 'attr', (['"""bold"""'], {}), "('bold')\n", (2761, 2769), False, 'from colored import fg, bg, attr\n'), ((2777, 2790), 'colored.attr', 'attr', (['"""reset"""'], {}), "('reset')\n", (2781, 2790), False, 'from colored import fg, bg, attr\n')]
import functools
import heapq
import logging
from collections import deque
from threading import Condition, RLock
from typing import Any, Callable, List, NamedTuple, Optional

from pytils.mixins import DaemonHandler

from ._config.time import DEFAULT_TIME_SUPPLIER, TimeSupplier, TimeType, ZERO_DURATION

__all__ = [
    'Action',
    'Clock',
    'Handler',
    'Schedule',
    'ScheduleKey',
    'SchedulingQueue',
    'TimeSupplier',
    'TimeType',
]

_DEFAULT_MAX_TASK_QUEUE_SIZE = 4096
_MAX_SLEEP_DURATION = 12.  # upper bound (seconds) on a single scheduler wait


#
# Convenience Function
wrap_action = functools.partial


#
# Data Definitions
class ScheduleKey(NamedTuple):
    """Identity of a scheduled task: its period (None => one-shot) and action."""
    period: Optional[float]
    action: 'Action'


class ScheduleEntry(NamedTuple):
    """Heap entry: absolute time of the next run plus the task's key."""
    next_run: float
    key: 'ScheduleKey'


Action = Callable[[], Any]
Handler = Callable[[Action], Any]


#
# Clock
LOGGER = logging.getLogger('pytils.clock')


class Clock:
    """Facade pairing a Schedule (what to run, when) with a SchedulingQueue
    (the worker loop that actually executes due actions)."""

    def __init__(
            self,
            max_queue_size: int = _DEFAULT_MAX_TASK_QUEUE_SIZE,
            s_time: TimeSupplier = DEFAULT_TIME_SUPPLIER):
        self._scheduling_queue = SchedulingQueue(max_queue_size, s_time)

    @property
    def schedule(self):
        """The underlying Schedule; use it to register actions."""
        return self._scheduling_queue.schedule

    def run_scheduler(self):
        """Run the scheduler loop in the calling thread (DaemonHandler.run)."""
        self.schedule.run()

    def start_scheduler(self):
        """Start the scheduler loop in the background (DaemonHandler.start)."""
        self.schedule.start()

    def run_handler(self):
        """Run the task-execution loop in the calling thread (DaemonHandler.run)."""
        self._scheduling_queue.run()

    def start_handler(self):
        """Start the task-execution loop in the background (DaemonHandler.start)."""
        self._scheduling_queue.start()


class SchedulingQueue(DaemonHandler):
    """FIFO of due actions, fed by its Schedule and drained by handle_one().

    The deque is bounded: when full, appending silently evicts the oldest
    queued action.
    """

    def __init__(
            self,
            max_queue_size: int = _DEFAULT_MAX_TASK_QUEUE_SIZE,
            s_time: TimeSupplier = DEFAULT_TIME_SUPPLIER):
        self._cv = Condition()
        self._task_queue = deque(maxlen=max_queue_size)
        self._schedule = Schedule(self._enqueue, s_time)

    @property
    def schedule(self):
        return self._schedule

    def handle_one(self):
        """Block until an action is queued, then pop and execute it."""
        with self._cv:
            self._cv.wait_for(self._task_queue.__len__)
            action = self._task_queue.popleft()
            self._cv.notify()
        # Run outside the lock so a slow action does not block producers.
        action()

    def _enqueue(self, action: Action):
        with self._cv:
            self._task_queue.append(action)
            self._cv.notify()


class Schedule(DaemonHandler):
    """Min-heap of timed entries; hands due actions to the configured handler."""

    def __init__(self, handler: Handler, s_time: TimeSupplier = DEFAULT_TIME_SUPPLIER):
        self.s_time = s_time
        self._lock = RLock()
        self._cv = Condition(self._lock)
        self._schedule = []  # type: List[ScheduleEntry]
        self._handler = handler

    def register(
            self,
            action: Action,
            period: Optional[TimeType],
            delay: Optional[TimeType] = None) -> ScheduleKey:
        """Register *action* to run every *period* seconds after *delay*.

        ``period=None`` schedules a one-shot action; ``delay=None`` (the
        default) means "run as soon as possible".

        Raises:
            ValueError: if ``period`` is non-positive or ``delay`` negative.
        """
        # BUGFIX: the original compared the Optional values directly
        # (``period <= ZERO_DURATION`` / ``delay < ZERO_DURATION``), which
        # raises TypeError whenever either is None -- including every call
        # that relies on the ``delay=None`` default.
        if period is not None and period <= ZERO_DURATION:
            raise ValueError('period must be positive or None')
        if delay is not None and delay < ZERO_DURATION:
            raise ValueError('delay must be non-negative or None')
        if not delay:
            delay = ZERO_DURATION
        key = ScheduleKey(period, action)
        entry = ScheduleEntry(self.s_time() + delay, key)
        with self._cv:
            self._enqueue(entry)
        return key

    def handle_one(self):
        """Wait until the earliest entry is due, then hand it to the handler."""
        with self._cv:
            # wait_for returns False on timeout; keep waiting until the
            # earliest entry has actually expired.
            while not self._cv.wait_for(self.has_expired, self._get_next_sleep_duration()):
                pass
            self._handle_entry(self._dequeue())

    def _handle_entry(self, entry: ScheduleEntry):
        self._handler(self._create_readmittence_action_from_key(entry.key))

    def _create_readmittence_action_from_key(self, key: ScheduleKey) -> Action:
        """Wrap the action so periodic tasks re-schedule themselves after running."""
        if key.period is None:
            return key.action

        def perform_action_and_readmit():
            next_run = self.s_time() + key.period
            key.action()
            current_time = self.s_time()
            if next_run < current_time:
                LOGGER.warning('Scheduled task took longer than its period length to complete')
            # Never schedule in the past: slip to "now" if the action overran.
            self._enqueue(ScheduleEntry(max(current_time, next_run), key))

        return perform_action_and_readmit

    def has_expired(self) -> bool:
        """True when the earliest scheduled entry is due."""
        with self._lock:
            return bool(self._schedule) and self._schedule[0].next_run - self.s_time() <= 0

    def _get_next_sleep_duration(self) -> TimeType:
        # Time until the earliest entry, clamped to [0, _MAX_SLEEP_DURATION].
        if self._schedule:
            return min(_MAX_SLEEP_DURATION, max(ZERO_DURATION, self._schedule[0].next_run - self.s_time()))
        else:
            return _MAX_SLEEP_DURATION

    def _enqueue(self, entry: ScheduleEntry):
        # Caller must hold self._cv (notify requires the lock).
        self._cv.notify()
        heapq.heappush(self._schedule, entry)

    def _dequeue(self) -> Optional[ScheduleEntry]:
        # Caller must hold self._cv (notify requires the lock).
        self._cv.notify()
        return heapq.heappop(self._schedule) if self._schedule else None
[ "logging.getLogger", "collections.deque", "threading.RLock", "heapq.heappop", "heapq.heappush", "threading.Condition" ]
[((842, 875), 'logging.getLogger', 'logging.getLogger', (['"""pytils.clock"""'], {}), "('pytils.clock')\n", (859, 875), False, 'import logging\n'), ((1683, 1694), 'threading.Condition', 'Condition', ([], {}), '()\n', (1692, 1694), False, 'from threading import Condition, RLock\n'), ((1722, 1750), 'collections.deque', 'deque', ([], {'maxlen': 'max_queue_size'}), '(maxlen=max_queue_size)\n', (1727, 1750), False, 'from collections import deque\n'), ((2395, 2402), 'threading.RLock', 'RLock', ([], {}), '()\n', (2400, 2402), False, 'from threading import Condition, RLock\n'), ((2422, 2443), 'threading.Condition', 'Condition', (['self._lock'], {}), '(self._lock)\n', (2431, 2443), False, 'from threading import Condition, RLock\n'), ((4509, 4546), 'heapq.heappush', 'heapq.heappush', (['self._schedule', 'entry'], {}), '(self._schedule, entry)\n', (4523, 4546), False, 'import heapq\n'), ((4640, 4669), 'heapq.heappop', 'heapq.heappop', (['self._schedule'], {}), '(self._schedule)\n', (4653, 4669), False, 'import heapq\n')]
"""Execute validated & constructed query on device. Accepts input from front end application, validates the input and returns errors if input is invalid. Passes validated parameters to construct.py, which is used to build & run the Netmiko connections or hyperglass-frr API calls, returns the output back to the front end. """ # Standard Library from ssl import CertificateError from typing import Iterable # Third Party import httpx # Project from hyperglass.log import log from hyperglass.util import parse_exception from hyperglass.encode import jwt_decode, jwt_encode from hyperglass.exceptions import RestError, ResponseEmpty from hyperglass.configuration import params # Local from ._common import Connection class AgentConnection(Connection): """Connect to target device via hyperglass-agent.""" async def collect(self) -> Iterable: # noqa: C901 """Connect to a device running hyperglass-agent via HTTP.""" log.debug("Query parameters: {}", self.query) client_params = { "headers": {"Content-Type": "application/json"}, "timeout": params.request_timeout, } if self.device.ssl is not None and self.device.ssl.enable: with self.device.ssl.cert.open("r") as file: cert = file.read() if not cert: raise RestError( "SSL Certificate for device {d} has not been imported", level="danger", d=self.device.name, ) http_protocol = "https" client_params.update({"verify": str(self.device.ssl.cert)}) log.debug( ( f"Using {str(self.device.ssl.cert)} to validate connection " f"to {self.device.name}" ) ) else: http_protocol = "http" endpoint = "{protocol}://{address}:{port}/query/".format( protocol=http_protocol, address=self.device._target, port=self.device.port ) log.debug("URL endpoint: {}", endpoint) try: async with httpx.AsyncClient(**client_params) as http_client: responses = () for query in self.query: encoded_query = await jwt_encode( payload=query, secret=self.device.credential.password.get_secret_value(), duration=params.request_timeout, ) log.debug("Encoded JWT: {}", encoded_query) 
raw_response = await http_client.post( endpoint, json={"encoded": encoded_query} ) log.debug("HTTP status code: {}", raw_response.status_code) raw = raw_response.text log.debug("Raw Response:\n{}", raw) if raw_response.status_code == 200: decoded = await jwt_decode( payload=raw_response.json()["encoded"], secret=self.device.credential.password.get_secret_value(), ) log.debug("Decoded Response:\n{}", decoded) responses += (decoded,) elif raw_response.status_code == 204: raise ResponseEmpty( params.messages.no_output, device_name=self.device.name, ) else: log.error(raw_response.text) except httpx.exceptions.HTTPError as rest_error: msg = parse_exception(rest_error) log.error("Error connecting to device {}: {}", self.device.name, msg) raise RestError( params.messages.connection_error, device_name=self.device.name, error=msg, ) except OSError as ose: log.critical(str(ose)) raise RestError( params.messages.connection_error, device_name=self.device.name, error="System error", ) except CertificateError as cert_error: log.critical(str(cert_error)) msg = parse_exception(cert_error) raise RestError( params.messages.connection_error, device_name=self.device.name, error=f"{msg}: {cert_error}", ) if raw_response.status_code != 200: log.error("Response code is {}", raw_response.status_code) raise RestError( params.messages.connection_error, device_name=self.device.name, error=params.messages.general, ) if not responses: log.error("No response from device {}", self.device.name) raise RestError( params.messages.connection_error, device_name=self.device.name, error=params.messages.no_response, ) return responses
[ "hyperglass.util.parse_exception", "hyperglass.exceptions.ResponseEmpty", "httpx.AsyncClient", "hyperglass.log.log.error", "hyperglass.exceptions.RestError", "hyperglass.log.log.debug" ]
[((947, 992), 'hyperglass.log.log.debug', 'log.debug', (['"""Query parameters: {}"""', 'self.query'], {}), "('Query parameters: {}', self.query)\n", (956, 992), False, 'from hyperglass.log import log\n'), ((2077, 2116), 'hyperglass.log.log.debug', 'log.debug', (['"""URL endpoint: {}"""', 'endpoint'], {}), "('URL endpoint: {}', endpoint)\n", (2086, 2116), False, 'from hyperglass.log import log\n'), ((4599, 4657), 'hyperglass.log.log.error', 'log.error', (['"""Response code is {}"""', 'raw_response.status_code'], {}), "('Response code is {}', raw_response.status_code)\n", (4608, 4657), False, 'from hyperglass.log import log\n'), ((4676, 4784), 'hyperglass.exceptions.RestError', 'RestError', (['params.messages.connection_error'], {'device_name': 'self.device.name', 'error': 'params.messages.general'}), '(params.messages.connection_error, device_name=self.device.name,\n error=params.messages.general)\n', (4685, 4784), False, 'from hyperglass.exceptions import RestError, ResponseEmpty\n'), ((4883, 4940), 'hyperglass.log.log.error', 'log.error', (['"""No response from device {}"""', 'self.device.name'], {}), "('No response from device {}', self.device.name)\n", (4892, 4940), False, 'from hyperglass.log import log\n'), ((4959, 5071), 'hyperglass.exceptions.RestError', 'RestError', (['params.messages.connection_error'], {'device_name': 'self.device.name', 'error': 'params.messages.no_response'}), '(params.messages.connection_error, device_name=self.device.name,\n error=params.messages.no_response)\n', (4968, 5071), False, 'from hyperglass.exceptions import RestError, ResponseEmpty\n'), ((2154, 2188), 'httpx.AsyncClient', 'httpx.AsyncClient', ([], {}), '(**client_params)\n', (2171, 2188), False, 'import httpx\n'), ((3703, 3730), 'hyperglass.util.parse_exception', 'parse_exception', (['rest_error'], {}), '(rest_error)\n', (3718, 3730), False, 'from hyperglass.util import parse_exception\n'), ((3743, 3812), 'hyperglass.log.log.error', 'log.error', (['"""Error connecting to 
device {}: {}"""', 'self.device.name', 'msg'], {}), "('Error connecting to device {}: {}', self.device.name, msg)\n", (3752, 3812), False, 'from hyperglass.log import log\n'), ((3831, 3919), 'hyperglass.exceptions.RestError', 'RestError', (['params.messages.connection_error'], {'device_name': 'self.device.name', 'error': 'msg'}), '(params.messages.connection_error, device_name=self.device.name,\n error=msg)\n', (3840, 3919), False, 'from hyperglass.exceptions import RestError, ResponseEmpty\n'), ((4063, 4162), 'hyperglass.exceptions.RestError', 'RestError', (['params.messages.connection_error'], {'device_name': 'self.device.name', 'error': '"""System error"""'}), "(params.messages.connection_error, device_name=self.device.name,\n error='System error')\n", (4072, 4162), False, 'from hyperglass.exceptions import RestError, ResponseEmpty\n'), ((4329, 4356), 'hyperglass.util.parse_exception', 'parse_exception', (['cert_error'], {}), '(cert_error)\n', (4344, 4356), False, 'from hyperglass.util import parse_exception\n'), ((4375, 4482), 'hyperglass.exceptions.RestError', 'RestError', (['params.messages.connection_error'], {'device_name': 'self.device.name', 'error': 'f"""{msg}: {cert_error}"""'}), "(params.messages.connection_error, device_name=self.device.name,\n error=f'{msg}: {cert_error}')\n", (4384, 4482), False, 'from hyperglass.exceptions import RestError, ResponseEmpty\n'), ((1352, 1458), 'hyperglass.exceptions.RestError', 'RestError', (['"""SSL Certificate for device {d} has not been imported"""'], {'level': '"""danger"""', 'd': 'self.device.name'}), "('SSL Certificate for device {d} has not been imported', level=\n 'danger', d=self.device.name)\n", (1361, 1458), False, 'from hyperglass.exceptions import RestError, ResponseEmpty\n'), ((2553, 2596), 'hyperglass.log.log.debug', 'log.debug', (['"""Encoded JWT: {}"""', 'encoded_query'], {}), "('Encoded JWT: {}', encoded_query)\n", (2562, 2596), False, 'from hyperglass.log import log\n'), ((2765, 2824), 
'hyperglass.log.log.debug', 'log.debug', (['"""HTTP status code: {}"""', 'raw_response.status_code'], {}), "('HTTP status code: {}', raw_response.status_code)\n", (2774, 2824), False, 'from hyperglass.log import log\n'), ((2890, 2925), 'hyperglass.log.log.debug', 'log.debug', (['"""Raw Response:\n{}"""', 'raw'], {}), "('Raw Response:\\n{}', raw)\n", (2899, 2925), False, 'from hyperglass.log import log\n'), ((3240, 3286), 'hyperglass.log.log.debug', 'log.debug', (['"""Decoded Response:\n{}"""', 'decoded'], {}), '("""Decoded Response:\n{}""", decoded)\n', (3249, 3286), False, 'from hyperglass.log import log\n'), ((3421, 3491), 'hyperglass.exceptions.ResponseEmpty', 'ResponseEmpty', (['params.messages.no_output'], {'device_name': 'self.device.name'}), '(params.messages.no_output, device_name=self.device.name)\n', (3434, 3491), False, 'from hyperglass.exceptions import RestError, ResponseEmpty\n'), ((3598, 3626), 'hyperglass.log.log.error', 'log.error', (['raw_response.text'], {}), '(raw_response.text)\n', (3607, 3626), False, 'from hyperglass.log import log\n')]
import test_agent print('Logging in') Meerkat = test_agent.TestAgent(username='meerkat', password='<PASSWORD>', endpoint='/messages/') Pangolin = test_agent.TestAgent(username='pangolin', password='<PASSWORD>', endpoint='/messages/') Badger = test_agent.TestAgent(username='badger', password='<PASSWORD>', endpoint='/messages/') Anon = test_agent.TestAgent(endpoint='/messages/') print("Meerkat sending message to Pangolin") meerkat_sent_message = Meerkat.post(recepient='pangolin', title="Hello Pangolin", body="It's me, Meerkat") assert meerkat_sent_message.status_code == 201, f'Failed to send message. code: {meerkat_sent_message.status_code}' print("Checking meerkat's mailboxes") meerkat_mailbox = Meerkat.get() assert meerkat_mailbox.json() == [], "Meerkat can see a message" meerkat_outgoing = Meerkat.get('sender') assert len(meerkat_outgoing.json()) == 1, "Meerkat can's see an outgoing message'" msg_id = meerkat_outgoing.json()[0]['id'] print("Meerkat's mailboxes passed the initial message test") print("Checking Pangolin's mailboxes") pangolin_read_mailbox = Pangolin.get('read') assert pangolin_read_mailbox.json() == [], "read messages showed up in Pangolin's mailbox" pangolin_outgoing = Pangolin.get('sender') assert pangolin_outgoing.json() == [], "Pangolin appears to have an outgoing message" pangoling_mailbox = Pangolin.get() assert int(pangoling_mailbox.json()[0]['id']) == msg_id, "No matching message in pangolin's inbox" message = Pangolin.get(f'{msg_id}').json() assert message['title'] == "Hello Pangolin" and message['body'] == "It's me, Meerkat", 'Wrong message found' pangolin_read_mailbox = Pangolin.get('read') assert len(pangolin_read_mailbox.json()) == 1, "Message not found in read" print("Pangolin succesfully passed the initial message test") print("Ensuring Badger is unable to access message") badger_mail = Badger.get('all') assert badger_mail.json() == [], "Badger has mail" badger_message = Badger.get(f'{msg_id}') assert badger_message.status_code >= 
400, 'Badger gained unauthorozed access' print("Badger succesfully passed the initial message test") print("Ensuring Anon is unable to access message") anon_mail = Anon.get() assert anon_mail.status_code >= 400, "Anon can see a mailbox" anon_message = Anon.get(f'{msg_id}') assert anon_message.status_code >= 400, "Anon can see meerkat's message" print("Anon passed test") print('Pangolin deleting message') deletion = Pangolin.delete(f'{msg_id}') assert deletion.status_code == 204, "failed to delete" pangolin_read_mailbox = Pangolin.get('read') print(pangolin_read_mailbox.json()) assert pangolin_read_mailbox.json() == [], "read messages showed up in Pangolin's mailbox" pangolin_deleted = Pangolin.get('deleted') assert len(pangolin_deleted.json()) == 1, "Deleted message does not show up" print('Pangolin succesfully deleted messages') print("Ensuring meerkat can still see message") meerkat_outgoing = Meerkat.get('sender') assert len(meerkat_outgoing.json()) == 1, "Meerkat can's see an outgoing message'" meerkat_outgoing = Meerkat.get('deleted') assert meerkat_outgoing.json() == [], "Meerkat sees deleted message" print('Meerkat can succesfully see message pangolin deleted as non-deleted')
[ "test_agent.TestAgent" ]
[((49, 140), 'test_agent.TestAgent', 'test_agent.TestAgent', ([], {'username': '"""meerkat"""', 'password': '"""<PASSWORD>"""', 'endpoint': '"""/messages/"""'}), "(username='meerkat', password='<PASSWORD>', endpoint=\n '/messages/')\n", (69, 140), False, 'import test_agent\n'), ((147, 239), 'test_agent.TestAgent', 'test_agent.TestAgent', ([], {'username': '"""pangolin"""', 'password': '"""<PASSWORD>"""', 'endpoint': '"""/messages/"""'}), "(username='pangolin', password='<PASSWORD>', endpoint=\n '/messages/')\n", (167, 239), False, 'import test_agent\n'), ((244, 334), 'test_agent.TestAgent', 'test_agent.TestAgent', ([], {'username': '"""badger"""', 'password': '"""<PASSWORD>"""', 'endpoint': '"""/messages/"""'}), "(username='badger', password='<PASSWORD>', endpoint=\n '/messages/')\n", (264, 334), False, 'import test_agent\n'), ((337, 380), 'test_agent.TestAgent', 'test_agent.TestAgent', ([], {'endpoint': '"""/messages/"""'}), "(endpoint='/messages/')\n", (357, 380), False, 'import test_agent\n')]
""" Unit tests for the HR solver. """ import pytest from matching import Matching from matching import Player as Resident from matching.games import HospitalResident from matching.players import Hospital from .params import HOSPITAL_RESIDENT, make_game, make_prefs @HOSPITAL_RESIDENT def test_init(resident_names, hospital_names, capacities, seed): """ Test that an instance of HospitalResident is created correctly when passed a set of players. """ residents, hospitals, game = make_game( resident_names, hospital_names, capacities, seed ) assert game.residents == residents assert game.hospitals == hospitals assert all([resident.matching is None for resident in game.residents]) assert all([hospital.matching == [] for hospital in game.hospitals]) assert game.matching is None @HOSPITAL_RESIDENT def test_create_from_dictionaries( resident_names, hospital_names, capacities, seed ): """ Test that HospitalResident is created correctly when passed a set of dictionaries for each party. """ resident_prefs, hospital_prefs = make_prefs( resident_names, hospital_names, seed ) capacities_ = dict(zip(hospital_names, capacities)) game = HospitalResident.create_from_dictionaries( resident_prefs, hospital_prefs, capacities_ ) for resident in game.residents: assert resident.pref_names == resident_prefs[resident.name] assert resident.matching is None for hospital in game.hospitals: assert hospital.pref_names == hospital_prefs[hospital.name] assert hospital.capacity == capacities_[hospital.name] assert hospital.matching == [] assert game.matching is None @HOSPITAL_RESIDENT def test_inputs_resident_prefs( resident_names, hospital_names, capacities, seed ): """ Test that each resident's preference list is a subset of the available hospitals, and check that an Exception is raised if not. 
""" _, _, game = make_game(resident_names, hospital_names, capacities, seed) assert game._check_resident_prefs() game.residents[0].prefs = [Resident("foo")] with pytest.raises(Exception): game._check_resident_prefs() @HOSPITAL_RESIDENT def test_inputs_hospital_prefs( resident_names, hospital_names, capacities, seed ): """ Test that each hospital has ranked all and only those residents that have ranked it, and check that an Exception is raised if not. """ _, _, game = make_game(resident_names, hospital_names, capacities, seed) assert game._check_hospital_prefs() game.hospitals[0].prefs.pop() with pytest.raises(Exception): game._check_hospital_prefs() @HOSPITAL_RESIDENT def test_solve(resident_names, hospital_names, capacities, seed): """ Test that HospitalResident can solve games correctly when passed players. """ for optimal in ["resident", "hospital"]: residents, hospitals, game = make_game( resident_names, hospital_names, capacities, seed ) matching = game.solve(optimal) assert isinstance(matching, Matching) assert set(matching.keys()) == set(hospitals) matched_residents = [ res for match in matching.values() for res in match ] assert matched_residents != [] and set(matched_residents).issubset( set(residents) ) for resident in set(residents) - set(matched_residents): assert resident.matching is None @HOSPITAL_RESIDENT def test_check_validity(resident_names, hospital_names, capacities, seed): """ Test that HospitalResident finds a valid matching when the game is solved. """ _, _, game = make_game(resident_names, hospital_names, capacities, seed) game.solve() assert game.check_validity() @HOSPITAL_RESIDENT def test_resident_matching(resident_names, hospital_names, capacities, seed): """ Test that HospitalResident recognises a valid matching requires a resident to have a preference of their match, if they have one. 
""" _, _, game = make_game(resident_names, hospital_names, capacities, seed) game.solve() game.residents[0].matching = Resident(name="foo") with pytest.raises(Exception): game._check_resident_matching() @HOSPITAL_RESIDENT def test_hospital_matching(resident_names, hospital_names, capacities, seed): """ Test that HospitalResident recognises a valid matching requires a hospital to have a preference of each of its matches, if any. """ _, _, game = make_game(resident_names, hospital_names, capacities, seed) game.solve() game.hospitals[0].matching.append(Resident(name="foo")) with pytest.raises(Exception): game._check_hospital_matching() @HOSPITAL_RESIDENT def test_hospital_capacity(resident_names, hospital_names, capacities, seed): """ Test that HospitalResident recognises a valid matching requires all hospitals to not be over-subscribed. """ _, _, game = make_game(resident_names, hospital_names, capacities, seed) game.solve() game.hospitals[0].matching = range(game.hospitals[0].capacity + 1) with pytest.raises(Exception): game._check_hospital_capacity() def test_check_stability(): """ Test that HospitalResident can recognise whether a matching is stable or not. """ residents = [Resident("A"), Resident("B"), Resident("C")] hospitals = [Hospital("X", 2), Hospital("Y", 2)] a, b, c = residents x, y = hospitals a.set_prefs([x, y]) b.set_prefs([y]) c.set_prefs([y, x]) x.set_prefs([c, a]) y.set_prefs([a, b, c]) game = HospitalResident(residents, hospitals) matching = game.solve() assert game.check_stability() matching[x] = [c] matching[y] = [a, b] assert not game.check_stability()
[ "matching.players.Hospital", "matching.games.HospitalResident", "matching.games.HospitalResident.create_from_dictionaries", "pytest.raises", "matching.Player" ]
[((1226, 1316), 'matching.games.HospitalResident.create_from_dictionaries', 'HospitalResident.create_from_dictionaries', (['resident_prefs', 'hospital_prefs', 'capacities_'], {}), '(resident_prefs, hospital_prefs,\n capacities_)\n', (1267, 1316), False, 'from matching.games import HospitalResident\n'), ((4191, 4211), 'matching.Player', 'Resident', ([], {'name': '"""foo"""'}), "(name='foo')\n", (4199, 4211), True, 'from matching import Player as Resident\n'), ((5646, 5684), 'matching.games.HospitalResident', 'HospitalResident', (['residents', 'hospitals'], {}), '(residents, hospitals)\n', (5662, 5684), False, 'from matching.games import HospitalResident\n'), ((2118, 2133), 'matching.Player', 'Resident', (['"""foo"""'], {}), "('foo')\n", (2126, 2133), True, 'from matching import Player as Resident\n'), ((2145, 2169), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2158, 2169), False, 'import pytest\n'), ((2628, 2652), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2641, 2652), False, 'import pytest\n'), ((4222, 4246), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (4235, 4246), False, 'import pytest\n'), ((4665, 4685), 'matching.Player', 'Resident', ([], {'name': '"""foo"""'}), "(name='foo')\n", (4673, 4685), True, 'from matching import Player as Resident\n'), ((4697, 4721), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (4710, 4721), False, 'import pytest\n'), ((5160, 5184), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (5173, 5184), False, 'import pytest\n'), ((5368, 5381), 'matching.Player', 'Resident', (['"""A"""'], {}), "('A')\n", (5376, 5381), True, 'from matching import Player as Resident\n'), ((5383, 5396), 'matching.Player', 'Resident', (['"""B"""'], {}), "('B')\n", (5391, 5396), True, 'from matching import Player as Resident\n'), ((5398, 5411), 'matching.Player', 'Resident', (['"""C"""'], {}), "('C')\n", (5406, 5411), True, 'from 
matching import Player as Resident\n'), ((5430, 5446), 'matching.players.Hospital', 'Hospital', (['"""X"""', '(2)'], {}), "('X', 2)\n", (5438, 5446), False, 'from matching.players import Hospital\n'), ((5448, 5464), 'matching.players.Hospital', 'Hospital', (['"""Y"""', '(2)'], {}), "('Y', 2)\n", (5456, 5464), False, 'from matching.players import Hospital\n')]
import date import os def get_time_delta(kline_type = '1_day'): if kline_type.lower() == '1_day'.lower(): return 0 kline_array = kline_type.split("_") if len(kline_array) != 2: raise ValueError('KLine_type {0} not supported'.format(kline_type)) if kline_array[1].lower() == 'min'.lower(): time_interval = int(kline_array[0]) if time_interval in [1,5,15,30,60]: return time_interval else: raise ValueError('KLine_type {0} not supported'.format(kline_type)) else: raise ValueError('KLine_type {0} not supported'.format(kline_type)) def get_kline_type_labal(kline_type): ''' kline_type must be fit the format {int}_{time unit} Example: 1_min ''' time_interval = get_time_delta(kline_type) if time_interval == 0: return 'KLINE_DAILY' else: return "KLINE_{0}_MIN".format(str(time_interval)) def get_kline_type_folder_name(kline_type = '1_day'): ''' kline_type must be fit the format {int}_{time unit} Example: 1_min ''' time_interval = get_time_delta(kline_type) if time_interval == 0: return 'daily' else: return "{0}min".format(str(time_interval)) def get_kline_file_relative_root(kline_type = '1_day'): return os.path.join('KLine', get_kline_type_folder_name(kline_type)) def get_kline_time_list(kline_type = '1_day'): time_interval = get_time_delta(kline_type) if time_interval == 0: return ["0"] else: kline_time_list = [] for time_delta in range(0, 120/time_interval): kline_time_list.append(date.create_kline_time_string(9,30,time_delta*time_interval)) for time_delta in range(0, 120/time_interval): kline_time_list.append(date.create_kline_time_string(13,00,time_delta*time_interval)) return kline_time_list
[ "date.create_kline_time_string" ]
[((1617, 1681), 'date.create_kline_time_string', 'date.create_kline_time_string', (['(9)', '(30)', '(time_delta * time_interval)'], {}), '(9, 30, time_delta * time_interval)\n', (1646, 1681), False, 'import date\n'), ((1769, 1833), 'date.create_kline_time_string', 'date.create_kline_time_string', (['(13)', '(0)', '(time_delta * time_interval)'], {}), '(13, 0, time_delta * time_interval)\n', (1798, 1833), False, 'import date\n')]
from collections import defaultdict def check_winner(cards): for card_index, card in cards.items(): for index in range(5): complete_line = all([x[1] for x in card[index]]) complete_column = all([card[x][index][1] for x in range(5)]) if complete_line or complete_column: return card_index with open("input.txt") as input_file: lines = input_file.readlines() drawn = [int(n) for n in lines[0].strip().split(",")] del lines[0:2] cards = defaultdict(list) card = 0 # Cards prep for line in lines: line = line.strip() if not line: card += 1 continue card_line = [[int(n), False] for n in line.split()] cards[card].append(card_line) # Final game for n in drawn: # Find all cells to cross for card in cards.values(): for line in card: try: index = line.index([n, False]) line[index][1] = True except ValueError: pass # Check a winner winner = check_winner(cards) if winner is not None: last_drawn = n break sum = 0 for line in cards[winner]: for n, drawn in line: if not drawn: sum += n print(sum * last_drawn)
[ "collections.defaultdict" ]
[((508, 525), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (519, 525), False, 'from collections import defaultdict\n')]
""" Demonstrates the hover functionality of mpldatacursor as well as point labels and a custom formatting function. Notice that overlapping points have both labels displayed. """ import string import matplotlib.pyplot as plt import numpy as np from mpldatacursor import datacursor np.random.seed(1977) x, y = np.random.random((2, 26)) labels = string.ascii_lowercase fig, ax = plt.subplots() ax.scatter(x, y, s=200) ax.set_title('Mouse over a point') # Show only the point label and allow nicer formatting if points overlap formatter = lambda **kwargs: ', '.join(kwargs['point_label']) datacursor(hover=True, formatter=formatter, point_labels=labels) plt.show()
[ "numpy.random.random", "numpy.random.seed", "matplotlib.pyplot.subplots", "mpldatacursor.datacursor", "matplotlib.pyplot.show" ]
[((281, 301), 'numpy.random.seed', 'np.random.seed', (['(1977)'], {}), '(1977)\n', (295, 301), True, 'import numpy as np\n'), ((310, 335), 'numpy.random.random', 'np.random.random', (['(2, 26)'], {}), '((2, 26))\n', (326, 335), True, 'import numpy as np\n'), ((379, 393), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (391, 393), True, 'import matplotlib.pyplot as plt\n'), ((589, 653), 'mpldatacursor.datacursor', 'datacursor', ([], {'hover': '(True)', 'formatter': 'formatter', 'point_labels': 'labels'}), '(hover=True, formatter=formatter, point_labels=labels)\n', (599, 653), False, 'from mpldatacursor import datacursor\n'), ((655, 665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (663, 665), True, 'import matplotlib.pyplot as plt\n')]
import torch import torch.nn as nn class FilterResponseNorm(nn.Module): def __init__(self, num_features, eps=1e-6, use_TLU=True): super(FilterResponseNorm, self).__init__() self.num_features = num_features self.eps = eps self.use_TLU = use_TLU self.weight = nn.Parameter(torch.Tensor(num_features)) self.bias = nn.Parameter(torch.Tensor(num_features)) if use_TLU: self.tau = nn.Parameter(torch.Tensor(num_features)) else: self.register_parameter('tau', None) self.reset_parameters() def reset_parameters(self): nn.init.uniform_(self.weight) nn.init.zeros_(self.bias) if self.use_TLU: nn.init.zeros_(self.tau) def forward(self, input): nu2 = torch.mean(input.pow(2), (2, 3), keepdim=True) out = input * torch.rsqrt(nu2 + abs(self.eps)) weight = self.weight[..., None, None] bias = self.bias[..., None, None] # Return after applying the Offset-ReLU non-linearity if self.use_TLU: tau = self.tau[..., None, None] return torch.max(weight*out + bias, tau) else: return self.gamma*out + self.bias
[ "torch.max", "torch.nn.init.uniform_", "torch.nn.init.zeros_", "torch.Tensor" ]
[((629, 658), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.weight'], {}), '(self.weight)\n', (645, 658), True, 'import torch.nn as nn\n'), ((667, 692), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.bias'], {}), '(self.bias)\n', (681, 692), True, 'import torch.nn as nn\n'), ((319, 345), 'torch.Tensor', 'torch.Tensor', (['num_features'], {}), '(num_features)\n', (331, 345), False, 'import torch\n'), ((380, 406), 'torch.Tensor', 'torch.Tensor', (['num_features'], {}), '(num_features)\n', (392, 406), False, 'import torch\n'), ((730, 754), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.tau'], {}), '(self.tau)\n', (744, 754), True, 'import torch.nn as nn\n'), ((1142, 1177), 'torch.max', 'torch.max', (['(weight * out + bias)', 'tau'], {}), '(weight * out + bias, tau)\n', (1151, 1177), False, 'import torch\n'), ((464, 490), 'torch.Tensor', 'torch.Tensor', (['num_features'], {}), '(num_features)\n', (476, 490), False, 'import torch\n')]
from time import time def profile(funcao): def funcao_wrapper(*args, **kwargs): inicio = time() resultado = funcao(*args, **kwargs) fim = time() print(fim - inicio) return resultado return funcao_wrapper @profile def f(n): return 'Executei f {}'.format(n) print(f.__name__) a = f print(a(4)) print(a(5)) def g(): def h(): return 'executando h' return h print(g()())
[ "time.time" ]
[((103, 109), 'time.time', 'time', ([], {}), '()\n', (107, 109), False, 'from time import time\n'), ((168, 174), 'time.time', 'time', ([], {}), '()\n', (172, 174), False, 'from time import time\n')]
import numpy as np # import matplotlib.pyplot as plt from scipy.cluster.vq import kmeans # def plothist(x): # vmin = x.min()-1 # vmax = x.max()+1 # bins = np.arange(vmin, vmax, (vmax - vmin)/50) # plt.hist(x, bins=bins) # plt.show() # def scatterpred(pred): # plt.scatter(pred[:,0], pred[:,1]) # plt.show() # def scatter_kmeans(pred): # plt.scatter(pred[:,0], pred[:,1], color='b') # c,v = kmeans(pred, 8) # plt.scatter(c[:,0], c[:,1], color='r') # plt.show() def most_assigned(x, c): nb_c = len(c) assign = np.zeros(nb_c) for i in range(len(x)): y = x[i].reshape((1,2)) d = np.sqrt(np.sum(np.power(y.repeat(nb_c, axis=0) - c, 2), axis=1)) assign[d.argmin()] += 1 return assign.argmax() def mean_on_most_assigned(x, c): nb_c = len(c) assign = np.zeros(nb_c) mean = np.zeros(c.shape) for i in range(len(x)): y = x[i].reshape((1,2)) d = np.sqrt(np.sum(np.power(y.repeat(nb_c, axis=0) - c, 2), axis=1)) idx = d.argmin() assign[idx] += 1 mean[idx,:] += x[i] idx = assign.argmax() return mean[idx,:] / assign[idx] # def best_kmeans(pred): # plt.scatter(pred[:,0], pred[:,1], color='b') # c,v = kmeans(pred, 3) # plt.scatter(c[:,0], c[:,1], color='g') # n = most_assigned(pred, c) # plt.scatter(c[n,0], c[n,1], color='r') # plt.show() def clustering_joints(y_pred, k=3): _,nb_spl,nb_joints,dim = y_pred.shape y = np.zeros((nb_spl, nb_joints, dim)) for s in range(nb_spl): for j in range(nb_joints): d = y_pred[:,s,j] c,v = kmeans(d, k) n = most_assigned(d, c) y[s,j,:] = c[n] return y def clustering_grid(y_pred, size=10): _, nb_spl, nb_joints, dim = y_pred.shape assert dim == 2 yp = np.zeros((nb_spl, nb_joints, dim)) for s in range(nb_spl): for j in range(nb_joints): d = y_pred[:,s,j,:] xmin = d[:,0].min() ymin = d[:,1].min() xmax = d[:,0].max() ymax = d[:,1].max() xstep = (xmax - xmin) / size ystep = (ymax - ymin) / size c = np.zeros((size * size, dim)) for x in range(size): for y in range(size): c[x + size*y, 0] = xmin + (x + 0.5) * xstep c[x + size*y, 1] = ymin + (y + 0.5) * ystep yp[s,j,:] = 
mean_on_most_assigned(d, c) return yp def mean_joints(y_pred): _, nb_spl, dim, nb_joints = y_pred.shape assert dim == 2 yp = np.zeros((nb_spl, dim, nb_joints)) for s in range(nb_spl): for j in range(nb_joints): d = y_pred[:,s,:,j] yp[s, 0, j] = d[:,0].mean() yp[s, 1, j] = d[:,1].mean() return yp
[ "numpy.zeros", "scipy.cluster.vq.kmeans" ]
[((564, 578), 'numpy.zeros', 'np.zeros', (['nb_c'], {}), '(nb_c)\n', (572, 578), True, 'import numpy as np\n'), ((840, 854), 'numpy.zeros', 'np.zeros', (['nb_c'], {}), '(nb_c)\n', (848, 854), True, 'import numpy as np\n'), ((866, 883), 'numpy.zeros', 'np.zeros', (['c.shape'], {}), '(c.shape)\n', (874, 883), True, 'import numpy as np\n'), ((1494, 1528), 'numpy.zeros', 'np.zeros', (['(nb_spl, nb_joints, dim)'], {}), '((nb_spl, nb_joints, dim))\n', (1502, 1528), True, 'import numpy as np\n'), ((1843, 1877), 'numpy.zeros', 'np.zeros', (['(nb_spl, nb_joints, dim)'], {}), '((nb_spl, nb_joints, dim))\n', (1851, 1877), True, 'import numpy as np\n'), ((2594, 2628), 'numpy.zeros', 'np.zeros', (['(nb_spl, dim, nb_joints)'], {}), '((nb_spl, dim, nb_joints))\n', (2602, 2628), True, 'import numpy as np\n'), ((1640, 1652), 'scipy.cluster.vq.kmeans', 'kmeans', (['d', 'k'], {}), '(d, k)\n', (1646, 1652), False, 'from scipy.cluster.vq import kmeans\n'), ((2199, 2227), 'numpy.zeros', 'np.zeros', (['(size * size, dim)'], {}), '((size * size, dim))\n', (2207, 2227), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*- """ Created on Tue Apr 20 13:32:20 2021 #--- ag csv results to single file based on dashboard_dbs #--- <NAME> (<EMAIL>) #--- Jul, 2021. #--- Dev-log in: https://github.com/Murilodsv/py-jules @author: muril """ # DEBUG import os; os.chdir('C:/Murilo/py-jules') #------------------------------# #--- generate qsub-clusters ---# #------------------------------# dash_nm = 'dashboard_db.csv' # Filename of Dashboard CSV wd_out = 'ginore/csv_res' sim_id = 'future-crop' dap_f = range(1,361) var_nm = 'fsmc' var_ag = 'pft' f_sufix = '.day.time_pft_y_x.csv' #--- Get scripts arguments if __name__ == "__main__": import sys if len(sys.argv) > 1: #--- use arguments dash_nm = str(sys.argv[1]) # debug dash_nm = 'dashboard_db_future.csv' wd_out = str(sys.argv[2]) sim_id = str(sys.argv[3]) dap_f = str(sys.argv[4]) var_nm = str(sys.argv[5]) var_ag = str(sys.argv[6]) f_sufix = str(sys.argv[7]) import numpy as np dap_f = '1,360' dap_f = np.array(dap_f.split(',')) # arg_run_id = np.array(str('SC3572').split(',')) if len(dap_f) == 1: dap_f = range(0,int(dap_f)) else: dap_f = range(int(dap_f[0]), int(dap_f[1])+1) #----------------------# #--- Load libraries ---# #----------------------# import os import os.path import util as u from time import time #--- Track progress run_start = time() #----------------------# #--- Read dashboard ---# #----------------------# #--- get run wd wd = os.getcwd().replace('\\','/') #--- Open CSVs dash = u.df_csv(wd+'/'+dash_nm) #--- list of clusters l_ids = dash['run_id'].unique() ini_df = True for i in l_ids: #--- filename fn = wd+'/jules_run/'+i+'/namelists/output/'+i+f_sufix if os.path.exists(fn): print('Reading '+i) #--- read file df_i = u.df_csv(fn) #--- filter dap df_i = df_i[:][df_i['dap'].isin(range(1,360))] #--- aggregate the var_nm by mean and var_ag df_i = df_i.groupby(var_ag, as_index=False)[var_nm].mean() #--- flag with run_id df_i['run_id'] = i #--- bind to single df if ini_df: df_res = df_i ini_df = False else: df_res = 
df_res.append(df_i) #--- write results df_res.to_csv(wd+'/'+wd_out+'/'+sim_id+f_sufix, index = None, header=True) #--- track time print("\nElapsed time of copying: --- %.3f seconds ---" % (time() - run_start))
[ "os.path.exists", "time.time", "util.df_csv", "os.getcwd" ]
[((1562, 1568), 'time.time', 'time', ([], {}), '()\n', (1566, 1568), False, 'from time import time\n'), ((1722, 1750), 'util.df_csv', 'u.df_csv', (["(wd + '/' + dash_nm)"], {}), "(wd + '/' + dash_nm)\n", (1730, 1750), True, 'import util as u\n'), ((1919, 1937), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (1933, 1937), False, 'import os\n'), ((1669, 1680), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1678, 1680), False, 'import os\n'), ((2007, 2019), 'util.df_csv', 'u.df_csv', (['fn'], {}), '(fn)\n', (2015, 2019), True, 'import util as u\n'), ((2625, 2631), 'time.time', 'time', ([], {}), '()\n', (2629, 2631), False, 'from time import time\n')]
import os from django.conf.urls.defaults import * # Uncomment the next two lines to enable the admin: # from django.contrib import admin # admin.autodiscover() urlpatterns = patterns('', # Example: # (r'^django_amf_example/', include('django_amf_example.foo.urls')), # Uncomment the admin/doc line below and add 'django.contrib.admindocs' # to INSTALLED_APPS to enable admin documentation: # (r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to enable the admin: # (r'^admin/', include(admin.site.urls)), (r'^$', 'django.views.generic.simple.redirect_to', {'url': '/static/hello_world.html'}), (r'^amf', 'django_amf_example.hello_world.django_channels.rpc_channel'), (r'^static/(?P<path>.*)$', 'django.views.static.serve', { 'document_root': os.path.join('flex', 'deploy'), 'show_indexes': True}), )
[ "os.path.join" ]
[((832, 862), 'os.path.join', 'os.path.join', (['"""flex"""', '"""deploy"""'], {}), "('flex', 'deploy')\n", (844, 862), False, 'import os\n')]
# coding: utf-8 """ MolecularMatch MMPower MMPower API # noqa: E501 OpenAPI spec version: 1.0.0 Contact: <EMAIL> Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class PrivateTrial(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'id': 'str', 'institution_id': 'str', 'institution_study_id': 'str', 'registry_id': 'str', 'visible_to_idn': 'bool', 'brief_title': 'str', 'acronym': 'list[str]', 'official_title': 'str', 'sponsors': 'list[ClinicalTrialSponsors]', 'source': 'str', 'oversight': 'Oversight', 'brief_summary': 'str', 'detailed_description': 'str', 'status': 'str', 'start_date': 'datetime', 'completion_date': 'datetime', 'phase': 'str', 'study_type': 'str', 'has_expanded_access': 'bool', 'expanded_access': 'ExpandedAccess', 'study_design': 'StudyDesign', 'primary_outcome': 'list[Outcome]', 'secondary_outcome': 'list[Outcome]', 'other_outcome': 'list[Outcome]', 'number_of_arms': 'int', 'number_of_groups': 'int', 'enrollment': 'int', 'condition': 'list[str]', 'arm_group': 'list[ArmGroup]', 'intervention': 'list[Intervention]', 'biospec_retention': 'str', 'biospec_descr': 'str', 'eligibility': 'Eligibility', 'overall_official': 'list[Investigator]', 'overall_contact': 'Contact', 'overall_contact_backup': 'Contact', 'location': 'list[Location]', 'location_countries': 'list[str]', 'link': 'str', 'reference': 'list[Reference]', 'verification_date': 'datetime', 'study_first_submitted': 'datetime', 'study_first_posted': 'datetime', 'last_update_posted': 'datetime', 'keyword': 'list[str]', 'responsible_party': 'list[ResponsibleParty]', 'processing_status': 'str', 'test': 'bool' } attribute_map = { 'id': 'id', 'institution_id': 
'institution_id', 'institution_study_id': 'institution_study_id', 'registry_id': 'registry_id', 'visible_to_idn': 'visible_to_IDN', 'brief_title': 'brief_title', 'acronym': 'acronym', 'official_title': 'official_title', 'sponsors': 'sponsors', 'source': 'source', 'oversight': 'oversight', 'brief_summary': 'brief_summary', 'detailed_description': 'detailed_description', 'status': 'status', 'start_date': 'start_date', 'completion_date': 'completion_date', 'phase': 'phase', 'study_type': 'study_type', 'has_expanded_access': 'has_expanded_access', 'expanded_access': 'expanded_access', 'study_design': 'study_design', 'primary_outcome': 'primary_outcome', 'secondary_outcome': 'secondary_outcome', 'other_outcome': 'other_outcome', 'number_of_arms': 'number_of_arms', 'number_of_groups': 'number_of_groups', 'enrollment': 'enrollment', 'condition': 'condition', 'arm_group': 'arm_group', 'intervention': 'intervention', 'biospec_retention': 'biospec_retention', 'biospec_descr': 'biospec_descr', 'eligibility': 'eligibility', 'overall_official': 'overall_official', 'overall_contact': 'overall_contact', 'overall_contact_backup': 'overall_contact_backup', 'location': 'location', 'location_countries': 'location_countries', 'link': 'link', 'reference': 'reference', 'verification_date': 'verification_date', 'study_first_submitted': 'study_first_submitted', 'study_first_posted': 'study_first_posted', 'last_update_posted': 'last_update_posted', 'keyword': 'keyword', 'responsible_party': 'responsible_party', 'processing_status': 'processing_status', 'test': 'test' } def __init__(self, id=None, institution_id=None, institution_study_id=None, registry_id=None, visible_to_idn=True, brief_title=None, acronym=None, official_title=None, sponsors=None, source=None, oversight=None, brief_summary=None, detailed_description=None, status=None, start_date=None, completion_date=None, phase='N/A', study_type=None, has_expanded_access=None, expanded_access=None, study_design=None, 
                 primary_outcome=None, secondary_outcome=None, other_outcome=None,
                 number_of_arms=1, number_of_groups=1, enrollment=None,
                 condition=None, arm_group=None, intervention=None,
                 biospec_retention='None Retained', biospec_descr=None,
                 eligibility=None, overall_official=None, overall_contact=None,
                 overall_contact_backup=None, location=None,
                 location_countries=None, link=None, reference=None,
                 verification_date=None, study_first_submitted=None,
                 study_first_posted=None, last_update_posted=None, keyword=None,
                 responsible_party=None, processing_status='received',
                 test=None):  # noqa: E501
        """PrivateTrial - a model defined in Swagger"""  # noqa: E501
        # Private backing fields for every property; the public setters below
        # (which perform required/enum validation) populate them.
        self._id = None
        self._institution_id = None
        self._institution_study_id = None
        self._registry_id = None
        self._visible_to_idn = None
        self._brief_title = None
        self._acronym = None
        self._official_title = None
        self._sponsors = None
        self._source = None
        self._oversight = None
        self._brief_summary = None
        self._detailed_description = None
        self._status = None
        self._start_date = None
        self._completion_date = None
        self._phase = None
        self._study_type = None
        self._has_expanded_access = None
        self._expanded_access = None
        self._study_design = None
        self._primary_outcome = None
        self._secondary_outcome = None
        self._other_outcome = None
        self._number_of_arms = None
        self._number_of_groups = None
        self._enrollment = None
        self._condition = None
        self._arm_group = None
        self._intervention = None
        self._biospec_retention = None
        self._biospec_descr = None
        self._eligibility = None
        self._overall_official = None
        self._overall_contact = None
        self._overall_contact_backup = None
        self._location = None
        self._location_countries = None
        self._link = None
        self._reference = None
        self._verification_date = None
        self._study_first_submitted = None
        self._study_first_posted = None
        self._last_update_posted = None
        self._keyword = None
        self._responsible_party = None
        self._processing_status = None
        self._test = None
        self.discriminator = None
        # Optional fields are assigned only when provided; required fields
        # (institution_id, institution_study_id, official_title, status,
        # start_date, study_type, location) are assigned unconditionally so
        # their setters can reject `None`.
        if id is not None:
            self.id = id
        self.institution_id = institution_id
        self.institution_study_id = institution_study_id
        if registry_id is not None:
            self.registry_id = registry_id
        if visible_to_idn is not None:
            self.visible_to_idn = visible_to_idn
        if brief_title is not None:
            self.brief_title = brief_title
        if acronym is not None:
            self.acronym = acronym
        self.official_title = official_title
        if sponsors is not None:
            self.sponsors = sponsors
        if source is not None:
            self.source = source
        if oversight is not None:
            self.oversight = oversight
        if brief_summary is not None:
            self.brief_summary = brief_summary
        if detailed_description is not None:
            self.detailed_description = detailed_description
        self.status = status
        self.start_date = start_date
        if completion_date is not None:
            self.completion_date = completion_date
        if phase is not None:
            self.phase = phase
        self.study_type = study_type
        if has_expanded_access is not None:
            self.has_expanded_access = has_expanded_access
        if expanded_access is not None:
            self.expanded_access = expanded_access
        if study_design is not None:
            self.study_design = study_design
        if primary_outcome is not None:
            self.primary_outcome = primary_outcome
        if secondary_outcome is not None:
            self.secondary_outcome = secondary_outcome
        if other_outcome is not None:
            self.other_outcome = other_outcome
        if number_of_arms is not None:
            self.number_of_arms = number_of_arms
        if number_of_groups is not None:
            self.number_of_groups = number_of_groups
        if enrollment is not None:
            self.enrollment = enrollment
        if condition is not None:
            self.condition = condition
        if arm_group is not None:
            self.arm_group = arm_group
        if intervention is not None:
            self.intervention = intervention
        if biospec_retention is not None:
            self.biospec_retention = biospec_retention
        if biospec_descr is not None:
            self.biospec_descr = biospec_descr
        if eligibility is not None:
            self.eligibility = eligibility
        if overall_official is not None:
            self.overall_official = overall_official
        if overall_contact is not None:
            self.overall_contact = overall_contact
        if overall_contact_backup is not None:
            self.overall_contact_backup = overall_contact_backup
        self.location = location
        if location_countries is not None:
            self.location_countries = location_countries
        if link is not None:
            self.link = link
        if reference is not None:
            self.reference = reference
        if verification_date is not None:
            self.verification_date = verification_date
        if study_first_submitted is not None:
            self.study_first_submitted = study_first_submitted
        if study_first_posted is not None:
            self.study_first_posted = study_first_posted
        if last_update_posted is not None:
            self.last_update_posted = last_update_posted
        if keyword is not None:
            self.keyword = keyword
        if responsible_party is not None:
            self.responsible_party = responsible_party
        if processing_status is not None:
            self.processing_status = processing_status
        if test is not None:
            self.test = test

    @property
    def id(self):
        """Gets the id of this PrivateTrial.

        Unique study identifier.

        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this PrivateTrial (unique study identifier).

        :type: str
        """
        self._id = id

    @property
    def institution_id(self):
        """Gets the institution_id of this PrivateTrial.

        Unique institution identifier.

        :rtype: str
        """
        return self._institution_id

    @institution_id.setter
    def institution_id(self, institution_id):
        """Sets the institution_id of this PrivateTrial. Required.

        :type: str
        :raises ValueError: if set to None (required field).
        """
        if institution_id is None:
            raise ValueError("Invalid value for `institution_id`, must not be `None`")  # noqa: E501

        self._institution_id = institution_id

    @property
    def institution_study_id(self):
        """Gets the institution_study_id of this PrivateTrial.

        Unique study identifier (for the institution).

        :rtype: str
        """
        return self._institution_study_id

    @institution_study_id.setter
    def institution_study_id(self, institution_study_id):
        """Sets the institution_study_id of this PrivateTrial. Required.

        :type: str
        :raises ValueError: if set to None (required field).
        """
        if institution_study_id is None:
            raise ValueError("Invalid value for `institution_study_id`, must not be `None`")  # noqa: E501

        self._institution_study_id = institution_study_id

    @property
    def registry_id(self):
        """Gets the registry_id of this PrivateTrial.

        The public registry study id. Only populated once the trial is no
        longer a private trial.

        :rtype: str
        """
        return self._registry_id

    @registry_id.setter
    def registry_id(self, registry_id):
        """Sets the registry_id of this PrivateTrial.

        :type: str
        """
        self._registry_id = registry_id

    @property
    def visible_to_idn(self):
        """Gets the visible_to_idn of this PrivateTrial.

        If true, the trial is visible to the entire IDN; otherwise only to
        the owning institution.

        :rtype: bool
        """
        return self._visible_to_idn

    @visible_to_idn.setter
    def visible_to_idn(self, visible_to_idn):
        """Sets the visible_to_idn of this PrivateTrial.

        :type: bool
        """
        self._visible_to_idn = visible_to_idn

    @property
    def brief_title(self):
        """Gets the brief_title of this PrivateTrial.

        A short title of the clinical study written in language intended for
        the lay public.

        :rtype: str
        """
        return self._brief_title

    @brief_title.setter
    def brief_title(self, brief_title):
        """Sets the brief_title of this PrivateTrial.

        :type: str
        """
        self._brief_title = brief_title

    @property
    def acronym(self):
        """Gets the acronym of this PrivateTrial.

        Acronyms or abbreviations used publicly to identify the study.

        :rtype: list[str]
        """
        return self._acronym

    @acronym.setter
    def acronym(self, acronym):
        """Sets the acronym of this PrivateTrial.

        :type: list[str]
        """
        self._acronym = acronym

    @property
    def official_title(self):
        """Gets the official_title of this PrivateTrial.

        Official title for the clinical trial.

        :rtype: str
        """
        return self._official_title

    @official_title.setter
    def official_title(self, official_title):
        """Sets the official_title of this PrivateTrial. Required.

        :type: str
        :raises ValueError: if set to None (required field).
        """
        if official_title is None:
            raise ValueError("Invalid value for `official_title`, must not be `None`")  # noqa: E501

        self._official_title = official_title

    @property
    def sponsors(self):
        """Gets the sponsors of this PrivateTrial.

        Organizations or persons who initiated the study and have authority
        and control over it.

        :rtype: list[ClinicalTrialSponsors]
        """
        return self._sponsors

    @sponsors.setter
    def sponsors(self, sponsors):
        """Sets the sponsors of this PrivateTrial.

        :type: list[ClinicalTrialSponsors]
        """
        self._sponsors = sponsors

    @property
    def source(self):
        """Gets the source of this PrivateTrial.

        Native data source of this record.

        :rtype: str
        """
        return self._source

    @source.setter
    def source(self, source):
        """Sets the source of this PrivateTrial.

        :type: str
        """
        self._source = source

    @property
    def oversight(self):
        """Gets the oversight of this PrivateTrial.

        :rtype: Oversight
        """
        return self._oversight

    @oversight.setter
    def oversight(self, oversight):
        """Sets the oversight of this PrivateTrial.

        :type: Oversight
        """
        self._oversight = oversight

    @property
    def brief_summary(self):
        """Gets the brief_summary of this PrivateTrial.

        A short description of the study, including a brief statement of its
        hypothesis, written for the lay public.

        :rtype: str
        """
        return self._brief_summary

    @brief_summary.setter
    def brief_summary(self, brief_summary):
        """Sets the brief_summary of this PrivateTrial.

        :type: str
        """
        self._brief_summary = brief_summary

    @property
    def detailed_description(self):
        """Gets the detailed_description of this PrivateTrial.

        Extended description of the protocol, with more technical information
        than the brief summary; should not duplicate other data elements.

        :rtype: str
        """
        return self._detailed_description

    @detailed_description.setter
    def detailed_description(self, detailed_description):
        """Sets the detailed_description of this PrivateTrial.

        :type: str
        """
        self._detailed_description = detailed_description

    @property
    def status(self):
        """Gets the status of this PrivateTrial.

        Trial recruiting status.

        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this PrivateTrial. Required; enum-validated.

        :type: str
        :raises ValueError: if None or not one of the allowed statuses.
        """
        if status is None:
            raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501
        allowed_values = ["Active, not recruiting", "Approved for marketing", "Available", "Completed", "Enrolling by invitation", "No longer available", "Not yet recruiting", "Recruiting", "Suspended", "Temporarily not available", "Terminated", "Withdrawn", "Withheld", "Unknown status"]  # noqa: E501
        if status not in allowed_values:
            raise ValueError(
                "Invalid value for `status` ({0}), must be one of {1}"  # noqa: E501
                .format(status, allowed_values)
            )

        self._status = status

    @property
    def start_date(self):
        """Gets the start_date of this PrivateTrial.

        Estimated date the study opens for recruitment, or actual date the
        first participant was enrolled.

        :rtype: datetime
        """
        return self._start_date

    @start_date.setter
    def start_date(self, start_date):
        """Sets the start_date of this PrivateTrial. Required.

        :type: datetime
        :raises ValueError: if set to None (required field).
        """
        if start_date is None:
            raise ValueError("Invalid value for `start_date`, must not be `None`")  # noqa: E501

        self._start_date = start_date

    @property
    def completion_date(self):
        """Gets the completion_date of this PrivateTrial.

        Date the final participant was examined or received an intervention
        for final data collection (last participant's last visit), whether
        the study concluded per protocol or was terminated.

        :rtype: datetime
        """
        return self._completion_date

    @completion_date.setter
    def completion_date(self, completion_date):
        """Sets the completion_date of this PrivateTrial.

        :type: datetime
        """
        self._completion_date = completion_date

    @property
    def phase(self):
        """Gets the phase of this PrivateTrial.

        Numerical phase of a drug/biological trial, per 21 CFR 312.21 and
        21 CFR 312.85 (phase 4).

        :rtype: str
        """
        return self._phase

    @phase.setter
    def phase(self, phase):
        """Sets the phase of this PrivateTrial. Enum-validated.

        :type: str
        :raises ValueError: if not one of the allowed phases.
        """
        allowed_values = ["N/A", "Early Phase 1", "Phase 1", "Phase 1/Phase 2", "Phase 2", "Phase 2/Phase 3", "Phase 3", "Phase 4"]  # noqa: E501
        if phase not in allowed_values:
            raise ValueError(
                "Invalid value for `phase` ({0}), must be one of {1}"  # noqa: E501
                .format(phase, allowed_values)
            )

        self._phase = phase

    @property
    def study_type(self):
        """Gets the study_type of this PrivateTrial.

        The nature of the investigation or investigational use for which
        clinical study information is being submitted.

        :rtype: str
        """
        return self._study_type

    @study_type.setter
    def study_type(self, study_type):
        """Sets the study_type of this PrivateTrial. Required; enum-validated.

        :type: str
        :raises ValueError: if None or not one of the allowed types.
        """
        if study_type is None:
            raise ValueError("Invalid value for `study_type`, must not be `None`")  # noqa: E501
        allowed_values = ["Expanded Access", "Interventional", "N/A", "Observational", "Observational [Patient Registry]"]  # noqa: E501
        if study_type not in allowed_values:
            raise ValueError(
                "Invalid value for `study_type` ({0}), must be one of {1}"  # noqa: E501
                .format(study_type, allowed_values)
            )

        self._study_type = study_type

    @property
    def has_expanded_access(self):
        """Gets the has_expanded_access of this PrivateTrial.

        Whether there is expanded access to the investigational product for
        patients who do not qualify for enrollment, covering all expanded
        access types under section 561 of the FD&C Act.

        :rtype: bool
        """
        return self._has_expanded_access

    @has_expanded_access.setter
    def has_expanded_access(self, has_expanded_access):
        """Sets the has_expanded_access of this PrivateTrial.

        :type: bool
        """
        self._has_expanded_access = has_expanded_access

    @property
    def expanded_access(self):
        """Gets the expanded_access of this PrivateTrial.

        :rtype: ExpandedAccess
        """
        return self._expanded_access

    @expanded_access.setter
    def expanded_access(self, expanded_access):
        """Sets the expanded_access of this PrivateTrial.

        :type: ExpandedAccess
        """
        self._expanded_access = expanded_access

    @property
    def study_design(self):
        """Gets the study_design of this PrivateTrial.

        :rtype: StudyDesign
        """
        return self._study_design

    @study_design.setter
    def study_design(self, study_design):
        """Sets the study_design of this PrivateTrial.

        :type: StudyDesign
        """
        self._study_design = study_design

    @property
    def primary_outcome(self):
        """Gets the primary_outcome of this PrivateTrial.

        The outcome the investigator considers most important among those
        examined in the study.

        :rtype: list[Outcome]
        """
        return self._primary_outcome

    @primary_outcome.setter
    def primary_outcome(self, primary_outcome):
        """Sets the primary_outcome of this PrivateTrial.

        :type: list[Outcome]
        """
        self._primary_outcome = primary_outcome

    @property
    def secondary_outcome(self):
        """Gets the secondary_outcome of this PrivateTrial.

        :rtype: list[Outcome]
        """
        return self._secondary_outcome

    @secondary_outcome.setter
    def secondary_outcome(self, secondary_outcome):
        """Sets the secondary_outcome of this PrivateTrial.

        :type: list[Outcome]
        """
        self._secondary_outcome = secondary_outcome

    @property
    def other_outcome(self):
        """Gets the other_outcome of this PrivateTrial.

        :rtype: list[Outcome]
        """
        return self._other_outcome

    @other_outcome.setter
    def other_outcome(self, other_outcome):
        """Sets the other_outcome of this PrivateTrial.

        :type: list[Outcome]
        """
        self._other_outcome = other_outcome

    @property
    def number_of_arms(self):
        """Gets the number_of_arms of this PrivateTrial.

        The number of trial arms.

        :rtype: int
        """
        return self._number_of_arms

    @number_of_arms.setter
    def number_of_arms(self, number_of_arms):
        """Sets the number_of_arms of this PrivateTrial.

        :type: int
        """
        self._number_of_arms = number_of_arms

    @property
    def number_of_groups(self):
        """Gets the number_of_groups of this PrivateTrial.

        The number of trial groups.

        :rtype: int
        """
        return self._number_of_groups

    @number_of_groups.setter
    def number_of_groups(self, number_of_groups):
        """Sets the number_of_groups of this PrivateTrial.

        :type: int
        """
        self._number_of_groups = number_of_groups

    @property
    def enrollment(self):
        """Gets the enrollment of this PrivateTrial.

        Estimated target number of participants, or actual total number
        enrolled in the study.

        :rtype: int
        """
        return self._enrollment

    @enrollment.setter
    def enrollment(self, enrollment):
        """Sets the enrollment of this PrivateTrial.

        :type: int
        """
        self._enrollment = enrollment

    @property
    def condition(self):
        """Gets the condition of this PrivateTrial.

        Diseases/conditions related to this trial.

        :rtype: list[str]
        """
        return self._condition

    @condition.setter
    def condition(self, condition):
        """Sets the condition of this PrivateTrial.

        :type: list[str]
        """
        self._condition = condition

    @property
    def arm_group(self):
        """Gets the arm_group of this PrivateTrial.

        Pre-specified groups of participants assigned to receive specific
        interventions (or none) according to a protocol.

        :rtype: list[ArmGroup]
        """
        return self._arm_group

    @arm_group.setter
    def arm_group(self, arm_group):
        """Sets the arm_group of this PrivateTrial.

        :type: list[ArmGroup]
        """
        self._arm_group = arm_group

    @property
    def intervention(self):
        """Gets the intervention of this PrivateTrial.

        The intervention(s) associated with each arm or group.

        :rtype: list[Intervention]
        """
        return self._intervention

    @intervention.setter
    def intervention(self, intervention):
        """Sets the intervention of this PrivateTrial.

        :type: list[Intervention]
        """
        self._intervention = intervention

    @property
    def biospec_retention(self):
        """Gets the biospec_retention of this PrivateTrial.

        :rtype: str
        """
        return self._biospec_retention

    @biospec_retention.setter
    def biospec_retention(self, biospec_retention):
        """Sets the biospec_retention of this PrivateTrial. Enum-validated.

        :type: str
        :raises ValueError: if not one of the allowed retention values.
        """
        allowed_values = ["None Retained", "Samples With DNA", "Samples Without DNA"]  # noqa: E501
        if biospec_retention not in allowed_values:
            raise ValueError(
                "Invalid value for `biospec_retention` ({0}), must be one of {1}"  # noqa: E501
                .format(biospec_retention, allowed_values)
            )

        self._biospec_retention = biospec_retention

    @property
    def biospec_descr(self):
        """Gets the biospec_descr of this PrivateTrial.

        :rtype: str
        """
        return self._biospec_descr

    @biospec_descr.setter
    def biospec_descr(self, biospec_descr):
        """Sets the biospec_descr of this PrivateTrial.

        :type: str
        """
        self._biospec_descr = biospec_descr

    @property
    def eligibility(self):
        """Gets the eligibility of this PrivateTrial.

        :rtype: Eligibility
        """
        return self._eligibility

    @eligibility.setter
    def eligibility(self, eligibility):
        """Sets the eligibility of this PrivateTrial.

        :type: Eligibility
        """
        self._eligibility = eligibility

    @property
    def overall_official(self):
        """Gets the overall_official of this PrivateTrial.

        Person responsible for the overall scientific leadership of the
        protocol, including the study principal investigator.

        :rtype: list[Investigator]
        """
        return self._overall_official

    @overall_official.setter
    def overall_official(self, overall_official):
        """Sets the overall_official of this PrivateTrial.

        :type: list[Investigator]
        """
        self._overall_official = overall_official

    @property
    def overall_contact(self):
        """Gets the overall_contact of this PrivateTrial.

        :rtype: Contact
        """
        return self._overall_contact

    @overall_contact.setter
    def overall_contact(self, overall_contact):
        """Sets the overall_contact of this PrivateTrial.

        :type: Contact
        """
        self._overall_contact = overall_contact

    @property
    def overall_contact_backup(self):
        """Gets the overall_contact_backup of this PrivateTrial.

        :rtype: Contact
        """
        return self._overall_contact_backup

    @overall_contact_backup.setter
    def overall_contact_backup(self, overall_contact_backup):
        """Sets the overall_contact_backup of this PrivateTrial.

        :type: Contact
        """
        self._overall_contact_backup = overall_contact_backup

    @property
    def location(self):
        """Gets the location of this PrivateTrial.

        Information about the locations offering this trial.

        :rtype: list[Location]
        """
        return self._location

    @location.setter
    def location(self, location):
        """Sets the location of this PrivateTrial. Required.

        :type: list[Location]
        :raises ValueError: if set to None (required field).
        """
        if location is None:
            raise ValueError("Invalid value for `location`, must not be `None`")  # noqa: E501

        self._location = location

    @property
    def location_countries(self):
        """Gets the location_countries of this PrivateTrial.

        Countries with locations offering this trial.

        :rtype: list[str]
        """
        return self._location_countries

    @location_countries.setter
    def location_countries(self, location_countries):
        """Sets the location_countries of this PrivateTrial.

        :type: list[str]
        """
        self._location_countries = location_countries

    @property
    def link(self):
        """Gets the link of this PrivateTrial.

        URL to the institution (if private) or registry listing of this
        trial.

        :rtype: str
        """
        return self._link

    @link.setter
    def link(self, link):
        """Sets the link of this PrivateTrial.

        :type: str
        """
        self._link = link

    @property
    def reference(self):
        """Gets the reference of this PrivateTrial.

        Reference publications pertaining to this trial.

        :rtype: list[Reference]
        """
        return self._reference

    @reference.setter
    def reference(self, reference):
        """Sets the reference of this PrivateTrial.

        :type: list[Reference]
        """
        self._reference = reference

    @property
    def verification_date(self):
        """Gets the verification_date of this PrivateTrial.

        Date the responsible party last verified the study information in
        the ClinicalTrials.gov record, even if nothing new was submitted.

        :rtype: datetime
        """
        return self._verification_date

    @verification_date.setter
    def verification_date(self, verification_date):
        """Sets the verification_date of this PrivateTrial.

        :type: datetime
        """
        self._verification_date = verification_date

    @property
    def study_first_submitted(self):
        """Gets the study_first_submitted of this PrivateTrial.

        Date the sponsor or investigator first submitted a study record to
        the trial registry.

        :rtype: datetime
        """
        return self._study_first_submitted

    @study_first_submitted.setter
    def study_first_submitted(self, study_first_submitted):
        """Sets the study_first_submitted of this PrivateTrial.

        :type: datetime
        """
        self._study_first_submitted = study_first_submitted

    @property
    def study_first_posted(self):
        """Gets the study_first_posted of this PrivateTrial.

        Date the study was first made public on the trial registry.

        :rtype: datetime
        """
        return self._study_first_posted

    @study_first_posted.setter
    def study_first_posted(self, study_first_posted):
        """Sets the study_first_posted of this PrivateTrial.

        :type: datetime
        """
        self._study_first_posted = study_first_posted

    @property
    def last_update_posted(self):
        """Gets the last_update_posted of this PrivateTrial.

        Most recent date any information was updated for this trial.

        :rtype: datetime
        """
        return self._last_update_posted

    @last_update_posted.setter
    def last_update_posted(self, last_update_posted):
        """Sets the last_update_posted of this PrivateTrial.

        :type: datetime
        """
        self._last_update_posted = last_update_posted

    @property
    def keyword(self):
        """Gets the keyword of this PrivateTrial.

        Words or phrases that best describe the protocol; MeSH-controlled
        vocabulary terms where appropriate.

        :rtype: list[str]
        """
        return self._keyword

    @keyword.setter
    def keyword(self, keyword):
        """Sets the keyword of this PrivateTrial.

        :type: list[str]
        """
        self._keyword = keyword

    @property
    def responsible_party(self):
        """Gets the responsible_party of this PrivateTrial.

        The entities and individuals responsible for this trial.

        :rtype: list[ResponsibleParty]
        """
        return self._responsible_party

    @responsible_party.setter
    def responsible_party(self, responsible_party):
        """Sets the responsible_party of this PrivateTrial.

        :type: list[ResponsibleParty]
        """
        self._responsible_party = responsible_party

    @property
    def processing_status(self):
        """Gets the processing_status of this PrivateTrial.

        Level of readiness and incorporation into the MolecularMatch
        Knowledge base.

        :rtype: str
        """
        return self._processing_status

    @processing_status.setter
    def processing_status(self, processing_status):
        """Sets the processing_status of this PrivateTrial. Enum-validated.

        :type: str
        :raises ValueError: if not one of the allowed statuses.
        """
        allowed_values = ["received", "in-process", "registered"]  # noqa: E501
        if processing_status not in allowed_values:
            raise ValueError(
                "Invalid value for `processing_status` ({0}), must be one of {1}"  # noqa: E501
                .format(processing_status, allowed_values)
            )

        self._processing_status = processing_status

    @property
    def test(self):
        """Gets the test of this PrivateTrial.

        A flag to mark test private trials.

        :rtype: bool
        """
        return self._test

    @test.setter
    def test(self, test):
        """Sets the test of this PrivateTrial.

        A flag to mark test private trials.  # noqa: E501

        :param test: The test of this PrivateTrial.
# noqa: E501 :type: bool """ self._test = test def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(PrivateTrial, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, PrivateTrial): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
[ "six.iteritems" ]
[((50583, 50616), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (50596, 50616), False, 'import six\n')]
import argparse import gym import gym_module_select from stable_baselines.common.vec_env import DummyVecEnv def init_parse_argument(): parser = argparse.ArgumentParser() parser.add_argument('-e', '--num-exp', help='num experiment episode', type=int, default=10) args = parser.parse_args() return args args = init_parse_argument() env = gym.make('ModuleSelect-v1', verbose=1, save_log_flag=True, log_num=7, ) env = DummyVecEnv([lambda: env]) num_done = 0 num_proc = 0 try: obs = env.reset() while num_done < args.num_exp: if 0 <= num_proc <= 1: action = [4] elif 2 <= num_proc <= 3 or num_proc == 6: action = [3] elif num_proc == 5: action = [2] elif 8 <= num_proc <= 11: action = [1] elif num_proc == 4 or num_proc == 7: action = [0] else: print("action error") obs, rewards, dones, info = env.step(action) num_proc = int(obs[0][0]) env.render() if dones[0]: num_done += 1 except KeyboardInterrupt: pass env.close()
[ "gym.make", "argparse.ArgumentParser", "stable_baselines.common.vec_env.DummyVecEnv" ]
[((357, 426), 'gym.make', 'gym.make', (['"""ModuleSelect-v1"""'], {'verbose': '(1)', 'save_log_flag': '(True)', 'log_num': '(7)'}), "('ModuleSelect-v1', verbose=1, save_log_flag=True, log_num=7)\n", (365, 426), False, 'import gym\n'), ((495, 522), 'stable_baselines.common.vec_env.DummyVecEnv', 'DummyVecEnv', (['[lambda : env]'], {}), '([lambda : env])\n', (506, 522), False, 'from stable_baselines.common.vec_env import DummyVecEnv\n'), ((150, 175), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (173, 175), False, 'import argparse\n')]
import unittest from flask_script import Manager, Shell, Server from app import app, db from app.fake_populate import populate manager = Manager(app) def make_shell_context(): return dict(app=app) @manager.command def recreate_db(): """ Create the SQL database. """ db.drop_all() db.create_all() db.session.commit() print("recreated the database") @manager.command def fake_populate(): """ Load dummy data into db """ recreate_db() populate() print("populated database with dummy data") @manager.command def test(): """ run unit tests :return: result, successful or not """ tests = unittest.TestLoader().discover('tests', pattern='test*.py') result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): return 0 return 1 @manager.command def read_once(): """ a one-time read from the license server. """ from app.read_licenses import read read() print('Read completed.') manager.add_command('runserver', Server(threaded=True)) manager.add_command('shell', Shell(make_context=make_shell_context)) if __name__ == '__main__': manager.run()
[ "app.db.session.commit", "flask_script.Server", "flask_script.Manager", "flask_script.Shell", "app.fake_populate.populate", "app.read_licenses.read", "app.db.create_all", "app.db.drop_all", "unittest.TextTestRunner", "unittest.TestLoader" ]
[((138, 150), 'flask_script.Manager', 'Manager', (['app'], {}), '(app)\n', (145, 150), False, 'from flask_script import Manager, Shell, Server\n'), ((289, 302), 'app.db.drop_all', 'db.drop_all', ([], {}), '()\n', (300, 302), False, 'from app import app, db\n'), ((307, 322), 'app.db.create_all', 'db.create_all', ([], {}), '()\n', (320, 322), False, 'from app import app, db\n'), ((327, 346), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (344, 346), False, 'from app import app, db\n'), ((488, 498), 'app.fake_populate.populate', 'populate', ([], {}), '()\n', (496, 498), False, 'from app.fake_populate import populate\n'), ((984, 990), 'app.read_licenses.read', 'read', ([], {}), '()\n', (988, 990), False, 'from app.read_licenses import read\n'), ((1055, 1076), 'flask_script.Server', 'Server', ([], {'threaded': '(True)'}), '(threaded=True)\n', (1061, 1076), False, 'from flask_script import Manager, Shell, Server\n'), ((1107, 1145), 'flask_script.Shell', 'Shell', ([], {'make_context': 'make_shell_context'}), '(make_context=make_shell_context)\n', (1112, 1145), False, 'from flask_script import Manager, Shell, Server\n'), ((663, 684), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (682, 684), False, 'import unittest\n'), ((736, 772), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (759, 772), False, 'import unittest\n')]
from makememe.generator.prompts.prompt import Prompt import datetime from PIL import Image from makememe.generator.design.image_manager import Image_Manager class Waiting(Prompt): name = "Waiting" description = "waiting" def __init__(self): self.instruction = """ ### Message:I've been waiting for SpaceX to launch the starship for ever Meme:{"subject": "SpaceX Startship"} ### Message:I can't wait for makememe.ai to launch, but it's taking a little while Meme:{"subject": "makememe.ai"} ### Message:Drakes new album is going to be fire. Why do I have to wait Meme:{"subject": "Drakes new album"} ### Message:I want to create an NFT, but opensea.com is taking a while to load Meme:{"subject": "opensea.com"} ### """ def create(self, meme_text): with Image.open(f"makememe/static/meme_pics/{self.name.lower()}.jpg").convert( "RGBA" ) as base: overlay_image = Image_Manager.add_text( base=base, text=meme_text["subject"], position=(600, 950), font_size=40, wrapped_width=20, ) watermark = Image_Manager.add_text( base=base, text="makememe.ai", position=(30, 1100), font_size=20 ) base = Image.alpha_composite(base, watermark) out = Image.alpha_composite(base, overlay_image) if out.mode in ("RGBA", "P"): out = out.convert("RGB") # User.objects.filter() date = datetime.datetime.now() image_name = f"{date}.jpg" file_location = f"makememe/static/creations/{image_name}" out.save(file_location) return image_name
[ "datetime.datetime.now", "makememe.generator.design.image_manager.Image_Manager.add_text", "PIL.Image.alpha_composite" ]
[((928, 1045), 'makememe.generator.design.image_manager.Image_Manager.add_text', 'Image_Manager.add_text', ([], {'base': 'base', 'text': "meme_text['subject']", 'position': '(600, 950)', 'font_size': '(40)', 'wrapped_width': '(20)'}), "(base=base, text=meme_text['subject'], position=(600,\n 950), font_size=40, wrapped_width=20)\n", (950, 1045), False, 'from makememe.generator.design.image_manager import Image_Manager\n'), ((1161, 1253), 'makememe.generator.design.image_manager.Image_Manager.add_text', 'Image_Manager.add_text', ([], {'base': 'base', 'text': '"""makememe.ai"""', 'position': '(30, 1100)', 'font_size': '(20)'}), "(base=base, text='makememe.ai', position=(30, 1100),\n font_size=20)\n", (1183, 1253), False, 'from makememe.generator.design.image_manager import Image_Manager\n'), ((1300, 1338), 'PIL.Image.alpha_composite', 'Image.alpha_composite', (['base', 'watermark'], {}), '(base, watermark)\n', (1321, 1338), False, 'from PIL import Image\n'), ((1357, 1399), 'PIL.Image.alpha_composite', 'Image.alpha_composite', (['base', 'overlay_image'], {}), '(base, overlay_image)\n', (1378, 1399), False, 'from PIL import Image\n'), ((1546, 1569), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1567, 1569), False, 'import datetime\n')]
from python_helper import log, Test, SettingHelper, RandomHelper, ObjectHelper, TestHelper, ReflectionHelper, Constant from python_framework import EncapsulateItWithGlobalException, GlobalException, ExceptionHandler, HttpStatus LOG_HELPER_SETTINGS = { log.LOG : False, log.INFO : True, log.SUCCESS : True, log.SETTING : True, log.DEBUG : True, log.WARNING : True, log.WRAPPER : True, log.FAILURE : True, log.ERROR : True, log.TEST : False } FULL_LOG_HELPER_SETTINGS = { log.LOG : True, log.INFO : True, log.SUCCESS : True, log.SETTING : True, log.DEBUG : True, log.WARNING : True, log.WRAPPER : True, log.FAILURE : True, log.ERROR : True, log.TEST : False } SUCCESS = '__SUCCESS__' FAILURE = '__FAILURE__' RAISED_EXCEPTION = Exception(FAILURE) @EncapsulateItWithGlobalException() def externalFuncionDoesNotThrowsException(): return SUCCESS @EncapsulateItWithGlobalException() def externalFuncionDoesThrowsException(): raise RAISED_EXCEPTION @Test(environmentVariables={ SettingHelper.ACTIVE_ENVIRONMENT : SettingHelper.LOCAL_ENVIRONMENT, **FULL_LOG_HELPER_SETTINGS } ) def encapsulateItWithGlobalException_noParameters_unknownException() : #arrange @EncapsulateItWithGlobalException() def internalFuncionDoesNotThrowsException(): return SUCCESS @EncapsulateItWithGlobalException() def internalFuncionDoesThrowsException(): raise RAISED_EXCEPTION # #act externalSuccess = externalFuncionDoesNotThrowsException() internalSuccess = internalFuncionDoesNotThrowsException() externalFailure = TestHelper.getRaisedException(externalFuncionDoesThrowsException) internalFailure = TestHelper.getRaisedException(internalFuncionDoesThrowsException) # print(externalFailure.logResource) # print(externalFailure.logResourceMethod) # print(internalFailure.logResource) # print(internalFailure.logResourceMethod) #assert assert SUCCESS == externalSuccess assert SUCCESS == internalSuccess assert not RAISED_EXCEPTION == externalFailure, f'not {RAISED_EXCEPTION} == {externalFailure}: {not RAISED_EXCEPTION == 
externalFailure}' assert not RAISED_EXCEPTION == internalFailure, f'not {RAISED_EXCEPTION} == {internalFailure}: {not RAISED_EXCEPTION == internalFailure}' assert not RAISED_EXCEPTION == externalFailure, f'not {RAISED_EXCEPTION} == {externalFailure}: {not RAISED_EXCEPTION == externalFailure}' assert not RAISED_EXCEPTION == internalFailure, f'not {RAISED_EXCEPTION} == {internalFailure}: {not RAISED_EXCEPTION == internalFailure}' assert GlobalException == ReflectionHelper.getClass(externalFailure) assert GlobalException == ReflectionHelper.getClass(internalFailure) assert ExceptionHandler.DEFAULT_MESSAGE == externalFailure.message, f'{ExceptionHandler.DEFAULT_LOG_MESSAGE} == {externalFailure.message}: {ExceptionHandler.DEFAULT_LOG_MESSAGE == externalFailure.message}' assert ExceptionHandler.DEFAULT_MESSAGE == internalFailure.message assert HttpStatus.INTERNAL_SERVER_ERROR == externalFailure.status assert HttpStatus.INTERNAL_SERVER_ERROR == internalFailure.status assert FAILURE == externalFailure.logMessage assert FAILURE == internalFailure.logMessage assert ExceptionHandler.DEFAULT_LOG_RESOURCE == externalFailure.logResource, f'{ExceptionHandler.DEFAULT_LOG_RESOURCE} == {externalFailure.logResource}: {ExceptionHandler.DEFAULT_LOG_RESOURCE == externalFailure.logResource}' assert externalFuncionDoesThrowsException.__name__ == externalFailure.logResourceMethod.__name__, f'{externalFuncionDoesThrowsException} == {externalFailure.logResourceMethod}: {externalFuncionDoesThrowsException == externalFailure.logResourceMethod}' assert ExceptionHandler.DEFAULT_LOG_RESOURCE == internalFailure.logResource, f'{ExceptionHandler.DEFAULT_LOG_RESOURCE} == {internalFailure.logResource}: {ExceptionHandler.DEFAULT_LOG_RESOURCE == internalFailure.logResource}' assert type(internalFuncionDoesThrowsException) == type(internalFailure.logResourceMethod) @Test(environmentVariables={ SettingHelper.ACTIVE_ENVIRONMENT : SettingHelper.LOCAL_ENVIRONMENT, **FULL_LOG_HELPER_SETTINGS } ) def 
encapsulateItWithGlobalException_noParameters_GlobalException() : #arrange class MyClass: def myMethod(self): ... resource = MyClass() ERROR_MESSAGE = 'ERROR_MESSAGE' LOG_ERROR_MESSAGE = 'LOG_ERROR_MESSAGE' EXCEPTION_STATUS = HttpStatus.BAD_REQUEST simpleException = GlobalException( status = EXCEPTION_STATUS, message = ERROR_MESSAGE, logMessage = LOG_ERROR_MESSAGE, logResource = resource, logResourceMethod = resource.myMethod ) @EncapsulateItWithGlobalException() def internalFuncionDoesThrowsException(): raise simpleException # #act internalFailure = TestHelper.getRaisedException(internalFuncionDoesThrowsException) # print(internalFailure) # print(internalFailure.logResource) # print(internalFailure.logResourceMethod) #assert assert not RAISED_EXCEPTION == internalFailure, f'not {RAISED_EXCEPTION} == {internalFailure}: {not RAISED_EXCEPTION == internalFailure}' assert not RAISED_EXCEPTION == internalFailure, f'not {RAISED_EXCEPTION} == {internalFailure}: {not RAISED_EXCEPTION == internalFailure}' assert GlobalException == ReflectionHelper.getClass(internalFailure) assert ERROR_MESSAGE == internalFailure.message, f'"{ERROR_MESSAGE}" and "{internalFailure.message}" should be equals' assert EXCEPTION_STATUS == internalFailure.status assert LOG_ERROR_MESSAGE == internalFailure.logMessage, f'"{LOG_ERROR_MESSAGE} == {internalFailure.logMessage}": {LOG_ERROR_MESSAGE == internalFailure.logMessage}' assert resource == internalFailure.logResource, f'"{resource} == {internalFailure.logResource}": {resource == internalFailure.logResource}' assert resource.myMethod == internalFailure.logResourceMethod, f'"{resource.myMethod} == {internalFailure.logResourceMethod}": {resource.myMethod == internalFailure.logResourceMethod}' @Test(environmentVariables={ SettingHelper.ACTIVE_ENVIRONMENT : SettingHelper.LOCAL_ENVIRONMENT, **FULL_LOG_HELPER_SETTINGS } ) def encapsulateItWithGlobalException_withParameters_GlobalException() : #arrange class MyClass: def myMethod(self): ... 
resource = MyClass() ERROR_MESSAGE = 'ERROR_MESSAGE' LOG_ERROR_MESSAGE = 'LOG_ERROR_MESSAGE' EXCEPTION_STATUS = HttpStatus.BAD_REQUEST simpleException = Exception(ERROR_MESSAGE) PERSONALIZED_MESSAGE = 'PERSONALIZED_MESSAGE' PERSONALIZED_STATUS = HttpStatus.UNAUTHORIZED @EncapsulateItWithGlobalException(message=PERSONALIZED_MESSAGE, status=PERSONALIZED_STATUS) def internalFuncionDoesThrowsException(): raise simpleException #act internalFailure = TestHelper.getRaisedException(internalFuncionDoesThrowsException) print(internalFailure) print(internalFailure.logResource) print(internalFailure.logResourceMethod) #assert assert not RAISED_EXCEPTION == internalFailure, f'not {RAISED_EXCEPTION} == {internalFailure}: {not RAISED_EXCEPTION == internalFailure}' assert not RAISED_EXCEPTION == internalFailure, f'not {RAISED_EXCEPTION} == {internalFailure}: {not RAISED_EXCEPTION == internalFailure}' assert GlobalException == ReflectionHelper.getClass(internalFailure) assert PERSONALIZED_MESSAGE == internalFailure.message, f'{PERSONALIZED_MESSAGE} == {internalFailure.message}: {PERSONALIZED_MESSAGE == internalFailure.message}' assert PERSONALIZED_STATUS == internalFailure.status assert ERROR_MESSAGE == internalFailure.logMessage, f'{ERROR_MESSAGE} == {internalFailure.logMessage}: {ERROR_MESSAGE == internalFailure.logMessage}' assert ExceptionHandler.DEFAULT_LOG_RESOURCE == internalFailure.logResource assert type(internalFuncionDoesThrowsException) == type(internalFailure.logResourceMethod)
[ "python_framework.GlobalException", "python_helper.ReflectionHelper.getClass", "python_helper.Test", "python_helper.TestHelper.getRaisedException", "python_framework.EncapsulateItWithGlobalException" ]
[((830, 864), 'python_framework.EncapsulateItWithGlobalException', 'EncapsulateItWithGlobalException', ([], {}), '()\n', (862, 864), False, 'from python_framework import EncapsulateItWithGlobalException, GlobalException, ExceptionHandler, HttpStatus\n'), ((931, 965), 'python_framework.EncapsulateItWithGlobalException', 'EncapsulateItWithGlobalException', ([], {}), '()\n', (963, 965), False, 'from python_framework import EncapsulateItWithGlobalException, GlobalException, ExceptionHandler, HttpStatus\n'), ((1037, 1164), 'python_helper.Test', 'Test', ([], {'environmentVariables': '{SettingHelper.ACTIVE_ENVIRONMENT: SettingHelper.LOCAL_ENVIRONMENT, **\n FULL_LOG_HELPER_SETTINGS}'}), '(environmentVariables={SettingHelper.ACTIVE_ENVIRONMENT: SettingHelper.\n LOCAL_ENVIRONMENT, **FULL_LOG_HELPER_SETTINGS})\n', (1041, 1164), False, 'from python_helper import log, Test, SettingHelper, RandomHelper, ObjectHelper, TestHelper, ReflectionHelper, Constant\n'), ((4119, 4246), 'python_helper.Test', 'Test', ([], {'environmentVariables': '{SettingHelper.ACTIVE_ENVIRONMENT: SettingHelper.LOCAL_ENVIRONMENT, **\n FULL_LOG_HELPER_SETTINGS}'}), '(environmentVariables={SettingHelper.ACTIVE_ENVIRONMENT: SettingHelper.\n LOCAL_ENVIRONMENT, **FULL_LOG_HELPER_SETTINGS})\n', (4123, 4246), False, 'from python_helper import log, Test, SettingHelper, RandomHelper, ObjectHelper, TestHelper, ReflectionHelper, Constant\n'), ((6179, 6306), 'python_helper.Test', 'Test', ([], {'environmentVariables': '{SettingHelper.ACTIVE_ENVIRONMENT: SettingHelper.LOCAL_ENVIRONMENT, **\n FULL_LOG_HELPER_SETTINGS}'}), '(environmentVariables={SettingHelper.ACTIVE_ENVIRONMENT: SettingHelper.\n LOCAL_ENVIRONMENT, **FULL_LOG_HELPER_SETTINGS})\n', (6183, 6306), False, 'from python_helper import log, Test, SettingHelper, RandomHelper, ObjectHelper, TestHelper, ReflectionHelper, Constant\n'), ((1273, 1307), 'python_framework.EncapsulateItWithGlobalException', 'EncapsulateItWithGlobalException', ([], {}), '()\n', (1305, 
1307), False, 'from python_framework import EncapsulateItWithGlobalException, GlobalException, ExceptionHandler, HttpStatus\n'), ((1385, 1419), 'python_framework.EncapsulateItWithGlobalException', 'EncapsulateItWithGlobalException', ([], {}), '()\n', (1417, 1419), False, 'from python_framework import EncapsulateItWithGlobalException, GlobalException, ExceptionHandler, HttpStatus\n'), ((1655, 1720), 'python_helper.TestHelper.getRaisedException', 'TestHelper.getRaisedException', (['externalFuncionDoesThrowsException'], {}), '(externalFuncionDoesThrowsException)\n', (1684, 1720), False, 'from python_helper import log, Test, SettingHelper, RandomHelper, ObjectHelper, TestHelper, ReflectionHelper, Constant\n'), ((1743, 1808), 'python_helper.TestHelper.getRaisedException', 'TestHelper.getRaisedException', (['internalFuncionDoesThrowsException'], {}), '(internalFuncionDoesThrowsException)\n', (1772, 1808), False, 'from python_helper import log, Test, SettingHelper, RandomHelper, ObjectHelper, TestHelper, ReflectionHelper, Constant\n'), ((4586, 4748), 'python_framework.GlobalException', 'GlobalException', ([], {'status': 'EXCEPTION_STATUS', 'message': 'ERROR_MESSAGE', 'logMessage': 'LOG_ERROR_MESSAGE', 'logResource': 'resource', 'logResourceMethod': 'resource.myMethod'}), '(status=EXCEPTION_STATUS, message=ERROR_MESSAGE, logMessage=\n LOG_ERROR_MESSAGE, logResource=resource, logResourceMethod=resource.\n myMethod)\n', (4601, 4748), False, 'from python_framework import EncapsulateItWithGlobalException, GlobalException, ExceptionHandler, HttpStatus\n'), ((4800, 4834), 'python_framework.EncapsulateItWithGlobalException', 'EncapsulateItWithGlobalException', ([], {}), '()\n', (4832, 4834), False, 'from python_framework import EncapsulateItWithGlobalException, GlobalException, ExceptionHandler, HttpStatus\n'), ((4945, 5010), 'python_helper.TestHelper.getRaisedException', 'TestHelper.getRaisedException', (['internalFuncionDoesThrowsException'], {}), 
'(internalFuncionDoesThrowsException)\n', (4974, 5010), False, 'from python_helper import log, Test, SettingHelper, RandomHelper, ObjectHelper, TestHelper, ReflectionHelper, Constant\n'), ((6778, 6873), 'python_framework.EncapsulateItWithGlobalException', 'EncapsulateItWithGlobalException', ([], {'message': 'PERSONALIZED_MESSAGE', 'status': 'PERSONALIZED_STATUS'}), '(message=PERSONALIZED_MESSAGE, status=\n PERSONALIZED_STATUS)\n', (6810, 6873), False, 'from python_framework import EncapsulateItWithGlobalException, GlobalException, ExceptionHandler, HttpStatus\n'), ((6977, 7042), 'python_helper.TestHelper.getRaisedException', 'TestHelper.getRaisedException', (['internalFuncionDoesThrowsException'], {}), '(internalFuncionDoesThrowsException)\n', (7006, 7042), False, 'from python_helper import log, Test, SettingHelper, RandomHelper, ObjectHelper, TestHelper, ReflectionHelper, Constant\n'), ((2672, 2714), 'python_helper.ReflectionHelper.getClass', 'ReflectionHelper.getClass', (['externalFailure'], {}), '(externalFailure)\n', (2697, 2714), False, 'from python_helper import log, Test, SettingHelper, RandomHelper, ObjectHelper, TestHelper, ReflectionHelper, Constant\n'), ((2745, 2787), 'python_helper.ReflectionHelper.getClass', 'ReflectionHelper.getClass', (['internalFailure'], {}), '(internalFailure)\n', (2770, 2787), False, 'from python_helper import log, Test, SettingHelper, RandomHelper, ObjectHelper, TestHelper, ReflectionHelper, Constant\n'), ((5455, 5497), 'python_helper.ReflectionHelper.getClass', 'ReflectionHelper.getClass', (['internalFailure'], {}), '(internalFailure)\n', (5480, 5497), False, 'from python_helper import log, Test, SettingHelper, RandomHelper, ObjectHelper, TestHelper, ReflectionHelper, Constant\n'), ((7481, 7523), 'python_helper.ReflectionHelper.getClass', 'ReflectionHelper.getClass', (['internalFailure'], {}), '(internalFailure)\n', (7506, 7523), False, 'from python_helper import log, Test, SettingHelper, RandomHelper, ObjectHelper, TestHelper, 
ReflectionHelper, Constant\n')]
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-03-30 12:46 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0005_auto_20180330_1813'), ] operations = [ migrations.AlterField( model_name='incubator', name='followers', field=models.ManyToManyField(blank=True, related_name='incubator_follows', to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='incubator', name='incubated_startup', field=models.ManyToManyField(blank=True, related_name='incubators', through='app.IncubatorStartup', to='app.Startup'), ), migrations.AlterField( model_name='incubator', name='members', field=models.ManyToManyField(blank=True, related_name='incubator_members', through='app.IncubatorMember', to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='incubator', name='ratings', field=models.ManyToManyField(blank=True, related_name='rated_incubators', through='app.IncubatorRating', to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='startup', name='members', field=models.ManyToManyField(blank=True, related_name='startup_members', through='app.StartupMember', to=settings.AUTH_USER_MODEL), ), ]
[ "django.db.models.ManyToManyField" ]
[((434, 536), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""incubator_follows"""', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, related_name='incubator_follows', to=\n settings.AUTH_USER_MODEL)\n", (456, 536), False, 'from django.db import migrations, models\n'), ((667, 783), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""incubators"""', 'through': '"""app.IncubatorStartup"""', 'to': '"""app.Startup"""'}), "(blank=True, related_name='incubators', through=\n 'app.IncubatorStartup', to='app.Startup')\n", (689, 783), False, 'from django.db import migrations, models\n'), ((904, 1036), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""incubator_members"""', 'through': '"""app.IncubatorMember"""', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, related_name='incubator_members',\n through='app.IncubatorMember', to=settings.AUTH_USER_MODEL)\n", (926, 1036), False, 'from django.db import migrations, models\n'), ((1158, 1290), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""rated_incubators"""', 'through': '"""app.IncubatorRating"""', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, related_name='rated_incubators', through\n ='app.IncubatorRating', to=settings.AUTH_USER_MODEL)\n", (1180, 1290), False, 'from django.db import migrations, models\n'), ((1409, 1538), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""startup_members"""', 'through': '"""app.StartupMember"""', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, related_name='startup_members', through=\n 'app.StartupMember', to=settings.AUTH_USER_MODEL)\n", (1431, 1538), False, 'from django.db import migrations, models\n')]
# encoding=utf-8 from airtest.core.win import Windows import unittest import numpy import time from testconf import try_remove SNAPSHOT = "win_snapshot.png" class TestWin(unittest.TestCase): @classmethod def setUpClass(cls): w = Windows() w.start_app("calc") time.sleep(1) cls.windows = Windows(title_re=".*计算器.*".decode("utf-8")) def test_snapshot(self): try_remove(SNAPSHOT) result = self.windows.snapshot(filename=SNAPSHOT) self.assertIsInstance(result, numpy.ndarray) try_remove(SNAPSHOT) def test_touch(self): self.windows.touch((11, 11)) def test_swipe(self): self.windows.swipe((11, 11), (100, 100)) @classmethod def tearDownClass(cls): cls.windows.app.kill() if __name__ == '__main__': unittest.main()
[ "unittest.main", "airtest.core.win.Windows", "testconf.try_remove", "time.sleep" ]
[((825, 840), 'unittest.main', 'unittest.main', ([], {}), '()\n', (838, 840), False, 'import unittest\n'), ((250, 259), 'airtest.core.win.Windows', 'Windows', ([], {}), '()\n', (257, 259), False, 'from airtest.core.win import Windows\n'), ((296, 309), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (306, 309), False, 'import time\n'), ((414, 434), 'testconf.try_remove', 'try_remove', (['SNAPSHOT'], {}), '(SNAPSHOT)\n', (424, 434), False, 'from testconf import try_remove\n'), ((554, 574), 'testconf.try_remove', 'try_remove', (['SNAPSHOT'], {}), '(SNAPSHOT)\n', (564, 574), False, 'from testconf import try_remove\n')]
# Import required libraries import cv2 from os.path import os, dirname import tensorflow as tf import numpy as np from tqdm import tqdm import random # List of categories (directories names) CATEGORIES = ["bad_apple", "bad_grape", "bad_pear", "cherry", "good_apple", "good_avocado", "good_grape", "good_pear", "ripe_avocado"] # Level 2 - display information about errors only os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Commented line = use GPU os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # Source folder path main_dir = dirname(os.path.abspath(__file__)) # Paths to image database (train, test, all) training_dir = os.path.join(main_dir, 'database', 'training') testing_dir = os.path.join(main_dir, 'database', 'testing') all_dir = os.path.join(main_dir, 'database', 'all') # Currently used path DATADIR = testing_dir # Load all images and save them to array variable for category in CATEGORIES: path = os.path.join(DATADIR, category) for img in os.listdir(path): img_array = cv2.imread(os.path.join(path, img)) break break # Variable to store training data testing_data = [] # Function that converts previously created data array to a test data array def create_testing_data(): for category in CATEGORIES: path = os.path.join(DATADIR, category) class_num = CATEGORIES.index(category) for img in tqdm(os.listdir(path)): try: img_array = cv2.imread(os.path.join(path, img)) testing_data.append([img_array, class_num]) except Exception as e: pass # Call the function create_testing_data() # Shuffle test data random.shuffle(testing_data) # Create array variables to store objects and labels X = [] y = [] # Save objects and labels to arrays for features, label in testing_data: X.append(features) y.append(label) # Convert arrays to NumPy matrices X = np.array(X) y = np.array(y) # Change the value range from 0-255 to 0-1 X = X / 255.0 # Load the trained model from given path keras_model_path = os.path.join(main_dir, 'models', 'test') model = tf.keras.models.load_model(keras_model_path) # Display 
model summary model.summary() # Display information about the effectiveness of test data classification loss, acc = model.evaluate(X, y, verbose=2) print('Accuracy: {:5.2f}%'.format(100 * acc)) print('Loss: {:5.2f}'.format(loss))
[ "random.shuffle", "os.path.os.listdir", "numpy.array", "tensorflow.keras.models.load_model", "os.path.os.path.join", "os.path.os.path.abspath" ]
[((618, 664), 'os.path.os.path.join', 'os.path.join', (['main_dir', '"""database"""', '"""training"""'], {}), "(main_dir, 'database', 'training')\n", (630, 664), False, 'from os.path import os, dirname\n'), ((679, 724), 'os.path.os.path.join', 'os.path.join', (['main_dir', '"""database"""', '"""testing"""'], {}), "(main_dir, 'database', 'testing')\n", (691, 724), False, 'from os.path import os, dirname\n'), ((735, 776), 'os.path.os.path.join', 'os.path.join', (['main_dir', '"""database"""', '"""all"""'], {}), "(main_dir, 'database', 'all')\n", (747, 776), False, 'from os.path import os, dirname\n'), ((1646, 1674), 'random.shuffle', 'random.shuffle', (['testing_data'], {}), '(testing_data)\n', (1660, 1674), False, 'import random\n'), ((1900, 1911), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1908, 1911), True, 'import numpy as np\n'), ((1916, 1927), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1924, 1927), True, 'import numpy as np\n'), ((2047, 2087), 'os.path.os.path.join', 'os.path.join', (['main_dir', '"""models"""', '"""test"""'], {}), "(main_dir, 'models', 'test')\n", (2059, 2087), False, 'from os.path import os, dirname\n'), ((2096, 2140), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['keras_model_path'], {}), '(keras_model_path)\n', (2122, 2140), True, 'import tensorflow as tf\n'), ((530, 555), 'os.path.os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (545, 555), False, 'from os.path import os, dirname\n'), ((912, 943), 'os.path.os.path.join', 'os.path.join', (['DATADIR', 'category'], {}), '(DATADIR, category)\n', (924, 943), False, 'from os.path import os, dirname\n'), ((959, 975), 'os.path.os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (969, 975), False, 'from os.path import os, dirname\n'), ((1262, 1293), 'os.path.os.path.join', 'os.path.join', (['DATADIR', 'category'], {}), '(DATADIR, category)\n', (1274, 1293), False, 'from os.path import os, dirname\n'), ((1008, 1031), 
'os.path.os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (1020, 1031), False, 'from os.path import os, dirname\n'), ((1365, 1381), 'os.path.os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1375, 1381), False, 'from os.path import os, dirname\n'), ((1440, 1463), 'os.path.os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (1452, 1463), False, 'from os.path import os, dirname\n')]
"""Covers import of data downloaded from the `Meadows online behavior platform <https://meadows-research.com/>`_. For information on available file types see the meadows `documentation on downloads <https://meadows-research.com/documentation\ /researcher/downloads/>`_. """ from os.path import basename import numpy from scipy.io import loadmat from pyrsa.rdm.rdms import RDMs def load_rdms(fpath, sort=True): """Read a Meadows results file and return any RDMs as a pyrsa object Args: fpath (str): path to .mat Meadows results file sort (bool): whether to sort the RDM based on the stimulus names Raises: ValueError: Will raise an error if the file is missing an expected variable. This can happen if the file does not contain MA task data. Returns: RDMs: All rdms found in the data file as an RDMs object """ info = extract_filename_segments(fpath) data = loadmat(fpath) if info['participant_scope'] == 'single': for var in ('stimuli', 'rdmutv'): if var not in data: raise ValueError(f'File missing variable: {var}') utvs = data['rdmutv'] stimuli_fnames = data['stimuli'] pnames = [info['participant']] else: stim_vars = [v for v in data.keys() if v[:7] == 'stimuli'] stimuli_fnames = data[stim_vars[0]] pnames = ['-'.join(v.split('_')[1:]) for v in stim_vars] utv_vars = ['rdmutv_' + p.replace('-', '_') for p in pnames] utvs = numpy.squeeze(numpy.stack([data[v] for v in utv_vars])) desc_info_keys = ( 'participant', 'task_index', 'task_name', 'experiment_name' ) conds = [f.split('.')[0] for f in stimuli_fnames] rdms = RDMs( utvs, dissimilarity_measure='euclidean', descriptors={k: info[k] for k in desc_info_keys if k in info}, rdm_descriptors=dict(participants=pnames), pattern_descriptors=dict(conds=conds), ) if sort: rdms.sort_by(conds='alpha') return rdms def extract_filename_segments(fpath): """Get information from the name of a downloaded results file Will determine: * participant_scope: 'single' or 'multiple', how many participant sessions this file covers. 
* task_scope: 'single' or 'multiple', how many experiment tasks this file covers. * participant: the Meadows nickname of the participant, if this is a single participation file. * task_index: the 1-based index of the task in the experiment, if this is a single participant file. * task_name: the name of the task in the experiment, if this is not a single participant file. * version: the experiment version as a string. * experiment_name: name of the experiment on Meadows. * structure: the structure of the data contained, one of 'tree', 'events', '1D', '2D', etc. * filetype: the file extension and file format used to serialize the data. Args: fpath (str): File system path to downloaded file Returns: dict: Dictionary with the fields described above. """ fname, ext = basename(fpath).split('.') segments = fname.split('_') info = dict( task_scope='single', version=segments[3].replace('v', ''), experiment_name=segments[1], structure=segments[-1], filetype=ext ) if segments[-2].isdigit(): info['participant_scope'] = 'single' info['participant'] = segments[-3] info['task_index'] = int(segments[-2]) else: info['participant_scope'] = 'multiple' info['task_name'] = segments[-2] return info
[ "numpy.stack", "scipy.io.loadmat", "os.path.basename" ]
[((948, 962), 'scipy.io.loadmat', 'loadmat', (['fpath'], {}), '(fpath)\n', (955, 962), False, 'from scipy.io import loadmat\n'), ((1543, 1583), 'numpy.stack', 'numpy.stack', (['[data[v] for v in utv_vars]'], {}), '([data[v] for v in utv_vars])\n', (1554, 1583), False, 'import numpy\n'), ((3258, 3273), 'os.path.basename', 'basename', (['fpath'], {}), '(fpath)\n', (3266, 3273), False, 'from os.path import basename\n')]
import unittest import torch from torchvision.models.resnet import BasicBlock, Bottleneck from nuscenes.prediction.models.backbone import ResNetBackbone, MobileNetBackbone class TestBackBones(unittest.TestCase): def count_layers(self, model): if isinstance(model[4][0], BasicBlock): n_convs = 2 elif isinstance(model[4][0], Bottleneck): n_convs = 3 else: raise ValueError("Backbone layer block not supported!") return sum([len(model[i]) for i in range(4, 8)]) * n_convs + 2 def test_resnet(self): rn_18 = ResNetBackbone('resnet18') rn_34 = ResNetBackbone('resnet34') rn_50 = ResNetBackbone('resnet50') rn_101 = ResNetBackbone('resnet101') rn_152 = ResNetBackbone('resnet152') tensor = torch.ones((1, 3, 100, 100)) self.assertEqual(rn_18(tensor).shape[1], 512) self.assertEqual(rn_34(tensor).shape[1], 512) self.assertEqual(rn_50(tensor).shape[1], 2048) self.assertEqual(rn_101(tensor).shape[1], 2048) self.assertAlmostEqual(rn_152(tensor).shape[1], 2048) self.assertEqual(self.count_layers(list(rn_18.backbone.children())), 18) self.assertEqual(self.count_layers(list(rn_34.backbone.children())), 34) self.assertEqual(self.count_layers(list(rn_50.backbone.children())), 50) self.assertEqual(self.count_layers(list(rn_101.backbone.children())), 101) self.assertEqual(self.count_layers(list(rn_152.backbone.children())), 152) with self.assertRaises(ValueError): ResNetBackbone('resnet51') def test_mobilenet(self): mobilenet = MobileNetBackbone('mobilenet_v2') tensor = torch.ones((1, 3, 100, 100)) self.assertEqual(mobilenet(tensor).shape[1], 1280)
[ "nuscenes.prediction.models.backbone.MobileNetBackbone", "nuscenes.prediction.models.backbone.ResNetBackbone", "torch.ones" ]
[((597, 623), 'nuscenes.prediction.models.backbone.ResNetBackbone', 'ResNetBackbone', (['"""resnet18"""'], {}), "('resnet18')\n", (611, 623), False, 'from nuscenes.prediction.models.backbone import ResNetBackbone, MobileNetBackbone\n'), ((640, 666), 'nuscenes.prediction.models.backbone.ResNetBackbone', 'ResNetBackbone', (['"""resnet34"""'], {}), "('resnet34')\n", (654, 666), False, 'from nuscenes.prediction.models.backbone import ResNetBackbone, MobileNetBackbone\n'), ((683, 709), 'nuscenes.prediction.models.backbone.ResNetBackbone', 'ResNetBackbone', (['"""resnet50"""'], {}), "('resnet50')\n", (697, 709), False, 'from nuscenes.prediction.models.backbone import ResNetBackbone, MobileNetBackbone\n'), ((727, 754), 'nuscenes.prediction.models.backbone.ResNetBackbone', 'ResNetBackbone', (['"""resnet101"""'], {}), "('resnet101')\n", (741, 754), False, 'from nuscenes.prediction.models.backbone import ResNetBackbone, MobileNetBackbone\n'), ((772, 799), 'nuscenes.prediction.models.backbone.ResNetBackbone', 'ResNetBackbone', (['"""resnet152"""'], {}), "('resnet152')\n", (786, 799), False, 'from nuscenes.prediction.models.backbone import ResNetBackbone, MobileNetBackbone\n'), ((818, 846), 'torch.ones', 'torch.ones', (['(1, 3, 100, 100)'], {}), '((1, 3, 100, 100))\n', (828, 846), False, 'import torch\n'), ((1675, 1708), 'nuscenes.prediction.models.backbone.MobileNetBackbone', 'MobileNetBackbone', (['"""mobilenet_v2"""'], {}), "('mobilenet_v2')\n", (1692, 1708), False, 'from nuscenes.prediction.models.backbone import ResNetBackbone, MobileNetBackbone\n'), ((1727, 1755), 'torch.ones', 'torch.ones', (['(1, 3, 100, 100)'], {}), '((1, 3, 100, 100))\n', (1737, 1755), False, 'import torch\n'), ((1596, 1622), 'nuscenes.prediction.models.backbone.ResNetBackbone', 'ResNetBackbone', (['"""resnet51"""'], {}), "('resnet51')\n", (1610, 1622), False, 'from nuscenes.prediction.models.backbone import ResNetBackbone, MobileNetBackbone\n')]
#!/usr/bin/python3 # -*- coding: utf-8 -*- # @File : Qrbar_test.py import cv2 import numpy as np from pyzbar.pyzbar import decode img = cv2.imread('qrcode.png') for barcode in decode(img): print(barcode.data.decode('utf-8')) print(barcode.data) pts = np.array([barcode.polygon], np.int32) pts = pts.reshape((-1, 1, 2)) print(pts) print(barcode.rect)
[ "pyzbar.pyzbar.decode", "numpy.array", "cv2.imread" ]
[((141, 165), 'cv2.imread', 'cv2.imread', (['"""qrcode.png"""'], {}), "('qrcode.png')\n", (151, 165), False, 'import cv2\n'), ((181, 192), 'pyzbar.pyzbar.decode', 'decode', (['img'], {}), '(img)\n', (187, 192), False, 'from pyzbar.pyzbar import decode\n'), ((268, 305), 'numpy.array', 'np.array', (['[barcode.polygon]', 'np.int32'], {}), '([barcode.polygon], np.int32)\n', (276, 305), True, 'import numpy as np\n')]
#!/usr/bin/env python """ """ from __future__ import print_function import argparse import sys from . import common from . import helper from . import vcs_tool PARAMS = {} PARAMS['this_script'] = common.get_script_name_from_filename(__file__) def setup_and_dispatch(): parser = argparse.ArgumentParser( description=common.format_title(PARAMS['this_script']), formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) parser.add_argument('--compile', action='store_true', help='Compile and scrub requirements') parser.add_argument('--commit', action='store_true', help='Commit to configured VCS') try: args, extra_args = parser.parse_known_args() except BaseException as e: raise e print(common.format_title(PARAMS['this_script'])) print() if sum(map(bool, [args.compile, args.commit])) > 1: common.exit_with_error('Error: Only one top-level option may be specified', parser=parser) if args.compile: helper.main(scriptname=PARAMS['this_script'], args=extra_args) elif args.commit: vcs_tool.main(scriptname=PARAMS['this_script'], args=extra_args) else: parser.print_help(sys.stderr) def main(): setup_and_dispatch() sys.exit(0) if __name__ == "__main__": main()
[ "sys.exit" ]
[((1295, 1306), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1303, 1306), False, 'import sys\n')]
from django.db import models # from themall.models import Customer # Create your models here. class Seller(models.Model): email = models.OneToOneField('themall.Customer', on_delete=models.CASCADE, to_field='email') store_name = models.CharField(max_length=100) slug = models.SlugField(max_length=100) description = models.TextField(max_length=1000) def __str__(self): return self.email.__str__()
[ "django.db.models.OneToOneField", "django.db.models.TextField", "django.db.models.SlugField", "django.db.models.CharField" ]
[((136, 225), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""themall.Customer"""'], {'on_delete': 'models.CASCADE', 'to_field': '"""email"""'}), "('themall.Customer', on_delete=models.CASCADE, to_field\n ='email')\n", (156, 225), False, 'from django.db import models\n'), ((236, 268), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (252, 268), False, 'from django.db import models\n'), ((278, 310), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (294, 310), False, 'from django.db import models\n'), ((327, 360), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(1000)'}), '(max_length=1000)\n', (343, 360), False, 'from django.db import models\n')]
# Copyright (c) 2017-2021, <NAME>. All rights reserved. # For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md import time import pytest from harness import log from harness.dom_parser_utils import * from harness.interface.defs import key_codes from bt_fixtures import * @pytest.mark.rt1051 @pytest.mark.usefixtures("bt_all_devices") @pytest.mark.usefixtures("bt_reset") @pytest.mark.usefixtures("bt_main_window") @pytest.mark.usefixtures("phone_in_desktop") @pytest.mark.usefixtures("phone_unlocked") @pytest.mark.skipif("not config.getvalue('--bt_device')", reason='--bt_device was not specified') def test_bt_pairing_hmi(harness, bt_device): if not bt_device: return bt_device_name = bt_device current_window_content = get_window_content(harness, 1) is_device_in_history = item_contains_recursively(current_window_content, 'TextValue', bt_device_name ) if not is_device_in_history : log.info("Device {} not in all devices history, scanning...".format(bt_device_name)) harness.connection.send_key_code(key_codes["left"]) max_try_count = 5 for _ in range(max_try_count, 0, -1) : time.sleep(2) current_window_content = get_window_content(harness, 1) is_device_in_history = item_contains_recursively(current_window_content, 'TextValue', bt_device_name ) if is_device_in_history: break log.info("Device {} not found, retrying...".format(bt_device_name)) assert max_try_count current_window_content = get_window_content(harness, 1) parent_of_list_items = find_parent(current_window_content, 'ListItem') steps_to_navigate_down = get_child_number_that_contains_recursively(parent_of_list_items, [('TextValue', bt_device_name)]) assert steps_to_navigate_down > -1 log.info("Navigating to the {} device, {} down".format(bt_device_name, steps_to_navigate_down ) ) for _ in range(steps_to_navigate_down) : harness.connection.send_key_code(key_codes["down"]) log.info("Checking if device {} is focused...".format(bt_device_name)) current_window_content = get_window_content(harness, 1) 
parent_of_list_items = find_parent(current_window_content, 'ListItem') assert item_has_child_that_contains_recursively( parent_of_list_items, [('TextValue', bt_device_name), ('Focus', True)] )
[ "pytest.mark.usefixtures", "time.sleep", "pytest.mark.skipif" ]
[((306, 347), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""bt_all_devices"""'], {}), "('bt_all_devices')\n", (329, 347), False, 'import pytest\n'), ((349, 384), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""bt_reset"""'], {}), "('bt_reset')\n", (372, 384), False, 'import pytest\n'), ((386, 427), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""bt_main_window"""'], {}), "('bt_main_window')\n", (409, 427), False, 'import pytest\n'), ((429, 472), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""phone_in_desktop"""'], {}), "('phone_in_desktop')\n", (452, 472), False, 'import pytest\n'), ((474, 515), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""phone_unlocked"""'], {}), "('phone_unlocked')\n", (497, 515), False, 'import pytest\n'), ((517, 618), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""not config.getvalue(\'--bt_device\')"""'], {'reason': '"""--bt_device was not specified"""'}), '("not config.getvalue(\'--bt_device\')", reason=\n \'--bt_device was not specified\')\n', (535, 618), False, 'import pytest\n'), ((1168, 1181), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1178, 1181), False, 'import time\n')]
import smtplib, ssl, os from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart from .html_template import emailHtml from .text_template import emailText port = 465 context = ssl.create_default_context() def sendEmail(emailData): adminUser = os.getenv("ADMIN_USERNAME") password = os.getenv("<PASSWORD>") sender = emailData["tutor"] receivers = emailData["recipient"] message = MIMEMultipart("alternative") message["Subject"] = "Tutor Confirmation" message["From"] = adminUser message["To"] = receivers message["Cc"] = sender, "<EMAIL>" text = emailText(emailData) html = emailHtml(emailData) part1 = MIMEText(text, "plain") part2 = MIMEText(html, "html") message.attach(part1) message.attach(part2) try: with smtplib.SMTP_SSL('smtp.gmail.com',port,context=context) as server: server.login(adminUser,password) server.sendmail(adminUser, receivers, message.as_string()) print("Successfully sent email") except smtplib.SMTPException: print("Error: unable to send email")
[ "os.getenv", "smtplib.SMTP_SSL", "ssl.create_default_context", "email.mime.multipart.MIMEMultipart", "email.mime.text.MIMEText" ]
[((206, 234), 'ssl.create_default_context', 'ssl.create_default_context', ([], {}), '()\n', (232, 234), False, 'import smtplib, ssl, os\n'), ((278, 305), 'os.getenv', 'os.getenv', (['"""ADMIN_USERNAME"""'], {}), "('ADMIN_USERNAME')\n", (287, 305), False, 'import smtplib, ssl, os\n'), ((321, 344), 'os.getenv', 'os.getenv', (['"""<PASSWORD>"""'], {}), "('<PASSWORD>')\n", (330, 344), False, 'import smtplib, ssl, os\n'), ((431, 459), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', (['"""alternative"""'], {}), "('alternative')\n", (444, 459), False, 'from email.mime.multipart import MIMEMultipart\n'), ((684, 707), 'email.mime.text.MIMEText', 'MIMEText', (['text', '"""plain"""'], {}), "(text, 'plain')\n", (692, 707), False, 'from email.mime.text import MIMEText\n'), ((720, 742), 'email.mime.text.MIMEText', 'MIMEText', (['html', '"""html"""'], {}), "(html, 'html')\n", (728, 742), False, 'from email.mime.text import MIMEText\n'), ((823, 880), 'smtplib.SMTP_SSL', 'smtplib.SMTP_SSL', (['"""smtp.gmail.com"""', 'port'], {'context': 'context'}), "('smtp.gmail.com', port, context=context)\n", (839, 880), False, 'import smtplib, ssl, os\n')]
import requests import os import json import datetime ''' Pulls a dbml file from the API. User must manually add the file id, found in the 'response_ids.json' file generated from dbml_post_to_api.py ''' url='http://ec2-54-167-67-34.compute-1.amazonaws.com/api/dbmls' #url of the API id = '6192b1f31c2a512293fea940' #id of the file, taken from 'response_ids.json' file generated from dbml_post_to_api.py res = requests.get(f'{url}/{id}') dbml_file = json.loads(res.json()['contents'])
[ "requests.get" ]
[((422, 449), 'requests.get', 'requests.get', (['f"""{url}/{id}"""'], {}), "(f'{url}/{id}')\n", (434, 449), False, 'import requests\n')]
from openpyxl import load_workbook def getRowCount(file): wb = load_workbook(file) sheet = wb.active return sheet.max_row def getColumnCount(file): wb = load_workbook(file) sheet = wb.active return sheet.max_column def getCellData(file, cell): wb = load_workbook(file) sheet = wb.active return sheet.cell(row=cell[0], column=cell[1]).value def setCellData(file, cell, data): wb = load_workbook(file) sheet = wb.active sheet.cell(row=cell[0], column=cell[1]).value = data wb.save(file)
[ "openpyxl.load_workbook" ]
[((69, 88), 'openpyxl.load_workbook', 'load_workbook', (['file'], {}), '(file)\n', (82, 88), False, 'from openpyxl import load_workbook\n'), ((172, 191), 'openpyxl.load_workbook', 'load_workbook', (['file'], {}), '(file)\n', (185, 191), False, 'from openpyxl import load_workbook\n'), ((281, 300), 'openpyxl.load_workbook', 'load_workbook', (['file'], {}), '(file)\n', (294, 300), False, 'from openpyxl import load_workbook\n'), ((425, 444), 'openpyxl.load_workbook', 'load_workbook', (['file'], {}), '(file)\n', (438, 444), False, 'from openpyxl import load_workbook\n')]
# pommerman/cli/run_battle.py # pommerman/agents/TensorFlowAgent/pit.py import atexit from datetime import datetime import os import random import sys import time import argparse import numpy as np from pommerman import helpers, make from TensorFlowAgent import TensorFlowAgent from pommerman import utility import tensorflow as tf class Pit(object): def __init__(self, tfa, saver, game_nums=2): self.tfa = tfa self.saver = saver self.game_nums = game_nums def launch_games(self, sess, render=True): sess.run(tf.global_variables_initializer()) self.tfa.restore_weigths(sess, self.saver) env = self.tfa.getEnv() reward_board = np.zeros((1, 4)) for i in range(self.game_nums): curr_state = env.reset() while True: if render: env.render() all_actions = env.act(curr_state) next_state, reward, terminal, _ = env.step(all_actions) if terminal: reward_board += np.array(reward) print("Game #{0}, rewards = {1}, reward agent = {2}".format(i, "".join(str(i) + " " for i in reward), reward[self.tfa.agent_id])) break def main(args): tf.reset_default_graph() with tf.Session() as sess: tfa = TensorFlowAgent(name="TFA", args=args, sess=sess) saver = tf.train.Saver(allow_empty=True) pit = Pit(tfa, saver, game_nums=2) pit.launch_games(sess) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--environment", type=str, default="pommerman") parser.add_argument("--policy", type=str, default="MlpPolicy") parser.add_argument("--checkpoint_dir", type=str, default="./save_model") parser.add_argument("--a_learning_rate", type=float, default=0.0001) parser.add_argument("--c_learning_rate", type=float, default=0.0002) parser.add_argument('--summary_dir', type=str, default='./summary_log') parser.add_argument("--cliprange", type=float, default=0.2) parser.add_argument("--batch_size", type=int, default=32) parser.add_argument("--training_step", type=int, default=10) parser.add_argument("--gamma", type=float, default=0.9) parser.add_argument("--train", type=str, 
default="False", choices=["False"]) parser.add_argument("--type", type=str, default="Simple", choices=["Simple, CNN"]) args = parser.parse_args() main(args)
[ "tensorflow.reset_default_graph", "argparse.ArgumentParser", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.global_variables_initializer", "numpy.array", "TensorFlowAgent.TensorFlowAgent", "numpy.zeros" ]
[((1290, 1314), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1312, 1314), True, 'import tensorflow as tf\n'), ((1580, 1605), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1603, 1605), False, 'import argparse\n'), ((701, 717), 'numpy.zeros', 'np.zeros', (['(1, 4)'], {}), '((1, 4))\n', (709, 717), True, 'import numpy as np\n'), ((1325, 1337), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1335, 1337), True, 'import tensorflow as tf\n'), ((1362, 1411), 'TensorFlowAgent.TensorFlowAgent', 'TensorFlowAgent', ([], {'name': '"""TFA"""', 'args': 'args', 'sess': 'sess'}), "(name='TFA', args=args, sess=sess)\n", (1377, 1411), False, 'from TensorFlowAgent import TensorFlowAgent\n'), ((1428, 1460), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'allow_empty': '(True)'}), '(allow_empty=True)\n', (1442, 1460), True, 'import tensorflow as tf\n'), ((559, 592), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (590, 592), True, 'import tensorflow as tf\n'), ((1073, 1089), 'numpy.array', 'np.array', (['reward'], {}), '(reward)\n', (1081, 1089), True, 'import numpy as np\n')]
import numpy as np
import tensorflow as tf
import unittest

from xcenternet.model.evaluation.overlap import compute_overlap
from xcenternet.model.evaluation.mean_average_precision import MAP


class TestMeanAveragePrecision(unittest.TestCase):
    """Unit tests for pairwise box overlap (IoU) and the MAP metric.

    Fixtures cover two images with three ground-truth boxes each; the
    per-prediction comments record the hand-computed IoU against the
    ground truth, and ``result_1``/``result_both`` hold the expected
    mAP numbers for one image and for both images respectively.
    """

    def setUp(self):
        # Ground-truth boxes, shape (2 images, 3 boxes, 4 coords: y1,x1,y2,x2).
        # Exact coordinate layout is assumed from the overlap tests below —
        # TODO confirm against compute_overlap's convention.
        self.map_bboxes = np.array(
            [
                [[20, 10, 80, 60], [10, 40, 40, 90], [0, 0, 100, 100]],
                [[0, 0, 10, 10], [20, 20, 40, 90], [80, 20, 100, 50]],
            ],
            dtype=np.float64,
        )
        # Class label for each ground-truth box.
        self.map_labels = np.array([[0, 0, 1], [0, 0, 0]])
        # Predictions, shape (2 images, 5 preds, 6): box coords, score, class.
        self.map_predictions = np.array(
            [
                [
                    [10, 40, 40, 90, 0.1, 0],  # overlap 1.00 with bbox #2, low prob
                    [60, 10, 90, 60, 0.5, 0],  # overlap 0.29 with bbox #1
                    [10, 30, 50, 90, 0.7, 0],  # overlap 0.625 with bbox #2
                    [0, 0, 100, 90, 0.7, 1],  # overlap 0.9 with bbox #3
                    [0, 0, 100, 80, 0.7, 1],  # overlap 0.8 with bbox #3
                ],
                [
                    [20, 20, 30, 50, 0.6, 0],  # 0.21 overlap with #2
                    [2, 0, 10, 11, 0.8, 0],  # overlap with #1
                    [0, 2, 14, 10, 0.9, 0],  # overlap with #1
                    [0, 0, 10, 10, 0.7, 1],  # no ground truth for 1
                    [80, 20, 100, 50, 0.1, 1],  # no ground truth for 1
                ],
            ],
            dtype=np.float32,
        )
        # Validity mask per ground-truth box (all boxes valid here).
        self.map_masks = np.array([[1, 1, 1], [1, 1, 1]], dtype=np.float32)
        # Expected metric values after processing image 0 only.
        self.result_1 = {"overall": 3 / 4, "weighted": 2 / 3, "per_class": {0: (0.5, 2), 1: (1.0, 1)}}
        # Expected metric values after processing both images.
        self.result_both = {"overall": 2 / 3, "weighted": 4 / 9, "per_class": {0: (1 / 3, 5), 1: (1.0, 1)}}

    def test_compute_overlap(self):
        """IoU values for hand-checkable box pairs, including zero overlap."""
        boxes1 = np.array([[10, 10, 30, 50], [10, 10, 30, 30]], dtype=np.float64)
        boxes2 = np.array([[10, 10, 30, 50], [10, 10, 40, 40], [100, 70, 110, 90]], dtype=np.float64)
        overlap = compute_overlap(boxes1, boxes2)
        self.assertAlmostEqual(1.0, overlap[0][0])
        self.assertAlmostEqual(6 / 11, overlap[0][1])
        self.assertAlmostEqual(0.0, overlap[0][2])
        self.assertAlmostEqual(0.5, overlap[1][0])
        self.assertAlmostEqual(4 / 9, overlap[1][1])
        self.assertAlmostEqual(0.0, overlap[1][2])

    def test_map_update_one(self):
        """A single update_state call yields the single-image expectations."""
        mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
        mean_average_precision.update_state(self.map_predictions[0], self.map_bboxes[0], self.map_labels[0])
        result = mean_average_precision.result()
        self._assert_map(result, self.result_1)

    def test_map_update_both(self):
        """Two sequential update_state calls accumulate across images."""
        mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
        mean_average_precision.update_state(self.map_predictions[0], self.map_bboxes[0], self.map_labels[0])
        mean_average_precision.update_state(self.map_predictions[1], self.map_bboxes[1], self.map_labels[1])
        result = mean_average_precision.result()
        self._assert_map(result, self.result_both)

    def test_map_update_batch_one(self):
        """Batched update with a batch of one matches the single-image result."""
        mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
        mean_average_precision.update_state_batch(
            tf.constant([self.map_predictions[0]]),
            tf.constant([self.map_bboxes[0]]),
            tf.constant([self.map_labels[0]]),
            tf.constant([self.map_masks[0]]),
        )
        result = mean_average_precision.result()
        self._assert_map(result, self.result_1)

    def test_map_update_batch_both(self):
        """Batched update with both images matches the accumulated result."""
        mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
        mean_average_precision.update_state_batch(
            tf.constant(self.map_predictions),
            tf.constant(self.map_bboxes),
            tf.constant(self.map_labels),
            tf.constant(self.map_masks),
        )
        result = mean_average_precision.result()
        self._assert_map(result, self.result_both)

    def _assert_map(self, first, second):
        """Compare two mAP result dicts field by field (overall, weighted, per-class)."""
        self.assertAlmostEqual(first["overall"], second["overall"])
        self.assertAlmostEqual(first["weighted"], second["weighted"])
        self.assertAlmostEqual(first["per_class"][0][0], second["per_class"][0][0])  # mAP
        self.assertAlmostEqual(first["per_class"][0][1], second["per_class"][0][1])  # num objects
        self.assertAlmostEqual(first["per_class"][1][0], second["per_class"][1][0])  # mAP
        self.assertAlmostEqual(first["per_class"][1][1], second["per_class"][1][1])  # num objects


if __name__ == "__main__":
    unittest.main()
[ "numpy.array", "tensorflow.constant", "unittest.main", "xcenternet.model.evaluation.overlap.compute_overlap", "xcenternet.model.evaluation.mean_average_precision.MAP" ]
[((4629, 4644), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4642, 4644), False, 'import unittest\n'), ((291, 435), 'numpy.array', 'np.array', (['[[[20, 10, 80, 60], [10, 40, 40, 90], [0, 0, 100, 100]], [[0, 0, 10, 10], [\n 20, 20, 40, 90], [80, 20, 100, 50]]]'], {'dtype': 'np.float64'}), '([[[20, 10, 80, 60], [10, 40, 40, 90], [0, 0, 100, 100]], [[0, 0, \n 10, 10], [20, 20, 40, 90], [80, 20, 100, 50]]], dtype=np.float64)\n', (299, 435), True, 'import numpy as np\n'), ((539, 571), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, 0, 0]]'], {}), '([[0, 0, 1], [0, 0, 0]])\n', (547, 571), True, 'import numpy as np\n'), ((603, 901), 'numpy.array', 'np.array', (['[[[10, 40, 40, 90, 0.1, 0], [60, 10, 90, 60, 0.5, 0], [10, 30, 50, 90, 0.7,\n 0], [0, 0, 100, 90, 0.7, 1], [0, 0, 100, 80, 0.7, 1]], [[20, 20, 30, 50,\n 0.6, 0], [2, 0, 10, 11, 0.8, 0], [0, 2, 14, 10, 0.9, 0], [0, 0, 10, 10,\n 0.7, 1], [80, 20, 100, 50, 0.1, 1]]]'], {'dtype': 'np.float32'}), '([[[10, 40, 40, 90, 0.1, 0], [60, 10, 90, 60, 0.5, 0], [10, 30, 50,\n 90, 0.7, 0], [0, 0, 100, 90, 0.7, 1], [0, 0, 100, 80, 0.7, 1]], [[20, \n 20, 30, 50, 0.6, 0], [2, 0, 10, 11, 0.8, 0], [0, 2, 14, 10, 0.9, 0], [0,\n 0, 10, 10, 0.7, 1], [80, 20, 100, 50, 0.1, 1]]], dtype=np.float32)\n', (611, 901), True, 'import numpy as np\n'), ((1500, 1550), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 1, 1]]'], {'dtype': 'np.float32'}), '([[1, 1, 1], [1, 1, 1]], dtype=np.float32)\n', (1508, 1550), True, 'import numpy as np\n'), ((1817, 1881), 'numpy.array', 'np.array', (['[[10, 10, 30, 50], [10, 10, 30, 30]]'], {'dtype': 'np.float64'}), '([[10, 10, 30, 50], [10, 10, 30, 30]], dtype=np.float64)\n', (1825, 1881), True, 'import numpy as np\n'), ((1899, 1988), 'numpy.array', 'np.array', (['[[10, 10, 30, 50], [10, 10, 40, 40], [100, 70, 110, 90]]'], {'dtype': 'np.float64'}), '([[10, 10, 30, 50], [10, 10, 40, 40], [100, 70, 110, 90]], dtype=np\n .float64)\n', (1907, 1988), True, 'import numpy as np\n'), ((2003, 2034), 
'xcenternet.model.evaluation.overlap.compute_overlap', 'compute_overlap', (['boxes1', 'boxes2'], {}), '(boxes1, boxes2)\n', (2018, 2034), False, 'from xcenternet.model.evaluation.overlap import compute_overlap\n'), ((2415, 2461), 'xcenternet.model.evaluation.mean_average_precision.MAP', 'MAP', (['(2)'], {'iou_threshold': '(0.5)', 'score_threshold': '(0.3)'}), '(2, iou_threshold=0.5, score_threshold=0.3)\n', (2418, 2461), False, 'from xcenternet.model.evaluation.mean_average_precision import MAP\n'), ((2739, 2785), 'xcenternet.model.evaluation.mean_average_precision.MAP', 'MAP', (['(2)'], {'iou_threshold': '(0.5)', 'score_threshold': '(0.3)'}), '(2, iou_threshold=0.5, score_threshold=0.3)\n', (2742, 2785), False, 'from xcenternet.model.evaluation.mean_average_precision import MAP\n'), ((3180, 3226), 'xcenternet.model.evaluation.mean_average_precision.MAP', 'MAP', (['(2)'], {'iou_threshold': '(0.5)', 'score_threshold': '(0.3)'}), '(2, iou_threshold=0.5, score_threshold=0.3)\n', (3183, 3226), False, 'from xcenternet.model.evaluation.mean_average_precision import MAP\n'), ((3654, 3700), 'xcenternet.model.evaluation.mean_average_precision.MAP', 'MAP', (['(2)'], {'iou_threshold': '(0.5)', 'score_threshold': '(0.3)'}), '(2, iou_threshold=0.5, score_threshold=0.3)\n', (3657, 3700), False, 'from xcenternet.model.evaluation.mean_average_precision import MAP\n'), ((3290, 3328), 'tensorflow.constant', 'tf.constant', (['[self.map_predictions[0]]'], {}), '([self.map_predictions[0]])\n', (3301, 3328), True, 'import tensorflow as tf\n'), ((3342, 3375), 'tensorflow.constant', 'tf.constant', (['[self.map_bboxes[0]]'], {}), '([self.map_bboxes[0]])\n', (3353, 3375), True, 'import tensorflow as tf\n'), ((3389, 3422), 'tensorflow.constant', 'tf.constant', (['[self.map_labels[0]]'], {}), '([self.map_labels[0]])\n', (3400, 3422), True, 'import tensorflow as tf\n'), ((3436, 3468), 'tensorflow.constant', 'tf.constant', (['[self.map_masks[0]]'], {}), '([self.map_masks[0]])\n', (3447, 3468), 
True, 'import tensorflow as tf\n'), ((3764, 3797), 'tensorflow.constant', 'tf.constant', (['self.map_predictions'], {}), '(self.map_predictions)\n', (3775, 3797), True, 'import tensorflow as tf\n'), ((3811, 3839), 'tensorflow.constant', 'tf.constant', (['self.map_bboxes'], {}), '(self.map_bboxes)\n', (3822, 3839), True, 'import tensorflow as tf\n'), ((3853, 3881), 'tensorflow.constant', 'tf.constant', (['self.map_labels'], {}), '(self.map_labels)\n', (3864, 3881), True, 'import tensorflow as tf\n'), ((3895, 3922), 'tensorflow.constant', 'tf.constant', (['self.map_masks'], {}), '(self.map_masks)\n', (3906, 3922), True, 'import tensorflow as tf\n')]
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.examples.rmc_learn_to_execute."""
# NOTE(review): the original docstring said "rmc_nth_farthest", a copy-paste
# slip — everything below exercises rmc_learn_to_execute.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sonnet as snt
from sonnet.examples import learn_to_execute
from sonnet.examples import rmc_learn_to_execute
import tensorflow as tf


class RMCLearnTest(tf.test.TestCase):
  """Smoke tests for the relational-memory learn-to-execute example."""

  def setUp(self):
    # Small dimensions so the graph builds quickly in tests.
    self._batch_size = 2
    self._seq_sz_in = 10
    self._seq_sz_out = 3
    self._feature_size = 8
    self._nesting = 2
    self._literal_length = 3

  def test_object_sequence_model(self):
    """Test the model class."""
    core = snt.RelationalMemory(
        mem_slots=2, head_size=4, num_heads=1, num_blocks=1, gate_style="unit")
    final_mlp = snt.nets.MLP(
        output_sizes=(5,),
        activate_final=True)
    model = rmc_learn_to_execute.SequenceModel(
        core=core,
        target_size=self._feature_size,
        final_mlp=final_mlp)
    # Time-major dummy tensors: (sequence, batch, features).
    dummy_in = tf.zeros(
        (self._seq_sz_in, self._batch_size, self._feature_size))
    dummy_out = tf.zeros(
        (self._seq_sz_out, self._batch_size, self._feature_size))
    sizes = tf.ones((self._batch_size))
    logits = model(dummy_in, dummy_out, sizes, sizes)
    # Output logits must keep the decoder sequence length and feature size.
    self.assertAllEqual(
        logits.shape, (self._seq_sz_out, self._batch_size, self._feature_size))

  def test_build_and_train(self):
    """Test the example TF graph build."""
    total_iterations = 2
    reporting_interval = 1
    # test=True keeps the run tiny; this only checks the graph builds and steps.
    rmc_learn_to_execute.build_and_train(
        total_iterations, reporting_interval, test=True)

  def test_learn_to_execute_datset(self):
    """Test the dataset class."""
    # NOTE(review): "datset" (sic) — renaming would change the test id, so kept.
    dataset = learn_to_execute.LearnToExecute(
        self._batch_size, self._literal_length, self._nesting)
    dataset_iter = dataset.make_one_shot_iterator().get_next()
    logit_size = dataset.state.vocab_size
    seq_sz_in = dataset.state.num_steps
    seq_sz_out = dataset.state.num_steps_out
    # Tuple layout assumed: (input seq, target seq, decode mask, in-len, out-len)
    # — TODO confirm against LearnToExecute's iterator contract.
    self.assertAllEqual(
        dataset_iter[0].shape, (seq_sz_in, self._batch_size, logit_size))
    self.assertAllEqual(
        dataset_iter[1].shape, (seq_sz_out, self._batch_size, logit_size))
    self.assertAllEqual(
        dataset_iter[2].shape, (seq_sz_out, self._batch_size, logit_size))
    self.assertAllEqual(dataset_iter[3].shape, (self._batch_size,))
    self.assertAllEqual(dataset_iter[4].shape, (self._batch_size,))


if __name__ == "__main__":
  tf.test.main()
[ "sonnet.examples.rmc_learn_to_execute.build_and_train", "tensorflow.ones", "sonnet.examples.learn_to_execute.LearnToExecute", "sonnet.nets.MLP", "tensorflow.test.main", "sonnet.RelationalMemory", "sonnet.examples.rmc_learn_to_execute.SequenceModel", "tensorflow.zeros" ]
[((3048, 3062), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (3060, 3062), True, 'import tensorflow as tf\n'), ((1280, 1376), 'sonnet.RelationalMemory', 'snt.RelationalMemory', ([], {'mem_slots': '(2)', 'head_size': '(4)', 'num_heads': '(1)', 'num_blocks': '(1)', 'gate_style': '"""unit"""'}), "(mem_slots=2, head_size=4, num_heads=1, num_blocks=1,\n gate_style='unit')\n", (1300, 1376), True, 'import sonnet as snt\n'), ((1398, 1450), 'sonnet.nets.MLP', 'snt.nets.MLP', ([], {'output_sizes': '(5,)', 'activate_final': '(True)'}), '(output_sizes=(5,), activate_final=True)\n', (1410, 1450), True, 'import sonnet as snt\n'), ((1472, 1575), 'sonnet.examples.rmc_learn_to_execute.SequenceModel', 'rmc_learn_to_execute.SequenceModel', ([], {'core': 'core', 'target_size': 'self._feature_size', 'final_mlp': 'final_mlp'}), '(core=core, target_size=self.\n _feature_size, final_mlp=final_mlp)\n', (1506, 1575), False, 'from sonnet.examples import rmc_learn_to_execute\n'), ((1611, 1676), 'tensorflow.zeros', 'tf.zeros', (['(self._seq_sz_in, self._batch_size, self._feature_size)'], {}), '((self._seq_sz_in, self._batch_size, self._feature_size))\n', (1619, 1676), True, 'import tensorflow as tf\n'), ((1702, 1768), 'tensorflow.zeros', 'tf.zeros', (['(self._seq_sz_out, self._batch_size, self._feature_size)'], {}), '((self._seq_sz_out, self._batch_size, self._feature_size))\n', (1710, 1768), True, 'import tensorflow as tf\n'), ((1790, 1815), 'tensorflow.ones', 'tf.ones', (['self._batch_size'], {}), '(self._batch_size)\n', (1797, 1815), True, 'import tensorflow as tf\n'), ((2111, 2200), 'sonnet.examples.rmc_learn_to_execute.build_and_train', 'rmc_learn_to_execute.build_and_train', (['total_iterations', 'reporting_interval'], {'test': '(True)'}), '(total_iterations, reporting_interval,\n test=True)\n', (2147, 2200), False, 'from sonnet.examples import rmc_learn_to_execute\n'), ((2297, 2387), 'sonnet.examples.learn_to_execute.LearnToExecute', 'learn_to_execute.LearnToExecute', 
(['self._batch_size', 'self._literal_length', 'self._nesting'], {}), '(self._batch_size, self._literal_length,\n self._nesting)\n', (2328, 2387), False, 'from sonnet.examples import learn_to_execute\n')]
import os.path
from typing import Sequence, Optional, Dict

import numpy as np
import pandas as pd
from nk_sent2vec import Sent2Vec as _Sent2Vec

from d3m import container, utils
from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase
from d3m.primitive_interfaces.base import CallResult
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import hyperparams, base as metadata_base, params

__author__ = "Distil"
__version__ = "1.3.0"
__contact__ = "mailto:<EMAIL>"

Inputs = container.pandas.DataFrame
Outputs = container.pandas.DataFrame


class Hyperparams(hyperparams.Hyperparams):
    """Hyperparameters for Sent2VecPrimitive: a column-index whitelist."""

    use_columns = hyperparams.Set(
        elements=hyperparams.Hyperparameter[int](-1),
        default=(),
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
    )


class Sent2VecPrimitive(TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
    """
    Produce numerical representations (features) for short texts or sentences.

    Parameters
    ----------
    inputs : Input pandas dataframe

    Returns
    -------
    Outputs
        The output is a pandas dataframe
    """

    # D3M primitive metadata: identity, dependencies, and the static model file
    # (twitter_bigrams.bin) supplied through the `volumes` mechanism at runtime.
    metadata = metadata_base.PrimitiveMetadata(
        {
            # Simply an UUID generated once and fixed forever. Generated using "uuid.uuid4()".
            "id": "cf450079-9333-4a3f-aed4-b77a4e8c7be7",
            "version": __version__,
            "name": "sent2vec_wrapper",
            # Keywords do not have a controlled vocabulary. Authors can put here whatever they find suitable.
            "keywords": ["Sent2Vec", "Embedding", "NLP", "Natural Language Processing"],
            "source": {
                "name": __author__,
                "contact": __contact__,
                "uris": [
                    # Unstructured URIs.
                    "https://github.com/kungfuai/d3m-primitives"
                ],
            },
            # A list of dependencies in order. These can be Python packages, system packages, or Docker images.
            # Of course Python packages can also have their own dependencies, but sometimes it is necessary to
            # install a Python package first to be even able to run setup.py of another package. Or you have
            # a dependency which is not on PyPi.
            "installation": [
                {"type": "PIP", "package": "cython", "version": "0.29.16"},
                {
                    "type": metadata_base.PrimitiveInstallationType.PIP,
                    "package_uri": "git+https://github.com/kungfuai/d3m-primitives.git@{git_commit}#egg=kf-d3m-primitives".format(
                        git_commit=utils.current_git_commit(os.path.dirname(__file__)),
                    ),
                },
                {
                    "type": "FILE",
                    "key": "sent2vec_model",
                    "file_uri": "http://public.datadrivendiscovery.org/twitter_bigrams.bin",
                    "file_digest": "9e8ccfea2aaa4435ca61b05b11b60e1a096648d56fff76df984709339f423dd6",
                },
            ],
            # The same path the primitive is registered with entry points in setup.py.
            "python_path": "d3m.primitives.feature_extraction.nk_sent2vec.Sent2Vec",
            # Choose these from a controlled vocabulary in the schema. If anything is missing which would
            # best describe the primitive, make a merge request.
            "algorithm_types": [metadata_base.PrimitiveAlgorithmType.VECTORIZATION],
            "primitive_family": metadata_base.PrimitiveFamily.FEATURE_EXTRACTION,
        }
    )

    # class instance to avoid unnecessary re-init on subsequent produce calls
    _vectorizer: Optional[_Sent2Vec] = None

    def __init__(
        self, *, hyperparams: Hyperparams, random_seed: int = 0, volumes: Dict[str, str] = None
    ) -> None:
        # `volumes` maps static-file keys (here "sent2vec_model") to local paths.
        super().__init__(
            hyperparams=hyperparams, random_seed=random_seed, volumes=volumes
        )
        self.volumes = volumes

    def produce(
        self, *, inputs: Inputs, timeout: float = None, iterations: int = None
    ) -> CallResult[Outputs]:
        """
        Produce numerical representations (features) for short texts or sentences.

        Parameters
        ----------
        inputs : Input pandas dataframe

        Returns
        -------
        Outputs
            The output is a pandas dataframe
        """

        # figure out columns to operate on: text-typed attribute columns,
        # optionally restricted by the `use_columns` hyperparameter
        cols = self._get_operating_columns(inputs, self.hyperparams['use_columns'], ('http://schema.org/Text',))

        frame = inputs.iloc[:, cols]
        outputs = inputs.copy()

        try:
            # lazy load the model and keep it around for subsequent produce calls
            if Sent2VecPrimitive._vectorizer is None:
                Sent2VecPrimitive._vectorizer = _Sent2Vec(path=self.volumes["sent2vec_model"])
            output_vectors = []
            for col in range(frame.shape[1]):
                text = frame.iloc[:, col].tolist()
                embedded_sentences = Sent2VecPrimitive._vectorizer.embed_sentences(sentences=text)
                output_vectors.append(embedded_sentences)
            # All columns embed the same number of rows, so the last loop's
            # `embedded_sentences` length gives the output row count.
            embedded_df = pd.DataFrame(np.array(output_vectors).reshape(len(embedded_sentences), -1))
        except ValueError:
            # fall back to returning the (copied) inputs unchanged if vectorizing fails
            return CallResult(outputs)

        # create df with vectorized columns and append to input df
        embedded_df = d3m_DataFrame(embedded_df)

        # annotate every embedding column as a float attribute
        for col in range(embedded_df.shape[1]):
            col_dict = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS, col)))
            col_dict['structural_type'] = type(1.0)
            col_dict['name'] = "vector_" + str(col)
            col_dict["semantic_types"] = (
                "http://schema.org/Float",
                "https://metadata.datadrivendiscovery.org/types/Attribute",
            )
            embedded_df.metadata = embedded_df.metadata.update(
                (metadata_base.ALL_ELEMENTS, col), col_dict
            )

        # set table-level 'dimension' metadata; df_dict_1 is inserted into
        # df_dict *before* being filled, which works because it is mutated
        # in place — NOTE(review): both dicts start from the same query;
        # presumably intentional d3m boilerplate, verify against d3m docs.
        df_dict = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS, )))
        df_dict_1 = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS, )))
        df_dict['dimension'] = df_dict_1
        df_dict_1['name'] = 'columns'
        df_dict_1['semantic_types'] = ('https://metadata.datadrivendiscovery.org/types/TabularColumn',)
        df_dict_1['length'] = embedded_df.shape[1]
        embedded_df.metadata = embedded_df.metadata.update((metadata_base.ALL_ELEMENTS,), df_dict)

        return CallResult(outputs.append_columns(embedded_df))

    @classmethod
    def _get_operating_columns(cls, inputs: container.DataFrame, use_columns: Sequence[int],
                               semantic_types: Sequence[str], require_attribute: bool = True) -> Sequence[int]:
        """Return indices of columns matching `semantic_types` (and, when
        `require_attribute`, also tagged Attribute), intersected with the
        caller-supplied `use_columns` whitelist when non-empty."""
        # use caller supplied columns if supplied
        cols = set(use_columns)
        type_cols = set(inputs.metadata.list_columns_with_semantic_types(semantic_types))
        if require_attribute:
            attributes = set(inputs.metadata.list_columns_with_semantic_types(('https://metadata.datadrivendiscovery.org/types/Attribute',)))
            type_cols = type_cols & attributes
        if len(cols) > 0:
            cols = type_cols & cols
        else:
            cols = type_cols
        # note: set ordering — returned column order is unspecified
        return list(cols)
[ "d3m.primitive_interfaces.base.CallResult", "numpy.array", "nk_sent2vec.Sent2Vec", "d3m.container.DataFrame" ]
[((5704, 5730), 'd3m.container.DataFrame', 'd3m_DataFrame', (['embedded_df'], {}), '(embedded_df)\n', (5717, 5730), True, 'from d3m.container import DataFrame as d3m_DataFrame\n'), ((5034, 5080), 'nk_sent2vec.Sent2Vec', '_Sent2Vec', ([], {'path': "self.volumes['sent2vec_model']"}), "(path=self.volumes['sent2vec_model'])\n", (5043, 5080), True, 'from nk_sent2vec import Sent2Vec as _Sent2Vec\n'), ((5594, 5613), 'd3m.primitive_interfaces.base.CallResult', 'CallResult', (['outputs'], {}), '(outputs)\n', (5604, 5613), False, 'from d3m.primitive_interfaces.base import CallResult\n'), ((5407, 5431), 'numpy.array', 'np.array', (['output_vectors'], {}), '(output_vectors)\n', (5415, 5431), True, 'import numpy as np\n')]
# ============================================================================ # # Copyright (c) 2007-2010 Integral Technology Solutions Pty Ltd, # All Rights Reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE # LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # # FOR FURTHER INFORMATION PLEASE SEE THE INTEGRAL TECHNOLOGY SOLUTIONS # END USER LICENSE AGREEMENT (ELUA). # # ============================================================================ import validation_helper as helper from java.io import File def run(config): if validateAdminServerProperty(config): return False return True def validateAdminServerProperty(domainProperties): error = 0 helper.printHeader('[VALIDATING] admin server properties') adminPort = domainProperties.getProperty('wls.admin.listener.port') if not adminPort is None and len(adminPort)>0: try: int(adminPort) except ValueError: log.error('Please verify wls.admin.listener.port [' + str(adminPort) + '] property.') else: if int(adminPort)<0 or int(adminPort)>65535: log.error('Please verify wls.admin.listener.port property, port number is not in valid range [0-65535].') else: log.debug('Admin server port [' + str(adminPort) + '] is valid.') enableSSL = domainProperties.getProperty('wls.admin.listener.enableSSL') if not enableSSL is None and len(enableSSL)>0: if not enableSSL.upper()=='TRUE' and not enableSSL.upper()=='FALSE': error = 1 log.error('The wls.admin.listener.enableSSL property supports only [true,false].') else: 
log.debug('Admin server ssl enable property [' + str(enableSSL) + '] is valid.') if enableSSL.upper()=='TRUE': sslPort = domainProperties.getProperty('wls.admin.listener.sslPort') if not sslPort is None and len(sslPort)>0: try: int(sslPort) except ValueError: log.error('Please verify wls.admin.listener.sslPort [' + str(sslPort) + '] property.') else: if int(sslPort)<0 or int(sslPort)>65535: log.error('Please verify wls.admin.listener.sslPort property, port number is not in valid range [0-65535].') else: log.debug('Admin server ssl port [' + str(sslPort) + '] is valid.') adminchprotocol = domainProperties.getProperty('wls.admin.channel.protocol') if not adminchprotocol is None and len(adminchprotocol)>0: if not adminchprotocol=='t3' and not adminchprotocol=='t3s' and not adminchprotocol=='http' and not adminchprotocol=='https' and not adminchprotocol=='iiop' and not adminchprotocol=='iiops' and not adminchprotocol=='ldap' and not adminchprotocol=='ldaps' and not adminchprotocol=='admin': error = 1 log.error('The wls.admin.channel.protocol property supports only [t3,t3s,http,https,iiop,iiops,ldap,ldaps,admin].') else: log.debug('Admin channel protocol property [' + str(adminchprotocol) + '] is valid.') adminChannelPort = domainProperties.getProperty('wls.admin.channel.listener.port') if not adminChannelPort is None and len(adminChannelPort)>0: try: int(adminChannelPort) except ValueError: log.error('Please verify wls.admin.channel.listener.port [' + str(adminChannelPort) + '] property.') else: if int(adminChannelPort)<0 or int(adminChannelPort)>65535: log.error('Please verify wls.admin.channel.listener.port property, port number is not in valid range [0-65535].') else: log.debug('Admin channel port [' + str(adminChannelPort) + '] is valid.') adminChannelPublicPort = domainProperties.getProperty('wls.admin.channel.listener.publicPort') if not adminChannelPublicPort is None and len(adminChannelPublicPort)>0: try: int(adminChannelPublicPort) except ValueError: 
log.error('Please verify wls.admin.channel.listener.publicPort [' + str(adminChannelPublicPort) + '] property.') else: if int(adminChannelPublicPort)<0 or int(adminChannelPublicPort)>65535: log.error('Please verify wls.admin.channel.listener.publicPort property, port number is not in valid range [0-65535].') else: log.debug('Admin channel public port [' + str(adminChannelPublicPort) + '] is valid.') httpEnable = domainProperties.getProperty('wls.admin.channel.httpEnable') if not httpEnable is None and len(httpEnable)>0: if not httpEnable.upper()=='TRUE' and not httpEnable.upper()=='FALSE': error = 1 log.error('The wls.admin.channel.httpEnable property supports only [true,false].') else: log.debug('Admin http channel enable property [' + str(httpEnable) + '] is valid.') enableTunneling = domainProperties.getProperty('wls.admin.enableTunneling') if not enableTunneling is None and len(enableTunneling)>0: if not enableTunneling.upper()=='TRUE' and not enableTunneling.upper()=='FALSE': error = 1 log.error('The wls.admin.enableTunneling property supports only [true,false].') else: log.debug('Admin tunnelling enable property [' + str(enableTunneling) + '] is valid.') admincustomlog = domainProperties.getProperty('wls.admin.log.custom') if not admincustomlog is None and len(admincustomlog)>0: if not admincustomlog.upper()=='TRUE' and not admincustomlog.upper()=='FALSE': error = 1 log.error('The wls.admin.log.custom property supports only [true,false].') else: log.debug('Admin custom log enable property [' + str(admincustomlog) + '] is valid.') if admincustomlog.upper()=='TRUE': filename = domainProperties.getProperty('wls.admin.log.filename') if not filename is None and len(filename)>0: file = File(filename) if file.isAbsolute(): if not file.exists(): log.debug('[NOTE] Please make sure the user running this script has permission to create directory and file [' + str(filename) + '].') limitNumberOfFile = domainProperties.getProperty('wls.admin.log.limitNumOfFile') if not 
limitNumberOfFile is None and len(limitNumberOfFile)>0: if not limitNumberOfFile.upper()=='TRUE' and not limitNumberOfFile.upper()=='FALSE': error = 1 log.error('The wls.admin.log.limitNumOfFile property supports only [true,false].') else: log.debug('Admin log limit number of file property [' + str(limitNumberOfFile) + '] is valid.') fileToRetain = domainProperties.getProperty('wls.admin.log.fileToRetain') if not fileToRetain is None and len(fileToRetain)>0: if not fileToRetain is None and len(fileToRetain)>0: try: int(fileToRetain) except ValueError: log.error('Please verify wls.admin.log.fileToRetain [' + str(fileToRetain) + '] property.') else: if int(fileToRetain)<1 or int(fileToRetain)>99999: log.error('Please verify wls.admin.log.fileToRetain property, number is not in valid range [1-99999].') else: log.debug('Admin log file to retain [' + str(fileToRetain) + '] is valid.') logRotateOnStartup = domainProperties.getProperty('wls.admin.log.rotateLogOnStartup') if not logRotateOnStartup is None and len(logRotateOnStartup)>0: if not logRotateOnStartup.upper()=='TRUE' and not logRotateOnStartup.upper()=='FALSE': error = 1 log.error('The wls.admin.log.rotateLogOnStartup property supports only [true,false].') else: log.debug('Admin log rotate on startup property [' + str(logRotateOnStartup) + '] is valid.') rotationType = domainProperties.getProperty('wls.admin.log.rotationType') if not rotationType is None and len(rotationType)>0: if not rotationType == 'bySize' and not rotationType == 'byTime': error = 1 log.error('The wls.admin.log.rotationType property supports only [bySize,byTime].') else: log.debug('Admin log rotation type property [' + str(rotationType) + '] is valid.') if rotationType == 'bySize': fileMinSize = domainProperties.getProperty('wls.admin.log.fileMinSize') if not fileMinSize is None and len(fileMinSize)>0: try: int(fileMinSize) except ValueError: log.error('Please verify wls.admin.log.fileMinSize [' + str(fileMinSize) + '] property.') else: if 
int(fileMinSize)<0 or int(fileMinSize)>65535: log.error('Please verify wls.admin.log.fileMinSize [' + str(fileMinSize) + '] property, number is not in valid range [0-65535].') else: log.debug('Admin log file min size [' + str(fileMinSize) + '] is valid.') if rotationType == 'byTime': rotationTime = domainProperties.getProperty('wls.admin.log.rotationTime') if not rotationTime is None and len(rotationTime)>0: if rotationTime.find(':')==-1: error = 1 log.error('Please verify wls.admin.log.rotationTime [' + str(rotationTime) + '] property, the property supports time format [HH:MM].') else: if len(rotationTime)<4 or len(rotationTime)>5: error = 1 log.error('The wls.admin.log.rotationTime [' + str(rotationTime) + '] property, the property supports time format [HH:MM].') else: log.debug('Admin log rotation time [' + str(rotationTime) + '] is valid.') fileTimespan = domainProperties.getProperty('wls.admin.log.fileTimeSpan') if not fileTimespan is None and len(fileTimespan)>0: try: int(fileTimespan) except ValueError: log.error('Please verify wls.admin.log.fileTimeSpan [' + str(fileTimespan) + '] property.') else: if int(fileTimespan)<1: log.error('Please verify wls.admin.log.fileTimeSpan [' + str(fileTimespan) + '] property, number is not in valid range [>=1].') else: log.debug('Admin log file timespan [' + str(fileTimespan) + '] is valid.') rotationDir = domainProperties.getProperty('wls.admin.log.rotationDir') if not rotationDir is None and len(rotationDir)>0: file = File(rotationDir) if file.isAbsolute(): if not file.exists(): log.debug('[NOTE] Please make sure the user running this script has permission to create directory and file [' + str(rotationDir) + '].') fileSeverity = domainProperties.getProperty('wls.admin.log.logFileSeverity') if not fileSeverity is None and len(fileSeverity)>0: if not fileSeverity == 'Debug' and not fileSeverity == 'Info' and not fileSeverity == 'Warning': error = 1 log.error('The wls.admin.log.logFileSeverity property supports only 
[Debug,Info,Warning].') else: log.debug('Admin log file severity property [' + str(fileSeverity) + '] is valid.') broadcastSeverity = domainProperties.getProperty('wls.admin.log.broadcastSeverity') if not broadcastSeverity is None and len(broadcastSeverity)>0: if not broadcastSeverity == 'Trace' and not broadcastSeverity == 'Debug' and not broadcastSeverity == 'Info' and not broadcastSeverity == 'Notice' and not broadcastSeverity == 'Warning' and not broadcastSeverity == 'Error' and not broadcastSeverity == 'Critical' and not broadcastSeverity == 'Alert' and not broadcastSeverity == 'Emergency' and not broadcastSeverity == 'Off': error = 1 log.error('The wls.admin.log.broadcastSeverity property supports only [Trace,Debug,Info,Notice,Warning,Error,Critical,Alert,Emergency,Off].') else: log.debug('Admin broadcast severity property [' + str(broadcastSeverity) + '] is valid.') memoryBufferSeverity = domainProperties.getProperty('wls.admin.log.memoryBufferSeverity') if not memoryBufferSeverity is None and len(memoryBufferSeverity)>0: if not memoryBufferSeverity == 'Trace' and not memoryBufferSeverity == 'Debug' and not fileSeverity == 'Info' and not fileSeverity == 'Notice' and not fileSeverity == 'Warning' and not fileSeverity == 'Error' and not fileSeverity == 'Critical' and not fileSeverity == 'Alert' and not fileSeverity == 'Emergency' and not fileSeverity == 'Off': error = 1 log.error('The wls.admin.log.memoryBufferSeverity property supports only [Trace,Debug,Info,Notice,Warning,Error,Critical,Alert,Emergency,Off].') else: log.debug('Admin memory buffer severity property [' + str(memoryBufferSeverity) + '] is valid.') adminhttpcustomlog = domainProperties.getProperty('wls.admin.httplog.enable') if not adminhttpcustomlog is None and len(adminhttpcustomlog)>0: if not adminhttpcustomlog.upper()=='TRUE' and not adminhttpcustomlog.upper()=='FALSE': error = 1 log.error('The wls.admin.httplog.enable property supports only [true,false].') else: log.debug('Admin http custom 
log enable property [' + str(adminhttpcustomlog) + '] is valid.') if adminhttpcustomlog.upper()=='TRUE': filename = domainProperties.getProperty('wls.admin.httplog.filename') if not filename is None and len(filename)>0: file = File(filename) if file.isAbsolute(): if not file.exists(): log.debug('[NOTE] Please make sure the user running this script has permission to create directory and file for [' + str(filename) + '].') limitNumberOfFile = domainProperties.getProperty('wls.admin.httplog.limitNumOfFile') if not limitNumberOfFile is None and len(limitNumberOfFile)>0: if not limitNumberOfFile.upper()=='TRUE' and not limitNumberOfFile.upper()=='FALSE': error = 1 log.error('The wls.admin.httplog.limitNumOfFile property supports only [true,false].') else: log.debug('Admin http log limit number of file property [' + str(limitNumberOfFile) + '] is valid.') fileToRetain = domainProperties.getProperty('wls.admin.httplog.fileToRetain') if not fileToRetain is None and len(fileToRetain)>0: if not fileToRetain is None and len(fileToRetain)>0: try: int(fileToRetain) except ValueError: log.error('Please verify wls.admin.httplog.fileToRetain [' + str(fileToRetain) + '] property.') else: if int(fileToRetain)<1 or int(fileToRetain)>99999: log.error('Please verify wls.admin.httplog.fileToRetain property, number is not in valid range [1-99999].') else: log.debug('Admin http log file to retain [' + str(fileToRetain) + '] is valid.') logRotateOnStartup = domainProperties.getProperty('wls.admin.httplog.rotateLogOnStartup') if not logRotateOnStartup is None and len(logRotateOnStartup)>0: if not logRotateOnStartup.upper()=='TRUE' and not logRotateOnStartup.upper()=='FALSE': error = 1 log.error('The wls.admin.httplog.rotateLogOnStartup property supports only [true,false].') else: log.debug('Admin http log rotate on startup property [' + str(logRotateOnStartup) + '] is valid.') rotationType = domainProperties.getProperty('wls.admin.httplog.rotationType') if not rotationType is None and 
len(rotationType)>0: if not rotationType == 'bySize' and not rotationType == 'byTime': error = 1 log.error('The wls.admin.httplog.rotationType property supports only [bySize,byTime].') else: log.debug('Admin http log rotation type property [' + str(rotationType) + '] is valid.') if rotationType == 'bySize': fileMinSize = domainProperties.getProperty('wls.admin.httplog.fileMinSize') if not fileMinSize is None and len(fileMinSize)>0: try: int(fileMinSize) except ValueError: log.error('Please verify wls.admin.httplog.fileMinSize [' + str(fileMinSize) + '] property.') else: if int(fileMinSize)<0 or int(fileMinSize)>65535: log.error('Please verify wls.admin.httplog.fileMinSize [' + str(fileMinSize) + '] property, number is not in valid range [0-65535].') else: log.debug('Admin http log file min size [' + str(fileMinSize) + '] is valid.') if rotationType == 'byTime': rotationTime = domainProperties.getProperty('wls.admin.httplog.rotationTime') if not rotationTime is None and len(rotationTime)>0: if rotationTime.find(':')==-1: error = 1 log.error('Please verify wls.admin.httplog.rotationTime [' + str(rotationTime) + '] property, the property supports time format [HH:MM].') else: if len(rotationTime)<4 or len(rotationTime)>5: error = 1 log.error('The wls.admin.httplog.rotationTime [' + str(rotationTime) + '] property, the property supports time format [HH:MM].') else: log.debug('Admin http log rotation time [' + str(rotationTime) + '] is valid.') fileTimespan = domainProperties.getProperty('wls.admin.httplog.fileTimeSpan') if not fileTimespan is None and len(fileTimespan)>0: try: int(fileTimespan) except ValueError: log.error('Please verify wls.admin.httplog.fileTimeSpan [' + str(fileTimespan) + '] property.') else: if int(fileTimespan)<1: log.error('Please verify wls.admin.httplog.fileTimeSpan [' + str(fileTimespan) + '] property, number is not in valid range [>=1].') else: log.debug('Admin http log file timespan [' + str(fileTimespan) + '] is valid.') rotationDir = 
domainProperties.getProperty('wls.admin.httplog.rotationDir') if not rotationDir is None and len(rotationDir)>0: file = File(rotationDir) if file.isAbsolute(): if not file.exists(): log.debug('[NOTE] Please make sure the user running this script has permission to create directory and file for [' + str(rotationDir) + '].') return error
[ "validation_helper.printHeader", "java.io.File" ]
[((1218, 1276), 'validation_helper.printHeader', 'helper.printHeader', (['"""[VALIDATING] admin server properties"""'], {}), "('[VALIDATING] admin server properties')\n", (1236, 1276), True, 'import validation_helper as helper\n'), ((6765, 6779), 'java.io.File', 'File', (['filename'], {}), '(filename)\n', (6769, 6779), False, 'from java.io import File\n'), ((12488, 12505), 'java.io.File', 'File', (['rotationDir'], {}), '(rotationDir)\n', (12492, 12505), False, 'from java.io import File\n'), ((15913, 15927), 'java.io.File', 'File', (['filename'], {}), '(filename)\n', (15917, 15927), False, 'from java.io import File\n'), ((21767, 21784), 'java.io.File', 'File', (['rotationDir'], {}), '(rotationDir)\n', (21771, 21784), False, 'from java.io import File\n')]
import sys # import the GameState of the game from Game.GameStateConnect4 import GameState # import all agents from Agents.MCTS import MCTSTree from Agents.Random import RandomAgent from Agents.AlphaBeta import AlphaBetaAgent # creates the board string for connect4 (full of zeros) start_board_list = ["000000 " for i in range(0, 7)] start_board = "".join(start_board_list) class Match: def __init__(self, agents, start_board): # names of agents: string self.agent1, self.agent2 = agents # board: string self.start_board = start_board # initial GameState: GameState self.initialState = GameState(start_board, 1) # functions of the agents: returns action (int) self.func_agent1 = getattr(self, self.agent1) self.func_agent2 = getattr(self, self.agent2) # runs the match self.run_Match(self.initialState) def MCTS(self, state): """ returns: the result of MCTS agent (action int) """ player_id = state.player_id return MCTSTree(state).runMCTS(player_id) def RANDOM(self, state): """ returns: the result of RANDOM agent (action int) """ return RandomAgent().get_action(state) def ALPHABETA(self, state): """ returns: the result of ALPHABETA agent (action int) """ player_id = state.player_id return AlphaBetaAgent().get_action(state, player_id) def REALWORLD(self, state): """ returns: action -> you can choose the action -type in a number between 1 and 8 """ int_input = False action = None while not int_input: try: action = int(input("Please type in your action. " + "It has to be a number between 1 and 7." 
+ "Type in 0 to stop the game!")) if action == 0: print("\nSomebody gave up!") sys.exit() int_input = True except ValueError: print("Please type in a number between 1 and 7.") continue return action def run_Match(self, state): """ runs the match: asks for actions until the game is over then prints the winner """ while not state.terminal_state(): if state.player_id == 1: action = self.func_agent1(state) print(self.agent1 + " plays action: "+str(action)) else: action = self.func_agent2(state) print(self.agent2 + " plays action: "+str(action)) if action not in state.actions: print("\n\nSorry, but this action isn't AVAILABLE.\n\n") continue state = state.result(action) print(str(state)) winner = state.winner print("Player "+str(winner)+" has won." if winner else "Nobody won!") if __name__ == "__main__": """ Create Match Object first parameter = agents (choose between "MCTS", "ALPHABETA", "RANDOM", "REALWORLD") second parameter = start_board (string) """ match = Match(("MCTS", "ALPHABETA"), start_board)
[ "Game.GameStateConnect4.GameState", "Agents.Random.RandomAgent", "Agents.MCTS.MCTSTree", "sys.exit", "Agents.AlphaBeta.AlphaBetaAgent" ]
[((663, 688), 'Game.GameStateConnect4.GameState', 'GameState', (['start_board', '(1)'], {}), '(start_board, 1)\n', (672, 688), False, 'from Game.GameStateConnect4 import GameState\n'), ((1115, 1130), 'Agents.MCTS.MCTSTree', 'MCTSTree', (['state'], {}), '(state)\n', (1123, 1130), False, 'from Agents.MCTS import MCTSTree\n'), ((1307, 1320), 'Agents.Random.RandomAgent', 'RandomAgent', ([], {}), '()\n', (1318, 1320), False, 'from Agents.Random import RandomAgent\n'), ((1539, 1555), 'Agents.AlphaBeta.AlphaBetaAgent', 'AlphaBetaAgent', ([], {}), '()\n', (1553, 1555), False, 'from Agents.AlphaBeta import AlphaBetaAgent\n'), ((2198, 2208), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2206, 2208), False, 'import sys\n')]
import os import subprocess import tempfile try: from PyQt5.QtCore import QBuffer, QIODevice, Qt from PyQt5.QtGui import QImage except ImportError: from PySide2.QtCore import QBuffer, QIODevice, Qt from PySide2.QtGui import QImage from .texture_format import TextureFormat def imageToBytes(image): buffer = QBuffer() buffer.open(QIODevice.ReadWrite) image.save(buffer, 'png') data = buffer.data() buffer.close() return data def loadImage(path): tex_format = TextureFormat(path) if tex_format in {'png', 'bmp', 'tga', 'tif', 'tiff', 'jpg', 'jpeg'}: image = QImage(path) if not image.isNull(): return image else: return temp_path = os.path.join(tempfile.gettempdir(), str(os.getpid()) + 'hammer_temp_image.png') temp_path = temp_path.replace('\\', '/') subprocess.call('iconvert -g off "{0}" "{1}"'.format(path, temp_path)) if os.path.exists(temp_path): image = QImage(temp_path) os.remove(temp_path) return image
[ "os.path.exists", "tempfile.gettempdir", "os.getpid", "PySide2.QtCore.QBuffer", "PySide2.QtGui.QImage", "os.remove" ]
[((331, 340), 'PySide2.QtCore.QBuffer', 'QBuffer', ([], {}), '()\n', (338, 340), False, 'from PySide2.QtCore import QBuffer, QIODevice, Qt\n'), ((944, 969), 'os.path.exists', 'os.path.exists', (['temp_path'], {}), '(temp_path)\n', (958, 969), False, 'import os\n'), ((618, 630), 'PySide2.QtGui.QImage', 'QImage', (['path'], {}), '(path)\n', (624, 630), False, 'from PySide2.QtGui import QImage\n'), ((750, 771), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (769, 771), False, 'import tempfile\n'), ((987, 1004), 'PySide2.QtGui.QImage', 'QImage', (['temp_path'], {}), '(temp_path)\n', (993, 1004), False, 'from PySide2.QtGui import QImage\n'), ((1013, 1033), 'os.remove', 'os.remove', (['temp_path'], {}), '(temp_path)\n', (1022, 1033), False, 'import os\n'), ((777, 788), 'os.getpid', 'os.getpid', ([], {}), '()\n', (786, 788), False, 'import os\n')]
########################################################################## # PyPipe - Copyright (C) AGrigis, 2017 # Distributed under the terms of the CeCILL-B license, as published by # the CEA-CNRS-INRIA. Refer to the LICENSE file or to # http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html # for details. ########################################################################## # Soma import from PySide2 import QtWidgets from PySide2 import QtGui # Global parameters font = QtGui.QFont("", 9, QtGui.QFont.Bold) def fill_treectrl(treectrl, menu, match=""): """ Fill a tree control with the different menu items. This procedure is able to filter the menu items. Loadable functions appear in bold in the tree control. Insert four elements (current name, function module path, function input parameters, function output parameters) When the function module path is not None we have reached a leaf that contains a function description. Parameters ---------- treectrl: QTreeControl (mandatory) the tree control where we want to insert the menu menu: hierachic dict (mandatory) each key is a sub module of the module. Leafs contain a list with the url to the documentation. match: str (optional) the string used to filter the menu items """ treectrl.headerItem().setText(0, "Functions") add_tree_nodes(treectrl, menu, match) def add_tree_nodes(parent_item, menu, match, parent_module=""): """ Add the menu to tree control if match in current module name or child modules. The match is insensitive to the cast. Parameters ---------- parent_item: QTreeWidgetItem (mandatory) a tree control item where we want to insert the menu menu: hierachic dict (mandatory) each key is a sub module of the module. Leafs contain a list with the url to the documentation. 
match: str (mandatory) the string used to filter the menu items parent_module: str (optional) the parent module string description ('module.sub_module') """ # Go through the current module sub modules for module_name, child_modules in menu.items(): # Filtering: check if we need to add this module in the tree if (match == "" or match in module_name.lower() or search_in_menu(child_modules, match)): # Add the module name to the tree control if isinstance(child_modules, dict): tree_item = QtWidgets.QTreeWidgetItem( parent_item, [module_name, "None", "None", "None"]) if parent_module: current_module = parent_module + "." + module_name else: current_module = module_name add_tree_nodes(tree_item, child_modules, match, current_module) else: tree_item = QtWidgets.QTreeWidgetItem( parent_item, [ module_name, child_modules[0], str(child_modules[1]), str(child_modules[2])]) tree_item.setFont(0, font) def search_in_menu(menu, match): """ Recursive search in tree. The search procedure is insensitive to the cast. Parameters ---------- menu: hierachic dict (mandatory) each key is a sub module of the module. Leafs contain a list with the url to the documentation. match: str (mandatory) the string used to filter the menu items Returns ------- is_included: bool True if we found match in the tree, False otherwise. """ # Initialize the default value: match not found is_included = False # If we are on a leaf, check in the module list if isinstance(menu, list): return is_included # Go through the current module sub modules for module_name, child_modules in menu.items(): # Stop criteria if isinstance(child_modules, list): return is_included or match in module_name.lower() # Recursive search is_included = ( is_included or match in module_name.lower() or search_in_menu(child_modules, match)) # Stop criteria if is_included: return is_included return is_included
[ "PySide2.QtGui.QFont", "PySide2.QtWidgets.QTreeWidgetItem" ]
[((490, 526), 'PySide2.QtGui.QFont', 'QtGui.QFont', (['""""""', '(9)', 'QtGui.QFont.Bold'], {}), "('', 9, QtGui.QFont.Bold)\n", (501, 526), False, 'from PySide2 import QtGui\n'), ((2512, 2589), 'PySide2.QtWidgets.QTreeWidgetItem', 'QtWidgets.QTreeWidgetItem', (['parent_item', "[module_name, 'None', 'None', 'None']"], {}), "(parent_item, [module_name, 'None', 'None', 'None'])\n", (2537, 2589), False, 'from PySide2 import QtWidgets\n')]
""" Module implements simple ORM for SQLite. Module excludes using many-to-many and one-to-many relationships. Trying to save the same object (update) with another aggregated object will rewrite old object! """ import os import sqlite3 from array import array from inspect import * import builtins import sys import logging from .util import * from .demo_classes import * class Py2SQL: def __init__(self, logs_enabled=False, log_file=""): self.filename = None self.connection = None self.cursor = None def __setup_logger(self, logs_enabled: bool, log_file: str): """ Creates and returns logger. :param logs_enabled: True to enable, False to disable :param log_file: absolute path with file name of file for logging to :return: logger instance from 'logging' module """ logging.basicConfig(level=logging.DEBUG, filename=log_file, filemode="a") logger = logging.getLogger("main_logger") logger.addFilter(lambda r: bool(logs_enabled)) return logger def db_connect(self, db_filepath: str) -> None: """ Connect to the database in given path :type db_filepath: str :param db_filepath: path to the database file :return: None """ self.filename = db_filepath self.connection = sqlite3.connect(db_filepath) self.cursor = self.connection.cursor() def db_disconnect(self) -> None: """ Disconnect from the current database :return: None """ self.connection.close() self.filename = None self.connection = None self.cursor = None def db_engine(self) -> tuple: """ Retrieve database name and version :rtype: tuple :return: database name and version tuple """ self.cursor.execute('SELECT sqlite_version();') version = self.cursor.fetchone()[0] name = self.db_name() return name, version def db_name(self) -> str: query = "PRAGMA database_list;" self.cursor.execute(query) db_info = self.cursor.fetchone() if db_info: return db_info[1] return "" def db_size(self) -> float: """ Retrieve connected database size in Mb :rtype: float :return: database size in Mb """ return os.path.getsize(self.filename) / (1024 * 
1024.0) def db_tables(self): """ Retrieve all the tables names present in database. :return: list of database tables names """ query = "SELECT tbl_name FROM sqlite_master;" self.cursor.execute(query) tables_info = self.cursor.fetchall() return list(map(lambda t: t[0], list(tables_info))) def db_table_structure(self, table_name: str) -> list: """ Retrieve ordered list of tuples of form (id, name, type) which describe given table's columns :type table_name: str :param table_name: name of the table to retrieve structure of :return: ordered list of tuples of form (id, name, type) """ return list(map(lambda x: x[:3], self.cursor.execute('PRAGMA table_info(' + table_name + ');').fetchall())) def db_table_size(self, table_name: str) -> float: """ Dynamically calculates data size stored in the table with table name provided in Mb. :table_name: table name to get size of :rtype: float :return: size of table ib Mb """ if not type(table_name) == str: raise ValueError( "str type expected as table_name. 
Got " + str(type(table_name))) q = "SELECT * FROM {}".format(table_name) try: self.cursor.execute(q) except Exception: raise Exception('No table' + table_name + ' found') rows = self.cursor.fetchall() col_names = list( map(lambda descr_tuple: descr_tuple[0], self.cursor.description)) int_size = 8 text_charsize = 2 bytes_size = 0 for r in rows: for i in range(len(r)): if r[i] is None: continue elif (col_names[i] == PY2SQL_COLUMN_ID_NAME) or (col_names[i] == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME): bytes_size += int_size elif type(r[i]) == int: bytes_size += int_size elif type(r[i]) == str: bytes_size += len(r[i]) * text_charsize else: continue return float(bytes_size / 1024 / 1024) # Python -> SQLite def save_object(self, obj) -> int: """ Save representation of given object instance into database or update it if it already exists :param obj: object instance to be saved :rtype: int :return: id of object instance that was saved """ table_name = Py2SQL.__get_object_table_name(obj) # print('saving', obj, 'to', table_name, 'id:', id(obj)) if not self.__table_exists(table_name): self.__create_table(type(obj)) else: self.__update_table(type(obj)) if not Py2SQL.__is_of_primitive_type(obj): # object values = [] self.__add_object_attrs_columns(obj, table_name) columns = self.__get_object_bound_columns(table_name).split(', ') for col in columns[:]: if not Py2SQL.__has_attr_for_column(obj, col): columns.remove(col) continue if col == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME: values.append(id(obj)) continue attr_value = Py2SQL.__get_attr_for_column(obj, col) if isclass(attr_value): continue values.append(self.__get_sqlite_repr(attr_value)) else: columns = self.__get_object_bound_columns(table_name).split(', ') values = (id(obj), self.__get_sqlite_repr(obj)) obj_pk = self.__get_pk_if_exists(obj) if obj_pk: query = 'UPDATE {} SET {} WHERE {} = ?'.format( table_name, ', '.join(['{} = ?'.format(c) for c in columns]), PY2SQL_COLUMN_ID_NAME ) params = (*values, obj_pk) # print(query, params) 
self.cursor.execute(query, params) self.connection.commit() return obj_pk query = 'INSERT INTO {}({}) VALUES ({});'.format( table_name, ', '.join(columns), ('?,' * len(values))[:-1] ) # print(query, values) try: self.cursor.execute(query, values) except sqlite3.OperationalError: self.cursor.execute( 'ALTER TABLE {} ADD COLUMN {} TEXT'.format( table_name, PY2SQL_PRIMITIVE_TYPES_VALUE_COLUMN_NAME) ) columns = self.__get_object_bound_columns(table_name) query = 'INSERT INTO {}({}) VALUES ({});'.format( table_name, columns, ('?,' * len(values))[:-1] ) self.cursor.execute(query, values) self.connection.commit() return self.__get_last_inserted_id() @staticmethod def __get_attr_for_column(obj, column_name): """ Retrieve attribute of an object corresponding to the given column name :param obj: object to get attribute of :param column_name: column name corresponding to desired attribute :return: attribute of an object corresponding to the given column name """ if column_name == PY2SQL_PRIMITIVE_TYPES_VALUE_COLUMN_NAME and Py2SQL.__is_of_primitive_type(obj): return str(obj) return getattr(obj, Py2SQL.__object_column_name_to_attr_name(column_name)) @staticmethod def __has_attr_for_column(obj, column_name): """ Check if object still has attribute corresponding to given column name :param obj: object to check for :param column_name: column name to check for :return: True if object has attribute corresponding to given column name, False otherwise """ if column_name == PY2SQL_PRIMITIVE_TYPES_VALUE_COLUMN_NAME and Py2SQL.__is_of_primitive_type(obj): return True if column_name == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME: return True if isclass(getattr(obj, Py2SQL.__object_column_name_to_attr_name(column_name), type)): return False return hasattr(obj, Py2SQL.__object_column_name_to_attr_name(column_name)) @staticmethod def __object_column_name_to_attr_name(column_name): """ Retrieve name of object's attribute corresponding to given column name :param column_name: column name to get 
attribute name for :return: name of object's attribute corresponding to given column name """ attr_name = column_name.replace(PY2SQL_SEPARATOR, '').replace(PY2SQL_OBJECT_ATTR_PREFIX, '') \ .replace(PY2SQL_OBJECT_METHOD_PREFIX, '') return attr_name def __get_pk_if_exists(self, obj): """ Retrieve primary key of given object from corresponding table :param obj: obj to get primary key of if it exists in corresponding table :rtype: int or None :return: primary key of object if it is in the table, otherwise None """ table_name = Py2SQL.__get_object_table_name(obj) existed_id = self.cursor.execute( 'SELECT {} FROM {} WHERE {} = ?'.format( PY2SQL_COLUMN_ID_NAME, table_name, PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME ), (str(id(obj)),) ).fetchone() if existed_id: return existed_id[0] return None def __get_last_inserted_id(self): """ Retrieve last id inserted into the database :rtype: int :return: last id inserted into the database """ return self.cursor.execute('SELECT last_insert_rowid()').fetchone()[0] @staticmethod def __get_object_column_name(attr_name: str, attr_value): """ Retrieve name of the column responsible for storing given object instance attribute :type attr_name: str :param attr_name: name of the object instance attribute to get the column name :return: name of the column responsible for storing given attribute """ if isfunction(attr_value) or ismethod(attr_value): return PY2SQL_OBJECT_METHOD_PREFIX + PY2SQL_SEPARATOR + attr_name return PY2SQL_OBJECT_ATTR_PREFIX + PY2SQL_SEPARATOR + attr_name @staticmethod def __get_class_column_name(attr_name: str, attr_value) -> str: """ Retrieve name of the column responsible for storing given class instance attribute :type attr_name: str :param attr_name: name of the class instance attribute to get the column name :param attr_value: value of the class instance attribute to get the column name :rtype: str :return: name of the column responsible for storing given attribute """ if isfunction(attr_value) or ismethod(attr_value): 
return PY2SQL_CLASS_METHOD_PREFIX + PY2SQL_SEPARATOR + attr_name return PY2SQL_CLASS_ATTR_PREFIX + PY2SQL_SEPARATOR + attr_name @staticmethod def __get_association_reference(obj, ref_id): """ Retrieve association reference string for a given object instance and its primary key i.e. a string that represents association relationship between two objects :param obj: object instance to get the association reference for :param ref_id: primary key of object instance to be referenced in the corresponding table :rtype: str :return: association reference string """ return PY2SQL_ASSOCIATION_REFERENCE_PREFIX + PY2SQL_SEPARATOR + Py2SQL.__get_object_table_name(obj) + \ PY2SQL_SEPARATOR + str(ref_id) @staticmethod def __get_base_class_table_reference_name(cls) -> str: """ Retrieve base class reference string for a given class instance i.e. a string that represents inheritance relationship between two classes :param cls: class instance to get base class table reference for :rtype: str :return: base class table reference string """ return PY2SQL_BASE_CLASS_REFERENCE_PREFIX + PY2SQL_SEPARATOR + Py2SQL.__get_class_table_name(cls) @staticmethod def __is_magic_attr(attr_name: str) -> bool: """ Defines is given attribute name is built-in magic attribute name :param attr_name: :return: bool """ return attr_name.startswith("__") and attr_name.endswith("__") def __get_sqlite_repr(self, obj) -> str or None: """ Retrieve SQLite representation of given object All primitives are represented by respective type copy constructor call string with the actual value passed, so that object instances of primitive types can be easily recreated from the database via eval() function Composite objects are represented by association reference strings, whereas functions are represented with their source code :param obj: object to be represented in SQLite database :rtype: str or None :return: sqlite representation of an object to be stored in the respective database table """ if obj is None: result = None 
elif type(obj) == array: result = '{}("{}", {})'.format( type(obj).__name__, obj.typecode, list(obj)) elif type(obj) == frozenset: result = str(obj) elif type(obj) == str: result = '{}("{}")'.format(type(obj).__name__, obj) elif Py2SQL.__is_of_primitive_type(obj): result = '{}({})'.format(type(obj).__name__, obj) elif isfunction(obj) or ismethod(obj): result = getsource(obj) else: # object if obj.__dict__: result = Py2SQL.__get_association_reference( obj, self.save_object(obj)) else: result = str(obj) if result is not None: return result.replace("'", '"') @staticmethod def __is_of_primitive_type(obj) -> bool: """ Check whether given object is of primitive type i.e. is represented by a single field in SQLite database, thus can be embedded into 'composite' objects :param obj: object instance to be type-checked :rtype: bool :return: True if object is of primitive type, False otherwise """ return Py2SQL.__is_primitive_type(type(obj)) or not hasattr(obj, '__dict__') @staticmethod def __is_primitive_type(cls): """ Checks if input class object belongs to primitive built-in types :param cls: class instance to check :rtype: bool :return: True if class is primitive type, False otherwise """ return cls in (int, float, str, bool, dict, tuple, list, set, frozenset, array) or isbuiltin(cls) @staticmethod def __get_object_table_name(obj) -> str: """ Retrieve name of the table which should store objects of the same type as given one :param obj: object to build respective table name from :rtype: str :return: name of table to store object in """ return Py2SQL.__get_class_table_name(type(obj)) @staticmethod def __get_class_name_by_table_name(table_name: str) -> tuple: """ Parses given table name to find out name of class this table was created for :param table_name: table name of class to get name of :return: tuple (<full_module_name>, <class_name>) """ divider = '$' ind = table_name.rfind(divider) module = table_name[:ind].replace(divider, ".") class_name = table_name[ind + 1:] 
return module, class_name @staticmethod def __get_attribute_name(self, tbl_name, col_name) -> str: """ DO NOT USE :param tbl_name: table the column taken from :param col_name: column name :return: """ cls = Py2SQL.__get_class_object_by_table_name(tbl_name) attr_name = "" if Py2SQL.__is_primitive_type(cls): pass else: pass # PY2SQL_PRIMITIVE_TYPES_VALUE_COLUMN_NAME # PY2SQL_OBJECT_ATTR_PREFIX + PY2SQL_SEPARATOR # todo return attr_name @staticmethod def __get_class_object_by_table_name(tbl_name): """ Returns class object of corresponding tbl name or raise an Exception :param tbl_name: table name to get corresponding class object of :return: class object """ module_nm, cls_nm = Py2SQL.__get_class_name_by_table_name(tbl_name) cls_obj = None try: cls_obj = getattr(sys.modules[module_nm], cls_nm) except (AttributeError, KeyError) as e: msg = 'No such class: ' + module_nm + "." + cls_nm raise Exception(msg) except Exception: raise Exception('Unpredictable error') return cls_obj @staticmethod def __get_class_table_name(cls) -> str: """ Retrieve name of the database table used to represent given class :param cls: class instance to get table name for :rtype: str :return: name of the table that represents given class """ prefix = cls.__module__.replace(".", "$") + "$" if Py2SQL.__is_of_primitive_type(cls): return prefix + cls.__name__ return prefix + cls.__name__ def __table_exists(self, table_name): """ Check if table with table name exists in database :param table_name: table name :return: bool, exists or not """ for tbl_name in self.db_tables(): if tbl_name == table_name: return True return False def __add_object_attrs_columns(self, obj, table_name): """ Add columns representing attributes of given object instance to the table with given name :param obj: object to add attributes of to the table :param table_name: name of the table to add columns into :return: None """ for attr_name, attr_value in obj.__dict__.items(): if isclass(attr_value): continue try: 
self.cursor.execute( 'ALTER TABLE {} ADD COLUMN {} TEXT'.format( table_name, Py2SQL.__get_object_column_name(attr_name, attr_value) ) ) except sqlite3.OperationalError: # column already exists pass @staticmethod def __get_data_fields(cls_obj): """ Retrieves from class object data field names. Not includes magic attributes and functions (methods) :param cls_obj: :return: list of two-element tuples containing data field name and value respectively """ return [(k, v) for k, v in cls_obj.__dict__.items() if not Py2SQL.__is_magic_attr(k) and PY2SQL_ID_NAME != k] def __table_is_empty(self, table_name) -> bool: """ Check if table is empty :param table_name: name of the table to check :rtype: bool :return: True if table is empty, False otherwise """ return self.cursor.execute('SELECT count(*) FROM {}'.format(table_name)).fetchone()[0] == 0 def __get_object_bound_columns(self, table_name) -> str: """ Retrieve comma separated list of object bound column names as string :param table_name: name of the table to get columns bound to object instances from :rtype: str :return: comma separated list of object bound column names """ columns = ', '.join([column_name for _, column_name, _ in self.db_table_structure(table_name) if Py2SQL.__is_object_bound_column(column_name)]) return columns @staticmethod def __is_object_bound_column(column_name): """ Check if column is object bound attribute or method :param column_name: column name to be checked :return: True if column is object bound, False otherwise """ return column_name.startswith(PY2SQL_OBJECT_ATTR_PREFIX) or \ column_name.startswith(PY2SQL_OBJECT_METHOD_PREFIX) or \ column_name == PY2SQL_PRIMITIVE_TYPES_VALUE_COLUMN_NAME or \ column_name == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME @staticmethod def __get_columns_to_be_modified(old_columns, new_columns): """ Retrieve columns to be deleted from the table during update, as well as columns to be added :param old_columns: columns that were stored in the table prior to the class update 
call :param new_columns: class columns to be added through class update call :return: two-element tuple: column names to be deleted, column names to be added """ old_columns = [col for col in old_columns if not Py2SQL.__is_object_bound_column(col) or col == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME] to_be_deleted = set(old_columns) - set(new_columns) to_be_added = set(new_columns) - set(old_columns) return to_be_deleted, to_be_added def __get_class_bound_columns_queries(self, cls, columns=None): """ Retrieve list of class bound column queries :param cls: class to retrieve column queries for :param columns: columns list which optionally extends class bound columns list :return: list of class bound column queries """ data_fields = Py2SQL.__get_data_fields(cls) base_ref_columns = ['{} REFERENCES {}(ID) DEFAULT {}'.format( Py2SQL.__get_base_class_table_reference_name(b), Py2SQL.__get_class_table_name(b), PY2SQL_DEFAULT_CLASS_BOUND_ROW_ID ) for b in cls.__bases__ if b != object and (columns is None or Py2SQL.__get_base_class_table_reference_name(b) in columns)] class_bound_columns = ['{} TEXT DEFAULT \'{}\''.format( Py2SQL.__get_class_column_name(k, v), self.__get_sqlite_repr(v) ) for k, v in data_fields if not type(v) == cls # prevent undesired recursion and (columns is None or Py2SQL.__get_class_column_name(k, v) in columns)] if not columns: columns = [] object_bound_columns = ['{} TEXT'.format(c) for c in columns if Py2SQL.__is_object_bound_column(c) and not c == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME] return base_ref_columns + class_bound_columns + object_bound_columns @staticmethod def __get_class_bound_columns(cls) -> list: """ Retrieve list of class bound column names :param cls: class to retrieve column names for :return: list of class bound column names """ data_fields = Py2SQL.__get_data_fields(cls) base_ref_columns = [Py2SQL.__get_base_class_table_reference_name( b) for b in cls.__bases__ if b != object] # prevent undesired recursion attr_columns = 
[Py2SQL.__get_class_column_name( k, v) for k, v in data_fields if not type(v) == cls] return [PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME] + base_ref_columns + attr_columns def __get_columns(self, table_name): return [column_name for _, column_name, _ in self.db_table_structure(table_name) if not column_name == PY2SQL_COLUMN_ID_NAME] def __update_table(self, cls): """ Updates table of class cls :param cls: :return: None """ table_name = Py2SQL.__get_class_table_name(cls) old_columns = self.__get_columns(table_name) new_columns = self.__get_class_bound_columns(cls) to_be_deleted, to_be_added = Py2SQL.__get_columns_to_be_modified( old_columns, new_columns) if not to_be_deleted and not to_be_added: return columns = (set(old_columns) - set(to_be_deleted)) | set(to_be_added) self.cursor.execute( 'ALTER TABLE {} RENAME TO {}$backup;'.format(table_name, table_name)) self.__create_table(cls, columns) columns_query = ', '.join(columns - set(to_be_added)) query = 'INSERT INTO {}({}) SELECT {} FROM {}$backup WHERE {} <> ?;'.format( table_name, columns_query, columns_query, table_name, PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME) # print(query) self.cursor.execute( query, (PY2SQL_DEFAULT_CLASS_BOUND_ROW_ID,) ) self.cursor.execute('DROP TABLE {}$backup;'.format(table_name)) self.connection.commit() def __create_table(self, cls, columns=None) -> str: """ Create SQLite table representation for given class instance :param cls: class instance to create SQLite table representation for :rtype: str :return: name of the table created """ table_name = self.__get_class_table_name(cls) query_start = 'CREATE TABLE IF NOT EXISTS {} ({} INTEGER PRIMARY KEY AUTOINCREMENT, {} {}' \ .format(table_name, PY2SQL_COLUMN_ID_NAME, PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME, PY2SQL_OBJECT_PYTHON_ID_COLUMN_TYPE ) if self.__is_primitive_type(cls): query = query_start + \ ', {} TEXT)'.format(PY2SQL_PRIMITIVE_TYPES_VALUE_COLUMN_NAME) else: columns = self.__get_class_bound_columns_queries(cls, columns) columns_query = ', 
'.join(columns) if columns_query: columns_query = ', ' + columns_query query = query_start + ' ' + columns_query + ')' # print(query) self.cursor.execute(query) if not self.__is_primitive_type(cls): if self.__table_is_empty(table_name): self.cursor.execute( 'INSERT INTO {} DEFAULT VALUES'.format(table_name)) self.connection.commit() return table_name def save_class(self, cls) -> None: """ Save given class instance's representation into database or update it if it already exists Creates or updates tables structure to represent class object :param cls: class instance to be saved :return: None """ table_name = Py2SQL.__get_class_table_name(cls) if not self.__table_exists(table_name): self.__create_table(cls) for base in cls.__bases__: if not base == object: self.__create_table(base) if not self.__is_primitive_type(cls): self.__update_table(cls) self.connection.commit() def save_hierarchy(self, root_class) -> None: """ Saves all classes derived from root_class and classes these classes depends on :param root_class: Base class to save with all derived classes :return: None """ self.save_class(root_class) subclasses = root_class.__subclasses__() if len(subclasses) == 0: return for c in subclasses: self.save_hierarchy(c) def delete_object(self, obj) -> None: """ Delete given object instance's representation from database if it already existed :param obj: object instance to be deleted :return: None """ table_name = Py2SQL.__get_object_table_name(obj) self.cursor.execute( 'DELETE FROM {} WHERE {} = ?;'.format( table_name, PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME), (id(obj),) ) if not Py2SQL.__is_of_primitive_type(obj): # object for value in obj.__dict__.values(): if not Py2SQL.__is_of_primitive_type(value) and isclass(value): self.delete_object(value) # cascade delete self.connection.commit() def delete_class(self, cls) -> None: """ Delete given class instance's representation from database if it already existed. Drops corresponding table. 
:param cls: object instance to be delete :return: None """ tbl_name = Py2SQL.__get_class_table_name(cls) query = "DROP TABLE IF EXISTS {}".format(tbl_name) self.cursor.execute(query) self.connection.commit() def delete_hierarchy(self, root_class) -> None: """ Deletes root_class representation from database with all derived classes. Drops class corresponding table and all derived classes corresponding tables. :param root_class: Class which representation to be deleted with all derived classes :return: None """ # consider foreign key constraints! todo self.delete_class(root_class) subclasses = root_class.__subclasses__() if len(subclasses) == 0: return for c in subclasses: self.delete_hierarchy(c) def __redefine_id_function(self, my_id): """ Replace id() global function so that it returns my_id To cancel effect of this func call __reset_id_function() method. Use carefully. Reflection used. :param my_id: value to be returned after id() call :return: my_id """ def id(ob): return my_id globals()['id'] = id def __reset_id_function(self) -> None: """ Sets global module attribute 'id' to built-in python id() function Use carefully. Reflection used. """ globals()['id'] = builtins.id def __redefine_pyid_col_name(self) -> None: """ Replaces some constant values from util module. To cancel effect of func call use __reset_pyid_col_name Use carefully. Reflection used. """ global PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME = str(PY2SQL_COLUMN_ID_NAME) def __reset_pyid_col_name(self) -> None: """ Cancels the effect of __redefine_pyid_col_name method. Use carefully. Reflection used. """ global PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME = getattr( sys.modules['util'], 'PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME') def save_object_with_update(self, obj): """ Inserts or updates obj related data by ID provided. Obj expected to be ModelPy2SQL instance object. If so, row is updated if provided ID exists, and fails otherwise. 
If not - object will be inserted or updated as provided :param obj: object to be saved or updated in db :return: object of type util.ModelPy2SQL """ w = None if type(obj) != ModelPy2SQL: new_id = self.save_object(obj) w = ModelPy2SQL(obj, new_id) else: tbl_nm = Py2SQL.__get_object_table_name(obj.obj) q = "SELECT * FROM {} WHERE {}={}" \ .format(tbl_nm, PY2SQL_COLUMN_ID_NAME, obj.get_id()) self.cursor.execute(q) rows = self.cursor.fetchall() if len(rows) == 0: mes = "No " + str(obj.obj.__class__.__name__) + " instance objects in " + tbl_nm + " with id: " + str( obj.get_id()) raise Exception(mes) self.__redefine_id_function(obj.get_id()) self.__redefine_pyid_col_name() self.save_object(obj.obj) self.__reset_pyid_col_name() self.__reset_id_function() w = obj return w def __get_columns_names(self, table_name) -> list: """ Retrieves from database table columns name for table with name provided :param table_name: table name :rtype: list :return: columns names """ self.cursor.execute('PRAGMA table_info({})'.format(table_name)) rows = self.cursor.fetchall() return list(map(lambda t: t[1], list(rows))) @staticmethod def __get_tbl_nm_and_id_assoc(association_ref_value: str) -> tuple: """ Retrieves from given string table name find references on and row id :param association_ref_value: :return: table name, id :rtype: tuple """ tbl_name = association_ref_value[ association_ref_value.find(PY2SQL_SEPARATOR) + 1: association_ref_value.rfind(PY2SQL_SEPARATOR)] id_ = int( association_ref_value[association_ref_value.rfind(PY2SQL_SEPARATOR) + 1:]) return tbl_name, id_ def get_object_by_id(self, table_name: str, id_: int, parent_obj=None) -> tuple: """ Retrieves the object related data from table with table name and converts it into the object. 
:param table_name: table name tp represent object :param id_: row id was given to the object as it was inserted :param parent_obj: do not use this param externally """ ob = None py_id, db_id = -1, -1 try: cls_o = Py2SQL.__get_class_object_by_table_name(table_name) obj = cls_o.__new__(cls_o) cols_names = self.__get_columns_names(table_name) q = "SELECT * FROM {} WHERE {}={}".format( table_name, PY2SQL_COLUMN_ID_NAME, id_) self.cursor.execute(q) row = self.cursor.fetchone() if Py2SQL.__is_primitive_type(cls_o): for i in range(len(row)): if cols_names[i] == PY2SQL_COLUMN_ID_NAME: db_id = row[i] elif cols_names[i] == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME: py_id = row[i] elif cols_names[i] == PY2SQL_PRIMITIVE_TYPES_VALUE_COLUMN_NAME: ob = cls_o(eval(row[i])) else: if parent_obj is not None: obj = parent_obj for i in range(len(row)): if cols_names[i] == PY2SQL_COLUMN_ID_NAME: db_id = row[i] elif cols_names[i] == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME: py_id = row[i] elif cols_names[i].startswith(PY2SQL_BASE_CLASS_REFERENCE_PREFIX): ref_tbl_name = cols_names[i][cols_names[i].rfind( PY2SQL_SEPARATOR) + 1:] ref_id = int(row[i]) self.get_object_by_id(ref_tbl_name, ref_id, obj) elif cols_names[i].startswith(PY2SQL_OBJECT_ATTR_PREFIX): attr_real_name = cols_names[cols_names.rfind( PY2SQL_SEPARATOR) + 1:] if row[i].startswith(PY2SQL_ASSOCIATION_REFERENCE_PREFIX): tbl_nm, prm_id = Py2SQL.__get_tbl_nm_and_id_assoc( row[i]) if attr_real_name.startswith("__"): attr_mdf = "_" + cls_o.__name__ + attr_real_name setattr(obj, attr_mdf, self.get_object_by_id( tbl_nm, prm_id)[0]) else: setattr(obj, attr_real_name, self.get_object_by_id( tbl_nm, prm_id)[0]) else: if attr_real_name.startswith("__"): attr_mdf = "_" + cls_o.__name__ + attr_real_name setattr(obj, attr_mdf, row[i]) else: setattr(obj, attr_real_name, row[i]) ob = obj except Exception: print("exc") return ob, db_id, py_id
[ "logging.basicConfig", "sqlite3.connect", "os.path.getsize", "logging.getLogger" ]
[((864, 937), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'filename': 'log_file', 'filemode': '"""a"""'}), "(level=logging.DEBUG, filename=log_file, filemode='a')\n", (883, 937), False, 'import logging\n'), ((983, 1015), 'logging.getLogger', 'logging.getLogger', (['"""main_logger"""'], {}), "('main_logger')\n", (1000, 1015), False, 'import logging\n'), ((1386, 1414), 'sqlite3.connect', 'sqlite3.connect', (['db_filepath'], {}), '(db_filepath)\n', (1401, 1414), False, 'import sqlite3\n'), ((2439, 2469), 'os.path.getsize', 'os.path.getsize', (['self.filename'], {}), '(self.filename)\n', (2454, 2469), False, 'import os\n')]
from setuptools import setup, find_packages with open("README.md", "r") as fh: long_description = fh.read() setup( name="algorithms", version="0.1", description="Implements a few optimisation algorithms", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/chichilele/algorithms", packages=find_packages(), entry_points={"console_scripts": ["root_finding=algorithms.root_finding:cli"]}, )
[ "setuptools.find_packages" ]
[((379, 394), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (392, 394), False, 'from setuptools import setup, find_packages\n')]
import pandas as pd import dimensionality_reduction_functions as dim_red from plotting_functions import colored_line_plot, colored_line_and_scatter_plot, colored_line_plot_projected_data # Number of PCA components ndim = 3 ####################################### EXAMPLE 4: CYCLOPROPYLIDENE BIFURCATION ######################################## # Inputs file = './examples/bifurcation/bifur_IRC.xyz' stereo_atoms_B = [3, 4, 5, 7] # "New Files" to test transforming trajectories into already generated reduced dimensional space new_file1 = './examples/bifurcation/bifur_traj1.xyz' new_file2 = './examples/bifurcation/bifur_traj2.xyz' new_file3 = './examples/bifurcation/bifur_traj3.xyz' new_file4 = './examples/bifurcation/bifur_traj4.xyz' # DISTANCES INPUT system_name1, direc1, D_pca, D_pca_fit, D_pca_components, D_mean, D_values, traj_lengths1, aligned_original_coords = \ dim_red.pathreducer( file, ndim, stereo_atoms=stereo_atoms_B, input_type="Distances") # Transforming new data into RD space new_data_df1 = dim_red.transform_new_data(new_file1, direc1 + "/new_data", ndim, D_pca_fit, D_pca_components, D_mean, aligned_original_coords, stereo_atoms=stereo_atoms_B, input_type="Distances")[1] # new_data_df2 = dim_red.transform_new_data(new_file2, direc1 + "/new_data", ndim, D_pca_fit, D_pca_components, D_mean, # aligned_original_coords, stereo_atoms=stereo_atoms_B, input_type="Distances")[1] # new_data_df3 = dim_red.transform_new_data(new_file3, direc1 + "/new_data", ndim, D_pca_fit, D_pca_components, D_mean, # aligned_original_coords, stereo_atoms=stereo_atoms_B, input_type="Distances")[1] # new_data_df4 = dim_red.transform_new_data(new_file4, direc1 + "/new_data", ndim, D_pca_fit, D_pca_components, D_mean, # aligned_original_coords, stereo_atoms=stereo_atoms_B, input_type="Distances")[1] # Plotting # DISTANCES INPUT D_pca_df = pd.DataFrame(D_pca) D_pca_df1 = D_pca_df[0:183] D_pca_df2 = D_pca_df.drop(D_pca_df.index[106:184], axis=0) # Figure 14 
colored_line_and_scatter_plot(D_pca_df1[0], y=D_pca_df1[1], y1=D_pca_df1[2], x2=D_pca_df2[0], y2=D_pca_df2[1], y12=D_pca_df2[2], output_directory=direc1, imgname=(system_name1 + "_Distances_noMW")) # figures 15 A-D aber ohne MD trajektorie colored_line_plot_projected_data(D_pca_df1[0], y=D_pca_df1[1], z=D_pca_df1[2], x2=D_pca_df2[0], y2=D_pca_df2[1], z2=D_pca_df2[2], same_axis=False, new_data_x=new_data_df1[0], new_data_y=new_data_df1[1], new_data_z=new_data_df1[2], output_directory=direc1 + "/new_data", imgname=(system_name1 + "_Distances_noMW_traj1_D")) # colored_line_plot_projected_data(D_pca_df1[0], y=D_pca_df1[1], z=D_pca_df1[2], x2=D_pca_df2[0], y2=D_pca_df2[1], z2=D_pca_df2[2], # same_axis=False, new_data_x=new_data_df2[0], new_data_y=new_data_df2[1], new_data_z=new_data_df2[2], output_directory=direc1 + "/new_data", # imgname=(system_name1 + "_Distances_noMW_traj2_A")) # colored_line_plot_projected_data(D_pca_df1[0], y=D_pca_df1[1], z=D_pca_df1[2], x2=D_pca_df2[0], y2=D_pca_df2[1], z2=D_pca_df2[2], # same_axis=False, new_data_x=new_data_df3[0], new_data_y=new_data_df3[1], new_data_z=new_data_df3[2], output_directory=direc1 + "/new_data", # imgname=(system_name1 + "_Distances_noMW_traj3_B")) # colored_line_plot_projected_data(D_pca_df1[0], y=D_pca_df1[1], z=D_pca_df1[2], x2=D_pca_df2[0], y2=D_pca_df2[1], z2=D_pca_df2[2], # same_axis=False, new_data_x=new_data_df4[0], new_data_y=new_data_df4[1], new_data_z=new_data_df4[2], output_directory=direc1 + "/new_data", # imgname=(system_name1 + "_Distances_noMW_traj4_C"))
[ "dimensionality_reduction_functions.pathreducer", "dimensionality_reduction_functions.transform_new_data", "plotting_functions.colored_line_and_scatter_plot", "pandas.DataFrame", "plotting_functions.colored_line_plot_projected_data" ]
[((882, 971), 'dimensionality_reduction_functions.pathreducer', 'dim_red.pathreducer', (['file', 'ndim'], {'stereo_atoms': 'stereo_atoms_B', 'input_type': '"""Distances"""'}), "(file, ndim, stereo_atoms=stereo_atoms_B, input_type=\n 'Distances')\n", (901, 971), True, 'import dimensionality_reduction_functions as dim_red\n'), ((2030, 2049), 'pandas.DataFrame', 'pd.DataFrame', (['D_pca'], {}), '(D_pca)\n', (2042, 2049), True, 'import pandas as pd\n'), ((2149, 2353), 'plotting_functions.colored_line_and_scatter_plot', 'colored_line_and_scatter_plot', (['D_pca_df1[0]'], {'y': 'D_pca_df1[1]', 'y1': 'D_pca_df1[2]', 'x2': 'D_pca_df2[0]', 'y2': 'D_pca_df2[1]', 'y12': 'D_pca_df2[2]', 'output_directory': 'direc1', 'imgname': "(system_name1 + '_Distances_noMW')"}), "(D_pca_df1[0], y=D_pca_df1[1], y1=D_pca_df1[2],\n x2=D_pca_df2[0], y2=D_pca_df2[1], y12=D_pca_df2[2], output_directory=\n direc1, imgname=system_name1 + '_Distances_noMW')\n", (2178, 2353), False, 'from plotting_functions import colored_line_plot, colored_line_and_scatter_plot, colored_line_plot_projected_data\n'), ((2419, 2757), 'plotting_functions.colored_line_plot_projected_data', 'colored_line_plot_projected_data', (['D_pca_df1[0]'], {'y': 'D_pca_df1[1]', 'z': 'D_pca_df1[2]', 'x2': 'D_pca_df2[0]', 'y2': 'D_pca_df2[1]', 'z2': 'D_pca_df2[2]', 'same_axis': '(False)', 'new_data_x': 'new_data_df1[0]', 'new_data_y': 'new_data_df1[1]', 'new_data_z': 'new_data_df1[2]', 'output_directory': "(direc1 + '/new_data')", 'imgname': "(system_name1 + '_Distances_noMW_traj1_D')"}), "(D_pca_df1[0], y=D_pca_df1[1], z=D_pca_df1[\n 2], x2=D_pca_df2[0], y2=D_pca_df2[1], z2=D_pca_df2[2], same_axis=False,\n new_data_x=new_data_df1[0], new_data_y=new_data_df1[1], new_data_z=\n new_data_df1[2], output_directory=direc1 + '/new_data', imgname=\n system_name1 + '_Distances_noMW_traj1_D')\n", (2451, 2757), False, 'from plotting_functions import colored_line_plot, colored_line_and_scatter_plot, colored_line_plot_projected_data\n'), ((1030, 
1219), 'dimensionality_reduction_functions.transform_new_data', 'dim_red.transform_new_data', (['new_file1', "(direc1 + '/new_data')", 'ndim', 'D_pca_fit', 'D_pca_components', 'D_mean', 'aligned_original_coords'], {'stereo_atoms': 'stereo_atoms_B', 'input_type': '"""Distances"""'}), "(new_file1, direc1 + '/new_data', ndim, D_pca_fit,\n D_pca_components, D_mean, aligned_original_coords, stereo_atoms=\n stereo_atoms_B, input_type='Distances')\n", (1056, 1219), True, 'import dimensionality_reduction_functions as dim_red\n')]
''' This code compares the loc and iloc in pandas dataframe ''' __author__ = "<NAME>" __email__ = "<EMAIL>" import pandas as pd import timeit df_test = pd.DataFrame() tlist = [] tlist2 = [] ################ this code creates a dataframe df_test ################## ###############with two columns and 5000000 entries ##################### for i in range (0,50): tlist.append(i) tlist2.append(i+5) df_test['A'] = tlist df_test['B'] = tlist2 print('Original Dataframe:') print(df_test.head(5)) print("-----------------") ######################### Done creating DF ############################## ############################ iloc ####################################### print('iloc dataframe: 3rd row and 1st to 2nd column:') # since iloc ignores the last part of slice # iloc works with only numbers for columns print(df_test.iloc[2:3,0:2]) print("-----------------") print('loc dataframe: 3rd row and 1st to 2nd column:') # since loc includes the last part of slice # loc works with only column names print(df_test.loc[2:3,['A','B']]) print("-----------------") ######################### Done iloc #################################### ##########*******************************************#################### # ***** Observing loc and iloc when index is different ********** # ##########*******************************************#################### ''' Now the index is altered for dataframe which gives the actual difference between what loc and iloc varies with in terms of rows. while iloc works by checking index number and counting from start, loc works by checking where the index label comes. eg. 
index: (4,5,6,1,2), iloc considers 2 index at 2nd position whereas loc considers it at 5th position ''' ############################### changing index ########################## as_list = df_test.index.tolist() print(as_list[3:7]) as_list[0:5] = [63,64,65,66,67] for i in range(5,len(as_list)): as_list[i] = as_list[i]-5 df_test.index = as_list ######################################################################## print('-----------------Dataframe after index updated -------------- ') print(df_test.head(10)) print('-------------- iloc dataframe with updated index-------------') print(df_test.iloc[:7]) # iloc watches for 7 index counts from start print('-------------- loc dataframe with updated index-------------') print(df_test.loc[:7]) # loc watches for index=7 where it appears
[ "pandas.DataFrame" ]
[((156, 170), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (168, 170), True, 'import pandas as pd\n')]
from functools import total_ordering from random import shuffle class Player: def __init__(self, name): self.name = name self.hand = [] def __str__(self): return self.name def play(self): return self.hand.pop() def receive(self, cards): for card in cards: self.hand.insert(0, card) def is_hand_empty(self): return not self.hand FACE = ("Jack", "Queen", "King", "Ace") SUIT = ("Club", "Spade", "Diamond", "Heart") @total_ordering class Card: def __init__(self, suit, value): self.suit = suit self.value = value def __str__(self): return f"{self.value} of {self.suit}" def __lt__(self, other): return self.value < other.value def __eq__(self, other): return self.value == other.value class FaceCard(Card): def __init__(self, suit, face): value = FACE.index(face) + 11 super().__init__(suit, value) self.face = face def __str__(self): return f"{self.face} of {self.suit}" class Deck: def __init__(self): self.cards = [] for suit in SUIT: for i in range(2, 11): self.cards.append(Card(suit, i)) for face in FACE: self.cards.append(FaceCard(suit, face)) shuffle(self.cards) def deal(self, players): while self.cards: for player in players: card = self.cards.pop() player.receive([card]) if not self.cards: return class Game: def __init__(self, name_1, name_2): self.player_1 = Player(name_1) self.player_2 = Player(name_2) deck = Deck() deck.deal([self.player_1, self.player_2]) def is_game_over(self): return self.player_1.is_hand_empty() or self.player_2.is_hand_empty() def play(self): previous_hands = [] total_hands = 0 while not self.is_game_over(): c1 = self.player_1.play() c2 = self.player_2.play() if c1 < c2: # player 2 is the winner self.player_2.receive([c1, c2] + previous_hands) previous_hands = [] elif c1 > c2: self.player_1.receive([c1, c2] + previous_hands) previous_hands = [] else: previous_hands.extend([c1, c2]) for i in range(3): if not self.is_game_over(): previous_hands.append(self.player_1.play()) previous_hands.append(self.player_2.play()) total_hands += 1 if 
self.player_1.is_hand_empty(): print(f"Player {self.player_2} is the winner in {total_hands} hands.") else: print(f"Player {self.player_1} is the winner in {total_hands} hands.")
[ "random.shuffle" ]
[((1331, 1350), 'random.shuffle', 'shuffle', (['self.cards'], {}), '(self.cards)\n', (1338, 1350), False, 'from random import shuffle\n')]
import numpy as np import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator cz2 = (0.7, 0.7, 0.7) cz = (0.3, 0.3, 0.3) cy = (0.7, 0.4, 0.12) ci = (0.1, 0.3, 0.5) ct = (0.7, 0.2, 0.1) ax = plt.figure(figsize=(5,4)).gca() ax.xaxis.set_major_locator(MaxNLocator(integer=True)) ax.yaxis.grid(True) ax.set_ylim([40,75]) plt.yticks(list(range(40,80,10)),[str(i) for i in range(40,80,10)]) ax.set_title('Test') ax.set_xlabel('Context Level $K$') ax.set_ylabel('Micro F$_1$ Score (%)') y=[62.1,58.5,70.1,68.4] x=[0,1,2,3] bt, = ax.plot(x,y, '--', label='BERT Test', marker='^') y=[54.0,64.0,72.2,66.9] x=[0,1,2,3] cat, = ax.plot(x,y, '-.', label='C. Attn. Test', marker='^') y=[69.3, 66.4, 72.7, 68.8] x=[0,1,2,3] cet, = ax.plot(x,y, '-.', label='C. Emb. Test', marker='^') y=[54.6,62.1,69.0,69.9] x=[0,1,2,3] mat, = ax.plot(x,y, '-', label='Mask$_{AVG}$ Test', marker='o') y=[62.0,64.0,72.6,71.1] x=[0,1,2,3] mmt, = ax.plot(x,y, '-', label='Mask$_{MAX}$ Test', marker='o') y=[49.2, 55.4, 67.4, 58.3] x=[0,1, 2, 3] ht, = ax.plot(x,y, ':', label='HBMP Test', marker='s') plt.legend(handles=[bt, cat, cet, mat, mmt, ht]) #plt.show() plt.savefig('curvetest.png', dpi=1500)
[ "matplotlib.pyplot.figure", "matplotlib.ticker.MaxNLocator", "matplotlib.pyplot.savefig", "matplotlib.pyplot.legend" ]
[((1091, 1139), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[bt, cat, cet, mat, mmt, ht]'}), '(handles=[bt, cat, cet, mat, mmt, ht])\n', (1101, 1139), True, 'import matplotlib.pyplot as plt\n'), ((1153, 1191), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""curvetest.png"""'], {'dpi': '(1500)'}), "('curvetest.png', dpi=1500)\n", (1164, 1191), True, 'import matplotlib.pyplot as plt\n'), ((266, 291), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (277, 291), False, 'from matplotlib.ticker import MaxNLocator\n'), ((207, 233), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (217, 233), True, 'import matplotlib.pyplot as plt\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- """ DataWorkshop: application to handle data, e.g. generated from imageviewer Author: <NAME> Created: Sep. 23rd, 2015 """ from ...utils import datautils from ...utils import miscutils from ...utils import funutils from ...utils import resutils import wx import wx.lib.mixins.inspection as wit import os __version__ = miscutils.AppVersions().getVersion('dataworkshop') __author__ = "<NAME>" class InspectApp(wx.App, wit.InspectionMixin): def OnInit(self): self.Init() #configFile = os.path.expanduser("~/.felapps/config/imageviewer.xml") #if not os.path.isfile(configFile): # configFile = funutils.getFileToLoad(None, ext = 'xml') myframe = datautils.DataWorkshop(None, config=None, title=u'DataWorkshop \u2014 Data Analysis Framwork (debug mode, CTRL+ALT+I)', appversion = __version__, style = wx.DEFAULT_FRAME_STYLE) myframe.Show() myframe.SetIcon(resutils.dicon_s.GetIcon()) self.SetTopWindow(myframe) return True def run(maximize = True, logon = False, debug=True): """ function to make dataworkshop app run. """ if debug == True: app = InspectApp() app.MainLoop() else: app = wx.App(redirect=logon, filename='log') #configFile = os.path.expanduser("~/.felapps/config/imageviewer.xml") #if not os.path.isfile(configFile): # configFile = funutils.getFileToLoad(None, ext = 'xml') if maximize == True: myframe = datautils.DataWorkshop(None, config=None, title=u'DataWorkshop \u2014 Data Analysis Framwork', appversion=__version__, style=wx.DEFAULT_FRAME_STYLE) else: myframe = datautils.DataWorkshop(None, config=None, title = u'DataWorkshop \u2014 Data Analysis Framwork', appversion=__version__, style=wx.DEFAULT_FRAME_STYLE & ~(wx.RESIZE_BORDER | wx.MAXIMIZE_BOX)) myframe.Show() myframe.SetIcon(resutils.dicon_s.GetIcon()) app.MainLoop() if __name__ == '__main__': run()
[ "wx.App" ]
[((1259, 1297), 'wx.App', 'wx.App', ([], {'redirect': 'logon', 'filename': '"""log"""'}), "(redirect=logon, filename='log')\n", (1265, 1297), False, 'import wx\n')]
"""Authorization token handling.""" import logging from functools import wraps from flask import g, request from requests import get from pydantic.error_wrappers import ValidationError from bayesian.utility.user_utils import get_user, UserException, UserNotFoundException from bayesian.utility.v2.sa_models import HeaderData from bayesian.exceptions import HTTPError from f8a_utils.user_token_utils import UserStatus from .default_config import AUTH_URL logger = logging.getLogger(__name__) def get_access_token(service_name): """Return the access token for service.""" services = {'github': 'https://github.com'} url = '{auth_url}/api/token?for={service}'.format( auth_url=AUTH_URL, service=services.get(service_name)) token = request.headers.get('Authorization') headers = {"Authorization": token} try: _response = get(url, headers=headers) if _response.status_code == 200: response = _response.json() return {"access_token": response.get('access_token')} else: return {"access_token": None} except Exception: logger.error('Unable to connect to Auth service') def validate_user(view): """Validate and get user type based on UUID from the request.""" @wraps(view) def wrapper(*args, **kwargs): """Read uuid and decides user type based on its validity.""" # Rule of UUID validation and setting user status :: # ============================================================== # UUID in request | UUID in RDS | RDS User State | User Status # ============================================================== # MISSING | -- NA -- | -- NA -- | FREE # PRESENT | MISSING | -- NA -- | FREE # PRESENT | PRESENT | REGISTERED | REGISTERED # PRESENT | PRESENT | !REGISTERED | FREE # ============================================================== # By default set this to 'freetier' and uuid to None g.user_status = UserStatus.FREETIER g.uuid = None try: header_data = HeaderData(uuid=request.headers.get('uuid', None)) if header_data.uuid: g.uuid = str(header_data.uuid) user = get_user(g.uuid) g.user_status = 
UserStatus[user.status] except ValidationError as e: raise HTTPError(400, "Not a valid uuid") from e except UserNotFoundException: logger.warning("No User Found corresponding to UUID {}".format(header_data.uuid)) except UserException: logger.warning("Unable to get user status for uuid '{}'".format(header_data.uuid)) logger.debug('For UUID: %s, got user type: %s final uuid: %s', header_data.uuid, g.user_status, g.uuid) return view(*args, **kwargs) return wrapper
[ "logging.getLogger", "requests.get", "functools.wraps", "bayesian.utility.user_utils.get_user", "bayesian.exceptions.HTTPError", "flask.request.headers.get" ]
[((466, 493), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (483, 493), False, 'import logging\n'), ((757, 793), 'flask.request.headers.get', 'request.headers.get', (['"""Authorization"""'], {}), "('Authorization')\n", (776, 793), False, 'from flask import g, request\n'), ((1273, 1284), 'functools.wraps', 'wraps', (['view'], {}), '(view)\n', (1278, 1284), False, 'from functools import wraps\n'), ((862, 887), 'requests.get', 'get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (865, 887), False, 'from requests import get\n'), ((2335, 2351), 'bayesian.utility.user_utils.get_user', 'get_user', (['g.uuid'], {}), '(g.uuid)\n', (2343, 2351), False, 'from bayesian.utility.user_utils import get_user, UserException, UserNotFoundException\n'), ((2463, 2497), 'bayesian.exceptions.HTTPError', 'HTTPError', (['(400)', '"""Not a valid uuid"""'], {}), "(400, 'Not a valid uuid')\n", (2472, 2497), False, 'from bayesian.exceptions import HTTPError\n'), ((2197, 2230), 'flask.request.headers.get', 'request.headers.get', (['"""uuid"""', 'None'], {}), "('uuid', None)\n", (2216, 2230), False, 'from flask import g, request\n')]
import time from signal import pause import logging import RPi.GPIO as GPIO GPIO.setmode(GPIO.BCM) logger = logging.getLogger(__name__) map_edge_parse = {'falling':GPIO.FALLING, 'rising':GPIO.RISING, 'both':GPIO.BOTH} map_pull_parse = {'pull_up':GPIO.PUD_UP, 'pull_down':GPIO.PUD_DOWN, 'pull_off':GPIO.PUD_OFF} map_edge_print = {GPIO.FALLING: 'falling', GPIO.RISING: 'rising', GPIO.BOTH: 'both'} map_pull_print = {GPIO.PUD_UP:'pull_up', GPIO.PUD_DOWN: 'pull_down', GPIO.PUD_OFF: 'pull_off'} def parse_edge_key(edge): if edge in [GPIO.FALLING, GPIO.RISING, GPIO.BOTH]: return edge try: result = map_edge_parse[edge.lower()] except KeyError: result = edge raise KeyError('Unknown Edge type {edge}'.format(edge=edge)) return result def parse_pull_up_down(pull_up_down): if pull_up_down in [GPIO.PUD_UP, GPIO.PUD_DOWN, GPIO.PUD_OFF]: return pull_up_down try: result = map_pull_parse[pull_up_down] except KeyError: result = pull_up_down raise KeyError('Unknown Pull Up/Down type {pull_up_down}'.format(pull_up_down=pull_up_down)) return result def print_edge_key(edge): try: result = map_edge_print[edge] except KeyError: result = edge return result def print_pull_up_down(pull_up_down): try: result = map_pull_print[pull_up_down] except KeyError: result = pull_up_down return result # This function takes a holding time (fractional seconds), a channel, a GPIO state and an action reference (function). # It checks if the GPIO is in the state since the function was called. If the state # changes it return False. If the time is over the function returns True. 
def checkGpioStaysInState(holdingTime, gpioChannel, gpioHoldingState): # Get a reference start time (https://docs.python.org/3/library/time.html#time.perf_counter) startTime = time.perf_counter() # Continously check if time is not over while True: time.sleep(0.1) currentState = GPIO.input(gpioChannel) if holdingTime < (time.perf_counter() - startTime): break # Return if state does not match holding state if (gpioHoldingState != currentState): return False # Else: Wait if (gpioHoldingState != currentState): return False return True class SimpleButton: def __init__(self, pin, action=lambda *args: None, action2=lambda *args: None, name=None, bouncetime=500, antibouncehack=False, edge='falling', hold_time=.3, hold_mode=None, pull_up_down='pull_up'): self.edge = parse_edge_key(edge) self.hold_time = hold_time self.hold_mode = hold_mode self.pull_up = True self.pull_up_down = parse_pull_up_down(pull_up_down) self.pin = pin self.name = name self.bouncetime = bouncetime self.antibouncehack = antibouncehack GPIO.setup(self.pin, GPIO.IN, pull_up_down=self.pull_up_down) self._action = action self._action2 = action2 GPIO.add_event_detect(self.pin, edge=self.edge, callback=self.callbackFunctionHandler, bouncetime=self.bouncetime) self.callback_with_pin_argument = False def callbackFunctionHandler(self, *args): if len(args) > 0 and args[0] == self.pin and not self.callback_with_pin_argument: logger.debug('Remove pin argument by callbackFunctionHandler - args before: {}'.format(args)) args = args[1:] logger.debug('args after: {}'.format(args)) if self.antibouncehack: time.sleep(0.1) inval = GPIO.input(self.pin) if inval != GPIO.LOW: return None if self.hold_mode in ('Repeat', 'Postpone', 'SecondFunc', 'SecondFuncRepeat'): return self.longPressHandler(*args) else: logger.info('{}: execute callback'.format(self.name)) return self.when_pressed(*args) @property def when_pressed(self): logger.info('{}: action'.format(self.name)) return self._action @property def when_held(self): 
logger.info('{}: action2'.format(self.name)) return self._action2 @when_pressed.setter def when_pressed(self, func): logger.info('{}: set when_pressed') self._action = func GPIO.remove_event_detect(self.pin) logger.info('add new action') GPIO.add_event_detect(self.pin, edge=self.edge, callback=self.callbackFunctionHandler, bouncetime=self.bouncetime) def set_callbackFunction(self, callbackFunction): self.when_pressed = callbackFunction def longPressHandler(self, *args): logger.info('{}: longPressHandler, mode: {}'.format(self.name, self.hold_mode)) # instant action (except Postpone mode) if self.hold_mode != "Postpone": self.when_pressed(*args) # action(s) after hold_time if self.hold_mode == "Repeat": # Repeated call of main action (multiple times if button is held long enough) while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW): self.when_pressed(*args) elif self.hold_mode == "Postpone": # Postponed call of main action (once) if checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW): self.when_pressed(*args) while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW): pass elif self.hold_mode == "SecondFunc": # Call of secondary action (once) if checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW): self.when_held(*args) while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW): pass elif self.hold_mode == "SecondFuncRepeat": # Repeated call of secondary action (multiple times if button is held long enough) while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW): self.when_held(*args) def __del__(self): logger.debug('remove event detection') GPIO.remove_event_detect(self.pin) @property def is_pressed(self): if self.pull_up: return not GPIO.input(self.pin) return GPIO.input(self.pin) def __repr__(self): return '<SimpleButton-{}(pin={},edge={},hold_mode={},hold_time={},bouncetime={},antibouncehack={},pull_up_down={})>'.format( self.name, self.pin, print_edge_key(self.edge), self.hold_mode, self.hold_time, 
self.bouncetime,self.antibouncehack,print_pull_up_down(self.pull_up_down) ) if __name__ == "__main__": print('please enter pin no to test') pin = int(input()) func = lambda *args: print('FunctionCall with {}'.format(args)) btn = SimpleButton(pin=pin, action=func, hold_mode='Repeat') pause()
[ "logging.getLogger", "RPi.GPIO.add_event_detect", "RPi.GPIO.setup", "time.perf_counter", "time.sleep", "signal.pause", "RPi.GPIO.remove_event_detect", "RPi.GPIO.input", "RPi.GPIO.setmode" ]
[((76, 98), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (88, 98), True, 'import RPi.GPIO as GPIO\n'), ((109, 136), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (126, 136), False, 'import logging\n'), ((1899, 1918), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1916, 1918), False, 'import time\n'), ((6971, 6978), 'signal.pause', 'pause', ([], {}), '()\n', (6976, 6978), False, 'from signal import pause\n'), ((1987, 2002), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1997, 2002), False, 'import time\n'), ((2026, 2049), 'RPi.GPIO.input', 'GPIO.input', (['gpioChannel'], {}), '(gpioChannel)\n', (2036, 2049), True, 'import RPi.GPIO as GPIO\n'), ((2938, 2999), 'RPi.GPIO.setup', 'GPIO.setup', (['self.pin', 'GPIO.IN'], {'pull_up_down': 'self.pull_up_down'}), '(self.pin, GPIO.IN, pull_up_down=self.pull_up_down)\n', (2948, 2999), True, 'import RPi.GPIO as GPIO\n'), ((3070, 3189), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['self.pin'], {'edge': 'self.edge', 'callback': 'self.callbackFunctionHandler', 'bouncetime': 'self.bouncetime'}), '(self.pin, edge=self.edge, callback=self.\n callbackFunctionHandler, bouncetime=self.bouncetime)\n', (3091, 3189), True, 'import RPi.GPIO as GPIO\n'), ((4401, 4435), 'RPi.GPIO.remove_event_detect', 'GPIO.remove_event_detect', (['self.pin'], {}), '(self.pin)\n', (4425, 4435), True, 'import RPi.GPIO as GPIO\n'), ((4482, 4601), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['self.pin'], {'edge': 'self.edge', 'callback': 'self.callbackFunctionHandler', 'bouncetime': 'self.bouncetime'}), '(self.pin, edge=self.edge, callback=self.\n callbackFunctionHandler, bouncetime=self.bouncetime)\n', (4503, 4601), True, 'import RPi.GPIO as GPIO\n'), ((6226, 6260), 'RPi.GPIO.remove_event_detect', 'GPIO.remove_event_detect', (['self.pin'], {}), '(self.pin)\n', (6250, 6260), True, 'import RPi.GPIO as GPIO\n'), ((6386, 6406), 'RPi.GPIO.input', 'GPIO.input', 
(['self.pin'], {}), '(self.pin)\n', (6396, 6406), True, 'import RPi.GPIO as GPIO\n'), ((3636, 3651), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3646, 3651), False, 'import time\n'), ((3672, 3692), 'RPi.GPIO.input', 'GPIO.input', (['self.pin'], {}), '(self.pin)\n', (3682, 3692), True, 'import RPi.GPIO as GPIO\n'), ((2076, 2095), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2093, 2095), False, 'import time\n'), ((6350, 6370), 'RPi.GPIO.input', 'GPIO.input', (['self.pin'], {}), '(self.pin)\n', (6360, 6370), True, 'import RPi.GPIO as GPIO\n')]
from setuptools import setup setup(name='gtkpass', version='0.2.7', description='A GTK+ 3 program for the standard unix password manager', url='http://github.com/raghavsub/gtkpass', author='<NAME>', author_email='<EMAIL>', license='MIT', packages=['gtkpass'], entry_points={'console_scripts': ['gtkpass=gtkpass.main:main']}, install_requires=[])
[ "setuptools.setup" ]
[((30, 366), 'setuptools.setup', 'setup', ([], {'name': '"""gtkpass"""', 'version': '"""0.2.7"""', 'description': '"""A GTK+ 3 program for the standard unix password manager"""', 'url': '"""http://github.com/raghavsub/gtkpass"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'packages': "['gtkpass']", 'entry_points': "{'console_scripts': ['gtkpass=gtkpass.main:main']}", 'install_requires': '[]'}), "(name='gtkpass', version='0.2.7', description=\n 'A GTK+ 3 program for the standard unix password manager', url=\n 'http://github.com/raghavsub/gtkpass', author='<NAME>', author_email=\n '<EMAIL>', license='MIT', packages=['gtkpass'], entry_points={\n 'console_scripts': ['gtkpass=gtkpass.main:main']}, install_requires=[])\n", (35, 366), False, 'from setuptools import setup\n')]
import pytest import time from .utils import ( init_app, init_db, clean_db, add_flow, add_run, add_step, add_task, add_artifact, _test_list_resources, _test_single_resource, add_metadata, get_heartbeat_ts ) pytestmark = [pytest.mark.integration_tests] # Fixtures begin @pytest.fixture def cli(loop, aiohttp_client): return init_app(loop, aiohttp_client) @pytest.fixture async def db(cli): async_db = await init_db(cli) yield async_db await clean_db(async_db) # Fixtures end async def test_list_tasks(cli, db): _flow = (await add_flow(db, flow_id="HelloFlow")).body _run = (await add_run(db, flow_id=_flow.get("flow_id"))).body _step = (await add_step(db, flow_id=_run.get("flow_id"), step_name="step", run_number=_run.get("run_number"), run_id=_run.get("run_id"))).body await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_step), 200, []) await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_step), 200, []) _task = await create_task(db, step=_step) _task['duration'] = None _task['status'] = 'pending' await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_task), 200, [_task]) await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_task), 200, [_task]) async def test_list_tasks_non_numerical(cli, db): _flow = (await add_flow(db, flow_id="HelloFlow")).body _run = (await add_run(db, flow_id=_flow.get("flow_id"))).body _step = (await add_step(db, flow_id=_run.get("flow_id"), step_name="step", run_number=_run.get("run_number"), run_id=_run.get("run_id"))).body await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_step), 200, []) await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_step), 200, []) _task = await create_task(db, step=_step, task_name="bar") _, data = await _test_list_resources(cli, db, 
"/flows/{flow_id}/runs/{run_number}/tasks".format(**_task), 200, None) _, data = await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_task), 200, None) assert len(data) == 1 assert data[0]['task_name'] == 'bar' assert data[0]['task_id'] != 'bar' async def test_single_task(cli, db): await _test_single_resource(cli, db, "/flows/HelloFlow/runs/404/steps/none/tasks/5", 404, {}) _task = await create_task(db) _task['duration'] = None _task['status'] = 'pending' await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task) async def test_single_task_non_numerical(cli, db): _task = await create_task(db, task_name="bar") _, data = await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/bar".format(**_task), 200, None) assert data['task_name'] == 'bar' assert data['task_id'] != 'bar' async def test_list_old_metadata_task_attempts(cli, db): # Test tasks with old (missing attempt) metadata _task = await create_task(db) _task['duration'] = None _task['status'] = 'pending' await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task]) _artifact_first = await create_ok_artifact_for_task(db, _task) _artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1) _task['status'] = 'unknown' _task['task_ok'] = 'location' _task_first_attempt = dict(_task) _task_second_attempt = dict(_task) _task_first_attempt['attempt_id'] = 0 _task_first_attempt['finished_at'] = _artifact_first['ts_epoch'] _task_first_attempt['duration'] = _artifact_first['ts_epoch'] - \ _task_first_attempt['ts_epoch'] _task_second_attempt['attempt_id'] = 1 _task_second_attempt['finished_at'] = _artifact_second['ts_epoch'] _task_second_attempt['duration'] = _artifact_second['ts_epoch'] - \ _task_second_attempt['ts_epoch'] await _test_list_resources(cli, db, 
"/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt]) await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt]) async def test_old_metadata_task_with_multiple_attempts(cli, db): # Test tasks with old (missing attempt) metadata _task = await create_task(db) _task['duration'] = None _task['status'] = 'pending' await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task]) _artifact_first = await create_ok_artifact_for_task(db, _task) _artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1) _task['status'] = 'unknown' _task['task_ok'] = 'location' _task['attempt_id'] = 1 _task['finished_at'] = _artifact_second['ts_epoch'] _task['duration'] = _artifact_second['ts_epoch'] - \ _task['ts_epoch'] await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task) async def test_task_with_attempt_metadata(cli, db): _task = await create_task(db) _task['duration'] = None _task['status'] = 'pending' await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task]) _attempt_first = await create_task_attempt_metadata(db, _task) _artifact_first = await create_ok_artifact_for_task(db, _task) _task['started_at'] = _attempt_first['ts_epoch'] _task['finished_at'] = _artifact_first['ts_epoch'] _task['duration'] = _task['finished_at'] - _task['started_at'] _task['status'] = 'unknown' _task['task_ok'] = 'location' await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task) _attempt_done_first = await create_task_attempt_done_metadata(db, _task) 
_task['status'] = 'unknown' _task['finished_at'] = _attempt_done_first['ts_epoch'] _task['duration'] = _attempt_done_first['ts_epoch'] - _task['started_at'] await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task) _attempt_ok_first = await create_task_attempt_ok_metadata(db, _task, 0, True) # status 'completed' _task['status'] = 'completed' _task['finished_at'] = _attempt_ok_first['ts_epoch'] _task['duration'] = _attempt_ok_first['ts_epoch'] - _task['started_at'] _task['task_ok'] = None # intended behavior, status refinement location field should remain empty when metadata exists. await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task) async def test_task_failed_status_with_heartbeat(cli, db): _task = await create_task(db, last_heartbeat_ts=1, status="failed") _task['finished_at'] = 1000 # should be last heartbeat in this case, due to every other timestamp missing. _task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['ts_epoch'] await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task]) async def test_task_running_status_with_heartbeat(cli, db): hb_freeze = get_heartbeat_ts() _task = await create_task(db, last_heartbeat_ts=hb_freeze) _task['finished_at'] = None # should not have a finished at for running tasks. 
_task['duration'] = hb_freeze * 1000 - _task['ts_epoch'] await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task]) async def test_list_task_attempts(cli, db): _task = await create_task(db) _task['duration'] = None _task['status'] = 'pending' await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task]) _attempt_first = await create_task_attempt_metadata(db, _task) _artifact_first = await create_ok_artifact_for_task(db, _task) _attempt_done_first = await create_task_attempt_done_metadata(db, _task) _attempt_second = await create_task_attempt_metadata(db, _task, attempt=1) _artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1) _task_first_attempt = dict(_task) _task_second_attempt = dict(_task) _task_first_attempt['attempt_id'] = 0 _task_first_attempt['status'] = 'unknown' _task_first_attempt['task_ok'] = 'location' _task_first_attempt['started_at'] = _attempt_first['ts_epoch'] _task_first_attempt['finished_at'] = _attempt_done_first['ts_epoch'] _task_first_attempt['duration'] = _task_first_attempt['finished_at'] \ - _task_first_attempt['started_at'] # Second attempt counts as completed as well due to the _task_ok existing. 
_task_second_attempt['attempt_id'] = 1 _task_second_attempt['status'] = 'unknown' _task_second_attempt['task_ok'] = 'location' _task_second_attempt['started_at'] = _attempt_second['ts_epoch'] _task_second_attempt['finished_at'] = _artifact_second['ts_epoch'] _task_second_attempt['duration'] = _task_second_attempt['finished_at'] \ - _task_second_attempt['started_at'] await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt]) await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt]) async def test_task_with_attempt_ok_completed(cli, db): _task = await create_task(db) _attempt_first = await create_task_attempt_metadata(db, _task) _artifact_first = await create_ok_artifact_for_task(db, _task) _attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, True) # status = 'completed' _task['started_at'] = _attempt_first['ts_epoch'] _task['finished_at'] = _attempt_ok['ts_epoch'] _task['duration'] = _attempt_ok['ts_epoch'] - _task['started_at'] _task['status'] = 'completed' await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task) async def test_task_with_attempt_ok_failed(cli, db): _task = await create_task(db) _attempt_first = await create_task_attempt_metadata(db, _task) _artifact_first = await create_ok_artifact_for_task(db, _task) _task['started_at'] = _attempt_first['ts_epoch'] _task['finished_at'] = _artifact_first['ts_epoch'] _task['duration'] = _task['finished_at'] - _task['started_at'] _task['status'] = 'failed' _attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, False) # status = 'failed' _task['finished_at'] = _attempt_ok['ts_epoch'] _task['duration'] = _attempt_ok['ts_epoch'] - _task['started_at'] await 
_test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task) async def test_list_task_multiple_attempts_failure(cli, db): _task = await create_task(db) _attempt_first = await create_task_attempt_metadata(db, _task) _artifact_first = await create_ok_artifact_for_task(db, _task) _attempt_done_first = await create_task_attempt_done_metadata(db, _task) _attempt_second = await create_task_attempt_metadata(db, _task, attempt=1) _artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1) # Mark first attempt as 'failure' and second as 'completed' _attempt_ok_first = await create_task_attempt_ok_metadata(db, _task, 0, False) # status = 'failed' _attempt_ok_second = await create_task_attempt_ok_metadata(db, _task, 1, True) # status = 'completed' _task_first_attempt = dict(_task) _task_second_attempt = dict(_task) _task_first_attempt['attempt_id'] = 0 _task_first_attempt['status'] = 'failed' _task_first_attempt['started_at'] = _attempt_first['ts_epoch'] _task_first_attempt['finished_at'] = _attempt_done_first['ts_epoch'] _task_first_attempt['duration'] = _task_first_attempt['finished_at'] \ - _task_first_attempt['started_at'] _task_first_attempt['finished_at'] = _attempt_ok_first['ts_epoch'] _task_first_attempt['duration'] = _attempt_ok_first['ts_epoch'] - _task_first_attempt['started_at'] # Second attempt counts as completed as well due to the _task_ok existing. 
_task_second_attempt['attempt_id'] = 1 _task_second_attempt['status'] = 'completed' _task_second_attempt['started_at'] = _attempt_second['ts_epoch'] _task_second_attempt['finished_at'] = _artifact_second['ts_epoch'] _task_second_attempt['duration'] = _task_second_attempt['finished_at'] \ - _task_second_attempt['started_at'] _task_second_attempt['finished_at'] = _attempt_ok_second['ts_epoch'] _task_second_attempt['duration'] = _attempt_ok_second['ts_epoch'] - _task_second_attempt['started_at'] await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt]) await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt]) async def test_task_attempts_with_attempt_metadata(cli, db): _task = await create_task(db) _task['duration'] = None _task['status'] = 'pending' await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task]) _attempt_first = await create_task_attempt_metadata(db, _task) _artifact_first = await create_ok_artifact_for_task(db, _task) _attempt_done_first = await create_task_attempt_done_metadata(db, _task) # attempt metadata is written but no artifacts exist yet. # Queries should return a second attempt at this point already! 
_attempt_second = await create_task_attempt_metadata(db, _task, attempt=1) _task_first_attempt = dict(_task) _task_second_attempt = dict(_task) _task_first_attempt['attempt_id'] = 0 _task_first_attempt['task_ok'] = 'location' # should have location for status artifact _task_first_attempt['status'] = 'unknown' # 'unknown' because we cannot determine correct status from DB as attempt_ok is missing _task_first_attempt['started_at'] = _attempt_first['ts_epoch'] _task_first_attempt['finished_at'] = _attempt_done_first['ts_epoch'] _task_first_attempt['duration'] = _task_first_attempt['finished_at'] \ - _task_first_attempt['started_at'] _task_second_attempt['attempt_id'] = 1 _task_second_attempt['status'] = 'running' _task_second_attempt['started_at'] = _attempt_second['ts_epoch'] _task_second_attempt['duration'] = int(round(time.time() * 1000)) - _task_second_attempt['started_at'] await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"]) await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"]) # Write attempt_ok data for first attempt to check for status changes. _first_attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, False) # NOTE: in current implementation, attempt_ok overrides attempt-done as a more accurate timestamp for finished_at. _task_first_attempt['finished_at'] = _first_attempt_ok['ts_epoch'] _task_first_attempt['duration'] = _task_first_attempt['finished_at'] \ - _task_first_attempt['started_at'] _task_first_attempt['task_ok'] = None # should have no task_ok location, as status can be determined from db. _task_first_attempt['status'] = 'failed' # 'failed' because now we have attempt_ok false in db. 
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"]) await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"]) async def test_task_attempt_statuses_with_attempt_ok_failed(cli, db): _task = await create_task(db) _task['duration'] = None _task['status'] = 'pending' await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task]) _attempt_first = await create_task_attempt_metadata(db, _task) _artifact_first = await create_ok_artifact_for_task(db, _task) _attempt_done_first = await create_task_attempt_done_metadata(db, _task) _attempt_ok_first = await create_task_attempt_ok_metadata(db, _task, 0, False) # status = 'failed' _attempt_second = await create_task_attempt_metadata(db, _task, attempt=1) _attempt_done_second = await create_task_attempt_done_metadata(db, _task, attempt=1) _attempt_ok_second = await create_task_attempt_ok_metadata(db, _task, 1, True) # status = 'completed' _task_first_attempt = dict(_task) _task_second_attempt = dict(_task) # NOTE: In the current implementation attempt_ok overrides attempt-done ts_epoch as the finished_at # as a more accurate timestamp for when a task finished. 
_task_first_attempt['attempt_id'] = 0 _task_first_attempt['status'] = 'failed' _task_first_attempt['started_at'] = _attempt_first['ts_epoch'] _task_first_attempt['finished_at'] = _attempt_ok_first['ts_epoch'] _task_first_attempt['duration'] = _task_first_attempt['finished_at'] \ - _task_first_attempt['started_at'] _task_second_attempt['attempt_id'] = 1 _task_second_attempt['status'] = 'completed' _task_second_attempt['started_at'] = _attempt_second['ts_epoch'] _task_second_attempt['finished_at'] = _attempt_ok_second['ts_epoch'] _task_second_attempt['duration'] = _task_second_attempt['finished_at'] \ - _task_second_attempt['started_at'] await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt]) await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt]) # Test cases from the google docs table. 
# status 'completed' tests # # STATUS: attempt_ok in task metadata for the attempt is set to True # STARTED_AT: created_at property for attempt attribute for the attempt in task metadata # FINISHED_AT: created_at property for attempt_ok attribute for the attempt in task metadata # NOTE: for a more accurate finished_at timestamp, use the greatest timestamp out of task_ok / attempt_ok / attempt-done # as this is the latest write_timestamp for the task async def test_task_attempt_status_completed(cli, db): _task = await create_task(db) _task['duration'] = None _task['status'] = 'pending' await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task]) _attempt = await create_task_attempt_metadata(db, _task, 0) _attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, True) _attempt_done = await create_task_attempt_done_metadata(db, _task, 0) _task['status'] = 'completed' _task['started_at'] = _attempt['ts_epoch'] _task['finished_at'] = _attempt_done['ts_epoch'] _task['duration'] = _task['finished_at'] - _task['started_at'] await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task]) # status 'running' tests # # STATUS 'running': # Has all of # Has a start time (NOTE: this requires 'attempt' metadata to be present) # attempt_ok does not exist in the task metadata # Has logged a heartbeat in the last x minutes (NOTE: we actually rely on heartbeat for running status.) 
# No subsequent attempt exists # STARTED_AT: created_at property for attempt attribute for the attempt in task metadata # FINISHED_AT: does not apply (NULL) async def test_task_attempt_status_running(cli, db): _task = await create_task(db, last_heartbeat_ts=get_heartbeat_ts()) # default status: 'running' _task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['ts_epoch'] await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task]) _attempt = await create_task_attempt_metadata(db, _task, 0) _task['started_at'] = _attempt['ts_epoch'] _task['finished_at'] = None _task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['started_at'] await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task]) # status 'failed' tests # # STATUS: # Either of # attempt_ok in task metadata for the attempt is set to False # No heartbeat has been logged for the task in the last x minutes and no new attempt has started # A newer attempt exists # STARTED_AT: created_at property for attempt attribute for the attempt in task metadata # FINISHED_AT: # Either of (in priority) # created_at property for attempt_ok attribute for the attempt in task metadata # The timestamp in the heartbeat column for the task if no subsequent attempt is detected # If a subsequent attempt exists, use the start time of the subsequent attempt async def test_task_attempt_status_failed_with_existing_subsequent_attempt(cli, db): _task = await create_task(db, last_heartbeat_ts=get_heartbeat_ts()) _task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['ts_epoch'] await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task]) _first_attempt = dict(_task) _second_attempt = dict(_task) # we explicitly leave out attempt completion metadata for attempt 0 to 
test that it fails correctly # when attempt 1 exists. # ATTEMPT-0 _first_attempt_meta = await create_task_attempt_metadata(db, _task, 0) _first_attempt['started_at'] = _first_attempt_meta['ts_epoch'] _first_attempt['duration'] = _first_attempt['last_heartbeat_ts'] * 1000 - _first_attempt['started_at'] await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_first_attempt]) # ATTEMPT-1 _second_attempt_meta = await create_task_attempt_metadata(db, _task, 1) _second_attempt['attempt_id'] = 1 _second_attempt['started_at'] = _second_attempt_meta['ts_epoch'] _second_attempt['duration'] = _second_attempt['last_heartbeat_ts'] * 1000 - _second_attempt['started_at'] # first attempt should be failed due to second attempt existing. # finished_at timestamp should be the started_at of the second attempt due to it existing. _first_attempt['status'] = 'failed' _first_attempt['finished_at'] = _second_attempt['started_at'] _first_attempt['duration'] = _first_attempt['finished_at'] - _first_attempt['started_at'] await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_second_attempt, _first_attempt]) # Resource Helpers / factories async def create_ok_artifact_for_task(db, task, attempt=0): "Creates and returns a _task_ok artifact for a task" _task = (await add_artifact( db, flow_id=task.get("flow_id"), run_number=task.get("run_number"), run_id=task.get("run_id"), step_name=task.get("step_name"), task_id=task.get("task_id"), task_name=task.get("task_name"), artifact={ "name": "_task_ok", "location": "location", "ds_type": "ds_type", "sha": "sha", "type": "type", "content_type": "content_type", "attempt_id": attempt }) ).body return _task async def create_task(db, step=None, status="running", task_id=None, task_name=None, last_heartbeat_ts=None): "Creates and returns a task with specific status. 
Optionally creates the task for a specific step if provided." if not step: _flow = (await add_flow(db, flow_id="HelloFlow")).body _run = (await add_run(db, flow_id=_flow.get("flow_id"))).body step = (await add_step( db, flow_id=_run.get("flow_id"), run_number=_run.get("run_number"), step_name="step") ).body _task = (await add_task( db, flow_id=step.get("flow_id"), run_number=step.get("run_number"), step_name=step.get("step_name"), task_id=task_id, task_name=task_name, last_heartbeat_ts=last_heartbeat_ts) ).body _task['status'] = status return _task async def create_metadata_for_task(db, task, metadata={}, tags=None): "Creates a metadata record for a task" _meta = (await add_metadata(db, flow_id=task.get("flow_id"), run_number=task.get("run_number"), run_id=task.get("run_id"), step_name=task.get("step_name"), task_id=task.get("task_id"), task_name=task.get("task_name"), tags=tags, metadata=metadata) ).body return _meta async def create_task_attempt_metadata(db, task, attempt=0): "Create 'attempt' metadata for a task" return await create_metadata_for_task( db, task, metadata={ "type": "attempt", "field_name": "attempt", "value": str(attempt) } ) async def create_task_attempt_done_metadata(db, task, attempt: int = 0): "Create 'attempt-done' metadata for a task" return await create_metadata_for_task( db, task, metadata={ "type": "attempt-done", "field_name": "attempt-done", "value": str(attempt) } ) async def create_task_attempt_ok_metadata(db, task, attempt_id: int, attempt_ok: bool = False): "Create 'attempt_ok' metadata for a task" return await create_metadata_for_task( db, task, tags=["attempt_id:{attempt_id}".format(attempt_id=attempt_id)], metadata={ "type": "internal_attempt_status", "field_name": "attempt_ok", "value": str(attempt_ok) } )
[ "time.time" ]
[((15496, 15507), 'time.time', 'time.time', ([], {}), '()\n', (15505, 15507), False, 'import time\n')]
import torch from torch.optim import lr_scheduler from tqdm import tqdm from torchsummary import summary from torch.utils.tensorboard import SummaryWriter from apex import amp from loss import dice from pathlib import Path from data import CaseDataset, load_case, save_pred, \ orient_crop_case, regions_crop_case, resample_normalize_case import nibabel as nib import numpy as np import scipy.special as spe from transform import pad, crop_pad, to_numpy, to_tensor, resize def predict_per_patch(input, model, num_classes=3, patch_size=(96, 96, 96), step_per_patch=4, verbose=True, one_hot=False): device = next(model.parameters()).device # add padding if patch is larger than input shape origial_shape = input.shape[:3] input = pad(input, patch_size) padding_shape = input.shape[:3] coord_start = np.array([i // 2 for i in patch_size]) coord_end = np.array([padding_shape[i] - patch_size[i] // 2 for i in range(len(patch_size))]) num_steps = np.ceil([(coord_end[i] - coord_start[i]) / (patch_size[i] / step_per_patch) for i in range(3)]) step_size = np.array([(coord_end[i] - coord_start[i]) / (num_steps[i] + 1e-8) for i in range(3)]) step_size[step_size == 0] = 9999999 x_steps = np.arange(coord_start[0], coord_end[0] + 1e-8, step_size[0], dtype=np.int) y_steps = np.arange(coord_start[1], coord_end[1] + 1e-8, step_size[1], dtype=np.int) z_steps = np.arange(coord_start[2], coord_end[2] + 1e-8, step_size[2], dtype=np.int) result = torch.zeros([num_classes] + list(padding_shape)).to(device) result_n = torch.zeros_like(result).to(device) if verbose: print('Image Shape: {} Patch Size: {}'.format(padding_shape, patch_size)) print('X step: %d Y step: %d Z step: %d' % (len(x_steps), len(y_steps), len(z_steps))) # W H D C => C W H D => N C W H D for model input input = torch.from_numpy(to_tensor(input)[None]).to(device) patchs_slices = [] for x in x_steps: x_mix = x - patch_size[0] // 2 x_max = x + patch_size[0] // 2 for y in y_steps: y_min = y - patch_size[1] // 2 y_max = y + patch_size[1] 
// 2 for z in z_steps: z_min = z - patch_size[2] // 2 z_max = z + patch_size[2] // 2 patchs_slices.append([slice(x_mix, x_max), slice(y_min, y_max), slice(z_min, z_max)]) # predict loop predict_loop = tqdm(patchs_slices) if verbose else patchs_slices model.eval() with torch.no_grad(): for slices in predict_loop: output = model(input[[slice(None), slice(None)]+slices]) if num_classes == 1: output = torch.sigmoid(output) else: output = torch.softmax(output, dim=1) result[[slice(None)]+slices] += output[0] result_n[[slice(None)]+slices] += 1 # merge all patchs if verbose: print('Merging all patchs...') result = result / result_n if one_hot: result = to_numpy(result.cpu().numpy()).astype(np.float32) else: if num_classes == 1: result = torch.squeeze(result, dim=0) else: result = torch.softmax(result, dim=0) result = torch.argmax(result, axis=0) result = np.round(result.cpu().numpy()).astype(np.uint8) return crop_pad(result, origial_shape) def predict_case(case, model, target_spacing, normalize_stats, num_classes=3, patch_size=(96, 96, 96), step_per_patch=4, verbose=True, one_hot=False): orig_shape = case['image'].shape[:-1] affine = case['affine'] # resample case for predict if verbose: print('Resampling the case for prediction...') case_ = resample_normalize_case(case, target_spacing, normalize_stats) if verbose: print('Predicting the case...') pred = predict_per_patch(case_['image'], model, num_classes, patch_size, step_per_patch, verbose, one_hot) if verbose: print('Resizing the case to origial shape...') case['pred'] = resize(pred, orig_shape, is_label=one_hot is False) case['affine'] = affine if verbose: print('All done!') return case def batch_predict_case(load_dir, save_dir, model, target_spacing, normalize_stats, num_classes=3, patch_size=(240, 240, 80), step_per_patch=4, data_range=None): load_dir = Path(load_dir) cases = CaseDataset(load_dir, load_meta=True) if data_range is None: data_range = range(len(cases)) for i in tqdm(data_range): case = 
predict_case(cases[i], model, target_spacing, normalize_stats, num_classes, patch_size, step_per_patch, False) save_pred(case, save_dir) def cascade_predict_case(case, coarse_model, coarse_target_spacing, coarse_normalize_stats, coarse_patch_size, detail_model, detail_target_spacing, detail_normalize_stats, detail_patch_size, num_classes=3, step_per_patch=4, region_threshold=10000, crop_padding=20, verbose=True): if verbose: print('Predicting the rough shape for further prediction...') case = predict_case(case, coarse_model, coarse_target_spacing, coarse_normalize_stats, 1, coarse_patch_size, step_per_patch, verbose=verbose) regions = regions_crop_case(case, region_threshold, crop_padding, 'pred') num_classes = detail_model.out_channels orig_shape = case['image'].shape[:-1] result = np.zeros(list(orig_shape)+[num_classes]) result_n = np.zeros_like(result) if verbose: print('Cropping regions (%d)...' % len(regions)) for idx, region in enumerate(regions): bbox = region['bbox'] shape = region['image'].shape[:-1] if verbose: print('Region {} {} predicting...'.format(idx, shape)) region = predict_case(region, detail_model, detail_target_spacing, detail_normalize_stats, num_classes, detail_patch_size, step_per_patch, verbose=verbose, one_hot=True) region_slices = [] result_slices = [] for i in range(len(bbox)): region_slice_min = 0 + max(0 - bbox[i][0], 0) region_slice_max = shape[i] - max(bbox[i][1] - orig_shape[i], 0) region_slices.append(slice(region_slice_min, region_slice_max)) origin_slice_min = max(bbox[i][0], 0) origin_slice_max = min(bbox[i][1], orig_shape[i]) result_slices.append(slice(origin_slice_min, origin_slice_max)) region_slices.append(slice(None)) result_slices.append(slice(None)) result[result_slices] += region['pred'][region_slices] result_n[result_slices] += 1 if verbose: print('Merging all regions...') # avoid orig_pred_n = 0 mask = np.array(result_n > 0) result[mask] = result[mask] / result_n[mask] if num_classes == 1: result = np.squeeze(result, 
axis=-1) result = np.around(result) else: result = spe.softmax(result, axis=-1) result = np.argmax(result, axis=-1) case['pred'] = result.astype(np.uint8) if verbose: print('All done!') return case def cascade_predict(image_file, coarse_model, coarse_target_spacing, coarse_normalize_stats, coarse_patch_size, detail_model, detail_target_spacing, detail_normalize_stats, detail_patch_size, air=-200, num_classes=3, step_per_patch=4, region_threshold=10000, crop_padding=20, label_file=None, verbose=True): orig_case = load_case(image_file, label_file) case = orient_crop_case(orig_case, air) case = cascade_predict_case(case, coarse_model, coarse_target_spacing, coarse_normalize_stats, coarse_patch_size, detail_model, detail_target_spacing, detail_normalize_stats, detail_patch_size, num_classes, step_per_patch, region_threshold, crop_padding, verbose) orient = nib.orientations.io_orientation(orig_case['affine']) indices = orient[:, 0].astype(np.int) orig_shape = np.array(orig_case['image'].shape[:3]) orig_shape = np.take(orig_shape, indices) bbox = case['bbox'] orig_pred = np.zeros(orig_shape, dtype=np.uint8) result_slices = [] for i in range(len(bbox)): orig_slice_min = max(bbox[i][0], 0) orig_slice_max = min(bbox[i][1], orig_shape[i]) result_slices.append(slice(orig_slice_min, orig_slice_max)) orig_pred[result_slices] = case['pred'] # orient orig_case['pred'] = nib.orientations.apply_orientation(orig_pred, orient) if len(orig_case['image'].shape) == 3: orig_case['image'] = np.expand_dims(orig_case['image'], -1) return orig_case def batch_cascade_predict(image_dir, save_dir, coarse_model, coarse_target_spacing, coarse_normalize_stats, coarse_patch_size, detail_model, detail_target_spacing, detail_normalize_stats, detail_patch_size, air=-200, num_classes=3, step_per_patch=4, region_threshold=10000, crop_padding=20, data_range=None): image_dir = Path(image_dir) image_files = [path for path in sorted(image_dir.iterdir()) if path.is_file()] if data_range is None: data_range = 
range(len(image_files)) for i in tqdm(data_range): case = cascade_predict(image_files[i], coarse_model, coarse_target_spacing, coarse_normalize_stats, coarse_patch_size, detail_model, detail_target_spacing, detail_normalize_stats, detail_patch_size, air, num_classes, step_per_patch, region_threshold, crop_padding, None, False) save_pred(case, save_dir) def evaluate_case(case): num_classes = case['label'].max() evaluate_result = [] for c in range(num_classes): pred = np.array(case['pred'] == c+1).astype(np.float32) label = np.array(case['label'] == c+1).astype(np.float32) dsc = dice(torch.tensor(pred), torch.tensor(label)).item() evaluate_result.append(dsc) return evaluate_result def evaluate(label_file, pred_file): label_nib = nib.load(str(label_file)) pred_nib = nib.load(str(pred_file)) case = {} case['label'] = label_nib.get_fdata().astype(np.uint8) case['pred'] = pred_nib.get_fdata().astype(np.uint8) evaluate_result = evaluate_case(case) return evaluate_result def batch_evaluate(label_dir, pred_dir, data_range=None): label_dir = Path(label_dir) pred_dir = Path(pred_dir) label_files = sorted(list(label_dir.glob('*.nii.gz'))) pred_files = sorted(list(pred_dir.glob('*.nii.gz'))) if data_range is None: data_range = range(len(label_files)) evaluate_results = [] par = tqdm(data_range) for i in par: evaluate_result = evaluate(label_files[i], pred_files[i]) evaluate_results.append(evaluate_result) evaluate_dict = {} for idx, e in enumerate(evaluate_result): evaluate_dict["label_%d" % (idx+1)] = e par.set_description("Case %d" % i) par.set_postfix(evaluate_dict) print('\nThe mean dsc of each label:') means = np.array(evaluate_results).mean(axis=0) for i, mean in enumerate(means): print("label_%d: %f" % (i+1, mean)) return evaluate_results class Subset(torch.utils.data.Subset): def __init__(self, dataset, indices, transform): super(Subset, self).__init__(dataset, indices) self.transform = transform def __getitem__(self, idx): case = self.dataset[self.indices[idx]] if 
self.transform: case = self.transform(case) return case class Trainer(): def __init__(self, model, optimizer, loss, dataset, batch_size=10, dataloader_kwargs={'num_workers': 2, 'pin_memory': True}, valid_split=0.2, num_samples=None, metrics=None, scheduler=None, train_transform=None, valid_transform=None): self.model = model self.optimizer = optimizer self.loss = loss self.dataset = dataset self.metrics = metrics self.scheduler = scheduler self.train_transform = train_transform self.valid_transform = valid_transform dataset_size = len(self.dataset) indices = list(range(dataset_size)) split = int(np.floor(valid_split * dataset_size)) np.random.shuffle(indices) self.train_indices = indices[split:] self.valid_indices = indices[:split] self.dataloader_kwargs = {'batch_size': batch_size, **dataloader_kwargs} self.num_samples = num_samples self.valid_split = valid_split self.device = next(model.parameters()).device self.best_result = {'loss': float('inf')} self.current_epoch = 0 self.patience_counter = 0 self.amp_state_dict = None def get_lr(self, idx=0): return self.optimizer.param_groups[idx]['lr'] def set_lr(self, lr, idx=0): self.optimizer.param_groups[idx]['lr'] = lr def summary(self, input_shape): return summary(self.model, input_shape) def batch_loop(self, data_loader, is_train=True): results = [] self.progress_bar.reset(len(data_loader)) desc = "Epoch %d/%d (LR %.2g)" % (self.current_epoch+1, self.num_epochs, self.get_lr()) self.progress_bar.set_description(desc) for batch_idx, batch in enumerate(data_loader): x = batch['image'].to(self.device) y = batch['label'].to(self.device) # forward if is_train: self.model.train() y_pred = self.model(x) else: self.model.eval() with torch.no_grad(): y_pred = self.model(x) loss = self.loss(y_pred, y) # backward if is_train: self.optimizer.zero_grad() if self.use_amp: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() self.optimizer.step() result = {'loss': loss.item()} # 
calc the other metrics if self.metrics is not None: for key, metric_fn in self.metrics.items(): result[key] = metric_fn(y_pred, y).item() if not torch.isnan(loss): results.append(result) self.progress_bar.set_postfix(result) self.progress_bar.update() mean_result = {} for key in results[0].keys(): mean_result[key] = np.mean(np.array([x[key] for x in results])) name = 'train' if is_train else 'valid' if self.save_dir is not None: writer = SummaryWriter(self.save_dir) for key in mean_result.keys(): writer.add_scalar('%s/%s' % (key, name), mean_result[key], self.current_epoch) writer.close() return mean_result def fit(self, num_epochs=10, save_dir=None, use_amp=False, opt_level='O1'): # ---------------------- # initialize # ---------------------- self.num_epochs = num_epochs self.use_amp = use_amp self.save_dir = save_dir if use_amp: self.model, self.optimizer = amp.initialize( self.model, self.optimizer, opt_level=opt_level) if self.amp_state_dict is not None: amp.load_state_dict(self.amp_state_dict) self.progress_bar = tqdm(total=0) # ---------------------- # prepare data # ---------------------- train_set = Subset(self.dataset, self.train_indices, self.train_transform) if self.num_samples is not None: sampler = torch.utils.data.RandomSampler(train_set, True, self.num_samples) train_loader = torch.utils.data.DataLoader(train_set, sampler=sampler, **self.dataloader_kwargs) else: train_loader = torch.utils.data.DataLoader(train_set, shuffle=True, **self.dataloader_kwargs) if len(self.valid_indices) > 0: valid_set = Subset(self.dataset, self.valid_indices, self.valid_transform) if self.num_samples is not None: num_samples = round(self.num_samples * self.valid_split) sampler = torch.utils.data.RandomSampler(valid_set, True, num_samples) valid_loader = torch.utils.data.DataLoader(valid_set, sampler=sampler, **self.dataloader_kwargs) else: valid_loader = torch.utils.data.DataLoader(valid_set, **self.dataloader_kwargs) else: valid_loader = None # ---------------------- # 
main loop # ---------------------- for epoch in range(self.current_epoch, num_epochs): self.current_epoch = epoch # train loop result = self.batch_loop(train_loader, is_train=True) # vaild loop if valid_loader is not None: result = self.batch_loop(valid_loader, is_train=False) # build-in fn: lr_scheduler if self.scheduler is not None: if isinstance(self.scheduler, lr_scheduler.ReduceLROnPlateau): self.scheduler.step(result['loss']) else: self.scheduler.step() # save best if result['loss'] < self.best_result['loss']-1e-3: self.best_result = result if save_dir is not None: self.save_checkpoint(save_dir+'-best.pt') if save_dir is not None: self.save_checkpoint(save_dir+'-last.pt') self.progress_bar.close() def save_checkpoint(self, file_path): checkpoint = {'model_state_dict': self.model.state_dict(), 'optimizer_state_dict': self.optimizer.state_dict(), 'current_epoch': self.current_epoch, 'train_indices': self.train_indices, 'valid_indices': self.valid_indices, 'best_result': self.best_result} if self.scheduler is not None: checkpoint['scheduler_state_dict'] = self.scheduler.state_dict() if self.use_amp: checkpoint['amp_state_dict'] = amp.state_dict() torch.save(checkpoint, file_path) def load_checkpoint(self, file_path): checkpoint = torch.load(file_path) self.model.load_state_dict(checkpoint['model_state_dict']) self.optimizer.load_state_dict(checkpoint['optimizer_state_dict']) self.current_epoch = checkpoint['current_epoch']+1 self.train_indices = checkpoint['train_indices'] self.valid_indices = checkpoint['valid_indices'] self.best_result = checkpoint['best_result'] if 'amp_state_dict' in checkpoint: self.amp_state_dict = checkpoint['amp_state_dict'] if 'scheduler_state_dict' in checkpoint and self.scheduler is not None: self.scheduler.load_state_dict(checkpoint['scheduler_state_dict']) # cross valid # elif num_folds > 1: # # split the dataset into k-fold # fold_len = len(dataset) // num_folds # fold_len_list = [] # for i in range(num_folds-1): # 
fold_len_list.append(fold_len) # fold_len_list.append(len(dataset)-fold_len * (num_folds-1)) # fold_subsets = torch.utils.data.random_split(dataset, fold_len_list) # fold_metrics = [] # avg_metrics = {} # self.save('init.pt') # for i, fold_subset in enumerate(fold_subsets): # train_subsets = fold_subsets.copy() # train_subsets.remove(fold_subset) # train_subset = torch.utils.data.ConcatDataset(train_subsets) # train_set = DatasetFromSubset(train_subset, tr_transform) # valid_set = DatasetFromSubset(fold_subset, vd_transform) # print('Fold %d/%d:' % (i+1, num_folds)) # self.load('init.pt') # train_kwargs['log_dir'] = '%s_%d' % (log_dir, i) # metrics = self.train(train_set, valid_set, **train_kwargs) # fold_metrics.append(metrics) # # calc the avg # for name in fold_metrics[0].keys(): # sum_metric = 0 # for fold_metric in fold_metrics: # sum_metric += fold_metric[name] # avg_metrics[name] = sum_metric / num_folds # for i, fold_metric in enumerate(fold_metrics): # print('Fold %d metrics:\t%s' % # (i+1, self.metrics_stringify(fold_metric))) # print('Avg metrics:\t%s' % self.metrics_stringify(avg_metrics)) # manual ctrl @lr_factor @min_lr @patience # if metrics['Loss'] < best_metrics['Loss']-1e-4: # if save_dir and save_best: # self.save(save_dir+'-best.pt') # best_metrics = metrics # patience_counter = 0 # elif patience > 0: # patience_counter += 1 # if patience_counter > patience: # print("│\n├Loss stopped improving for %d num_epochs." 
% # patience_counter) # patience_counter = 0 # lr = self.get_lr() * lr_factor # if min_lr and lr < min_lr: # print("│LR below the min LR, stop training.") # break # else: # print('│Reduce LR to %.3g' % lr) # self.set_lr(lr) # def get_lr(self): # for param_group in self.optimizer.param_groups: # return param_group['lr'] # def set_lr(self, lr): # for param_group in self.optimizer.param_groups: # param_group['lr'] = lr # # save best & early_stop_patience counter # if result['loss'] < self.best_result['loss']-1e-3: # self.best_result = result # self.patience_counter = 0 # if save_dir and save_best: # self.save_checkpoint(save_dir+'-best.pt') # elif early_stop_patience > 0: # self.patience_counter += 1 # if self.patience_counter > early_stop_patience: # print(("\nLoss stopped improving for %d num_epochs. " # "stop training.") % self.patience_counter) # self.patience_counter = 0 # break
[ "apex.amp.scale_loss", "torch.softmax", "numpy.array", "apex.amp.initialize", "transform.crop_pad", "torch.squeeze", "transform.pad", "numpy.arange", "torch.isnan", "torch.utils.tensorboard.SummaryWriter", "apex.amp.load_state_dict", "pathlib.Path", "data.resample_normalize_case", "numpy.t...
[((881, 903), 'transform.pad', 'pad', (['input', 'patch_size'], {}), '(input, patch_size)\n', (884, 903), False, 'from transform import pad, crop_pad, to_numpy, to_tensor, resize\n'), ((958, 998), 'numpy.array', 'np.array', (['[(i // 2) for i in patch_size]'], {}), '([(i // 2) for i in patch_size])\n', (966, 998), True, 'import numpy as np\n'), ((1441, 1516), 'numpy.arange', 'np.arange', (['coord_start[0]', '(coord_end[0] + 1e-08)', 'step_size[0]'], {'dtype': 'np.int'}), '(coord_start[0], coord_end[0] + 1e-08, step_size[0], dtype=np.int)\n', (1450, 1516), True, 'import numpy as np\n'), ((1530, 1605), 'numpy.arange', 'np.arange', (['coord_start[1]', '(coord_end[1] + 1e-08)', 'step_size[1]'], {'dtype': 'np.int'}), '(coord_start[1], coord_end[1] + 1e-08, step_size[1], dtype=np.int)\n', (1539, 1605), True, 'import numpy as np\n'), ((1619, 1694), 'numpy.arange', 'np.arange', (['coord_start[2]', '(coord_end[2] + 1e-08)', 'step_size[2]'], {'dtype': 'np.int'}), '(coord_start[2], coord_end[2] + 1e-08, step_size[2], dtype=np.int)\n', (1628, 1694), True, 'import numpy as np\n'), ((3653, 3684), 'transform.crop_pad', 'crop_pad', (['result', 'origial_shape'], {}), '(result, origial_shape)\n', (3661, 3684), False, 'from transform import pad, crop_pad, to_numpy, to_tensor, resize\n'), ((4160, 4222), 'data.resample_normalize_case', 'resample_normalize_case', (['case', 'target_spacing', 'normalize_stats'], {}), '(case, target_spacing, normalize_stats)\n', (4183, 4222), False, 'from data import CaseDataset, load_case, save_pred, orient_crop_case, regions_crop_case, resample_normalize_case\n'), ((4655, 4706), 'transform.resize', 'resize', (['pred', 'orig_shape'], {'is_label': '(one_hot is False)'}), '(pred, orig_shape, is_label=one_hot is False)\n', (4661, 4706), False, 'from transform import pad, crop_pad, to_numpy, to_tensor, resize\n'), ((5157, 5171), 'pathlib.Path', 'Path', (['load_dir'], {}), '(load_dir)\n', (5161, 5171), False, 'from pathlib import Path\n'), ((5185, 5222), 
'data.CaseDataset', 'CaseDataset', (['load_dir'], {'load_meta': '(True)'}), '(load_dir, load_meta=True)\n', (5196, 5222), False, 'from data import CaseDataset, load_case, save_pred, orient_crop_case, regions_crop_case, resample_normalize_case\n'), ((5303, 5319), 'tqdm.tqdm', 'tqdm', (['data_range'], {}), '(data_range)\n', (5307, 5319), False, 'from tqdm import tqdm\n'), ((6698, 6761), 'data.regions_crop_case', 'regions_crop_case', (['case', 'region_threshold', 'crop_padding', '"""pred"""'], {}), "(case, region_threshold, crop_padding, 'pred')\n", (6715, 6761), False, 'from data import CaseDataset, load_case, save_pred, orient_crop_case, regions_crop_case, resample_normalize_case\n'), ((6917, 6938), 'numpy.zeros_like', 'np.zeros_like', (['result'], {}), '(result)\n', (6930, 6938), True, 'import numpy as np\n'), ((8407, 8429), 'numpy.array', 'np.array', (['(result_n > 0)'], {}), '(result_n > 0)\n', (8415, 8429), True, 'import numpy as np\n'), ((9415, 9448), 'data.load_case', 'load_case', (['image_file', 'label_file'], {}), '(image_file, label_file)\n', (9424, 9448), False, 'from data import CaseDataset, load_case, save_pred, orient_crop_case, regions_crop_case, resample_normalize_case\n'), ((9460, 9492), 'data.orient_crop_case', 'orient_crop_case', (['orig_case', 'air'], {}), '(orig_case, air)\n', (9476, 9492), False, 'from data import CaseDataset, load_case, save_pred, orient_crop_case, regions_crop_case, resample_normalize_case\n'), ((10192, 10244), 'nibabel.orientations.io_orientation', 'nib.orientations.io_orientation', (["orig_case['affine']"], {}), "(orig_case['affine'])\n", (10223, 10244), True, 'import nibabel as nib\n'), ((10304, 10342), 'numpy.array', 'np.array', (["orig_case['image'].shape[:3]"], {}), "(orig_case['image'].shape[:3])\n", (10312, 10342), True, 'import numpy as np\n'), ((10360, 10388), 'numpy.take', 'np.take', (['orig_shape', 'indices'], {}), '(orig_shape, indices)\n', (10367, 10388), True, 'import numpy as np\n'), ((10430, 10466), 
'numpy.zeros', 'np.zeros', (['orig_shape'], {'dtype': 'np.uint8'}), '(orig_shape, dtype=np.uint8)\n', (10438, 10466), True, 'import numpy as np\n'), ((10771, 10824), 'nibabel.orientations.apply_orientation', 'nib.orientations.apply_orientation', (['orig_pred', 'orient'], {}), '(orig_pred, orient)\n', (10805, 10824), True, 'import nibabel as nib\n'), ((11676, 11691), 'pathlib.Path', 'Path', (['image_dir'], {}), '(image_dir)\n', (11680, 11691), False, 'from pathlib import Path\n'), ((11862, 11878), 'tqdm.tqdm', 'tqdm', (['data_range'], {}), '(data_range)\n', (11866, 11878), False, 'from tqdm import tqdm\n'), ((13446, 13461), 'pathlib.Path', 'Path', (['label_dir'], {}), '(label_dir)\n', (13450, 13461), False, 'from pathlib import Path\n'), ((13477, 13491), 'pathlib.Path', 'Path', (['pred_dir'], {}), '(pred_dir)\n', (13481, 13491), False, 'from pathlib import Path\n'), ((13719, 13735), 'tqdm.tqdm', 'tqdm', (['data_range'], {}), '(data_range)\n', (13723, 13735), False, 'from tqdm import tqdm\n'), ((2724, 2743), 'tqdm.tqdm', 'tqdm', (['patchs_slices'], {}), '(patchs_slices)\n', (2728, 2743), False, 'from tqdm import tqdm\n'), ((2800, 2815), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2813, 2815), False, 'import torch\n'), ((5651, 5676), 'data.save_pred', 'save_pred', (['case', 'save_dir'], {}), '(case, save_dir)\n', (5660, 5676), False, 'from data import CaseDataset, load_case, save_pred, orient_crop_case, regions_crop_case, resample_normalize_case\n'), ((8522, 8549), 'numpy.squeeze', 'np.squeeze', (['result'], {'axis': '(-1)'}), '(result, axis=-1)\n', (8532, 8549), True, 'import numpy as np\n'), ((8567, 8584), 'numpy.around', 'np.around', (['result'], {}), '(result)\n', (8576, 8584), True, 'import numpy as np\n'), ((8612, 8640), 'scipy.special.softmax', 'spe.softmax', (['result'], {'axis': '(-1)'}), '(result, axis=-1)\n', (8623, 8640), True, 'import scipy.special as spe\n'), ((8658, 8684), 'numpy.argmax', 'np.argmax', (['result'], {'axis': '(-1)'}), '(result, 
axis=-1)\n', (8667, 8684), True, 'import numpy as np\n'), ((10897, 10935), 'numpy.expand_dims', 'np.expand_dims', (["orig_case['image']", '(-1)'], {}), "(orig_case['image'], -1)\n", (10911, 10935), True, 'import numpy as np\n'), ((12639, 12664), 'data.save_pred', 'save_pred', (['case', 'save_dir'], {}), '(case, save_dir)\n', (12648, 12664), False, 'from data import CaseDataset, load_case, save_pred, orient_crop_case, regions_crop_case, resample_normalize_case\n'), ((15569, 15595), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (15586, 15595), True, 'import numpy as np\n'), ((16272, 16304), 'torchsummary.summary', 'summary', (['self.model', 'input_shape'], {}), '(self.model, input_shape)\n', (16279, 16304), False, 'from torchsummary import summary\n'), ((19003, 19016), 'tqdm.tqdm', 'tqdm', ([], {'total': '(0)'}), '(total=0)\n', (19007, 19016), False, 'from tqdm import tqdm\n'), ((22253, 22286), 'torch.save', 'torch.save', (['checkpoint', 'file_path'], {}), '(checkpoint, file_path)\n', (22263, 22286), False, 'import torch\n'), ((22351, 22372), 'torch.load', 'torch.load', (['file_path'], {}), '(file_path)\n', (22361, 22372), False, 'import torch\n'), ((1783, 1807), 'torch.zeros_like', 'torch.zeros_like', (['result'], {}), '(result)\n', (1799, 1807), False, 'import torch\n'), ((3432, 3460), 'torch.squeeze', 'torch.squeeze', (['result'], {'dim': '(0)'}), '(result, dim=0)\n', (3445, 3460), False, 'import torch\n'), ((3496, 3524), 'torch.softmax', 'torch.softmax', (['result'], {'dim': '(0)'}), '(result, dim=0)\n', (3509, 3524), False, 'import torch\n'), ((3546, 3574), 'torch.argmax', 'torch.argmax', (['result'], {'axis': '(0)'}), '(result, axis=0)\n', (3558, 3574), False, 'import torch\n'), ((14140, 14166), 'numpy.array', 'np.array', (['evaluate_results'], {}), '(evaluate_results)\n', (14148, 14166), True, 'import numpy as np\n'), ((15523, 15559), 'numpy.floor', 'np.floor', (['(valid_split * dataset_size)'], {}), '(valid_split * 
dataset_size)\n', (15531, 15559), True, 'import numpy as np\n'), ((18113, 18141), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['self.save_dir'], {}), '(self.save_dir)\n', (18126, 18141), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((18788, 18851), 'apex.amp.initialize', 'amp.initialize', (['self.model', 'self.optimizer'], {'opt_level': 'opt_level'}), '(self.model, self.optimizer, opt_level=opt_level)\n', (18802, 18851), False, 'from apex import amp\n'), ((19258, 19323), 'torch.utils.data.RandomSampler', 'torch.utils.data.RandomSampler', (['train_set', '(True)', 'self.num_samples'], {}), '(train_set, True, self.num_samples)\n', (19288, 19323), False, 'import torch\n'), ((19351, 19437), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_set'], {'sampler': 'sampler'}), '(train_set, sampler=sampler, **self.\n dataloader_kwargs)\n', (19378, 19437), False, 'import torch\n'), ((19584, 19662), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_set'], {'shuffle': '(True)'}), '(train_set, shuffle=True, **self.dataloader_kwargs)\n', (19611, 19662), False, 'import torch\n'), ((22228, 22244), 'apex.amp.state_dict', 'amp.state_dict', ([], {}), '()\n', (22242, 22244), False, 'from apex import amp\n'), ((2981, 3002), 'torch.sigmoid', 'torch.sigmoid', (['output'], {}), '(output)\n', (2994, 3002), False, 'import torch\n'), ((3046, 3074), 'torch.softmax', 'torch.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (3059, 3074), False, 'import torch\n'), ((12803, 12834), 'numpy.array', 'np.array', (["(case['pred'] == c + 1)"], {}), "(case['pred'] == c + 1)\n", (12811, 12834), True, 'import numpy as np\n'), ((12868, 12900), 'numpy.array', 'np.array', (["(case['label'] == c + 1)"], {}), "(case['label'] == c + 1)\n", (12876, 12900), True, 'import numpy as np\n'), ((17717, 17734), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (17728, 17734), False, 'import torch\n'), ((17968, 18003), 
'numpy.array', 'np.array', (['[x[key] for x in results]'], {}), '([x[key] for x in results])\n', (17976, 18003), True, 'import numpy as np\n'), ((18933, 18973), 'apex.amp.load_state_dict', 'amp.load_state_dict', (['self.amp_state_dict'], {}), '(self.amp_state_dict)\n', (18952, 18973), False, 'from apex import amp\n'), ((20045, 20105), 'torch.utils.data.RandomSampler', 'torch.utils.data.RandomSampler', (['valid_set', '(True)', 'num_samples'], {}), '(valid_set, True, num_samples)\n', (20075, 20105), False, 'import torch\n'), ((20137, 20223), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_set'], {'sampler': 'sampler'}), '(valid_set, sampler=sampler, **self.\n dataloader_kwargs)\n', (20164, 20223), False, 'import torch\n'), ((20386, 20450), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_set'], {}), '(valid_set, **self.dataloader_kwargs)\n', (20413, 20450), False, 'import torch\n'), ((2112, 2128), 'transform.to_tensor', 'to_tensor', (['input'], {}), '(input)\n', (2121, 2128), False, 'from transform import pad, crop_pad, to_numpy, to_tensor, resize\n'), ((12937, 12955), 'torch.tensor', 'torch.tensor', (['pred'], {}), '(pred)\n', (12949, 12955), False, 'import torch\n'), ((12957, 12976), 'torch.tensor', 'torch.tensor', (['label'], {}), '(label)\n', (12969, 12976), False, 'import torch\n'), ((17005, 17020), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17018, 17020), False, 'import torch\n'), ((17256, 17292), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'self.optimizer'], {}), '(loss, self.optimizer)\n', (17270, 17292), False, 'from apex import amp\n')]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Jan 09 22:25:07 2019 @author: arnaudhub """ #import pandas as pd from sqlalchemy import create_engine from sqlalchemy.sql import text import configparser,os from urllib import parse #import sql.connector config = configparser.ConfigParser() config.read_file(open(os.path.expanduser("~/Bureau/OBJDOMO.cnf"))) DB = "OBJETDOMO_V13_1.1?charset=utf8" CNF="OBJDOMO" engine = create_engine("mysql://%s:%s@%s/%s" % (config[CNF]['user'], parse.quote_plus(config[CNF]['password']), config[CNF]['host'], DB)) user = config['OBJDOMO']['user'] password=config['<PASSWORD>']['password'] import mysql.connector from mysql.connector import Error try: connection = mysql.connector.connect(host="127.0.0.1", database="OBJETDOMO_V13_1.1", user=user, password=password) cursor = connection.cursor() cursor.execute("""SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0;""") cursor.execute("""SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;""") cursor.execute("""SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='TRADITIONAL,ALLOW_INVALID_DATES';""") cursor.execute("""DROP SCHEMA IF EXISTS `OBJETDOMO_V13_1.1`;""") print("DROP SCHEMA") cursor.execute("""CREATE SCHEMA IF NOT EXISTS `OBJETDOMO_V13_1.1` DEFAULT CHARACTER SET utf8 ;""") cursor.execute("""USE `OBJETDOMO_V13_1.1`;""") cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_A_TYPE_ADRESSE_TAD` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_A_TYPE_ADRESSE_TAD` ( `TAD_ID` INT NOT NULL AUTO_INCREMENT, `TAD_LIBELLE` VARCHAR(45) NOT NULL, PRIMARY KEY (`TAD_ID`)) ENGINE = InnoDB;""") print("T_A_TYPE_ADRESSE_TAD Table created successfully ") cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_GENRE_GEN` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_GENRE_GEN` ( `GEN_ID` INT NOT NULL AUTO_INCREMENT, `GEN_LIBELLE` VARCHAR(16) NOT NULL, PRIMARY KEY (`GEN_ID`)) ENGINE = InnoDB;""") 
print("T_R_GENRE_GEN Table created successfully ") cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_A_STATUT_STT` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_A_STATUT_STT` ( `STT_ID` INT NOT NULL AUTO_INCREMENT, `STT_LIBELLE` VARCHAR(45) NOT NULL, `STT_TYPE` VARCHAR(45) NOT NULL, PRIMARY KEY (`STT_ID`)) ENGINE = InnoDB;""") print("T_A_STATUT_STT Table created successfully ") cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_PERSONNEPHYSIQUE_PRS` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_PERSONNEPHYSIQUE_PRS` ( `PRS_ID` INT NOT NULL AUTO_INCREMENT, `PRS_NOM` VARCHAR(40) NOT NULL, `PRS_PRENOM` VARCHAR(40) NOT NULL, `GEN_ID` INT NOT NULL, `PRS_NOTES` VARCHAR(300) NULL, `STT_ID` INT NOT NULL, PRIMARY KEY (`PRS_ID`), INDEX `fk_TE_PERSONNE_PRS_1_idx` (`GEN_ID` ASC), INDEX `fk_TE_PERSONNE_PRS_2_idx` (`STT_ID` ASC), INDEX `index4` (`PRS_NOM` ASC, `PRS_PRENOM` ASC), CONSTRAINT `fk_TE_PERSONNE_PRS_1` FOREIGN KEY (`GEN_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_R_GENRE_GEN` (`GEN_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION, CONSTRAINT `fk_TE_PERSONNE_PRS_2` FOREIGN KEY (`STT_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_A_STATUT_STT` (`STT_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION) ENGINE = InnoDB;""") print('T_E_PERSONNEPHYSIQUE_PRS Table created successfully') cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_A_VILLE_CITY` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_A_VILLE_CITY` ( `CITY_ID` INT NOT NULL AUTO_INCREMENT, `CITY_CODEPOSTAL` CHAR(5) NOT NULL, `CITY_COMMUNE` VARCHAR(60) NOT NULL, PRIMARY KEY (`CITY_ID`), INDEX `index2` (`CITY_CODEPOSTAL` ASC, `CITY_COMMUNE` ASC)) ENGINE = InnoDB;""") print("T_A_VILLE_CITY Table created successfully ") cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_ADRESSEPOSTALE_ADR` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_ADRESSEPOSTALE_ADR` ( `ADR_ID` INT NOT NULL 
AUTO_INCREMENT, `ADR_VOIEPRINCIPALE` VARCHAR(38) NOT NULL, `ADR_COMPLEMENTIDENTIFICATION` VARCHAR(38) NOT NULL, `CITY_ID` INT NOT NULL, `TAD_ID` INT NOT NULL COMMENT ' ', PRIMARY KEY (`ADR_ID`), INDEX `fk_TE_ADRESSE_ADR_1_idx` (`TAD_ID` ASC), INDEX `fk_TE_ADRESSEPOSTALE_ADR_1_idx` (`CITY_ID` ASC), CONSTRAINT `fk_TE_ADRESSE_ADR_1` FOREIGN KEY (`TAD_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_A_TYPE_ADRESSE_TAD` (`TAD_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION, CONSTRAINT `fk_TE_ADRESSEPOSTALE_ADR_1` FOREIGN KEY (`CITY_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_A_VILLE_CITY` (`CITY_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION) ENGINE = InnoDB;""") print('T_E_ADRESSEPOSTALE_ADR Table created successfully') cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_TYPEPRODUIT_TPDT` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_TYPEPRODUIT_TPDT` ( `TPDT_ID` INT NOT NULL AUTO_INCREMENT, `TPDT_CATEGORIE` VARCHAR(60) NULL, PRIMARY KEY (`TPDT_ID`)) ENGINE = InnoDB;""") print('T_R_TYPEPRODUIT_TPDT Table created successfully') cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_PRODUIT_PDT` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_PRODUIT_PDT` ( `PDT_SERIALNUMBER` INT NOT NULL AUTO_INCREMENT, `PDT_NOM` VARCHAR(45) NOT NULL, `PDT_MARQUE` VARCHAR(45) NOT NULL, `PDT_VALEUR` VARCHAR(45) NOT NULL, `PDT_HEURE` VARCHAR(45) NOT NULL, `PDT_DUREE` VARCHAR(45) NOT NULL, `PDT_SOURCE` VARCHAR(45) NOT NULL, `PDT_REGLE` VARCHAR(45) NOT NULL, `TPDT_ID` INT NOT NULL, PRIMARY KEY (`PDT_SERIALNUMBER`), INDEX `index2` (`PDT_NOM` ASC, `PDT_MARQUE` ASC), INDEX `fk_TE_PRODUIT_PDT_1_idx` (`TPDT_ID` ASC), CONSTRAINT `fk_TE_PRODUIT_PDT_1` FOREIGN KEY (`TPDT_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_R_TYPEPRODUIT_TPDT` (`TPDT_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION) ENGINE = InnoDB;""") print('T_E_PRODUIT_PDT Table created successfully') cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_AUTHENTIFICATION_AUTH` 
;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_AUTHENTIFICATION_AUTH` ( `AUTH_ID` INT NOT NULL AUTO_INCREMENT, `AUTH_USERNAME` VARCHAR(45) NOT NULL, `AUTH_PASSWORD` VARCHAR(45) NOT NULL, `PRS_ID` INT NOT NULL, PRIMARY KEY (`AUTH_ID`), INDEX `index2` (`AUTH_USERNAME` ASC, `AUTH_PASSWORD` ASC), INDEX `fk_TR_AUTHENTIFICATION_AUTH_1_idx` (`PRS_ID` ASC), CONSTRAINT `fk_TR_AUTHENTIFICATION_AUTH_1` FOREIGN KEY (`PRS_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PERSONNEPHYSIQUE_PRS` (`PRS_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION) ENGINE = InnoDB;""") print('T_R_AUTHENTIFICATION_AUTH Table created successfully') cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_LOCALISATIONPRODUIT_LOC` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_LOCALISATIONPRODUIT_LOC` ( `LOC_ID` INT NOT NULL AUTO_INCREMENT, `LOC_LIBELLE` VARCHAR(45) NOT NULL, `LOC_TYPE` VARCHAR(45) NOT NULL, `LOC_NOTES` VARCHAR(300) NULL, PRIMARY KEY (`LOC_ID`), INDEX `index2` (`LOC_LIBELLE` ASC, `LOC_TYPE` ASC)) ENGINE = InnoDB;""") print('T_E_LOCALISATIONPRODUIT_LOC Table created successfully') cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_TYPEINTERVENTION_TPI` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_TYPEINTERVENTION_TPI` ( `TPI_ID` INT NOT NULL AUTO_INCREMENT, `TPI_LIBELLE` VARCHAR(45) NOT NULL, `TPI_TYPE` VARCHAR(45) NOT NULL, PRIMARY KEY (`TPI_ID`), INDEX `index2` (`TPI_LIBELLE` ASC, `TPI_TYPE` ASC)) ENGINE = InnoDB;""") print('T_R_TYPEINTERVENTION_TPI Table created successfully') cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_A_AUTONOMIE_AUT` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_A_AUTONOMIE_AUT` ( `AUT_ID` INT NOT NULL AUTO_INCREMENT, `AUT_DEPENDANCE` VARCHAR(5) NOT NULL, `AUT_DEFINITION` VARCHAR(105) NOT NULL, PRIMARY KEY (`AUT_ID`), INDEX `index2` (`AUT_DEPENDANCE` ASC, `AUT_DEFINITION` ASC)) ENGINE = InnoDB;""") 
print('T_A_AUTONOMIE_AUT Table created successfully') cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_BENEFICIAIRE_CTT` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_BENEFICIAIRE_CTT` ( `CTT_ID` INT NOT NULL AUTO_INCREMENT, `CTT_INTITULECONTRAT` VARCHAR(45) NOT NULL, `CTT_REFCONTRAT` VARCHAR(45) NOT NULL, `AUT_ID` INT NOT NULL, `CTT_DEBUTCONTRAT` DATE NOT NULL, `CTT_DATENAISSANCEBENEFICIAIRE` DATE NOT NULL, `CTT_TEL` VARCHAR(45) NULL, `PRS_ID` INT NOT NULL, PRIMARY KEY (`CTT_ID`), INDEX `fk_TR_CONTRAT_CTT_1_idx` (`AUT_ID` ASC), INDEX `fk_TR_CONTRATBENEFICIAIRE_CTT_TE_PERSONNE_PRS1_idx` (`PRS_ID` ASC), CONSTRAINT `fk_TR_CONTRAT_CTT_1` FOREIGN KEY (`AUT_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_A_AUTONOMIE_AUT` (`AUT_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION, CONSTRAINT `fk_TE_PERSONNE_PRS1` FOREIGN KEY (`PRS_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PERSONNEPHYSIQUE_PRS` (`PRS_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION) ENGINE = InnoDB;""") print('T_R_BENEFICIAIRE_CTT Table created successfully') cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_INTERVENTION_INT` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_INTERVENTION_INT` ( `INT_ID` INT NOT NULL AUTO_INCREMENT, `ADR_ID` INT NOT NULL, `INT_DATEINTERVENTION` DATE NOT NULL, `INT_PRESENCEANIMALMOYEN` TINYINT(1) NOT NULL DEFAULT 0, `NOTES` VARCHAR(300) NULL, `CTT_ID` INT NOT NULL, `TPI_ID` INT NOT NULL, PRIMARY KEY (`INT_ID`), INDEX `fk_TR_INTERVENTION_INT_1_idx` (`TPI_ID` ASC), INDEX `fk_TR_INTERVENTION_INT_2_idx` (`CTT_ID` ASC), CONSTRAINT `fk_TR_INTERVENTION_INT_1` FOREIGN KEY (`TPI_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`TR_TYPEINTERVENTION_TPI` (`TPI_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION, CONSTRAINT `fk_TR_INTERVENTION_INT_2` FOREIGN KEY (`CTT_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_R_BENEFICIAIRE_CTT` (`CTT_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION) ENGINE = InnoDB;""") print('T_E_INTERVENTION_INT Table created 
successfully') ############## cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_INTERCONNEXION_INTCO` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_INTERCONNEXION_INTCO` ( `INTCO_ID` INT NOT NULL AUTO_INCREMENT, `DATEEVENEMENT` DATETIME(6) NOT NULL, `VALEUR` VARCHAR(45) NOT NULL, `PDT_ID` INT NOT NULL, `INTCO_ADRESSEIP` VARCHAR(20) NOT NULL, PRIMARY KEY (`INTCO_ID`), INDEX `fk_TR_COMMUNICATION_COM_1_idx` (`PDT_ID` ASC), CONSTRAINT `fk_TR_COMMUNICATION_COM_1` FOREIGN KEY (`PDT_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PRODUIT_PDT` (`PDT_SERIALNUMBER`) ON DELETE NO ACTION ON UPDATE NO ACTION) ENGINE = InnoDB;""") print('T_R_INTERCONNEXION_INTCO Table created successfully') cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_J_CTT_ADR_PDT_INT` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_J_CTT_ADR_PDT_INT` ( `PDT_SERIALNUMBER` INT NOT NULL, `INT_ID` INT NOT NULL, `NOTES` VARCHAR(300) NULL, `LOC_ID` INT NOT NULL, `CTT_ID` INT NOT NULL, `ADR_ID` INT NOT NULL, INDEX `fk_TJ_CTT_ADR_PDT_INT_2_idx` (`LOC_ID` ASC), INDEX `fk_TJ_CTT_ADR_PDT_INT_3_idx` (`PDT_SERIALNUMBER` ASC), INDEX `fk_TJ_CTT_ADR_PDT_INT_4_idx` (`INT_ID` ASC), INDEX `fk_TJ_CTT_ADR_PDT_INT_5_idx` (`CTT_ID` ASC), INDEX `fk_TJ_CTT_ADR_PDT_INT_1_idx` (`ADR_ID` ASC), CONSTRAINT `fk_TJ_CTT_ADR_PDT_INT_2` FOREIGN KEY (`LOC_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`TE_LOCALISATIONPRODUIT_LOC` (`LOC_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION, CONSTRAINT `fk_TJ_CTT_ADR_PDT_INT_3` FOREIGN KEY (`PDT_SERIALNUMBER`) REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PRODUIT_PDT` (`PDT_SERIALNUMBER`) ON DELETE NO ACTION ON UPDATE NO ACTION, CONSTRAINT `fk_TJ_CTT_ADR_PDT_INT_4` FOREIGN KEY (`INT_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_E_INTERVENTION_INT` (`INT_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION, CONSTRAINT `fk_TJ_CTT_ADR_PDT_INT_5` FOREIGN KEY (`CTT_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_R_BENEFICIAIRE_CTT` (`CTT_ID`) ON DELETE NO ACTION ON UPDATE NO 
ACTION, CONSTRAINT `fk_TJ_CTT_ADR_PDT_INT_1` FOREIGN KEY (`ADR_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_E_ADRESSEPOSTALE_ADR` (`ADR_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION) ENGINE = InnoDB;""") print("table jointure T_J_CTT_ADR_PDT_INT créée ") cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_PERSONNEMORALE_PEM` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_PERSONNEMORALE_PEM` ( `PEM_NUMEROSIREN` INT NOT NULL, `PEM_RAISONSOCIALE` VARCHAR(45) NOT NULL, `PEM_TYPEACTIVITE` VARCHAR(60) NOT NULL, `PEM_SIRET` VARCHAR(45) NULL, PRIMARY KEY (`PEM_NUMEROSIREN`), INDEX `index2` (`PEM_RAISONSOCIALE` ASC)) ENGINE = InnoDB;""") print('T_E_PERSONNEMORALE_PEM créée') cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_J_EMPLOYE_EMP` ;""") cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_J_EMPLOYE_EMP` ( `EMP_ID` INT NOT NULL, `PEM_ID` INT NOT NULL, `INT_ID` INT NOT NULL, `EMP_TELEPHONE` CHAR(15) NOT NULL, `EMP_EMAIL` VARCHAR(45) NOT NULL, INDEX `fk_TE_PRESTATAIRE_PREST_2_idx` (`PEM_ID` ASC), INDEX `fk_TE_PRESTATAIRE_PREST_3_idx` (`INT_ID` ASC), CONSTRAINT `fk_TE_PRESTATAIRE_PREST_1` FOREIGN KEY (`EMP_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PERSONNEPHYSIQUE_PRS` (`PRS_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION, CONSTRAINT `fk_TE_PRESTATAIRE_PREST_2` FOREIGN KEY (`PEM_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PERSONNEMORALE_PEM` (`PEM_NUMEROSIREN`) ON DELETE NO ACTION ON UPDATE NO ACTION, CONSTRAINT `fk_TE_PRESTATAIRE_PREST_3` FOREIGN KEY (`INT_ID`) REFERENCES `OBJETDOMO_V13_1.1`.`T_E_INTERVENTION_INT` (`INT_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION) ENGINE = InnoDB;""") print('T_J_EMPLOYE_EMP Table created successfully') cursor.execute("""SET SQL_MODE=@OLD_SQL_MODE;""") cursor.execute("""SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;""") cursor.execute("""SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS;""") except mysql.connector.Error as error: print("Failed to create table in MySQL: {}".format(error)) 
finally: if (connection.is_connected()): cursor.close() connection.close() print("MySQL connection is closed")
[ "urllib.parse.quote_plus", "configparser.ConfigParser", "os.path.expanduser" ]
[((283, 310), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (308, 310), False, 'import configparser, os\n'), ((333, 375), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Bureau/OBJDOMO.cnf"""'], {}), "('~/Bureau/OBJDOMO.cnf')\n", (351, 375), False, 'import configparser, os\n'), ((502, 543), 'urllib.parse.quote_plus', 'parse.quote_plus', (["config[CNF]['password']"], {}), "(config[CNF]['password'])\n", (518, 543), False, 'from urllib import parse\n')]
from multiml import logger from multiml.task.pytorch import PytorchASNGNASTask from multiml.task.pytorch import PytorchASNGNASBlockTask from . import PytorchConnectionRandomSearchAgent from multiml.task.pytorch.datasets import StoreGateDataset, NumpyDataset import numpy as np class PytorchASNGNASAgent(PytorchConnectionRandomSearchAgent): """Agent packing subtasks using Pytorch ASNG-NAS Model.""" def __init__( self, verbose=1, num_epochs=1000, max_patience=5, batch_size={ 'type': 'equal_length', 'length': 500, 'test': 100 }, asng_args={ 'lam': 2, 'delta': 0.0, 'alpha': 1.5, 'clipping_value': None, 'range_restriction': True }, #lam=2, delta_init_factor=1, alpha = 1.5, clipping_value = None, optimizer=None, optimizer_args=None, scheduler=None, scheduler_args=None, **kwargs): """ Args: training_choiceblock_model (bool): Training choiceblock model after connecting submodels **kwargs: Arbitrary keyword arguments """ super().__init__(**kwargs) self.do_pretraining = kwargs['do_pretraining'] self._verbose = verbose self._num_epochs = num_epochs self.asng_args = asng_args self.batch_size = batch_size self._max_patience = max_patience self._optimizer = optimizer self._optimizer_args = optimizer_args self._scheduler = scheduler self._scheduler_args = scheduler_args # this variable will be set in _build_block funciton self._loss_weights = {} @logger.logging def execute(self): """Execute Currently, only categorical ASNG NAS is implemented.""" asng_block_list, task_ids = self._build_disconnected_task_block_list() asng_task = PytorchASNGNASTask( asng_args=self.asng_args, subtasks=asng_block_list, variable_mapping=self._connectiontask_args["variable_mapping"], saver=self._saver, device=self._connectiontask_args['device'], gpu_ids=None, amp=False, # expert option metrics=self._connectiontask_args["metrics"], verbose=self._verbose, num_epochs=self._num_epochs, batch_size=self.batch_size, max_patience=self._max_patience, loss_weights=self._loss_weights, optimizer=self._optimizer, 
optimizer_args=self._optimizer_args, scheduler=self._scheduler, scheduler_args=self._scheduler_args, ) self._task_scheduler.add_task(task_id='ASNG-NAS', add_to_dag=False) self._task_scheduler.add_subtask('ASNG-NAS', 'main-task', env=asng_task) asng_subtask = self._task_scheduler.get_subtask('ASNG-NAS', 'main-task') if not self._connectiontask_args["load_weights"]: unique_id = asng_task.get_unique_id() self.saver.dump_ml(unique_id, ml_type='pytorch', model=asng_task.ml.model) # Save model ordering (model index) submodel_names = asng_subtask.env.get_submodel_names() self._saver.add(f'ASNG-NAS_{submodel_names}', submodel_names) asng_subtask.env.verbose = self._verbose self._execute_subtask(asng_subtask, is_pretraining=False) # check best model asng_task.set_most_likely() # re-train best_task_ids, best_subtask_ids = asng_task.best_model() best_subtasks = [ self._task_scheduler.get_subtask(task_id, subtask_id) for task_id, subtask_id in zip(task_ids, best_subtask_ids) ] best_combination_task = self._build_connected_models( subtasks=[t.env for t in best_subtasks], job_id='ASNG-NAS-Final', use_task_scheduler=True) best_comb = '+'.join(s for s in best_subtask_ids) self._execute_subtask(best_combination_task, is_pretraining=False) self._metric.storegate = self._storegate metric = self._metric.calculate() ### evaluate # make results for json output # seed, nevents, walltime will be set at outside results_json = {'agent': 'ASNG-NAS', 'tasks': {}} c_cat, c_int = asng_task.get_most_likely() theta_cat, theta_int = asng_task.get_thetas() cat_idx = c_cat.argmax(axis=1) pred_result = best_combination_task.env.predict(label=True) best_combination_task.env._storegate.update_data( data=pred_result['pred'], var_names=best_combination_task.env._output_var_names, phase='auto') self._metric._storegate = best_combination_task.env._storegate test_metric = self._metric.calculate() self.result = dict(task_ids=['ASNG-NAS-Final'], subtask_ids=best_subtask_ids, subtask_hps=[None], 
metric_value=test_metric) test_result = dict(model_name='ASNG-NAS-Final', cat_idx=cat_idx, metric=test_metric) self._saver.add(f"results.ASNG-NAS-Final", test_result) results_json['loss_test'] = pred_result['loss'] results_json['subloss_test'] = pred_result['subloss'] results_json['metric_test'] = test_metric for task_idx, task_id in enumerate(task_ids): results_json['tasks'][task_id] = {} results_json['tasks'][task_id]['weight'] = best_combination_task.env.ml.loss_weights[ task_idx] results_json['tasks'][task_id]['models'] = [] results_json['tasks'][task_id]['theta_cat'] = [] subtasktuples = self._task_scheduler.get_subtasks_with_hps(task_id) for subtask_idx, subtask in enumerate(subtasktuples): this_id = subtask.subtask_id.split('-')[-1] # FIXME : hard coded theta = theta_cat[task_idx, subtask_idx] results_json['tasks'][task_id]['models'].append(this_id) results_json['tasks'][task_id]['theta_cat'].append(theta) if subtask_idx == cat_idx[task_idx]: results_json['tasks'][task_id]['model_selected'] = this_id if theta_cat is not None: logger.info(f' theta_cat is {this_id: >20} : {theta:.3e}') else: logger.info(f'theta_cat is None') if theta_int is not None: for theta, job_id in zip(theta_int.tolist(), ): for t, j in zip(theta, job_id): logger.info(f' theta_cat is {j: >20} : {t:.3e}') else: logger.info(f'theta_int is None') logger.info(f'best cat_idx is {cat_idx}') logger.info(f'best combination is {best_comb}') self.results_json = results_json def _build_disconnected_task_block_list(self): task_ids = [] asng_block_list = [] for task_idx, task_id in enumerate(self._task_scheduler.get_sorted_task_ids()): subtasktuples = self._task_scheduler.get_subtasks_with_hps(task_id) for subtask_idx, subtask in enumerate(subtasktuples): subtask_env = subtask.env subtask_hps = subtask.hps subtask_env.set_hps(subtask_hps) if self.do_pretraining: logger.info(f'pretraining of {subtask_env.subtask_id} is starting...') self._execute_subtask(subtask, is_pretraining=True) else: 
subtask.env.storegate = self._storegate subtask.env.saver = self._saver subtask.env.compile() if '_model_fit' in dir(subtask_env): if self._freeze_model_weights: self._set_trainable_flags(subtask_env._model_fit, False) l = ', '.join(subtask.env.subtask_id for subtask in subtasktuples) logger.info(f'{l}') params_list = [v.hps for v in subtasktuples] self._saver.add(f'asng_block_{task_id}_submodel_params', params_list) # build asng task block subtasks = [v.env for v in subtasktuples] asng_block_subtask = self._build_block_task(subtasks, task_id, is_pretraining=False) asng_block_list.append(asng_block_subtask.env) task_ids.append(task_id) return asng_block_list, task_ids def _build_block_task(self, subtasks, task_id, is_pretraining): asng_block = PytorchASNGNASBlockTask( subtasks=subtasks, job_id=f'ASNG-NAS-Block-{task_id}', saver=self._saver, load_weights=self._connectiontask_args['load_weights'], ) asng_task_id = 'ASNG-NAS-' + task_id self._loss_weights[asng_task_id] = self._connectiontask_args['loss_weights'][task_id] self._task_scheduler.add_task(task_id=asng_task_id) self._task_scheduler.add_subtask(asng_task_id, 'BlockTask', env=asng_block) asng_block_subtask = self._task_scheduler.get_subtask(asng_task_id, 'BlockTask') if is_pretraining: self._execute_subtask(asng_block_subtask, is_pretraining=True) else: asng_block_subtask.env.storegate = self._storegate asng_block_subtask.env.saver = self._saver asng_block_subtask.env.compile() if not self._connectiontask_args['load_weights']: unique_id = asng_block.get_unique_id() self.saver.dump_ml(unique_id, ml_type='pytorch', model=asng_block.ml.model) submodel_names = asng_block_subtask.env.get_submodel_names() self._saver.add(f'asng_block_{task_id}_submodel_names', submodel_names) return asng_block_subtask
[ "multiml.task.pytorch.PytorchASNGNASBlockTask", "multiml.logger.info", "multiml.task.pytorch.PytorchASNGNASTask" ]
[((2035, 2607), 'multiml.task.pytorch.PytorchASNGNASTask', 'PytorchASNGNASTask', ([], {'asng_args': 'self.asng_args', 'subtasks': 'asng_block_list', 'variable_mapping': "self._connectiontask_args['variable_mapping']", 'saver': 'self._saver', 'device': "self._connectiontask_args['device']", 'gpu_ids': 'None', 'amp': '(False)', 'metrics': "self._connectiontask_args['metrics']", 'verbose': 'self._verbose', 'num_epochs': 'self._num_epochs', 'batch_size': 'self.batch_size', 'max_patience': 'self._max_patience', 'loss_weights': 'self._loss_weights', 'optimizer': 'self._optimizer', 'optimizer_args': 'self._optimizer_args', 'scheduler': 'self._scheduler', 'scheduler_args': 'self._scheduler_args'}), "(asng_args=self.asng_args, subtasks=asng_block_list,\n variable_mapping=self._connectiontask_args['variable_mapping'], saver=\n self._saver, device=self._connectiontask_args['device'], gpu_ids=None,\n amp=False, metrics=self._connectiontask_args['metrics'], verbose=self.\n _verbose, num_epochs=self._num_epochs, batch_size=self.batch_size,\n max_patience=self._max_patience, loss_weights=self._loss_weights,\n optimizer=self._optimizer, optimizer_args=self._optimizer_args,\n scheduler=self._scheduler, scheduler_args=self._scheduler_args)\n", (2053, 2607), False, 'from multiml.task.pytorch import PytorchASNGNASTask\n'), ((6906, 6947), 'multiml.logger.info', 'logger.info', (['f"""best cat_idx is {cat_idx}"""'], {}), "(f'best cat_idx is {cat_idx}')\n", (6917, 6947), False, 'from multiml import logger\n'), ((6956, 7003), 'multiml.logger.info', 'logger.info', (['f"""best combination is {best_comb}"""'], {}), "(f'best combination is {best_comb}')\n", (6967, 7003), False, 'from multiml import logger\n'), ((8752, 8915), 'multiml.task.pytorch.PytorchASNGNASBlockTask', 'PytorchASNGNASBlockTask', ([], {'subtasks': 'subtasks', 'job_id': 'f"""ASNG-NAS-Block-{task_id}"""', 'saver': 'self._saver', 'load_weights': "self._connectiontask_args['load_weights']"}), "(subtasks=subtasks, job_id=\n 
f'ASNG-NAS-Block-{task_id}', saver=self._saver, load_weights=self.\n _connectiontask_args['load_weights'])\n", (8775, 8915), False, 'from multiml.task.pytorch import PytorchASNGNASBlockTask\n'), ((6863, 6896), 'multiml.logger.info', 'logger.info', (['f"""theta_int is None"""'], {}), "(f'theta_int is None')\n", (6874, 6896), False, 'from multiml import logger\n'), ((8176, 8195), 'multiml.logger.info', 'logger.info', (['f"""{l}"""'], {}), "(f'{l}')\n", (8187, 8195), False, 'from multiml import logger\n'), ((6488, 6547), 'multiml.logger.info', 'logger.info', (['f""" theta_cat is {this_id: >20} : {theta:.3e}"""'], {}), "(f' theta_cat is {this_id: >20} : {theta:.3e}')\n", (6499, 6547), False, 'from multiml import logger\n'), ((6590, 6623), 'multiml.logger.info', 'logger.info', (['f"""theta_cat is None"""'], {}), "(f'theta_cat is None')\n", (6601, 6623), False, 'from multiml import logger\n'), ((6787, 6836), 'multiml.logger.info', 'logger.info', (['f""" theta_cat is {j: >20} : {t:.3e}"""'], {}), "(f' theta_cat is {j: >20} : {t:.3e}')\n", (6798, 6836), False, 'from multiml import logger\n'), ((7579, 7649), 'multiml.logger.info', 'logger.info', (['f"""pretraining of {subtask_env.subtask_id} is starting..."""'], {}), "(f'pretraining of {subtask_env.subtask_id} is starting...')\n", (7590, 7649), False, 'from multiml import logger\n')]
import os import pytest from petisco import FlaskApplication SWAGGER_DIR = os.path.dirname(os.path.abspath(__file__)) + "/application/" app = FlaskApplication(application_name="petisco", swagger_dir=SWAGGER_DIR).get_app() @pytest.fixture def client(): with app.app.test_client() as c: yield c @pytest.fixture def given_any_apikey(): apikey = "apikey" return apikey
[ "os.path.abspath", "petisco.FlaskApplication" ]
[((95, 120), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (110, 120), False, 'import os\n'), ((146, 215), 'petisco.FlaskApplication', 'FlaskApplication', ([], {'application_name': '"""petisco"""', 'swagger_dir': 'SWAGGER_DIR'}), "(application_name='petisco', swagger_dir=SWAGGER_DIR)\n", (162, 215), False, 'from petisco import FlaskApplication\n')]
from sklearn.datasets import load_iris from sklearn.linear_model import LogisticRegression from flask import Blueprint, jsonify, request, flash, redirect, render_template from web_app.models import User from web_app.statsmodels import load_model from web_app.services.basilica_service import connection as basilica_connection stats_routes = Blueprint("stats_routes", __name__) @stats_routes.route("/stats/iris") def iris(): X, y = load_iris(return_X_y=True) clf = load_model() # make sure to pre-train the model first! result = str(clf.predict(X[:2, :])) print("PREDICTION", result) return result # maybe return as JSON? @stats_routes.route("/stats/predict", methods=["POST"]) def twitoff_predict(): # 0. Grab data print("PREDICT ROUTE...") print("FORM DATA:", dict(request.form)) # {'screen_name_a': 'elonmusk', 'example: 'j_a_e_f', 'tweet_text': 'Example tweet text here'} screen_name_a = request.form["screen_name_a"] screen_name_b = request.form["screen_name_b"] tweet_text = request.form["tweet_text"] print(screen_name_a, screen_name_b, tweet_text) # 1. Train model tweet_embeddings = [] tweet_labels = [] user_a = User.query.filter(User.screen_name == screen_name_a).one() user_b = User.query.filter(User.screen_name == screen_name_b).one() tweets_a = user_a.tweets tweets_b = user_b.tweets all_tweets = tweets_a + tweets_b for tweet in all_tweets: tweet_embeddings.append(tweet.embedding) tweet_labels.append(tweet.user.screen_name) print("Embeddings:", len(tweet_embeddings), "Lables:", len(tweet_labels)) classifier = LogisticRegression(random_state=0, solver="lbfgs", multi_class="multinomial") classifier.fit(tweet_embeddings, tweet_labels) # 2. 
Make prediction example_tweet_embedding = basilica_connection.embed_sentence(tweet_text, model="twitter") result = classifier.predict([example_tweet_embedding]) print("Result:", result[0]) return render_template("prediction_results.html", screen_name_a=screen_name_a, screen_name_b=screen_name_b, tweet_text=tweet_text, screen_name_most_likely=result[0] )
[ "sklearn.datasets.load_iris", "flask.render_template", "web_app.statsmodels.load_model", "sklearn.linear_model.LogisticRegression", "web_app.models.User.query.filter", "web_app.services.basilica_service.connection.embed_sentence", "flask.Blueprint" ]
[((344, 379), 'flask.Blueprint', 'Blueprint', (['"""stats_routes"""', '__name__'], {}), "('stats_routes', __name__)\n", (353, 379), False, 'from flask import Blueprint, jsonify, request, flash, redirect, render_template\n'), ((439, 465), 'sklearn.datasets.load_iris', 'load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (448, 465), False, 'from sklearn.datasets import load_iris\n'), ((476, 488), 'web_app.statsmodels.load_model', 'load_model', ([], {}), '()\n', (486, 488), False, 'from web_app.statsmodels import load_model\n'), ((1644, 1721), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'solver': '"""lbfgs"""', 'multi_class': '"""multinomial"""'}), "(random_state=0, solver='lbfgs', multi_class='multinomial')\n", (1662, 1721), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1830, 1893), 'web_app.services.basilica_service.connection.embed_sentence', 'basilica_connection.embed_sentence', (['tweet_text'], {'model': '"""twitter"""'}), "(tweet_text, model='twitter')\n", (1864, 1893), True, 'from web_app.services.basilica_service import connection as basilica_connection\n'), ((1997, 2163), 'flask.render_template', 'render_template', (['"""prediction_results.html"""'], {'screen_name_a': 'screen_name_a', 'screen_name_b': 'screen_name_b', 'tweet_text': 'tweet_text', 'screen_name_most_likely': 'result[0]'}), "('prediction_results.html', screen_name_a=screen_name_a,\n screen_name_b=screen_name_b, tweet_text=tweet_text,\n screen_name_most_likely=result[0])\n", (2012, 2163), False, 'from flask import Blueprint, jsonify, request, flash, redirect, render_template\n'), ((1192, 1244), 'web_app.models.User.query.filter', 'User.query.filter', (['(User.screen_name == screen_name_a)'], {}), '(User.screen_name == screen_name_a)\n', (1209, 1244), False, 'from web_app.models import User\n'), ((1264, 1316), 'web_app.models.User.query.filter', 'User.query.filter', (['(User.screen_name == screen_name_b)'], {}), 
'(User.screen_name == screen_name_b)\n', (1281, 1316), False, 'from web_app.models import User\n')]
import argparse import os import pyprind import utils import treetk import treetk.rstdt def main(args): """ We use n-ary ctrees (ie., *.labeled.nary.ctree) to generate dtrees. Morey et al. (2018) demonstrate that scores evaluated on these dtrees are superficially lower than those on right-heavy binarized trees (ie., *.labeled.bin.ctree). """ path = args.path filenames = os.listdir(path) filenames = [n for n in filenames if n.endswith(".labeled.nary.ctree")] filenames.sort() def func_label_rule(node, i, j): relations = node.relation_label.split("/") if len(relations) == 1: return relations[0] # Left-most node is head. else: if i > j: return relations[j] else: return relations[j-1] for filename in pyprind.prog_bar(filenames): sexp = utils.read_lines( os.path.join(path, filename), process=lambda line: line.split()) assert len(sexp) == 1 sexp = sexp[0] # Constituency ctree = treetk.rstdt.postprocess(treetk.sexp2tree(sexp, with_nonterminal_labels=True, with_terminal_labels=False)) # Dependency # Assign heads ctree = treetk.rstdt.assign_heads(ctree) # Conversion dtree = treetk.ctree2dtree(ctree, func_label_rule=func_label_rule) arcs = dtree.tolist(labeled=True) # Write with open(os.path.join( path, filename.replace(".labeled.nary.ctree", ".arcs")), "w") as f: f.write("%s\n" % " ".join(["%d-%d-%s" % (h,d,l) for h,d,l in arcs])) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--path", type=str, required=True) args = parser.parse_args() main(args=args)
[ "os.listdir", "argparse.ArgumentParser", "treetk.ctree2dtree", "os.path.join", "treetk.sexp2tree", "treetk.rstdt.assign_heads", "pyprind.prog_bar" ]
[((402, 418), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (412, 418), False, 'import os\n'), ((844, 871), 'pyprind.prog_bar', 'pyprind.prog_bar', (['filenames'], {}), '(filenames)\n', (860, 871), False, 'import pyprind\n'), ((1723, 1748), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1746, 1748), False, 'import argparse\n'), ((1272, 1304), 'treetk.rstdt.assign_heads', 'treetk.rstdt.assign_heads', (['ctree'], {}), '(ctree)\n', (1297, 1304), False, 'import treetk\n'), ((1342, 1400), 'treetk.ctree2dtree', 'treetk.ctree2dtree', (['ctree'], {'func_label_rule': 'func_label_rule'}), '(ctree, func_label_rule=func_label_rule)\n', (1360, 1400), False, 'import treetk\n'), ((926, 954), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (938, 954), False, 'import os\n'), ((1129, 1214), 'treetk.sexp2tree', 'treetk.sexp2tree', (['sexp'], {'with_nonterminal_labels': '(True)', 'with_terminal_labels': '(False)'}), '(sexp, with_nonterminal_labels=True, with_terminal_labels=False\n )\n', (1145, 1214), False, 'import treetk\n')]
from contextlib import closing import h5py import numpy as np def save_h5(outfile, dictionary): """ Saves passed dictionary to an h5 file Parameters ---------- outfile : string Name of output h5 file dictionary : dictionary Dictionary that will be saved """ def save_layer(f, seed, dictionary): for key, value in dictionary.items(): fullKey = f"{seed}/{key}" if type(dictionary[key]) == dict: f = save_layer(f, fullKey, value) else: f[fullKey] = dictionary[key] return f with closing(h5py.File(outfile, 'w')) as f: for key, value in dictionary.items(): if type(dictionary[key]) == dict: f = save_layer(f, key, value) else: f[key] = dictionary[key] def load_h5(feature_file): """ Loads h5 contents to dictionary. Single level dictionary with keys being full h5 paths. Parameters ---------- feature_file : string Name of input h5 file Returns ------- dictionary : dictionary Dictionary of h5 contents """ def load_layer(f, seed, dictionary): for key in f[seed].keys(): fullKey = f"{seed}/{key}" if isinstance(f[fullKey], h5py.Dataset): if (seed in dictionary.keys()): dictionary[seed][key] = np.asarray(f[fullKey]) else: dictionary[seed] = {key: np.asarray(f[fullKey])} else: dictionary = load_layer(f, fullKey, dictionary) return dictionary with h5py.File(feature_file, 'r') as f: dictionary = {} for key in f.keys(): if isinstance(f[key], h5py.Dataset): dictionary[key] = np.asarray(f[key]) else: dictionary = load_layer(f, key, dictionary) return dictionary
[ "numpy.asarray", "h5py.File" ]
[((1659, 1687), 'h5py.File', 'h5py.File', (['feature_file', '"""r"""'], {}), "(feature_file, 'r')\n", (1668, 1687), False, 'import h5py\n'), ((622, 645), 'h5py.File', 'h5py.File', (['outfile', '"""w"""'], {}), "(outfile, 'w')\n", (631, 645), False, 'import h5py\n'), ((1830, 1848), 'numpy.asarray', 'np.asarray', (['f[key]'], {}), '(f[key])\n', (1840, 1848), True, 'import numpy as np\n'), ((1426, 1448), 'numpy.asarray', 'np.asarray', (['f[fullKey]'], {}), '(f[fullKey])\n', (1436, 1448), True, 'import numpy as np\n'), ((1516, 1538), 'numpy.asarray', 'np.asarray', (['f[fullKey]'], {}), '(f[fullKey])\n', (1526, 1538), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*- """ Helper functions for VariationalModel class """ from __future__ import print_function from __future__ import division import math import random import tensorflow as tf from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s def linearOutcomePrediction(zs, params_pred, scope=None): """ English: Model for predictions outcomes from latent representations Z, zs = batch of z-vectors (encoder-states, matrix) Japanese: このモデルにおける、潜在表現Zから得られる出力の予測です。 zs = ベクトル z のバッチ(袋)です。 (encoder の状態であり、行列です) (恐らく、[z_0, z_1, z_2, ...] というような意味) """ with s2s.variable_scope.variable_scope(scope or "outcomepred", reuse=True): coefficients, bias = params_pred outcome_preds = tf.add(tf.matmul(zs, coefficients), bias) return outcome_preds def flexibleOutcomePrediction(zs, params_pred, use_sigmoid=False, scope=None): """ English: Model for nonlinearly predicting outcomes from latent representations Z. Uses a single hidden layer of pre-specified size, by default = d (the size of the RNN hidden-state) zs = batch of z-vectors (encoder-states, matrix) use_sigmoid = if True, then outcome-predictions are constrained to [0, 1] Japanese: このモデルにおける、潜在表現Zから得られる非線形な出力の予測です。 事前にサイズ (標準では d 、つまりRNNの隠れ層の数) が指定されている、一つの隠れ層を用います。 zs = ベクトル z のバッチ(袋)です。(encoder の状態であり、行列です。) use_sigmoid = これが True であるならば、出力はシグモイド関数によって [0, 1] 区間に抑えられます。 (d は encoder のための連なったRNNの最後の隠れ層を示している考えられます。) """ with s2s.variable_scope.variable_scope(scope or "outcomepred", reuse=True): weights_pred = params_pred[0] biases_pred = params_pred[1] hidden1 = tf.nn.tanh(tf.add(tf.matmul(zs, weights_pred['W1']), biases_pred['B1'])) outcome_preds = tf.add(tf.matmul(hidden1, weights_pred['W2']), biases_pred['B2']) if use_sigmoid: outcome_preds = tf.sigmoid(outcome_preds) return outcome_preds def outcomePrediction(zs, params_pred, which_outcomeprediction, use_sigmoid=False, scope=None): if which_outcomeprediction == 'linear': return linearOutcomePrediction(zs, params_pred, scope=scope) else: return 
flexibleOutcomePrediction(zs, params_pred, scope=scope) def getEncoding(inputs, cell, num_symbols, embedding_size, dtype=s2s.dtypes.float32, scope=None): """ English: Model for produce encoding z from x zs = batch of z-vectors (encoding-states, matrix) Japanese: このモデルにおける、入力 x から潜在表現 z の生成です。 zs = ベクトル z のバッチ(袋)です。(encoder の状態であり、行列です。) """ with s2s.variable_scope.variable_scope(scope or 'seq2seq', reuse=True): encoder_cell = s2s.core_rnn_cell.EmbeddingWrapper( cell, embedding_classes=num_symbols, embedding_size=embedding_size ) _, encoder_state = s2s.rnn.static_rnn(encoder_cell, inputs, dtype=dtype) # batch_size x cell.state_size # batch_size だけ、cell が含まれていると考えると良いでしょう。 return encoder_state def variationalEncoding(inputs, cell, num_symbols, embedding_size, variational_params, dtypes=s2s.dtypes.float32, scope=None): """ English: Model for produce encoding z from x. zs = batch of z-vectors (encoding-stats, matrix). sigmas: posterior standard devs for each dimension, produced using 2-layer neural net with Relu units. 
Japanese: このモデルにおける、入力 x から潜在表現 z の生成です。 zs = ベクトル z のバッチ(袋)です。(encoder の状態であり、行列です。) sigmas = それぞれの次元における、事後標準偏差(devs = deviations)であり、 Relu ユニットから成る2つのレイヤーを用いて生成されます。 variational_params = VAE 内で生成される \mu と \sigma を持っています。 """ min_sigma = 1e-6 # the smallest allowable sigma value # 許容できる最小の偏差です。 h_T = getEncoding(inputs, cell, num_symbols, embedding_size, dtype=dtypes, scope=scope) with s2s.variable_scope.variable_scope(scope or 'variational', reuse=True): mu_params, sigma_params = variational_params mu = tf.add(tf.matmul(h_T, mu_params['weights']), mu_params['biases']) hidden_layer_sigma = tf.nn.relu(tf.add(tf.matmul(h_T, sigma_params['weights1']), sigma_params['biases1'])) # Relu layer of same size as h_T # h_T と同じサイズの Relu レイヤーです。 sigma = tf.clip_by_value( tf.exp(- tf.abs(tf.add(tf.matmul(hidden_layer_sigma, sigma_params['weights2']), sigma_params['biases2']))), min_sigma, 1.0) return mu, sigma def getDecoding(encoder_state, inputs, cell, num_symbols, embedding_size, feed_previous=True, output_prejection=None, dtype=s2s.dtypes.float32, scope=None): """ English: Model for producing probabilities over x from z Japanese: このモデルにおける、z から x へ向かう確率を計算します。 """ with s2s.variable_scope.variable_scope(scope or 'seq2seq', reuse=True): if output_prejection is None: cell = s2s.core_rnn_cell.OutputProjectionWrapper(cell, num_symbols) decode_probs, _ = s2s.embedding_rnn_decoder( inputs, encoder_state, cell, num_symbols, embedding_size, output_projection=output_prejection, feed_previous=feed_previous) return decode_probs def createVariationalVar(inputs, cell, num_symbols, embedding_size, feed_previous=False, output_projection=None, dtype=s2s.dtypes.float32, scope=None): """ English: Creates Tensorflow variables which can reused. 
Japanese: 再利用可能な Tensorflow の変数を作ります。 """ with s2s.variable_scope.variable_scope(scope or 'seq2seq'): encoder_cell = s2s.core_rnn_cell.EmbeddingWrapper( cell, embedding_classes=num_symbols, embedding_size=embedding_size) _, encoder_state = s2s.rnn.static_rnn(encoder_cell, inputs, dtype=dtype) # batch_size x cell.state_size if output_projection is None: cell = s2s.core_rnn_cell.OutputProjectionWrapper(cell, num_symbols) decode_probs, _ = s2s.embedding_rnn_decoder( inputs, encoder_state, cell, num_symbols, embedding_size, output_projection=output_projection, feed_previous=feed_previous) return None def createDeterministicVar(inputs, cell, num_symbols, embedding_size, feed_previous=False, output_projection=None, dtype=s2s.dtypes.float32, scope=None): """ English: Creates Tensorflow variables which can be reused. Japanese: 再利用可能な Tensorflow の変数を作ります。 """ with s2s.variable_scope.variable_scope(scope or 'seq2seq'): encoder_cell = s2s.core_rnn_cell.EmbeddingWrapper( cell, embedding_classes=num_symbols, embedding_size=embedding_size) _, encoder_state = s2s.rnn.static_rnn(encoder_cell, inputs, dtype=dtype) # batch_size x cell.state_size if output_projection is None: cell = s2s.core_rnn_cell.OutputProjectionWrapper(cell, num_symbols) decode_probs, _ = s2s.embedding_rnn_decoder( inputs, encoder_state, cell, num_symbols, embedding_size, output_projection=output_projection, feed_previous=feed_previous) return None def levenshtein(seq1, seq2): """ English: Computes edit distance between two (possibly padded) sequences: Japanese: 2つのシーケンスにおける独自のレーベンシュタイン距離を計算する。 (padding である '<PAD>'が加えられている可能性を考慮しています) (ここにおけるレーベンシュタイン距離は、 恐らく単語ごとに分割した場合のレーベンシュタイン距離(一般には文字ごと)) """ s1 = [value for value in seq1 if value != '<PAD>'] s2 = [value for value in seq2 if value != '<PAD>'] if len(s1) > len(s2): s1, s2 = s2, s1 distances = range(len(s1) + 1) for i2, c2 in enumerate(s2): distances_ = [i2 + 1] for i1, c1 in enumerate(s1): if c1 == c2: distances_.append(distances[i1]) else: 
distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1]))) distances = distances_ return distances[-1] """ Info for i1, c1 in enumerate(['a', 'b', 'c']): print('{} : {}'.format(i1, c1)) => 0 : a 1 : b 2 : c """ def mutate_lengthconstrained(init_seq, num_edits, vocab, length_range=(10, 20)): """ English: Preforms random edits of sequences, respecting min/max sequence-length constraints. At each edit, possible operations (equally likely) are: (1) Do nothing (2) Substitution (3) Deletion (4) Insertion Each operation is uniform over possible symbols and possible positions Japanese: 最小/最大のシーケンスの長さに制約をかけながら、シーケンスのランダムな編集を行います。 編集時に可能な操作は以下の4つです。 (1) 何もしない (2) 置換 (3) 削除 (4) 挿入 それぞれの編集は、可能なシンボル(単語など)や位置に対して均一に(偏りなく)行われます。 """ min_seq_length, max_seq_length = length_range new_seq = init_seq[:] for i in range(num_edits): operation = random.randint(1, 4) # 1 = Do nothing, 2 = Substitution, 3 = Deletion, 4 = Insertion # 1 = 何もしない 2 = 置換 3 = 削除 4 = 挿入 if operation > 1: char = '<PAD>' # potential character, cannot be PAD. # 潜在的な element であり、 <PAD> になることはない。 # (つまり <PAD> 以外の任意の element(単語) になる) while char == '<PAD>': char = vocab[random.randint(0, len(vocab) - 1)] position = random.randint(0, len(new_seq) - 1) if (operation == 4) and (len(new_seq) < max_seq_length): position = random.randint(0, len(new_seq)) new_seq.insert(position, char) elif (operation == 3) and (len(new_seq) > min_seq_length): _ = new_seq.pop(position) elif operation == 2: new_seq[position] = char edit_dist = levenshtein(new_seq, init_seq) if edit_dist > num_edits: raise ValueError('edit distance invalid') return new_seq, edit_dist def mutate(init_seq, num_edits, vocab): new_seq = init_seq[:] for i in range(num_edits): operation = random.randint(1, 4) # 1 = Do nothing, 2 = Substitution, 3 = Deletion, 4 = Insertion # 1 = 何もしない 2 = 置換 3 = 削除 4 = 挿入 if operation > 1: char = '<PAD>' # potential character, cannot be PAD. 
# 潜在的な element であり、 <PAD> になることはない。 while char == '<PAD>': char = vocab[random.randint(0, len(vocab) - 1)] position = random.randint(0, len(new_seq) - 1) if operation == 4: position = random.randint(0, len(new_seq)) new_seq.insert(position, char) elif (operation == 3) and len(new_seq) > 1: _ = new_seq.pop(position) elif operation == 2: new_seq[position] = char edit_dist = levenshtein(new_seq, init_seq) if edit_dist > num_edits: raise ValueError("edit distance invalid") return new_seq, edit_dist def sigmoid(x): return 1 / (1 + math.exp(-x)) def smoothedsigmoid(x, b=1): """ English: b controls smoothness, lower = smoother Japanese: b は緩やかさを調整します。b が小さいほど緩やかに(変化が小さく)なります。 """ return 1 / (1 + math.exp(- b * x))
[ "tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.core_rnn_cell.EmbeddingWrapper", "tensorflow.sigmoid", "tensorflow.matmul", "tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.embedding_rnn_decoder", "tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.rnn.static_rnn", "tensorflow.contrib.legacy_...
[((648, 717), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.variable_scope.variable_scope', 's2s.variable_scope.variable_scope', (["(scope or 'outcomepred')"], {'reuse': '(True)'}), "(scope or 'outcomepred', reuse=True)\n", (681, 717), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((1576, 1645), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.variable_scope.variable_scope', 's2s.variable_scope.variable_scope', (["(scope or 'outcomepred')"], {'reuse': '(True)'}), "(scope or 'outcomepred', reuse=True)\n", (1609, 1645), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((2655, 2720), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.variable_scope.variable_scope', 's2s.variable_scope.variable_scope', (["(scope or 'seq2seq')"], {'reuse': '(True)'}), "(scope or 'seq2seq', reuse=True)\n", (2688, 2720), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((2746, 2852), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.core_rnn_cell.EmbeddingWrapper', 's2s.core_rnn_cell.EmbeddingWrapper', (['cell'], {'embedding_classes': 'num_symbols', 'embedding_size': 'embedding_size'}), '(cell, embedding_classes=num_symbols,\n embedding_size=embedding_size)\n', (2780, 2852), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((2914, 2967), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.rnn.static_rnn', 's2s.rnn.static_rnn', (['encoder_cell', 'inputs'], {'dtype': 'dtype'}), '(encoder_cell, inputs, dtype=dtype)\n', (2932, 2967), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((3953, 4022), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.variable_scope.variable_scope', 's2s.variable_scope.variable_scope', (["(scope or 'variational')"], {'reuse': '(True)'}), "(scope or 'variational', reuse=True)\n", (3986, 4022), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq 
as s2s\n'), ((4993, 5058), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.variable_scope.variable_scope', 's2s.variable_scope.variable_scope', (["(scope or 'seq2seq')"], {'reuse': '(True)'}), "(scope or 'seq2seq', reuse=True)\n", (5026, 5058), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((5207, 5365), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.embedding_rnn_decoder', 's2s.embedding_rnn_decoder', (['inputs', 'encoder_state', 'cell', 'num_symbols', 'embedding_size'], {'output_projection': 'output_prejection', 'feed_previous': 'feed_previous'}), '(inputs, encoder_state, cell, num_symbols,\n embedding_size, output_projection=output_prejection, feed_previous=\n feed_previous)\n', (5232, 5365), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((5773, 5826), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.variable_scope.variable_scope', 's2s.variable_scope.variable_scope', (["(scope or 'seq2seq')"], {}), "(scope or 'seq2seq')\n", (5806, 5826), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((5852, 5958), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.core_rnn_cell.EmbeddingWrapper', 's2s.core_rnn_cell.EmbeddingWrapper', (['cell'], {'embedding_classes': 'num_symbols', 'embedding_size': 'embedding_size'}), '(cell, embedding_classes=num_symbols,\n embedding_size=embedding_size)\n', (5886, 5958), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((5997, 6050), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.rnn.static_rnn', 's2s.rnn.static_rnn', (['encoder_cell', 'inputs'], {'dtype': 'dtype'}), '(encoder_cell, inputs, dtype=dtype)\n', (6015, 6050), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((6238, 6396), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.embedding_rnn_decoder', 's2s.embedding_rnn_decoder', (['inputs', 'encoder_state', 'cell', 
'num_symbols', 'embedding_size'], {'output_projection': 'output_projection', 'feed_previous': 'feed_previous'}), '(inputs, encoder_state, cell, num_symbols,\n embedding_size, output_projection=output_projection, feed_previous=\n feed_previous)\n', (6263, 6396), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((6805, 6858), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.variable_scope.variable_scope', 's2s.variable_scope.variable_scope', (["(scope or 'seq2seq')"], {}), "(scope or 'seq2seq')\n", (6838, 6858), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((6884, 6990), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.core_rnn_cell.EmbeddingWrapper', 's2s.core_rnn_cell.EmbeddingWrapper', (['cell'], {'embedding_classes': 'num_symbols', 'embedding_size': 'embedding_size'}), '(cell, embedding_classes=num_symbols,\n embedding_size=embedding_size)\n', (6918, 6990), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((7042, 7095), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.rnn.static_rnn', 's2s.rnn.static_rnn', (['encoder_cell', 'inputs'], {'dtype': 'dtype'}), '(encoder_cell, inputs, dtype=dtype)\n', (7060, 7095), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((7283, 7441), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.embedding_rnn_decoder', 's2s.embedding_rnn_decoder', (['inputs', 'encoder_state', 'cell', 'num_symbols', 'embedding_size'], {'output_projection': 'output_projection', 'feed_previous': 'feed_previous'}), '(inputs, encoder_state, cell, num_symbols,\n embedding_size, output_projection=output_projection, feed_previous=\n feed_previous)\n', (7308, 7441), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((9224, 9244), 'random.randint', 'random.randint', (['(1)', '(4)'], {}), '(1, 4)\n', (9238, 9244), False, 'import random\n'), ((10383, 10403), 
'random.randint', 'random.randint', (['(1)', '(4)'], {}), '(1, 4)\n', (10397, 10403), False, 'import random\n'), ((793, 820), 'tensorflow.matmul', 'tf.matmul', (['zs', 'coefficients'], {}), '(zs, coefficients)\n', (802, 820), True, 'import tensorflow as tf\n'), ((1848, 1886), 'tensorflow.matmul', 'tf.matmul', (['hidden1', "weights_pred['W2']"], {}), "(hidden1, weights_pred['W2'])\n", (1857, 1886), True, 'import tensorflow as tf\n'), ((1961, 1986), 'tensorflow.sigmoid', 'tf.sigmoid', (['outcome_preds'], {}), '(outcome_preds)\n', (1971, 1986), True, 'import tensorflow as tf\n'), ((4099, 4135), 'tensorflow.matmul', 'tf.matmul', (['h_T', "mu_params['weights']"], {}), "(h_T, mu_params['weights'])\n", (4108, 4135), True, 'import tensorflow as tf\n'), ((5119, 5179), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.core_rnn_cell.OutputProjectionWrapper', 's2s.core_rnn_cell.OutputProjectionWrapper', (['cell', 'num_symbols'], {}), '(cell, num_symbols)\n', (5160, 5179), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((6150, 6210), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.core_rnn_cell.OutputProjectionWrapper', 's2s.core_rnn_cell.OutputProjectionWrapper', (['cell', 'num_symbols'], {}), '(cell, num_symbols)\n', (6191, 6210), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((7195, 7255), 'tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq.core_rnn_cell.OutputProjectionWrapper', 's2s.core_rnn_cell.OutputProjectionWrapper', (['cell', 'num_symbols'], {}), '(cell, num_symbols)\n', (7236, 7255), True, 'from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s\n'), ((11356, 11368), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (11364, 11368), False, 'import math\n'), ((11562, 11578), 'math.exp', 'math.exp', (['(-b * x)'], {}), '(-b * x)\n', (11570, 11578), False, 'import math\n'), ((1761, 1794), 'tensorflow.matmul', 'tf.matmul', (['zs', "weights_pred['W1']"], {}), "(zs, 
weights_pred['W1'])\n", (1770, 1794), True, 'import tensorflow as tf\n'), ((4206, 4246), 'tensorflow.matmul', 'tf.matmul', (['h_T', "sigma_params['weights1']"], {}), "(h_T, sigma_params['weights1'])\n", (4215, 4246), True, 'import tensorflow as tf\n'), ((4471, 4526), 'tensorflow.matmul', 'tf.matmul', (['hidden_layer_sigma', "sigma_params['weights2']"], {}), "(hidden_layer_sigma, sigma_params['weights2'])\n", (4480, 4526), True, 'import tensorflow as tf\n')]
"""empty message Revision ID: b9ab1a9a2113 Revises: Create Date: 2021-11-28 22:41:01.160642 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'b9ab1a9a2113' down_revision = None branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('curpairs', sa.Column('id', sa.Integer(), nullable=False), sa.Column('base_code', sa.String(length=3), nullable=True), sa.Column('target_code', sa.String(length=3), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_table('currates', sa.Column('id', sa.Integer(), nullable=False), sa.Column('date', sa.Date(), nullable=True), sa.Column('rate', sa.Float(), nullable=True), sa.Column('pair_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['pair_id'], ['curpairs.id'], ), sa.PrimaryKeyConstraint('id') ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_table('currates') op.drop_table('curpairs') # ### end Alembic commands ###
[ "sqlalchemy.ForeignKeyConstraint", "sqlalchemy.Float", "alembic.op.drop_table", "sqlalchemy.PrimaryKeyConstraint", "sqlalchemy.Date", "sqlalchemy.Integer", "sqlalchemy.String" ]
[((1073, 1098), 'alembic.op.drop_table', 'op.drop_table', (['"""currates"""'], {}), "('currates')\n", (1086, 1098), False, 'from alembic import op\n'), ((1103, 1128), 'alembic.op.drop_table', 'op.drop_table', (['"""curpairs"""'], {}), "('curpairs')\n", (1116, 1128), False, 'from alembic import op\n'), ((575, 604), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (598, 604), True, 'import sqlalchemy as sa\n'), ((852, 905), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['pair_id']", "['curpairs.id']"], {}), "(['pair_id'], ['curpairs.id'])\n", (875, 905), True, 'import sqlalchemy as sa\n'), ((913, 942), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (936, 942), True, 'import sqlalchemy as sa\n'), ((410, 422), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (420, 422), True, 'import sqlalchemy as sa\n'), ((468, 487), 'sqlalchemy.String', 'sa.String', ([], {'length': '(3)'}), '(length=3)\n', (477, 487), True, 'import sqlalchemy as sa\n'), ((534, 553), 'sqlalchemy.String', 'sa.String', ([], {'length': '(3)'}), '(length=3)\n', (543, 553), True, 'import sqlalchemy as sa\n'), ((663, 675), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (673, 675), True, 'import sqlalchemy as sa\n'), ((716, 725), 'sqlalchemy.Date', 'sa.Date', ([], {}), '()\n', (723, 725), True, 'import sqlalchemy as sa\n'), ((765, 775), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (773, 775), True, 'import sqlalchemy as sa\n'), ((818, 830), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (828, 830), True, 'import sqlalchemy as sa\n')]
import os from pathlib import Path import pandas as pd from lime.lime_tabular import LimeTabularExplainer from ml_editor.data_processing import get_split_by_author FEATURE_DISPLAY_NAMES = { "num_questions": "물음표 빈도", "num_periods": "마침표 빈도", "num_commas": "쉼표 빈도", "num_exclam": "느낌표 빈도", "num_quotes": "따옴표 빈도", "num_colon": "콜론 빈도", "num_semicolon": "세미콜론 빈도", "num_stops": "불용어 빈도", "num_words": "단어 개수", "num_chars": "문자 개수", "num_diff_words": "어휘 다양성", "avg_word_len": "평균 단어 길이", "polarity": "긍정적인 감성", "ADJ": "형용사 빈도", "ADP": "전치사 빈도", "ADV": "부사 빈도", "AUX": "조동사 빈도", "CONJ": "접속사 빈도", "DET": "한정사 빈도", "INTJ": "감탄사 빈도", "NOUN": "명사 빈도", "NUM": "숫자 빈도", "PART": "불변화사 빈도", "PRON": "대명사 빈도", "PROPN": "고유 명사 빈도", "PUNCT": "구두점 빈도", "SCONJ": "종속 접속사 빈도", "SYM": "기호 빈도", "VERB": "동사 빈도", "X": "다른 단어의 빈도", } POS_NAMES = { "ADJ": "adjective", "ADP": "adposition", "ADV": "adverb", "AUX": "auxiliary verb", "CONJ": "coordinating conjunction", "DET": "determiner", "INTJ": "interjection", "NOUN": "noun", "NUM": "numeral", "PART": "particle", "PRON": "pronoun", "PROPN": "proper noun", "PUNCT": "punctuation", "SCONJ": "subordinating conjunction", "SYM": "symbol", "VERB": "verb", "X": "other", } FEATURE_ARR = [ "num_questions", "num_periods", "num_commas", "num_exclam", "num_quotes", "num_colon", "num_stops", "num_semicolon", "num_words", "num_chars", "num_diff_words", "avg_word_len", "polarity", ] FEATURE_ARR.extend(POS_NAMES.keys()) def get_explainer(): """ 훈련 데이터를 사용해 LIME 설명 도구를 준비합니다. 직렬화하지 않아도 될만큼 충분히 빠릅니다. 
:return: LIME 설명 도구 객체 """ curr_path = Path(os.path.dirname(__file__)) data_path = Path("../data/writers_with_features.csv") df = pd.read_csv(curr_path / data_path) train_df, test_df = get_split_by_author(df, test_size=0.2, random_state=40) explainer = LimeTabularExplainer( train_df[FEATURE_ARR].values, feature_names=FEATURE_ARR, class_names=["low", "high"], ) return explainer EXPLAINER = get_explainer() def simplify_order_sign(order_sign): """ 사용자에게 명확한 출력을 위해 기호를 단순화합니다. :param order_sign: 비교 연산자 입력 :return: 단순화된 연산자 """ if order_sign in ["<=", "<"]: return "<" if order_sign in [">=", ">"]: return ">" return order_sign def get_recommended_modification(simple_order, impact): """ 연산자와 영향 타입에 따라 추천 문장을 생성합니다. :param simple_order: 단순화된 연산자 :param impact: 변화가 긍정적인지 부정적인지 여부 :return: 추천 문자열 """ bigger_than_threshold = simple_order == ">" has_positive_impact = impact > 0 if bigger_than_threshold and has_positive_impact: return "높일 필요가 없습니다" if not bigger_than_threshold and not has_positive_impact: return "높이세요" if bigger_than_threshold and not has_positive_impact: return "낮추세요" if not bigger_than_threshold and has_positive_impact: return "낮출 필요가 없습니다" def parse_explanations(exp_list): """ LIME이 반환한 설명을 사용자가 읽을 수 있도록 파싱합니다. :param exp_list: LIME 설명 도구가 반환한 설명 :return: 사용자에게 전달한 문자열을 담은 딕셔너리 배열 """ parsed_exps = [] for feat_bound, impact in exp_list: conditions = feat_bound.split(" ") # 추천으로 표현하기 힘들기 때문에 # 1 <= a < 3 와 같은 이중 경계 조건은 무시합니다 if len(conditions) == 3: feat_name, order, threshold = conditions simple_order = simplify_order_sign(order) recommended_mod = get_recommended_modification(simple_order, impact) parsed_exps.append( { "feature": feat_name, "feature_display_name": FEATURE_DISPLAY_NAMES[feat_name], "order": simple_order, "threshold": threshold, "impact": impact, "recommendation": recommended_mod, } ) return parsed_exps def get_recommendation_string_from_parsed_exps(exp_list): """ 플래스크 앱에서 출력할 수 있는 추천 텍스트를 생성합니다. 
:param exp_list: 설명을 담은 딕셔너리의 배열 :return: HTML 추천 텍스트 """ recommendations = [] for i, feature_exp in enumerate(exp_list): recommendation = "%s %s" % ( feature_exp["recommendation"], feature_exp["feature_display_name"], ) font_color = "green" if feature_exp["recommendation"] in ["Increase", "Decrease"]: font_color = "red" rec_str = """<font color="%s">%s) %s</font>""" % ( font_color, i + 1, recommendation, ) recommendations.append(rec_str) rec_string = "<br/>".join(recommendations) return rec_string
[ "pandas.read_csv", "pathlib.Path", "lime.lime_tabular.LimeTabularExplainer", "os.path.dirname", "ml_editor.data_processing.get_split_by_author" ]
[((1881, 1922), 'pathlib.Path', 'Path', (['"""../data/writers_with_features.csv"""'], {}), "('../data/writers_with_features.csv')\n", (1885, 1922), False, 'from pathlib import Path\n'), ((1932, 1966), 'pandas.read_csv', 'pd.read_csv', (['(curr_path / data_path)'], {}), '(curr_path / data_path)\n', (1943, 1966), True, 'import pandas as pd\n'), ((1991, 2046), 'ml_editor.data_processing.get_split_by_author', 'get_split_by_author', (['df'], {'test_size': '(0.2)', 'random_state': '(40)'}), '(df, test_size=0.2, random_state=40)\n', (2010, 2046), False, 'from ml_editor.data_processing import get_split_by_author\n'), ((2063, 2174), 'lime.lime_tabular.LimeTabularExplainer', 'LimeTabularExplainer', (['train_df[FEATURE_ARR].values'], {'feature_names': 'FEATURE_ARR', 'class_names': "['low', 'high']"}), "(train_df[FEATURE_ARR].values, feature_names=\n FEATURE_ARR, class_names=['low', 'high'])\n", (2083, 2174), False, 'from lime.lime_tabular import LimeTabularExplainer\n'), ((1838, 1863), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1853, 1863), False, 'import os\n')]
""" Tests for the test utils. """ import pytest from straitlets import Serializable, Integer from straitlets.test_utils import assert_serializables_equal def test_assert_serializables_equal(): class Foo(Serializable): x = Integer() y = Integer() class Bar(Serializable): x = Integer() y = Integer() assert_serializables_equal(Foo(x=1, y=1), Foo(x=1, y=1)) with pytest.raises(AssertionError): assert_serializables_equal(Foo(x=1, y=1), Bar(x=1, y=1)) with pytest.raises(AssertionError): assert_serializables_equal(Foo(x=1, y=1), Foo(x=1, y=2)) with pytest.raises(AssertionError): assert_serializables_equal( Foo(x=1, y=1), Foo(x=1, y=2), skip=('x',), ) assert_serializables_equal(Foo(x=1), Foo(x=1), skip=('y',)) assert_serializables_equal(Foo(y=1), Foo(y=1), skip=('x',))
[ "straitlets.Integer", "pytest.raises" ]
[((238, 247), 'straitlets.Integer', 'Integer', ([], {}), '()\n', (245, 247), False, 'from straitlets import Serializable, Integer\n'), ((260, 269), 'straitlets.Integer', 'Integer', ([], {}), '()\n', (267, 269), False, 'from straitlets import Serializable, Integer\n'), ((312, 321), 'straitlets.Integer', 'Integer', ([], {}), '()\n', (319, 321), False, 'from straitlets import Serializable, Integer\n'), ((334, 343), 'straitlets.Integer', 'Integer', ([], {}), '()\n', (341, 343), False, 'from straitlets import Serializable, Integer\n'), ((416, 445), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (429, 445), False, 'import pytest\n'), ((522, 551), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (535, 551), False, 'import pytest\n'), ((627, 656), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (640, 656), False, 'import pytest\n')]
import numpy from fdm.geometry import create_close_point_finder def create_weights_distributor(close_point_finder): def distribute(point, value): close_points = close_point_finder(point) distance_sum = sum(close_points.values()) return dict( {p: (1. - distance/distance_sum)*value for p, distance in close_points.items()}, ) return distribute def apply_statics_bc(variables, matrix, vector, bcs): extra_bcs = extract_extra_bcs(bcs) replace_bcs = extract_replace_bcs(bcs) extra_bcs_number = len(extra_bcs) _matrix = numpy.copy(matrix) _vector = numpy.copy(vector) assert (_rows_number(_matrix) == len(variables), 'Number of BCs must be equal "vars_number" - "real_nodes_number"') points = list(variables) matrix_bc_applicator = create_matrix_bc_applicator(_matrix, points, variables) vector_bc_applicator = create_vector_bc_applicator(_vector) for i, (scheme, value, replace) in enumerate(replace_bcs): matrix_bc_applicator(variables[replace], scheme) vector_bc_applicator(variables[replace], value) initial_idx = _rows_number(matrix) - extra_bcs_number for i, (scheme, value, _) in enumerate(extra_bcs): matrix_bc_applicator(initial_idx + i, scheme) vector_bc_applicator(initial_idx + i, value) return _matrix, _vector def apply_dynamics_bc(variables, matrix_a, matrix_b, bcs): extra_bcs = extract_extra_bcs(bcs) replace_bcs = extract_replace_bcs(bcs) extra_bcs_number = len(extra_bcs) _matrix_a = numpy.copy(matrix_a) _matrix_b = numpy.copy(matrix_b) assert _rows_number(_matrix_a) == len(variables), 'Number of BCs must be equal "vars_number" - "real_nodes_number"' points = list(variables) matrix_a_bc_applicator = create_matrix_bc_applicator(_matrix_a, points, variables) matrix_b_bc_applicator = create_matrix_bc_applicator(_matrix_b, points, variables) for i, (scheme_a, scheme_b, replace) in enumerate(replace_bcs): matrix_a_bc_applicator(variables[replace], scheme_a) matrix_b_bc_applicator(variables[replace], scheme_b) initial_idx = _rows_number(_matrix_a) - extra_bcs_number for i, 
(scheme_a, scheme_b, _) in enumerate(extra_bcs): matrix_a_bc_applicator(initial_idx + i, scheme_a) matrix_b_bc_applicator(initial_idx + i, scheme_b) return _matrix_a, _matrix_b def extract_extra_bcs(bcs): return [bc for bc in bcs if bc.replace is None] def extract_replace_bcs(bcs): return [bc for bc in bcs if bc.replace is not None] def create_matrix_bc_applicator(matrix, points, variables, tol=1e-6): def apply(row_idx, scheme): matrix[row_idx, :] = 0. if len(scheme): distributor = SchemeToNodesDistributor(points) scheme = distributor(scheme) scheme = scheme.drop(tol) for p, weight in scheme.items(): col_idx = variables[p] matrix[row_idx, col_idx] = weight return apply def create_vector_bc_applicator(vector): def apply(row_idx, value): vector[row_idx] = value return apply def _zero_vector_last_rows(vector, number): _vector = numpy.zeros(vector.shape) _vector[:-number] = vector[:-number] return _vector def _zero_matrix_last_rows(matrix, number): _matrix = numpy.zeros(matrix.shape) _matrix[:-number, :] = matrix[:-number, :] return _matrix def _rows_number(matrix): return matrix.shape[0] def _cols_number(matrix): return matrix.shape[1] class SchemeToNodesDistributor(object): def __init__(self, nodes): self._distributor = WeightsDistributor(nodes) def __call__(self, scheme): return scheme.distribute(self._distributor) class WeightsDistributor(object): def __init__(self, nodes): self._distributor = create_weights_distributor( create_close_point_finder(nodes) ) def __call__(self, point, weight): return self._distributor(point, weight)
[ "fdm.geometry.create_close_point_finder", "numpy.copy", "numpy.zeros" ]
[((589, 607), 'numpy.copy', 'numpy.copy', (['matrix'], {}), '(matrix)\n', (599, 607), False, 'import numpy\n'), ((622, 640), 'numpy.copy', 'numpy.copy', (['vector'], {}), '(vector)\n', (632, 640), False, 'import numpy\n'), ((1565, 1585), 'numpy.copy', 'numpy.copy', (['matrix_a'], {}), '(matrix_a)\n', (1575, 1585), False, 'import numpy\n'), ((1602, 1622), 'numpy.copy', 'numpy.copy', (['matrix_b'], {}), '(matrix_b)\n', (1612, 1622), False, 'import numpy\n'), ((3201, 3226), 'numpy.zeros', 'numpy.zeros', (['vector.shape'], {}), '(vector.shape)\n', (3212, 3226), False, 'import numpy\n'), ((3347, 3372), 'numpy.zeros', 'numpy.zeros', (['matrix.shape'], {}), '(matrix.shape)\n', (3358, 3372), False, 'import numpy\n'), ((3896, 3928), 'fdm.geometry.create_close_point_finder', 'create_close_point_finder', (['nodes'], {}), '(nodes)\n', (3921, 3928), False, 'from fdm.geometry import create_close_point_finder\n')]
import unittest from csound import output, orchestra from csound.orchestra import gen08 from data import constants as c from data import get class TestSounds(unittest.TestCase): def test_simple_soundwaves(self): # Get all data place = "Madrid" mad2t = get(c.T, location=place) madp = get(c.P, location=place) madw = get(c.W, location=place) madc = get(c.C, location=place) # write orchestra + score duration = 30 points = 16777216 oscillator = orchestra.oscillator1(points) score = ["f1 0 8192 10 1 ; Table containing a sine wave.", gen08(2, mad2t, number_of_points=points, comment="Weather parameter table 2"), gen08(3, madp, number_of_points=points, comment="Weather parameter table 3", ), gen08(4, madw, number_of_points=points, comment="Weather parameter table 4"), gen08(5, madc, number_of_points=points, comment="Weather parameter table 5"), "i1 0 %s 10000 2 ; " % duration, "i1 0 %s 5000 3 ; " % duration, "i1 0 %s 5000 4 ; " % duration, "i1 0 %s 5000 5 ; " % duration ] output.write_and_play(output.get_csd([oscillator], score))
[ "csound.orchestra.oscillator1", "data.get", "csound.orchestra.gen08", "csound.output.get_csd" ]
[((285, 309), 'data.get', 'get', (['c.T'], {'location': 'place'}), '(c.T, location=place)\n', (288, 309), False, 'from data import get\n'), ((325, 349), 'data.get', 'get', (['c.P'], {'location': 'place'}), '(c.P, location=place)\n', (328, 349), False, 'from data import get\n'), ((365, 389), 'data.get', 'get', (['c.W'], {'location': 'place'}), '(c.W, location=place)\n', (368, 389), False, 'from data import get\n'), ((405, 429), 'data.get', 'get', (['c.C'], {'location': 'place'}), '(c.C, location=place)\n', (408, 429), False, 'from data import get\n'), ((535, 564), 'csound.orchestra.oscillator1', 'orchestra.oscillator1', (['points'], {}), '(points)\n', (556, 564), False, 'from csound import output, orchestra\n'), ((651, 728), 'csound.orchestra.gen08', 'gen08', (['(2)', 'mad2t'], {'number_of_points': 'points', 'comment': '"""Weather parameter table 2"""'}), "(2, mad2t, number_of_points=points, comment='Weather parameter table 2')\n", (656, 728), False, 'from csound.orchestra import gen08\n'), ((747, 823), 'csound.orchestra.gen08', 'gen08', (['(3)', 'madp'], {'number_of_points': 'points', 'comment': '"""Weather parameter table 3"""'}), "(3, madp, number_of_points=points, comment='Weather parameter table 3')\n", (752, 823), False, 'from csound.orchestra import gen08\n'), ((844, 920), 'csound.orchestra.gen08', 'gen08', (['(4)', 'madw'], {'number_of_points': 'points', 'comment': '"""Weather parameter table 4"""'}), "(4, madw, number_of_points=points, comment='Weather parameter table 4')\n", (849, 920), False, 'from csound.orchestra import gen08\n'), ((939, 1015), 'csound.orchestra.gen08', 'gen08', (['(5)', 'madc'], {'number_of_points': 'points', 'comment': '"""Weather parameter table 5"""'}), "(5, madc, number_of_points=points, comment='Weather parameter table 5')\n", (944, 1015), False, 'from csound.orchestra import gen08\n'), ((1263, 1298), 'csound.output.get_csd', 'output.get_csd', (['[oscillator]', 'score'], {}), '([oscillator], score)\n', (1277, 1298), False, 'from 
csound import output, orchestra\n')]
#!/usr/bin/env python """ Nicholas' Example API code for interacting with Alienvault API. This is just Example code written by NMA.IO. There isn't really much you can do with the API just yet, so this will be a work in progress. Grab your API key here: https://www.alienvault.com/documentation/usm-anywhere/api/alienvault-api.htm?cshid=1182 That said, you could use this to write an alerter bot. * Note, this isn't an SDK, just some example code to get people started... """ # import json # uncomment if you want pretty printing. import base64 import requests URL = "alienvault.cloud/api/2.0" HOST = "" # put your subdomain here. def Auth(apiuser, apikey): """Our Authentication Code. :params apiuser :params apikey :returns oauth_token """ headers = {"Authorization": "Basic {}".format(base64.b64encode("%s:%s" % (apiuser, apikey)))} r = requests.post("https://{}.{}/oauth/token?grant_type=client_credentials".format(HOST, URL), headers=headers) if r.status_code is 200: return r.json()["access_token"] else: print("Authentication failed. Check username/Password") exit(1) def Alarms(token): """Pull Alarms from the API Console.""" headers = {"Authorization": "Bearer {}".format(token)} r = requests.get("https://{}.{}/alarms/?page=1&size=20&suppressed=false&status=open".format(HOST, URL), headers=headers) if r.status_code is not 200: print("Something went wrong. \n{}".format(r.content)) exit(1) else: return (r.json()) def Events(): """Nothing yet.""" pass if __name__ == "__main__": print("Simple API Integration with Alienvault USM Anywhere - 2018 NMA.IO") token = Auth("username", "password") jdata = Alarms(token) for item in jdata["_embedded"]["alarms"]: # print(json.dumps(item, indent=2)) # uncomment if you want a pretty version of the whole block. print(item["rule_method"] + ": " + " ".join(item["alarm_sources"]))
[ "base64.b64encode" ]
[((828, 873), 'base64.b64encode', 'base64.b64encode', (["('%s:%s' % (apiuser, apikey))"], {}), "('%s:%s' % (apiuser, apikey))\n", (844, 873), False, 'import base64\n')]
from timebox.timebox import TimeBox from timebox.utils.exceptions import InvalidPandasIndexError import pandas as pd import numpy as np import unittest import os import logging class TestTimeBoxPandas(unittest.TestCase): def test_save_pandas(self): file_name = 'save_pandas.npb' df = pd.read_csv('timebox/tests/data/ETH-USD_combined_utc.csv', index_col=0) tb = TimeBox.save_pandas(df, file_name) self.assertTrue(os.path.exists(file_name)) tb_read = TimeBox(file_name) df2 = tb_read.to_pandas() df_columns = list(df) df_columns.sort() df2_columns = list(df2) df2_columns.sort() self.assertListEqual(df_columns, df2_columns) os.remove(file_name) return def test_pandas_errors(self): df = pd.DataFrame.from_dict( { 'value_1': np.array([0, 1, 2], dtype=np.uint8) }, orient='columns' ) with self.assertRaises(InvalidPandasIndexError): TimeBox.save_pandas(df, 'not_going_to_save.npb') return def test_io_pandas(self): file_name = 'save_pandas.npb' df = pd.read_csv('timebox/tests/data/test1.csv').set_index('date') logging.debug('Starting test_io_pandas with df\n{}'.format(df)) tb = TimeBox.save_pandas(df, file_name) tb_read = TimeBox(file_name) df2 = tb_read.to_pandas() self.assertListEqual(list(df.columns.sort_values()), list(df2.columns.sort_values())) df = df.sort_index() # ensure index is same for i in range(0, len(df.index)): self.assertEqual(pd.to_datetime(df.index[i]), pd.to_datetime(df2.index[i])) # ensure each value is the same columns = df.columns for c in columns: logging.debug('Testing column: {}'.format(c)) logging.debug('Original frame:{}'.format(df[c])) logging.debug('TB frame:{}'.format(df2[c])) self.assertEqual(df[c].sum(), df2[c].sum()) os.remove(file_name) return if __name__ == '__main__': unittest.main()
[ "os.path.exists", "pandas.read_csv", "timebox.timebox.TimeBox.save_pandas", "timebox.timebox.TimeBox", "numpy.array", "unittest.main", "pandas.to_datetime", "os.remove" ]
[((2124, 2139), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2137, 2139), False, 'import unittest\n'), ((306, 377), 'pandas.read_csv', 'pd.read_csv', (['"""timebox/tests/data/ETH-USD_combined_utc.csv"""'], {'index_col': '(0)'}), "('timebox/tests/data/ETH-USD_combined_utc.csv', index_col=0)\n", (317, 377), True, 'import pandas as pd\n'), ((391, 425), 'timebox.timebox.TimeBox.save_pandas', 'TimeBox.save_pandas', (['df', 'file_name'], {}), '(df, file_name)\n', (410, 425), False, 'from timebox.timebox import TimeBox\n'), ((496, 514), 'timebox.timebox.TimeBox', 'TimeBox', (['file_name'], {}), '(file_name)\n', (503, 514), False, 'from timebox.timebox import TimeBox\n'), ((728, 748), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (737, 748), False, 'import os\n'), ((1329, 1363), 'timebox.timebox.TimeBox.save_pandas', 'TimeBox.save_pandas', (['df', 'file_name'], {}), '(df, file_name)\n', (1348, 1363), False, 'from timebox.timebox import TimeBox\n'), ((1382, 1400), 'timebox.timebox.TimeBox', 'TimeBox', (['file_name'], {}), '(file_name)\n', (1389, 1400), False, 'from timebox.timebox import TimeBox\n'), ((2056, 2076), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (2065, 2076), False, 'import os\n'), ((450, 475), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (464, 475), False, 'import os\n'), ((1036, 1084), 'timebox.timebox.TimeBox.save_pandas', 'TimeBox.save_pandas', (['df', '"""not_going_to_save.npb"""'], {}), "(df, 'not_going_to_save.npb')\n", (1055, 1084), False, 'from timebox.timebox import TimeBox\n'), ((877, 912), 'numpy.array', 'np.array', (['[0, 1, 2]'], {'dtype': 'np.uint8'}), '([0, 1, 2], dtype=np.uint8)\n', (885, 912), True, 'import numpy as np\n'), ((1182, 1225), 'pandas.read_csv', 'pd.read_csv', (['"""timebox/tests/data/test1.csv"""'], {}), "('timebox/tests/data/test1.csv')\n", (1193, 1225), True, 'import pandas as pd\n'), ((1661, 1688), 'pandas.to_datetime', 'pd.to_datetime', 
(['df.index[i]'], {}), '(df.index[i])\n', (1675, 1688), True, 'import pandas as pd\n'), ((1690, 1718), 'pandas.to_datetime', 'pd.to_datetime', (['df2.index[i]'], {}), '(df2.index[i])\n', (1704, 1718), True, 'import pandas as pd\n')]
from typing import Any from flaskapp.models import Activities, Logs, LogsToActivities, db from sqlalchemy.sql.expression import desc, func """ Este modulo contiene funciones para escribir y leer los logs desde el chatbot """ MAX_LOGS_PER_QUERY = 5 MAX_STR_SIZE_LOGS = 128 def write_log( chatid: int, intent: str, activity: set, ts: Any, input: str, response: str, observaciones: str, id: int = -1, withargs: bool = False ): if id == -1: try: idlog = db.session.query(func.max(Logs.id)).scalar() + 1 except: idlog = 1 db.session.add( Logs( id=idlog, chatid=chatid, intent=intent, input=input, ts=ts, response=response, obs=observaciones, withargs=withargs ) ) else: db.session.add( Logs( chatid=chatid, intent=intent, input=input, ts=ts, response=response, obs=observaciones, id=id, withargs=withargs ) ) db.session.commit() print("ACTITVITIES TO LOG: " + str(activity)) for act in activity: if act == intent: continue idactivity = None try: idactivity = Activities.query.filter_by(activity=act).first().id except: db.session.add( Activities( activity=act ) ) db.session.commit() idactivity = db.session.query(func.max(Activities.id)).scalar() db.session.add( LogsToActivities(idlog=idlog, idactivity=idactivity) ) db.session.commit() def get_logs(limit=100): query = Logs.query.order_by(desc(Logs.ts)).limit(limit) return query.all(), query def writer(x: Logs): rep = repr(x) if len(rep) > MAX_STR_SIZE_LOGS: return "{}...".format(rep[0:MAX_STR_SIZE_LOGS]) else: return rep
[ "flaskapp.models.Activities.query.filter_by", "flaskapp.models.LogsToActivities", "flaskapp.models.db.session.commit", "sqlalchemy.sql.expression.desc", "flaskapp.models.Activities", "flaskapp.models.Logs", "sqlalchemy.sql.expression.func.max" ]
[((1232, 1251), 'flaskapp.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1249, 1251), False, 'from flaskapp.models import Activities, Logs, LogsToActivities, db\n'), ((1843, 1862), 'flaskapp.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1860, 1862), False, 'from flaskapp.models import Activities, Logs, LogsToActivities, db\n'), ((637, 763), 'flaskapp.models.Logs', 'Logs', ([], {'id': 'idlog', 'chatid': 'chatid', 'intent': 'intent', 'input': 'input', 'ts': 'ts', 'response': 'response', 'obs': 'observaciones', 'withargs': 'withargs'}), '(id=idlog, chatid=chatid, intent=intent, input=input, ts=ts, response=\n response, obs=observaciones, withargs=withargs)\n', (641, 763), False, 'from flaskapp.models import Activities, Logs, LogsToActivities, db\n'), ((957, 1079), 'flaskapp.models.Logs', 'Logs', ([], {'chatid': 'chatid', 'intent': 'intent', 'input': 'input', 'ts': 'ts', 'response': 'response', 'obs': 'observaciones', 'id': 'id', 'withargs': 'withargs'}), '(chatid=chatid, intent=intent, input=input, ts=ts, response=response,\n obs=observaciones, id=id, withargs=withargs)\n', (961, 1079), False, 'from flaskapp.models import Activities, Logs, LogsToActivities, db\n'), ((1772, 1824), 'flaskapp.models.LogsToActivities', 'LogsToActivities', ([], {'idlog': 'idlog', 'idactivity': 'idactivity'}), '(idlog=idlog, idactivity=idactivity)\n', (1788, 1824), False, 'from flaskapp.models import Activities, Logs, LogsToActivities, db\n'), ((1639, 1658), 'flaskapp.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1656, 1658), False, 'from flaskapp.models import Activities, Logs, LogsToActivities, db\n'), ((1922, 1935), 'sqlalchemy.sql.expression.desc', 'desc', (['Logs.ts'], {}), '(Logs.ts)\n', (1926, 1935), False, 'from sqlalchemy.sql.expression import desc, func\n'), ((1550, 1574), 'flaskapp.models.Activities', 'Activities', ([], {'activity': 'act'}), '(activity=act)\n', (1560, 1574), False, 'from flaskapp.models import 
Activities, Logs, LogsToActivities, db\n'), ((1438, 1478), 'flaskapp.models.Activities.query.filter_by', 'Activities.query.filter_by', ([], {'activity': 'act'}), '(activity=act)\n', (1464, 1478), False, 'from flaskapp.models import Activities, Logs, LogsToActivities, db\n'), ((531, 548), 'sqlalchemy.sql.expression.func.max', 'func.max', (['Logs.id'], {}), '(Logs.id)\n', (539, 548), False, 'from sqlalchemy.sql.expression import desc, func\n'), ((1701, 1724), 'sqlalchemy.sql.expression.func.max', 'func.max', (['Activities.id'], {}), '(Activities.id)\n', (1709, 1724), False, 'from sqlalchemy.sql.expression import desc, func\n')]
""" Luhn Algorithm """ from typing import List def is_luhn(string: str) -> bool: """ Perform Luhn validation on input string Algorithm: * Double every other digit starting from 2nd last digit. * Subtract 9 if number is greater than 9. * Sum the numbers * >>> test_cases = [79927398710, 79927398711, 79927398712, 79927398713, ... 79927398714, 79927398715, 79927398716, 79927398717, 79927398718, ... 79927398719] >>> test_cases = list(map(str, test_cases)) >>> list(map(is_luhn, test_cases)) [False, False, False, True, False, False, False, False, False, False] """ check_digit: int _vector: List[str] = list(string) __vector, check_digit = _vector[:-1], int(_vector[-1]) vector: List[int] = [*map(int, __vector)] vector.reverse() for idx, i in enumerate(vector): if idx & 1 == 0: doubled: int = vector[idx] * 2 if doubled > 9: doubled -= 9 check_digit += doubled else: check_digit += i if (check_digit) % 10 == 0: return True return False if __name__ == "__main__": import doctest doctest.testmod() assert is_luhn("79927398713")
[ "doctest.testmod" ]
[((1181, 1198), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (1196, 1198), False, 'import doctest\n')]
""" ================== welly ================== """ from .project import Project from .well import Well from .header import Header from .curve import Curve from .synthetic import Synthetic from .location import Location from .crs import CRS from . import tools from . import quality def read_las(path, **kwargs): """ A package namespace method to be called as `welly.read_las`. Just wraps `Project.from_las()`. Creates a `Project` from a .LAS file. Args: path (str): path or URL where LAS is located. `*.las` to load all files in dir **kwargs (): See `Project.from_las()`` for addictional arguments Returns: welly.Project. The Project object. """ return Project.from_las(path, **kwargs) def read_df(df, **kwargs): """ A package namespace method to be called as `welly.read_df`. Just wraps `Well.from_df()`. Creates a `Well` from your pd.DataFrame. Args: df (pd.DataFrame): Column data and column names Optional **kwargs: units (dict): Optional. Units of measurement of the curves in `df`. req (list): Optional. An alias list, giving all required curves. uwi (str): Unique Well Identifier (UWI) name (str): Name Returns: Well. The `Well` object. """ return Well.from_df(df, **kwargs) __all__ = [ 'Project', 'Well', 'Header', 'Curve', 'Synthetic', 'Location', 'CRS', 'quality', 'tools', # Various classes in here 'read_las' ] from pkg_resources import get_distribution, DistributionNotFound try: VERSION = get_distribution(__name__).version except DistributionNotFound: try: from ._version import version as VERSION except ImportError: raise ImportError( "Failed to find (autogenerated) _version.py. " "This might be because you are installing from GitHub's tarballs, " "use the PyPI ones." ) __version__ = VERSION
[ "pkg_resources.get_distribution" ]
[((1709, 1735), 'pkg_resources.get_distribution', 'get_distribution', (['__name__'], {}), '(__name__)\n', (1725, 1735), False, 'from pkg_resources import get_distribution, DistributionNotFound\n')]
#!/usr/bin/env python3 """ Build the demos Usage: python setup.py build_ext -i """ import numpy as np from distutils.core import setup from Cython.Build import cythonize from setuptools.extension import Extension from os.path import join extending = Extension("extending", sources=['extending.pyx'], include_dirs=[np.get_include()]) distributions = Extension("extending_distributions", sources=['extending_distributions.pyx', join('..', '..', 'src', 'distributions', 'distributions.c')], include_dirs=[np.get_include()]) extensions = [extending, distributions] setup( ext_modules=cythonize(extensions) )
[ "Cython.Build.cythonize", "os.path.join", "numpy.get_include" ]
[((760, 781), 'Cython.Build.cythonize', 'cythonize', (['extensions'], {}), '(extensions)\n', (769, 781), False, 'from Cython.Build import cythonize\n'), ((361, 377), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (375, 377), True, 'import numpy as np\n'), ((534, 593), 'os.path.join', 'join', (['""".."""', '""".."""', '"""src"""', '"""distributions"""', '"""distributions.c"""'], {}), "('..', '..', 'src', 'distributions', 'distributions.c')\n", (538, 593), False, 'from os.path import join\n'), ((676, 692), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (690, 692), True, 'import numpy as np\n')]
#!/usr/bin/env python # encoding: utf-8 import signal import sys #import pandas as pd #import numpy as np def setGlobals(g): #print globals() globals().update(g) #print globals() def exit(): mtsExit(0) def quit(signum): sys.exit(signum) def quitNow(signum,frame): quit(signum) def initialize(params,p2=None): signal.signal(signal.SIGINT, quitNow) signal.signal(signal.SIGTERM, quitNow) if (p2==None): return mtsInitialize(params) else: return mtsInitialize(params,p2) def execute(): mtsExecute() def info(*tp): return mtsInfo(tp) def log(*tp): return mtsLog(tp) def warn(*tp): return mtsWarn(tp) def error(*tp): return mtsError(tp) def file(*tp): return mtsFile(tp) def genMtsStratgyClass(): class Strategy(StrategyBase): def __init__(self,name, strategyId): super(Strategy, self).__init__(name,strategyId) def newDirectOrder(self,symbol,price,volume,direction,offsetFlag): return self.newOrder({"type":1,"symbol":symbol,"price":price,"volume":volume,"direction":direction,"offsetFlag":offsetFlag}) def newNetOrder(self,symbol,price,volume): return self.newOrder({"type":2,"symbol":symbol,"price":price,"volume":volume}) def newBoxOrder(self,symbol,price,volume): return self.newOrder({"type":3,"symbol":symbol,"price":price,"volume":volume}) # def history(self,symbol,count): # pf = pd.DataFrame(self.getHistory(symbol, count), # columns=['date', 'open', 'high', 'low', 'close', 'volume', 'vwap']) # pf['date'] = pd.to_datetime(pf['date'],format='%Y%m%d %H%M%S') # return pf.set_index('date').astype(np.float64) # # def dailyHistory(self, symbol, count): # pf = pd.DataFrame(self.getDailyHistory(symbol, count), # columns=['date', 'open', 'high', 'low', 'close', 'volume', 'vwap']) # pf['date'] = pd.to_datetime(pf['date'], format='%Y%m%d %H%M%S') # return pf.set_index('date').astype(np.float64) return Strategy
[ "signal.signal", "sys.exit" ]
[((244, 260), 'sys.exit', 'sys.exit', (['signum'], {}), '(signum)\n', (252, 260), False, 'import sys\n'), ((343, 380), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'quitNow'], {}), '(signal.SIGINT, quitNow)\n', (356, 380), False, 'import signal\n'), ((385, 423), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'quitNow'], {}), '(signal.SIGTERM, quitNow)\n', (398, 423), False, 'import signal\n')]
from enum import Enum import json from SCA11H.commands.base.PostCommand import PostCommand class Command(Enum): # Restore BCG factory settings.(BCG parameters, direction and running mode) Restore = ('restore', 'restore') # Restore default BCG parameters SetDefaultParameters = ('set_default_pars', 'reset-parameters') class RunCommand(PostCommand): """ Run a BCG command """ def __init__(self, command: Command, **kwargs): super().__init__(endpoint='/bcg/cmd', payload=json.dumps({"cmd": command.value[0]}), **kwargs) @staticmethod def get_parser_name(): return 'run-bcg-command' @staticmethod def get_help(): return 'Run a BCG command' @staticmethod def add_arguments(parser): subparsers = parser.add_subparsers(title='bcg-command', dest='bcg_command', help='BCG Command to run') subparsers.add_parser(Command.Restore.value[1], help='Restore BCG factory settings.(BCG parameters, direction and running mode)') subparsers.add_parser(Command.SetDefaultParameters.value[1], help='Restore default BCG parameters') @staticmethod def parse_arguments(args) -> dict: if args.bcg_command is None: raise Exception('Missing required argument: bcg-command') elif args.bcg_command == Command.Restore.value[1]: command = Command.Restore elif args.bcg_command == Command.SetDefaultParameters.value[1]: command = Command.SetDefaultParameters else: raise Exception('Invalid argument: bcg-command') return {'command': command}
[ "json.dumps" ]
[((533, 570), 'json.dumps', 'json.dumps', (["{'cmd': command.value[0]}"], {}), "({'cmd': command.value[0]})\n", (543, 570), False, 'import json\n')]
#!/usr/bin/env python3 import pandas as pd ft_input='TEMP_DIR/tmp-predictions_reformatted_gexpnn20200320allCOHORTS.tsv' df = pd.read_csv(ft_input,sep='\t') # Get all tumors present in df (ACC, BRCA, ...) temp = df['Label'].unique() u_tumor = {} #k=tumor, v=1 for t in temp: t= t.split(":")[0] if t not in u_tumor: u_tumor[t]=1 # write out files for each tumor for t in u_tumor: print('starting ', t) subset = df[df['Label'].str.contains(t)] subset.to_csv('TEMP_DIR/intermediat-'+t+".tsv",sep='\t',index=False)
[ "pandas.read_csv" ]
[((128, 159), 'pandas.read_csv', 'pd.read_csv', (['ft_input'], {'sep': '"""\t"""'}), "(ft_input, sep='\\t')\n", (139, 159), True, 'import pandas as pd\n')]