code: string (lengths 22 to 1.05M)
apis: list (lengths 1 to 3.31k)
extract_api: string (lengths 75 to 3.25M)
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2018, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pysnmp/license.html
#
try:
    from hashlib import sha1
except ImportError:
    import sha
    sha1 = sha.new

from pyasn1.type import univ
from pysnmp.proto.secmod.rfc3414.auth import base
from pysnmp.proto.secmod.rfc3414 import localkey
from pysnmp.proto import errind, error

_twelveZeros = univ.OctetString((0,) * 12).asOctets()
_fortyFourZeros = (0,) * 44


# 7.2.4
class HmacSha(base.AbstractAuthenticationService):
    serviceID = (1, 3, 6, 1, 6, 3, 10, 1, 1, 3)  # usmHMACSHAAuthProtocol
    __ipad = [0x36] * 64
    __opad = [0x5C] * 64

    def hashPassphrase(self, authKey):
        return localkey.hashPassphraseSHA(authKey)

    def localizeKey(self, authKey, snmpEngineID):
        return localkey.localizeKeySHA(authKey, snmpEngineID)

    @property
    def digestLength(self):
        return 12

    # 7.3.1
    def authenticateOutgoingMsg(self, authKey, wholeMsg):
        # 7.3.1.1
        # Here we expect calling secmod to indicate where the digest
        # should be in the substrate. Also, it pre-sets digest placeholder
        # so we hash wholeMsg out of the box.
        # Yes, that's ugly but that's rfc...
        l = wholeMsg.find(_twelveZeros)
        if l == -1:
            raise error.ProtocolError('Cant locate digest placeholder')

        wholeHead = wholeMsg[:l]
        wholeTail = wholeMsg[l + 12:]

        # 7.3.1.2a
        extendedAuthKey = authKey.asNumbers() + _fortyFourZeros

        # 7.3.1.2b -- no-op

        # 7.3.1.2c
        k1 = univ.OctetString(
            map(lambda x, y: x ^ y, extendedAuthKey, self.__ipad)
        )

        # 7.3.1.2d -- no-op

        # 7.3.1.2e
        k2 = univ.OctetString(
            map(lambda x, y: x ^ y, extendedAuthKey, self.__opad)
        )

        # 7.3.1.3
        d1 = sha1(k1.asOctets() + wholeMsg).digest()

        # 7.3.1.4
        d2 = sha1(k2.asOctets() + d1).digest()
        mac = d2[:12]

        # 7.3.1.5 & 6
        return wholeHead + mac + wholeTail

    # 7.3.2
    def authenticateIncomingMsg(self, authKey, authParameters, wholeMsg):
        # 7.3.2.1 & 2
        if len(authParameters) != 12:
            raise error.StatusInformation(
                errorIndication=errind.authenticationError
            )

        # 7.3.2.3
        l = wholeMsg.find(authParameters.asOctets())
        if l == -1:
            raise error.ProtocolError('Cant locate digest in wholeMsg')

        wholeHead = wholeMsg[:l]
        wholeTail = wholeMsg[l + 12:]

        authenticatedWholeMsg = wholeHead + _twelveZeros + wholeTail

        # 7.3.2.4a
        extendedAuthKey = authKey.asNumbers() + _fortyFourZeros

        # 7.3.2.4b --> no-op

        # 7.3.2.4c
        k1 = univ.OctetString(
            map(lambda x, y: x ^ y, extendedAuthKey, self.__ipad)
        )

        # 7.3.2.4d --> no-op

        # 7.3.2.4e
        k2 = univ.OctetString(
            map(lambda x, y: x ^ y, extendedAuthKey, self.__opad)
        )

        # 7.3.2.5a
        d1 = sha1(k1.asOctets() + authenticatedWholeMsg).digest()

        # 7.3.2.5b
        d2 = sha1(k2.asOctets() + d1).digest()

        # 7.3.2.5c
        mac = d2[:12]

        # 7.3.2.6
        if mac != authParameters:
            raise error.StatusInformation(
                errorIndication=errind.authenticationFailure
            )

        return authenticatedWholeMsg
[ "pyasn1.type.univ.OctetString", "pysnmp.proto.secmod.rfc3414.localkey.hashPassphraseSHA", "pysnmp.proto.error.StatusInformation", "pysnmp.proto.error.ProtocolError", "pysnmp.proto.secmod.rfc3414.localkey.localizeKeySHA" ]
[((413, 440), 'pyasn1.type.univ.OctetString', 'univ.OctetString', (['((0,) * 12)'], {}), '((0,) * 12)\n', (429, 440), False, 'from pyasn1.type import univ\n'), ((721, 756), 'pysnmp.proto.secmod.rfc3414.localkey.hashPassphraseSHA', 'localkey.hashPassphraseSHA', (['authKey'], {}), '(authKey)\n', (747, 756), False, 'from pysnmp.proto.secmod.rfc3414 import localkey\n'), ((823, 869), 'pysnmp.proto.secmod.rfc3414.localkey.localizeKeySHA', 'localkey.localizeKeySHA', (['authKey', 'snmpEngineID'], {}), '(authKey, snmpEngineID)\n', (846, 869), False, 'from pysnmp.proto.secmod.rfc3414 import localkey\n'), ((1333, 1386), 'pysnmp.proto.error.ProtocolError', 'error.ProtocolError', (['"""Cant locate digest placeholder"""'], {}), "('Cant locate digest placeholder')\n", (1352, 1386), False, 'from pysnmp.proto import errind, error\n'), ((2245, 2312), 'pysnmp.proto.error.StatusInformation', 'error.StatusInformation', ([], {'errorIndication': 'errind.authenticationError'}), '(errorIndication=errind.authenticationError)\n', (2268, 2312), False, 'from pysnmp.proto import errind, error\n'), ((2453, 2506), 'pysnmp.proto.error.ProtocolError', 'error.ProtocolError', (['"""Cant locate digest in wholeMsg"""'], {}), "('Cant locate digest in wholeMsg')\n", (2472, 2506), False, 'from pysnmp.proto import errind, error\n'), ((3311, 3380), 'pysnmp.proto.error.StatusInformation', 'error.StatusInformation', ([], {'errorIndication': 'errind.authenticationFailure'}), '(errorIndication=errind.authenticationFailure)\n', (3334, 3380), False, 'from pysnmp.proto import errind, error\n')]
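The ipad/opad arithmetic in the record above is the standard RFC 2104 HMAC construction truncated to 96 bits, as RFC 3414 requires. A minimal editor-added sanity check (the key and message bytes are arbitrary examples, not part of the record) showing the manual construction agrees with Python's hmac module:

# Editor-added sketch: HmacSha's manual ipad/opad construction is plain
# RFC 2104 HMAC-SHA-1 truncated to 12 bytes (HMAC-SHA-96).
import hmac
from hashlib import sha1

key = bytes(range(20))                   # a localized 20-byte SHA-1 key (example value)
msg = b'example SNMP message'

extended = key + b'\x00' * 44            # pad the key to SHA-1's 64-byte block size
k1 = bytes(b ^ 0x36 for b in extended)   # inner pad
k2 = bytes(b ^ 0x5C for b in extended)   # outer pad
manual_mac = sha1(k2 + sha1(k1 + msg).digest()).digest()[:12]

assert manual_mac == hmac.new(key, msg, sha1).digest()[:12]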
import torch.nn as nn
import torch.nn.functional as F


class FocalLoss(nn.Module):
    def __init__(self, gamma=2):
        super().__init__()
        self.gamma = gamma

    # TODO refactor
    def forward(self, outputs, targets):
        if targets.size() != outputs.size():
            raise ValueError(
                f"Targets and inputs must be same size. "
                f"Got ({targets.size()}) and ({outputs.size()})"
            )

        max_val = (-outputs).clamp(min=0)
        log_ = ((-max_val).exp() + (-outputs - max_val).exp()).log()
        loss = outputs - outputs * targets + max_val + log_

        invprobs = F.logsigmoid(-outputs * (targets * 2.0 - 1.0))
        loss = (invprobs * self.gamma).exp() * loss

        return loss.sum(dim=1).mean()
[ "torch.nn.functional.logsigmoid" ]
[((637, 683), 'torch.nn.functional.logsigmoid', 'F.logsigmoid', (['(-outputs * (targets * 2.0 - 1.0))'], {}), '(-outputs * (targets * 2.0 - 1.0))\n', (649, 683), True, 'import torch.nn.functional as F\n')]
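A minimal editor-added usage sketch for the loss above (batch size, label count, and gamma are illustrative): the forward pass expects raw logits and binary targets of identical shape, i.e. a multi-label setting.

# Editor-added usage sketch; shapes are arbitrary examples.
import torch

criterion = FocalLoss(gamma=2)
logits = torch.randn(8, 5, requires_grad=True)    # 8 samples, 5 independent labels
targets = torch.randint(0, 2, (8, 5)).float()   # binary targets, same shape as logits
loss = criterion(logits, targets)
loss.backward()
print(loss.item())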
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Test cases for the conf key manager.
"""

import array

from oslo.config import cfg

from cinder import context
from cinder import exception
from cinder.keymgr import conf_key_mgr
from cinder.keymgr import key
from cinder.tests.keymgr import test_key_mgr

CONF = cfg.CONF
CONF.import_opt('fixed_key', 'cinder.keymgr.conf_key_mgr', group='keymgr')


class ConfKeyManagerTestCase(test_key_mgr.KeyManagerTestCase):
    def __init__(self, *args, **kwargs):
        super(ConfKeyManagerTestCase, self).__init__(*args, **kwargs)

        self._hex_key = '1' * 64

    def _create_key_manager(self):
        CONF.set_default('fixed_key', default=self._hex_key, group='keymgr')
        return conf_key_mgr.ConfKeyManager()

    def setUp(self):
        super(ConfKeyManagerTestCase, self).setUp()
        self.ctxt = context.RequestContext('fake', 'fake')

        self.key_id = '00000000-0000-0000-0000-000000000000'
        encoded = array.array('B', self._hex_key.decode('hex')).tolist()
        self.key = key.SymmetricKey('AES', encoded)

    def test___init__(self):
        self.assertEqual(self.key_id, self.key_mgr.key_id)

    def test_create_key(self):
        key_id_1 = self.key_mgr.create_key(self.ctxt)
        key_id_2 = self.key_mgr.create_key(self.ctxt)

        # ensure that the UUIDs are the same
        self.assertEqual(key_id_1, key_id_2)

    def test_create_null_context(self):
        self.assertRaises(exception.NotAuthorized,
                          self.key_mgr.create_key, None)

    def test_store_key(self):
        key_id = self.key_mgr.store_key(self.ctxt, self.key)

        actual_key = self.key_mgr.get_key(self.ctxt, key_id)
        self.assertEqual(self.key, actual_key)

    def test_store_null_context(self):
        self.assertRaises(exception.NotAuthorized,
                          self.key_mgr.store_key, None, self.key)

    def test_store_key_invalid(self):
        encoded = self.key.get_encoded()
        inverse_key = key.SymmetricKey('AES', [~b for b in encoded])

        self.assertRaises(exception.KeyManagerError,
                          self.key_mgr.store_key, self.ctxt, inverse_key)

    def test_copy_key(self):
        key_id = self.key_mgr.create_key(self.ctxt)
        key = self.key_mgr.get_key(self.ctxt, key_id)

        copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id)
        copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id)

        self.assertEqual(key_id, copied_key_id)
        self.assertEqual(key, copied_key)

    def test_copy_null_context(self):
        self.assertRaises(exception.NotAuthorized,
                          self.key_mgr.copy_key, None, None)

    def test_delete_key(self):
        key_id = self.key_mgr.create_key(self.ctxt)
        self.key_mgr.delete_key(self.ctxt, key_id)

        # cannot delete key -- might have lingering references
        self.assertEqual(self.key,
                         self.key_mgr.get_key(self.ctxt, self.key_id))

    def test_delete_null_context(self):
        self.assertRaises(exception.NotAuthorized,
                          self.key_mgr.delete_key, None, None)

    def test_delete_unknown_key(self):
        self.assertRaises(exception.KeyManagerError,
                          self.key_mgr.delete_key, self.ctxt, None)

    def test_get_key(self):
        self.assertEqual(self.key,
                         self.key_mgr.get_key(self.ctxt, self.key_id))

    def test_get_null_context(self):
        self.assertRaises(exception.NotAuthorized,
                          self.key_mgr.get_key, None, None)

    def test_get_unknown_key(self):
        self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, None)
[ "cinder.keymgr.key.SymmetricKey", "cinder.context.RequestContext", "cinder.keymgr.conf_key_mgr.ConfKeyManager" ]
[((1365, 1394), 'cinder.keymgr.conf_key_mgr.ConfKeyManager', 'conf_key_mgr.ConfKeyManager', ([], {}), '()\n', (1392, 1394), False, 'from cinder.keymgr import conf_key_mgr\n'), ((1490, 1528), 'cinder.context.RequestContext', 'context.RequestContext', (['"""fake"""', '"""fake"""'], {}), "('fake', 'fake')\n", (1512, 1528), False, 'from cinder import context\n'), ((1683, 1715), 'cinder.keymgr.key.SymmetricKey', 'key.SymmetricKey', (['"""AES"""', 'encoded'], {}), "('AES', encoded)\n", (1699, 1715), False, 'from cinder.keymgr import key\n'), ((2644, 2692), 'cinder.keymgr.key.SymmetricKey', 'key.SymmetricKey', (['"""AES"""', '[(~b) for b in encoded]'], {}), "('AES', [(~b) for b in encoded])\n", (2660, 2692), False, 'from cinder.keymgr import key\n')]
#!/usr/bin/env python3
"""This script is used to generate the tables for `charmap-reference.rst`.

Uses the tabulate module from PyPI.
"""

import argparse
import unicodedata
from typing import Iterable, Iterator

from tabulate import tabulate

import tcod.tileset


def get_charmaps() -> Iterator[str]:
    """Return an iterator of the current character maps from tcod.tileset."""
    for name in dir(tcod.tileset):
        if name.startswith("CHARMAP_"):
            yield name[len("CHARMAP_") :].lower()


def escape_rst(string: str) -> str:
    """Escape RST symbols and disable Sphinx smart quotes."""
    return (
        string.replace("\\", "\\\\")
        .replace("*", "\\*")
        .replace("|", "\\|")
        .replace("`", "\\`")
        .replace("'", "\\'")
        .replace('"', '\\"')
    )


def generate_table(charmap: Iterable[int]) -> str:
    """Generate an RST table for `charmap`."""
    headers = ("Tile Index", "Unicode", "String", "Name")
    table = []
    for i, ch in enumerate(charmap):
        hex_len = len(f"{ch:x}")
        if hex_len % 2:  # Prevent an odd number of hex digits.
            hex_len += 1
        try:
            name = unicodedata.name(chr(ch))
        except ValueError:
            # Skip names rather than guessing, the official names are enough.
            name = ""
        string = escape_rst(f"{chr(ch)!r}")
        table.append((i, f"0x{ch:0{hex_len}X}", string, name))
    return tabulate(table, headers, tablefmt="rst")


def main() -> None:
    parser = argparse.ArgumentParser(
        description="Generate an RST table for a tcod character map.",
    )
    parser.add_argument(
        "charmap",
        action="store",
        choices=list(get_charmaps()),
        type=str,
        help="which character map to generate a table from",
    )
    parser.add_argument(
        "-o",
        "--out-file",
        action="store",
        type=argparse.FileType("w", encoding="utf-8"),
        default="-",
        help="where to write the table to (stdout by default)",
    )
    args = parser.parse_args()
    charmap = getattr(tcod.tileset, f"CHARMAP_{args.charmap.upper()}")
    with args.out_file as f:
        f.write(generate_table(charmap))


if __name__ == "__main__":
    main()
[ "tabulate.tabulate", "argparse.ArgumentParser", "argparse.FileType" ]
[((1443, 1483), 'tabulate.tabulate', 'tabulate', (['table', 'headers'], {'tablefmt': '"""rst"""'}), "(table, headers, tablefmt='rst')\n", (1451, 1483), False, 'from tabulate import tabulate\n'), ((1519, 1610), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate an RST table for a tcod character map."""'}), "(description=\n 'Generate an RST table for a tcod character map.')\n", (1542, 1610), False, 'import argparse\n'), ((1910, 1950), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {'encoding': '"""utf-8"""'}), "('w', encoding='utf-8')\n", (1927, 1950), False, 'import argparse\n')]
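An editor-added sketch of calling generate_table directly, bypassing the CLI (assumes the tcod and tabulate packages are installed; CHARMAP_CP437 is one of the character maps tcod.tileset exposes):

# Editor-added sketch: render an RST table for the first eight CP437 tiles
# without going through argparse.
import tcod.tileset

print(generate_table(tcod.tileset.CHARMAP_CP437[:8]))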
#! /usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt

# Sine and cosine on one axis.
plt.figure(figsize=(6.6, 3), dpi=90)
x = np.linspace(-2 * np.pi, 2 * np.pi, 1000)
plt.plot(x, np.sin(x), label=r"$\sin(x)$")
plt.plot(x, np.cos(x), label=r"$\cos(x)$")
plt.xlim((np.min(x), np.max(x)))
plt.ylim((-1.1, 1.1))
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.savefig('trig.png', bbox_inches='tight', dpi=300)

# Histogram of normally distributed samples.
plt.figure(figsize=(6.6, 3), dpi=90)
data = np.random.normal(0, 1, 100)
plt.hist(data, bins=10)
plt.xlabel(r"Data")
plt.ylabel(r"$\#$")
plt.savefig('hist.pgf', bbox_inches='tight', dpi=200)
[ "matplotlib.pyplot.hist", "matplotlib.pyplot.ylim", "matplotlib.pyplot.figure", "numpy.sin", "numpy.min", "numpy.max", "numpy.linspace", "numpy.random.normal", "numpy.cos", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig" ]
[((96, 132), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.6, 3)', 'dpi': '(90)'}), '(figsize=(6.6, 3), dpi=90)\n', (106, 132), True, 'import matplotlib.pyplot as plt\n'), ((137, 179), 'numpy.linspace', 'np.linspace', (['(-2 * np.pi)', '(2 * np.pi)', '(1000.0)'], {}), '(-2 * np.pi, 2 * np.pi, 1000.0)\n', (148, 179), True, 'import numpy as np\n'), ((292, 313), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.1, 1.1)'], {}), '((-1.1, 1.1))\n', (300, 313), True, 'import matplotlib.pyplot as plt\n'), ((315, 332), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (325, 332), True, 'import matplotlib.pyplot as plt\n'), ((333, 350), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y$"""'], {}), "('$y$')\n", (343, 350), True, 'import matplotlib.pyplot as plt\n'), ((351, 404), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""trig.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "('trig.png', bbox_inches='tight', dpi=300)\n", (362, 404), True, 'import matplotlib.pyplot as plt\n'), ((406, 442), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.6, 3)', 'dpi': '(90)'}), '(figsize=(6.6, 3), dpi=90)\n', (416, 442), True, 'import matplotlib.pyplot as plt\n'), ((451, 478), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (467, 478), True, 'import numpy as np\n'), ((480, 503), 'matplotlib.pyplot.hist', 'plt.hist', (['data'], {'bins': '(10)'}), '(data, bins=10)\n', (488, 503), True, 'import matplotlib.pyplot as plt\n'), ((505, 523), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Data"""'], {}), "('Data')\n", (515, 523), True, 'import matplotlib.pyplot as plt\n'), ((525, 544), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\#$"""'], {}), "('$\\\\#$')\n", (535, 544), True, 'import matplotlib.pyplot as plt\n'), ((545, 598), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""hist.pgf"""'], {'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('hist.pgf', bbox_inches='tight', dpi=200)\n", (556, 598), True, 'import matplotlib.pyplot as plt\n'), ((186, 195), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (192, 195), True, 'import numpy as np\n'), ((228, 237), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (234, 237), True, 'import numpy as np\n'), ((269, 278), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (275, 278), True, 'import numpy as np\n'), ((280, 289), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (286, 289), True, 'import numpy as np\n')]
#!/usr/bin/env python

import consul
import urllib.parse
import sys
import re
import os

from pprint import pprint
from argparse import ArgumentParser

VERSION = "0.5.0"

args = None
consul_inst_cache = {}


def _get_consul_for_url(consul_url) -> consul.Consul:
    if consul_url not in consul_inst_cache:
        parsed_url = urllib.parse.urlparse(consul_url)
        if ":" in parsed_url.netloc:
            # port specified?
            items = parsed_url.netloc.split(":")
            # extract and use
            host, port = (items[0], int(items[1]))
        else:
            host = parsed_url.netloc
            port = 8500
        # create consul instance and put in cache
        consul_inst_cache[consul_url] = \
            consul.Consul(host=host, port=port, scheme=parsed_url.scheme)
    return consul_inst_cache[consul_url]


def _get_consul_for_service(consul_url, consul_svc) -> consul.Consul:
    pu = urllib.parse.urlparse(consul_url)
    consul_port = "" if ":" not in pu.netloc else ":" + pu.netloc.split(":")[1]
    tmp = "{}://{}{}".format(pu.scheme, consul_svc['Address'], consul_port)
    return _get_consul_for_url(tmp)


def _get_all_service_names() -> list:
    con = _get_consul_for_url(args.consul_url)
    return [key for key, value in con.catalog.services()[1].items()]


def _get_all_service_tags() -> list:
    con = _get_consul_for_url(args.consul_url)
    # gives us: { SVC_NAME: [SVC_TAG,...] }
    svcs = con.catalog.services()[1]
    tags = []
    for svc, svc_tags in svcs.items():
        tags += svc_tags
    return list(set(tags))


def _get_all_services() -> [dict]:
    """
    Returns a list of consul service dicts

    :return: A list like [SVC1, ...]
    """
    # now, get the consul service dict for each service name.
    # catalog.service() returns (INDX, [NODE1, ...]), where NODEx is a dict
    con = _get_consul_for_url(args.consul_url)
    chk_svc_names = _get_all_service_names()
    chk_svcs = []
    for svc_name in chk_svc_names:
        chk_svcs += con.catalog.service(svc_name)[1]
    return chk_svcs


def _unregister(svc) -> bool:
    """
    Deregisters a service from consul.

    :param svc: The service dict of the service to be deregistered.
    :return: True on success, False on failure
    """
    con = _get_consul_for_service(args.consul_url, svc)
    res = con.agent.service.deregister(svc['ServiceID'])
    status = "OK" if res else "FAIL"
    print("DEREGISTER_{:<7} CONSUL {:<40} ID {}"
          .format(status, con.http.base_uri, svc['ServiceID']))
    return res


def _get_filtered_services() -> [dict]:
    def match(svc):
        for fname, fex in filter_dict.items():
            if fname in svc:
                val = svc[fname]
                type_val = type(val)
                if type_val is str:
                    if not fex.search(svc[fname]):
                        return False
                elif type_val in (list, tuple):
                    matches = 0
                    for _tmp in val:
                        matches = 1 if fex.search(_tmp) else matches
                    if matches == 0:
                        return False
            else:
                return False
        return True

    filter_dict = {}
    for fltr in args.filter:
        field, expr = fltr.split("=", maxsplit=1)
        filter_dict[field] = re.compile(expr)
    svcs = _get_all_services()
    return list(filter(match, svcs))


def del_by_id() -> None:
    svcs = _get_all_services()
    filtered = filter(lambda x: args.service_id == x['ServiceID'], svcs)
    for svc in filtered:
        _unregister(svc)


def del_by_name() -> None:
    svcs = _get_all_services()
    filtered = filter(lambda x: args.service_name == x['ServiceName'], svcs)
    for svc in filtered:
        _unregister(svc)


def del_by_tag() -> None:
    svcs = _get_all_services()
    filtered = filter(lambda x: args.tag_name in x['ServiceTags'], svcs)
    for svc in filtered:
        _unregister(svc)


def list_filtered() -> None:
    svcs = sorted(_get_filtered_services(), key=lambda x: x['ServiceName'])
    if args.verbose:
        pprint(svcs)
    else:
        svc_names = [x['ServiceName'] for x in svcs]
        if args.unique:
            svc_names = sorted(list(set(svc_names)))
        for svc in svc_names:
            print(svc)


def del_filtered() -> None:
    svcs = sorted(_get_filtered_services(), key=lambda x: x['ServiceName'])
    for svc in svcs:
        _unregister(svc)


def list_services() -> None:
    svcs = _get_all_service_names()
    if args.filter:
        fltr = re.compile(args.filter)
        svcs = filter(lambda x: fltr.search(x), svcs)
    for svc in sorted(svcs):
        print(svc)


def list_tags() -> None:
    tags = _get_all_service_tags()
    if args.filter:
        fltr = re.compile(args.filter)
        tags = filter(lambda x: fltr.search(x), tags)
    for tag in sorted(tags):
        print(tag)


def service_info() -> None:
    svcs = _get_all_services()
    filtered = filter(lambda x: args.service_name == x['ServiceName'], svcs)
    pprint(list(filtered))


def version() -> None:
    print(VERSION)


def run(argv):
    global args
    parser = ArgumentParser()
    parser.add_argument("-c", "--consul-url",
                        default=os.environ.get("CONSUL_URL",
                                                "http://localhost:8500"),
                        help="Specify consul URL to use. "
                             "Default: http://localhost:8500 "
                             "or $CONSUL_URL if set")

    subs = parser.add_subparsers(dest='command')
    subs.required = True

    # no parameters, we don't need to assign them to variables
    sub = subs.add_parser('list-services',
                          help="List all service names. Use 'list-filtered' to "
                               "get full service information.")
    sub.add_argument('filter', nargs="?", default=None,
                     help="Optional RegEx which is used to filter the result "
                          "list using regex.search (not match).")

    sub = subs.add_parser('list-tags',
                          help="List all tags used in all services.")
    sub.add_argument('filter', nargs="?", default=None,
                     help="Optional RegEx which is used to filter the result "
                          "list using regex.search (not match).")

    # now we do.
    sub = subs.add_parser('service-info',
                          help="Print full information about a single service.")
    sub.add_argument("service_name",
                     help="Print detailed information about a service")

    sub = subs.add_parser('del-by-name',
                          help="Delete all services with a certain name. The "
                               "name must match exactly.")
    sub.add_argument("service_name", help="Name of the service to delete")

    sub = subs.add_parser('del-by-id',
                          help="Delete all services with a given ID. The ID "
                               "must match exactly.")
    sub.add_argument("service_id", help="ID of the service to delete")

    sub = subs.add_parser('del-by-tag',
                          help="Delete all services which have a given tag. "
                               "The tag must match exactly.")
    sub.add_argument("tag_name", help="Delete services with this tag")

    sub = subs.add_parser('list-filtered',
                          help="List services based on filter criteria. "
                               "Currently you cannot filter for nested dicts, "
                               "but you can filter for top-level arrays. In "
                               "that case the regex is matched against every "
                               "item of the array, and if one matches the "
                               "filter matches.")
    sub.add_argument("-f", "--filter", default=[],
                     help="Add service filter (-f FIELD=VALUE). "
                          "VALUE can be a regex.",
                     action="append")
    sub.add_argument("-u", "--unique",
                     help="Output all services only once when NOT using -v. "
                          "Ignored with -v set.",
                     default=False, action="store_true")
    sub.add_argument("-v", "--verbose",
                     help="Output all service details, not only service name",
                     default=False, action="store_true")

    sub = subs.add_parser('del-filtered',
                          help="Same as list-filtered, but deletes the "
                               "service. The same restrictions apply, and "
                               "del-filtered requires a filter list to "
                               "prevent accidentally deleting all services.")
    sub.add_argument("-f", "--filter", required=True, default=[],
                     help="Add service filter (-f FIELD=VALUE). "
                          "VALUE can be a regex.",
                     action="append")

    subs.add_parser('version', help="Print version and exit")
    subs.add_parser('v', help="Alias for version")

    args = parser.parse_args(argv)

    {
        "del-by-id": del_by_id,
        "del-by-name": del_by_name,
        "del-by-tag": del_by_tag,
        "del-filtered": del_filtered,
        "list-filtered": list_filtered,
        "list-services": list_services,
        "list-tags": list_tags,
        "service-info": service_info,
        "version": version,
        "v": version,
    }[args.command]()


def console_entrypoint():
    run(sys.argv[1:])


if __name__ == "__main__":
    run(sys.argv[1:])
[ "argparse.ArgumentParser", "consul.Consul", "os.environ.get", "pprint.pprint", "re.compile" ]
[((5153, 5169), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (5167, 5169), False, 'from argparse import ArgumentParser\n'), ((716, 777), 'consul.Consul', 'consul.Consul', ([], {'host': 'host', 'port': 'port', 'scheme': 'parsed_url.scheme'}), '(host=host, port=port, scheme=parsed_url.scheme)\n', (729, 777), False, 'import consul\n'), ((3318, 3334), 're.compile', 're.compile', (['expr'], {}), '(expr)\n', (3328, 3334), False, 'import re\n'), ((4089, 4101), 'pprint.pprint', 'pprint', (['svcs'], {}), '(svcs)\n', (4095, 4101), False, 'from pprint import pprint\n'), ((4549, 4572), 're.compile', 're.compile', (['args.filter'], {}), '(args.filter)\n', (4559, 4572), False, 'import re\n'), ((4772, 4795), 're.compile', 're.compile', (['args.filter'], {}), '(args.filter)\n', (4782, 4795), False, 'import re\n'), ((5248, 5301), 'os.environ.get', 'os.environ.get', (['"""CONSUL_URL"""', '"""http://localhost:8500"""'], {}), "('CONSUL_URL', 'http://localhost:8500')\n", (5262, 5301), False, 'import os\n')]
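An editor-added usage sketch driving the run() entrypoint above programmatically (assumes a consul agent is reachable at $CONSUL_URL or localhost:8500; the service-name pattern is made up):

# Editor-added sketch: invoke the CLI entrypoint without a shell.
run(["list-services"])                                   # print all service names
run(["list-filtered", "-f", "ServiceName=^web-", "-v"])  # full details, filtered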
import logging

import click

from vogue.load.sample import load_one, load_all, load_recent, load_one_dry, load_all_dry
from datetime import date, timedelta
from genologics.entities import Sample

LOG = logging.getLogger(__name__)


@click.command("sample", short_help="load sample/samples into db.")
@click.option("-s", "--sample-lims-id", help="Input sample lims id")
@click.option(
    "-a",
    "--all_samples",
    is_flag=True,
    help="Loads all lims samples if no other options are selected",
)
@click.option(
    "-f",
    "--load-from",
    help="Use together with --all_samples. Load from this sample lims id. Use if load all broke. Start where it ended",
)
@click.option(
    "-d", "--days", type=int, help="Update only samples updated in the latest number of days"
)
@click.option("--dry", is_flag=True, help="Load from sample or not. (dry-run)")
@click.pass_context
def sample(ctx: click.Context, sample_lims_id, all_samples, load_from, days, dry):
    """Read and load lims data for one sample, all samples or the most recently updated samples."""
    adapter = ctx.obj["adapter"]
    lims = ctx.obj["lims"]
    if not lims:
        LOG.warning("Lims connection failed.")
        raise click.Abort()

    if days:
        some_days_ago = date.today() - timedelta(days=days)
        the_date = some_days_ago.strftime("%Y-%m-%dT00:00:00Z")
        load_recent(adapter, lims, the_date)
    elif all_samples:
        if dry:
            load_all_dry()
        else:
            load_all(adapter, lims=lims, start_sample=load_from)
    elif sample_lims_id:
        lims_sample = Sample(lims, id=sample_lims_id)
        if not lims_sample:
            LOG.critical("The sample does not exist in the LIMS database.")
            raise SyntaxError()
        if dry:
            load_one_dry(lims_sample)
        else:
            load_one(adapter, lims_sample, lims=lims)
[ "vogue.load.sample.load_all", "genologics.entities.Sample", "click.option", "datetime.date.today", "click.command", "vogue.load.sample.load_all_dry", "datetime.timedelta", "vogue.load.sample.load_recent", "vogue.load.sample.load_one", "vogue.load.sample.load_one_dry", "logging.getLogger", "click.Abort" ]
[((202, 229), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (219, 229), False, 'import logging\n'), ((233, 299), 'click.command', 'click.command', (['"""sample"""'], {'short_help': '"""load sample/samples into db."""'}), "('sample', short_help='load sample/samples into db.')\n", (246, 299), False, 'import click\n'), ((301, 368), 'click.option', 'click.option', (['"""-s"""', '"""--sample-lims-id"""'], {'help': '"""Input sample lims id"""'}), "('-s', '--sample-lims-id', help='Input sample lims id')\n", (313, 368), False, 'import click\n'), ((370, 488), 'click.option', 'click.option', (['"""-a"""', '"""--all_samples"""'], {'is_flag': '(True)', 'help': '"""Loads all lims samples if no other options are selected"""'}), "('-a', '--all_samples', is_flag=True, help=\n 'Loads all lims samples if no other options are selected')\n", (382, 488), False, 'import click\n'), ((504, 663), 'click.option', 'click.option', (['"""-f"""', '"""--load-from"""'], {'help': '"""Use together with --all_samples. Load from this sample lims id. Use if load all broke. Start where it ended"""'}), "('-f', '--load-from', help=\n 'Use together with --all_samples. Load from this sample lims id. Use if load all broke. Start where it ended'\n )\n", (516, 663), False, 'import click\n'), ((670, 778), 'click.option', 'click.option', (['"""-d"""', '"""--days"""'], {'type': 'int', 'help': '"""Update only samples updated in the latest number of days"""'}), "('-d', '--days', type=int, help=\n 'Update only samples updated in the latest number of days')\n", (682, 778), False, 'import click\n'), ((781, 859), 'click.option', 'click.option', (['"""--dry"""'], {'is_flag': '(True)', 'help': '"""Load from sample or not. (dry-run)"""'}), "('--dry', is_flag=True, help='Load from sample or not. (dry-run)')\n", (793, 859), False, 'import click\n'), ((1202, 1215), 'click.Abort', 'click.Abort', ([], {}), '()\n', (1213, 1215), False, 'import click\n'), ((1379, 1415), 'vogue.load.sample.load_recent', 'load_recent', (['adapter', 'lims', 'the_date'], {}), '(adapter, lims, the_date)\n', (1390, 1415), False, 'from vogue.load.sample import load_one, load_all, load_recent, load_one_dry, load_all_dry\n'), ((1271, 1283), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1281, 1283), False, 'from datetime import date, timedelta\n'), ((1286, 1306), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (1295, 1306), False, 'from datetime import date, timedelta\n'), ((1466, 1480), 'vogue.load.sample.load_all_dry', 'load_all_dry', ([], {}), '()\n', (1478, 1480), False, 'from vogue.load.sample import load_one, load_all, load_recent, load_one_dry, load_all_dry\n'), ((1507, 1559), 'vogue.load.sample.load_all', 'load_all', (['adapter'], {'lims': 'lims', 'start_sample': 'load_from'}), '(adapter, lims=lims, start_sample=load_from)\n', (1515, 1559), False, 'from vogue.load.sample import load_one, load_all, load_recent, load_one_dry, load_all_dry\n'), ((1607, 1638), 'genologics.entities.Sample', 'Sample', (['lims'], {'id': 'sample_lims_id'}), '(lims, id=sample_lims_id)\n', (1613, 1638), False, 'from genologics.entities import Sample\n'), ((1819, 1844), 'vogue.load.sample.load_one_dry', 'load_one_dry', (['lims_sample'], {}), '(lims_sample)\n', (1831, 1844), False, 'from vogue.load.sample import load_one, load_all, load_recent, load_one_dry, load_all_dry\n'), ((1871, 1912), 'vogue.load.sample.load_one', 'load_one', (['adapter', 'lims_sample'], {'lims': 'lims'}), '(adapter, lims_sample, lims=lims)\n', (1879, 1912), False, 'from vogue.load.sample import load_one, load_all, load_recent, load_one_dry, load_all_dry\n')]
# -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals

import base64
import json
import logging

try:
    from cookielib import CookieJar
except ImportError:
    from http.cookiejar import CookieJar
try:
    from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPCookieProcessor, HTTPDefaultErrorHandler, \
        Request, BaseHandler
except ImportError:
    from urllib.request import build_opener, HTTPHandler, HTTPSHandler, HTTPCookieProcessor, HTTPDefaultErrorHandler, \
        Request, BaseHandler
try:
    from urlparse import urlparse
except ImportError:
    from urllib.parse import urlparse

import requests
from requests_kerberos import HTTPKerberosAuth

__author__ = '<NAME> <<EMAIL>>'

logger = logging.getLogger(__name__)


class BaseConnection(object):
    _headers = {
        'Content-type': 'application/json',
        'Accept': 'application/json'
    }

    def __init__(self, host, auth_header, debug=0):
        self._host = host
        self._headers['Authorization'] = auth_header
        self._opener = build_opener(HTTPHandler(debuglevel=debug),
                                   HTTPSHandler(debuglevel=debug),
                                   HTTPCookieProcessor(CookieJar()),
                                   LoggingHandler(),
                                   HTTPDefaultErrorHandler())

    def make_request(self, uri, body=None):
        request = Request('{0}{1}'.format(self._host, uri),
                          json.dumps(body).encode('utf-8') if body else None,
                          headers=self._headers)
        response_body = self._opener.open(request).read()
        return {} if (response_body is None or response_body == '') else json.loads(response_body)


class BasicAuthConnection(BaseConnection):
    def __init__(self, username, password, host, debug=0):
        logger.debug('Creating new connection with username=%s host=%s', username, host)
        base64string = base64.encodestring(('%s:%s' % (username, password)).encode('utf-8')).replace(b'\n', b'')
        super(BasicAuthConnection, self).__init__(host, b'Basic ' + base64string, debug)


class BearerAuthConnection(BaseConnection):
    def __init__(self, token, host, debug=0):
        logger.debug('Creating new connection with token=%s host=%s', token[:5], host)
        super(BearerAuthConnection, self).__init__(host, 'Bearer %s' % token.strip(), debug)


class LoggingHandler(BaseHandler):
    def __init__(self):
        pass

    # noinspection PyMethodMayBeStatic
    def http_request(self, request):
        path = urlparse(request.get_full_url()).path
        logger.debug('Sending request: method=%s uri=%s', request.get_method(), path)
        return request

    # noinspection PyMethodMayBeStatic,PyUnusedLocal
    def http_response(self, request, response):
        logger.debug('Response received: status=%s msg=%s', response.getcode(), response.msg)
        return response

    https_request = http_request
    https_response = http_response


class KerberosConnection(object):
    def __init__(self, host):
        logger.debug('Creating new kerberos connection with host=%s', host)
        self._host = host

    def make_request(self, uri, body=None):
        response = requests.post('{0}{1}'.format(self._host, uri), json=body, auth=HTTPKerberosAuth(), verify=False)
        return response.json()
[ "urllib.request.HTTPHandler", "urllib.request.HTTPSHandler", "json.loads", "http.cookiejar.CookieJar", "json.dumps", "requests_kerberos.HTTPKerberosAuth", "urllib.request.HTTPDefaultErrorHandler", "logging.getLogger" ]
[((1332, 1359), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1349, 1359), False, 'import logging\n'), ((1665, 1694), 'urllib.request.HTTPHandler', 'HTTPHandler', ([], {'debuglevel': 'debug'}), '(debuglevel=debug)\n', (1676, 1694), False, 'from urllib.request import build_opener, HTTPHandler, HTTPSHandler, HTTPCookieProcessor, HTTPDefaultErrorHandler, Request, BaseHandler\n'), ((1732, 1762), 'urllib.request.HTTPSHandler', 'HTTPSHandler', ([], {'debuglevel': 'debug'}), '(debuglevel=debug)\n', (1744, 1762), False, 'from urllib.request import build_opener, HTTPHandler, HTTPSHandler, HTTPCookieProcessor, HTTPDefaultErrorHandler, Request, BaseHandler\n'), ((1924, 1949), 'urllib.request.HTTPDefaultErrorHandler', 'HTTPDefaultErrorHandler', ([], {}), '()\n', (1947, 1949), False, 'from urllib.request import build_opener, HTTPHandler, HTTPSHandler, HTTPCookieProcessor, HTTPDefaultErrorHandler, Request, BaseHandler\n'), ((2314, 2339), 'json.loads', 'json.loads', (['response_body'], {}), '(response_body)\n', (2324, 2339), False, 'import json\n'), ((1820, 1831), 'http.cookiejar.CookieJar', 'CookieJar', ([], {}), '()\n', (1829, 1831), False, 'from http.cookiejar import CookieJar\n'), ((3908, 3926), 'requests_kerberos.HTTPKerberosAuth', 'HTTPKerberosAuth', ([], {}), '()\n', (3924, 3926), False, 'from requests_kerberos import HTTPKerberosAuth\n'), ((2082, 2098), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (2092, 2098), False, 'import json\n')]
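An editor-added usage sketch (the host, token, and endpoint are placeholders; /api/search is a standard Grafana HTTP API path, but verify it against your Grafana version):

# Editor-added sketch: make_request issues a GET when no body is passed.
conn = BearerAuthConnection(token='my-api-token', host='http://localhost:3000')
dashboards = conn.make_request('/api/search')
print(dashboards)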
from django.urls import path

from core.apps import CoreConfig
from core import views

app_name = CoreConfig.name

urlpatterns = [
    path('get', views.SearchViewSet.as_view({'post': 'search'}), name='search'),
    path('number_of_links', views.SearchViewSet.as_view({'post': 'number_of_links'}), name='number_of_links'),
    path('put', views.SearchViewSet.as_view({'get': 'put'}), name='put'),
]
[ "core.views.SearchViewSet.as_view" ]
[((147, 194), 'core.views.SearchViewSet.as_view', 'views.SearchViewSet.as_view', (["{'post': 'search'}"], {}), "({'post': 'search'})\n", (174, 194), False, 'from core import views\n'), ((249, 305), 'core.views.SearchViewSet.as_view', 'views.SearchViewSet.as_view', (["{'post': 'number_of_links'}"], {}), "({'post': 'number_of_links'})\n", (276, 305), False, 'from core import views\n'), ((357, 400), 'core.views.SearchViewSet.as_view', 'views.SearchViewSet.as_view', (["{'get': 'put'}"], {}), "({'get': 'put'})\n", (384, 400), False, 'from core import views\n')]
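The as_view mappings above imply a Django REST framework ViewSet with custom actions named search, number_of_links, and put. The real core/views.py is not part of this record; an editor-added, hypothetical reconstruction might look like:

# Editor-added, hypothetical sketch of the ViewSet implied by the URL mappings;
# the actual core.views.SearchViewSet is not included in this record.
from rest_framework import viewsets
from rest_framework.response import Response


class SearchViewSet(viewsets.ViewSet):
    def search(self, request):
        return Response({'results': []})

    def number_of_links(self, request):
        return Response({'count': 0})

    def put(self, request):
        return Response({'status': 'ok'})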
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'design.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.


from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(549, 439)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(160, 10, 201, 41))
        font = QtGui.QFont()
        font.setFamily("Sans")
        font.setPointSize(24)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setGeometry(QtCore.QRect(10, 50, 531, 371))
        self.tabWidget.setObjectName("tabWidget")
        self.tb_gen_key = QtWidgets.QWidget()
        self.tb_gen_key.setObjectName("tb_gen_key")
        self.txtUserId = QtWidgets.QLineEdit(self.tb_gen_key)
        self.txtUserId.setGeometry(QtCore.QRect(20, 50, 421, 25))
        self.txtUserId.setObjectName("txtUserId")
        self.btnGenerateRSA = QtWidgets.QPushButton(self.tb_gen_key)
        self.btnGenerateRSA.setGeometry(QtCore.QRect(180, 100, 111, 25))
        self.btnGenerateRSA.setObjectName("btnGenerateRSA")
        self.label_6 = QtWidgets.QLabel(self.tb_gen_key)
        self.label_6.setGeometry(QtCore.QRect(20, 20, 201, 21))
        font = QtGui.QFont()
        font.setFamily("Abyssinica SIL")
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.label_6.setFont(font)
        self.label_6.setTextFormat(QtCore.Qt.AutoText)
        self.label_6.setObjectName("label_6")
        self.tabWidget.addTab(self.tb_gen_key, "")
        self.tab_sign = QtWidgets.QWidget()
        self.tab_sign.setObjectName("tab_sign")
        self.label_3 = QtWidgets.QLabel(self.tab_sign)
        self.label_3.setGeometry(QtCore.QRect(30, 120, 191, 31))
        font = QtGui.QFont()
        font.setFamily("Abyssinica SIL")
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.label_3.setFont(font)
        self.label_3.setTextFormat(QtCore.Qt.AutoText)
        self.label_3.setObjectName("label_3")
        self.btnSign = QtWidgets.QPushButton(self.tab_sign)
        self.btnSign.setGeometry(QtCore.QRect(210, 220, 83, 25))
        self.btnSign.setObjectName("btnSign")
        self.txtSign = QtWidgets.QLineEdit(self.tab_sign)
        self.txtSign.setEnabled(False)
        self.txtSign.setGeometry(QtCore.QRect(30, 150, 421, 25))
        self.txtSign.setObjectName("txtSign")
        self.btnBrowseFile = QtWidgets.QPushButton(self.tab_sign)
        self.btnBrowseFile.setGeometry(QtCore.QRect(450, 150, 21, 25))
        self.btnBrowseFile.setObjectName("btnBrowseFile")
        self.txtNewSigner = QtWidgets.QLineEdit(self.tab_sign)
        self.txtNewSigner.setEnabled(False)
        self.txtNewSigner.setGeometry(QtCore.QRect(30, 50, 421, 25))
        self.txtNewSigner.setObjectName("txtNewSigner")
        self.btnBrowseNewSigner = QtWidgets.QPushButton(self.tab_sign)
        self.btnBrowseNewSigner.setGeometry(QtCore.QRect(450, 50, 21, 25))
        self.btnBrowseNewSigner.setObjectName("btnBrowseNewSigner")
        self.label_4 = QtWidgets.QLabel(self.tab_sign)
        self.label_4.setGeometry(QtCore.QRect(30, 20, 191, 31))
        font = QtGui.QFont()
        font.setFamily("Abyssinica SIL")
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.label_4.setFont(font)
        self.label_4.setTextFormat(QtCore.Qt.AutoText)
        self.label_4.setObjectName("label_4")
        self.tabWidget.addTab(self.tab_sign, "")
        self.tab_verify = QtWidgets.QWidget()
        self.tab_verify.setObjectName("tab_verify")
        self.txtSigned = QtWidgets.QLineEdit(self.tab_verify)
        self.txtSigned.setEnabled(False)
        self.txtSigned.setGeometry(QtCore.QRect(30, 140, 421, 25))
        self.txtSigned.setObjectName("txtSigned")
        self.btnVerify = QtWidgets.QPushButton(self.tab_verify)
        self.btnVerify.setGeometry(QtCore.QRect(200, 220, 83, 25))
        self.btnVerify.setObjectName("btnVerify")
        self.btnBrowseSigned = QtWidgets.QPushButton(self.tab_verify)
        self.btnBrowseSigned.setGeometry(QtCore.QRect(450, 140, 21, 25))
        self.btnBrowseSigned.setObjectName("btnBrowseSigned")
        self.label_2 = QtWidgets.QLabel(self.tab_verify)
        self.label_2.setGeometry(QtCore.QRect(30, 110, 201, 21))
        font = QtGui.QFont()
        font.setFamily("Abyssinica SIL")
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.label_2.setFont(font)
        self.label_2.setTextFormat(QtCore.Qt.AutoText)
        self.label_2.setObjectName("label_2")
        self.label_5 = QtWidgets.QLabel(self.tab_verify)
        self.label_5.setGeometry(QtCore.QRect(30, 20, 201, 21))
        font = QtGui.QFont()
        font.setFamily("Abyssinica SIL")
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.label_5.setFont(font)
        self.label_5.setTextFormat(QtCore.Qt.AutoText)
        self.label_5.setObjectName("label_5")
        self.txtActualSigner = QtWidgets.QLineEdit(self.tab_verify)
        self.txtActualSigner.setEnabled(False)
        self.txtActualSigner.setGeometry(QtCore.QRect(30, 50, 421, 25))
        self.txtActualSigner.setObjectName("txtActualSigner")
        self.btnBrowseActualSigner = QtWidgets.QPushButton(self.tab_verify)
        self.btnBrowseActualSigner.setGeometry(QtCore.QRect(450, 50, 21, 25))
        self.btnBrowseActualSigner.setObjectName("btnBrowseActualSigner")
        self.tabWidget.addTab(self.tab_verify, "")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "RSA signer"))
        self.btnGenerateRSA.setText(_translate("MainWindow", "Generate RSA"))
        self.label_6.setText(_translate("MainWindow", "Enter your user id:"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tb_gen_key), _translate("MainWindow", "Generate RSA Key"))
        self.label_3.setText(_translate("MainWindow", "Select the file to sign:"))
        self.btnSign.setText(_translate("MainWindow", "Sign"))
        self.txtSign.setPlaceholderText(_translate("MainWindow", "Select file"))
        self.btnBrowseFile.setText(_translate("MainWindow", "..."))
        self.txtNewSigner.setText(_translate("MainWindow", "Select the signer key:"))
        self.txtNewSigner.setPlaceholderText(_translate("MainWindow", "Select file"))
        self.btnBrowseNewSigner.setText(_translate("MainWindow", "..."))
        self.label_4.setText(_translate("MainWindow", "Select signer key:"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_sign), _translate("MainWindow", "Sign"))
        self.txtSigned.setPlaceholderText(_translate("MainWindow", "Select file"))
        self.btnVerify.setText(_translate("MainWindow", "Verify"))
        self.btnBrowseSigned.setText(_translate("MainWindow", "..."))
        self.label_2.setText(_translate("MainWindow", "Select the signed file:"))
        self.label_5.setText(_translate("MainWindow", "Select signer key:"))
        self.txtActualSigner.setPlaceholderText(_translate("MainWindow", "Select file"))
        self.btnBrowseActualSigner.setText(_translate("MainWindow", "..."))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_verify), _translate("MainWindow", "Verify"))
[ "PyQt5.QtWidgets.QLabel", "PyQt5.QtWidgets.QWidget", "PyQt5.QtCore.QRect", "PyQt5.QtWidgets.QStatusBar", "PyQt5.QtWidgets.QLineEdit", "PyQt5.QtWidgets.QPushButton", "PyQt5.QtGui.QFont", "PyQt5.QtCore.QMetaObject.connectSlotsByName", "PyQt5.QtWidgets.QTabWidget" ]
[((510, 539), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (527, 539), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((619, 655), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (635, 655), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((734, 747), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (745, 747), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((963, 1003), 'PyQt5.QtWidgets.QTabWidget', 'QtWidgets.QTabWidget', (['self.centralwidget'], {}), '(self.centralwidget)\n', (983, 1003), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1147, 1166), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (1164, 1166), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1244, 1280), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.tb_gen_key'], {}), '(self.tb_gen_key)\n', (1263, 1280), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1427, 1465), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tb_gen_key'], {}), '(self.tb_gen_key)\n', (1448, 1465), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1622, 1655), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tb_gen_key'], {}), '(self.tb_gen_key)\n', (1638, 1655), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1735, 1748), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1746, 1748), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2085, 2104), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (2102, 2104), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2176, 2207), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab_sign'], {}), '(self.tab_sign)\n', (2192, 2207), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2288, 2301), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2299, 2301), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2586, 2622), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_sign'], {}), '(self.tab_sign)\n', (2607, 2622), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2757, 2791), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.tab_sign'], {}), '(self.tab_sign)\n', (2776, 2791), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2971, 3007), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_sign'], {}), '(self.tab_sign)\n', (2992, 3007), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3165, 3199), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.tab_sign'], {}), '(self.tab_sign)\n', (3184, 3199), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3403, 3439), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_sign'], {}), '(self.tab_sign)\n', (3424, 3439), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3606, 3637), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab_sign'], {}), '(self.tab_sign)\n', (3622, 3637), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3717, 3730), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (3728, 3730), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4067, 4086), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (4084, 4086), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4164, 4200), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.tab_verify'], {}), '(self.tab_verify)\n', (4183, 4200), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4384, 4422), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_verify'], {}), '(self.tab_verify)\n', (4405, 4422), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4571, 4609), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_verify'], {}), '(self.tab_verify)\n', (4592, 4609), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4768, 4801), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab_verify'], {}), '(self.tab_verify)\n', (4784, 4801), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4882, 4895), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (4893, 4895), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5180, 5213), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab_verify'], {}), '(self.tab_verify)\n', (5196, 5213), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5293, 5306), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (5304, 5306), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5599, 5635), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.tab_verify'], {}), '(self.tab_verify)\n', (5618, 5635), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5854, 5892), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_verify'], {}), '(self.tab_verify)\n', (5875, 5892), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6177, 6209), 'PyQt5.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['MainWindow'], {}), '(MainWindow)\n', (6197, 6209), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6398, 6447), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (6435, 6447), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((687, 717), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(160)', '(10)', '(201)', '(41)'], {}), '(160, 10, 201, 41)\n', (699, 717), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1039, 1069), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(50)', '(531)', '(371)'], {}), '(10, 50, 531, 371)\n', (1051, 1069), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1316, 1345), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(50)', '(421)', '(25)'], {}), '(20, 50, 421, 25)\n', (1328, 1345), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1506, 1537), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(180)', '(100)', '(111)', '(25)'], {}), '(180, 100, 111, 25)\n', (1518, 1537), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1689, 1718), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(20)', '(201)', '(21)'], {}), '(20, 20, 201, 21)\n', (1701, 1718), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2241, 2271), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(120)', '(191)', '(31)'], {}), '(30, 120, 191, 31)\n', (2253, 2271), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2656, 2686), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(210)', '(220)', '(83)', '(25)'], {}), '(210, 220, 83, 25)\n', (2668, 2686), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2864, 2894), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(150)', '(421)', '(25)'], {}), '(30, 150, 421, 25)\n', (2876, 2894), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3047, 3077), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(450)', '(150)', '(21)', '(25)'], {}), '(450, 150, 21, 25)\n', (3059, 3077), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3282, 3311), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(50)', '(421)', '(25)'], {}), '(30, 50, 421, 25)\n', (3294, 3311), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3484, 3513), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(450)', '(50)', '(21)', '(25)'], {}), '(450, 50, 21, 25)\n', (3496, 3513), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3671, 3700), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(20)', '(191)', '(31)'], {}), '(30, 20, 191, 31)\n', (3683, 3700), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4277, 4307), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(140)', '(421)', '(25)'], {}), '(30, 140, 421, 25)\n', (4289, 4307), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4458, 4488), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(200)', '(220)', '(83)', '(25)'], {}), '(200, 220, 83, 25)\n', (4470, 4488), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4651, 4681), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(450)', '(140)', '(21)', '(25)'], {}), '(450, 140, 21, 25)\n', (4663, 4681), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4835, 4865), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(110)', '(201)', '(21)'], {}), '(30, 110, 201, 21)\n', (4847, 4865), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5247, 5276), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(20)', '(201)', '(21)'], {}), '(30, 20, 201, 21)\n', (5259, 5276), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5724, 5753), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(50)', '(421)', '(25)'], {}), '(30, 50, 421, 25)\n', (5736, 5753), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5940, 5969), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(450)', '(50)', '(21)', '(25)'], {}), '(450, 50, 21, 25)\n', (5952, 5969), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')]
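An editor-added sketch of the standard way to display a pyuic5-generated form like the one above:

# Editor-added sketch: instantiate and show the generated Ui_MainWindow.
import sys
from PyQt5 import QtWidgets

app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(window)
window.show()
sys.exit(app.exec_())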
#!/usr/bin/env python
"""
Author: <NAME>
Date: 11.2018

# Description
Map model metabolites and reactions to metanetx identifiers using KEGG, metacyc and BiGG annotations.
"""
import logging

import cobra
import pandas as pd
from pathlib import Path
import libchebipy


def map_model_metabolites(model, metanetx_fn):
    df = pd.read_csv(metanetx_fn, header = None, sep = "\t", comment = "#")
    df.columns = ["db:id", "metanetx", "reason", "name"]
    kegg_df = df[df["db:id"].str.contains("kegg:")]
    bigg_df = df[df["db:id"].str.contains("bigg:")]
    metacyc_df = df[df["db:id"].str.contains("metacyc:")]
    del df

    new_df_list = []
    for m in model.metabolites:
        # print(m.annotation)
        mnx_annotations = []
        bigg_match = bigg_df.loc[bigg_df["db:id"] == "bigg:{}".format(m.id[:-2]), :]
        mnx_annotations += list(bigg_match["metanetx"])

        try:
            kegg_id = m.annotation["kegg.compound"]
        except KeyError:
            kegg_match = None
        else:
            kegg_match = kegg_df.loc[kegg_df["db:id"] == "kegg:{}".format(kegg_id), :]
            mnx_annotations += list(kegg_match["metanetx"])

        # metacyc
        if len(mnx_annotations) == 0:
            try:
                metacyc_id = m.annotation["biocyc"]
            except KeyError:
                metacyc_match = None
                print(m.id, None)
            else:
                metacyc_match = metacyc_df.loc[metacyc_df["db:id"] == "metacyc:{}".format(metacyc_id), :]
                mnx_annotations += list(metacyc_match["metanetx"])
                print(m.id, list(metacyc_match["metanetx"]))

        # mnx_annotations = list(set(mnx_annotations))
        if "metanetx.chemical" in list(m.annotation.keys()):
            mnx_annot = m.annotation["metanetx.chemical"]
        else:
            mnx_annot = None

        if not mnx_annot and len(mnx_annotations):
            new_df_list.append([m.id] + mnx_annotations)
        elif (len(mnx_annotations) == 1) and mnx_annot != mnx_annotations[0]:
            new_df_list.append([m.id] + mnx_annotations)
        elif (len(mnx_annotations) == 2) and isinstance(mnx_annot, str):
            new_df_list.append([m.id] + mnx_annotations)
        elif (len(mnx_annotations) == 2) and isinstance(mnx_annot, list):
            print("#######", m.id, mnx_annotations, mnx_annot)
        else:
            pass

    # print(new_df_list)
    new_df = pd.DataFrame(new_df_list, columns = ["Met ID", "MNX 1", "MNX 2"])
    new_df.to_csv("../../ComplementaryData/curation/metanetx_to_change.csv", index_label = "index")
    # print(new_df)


def map_model_reactions(model, metanetx_fn):
    df = pd.read_csv(metanetx_fn, header = None, sep = "\t", comment = "#")
    df.columns = ["db:id", "metanetx", "reaction string"]
    kegg_df = df[df["db:id"].str.contains("kegg:")]
    bigg_df = df[df["db:id"].str.contains("bigg:")]
    metacyc_df = df[df["db:id"].str.contains("metacyc:")]
    deprecated_df = df[df["db:id"].str.contains("deprecated:")]
    del df

    new_df_list = []
    for r in model.reactions:
        mnx_annotations = []

        # BiGG
        bigg_match = bigg_df.loc[bigg_df["db:id"] == "bigg:{}".format(r.id), :]
        mnx_annotations += list(bigg_match["metanetx"])

        # KEGG
        try:
            kegg_id = r.annotation["kegg.reaction"]
        except KeyError:
            kegg_match = None
        else:
            kegg_match = kegg_df.loc[kegg_df["db:id"] == "kegg:{}".format(kegg_id), :]
            mnx_annotations += list(kegg_match["metanetx"])

        # Metacyc
        try:
            metacyc_id = r.annotation["biocyc"]
        except KeyError:
            metacyc_match = None
        else:
            metacyc_match = metacyc_df.loc[metacyc_df["db:id"] == "metacyc:{}".format(metacyc_id), :]
            mnx_annotations += list(metacyc_match["metanetx"])
            # print(r.id, list(metacyc_match["metanetx"]))

        # Remove duplicates
        mnx_annotations = list(set(mnx_annotations))
        # print("{0:<3} {1:<100} {2:<20} {3:<50} {4}".format(i, str(r), str(mnx_annot), ", ".join(mnx_annotations), origin))
        if len(mnx_annotations):
            new_df_list.append([r.id] + mnx_annotations)

    new_df = pd.DataFrame(new_df_list, columns = ["Reaction ID", "MNX 1", "MNX 2", "MNX 3"])
    new_df.to_csv("../../ComplementaryData/curation/metanetx_reaction_annotations_to_change.csv", index_label = "index")


def map_metabolites_to_chebi(scoGEM, metanetx_fn):
    df = pd.read_csv(metanetx_fn, header = None, sep = "\t", comment = "#")
    df.columns = ["db:id", "metanetx", "reason", "name"]
    chebi_df = df[df["db:id"].str.contains("chebi:")]
    del df

    new_df_list = []
    for m in scoGEM.metabolites:
        try:
            mnx_annot = m.annotation["metanetx.chemical"]
        except KeyError:
            print("No metanetx annotation for {0}, {1}".format(
                m.id, ["{0}:{1}".format(key, value) for key, value in m.annotation.items()]))
            continue
        mnx_annot = as_list(mnx_annot)

        chebi_ids = []
        for mnx_i in mnx_annot:
            mnx_match = chebi_df.loc[chebi_df["metanetx"] == mnx_i]
            chebi_ids += list(mnx_match["db:id"].values)
        chebi_ids = [x.upper() for x in chebi_ids]

        parent_chebis = []
        for chebi_id in list(set(chebi_ids)):
            lib_data = libchebipy.ChebiEntity(chebi_id.upper())
            parent = lib_data.get_parent_id()
            if parent:
                parent_chebis.append(parent)
            else:
                parent_chebis.append(chebi_id.upper())

        parent_chebis = list(set(parent_chebis))

        try:
            current_chebi_list = as_list(m.annotation["chebi"])
        except:
            current_chebi_list = [None]
            in_new_chebis = False
        else:
            in_new_chebis = True
            for current_chebi in current_chebi_list:
                if not current_chebi in chebi_ids:
                    in_new_chebis = False
                    print("{2}: {0} is not in the new set {1}".format(current_chebi, parent_chebis, m.id))

        new_df_list.append([m.id, parent_chebis, current_chebi_list, in_new_chebis])

    new_df = pd.DataFrame(new_df_list, columns = ["Met ID", "New chebi annotation", "Current chebi annotation",
                                          "Old chebi in new (including secondary chebis)"])
    new_df.to_csv("../../ComplementaryData/curation/chebi_annotation.csv", index = False)


def as_list(param):
    if isinstance(param, list):
        return param
    else:
        return [param]


def apply_metanetx_mapping(scoGEM, met_to_metanetx_fn):
    """
    Deprecated: moved to fix_issue33_annotation_bugs.py
    """
    df = pd.read_csv(met_to_metanetx_fn, index_col = 0)
    for i, row in df.iterrows():
        m_id = row[0]
        m = scoGEM.metabolites.get_by_id(m_id)
        try:
            old_anno = m.annotation["metanetx.chemical"]
        except KeyError:
            old_anno = None

        if isinstance(row[2], str):
            m.annotation["metanetx.chemical"] = row[1:2]
        elif isinstance(row[1], str):
            m.annotation["metanetx.chemical"] = row[1]
        else:
            continue
        logging.info("Changed metanetx.chemical annotation of metabolite {0} from {1} to {2}".format(
            m.id, old_anno, m.annotation["metanetx.chemical"]))


if __name__ == '__main__':
    repo_path = Path(__file__).parent.parent.parent
    model_fn = repo_path / "ModelFiles" / "xml" / "scoGEM.xml"
    model = cobra.io.read_sbml_model(str(model_fn))

    # map_model_metabolites(model, metanetx_fn)
    fn = repo_path / "ComplementaryData" / "curation" / "metanetx_to_change.csv"
    if 0:
        apply_metanetx_mapping(model, fn)

    if 0:
        metanetx_fn = repo_path / "ComplementaryData" / "curation" / "metanetx_chem_xref.tsv"
        map_metabolites_to_chebi(model, metanetx_fn)

    if 1:
        metanetx_fn = repo_path / "ComplementaryData" / "curation" / "metanetx_reac_xref.tsv"
        map_model_reactions(model, metanetx_fn)
[ "pandas.read_csv", "pathlib.Path", "pandas.DataFrame" ]
[((309, 369), 'pandas.read_csv', 'pd.read_csv', (['metanetx_fn'], {'header': 'None', 'sep': '"""\t"""', 'comment': '"""#"""'}), "(metanetx_fn, header=None, sep='\\t', comment='#')\n", (320, 369), True, 'import pandas as pd\n'), ((2445, 2508), 'pandas.DataFrame', 'pd.DataFrame', (['new_df_list'], {'columns': "['Met ID', 'MNX 1', 'MNX 2']"}), "(new_df_list, columns=['Met ID', 'MNX 1', 'MNX 2'])\n", (2457, 2508), True, 'import pandas as pd\n'), ((2686, 2746), 'pandas.read_csv', 'pd.read_csv', (['metanetx_fn'], {'header': 'None', 'sep': '"""\t"""', 'comment': '"""#"""'}), "(metanetx_fn, header=None, sep='\\t', comment='#')\n", (2697, 2746), True, 'import pandas as pd\n'), ((4281, 4358), 'pandas.DataFrame', 'pd.DataFrame', (['new_df_list'], {'columns': "['Reaction ID', 'MNX 1', 'MNX 2', 'MNX 3']"}), "(new_df_list, columns=['Reaction ID', 'MNX 1', 'MNX 2', 'MNX 3'])\n", (4293, 4358), True, 'import pandas as pd\n'), ((4544, 4604), 'pandas.read_csv', 'pd.read_csv', (['metanetx_fn'], {'header': 'None', 'sep': '"""\t"""', 'comment': '"""#"""'}), "(metanetx_fn, header=None, sep='\\t', comment='#')\n", (4555, 4604), True, 'import pandas as pd\n'), ((6269, 6423), 'pandas.DataFrame', 'pd.DataFrame', (['new_df_list'], {'columns': "['Met ID', 'New chebi annotation', 'Current chebi annotation',\n 'Old chebi in new (including secondary chebis)']"}), "(new_df_list, columns=['Met ID', 'New chebi annotation',\n 'Current chebi annotation',\n 'Old chebi in new (including secondary chebis)'])\n", (6281, 6423), True, 'import pandas as pd\n'), ((6754, 6798), 'pandas.read_csv', 'pd.read_csv', (['met_to_metanetx_fn'], {'index_col': '(0)'}), '(met_to_metanetx_fn, index_col=0)\n', (6765, 6798), True, 'import pandas as pd\n'), ((7472, 7486), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (7476, 7486), False, 'from pathlib import Path\n')]
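A minimal driver for the mapping helpers above, shown as a sketch: the module name map_metanetx is an assumption (the original file name is not given), while the TSV file names match those used in the script's __main__ block.

# Hypothetical driver; the module name is an assumption, not part of the original script.
import cobra
from map_metanetx import map_model_metabolites, map_model_reactions

model = cobra.io.read_sbml_model("scoGEM.xml")             # any cobra-readable SBML model
map_model_metabolites(model, "metanetx_chem_xref.tsv")     # writes metanetx_to_change.csv
map_model_reactions(model, "metanetx_reac_xref.tsv")       # writes metanetx_reaction_annotations_to_change.csv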
""" This module provides utility methods. """ import datetime import os import numpy as np import pandas as pd from matplotlib import pyplot as plt from plotnine import * from statsmodels.tsa.statespace.kalman_smoother import SmootherResults from statsmodels.tsa.statespace.mlemodel import MLEResults, MLEResultsWrapper from state_space import SSMS def plot_states(filtered_results: MLEResultsWrapper, smoothed_results: SmootherResults, regions: list, z_names: list, save_path: str): """ Plots states (all variables specified in z_names) and saves it in save_path. The dataframe states contains all the states (mu, nu, z_names) over time. :param filtered_results: filtered results from a SSMS class :param smoothed_results: smoothed results from a SSMS class, smoothed results should be an MLEResultsWrapper if you don't wanted smoothed states :param regions: list of region names :param z_names: a list of column names of the independent variables to be placed in the Z (design) matrix :param save_path: save path for plots :return: """ n_regions = len(regions) n_betas = len(z_names) # Create confidence intervals for states (first n_regions*3 parameters are for the variances of y, mu and nu) if isinstance(smoothed_results, MLEResultsWrapper): states = np.transpose(filtered_results.filtered_state) cis = np.zeros((states.shape[0], n_betas * 3)) # We use the state_cov (covariance matrix of state equation Q) to calculate the ci's bound = 1.96 * np.sqrt(filtered_results.params[n_regions * 3:]) else: states = np.transpose(smoothed_results.smoothed_state) cis = np.zeros((states.shape[0], n_betas * 3)) # We use the state_cov (covariance matrix of state equation Q) to calculate the ci's bound = 1.96 * np.sqrt(filtered_results.params[n_regions * 3:]) for i in range(n_betas): cis[:, i] = states[:, n_regions * 2 + i] - bound[i] cis[:, i + n_betas] = states[:, n_regions * 2 + i] + bound[i] cis[:, i + n_betas * 2] = np.multiply(cis[:, i], cis[:, i + n_betas]) cis[:, i + n_betas * 2][cis[:, i + n_betas * 2] < 0] = 0 cis[:, i + n_betas * 2][cis[:, i + n_betas * 2] > 0] = 1 # Create list cols with columns names for states Dataframe cols = [] for i in range(states.shape[1] + n_betas * 3): if i < n_regions: cols.append('nu_' + regions[i]) elif n_regions <= i < n_regions * 2: cols.append('mu_' + regions[i - n_regions]) elif n_regions * 2 <= i < n_regions * 2 + n_betas: cols.append(z_names[i - n_regions * 2]) elif n_regions * 2 + n_betas <= i < n_regions * 2 + n_betas * 2: cols.append(z_names[i - (n_regions * 2 + n_betas)] + '_lb') elif n_regions * 2 + n_betas * 2 <= i < n_regions * 2 + n_betas * 3: cols.append(z_names[i - (n_regions * 2 + n_betas * 2)] + '_ub') else: cols.append(z_names[i - (n_regions * 2 + n_betas * 3)] + '_significant') states_df = pd.DataFrame(np.concatenate((states, cis), axis=1), columns=cols) states_df['Date'] = pd.date_range(start='1/7/2018', periods=len(states_df), freq='W') states_df_01 = states_df.iloc[:, -(n_betas * 4 + 1):] states_df_01['Date'] = states_df_01['Date'].dt.strftime('%G%V') if isinstance(smoothed_results, MLEResultsWrapper): states_df_01.to_excel(os.path.join(save_path, 'states_filtered.xlsx')) else: states_df_01.to_excel(os.path.join(save_path, 'states_smoothed.xlsx')) # The first 5 observations are removed for nice graphs states_df = states_df.iloc[5:, :] # Important events are the first intelligent lockdown and relaxation of rules events = [datetime.datetime.strptime('2020-11-7', '%G-%V-%u'), datetime.datetime.strptime('2020-27-7', '%G-%V-%u')] events_full = [*events, 
*[datetime.datetime.strptime('2020-51-7', '%G-%V-%u'), datetime.datetime.strptime('2021-25-7', '%G-%V-%u')]] for i in range(n_betas): if i == z_names.index('StringencyIndex'): # Remove 0-values when plotting StringencyIndex states_df_02 = states_df[108:] else: states_df_02 = states_df p = ggplot(states_df_02, aes(x='Date')) + scale_x_datetime(breaks=get_ticks(states_df_02, 8)[0], labels=get_ticks(states_df_02, 8)[1]) + geom_ribbon( aes(ymin=states_df_02.iloc[:, n_regions * 2 + n_betas + i], ymax=states_df_02.iloc[:, n_regions * 2 + n_betas * 2 + i], color='"95% CI"'), alpha=0.1) + geom_line( aes(y=states_df_02.columns[n_regions * 2 + i], color='"State"')) + geom_vline(xintercept=events_full, linetype="dotted") + \ geom_vline( xintercept=[datetime.datetime.strptime('2020-50-7', '%G-%V-%u')], linetype="solid") + scale_color_manual( values=['#dedede', '#4472c4']) + labs(x='Date', y='State', color='Legend') if isinstance(smoothed_results, MLEResultsWrapper): ggsave(plot=p, filename='coefficient_for_filtered_' + z_names[i], path=save_path, verbose=False, dpi=600) else: ggsave(plot=p, filename='coefficient_for_smoothed_' + z_names[i], path=save_path, verbose=False, dpi=600) # print(p) def forecast_error(results: MLEResults, regions: list, save_path: str, first=int, last=int, ci=bool, tp=str, n_plots=4): """ Computes forecast error with one-step ahead forecasts for each region and saves it in save_path. Moreover, plots forecasts, actual sales and errors of the n_plots best/worst MASE/MdASE regions (only MASE plots are saved). :param results: (extended) results (from prepare_forecast()) :param regions: list of region names, the order of the names should be exactly the same as the order of the regions in the model :param save_path: save path for plots :param first: the time index from where your plots should start :param last: this time index should exactly be equal to the time index-1 where the sample of the model ends :param ci: whether to plot a confidence interval (True) or not (False), if the CI's become too big set ci=False otherwise the sales will be plotted as straight lines :param tp: specify the type of data (e.g. 
one_step_ahead_forecast) you want to plot, use _ instead of spaces for tp, since the name of the plots/excel files will also have this name :param n_plots: the number of regions to plot, 4 (default) implies plotting the forecasts, actual sales and errors of the 4 best/worst MASE/MdASE regions (= 3 * 4 * 2 * 2 = 48 plots) :return: """ n_regions = len(regions) model = results.model data = results.get_prediction(start=first, end=last) # Calculate MASE using one-step ahead forecasts mases = np.zeros(len(regions)) maes = np.zeros((38, len(regions))) maes_naive = np.zeros((152, len(regions))) mdases = np.zeros(len(regions)) for region in range(len(regions)): maes[:, region] = np.abs(model.endog[first:, region] - data.predicted_mean[:, region]) maes_naive[:, region] = np.abs( [x - model.endog[0:153, region][i - 1] for i, x in enumerate(model.endog[0:153, region])][1:]) mases[region] = np.mean(maes[:, region]) / np.mean(maes_naive[:, region]) mdases[region] = np.median(maes[:, region]) / np.median(maes_naive[:, region]) mean_mase = np.mean(mases) med_mase = np.median(mases) mean_mdase = np.mean(mdases) med_mdase = np.median(mdases) l1_mase = sum(x < 1 for x in mases) / mases.shape[0] l1_mdase = sum(x < 1 for x in mdases) / mdases.shape[0] best_mase, worst_mase = np.argmin(mases), np.argmax(mases) best_mdase, worst_mdase = np.argmin(mdases), np.argmax(mdases) mase_df = pd.DataFrame(np.transpose(mases.reshape(1, n_regions)), index=regions, columns=['MASE']) mdase_df = pd.DataFrame(np.transpose(mdases.reshape(1, n_regions)), index=regions, columns=['MdASE']) error_df = mase_df.merge(mdase_df, left_index=True, right_index=True, how='left') error_df[''] = '' error_df['Best MASE'] = [regions[best_mase], mases[best_mase], '', 'Best MdASE', regions[best_mdase], mdases[best_mdase]] + [''] * (len(error_df) - 6) error_df['Worst MASE'] = [regions[worst_mase], mases[worst_mase], '', 'Worst MdASE', regions[worst_mdase], mdases[worst_mdase]] + [''] * (len(error_df) - 6) error_df['Mean MASE'] = [mean_mase] + [''] * 2 + ['Mean MdASE', mean_mdase] + [''] * (len(error_df) - 5) error_df['Median MASE'] = [med_mase] + [''] * 2 + ['Median MdASE', med_mdase] + [''] * (len(error_df) - 5) error_df['Proportion of regions MASE<1'] = [l1_mase] + [''] * 2 + ['Proportion of regions MdASE<1', l1_mdase] + [ ''] * (len(error_df) - 5) error_df.to_excel(os.path.join(save_path, 'errors_' + tp + '.xlsx')) # Plot forecasts (df_pred), actual sales (df_full) and MAE/MAE_naive (df_mae) df_pred = pd.DataFrame(np.concatenate((model.endog[first:, :], data.predicted_mean, data.conf_int()), axis=1)) start_date = datetime.datetime(2018, 1, 7) + datetime.timedelta(weeks=first) df_pred['Date'] = pd.date_range(start=start_date, periods=len(df_pred), freq='W') df_full = pd.DataFrame(model.endog) df_full['Date'] = pd.date_range(start=datetime.datetime(2018, 1, 7), periods=len(df_full), freq='W') df_mae = pd.DataFrame(np.concatenate((maes_naive, maes), axis=0)) # MAE starts in 2018 week 2 (sunday) because the naive forecast (denominator of mase) starts at t=2 df_mae['Date'] = pd.date_range(start=datetime.datetime(2018, 1, 14), periods=len(df_mae), freq='W') plot_regions = np.concatenate((mases.argsort()[:n_plots], mases.argsort()[-n_plots:][::-1], mdases.argsort()[:n_plots], mdases.argsort()[-n_plots:][::-1]), axis=0) # Important events are the second lockdown and relaxation of (almost all) rules events_test = [datetime.datetime.strptime('2020-51-7', '%G-%V-%u'), datetime.datetime.strptime('2021-25-7', '%G-%V-%u')] events_full = [ 
        *[datetime.datetime.strptime('2020-11-7', '%G-%V-%u'), datetime.datetime.strptime('2020-27-7', '%G-%V-%u')],
        *events_test]
    for i in range(plot_regions.shape[0]):
        if ci:
            p = ggplot(df_pred, aes(x='Date')) + scale_x_datetime(breaks=get_ticks(df_pred, 8)[0],
                                                               labels=get_ticks(df_pred, 8)[1]) + geom_ribbon(
                aes(ymin=df_pred.iloc[:, n_regions * 2 + plot_regions[i]],
                    ymax=df_pred.iloc[:, n_regions * 3 + plot_regions[i]], color='"95% CI"'), alpha=0.1) + geom_line(
                aes(y=df_pred.iloc[:, plot_regions[i]], color='"Actual"')) + geom_line(
                aes(y=df_pred.iloc[:, n_regions + plot_regions[i]], color='"Forecast"')) + geom_vline(
                xintercept=events_test, linetype="dotted") + scale_color_manual(
                values=['#dedede', '#4472c4', '#ed7d31']) + labs(x='Date', y='Sales', color='Legend')
        else:
            p = ggplot(df_pred, aes(x='Date')) + scale_x_datetime(breaks=get_ticks(df_pred, 8)[0],
                                                               labels=get_ticks(df_pred, 8)[1]) + geom_line(
                aes(y=df_pred.iloc[:, plot_regions[i]], color='"Actual"')) + geom_line(
                aes(y=df_pred.iloc[:, n_regions + plot_regions[i]], color='"Forecast"')) + geom_vline(
                xintercept=events_test, linetype="dotted") + labs(x='Date', y='Sales')

        # q (actual sales) and m (absolute errors) contain no CI ribbon and therefore do not
        # depend on the ci flag, so they are built unconditionally
        q = ggplot(df_full, aes(x='Date')) + scale_x_datetime(breaks=get_ticks(df_full, 8)[0],
                                                           labels=get_ticks(df_full, 8)[1]) + geom_line(
            aes(y=df_full.iloc[:, plot_regions[i]], color='"Actual"')) + geom_vline(
            xintercept=events_full, linetype="dotted") + geom_vline(
            xintercept=[datetime.datetime.strptime('2020-50-7', '%G-%V-%u')],
            linetype="solid") + scale_color_manual(values=['#4472c4']) + labs(x='Date', y='Sales', color='Legend')

        m = ggplot(df_mae, aes(x='Date')) + scale_x_datetime(breaks=get_ticks(df_mae, 8)[0],
                                                          labels=get_ticks(df_mae, 8)[1]) + geom_line(
            aes(y=df_mae.iloc[0:153, plot_regions[i]], color='"AE_naive"'),
            data=df_mae['Date'][0:153].to_frame()) + geom_line(
            aes(y=df_mae.iloc[152:190, plot_regions[i]], color='"AE"'),
            data=df_mae['Date'][152:190].to_frame()) + geom_vline(
            xintercept=events_full, linetype="dotted") + geom_vline(
            xintercept=[datetime.datetime.strptime('2020-50-7', '%G-%V-%u')],
            linetype="solid") + scale_color_manual(values=['#4472c4', '#ed7d31']) + labs(x='Date', y='Error', color='Legend')

        # print(m)
        if i < n_plots:
            ggsave(plot=p, filename=tp + '_mase_best_' + str(i + 1) + '_' + regions[plot_regions[i]], path=save_path,
                   verbose=False, dpi=600)
            ggsave(plot=q, filename='actual_sales_mase_best_' + str(i + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
            ggsave(plot=m, filename='mase_best_' + str(i + 1) + '_' + regions[plot_regions[i]], path=save_path,
                   verbose=False, dpi=600)
        elif i < n_plots * 2:
            ggsave(plot=p, filename=tp + '_mase_worst_' + str(i - n_plots + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
            ggsave(plot=q, filename='actual_sales_mase_worst_' + str(i - n_plots + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
            ggsave(plot=m, filename='mase_worst_' + str(i - n_plots + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
        """
        elif i < n_plots * 3:
            ggsave(plot=p, filename=tp + '_mdase_best_' + str(i - n_plots * 2 + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
            ggsave(plot=q,
                   filename='actual_sales_mdase_best_' + str(i - n_plots * 2 + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
            ggsave(plot=m, filename='mdase_best_' + str(i - n_plots * 2 + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
        else:
            ggsave(plot=p, filename=tp + '_mdase_worst_' + str(i -
n_plots * 3 + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
            ggsave(plot=q,
                   filename='actual_sales_mdase_worst_' + str(i - n_plots * 3 + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
            ggsave(plot=m, filename='mdase_worst_' + str(i - n_plots * 3 + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
        """


def get_ticks(data: pd.DataFrame, n_ticks: int):
    """
    Returns x_axis ticks as dates.

    :param data: dataframe where the last column should contain pandas.Timestamp objects
    :param n_ticks: number of ticks
    :return: ticks (breaks) and their labels
    """
    x_breaks = []
    x_labels = []
    n_ticks = n_ticks - 1
    interval = data.shape[0] / n_ticks
    for i in range(n_ticks + 1):
        x_breaks.append(data.iloc[0, -1:][0] + datetime.timedelta(weeks=interval * i))
        x_labels.append((data.iloc[0, -1:][0] + datetime.timedelta(weeks=interval * i)).strftime('%G-%V'))

    return x_breaks, x_labels


def print_results(results: MLEResults, save_path: str, name: str):
    """
    Pretty-prints the results for an SSMS model with k variables of interest (in beta equations). Assumes n > k.

    :param results: results object for an SSMS model
    :param save_path: path to save location
    :param name: model name
    :return:
    """
    model = results.model
    if not isinstance(model, SSMS):
        print("Can't print parameters for a non-SSMS model.")
        return

    # Print AIC, BIC, MSE, and MAE.
    with open(os.path.join(save_path, name + '_stats.csv'), 'w') as out:
        header = ','.join(['AIC', 'BIC', 'MSE', 'MAE'])
        stats = ','.join([str(results.aic), str(results.bic), str(results.mse), str(results.mae)])
        out.write('\n'.join([header, stats]))

    # Print the fitted parameter values per region.
    regions = model.group_names
    params = results.params
    n = len(regions)
    k = model.k
    n_cov = model.n_cov
    param_names = model.z_names

    y = ','.join(['region', 'var (y)'])
    mu = 'var (mu)'
    nu = 'var (nu)'
    lm = ','.join(['param', 'var'])
    header = ',,'.join([y, mu, nu, lm])

    param_from = 0
    if model.cov_rest == 'GC':
        param_to = n + n_cov
        y_var = params[param_from:param_from + n]
    else:
        param_to = n
        y_var = params[param_from:param_to]
    param_from = param_to
    param_to += n
    mu_var = params[param_from:param_to]
    param_from = param_to
    param_to += n
    nu_var = params[param_from:param_to]
    param_from = param_to
    param_to += k
    param_var = params[param_from:]

    with open(os.path.join(save_path, name + '_params.csv'), 'w') as out:
        out.write(header + '\n')
        for i in range(n):
            y = ','.join([regions[i], str(y_var[i])])
            mu = str(mu_var[i])
            nu = str(nu_var[i])
            line = ',,'.join([y, mu, nu])
            if i < k:
                lm = ','.join([param_names[i], str(param_var[i])])
                line = ',,'.join([line, lm])
            out.write(line + '\n')


def plot_variables(data: list, info: list, all_regions: bool = False):
    """
    Plots variables.
    :param data: list of form [y, mu, threshold, obs_sd]
    :param info: list of form [index, name]
    :param all_regions: boolean to plot regions 1-by-1 (True) or all at the same time (False)
    :return:
    """
    if all_regions:
        if info:
            t = np.arange(1, len(data[0][0]) + 1)
            for i in range(len(info)):
                index = info[i][0]
                plt.figure()
                plt.suptitle(info[i][1])
                plt.plot(t, data[index][0], 'b')
                plt.plot(t, data[index][1] + data[index][2] * data[index][3], 'r')
                plt.plot(t, data[index][1] - data[index][2] * data[index][3], 'r')
                plt.show()
        else:
            print('No outliers')
    else:
        if info:
            t = np.arange(1, len(data[0][0]) + 1)
            for i in range(len(info)):
                index = info[i][0]
                plt.figure()
                plt.suptitle(info[i][1])
                plt.plot(t, data[index][0], 'b')
                plt.plot(t, data[index][1] + data[index][2] * data[index][3], 'r')
                plt.plot(t, data[index][1] - data[index][2] * data[index][3], 'r')
        else:
            print('No outliers')
        plt.show()


def prepare_forecast(results: MLEResults, data: pd.DataFrame):
    """
    Prepares a new MLEResults object, such that regular methods can be used to compute forecasts. For out-of-sample
    forecasts, we can simply use 'in-sample' forecasts of a model with fixed parameters, obtained from the initial fit.

    :param results: the MLEResults object of the training fit
    :param data: the extended data (train + test)
    :return: a new MLEResults object, fitted with fixed parameters obtained from the initial training fit
    """
    model = results.model
    if not isinstance(model, SSMS):
        print("Can't prepare forecasts for a non-SSMS model.")
        return

    new_model = SSMS(data, group_name=model.group_name, y_name=model.y_name, z_names=model.z_names,
                     cov_rest=model.cov_rest)
    fitted_params = results.params
    new_result = new_model.filter(fitted_params)

    return new_model, new_result
[ "numpy.abs", "numpy.argmax", "matplotlib.pyplot.suptitle", "numpy.argmin", "matplotlib.pyplot.figure", "numpy.mean", "os.path.join", "pandas.DataFrame", "numpy.multiply", "numpy.transpose", "datetime.timedelta", "matplotlib.pyplot.show", "numpy.median", "datetime.datetime", "datetime.datetime.strptime", "state_space.SSMS", "numpy.concatenate", "matplotlib.pyplot.plot", "numpy.zeros", "numpy.sqrt" ]
[((7666, 7680), 'numpy.mean', 'np.mean', (['mases'], {}), '(mases)\n', (7673, 7680), True, 'import numpy as np\n'), ((7696, 7712), 'numpy.median', 'np.median', (['mases'], {}), '(mases)\n', (7705, 7712), True, 'import numpy as np\n'), ((7730, 7745), 'numpy.mean', 'np.mean', (['mdases'], {}), '(mdases)\n', (7737, 7745), True, 'import numpy as np\n'), ((7762, 7779), 'numpy.median', 'np.median', (['mdases'], {}), '(mdases)\n', (7771, 7779), True, 'import numpy as np\n'), ((9545, 9570), 'pandas.DataFrame', 'pd.DataFrame', (['model.endog'], {}), '(model.endog)\n', (9557, 9570), True, 'import pandas as pd\n'), ((20451, 20564), 'state_space.SSMS', 'SSMS', (['data'], {'group_name': 'model.group_name', 'y_name': 'model.y_name', 'z_names': 'model.z_names', 'cov_rest': 'model.cov_rest'}), '(data, group_name=model.group_name, y_name=model.y_name, z_names=model.\n z_names, cov_rest=model.cov_rest)\n', (20455, 20564), False, 'from state_space import SSMS\n'), ((1347, 1392), 'numpy.transpose', 'np.transpose', (['filtered_results.filtered_state'], {}), '(filtered_results.filtered_state)\n', (1359, 1392), True, 'import numpy as np\n'), ((1407, 1447), 'numpy.zeros', 'np.zeros', (['(states.shape[0], n_betas * 3)'], {}), '((states.shape[0], n_betas * 3))\n', (1415, 1447), True, 'import numpy as np\n'), ((1640, 1685), 'numpy.transpose', 'np.transpose', (['smoothed_results.smoothed_state'], {}), '(smoothed_results.smoothed_state)\n', (1652, 1685), True, 'import numpy as np\n'), ((1700, 1740), 'numpy.zeros', 'np.zeros', (['(states.shape[0], n_betas * 3)'], {}), '((states.shape[0], n_betas * 3))\n', (1708, 1740), True, 'import numpy as np\n'), ((2099, 2142), 'numpy.multiply', 'np.multiply', (['cis[:, i]', 'cis[:, i + n_betas]'], {}), '(cis[:, i], cis[:, i + n_betas])\n', (2110, 2142), True, 'import numpy as np\n'), ((3109, 3146), 'numpy.concatenate', 'np.concatenate', (['(states, cis)'], {'axis': '(1)'}), '((states, cis), axis=1)\n', (3123, 3146), True, 'import numpy as np\n'), ((3795, 3846), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-11-7"""', '"""%G-%V-%u"""'], {}), "('2020-11-7', '%G-%V-%u')\n", (3821, 3846), False, 'import datetime\n'), ((3848, 3899), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-27-7"""', '"""%G-%V-%u"""'], {}), "('2020-27-7', '%G-%V-%u')\n", (3874, 3899), False, 'import datetime\n'), ((7265, 7333), 'numpy.abs', 'np.abs', (['(model.endog[first:, region] - data.predicted_mean[:, region])'], {}), '(model.endog[first:, region] - data.predicted_mean[:, region])\n', (7271, 7333), True, 'import numpy as np\n'), ((7926, 7942), 'numpy.argmin', 'np.argmin', (['mases'], {}), '(mases)\n', (7935, 7942), True, 'import numpy as np\n'), ((7944, 7960), 'numpy.argmax', 'np.argmax', (['mases'], {}), '(mases)\n', (7953, 7960), True, 'import numpy as np\n'), ((7991, 8008), 'numpy.argmin', 'np.argmin', (['mdases'], {}), '(mdases)\n', (8000, 8008), True, 'import numpy as np\n'), ((8010, 8027), 'numpy.argmax', 'np.argmax', (['mdases'], {}), '(mdases)\n', (8019, 8027), True, 'import numpy as np\n'), ((9114, 9163), 'os.path.join', 'os.path.join', (['save_path', "('errors_' + tp + '.xlsx')"], {}), "(save_path, 'errors_' + tp + '.xlsx')\n", (9126, 9163), False, 'import os\n'), ((9380, 9409), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(1)', '(7)'], {}), '(2018, 1, 7)\n', (9397, 9409), False, 'import datetime\n'), ((9412, 9443), 'datetime.timedelta', 'datetime.timedelta', ([], {'weeks': 'first'}), '(weeks=first)\n', (9430, 9443), False, 'import 
datetime\n'), ((9703, 9745), 'numpy.concatenate', 'np.concatenate', (['(maes_naive, maes)'], {'axis': '(0)'}), '((maes_naive, maes), axis=0)\n', (9717, 9745), True, 'import numpy as np\n'), ((10262, 10313), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-51-7"""', '"""%G-%V-%u"""'], {}), "('2020-51-7', '%G-%V-%u')\n", (10288, 10313), False, 'import datetime\n'), ((10334, 10385), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2021-25-7"""', '"""%G-%V-%u"""'], {}), "('2021-25-7', '%G-%V-%u')\n", (10360, 10385), False, 'import datetime\n'), ((19746, 19756), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19754, 19756), True, 'from matplotlib import pyplot as plt\n'), ((1564, 1612), 'numpy.sqrt', 'np.sqrt', (['filtered_results.params[n_regions * 3:]'], {}), '(filtered_results.params[n_regions * 3:])\n', (1571, 1612), True, 'import numpy as np\n'), ((1857, 1905), 'numpy.sqrt', 'np.sqrt', (['filtered_results.params[n_regions * 3:]'], {}), '(filtered_results.params[n_regions * 3:])\n', (1864, 1905), True, 'import numpy as np\n'), ((3464, 3511), 'os.path.join', 'os.path.join', (['save_path', '"""states_filtered.xlsx"""'], {}), "(save_path, 'states_filtered.xlsx')\n", (3476, 3511), False, 'import os\n'), ((3553, 3600), 'os.path.join', 'os.path.join', (['save_path', '"""states_smoothed.xlsx"""'], {}), "(save_path, 'states_smoothed.xlsx')\n", (3565, 3600), False, 'import os\n'), ((7505, 7529), 'numpy.mean', 'np.mean', (['maes[:, region]'], {}), '(maes[:, region])\n', (7512, 7529), True, 'import numpy as np\n'), ((7532, 7562), 'numpy.mean', 'np.mean', (['maes_naive[:, region]'], {}), '(maes_naive[:, region])\n', (7539, 7562), True, 'import numpy as np\n'), ((7588, 7614), 'numpy.median', 'np.median', (['maes[:, region]'], {}), '(maes[:, region])\n', (7597, 7614), True, 'import numpy as np\n'), ((7617, 7649), 'numpy.median', 'np.median', (['maes_naive[:, region]'], {}), '(maes_naive[:, region])\n', (7626, 7649), True, 'import numpy as np\n'), ((9613, 9642), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(1)', '(7)'], {}), '(2018, 1, 7)\n', (9630, 9642), False, 'import datetime\n'), ((9892, 9922), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(1)', '(14)'], {}), '(2018, 1, 14)\n', (9909, 9922), False, 'import datetime\n'), ((16864, 16908), 'os.path.join', 'os.path.join', (['save_path', "(name + '_stats.csv')"], {}), "(save_path, name + '_stats.csv')\n", (16876, 16908), False, 'import os\n'), ((17972, 18017), 'os.path.join', 'os.path.join', (['save_path', "(name + '_params.csv')"], {}), "(save_path, name + '_params.csv')\n", (17984, 18017), False, 'import os\n'), ((3931, 3982), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-51-7"""', '"""%G-%V-%u"""'], {}), "('2020-51-7', '%G-%V-%u')\n", (3957, 3982), False, 'import datetime\n'), ((4014, 4065), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2021-25-7"""', '"""%G-%V-%u"""'], {}), "('2021-25-7', '%G-%V-%u')\n", (4040, 4065), False, 'import datetime\n'), ((10417, 10468), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-11-7"""', '"""%G-%V-%u"""'], {}), "('2020-11-7', '%G-%V-%u')\n", (10443, 10468), False, 'import datetime\n'), ((10470, 10521), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-27-7"""', '"""%G-%V-%u"""'], {}), "('2020-27-7', '%G-%V-%u')\n", (10496, 10521), False, 'import datetime\n'), ((16160, 16198), 'datetime.timedelta', 'datetime.timedelta', ([], {'weeks': '(interval * i)'}), 
'(weeks=interval * i)\n', (16178, 16198), False, 'import datetime\n'), ((18912, 18924), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18922, 18924), True, 'from matplotlib import pyplot as plt\n'), ((18941, 18965), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['info[i][1]'], {}), '(info[i][1])\n', (18953, 18965), True, 'from matplotlib import pyplot as plt\n'), ((18982, 19014), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'data[index][0]', '"""b"""'], {}), "(t, data[index][0], 'b')\n", (18990, 19014), True, 'from matplotlib import pyplot as plt\n'), ((19031, 19097), 'matplotlib.pyplot.plot', 'plt.plot', (['t', '(data[index][1] + data[index][2] * data[index][3])', '"""r"""'], {}), "(t, data[index][1] + data[index][2] * data[index][3], 'r')\n", (19039, 19097), True, 'from matplotlib import pyplot as plt\n'), ((19114, 19180), 'matplotlib.pyplot.plot', 'plt.plot', (['t', '(data[index][1] - data[index][2] * data[index][3])', '"""r"""'], {}), "(t, data[index][1] - data[index][2] * data[index][3], 'r')\n", (19122, 19180), True, 'from matplotlib import pyplot as plt\n'), ((19197, 19207), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19205, 19207), True, 'from matplotlib import pyplot as plt\n'), ((19422, 19434), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19432, 19434), True, 'from matplotlib import pyplot as plt\n'), ((19451, 19475), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['info[i][1]'], {}), '(info[i][1])\n', (19463, 19475), True, 'from matplotlib import pyplot as plt\n'), ((19492, 19524), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'data[index][0]', '"""b"""'], {}), "(t, data[index][0], 'b')\n", (19500, 19524), True, 'from matplotlib import pyplot as plt\n'), ((19541, 19607), 'matplotlib.pyplot.plot', 'plt.plot', (['t', '(data[index][1] + data[index][2] * data[index][3])', '"""r"""'], {}), "(t, data[index][1] + data[index][2] * data[index][3], 'r')\n", (19549, 19607), True, 'from matplotlib import pyplot as plt\n'), ((19624, 19690), 'matplotlib.pyplot.plot', 'plt.plot', (['t', '(data[index][1] - data[index][2] * data[index][3])', '"""r"""'], {}), "(t, data[index][1] - data[index][2] * data[index][3], 'r')\n", (19632, 19690), True, 'from matplotlib import pyplot as plt\n'), ((16248, 16286), 'datetime.timedelta', 'datetime.timedelta', ([], {'weeks': '(interval * i)'}), '(weeks=interval * i)\n', (16266, 16286), False, 'import datetime\n'), ((4992, 5043), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-50-7"""', '"""%G-%V-%u"""'], {}), "('2020-50-7', '%G-%V-%u')\n", (5018, 5043), False, 'import datetime\n'), ((11856, 11907), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-50-7"""', '"""%G-%V-%u"""'], {}), "('2020-50-7', '%G-%V-%u')\n", (11882, 11907), False, 'import datetime\n'), ((12685, 12736), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-50-7"""', '"""%G-%V-%u"""'], {}), "('2020-50-7', '%G-%V-%u')\n", (12711, 12736), False, 'import datetime\n')]
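A sketch of the train-then-forecast loop these helpers support. The .fit() call assumes SSMS exposes the usual statsmodels MLEModel interface; the data frames, column names and output paths are illustrative assumptions, while first=152 / last=189 matches the hard-coded 38-week test window inside forecast_error.

# Hypothetical workflow; train_df, full_df and the column names are assumptions.
train_results = SSMS(train_df, group_name='Region', y_name='Sales',
                     z_names=['StringencyIndex'], cov_rest='GC').fit()  # .fit() assumed from MLEModel
print_results(train_results, save_path='output', name='ssms_train')

# Re-filter the extended (train + test) data with the fixed training parameters ...
new_model, new_results = prepare_forecast(train_results, full_df)
regions = new_model.group_names  # same order as in the fitted model, as forecast_error requires
# ... and score the one-step-ahead forecasts over the test window.
forecast_error(new_results, regions, save_path='output', first=152, last=189,
               ci=True, tp='one_step_ahead_forecast')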
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Jul 5 22:43:38 2019 @author: anhtu """ from __future__ import division import numpy as np import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import cv2 from util import * class EmptyLayer(nn.Module): def __init__(self): super(EmptyLayer, self).__init__() class YoloLayer(nn.Module): def __init__(self, anchors): super(YoloLayer, self).__init__() self.anchors = anchors def parse_cfg(filename): """ Inputs: - cfg's file name, e.g. 'yolov3.cfg' Returns: - a list of NN blocks, each block is represented as a dictionary """ file = open(filename, 'r') lines = file.read().split('\n') lines = [x.rstrip().lstrip() for x in lines if len(x) > 0 and x[0] != '#'] blocks = [] block = {} for line in lines: if line[0] == '[': if len(block) != 0: blocks.append(block) block = {} block['type'] = line[1:-1].rstrip() else: s = line.split('=') block[s[0].lstrip().rstrip()] = s[1].lstrip().rstrip() blocks.append(block) return blocks def create_modules(blocks): net_info = blocks[0] # [net] contains the info of the entire network module_list = nn.ModuleList() prev_filters = 3 # initialized with first number of channels (3 - R, G, B) output_filters = [] for idx, layer in enumerate(blocks[1:]): module = nn.Sequential() # CONV layer if layer['type'] == 'convolutional': activation = layer['activation'] try: batchnorm = int(layer['batch_normalize']) bias = False except: batchnorm = 0 bias = True filters = int(layer['filters']) kernel_size = int(layer['size']) stride = int(layer['stride']) pad = int(layer['pad']) padding = None # pad & padding are different if pad == 0: padding = 0 else: padding = kernel_size // 2 conv = nn.Conv2d(prev_filters, filters, kernel_size, stride, padding, bias=bias) module.add_module('conv_{}'.format(idx), conv) if batchnorm: bn = nn.BatchNorm2d(filters) module.add_module('batch_norm_{}'.format(idx), bn) if activation == 'leaky': leaky = nn.LeakyReLU(0.1) # 0.1 according to YOLOv1 paper module.add_module('leaky_{}'.format(idx), leaky) # Upsample layer elif layer['type'] == 'upsample': stride = int(layer['stride']) upsample = nn.Upsample(scale_factor=2, mode='nearest') module.add_module('upsample_{}'.format(idx), upsample) # Concatenation layer elif layer['type'] == 'route': layer['layers'] = layer['layers'].split(',') start_layer = int(layer['layers'][0]) try: end_layer = int(layer['layers'][1]) except: end_layer = 0 route = EmptyLayer() module.add_module('route_{}'.format(idx), route) if end_layer == 0: filters = output_filters[start_layer + idx] else: filters = output_filters[start_layer + idx] + output_filters[end_layer] # Shortcut layer (skip connection) elif layer['type'] == 'shortcut': shortcut = EmptyLayer() module.add_module('shortcut_{}'.format(idx), shortcut) # YOLO layer elif layer["type"] == "yolo": mask = layer["mask"].split(",") mask = [int(x) for x in mask] anchors = layer["anchors"].split(",") anchors = [int(a) for a in anchors] anchors = [(anchors[i], anchors[i+1]) for i in range(0, len(anchors),2)] anchors = [anchors[i] for i in mask] detection = YoloLayer(anchors) module.add_module("Detection_{}".format(idx), detection) module_list.append(module) prev_filters = filters output_filters.append(filters) return (net_info, module_list) ## create network class Net(nn.Module): def __init__(self, filename): super(Net, self).__init__() self.blocks = parse_cfg(filename) self.net_info, self.module_list = create_modules(self.blocks) def __str__(self): return ('** Information about 
the network: ' + str(self.net_info) + '\n\n' +
                '** All layers of the network: \n' + str(self.module_list))

    def forward(self, x):
        layers = self.blocks[1:] # except the 'net' module
        outputs = {}
        yolo_calc = 0

        for idx, layer in enumerate(layers):
            if layer['type'] == 'convolutional' or layer['type'] == 'upsample':
                x = self.module_list[idx](x)

            elif layer['type'] == 'route':
                l = [int(x) for x in layer['layers']]
                if len(l) == 1:
                    x = outputs[idx + l[0]]
                else:
                    out1 = outputs[idx + l[0]]
                    out2 = outputs[l[1]]
                    x = torch.cat([out1, out2], dim=1)

            elif layer['type'] == 'shortcut':
                x = outputs[int(layer['from'])+idx] + outputs[idx-1]

            elif layer['type'] == 'yolo':
                anchors = self.module_list[idx][0].anchors
                inp_dims = (int(self.net_info['height']), int(self.net_info['width']))
                num_classes = int(layer['classes'])

                # x has shape (batch_size, (4+1+80)*3, N, N)
                # in which, 4: bbox offsets, 1: objectness score, 80: classes, 3: num of boxes, N: box's dimension
                x = x.data # just need the data, separate from autograd
                x = process_prediction(x, inp_dims, anchors, num_classes)

                if not yolo_calc: # if no collector has been initialised
                    detections = x
                    yolo_calc = 1
                else:
                    detections = torch.cat((detections, x), 1)

            outputs[idx] = x

        return detections

    def load_weights(self, weightfile):
        fp = open(weightfile, "rb")
        track = 0 # track is the total number of params which have been already used

        # The first 5 values are header information
        # 1. Major version number
        # 2. Minor Version Number
        # 3. Subversion number
        # 4,5. Images seen by the network (during training)
        header = np.fromfile(fp, dtype = np.int32, count = 5)
        params = np.fromfile(fp, dtype = np.float32)
        fp.close()

        for i in range(len(self.module_list)):
            block = self.blocks[i+1] # ignore the first net info block

            if block['type'] == 'convolutional':
                try:
                    batchnorm = int(block['batch_normalize'])
                except:
                    batchnorm = 0

                model = self.module_list[i]
                # CNN module contains: CNN, batchnorm, leaky ReLU (no weights -> ignore)
                conv = model[0]

                if batchnorm:
                    bn = model[1]
                    num_bn_params = bn.weight.numel()

                    # get parameters, then reshape to the same shape as parameter tensors
                    bn_bias = torch.from_numpy(params[track:track+num_bn_params]).view_as(bn.bias)
                    track += num_bn_params
                    bn_weights = torch.from_numpy(params[track:track+num_bn_params]).view_as(bn.weight)
                    track += num_bn_params
                    bn_running_mean = torch.from_numpy(params[track:track+num_bn_params]).view_as(bn.running_mean)
                    track += num_bn_params
                    bn_running_var = torch.from_numpy(params[track:track+num_bn_params]).view_as(bn.running_var)
                    track += num_bn_params

                    # copy values into parameter tensors
                    bn.bias.data.copy_(bn_bias)
                    bn.weight.data.copy_(bn_weights)
                    bn.running_mean.data.copy_(bn_running_mean)
                    bn.running_var.data.copy_(bn_running_var)
                else:
                    num_conv_bias = conv.bias.numel()
                    conv_bias = torch.from_numpy(params[track:track+num_conv_bias]).view_as(conv.bias)
                    track += num_conv_bias
                    conv.bias.data.copy_(conv_bias)

                num_conv_weights = conv.weight.numel()
                conv_weights = torch.from_numpy(params[track:track+num_conv_weights]).view_as(conv.weight)
                track += num_conv_weights
                conv.weight.data.copy_(conv_weights)

        print('* Weights have been successfully loaded!\
               \n- Number of model\'s params: %d\
               \n- Number of cfg\'s params: %d' %(track, len(params)))

def get_test_input():
    img = cv2.imread("dog-cycle-car.png")
    img = cv2.resize(img, (416,416))          # Resize to the input dimension
    img_ = img[:,:,::-1].transpose((2,0,1))  # BGR -> RGB | H x W x C -> C x H x W
    img_ = img_[np.newaxis,:,:,:]/255.0      # Add a channel at 0 (for batch) | Normalise
    img_ =
torch.from_numpy(img_).float() #Convert to float return img_ #model = None #model = Net("cfg/yolov3.cfg") ##model.load_weights('yolov3.weights') #inp = get_test_input() #pred = model(inp)
[ "torch.nn.Sequential", "numpy.fromfile", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.cat", "cv2.imread", "torch.nn.BatchNorm2d", "torch.nn.Upsample", "torch.nn.LeakyReLU", "cv2.resize", "torch.from_numpy" ]
[((1369, 1384), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1382, 1384), True, 'import torch.nn as nn\n'), ((9441, 9472), 'cv2.imread', 'cv2.imread', (['"""dog-cycle-car.png"""'], {}), "('dog-cycle-car.png')\n", (9451, 9472), False, 'import cv2\n'), ((9483, 9510), 'cv2.resize', 'cv2.resize', (['img', '(416, 416)'], {}), '(img, (416, 416))\n', (9493, 9510), False, 'import cv2\n'), ((1556, 1571), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (1569, 1571), True, 'import torch.nn as nn\n'), ((6887, 6927), 'numpy.fromfile', 'np.fromfile', (['fp'], {'dtype': 'np.int32', 'count': '(5)'}), '(fp, dtype=np.int32, count=5)\n', (6898, 6927), True, 'import numpy as np\n'), ((6949, 6982), 'numpy.fromfile', 'np.fromfile', (['fp'], {'dtype': 'np.float32'}), '(fp, dtype=np.float32)\n', (6960, 6982), True, 'import numpy as np\n'), ((2260, 2333), 'torch.nn.Conv2d', 'nn.Conv2d', (['prev_filters', 'filters', 'kernel_size', 'stride', 'padding'], {'bias': 'bias'}), '(prev_filters, filters, kernel_size, stride, padding, bias=bias)\n', (2269, 2333), True, 'import torch.nn as nn\n'), ((9736, 9758), 'torch.from_numpy', 'torch.from_numpy', (['img_'], {}), '(img_)\n', (9752, 9758), False, 'import torch\n'), ((2440, 2463), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['filters'], {}), '(filters)\n', (2454, 2463), True, 'import torch.nn as nn\n'), ((2594, 2611), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (2606, 2611), True, 'import torch.nn as nn\n'), ((2858, 2901), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (2869, 2901), True, 'import torch.nn as nn\n'), ((5464, 5494), 'torch.cat', 'torch.cat', (['[out1, out2]'], {'dim': '(1)'}), '([out1, out2], dim=1)\n', (5473, 5494), False, 'import torch\n'), ((9034, 9090), 'torch.from_numpy', 'torch.from_numpy', (['params[track:track + num_conv_weights]'], {}), '(params[track:track + num_conv_weights])\n', (9050, 9090), False, 'import torch\n'), ((7754, 7807), 'torch.from_numpy', 'torch.from_numpy', (['params[track:track + num_bn_params]'], {}), '(params[track:track + num_bn_params])\n', (7770, 7807), False, 'import torch\n'), ((7899, 7952), 'torch.from_numpy', 'torch.from_numpy', (['params[track:track + num_bn_params]'], {}), '(params[track:track + num_bn_params])\n', (7915, 7952), False, 'import torch\n'), ((8051, 8104), 'torch.from_numpy', 'torch.from_numpy', (['params[track:track + num_bn_params]'], {}), '(params[track:track + num_bn_params])\n', (8067, 8104), False, 'import torch\n'), ((8208, 8261), 'torch.from_numpy', 'torch.from_numpy', (['params[track:track + num_bn_params]'], {}), '(params[track:track + num_bn_params])\n', (8224, 8261), False, 'import torch\n'), ((8744, 8797), 'torch.from_numpy', 'torch.from_numpy', (['params[track:track + num_conv_bias]'], {}), '(params[track:track + num_conv_bias])\n', (8760, 8797), False, 'import torch\n'), ((6407, 6436), 'torch.cat', 'torch.cat', (['(detections, x)', '(1)'], {}), '((detections, x), 1)\n', (6416, 6436), False, 'import torch\n')]
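Spelling out the commented-out smoke test at the bottom of the script: the cfg/weights paths are the standard Darknet release files, and the printed shape assumes util.process_prediction returns the conventional (batch, boxes, 4+1+classes) layout -- both are assumptions here, not facts from the source.

# Hypothetical smoke test; file locations and the expected shape are assumptions.
model = Net("cfg/yolov3.cfg")
model.load_weights("yolov3.weights")   # official pre-trained Darknet weights
inp = get_test_input()                 # (1, 3, 416, 416) tensor built from dog-cycle-car.png
pred = model(inp)
print(pred.shape)                      # torch.Size([1, 10647, 85]) for 416x416 COCO: 3*(13**2 + 26**2 + 52**2) boxes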
from typing import Tuple from vyper.exceptions import TypeMismatch from vyper.old_codegen.abi import ( ABI_Tuple, abi_encode, abi_type_of, abi_type_of2, lll_tuple_from_args, ) from vyper.old_codegen.context import Context from vyper.old_codegen.keccak256_helper import keccak256_helper from vyper.old_codegen.lll_node import LLLnode from vyper.old_codegen.parser_utils import getpos, unwrap_location from vyper.old_codegen.types.types import ( BaseType, ByteArrayLike, get_type_for_exact_size, ) from vyper.semantics.types import Event # docs.soliditylang.org/en/v0.8.6/abi-spec.html#indexed-event-encoding def _encode_log_topics(expr, event_id, arg_nodes, context): topics = [event_id] for arg in arg_nodes: if isinstance(arg.typ, BaseType): value = unwrap_location(arg) elif isinstance(arg.typ, ByteArrayLike): value = keccak256_helper(expr, [arg], kwargs=None, context=context) else: # TODO block at higher level raise TypeMismatch("Event indexes may only be value types", expr) topics.append(value) return topics def _gas_bound(num_topics, data_maxlen): LOG_BASE_GAS = 375 GAS_PER_TOPIC = 375 GAS_PER_LOG_BYTE = 8 return LOG_BASE_GAS + GAS_PER_TOPIC * num_topics + GAS_PER_LOG_BYTE * data_maxlen def allocate_buffer_for_log(event: Event, context: Context) -> Tuple[int, int]: """Allocate a buffer to ABI-encode the non-indexed (data) arguments into This must be done BEFORE compiling the event arguments to LLL, registering the buffer with the `context` variable (otherwise any function calls inside the event literal will clobber the buffer). """ arg_types = list(event.arguments.values()) # the types of the arguments # remove non-data args, as those don't go into the buffer arg_types = [arg_t for arg_t, is_index in zip(arg_types, event.indexed) if not is_index] # all args get encoded as one big tuple abi_t = ABI_Tuple([abi_type_of2(arg_t) for arg_t in arg_types]) # make a buffer for the encoded data output buf_maxlen = abi_t.size_bound() t = get_type_for_exact_size(buf_maxlen) return context.new_internal_variable(t), buf_maxlen # docs.soliditylang.org/en/v0.8.6/abi-spec.html#events def lll_node_for_log(expr, buf, _maxlen, event, topic_nodes, data_nodes, context): """Taking LLL nodes as arguments, create the LLL node for a Log statement. Arguments: expr: The original Log expression buf: A pre-allocated buffer for the output _maxlen: The length of the buffer, for sanity checking event: The Event type topic_nodes: list of LLLnodes which calculate the event topics data_nodes: list of LLLnodes which calculate the event data context: current memory/frame context """ _pos = getpos(expr) topics = _encode_log_topics(expr, event.event_id, topic_nodes, context) data = lll_tuple_from_args(data_nodes) # sanity check, abi size_bound is the same calculated both ways assert abi_type_of(data.typ).size_bound() == _maxlen, "bad buffer size" # encode_data is an LLLnode which, cleverly, both encodes the data # and returns the length of the encoded data as a stack item. encode_data = abi_encode(buf, data, pos=_pos, returns_len=True) assert len(topics) <= 4, "too many topics" # sanity check log_opcode = "log" + str(len(topics)) return LLLnode.from_list( [log_opcode, buf, encode_data] + topics, add_gas_estimate=_gas_bound(len(topics), _maxlen), typ=None, pos=_pos, annotation=f"LOG event {event.signature}", )
[ "vyper.old_codegen.keccak256_helper.keccak256_helper", "vyper.old_codegen.abi.lll_tuple_from_args", "vyper.old_codegen.abi.abi_type_of2", "vyper.old_codegen.types.types.get_type_for_exact_size", "vyper.old_codegen.abi.abi_encode", "vyper.exceptions.TypeMismatch", "vyper.old_codegen.abi.abi_type_of", "vyper.old_codegen.parser_utils.unwrap_location", "vyper.old_codegen.parser_utils.getpos" ]
[((2162, 2197), 'vyper.old_codegen.types.types.get_type_for_exact_size', 'get_type_for_exact_size', (['buf_maxlen'], {}), '(buf_maxlen)\n', (2185, 2197), False, 'from vyper.old_codegen.types.types import BaseType, ByteArrayLike, get_type_for_exact_size\n'), ((2865, 2877), 'vyper.old_codegen.parser_utils.getpos', 'getpos', (['expr'], {}), '(expr)\n', (2871, 2877), False, 'from vyper.old_codegen.parser_utils import getpos, unwrap_location\n'), ((2967, 2998), 'vyper.old_codegen.abi.lll_tuple_from_args', 'lll_tuple_from_args', (['data_nodes'], {}), '(data_nodes)\n', (2986, 2998), False, 'from vyper.old_codegen.abi import ABI_Tuple, abi_encode, abi_type_of, abi_type_of2, lll_tuple_from_args\n'), ((3300, 3349), 'vyper.old_codegen.abi.abi_encode', 'abi_encode', (['buf', 'data'], {'pos': '_pos', 'returns_len': '(True)'}), '(buf, data, pos=_pos, returns_len=True)\n', (3310, 3349), False, 'from vyper.old_codegen.abi import ABI_Tuple, abi_encode, abi_type_of, abi_type_of2, lll_tuple_from_args\n'), ((815, 835), 'vyper.old_codegen.parser_utils.unwrap_location', 'unwrap_location', (['arg'], {}), '(arg)\n', (830, 835), False, 'from vyper.old_codegen.parser_utils import getpos, unwrap_location\n'), ((2024, 2043), 'vyper.old_codegen.abi.abi_type_of2', 'abi_type_of2', (['arg_t'], {}), '(arg_t)\n', (2036, 2043), False, 'from vyper.old_codegen.abi import ABI_Tuple, abi_encode, abi_type_of, abi_type_of2, lll_tuple_from_args\n'), ((906, 965), 'vyper.old_codegen.keccak256_helper.keccak256_helper', 'keccak256_helper', (['expr', '[arg]'], {'kwargs': 'None', 'context': 'context'}), '(expr, [arg], kwargs=None, context=context)\n', (922, 965), False, 'from vyper.old_codegen.keccak256_helper import keccak256_helper\n'), ((1039, 1098), 'vyper.exceptions.TypeMismatch', 'TypeMismatch', (['"""Event indexes may only be value types"""', 'expr'], {}), "('Event indexes may only be value types', expr)\n", (1051, 1098), False, 'from vyper.exceptions import TypeMismatch\n'), ((3079, 3100), 'vyper.old_codegen.abi.abi_type_of', 'abi_type_of', (['data.typ'], {}), '(data.typ)\n', (3090, 3100), False, 'from vyper.old_codegen.abi import ABI_Tuple, abi_encode, abi_type_of, abi_type_of2, lll_tuple_from_args\n')]
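For a feel of the numbers, _gas_bound is just the EVM's static LOG cost model; restated standalone for a two-topic log (event id plus one indexed argument) with a 64-byte data section. The example sizes are illustrative.

# Standalone restatement of the _gas_bound formula above.
LOG_BASE_GAS, GAS_PER_TOPIC, GAS_PER_LOG_BYTE = 375, 375, 8
num_topics, data_maxlen = 2, 64   # illustrative sizes
print(LOG_BASE_GAS + GAS_PER_TOPIC * num_topics + GAS_PER_LOG_BYTE * data_maxlen)  # 1637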
import json import pytest import re import yaml # Functions # ============================================================================== # Common checks def common_tests(data, result): # General assert assert result.exit_code == 0 assert result.exception is None # Structure assert assert_root_directory(data, result) assert_directories(result) assert_directories_with_main_file(result) assert_testing_files(result) assert_license_file(result) assert_readme_file(data, result) assert_meta_yaml_file(data, result) assert_github_actions_yaml_file(data, result) # Check root project def assert_root_directory(data, result): assert result.project.basename == 'ansible-role-{}'.format( data.get('ansible_role_name')) assert result.project.isdir() # Check directories def assert_directories(result): # Root directories project_directories = ['defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'tests', 'vars', 'vars/os_distribution', 'vars/os_family', 'molecule', 'molecule/default'] # Check project directories for directory in project_directories: assert result.project.join(directory).isdir() # Check directories with main.yml file def assert_directories_with_main_file(result): # Root directories contains main.yml file directories_with_main_file = ['defaults', 'handlers', 'meta', 'tasks', 'vars'] # Check project directories with main.yml file for directory in directories_with_main_file: assert result.project.join(directory).isdir() assert result.project.join(directory + '/main.yml').isfile() # Check test files def assert_testing_files(result): # All files about tests test_files = [ 'molecule/default/Dockerfile.j2', 'molecule/default/converge.yml', 'molecule/default/molecule.yml', 'molecule/default/tests/test_installation.py', 'tests/test_filter_plugins.py', '.github/workflows/ci.yml'] # Check project directories with main.yml file for test_file in test_files: assert result.project.join(test_file).isfile() # Check tasks/main.yml file def assert_tasks_main_file(result): task_file = result.project.join('tasks/main.yml') task_lines = task_file.readlines(cr=False) assert task_file.isfile() assert "- 'role::foobar::init'" in task_lines assert ( 'include: "{{ role_path }}/tasks/manage_variables.yml"') in task_lines # Check tasks/manage_variables.yml file def assert_tasks_manage_vars_file(result): task_file = result.project.join('tasks/manage_variables.yml') task_lines = task_file.readlines(cr=False) assert task_file.isfile() assert "register: 'foobar_check_os_release_vars'" in task_lines assert ( 'path: "{{ role_path }}/vars/os_family/' '{{ ansible_os_family | lower }}.yml"') in task_lines # Check license file def assert_license_file(result): license_file = result.project.join('LICENSE') license_lines = license_file.readlines(cr=False) assert license_file.isfile() assert 'The MIT License (MIT)' in license_lines # Check README file def assert_readme_file(data, result): readme_file = result.project.join('README.md') readme_lines = readme_file.readlines(cr=False) # Regex used to check galaxy role name RE = re.compile(r'^\s*-\s*\{\s*role\s*:\s*%s\.%s\s*\}\s*$' % ( data.get('author_github_username'), data.get('ansible_role_name'))) assert readme_file.isfile() assert 'Install %s package.' 
% data.get('ansible_role_name') \ in readme_lines assert len([ True for line in readme_lines if RE.match(line)]) == 1 # Check meta/main.yml file def assert_meta_yaml_file(data, result): meta_file = result.project.join('meta/main.yml') assert meta_file.isfile() # Test if yaml file is valid with open(str(meta_file.realpath()), 'r') as content: assert yaml.load(content, Loader=yaml.SafeLoader) # Check .github/workflows/ci.yml file def assert_github_actions_yaml_file(data, result): action_file = result.project.join('.github/workflows/ci.yml') assert action_file.isfile() # Test if yaml file is valid with open(str(action_file.realpath()), 'r') as content: assert yaml.load(content, Loader=yaml.SafeLoader) # Tests # ============================================================================== # Template test @pytest.mark.parametrize('data_filename, role_name', [ ('./cookiecutter.json', 'role_name'), ('./tests/test_01.json', 'test_01'), ('./tests/test_02.json', 'test_02'), ('./tests/test_03.json', 'test_03'), ('./tests/test_04.json', 'test_04') ]) def test_json_values(cookies, data_filename, role_name): # Load data file with open(data_filename) as data_file: data = json.load(data_file) # Create project result = cookies.bake(extra_context=data) # Common tests assert data.get('ansible_role_name') == role_name common_tests(data, result)
[ "pytest.mark.parametrize", "yaml.load", "json.load" ]
[((4593, 4845), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_filename, role_name"""', "[('./cookiecutter.json', 'role_name'), ('./tests/test_01.json', 'test_01'),\n ('./tests/test_02.json', 'test_02'), ('./tests/test_03.json', 'test_03'\n ), ('./tests/test_04.json', 'test_04')]"], {}), "('data_filename, role_name', [('./cookiecutter.json',\n 'role_name'), ('./tests/test_01.json', 'test_01'), (\n './tests/test_02.json', 'test_02'), ('./tests/test_03.json', 'test_03'),\n ('./tests/test_04.json', 'test_04')])\n", (4616, 4845), False, 'import pytest\n'), ((4099, 4141), 'yaml.load', 'yaml.load', (['content'], {'Loader': 'yaml.SafeLoader'}), '(content, Loader=yaml.SafeLoader)\n', (4108, 4141), False, 'import yaml\n'), ((4441, 4483), 'yaml.load', 'yaml.load', (['content'], {'Loader': 'yaml.SafeLoader'}), '(content, Loader=yaml.SafeLoader)\n', (4450, 4483), False, 'import yaml\n'), ((4992, 5012), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (5001, 5012), False, 'import json\n')]
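The README check above hinges on the galaxy-style role reference regex; a standalone demonstration with made-up values substituted for the two template variables:

import re

# Same pattern as in assert_readme_file; 'octocat' and 'test_01' are made-up values.
RE = re.compile(r'^\s*-\s*\{\s*role\s*:\s*%s\.%s\s*\}\s*$' % ('octocat', 'test_01'))
assert RE.match('  - { role: octocat.test_01 }')
assert not RE.match('- { role: someone_else.test_01 }')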
import signal import time from zeroos.orchestrator.sal import templates from js9 import j class InfluxDB(): def __init__(self, container, ip, port, rpcport): self.container = container self.ip = ip self.port = port # Only client-server port is forwarded self.rpcport = rpcport self._client = None @property def client(self): if not self._client: self._client = j.clients.influxdb.get(self.ip, port=self.port) return self._client def apply_config(self): influx_conf = templates.render('influxdb.conf', ip=self.ip, port=self.port, rpcport=self.rpcport) self.container.upload_content('/etc/influxdb/influxdb.conf', influx_conf) def is_running(self): for process in self.container.client.process.list(): if 'influxd' in process['cmdline']: try: self.list_databases() except: return False, process['pid'] else: return True, process['pid'] return False, None def stop(self, timeout=30): is_running, pid = self.is_running() if not is_running: return self.container.client.process.kill(pid, signal.SIGTERM) start = time.time() end = start + timeout is_running, _ = self.is_running() while is_running and time.time() < end: time.sleep(1) is_running, _ = self.is_running() if is_running: raise RuntimeError('Failed to stop influxd.') if self.container.node.client.nft.rule_exists(self.port): self.container.node.client.nft.drop_port(self.port) def start(self, timeout=30): is_running, _ = self.is_running() if is_running: return self.apply_config() if not self.container.node.client.nft.rule_exists(self.port): self.container.node.client.nft.open_port(self.port) self.container.client.system('influxd') time.sleep(1) start = time.time() end = start + timeout is_running, _ = self.is_running() while not is_running and time.time() < end: time.sleep(1) is_running, _ = self.is_running() if not is_running: if self.container.node.client.nft.rule_exists(self.port): self.container.node.client.nft.drop_port(self.port) raise RuntimeError('Failed to start influxd.') def list_databases(self): return self.client.get_list_database() def create_databases(self, databases): for database in databases: self.client.create_database(database) def drop_databases(self, databases): for database in databases: self.client.drop_database(database)
[ "js9.j.clients.influxdb.get", "time.sleep", "zeroos.orchestrator.sal.templates.render", "time.time" ]
[((576, 664), 'zeroos.orchestrator.sal.templates.render', 'templates.render', (['"""influxdb.conf"""'], {'ip': 'self.ip', 'port': 'self.port', 'rpcport': 'self.rpcport'}), "('influxdb.conf', ip=self.ip, port=self.port, rpcport=self.\n rpcport)\n", (592, 664), False, 'from zeroos.orchestrator.sal import templates\n'), ((1354, 1365), 'time.time', 'time.time', ([], {}), '()\n', (1363, 1365), False, 'import time\n'), ((2110, 2123), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2120, 2123), False, 'import time\n'), ((2141, 2152), 'time.time', 'time.time', ([], {}), '()\n', (2150, 2152), False, 'import time\n'), ((449, 496), 'js9.j.clients.influxdb.get', 'j.clients.influxdb.get', (['self.ip'], {'port': 'self.port'}), '(self.ip, port=self.port)\n', (471, 496), False, 'from js9 import j\n'), ((1498, 1511), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1508, 1511), False, 'import time\n'), ((2289, 2302), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2299, 2302), False, 'import time\n'), ((1467, 1478), 'time.time', 'time.time', ([], {}), '()\n', (1476, 1478), False, 'import time\n'), ((2258, 2269), 'time.time', 'time.time', ([], {}), '()\n', (2267, 2269), False, 'import time\n')]
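A sketch of the lifecycle this wrapper is built for; the container object comes from the surrounding zero-os orchestrator, and the address/ports shown are only illustrative defaults.

# Hypothetical usage; `container` must be a zero-os container wrapper as used above.
influx = InfluxDB(container, ip='172.17.0.2', port=8086, rpcport=8088)
influx.start(timeout=30)                 # render influxdb.conf, open the nft port, wait for readiness
influx.create_databases(['statistics'])
print(influx.list_databases())
influx.stop()                            # SIGTERM, wait for exit, then drop the nft port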
# Youtube Trending Feed Reader
# Written by XZANATOL
from optparse import OptionParser
from pymongo import MongoClient
import pandas as pd
import sys

# Help menu
usage = """
<Script> [Options]
[Options]
    -h, --help     Shows this help message and exit
    -c, --csv      Reads data from "Youtube.csv" file
    -m, --mongo    Reads data from MongoDB
"""

# Load args
parser = OptionParser()
parser.add_option("-c", "--csv", action="store_true", dest="csv",
                  help='Reads data from the "Youtube.csv" file.')
parser.add_option("-m", "--mongo", action="store_true", dest="mongo",
                  help='Reads data from MongoDB.')


def read_mongo():
    # Connect to service
    client = MongoClient("127.0.0.1")
    # Create an object
    db = client.Youtube.trending
    return db.find()  # Return all values


def read_csv():
    # Read the database
    df = pd.read_csv("Youtube.csv")
    data = []
    for index, row in df.iterrows():
        data.append(row)  # Append each row to the list
    return data  # Return all values


def display(data):
    i = 0
    for card in data:
        # For every 10 cards print section
        if i % 10 == 0:
            c = input("Show Section? [y/n] > ")
            if c.lower() == "y":
                print("***********************************")
                print(f"""{card["section"]} section""")
                print("***********************************")
            else:
                sys.exit()  # User has had enough of reading
        i += 1  # Increment
        print("Title:", card["title"])
        print("Link:", card["link"])
        print("Channel:", card["channel"])
        print("Views:", card["views"])
        print("Time:", card["date"])
        print("==============================================")


if __name__ == "__main__":
    (options, args) = parser.parse_args()
    # Flags
    csv = options.csv
    mongo = options.mongo

    # Validate flags
    if not (bool(csv) ^ bool(mongo)):  # XNOR gate: exactly one flag must be set
        print(usage)
        sys.exit()

    if mongo:
        data = read_mongo()
    else:
        data = read_csv()

    display(data)
[ "pymongo.MongoClient", "sys.exit", "pandas.read_csv", "optparse.OptionParser" ]
[((377, 391), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (389, 391), False, 'from optparse import OptionParser\n'), ((717, 741), 'pymongo.MongoClient', 'MongoClient', (['"""127.0.0.1"""'], {}), "('127.0.0.1')\n", (728, 741), False, 'from pymongo import MongoClient\n'), ((886, 912), 'pandas.read_csv', 'pd.read_csv', (['"""Youtube.csv"""'], {}), "('Youtube.csv')\n", (897, 912), True, 'import pandas as pd\n'), ((2038, 2048), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2046, 2048), False, 'import sys\n'), ((1477, 1487), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1485, 1487), False, 'import sys\n')]
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu" __license__ = "MIT" import copy import time import radical.utils as ru from . import states as rps from . import constants as rpc # ------------------------------------------------------------------------------ # class ComputePilot(object): ''' A ComputePilot represent a resource overlay on a local or remote resource. .. note:: A ComputePilot cannot be created directly. The factory method :meth:`radical.pilot.PilotManager.submit_pilots` has to be used instead. **Example**:: pm = radical.pilot.PilotManager(session=s) pd = radical.pilot.ComputePilotDescription() pd.resource = "local.localhost" pd.cores = 2 pd.runtime = 5 # minutes pilot = pm.submit_pilots(pd) ''' # -------------------------------------------------------------------------- # In terms of implementation, a Pilot is not much more than a dict whose # content are dynamically updated to reflect the state progression through # the PMGR components. As a Pilot is always created via a PMGR, it is # considered to *belong* to that PMGR, and all activities are actually # implemented by that PMGR. # # Note that this implies that we could create Pilots before submitting them # to a PMGR, w/o any problems. (FIXME?) # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # def __init__(self, pmgr, descr): # 'static' members self._descr = descr.as_dict() # sanity checks on description for check in ['resource', 'cores', 'runtime']: if not self._descr.get(check): raise ValueError("ComputePilotDescription needs '%s'" % check) # initialize state self._pmgr = pmgr self._session = self._pmgr.session self._prof = self._session._prof self._uid = ru.generate_id('pilot.%(item_counter)04d', ru.ID_CUSTOM, ns=self._session.uid) self._state = rps.NEW self._log = pmgr._log self._pilot_dict = dict() self._callbacks = dict() self._cache = dict() # cache of SAGA dir handles self._cb_lock = ru.RLock() # pilot failures can trigger app termination self._exit_on_error = self._descr.get('exit_on_error') for m in rpc.PMGR_METRICS: self._callbacks[m] = dict() # we always invoke the default state cb self._callbacks[rpc.PILOT_STATE][self._default_state_cb.__name__] = { 'cb' : self._default_state_cb, 'cb_data' : None} # `as_dict()` needs `pilot_dict` and other attributes. Those should all # be available at this point (apart from the sandboxes), so we now # query for those sandboxes. 
self._pilot_jsurl = ru.Url() self._pilot_jshop = ru.Url() self._resource_sandbox = ru.Url() self._session_sandbox = ru.Url() self._pilot_sandbox = ru.Url() self._client_sandbox = ru.Url() pilot = self.as_dict() self._pilot_jsurl, self._pilot_jshop \ = self._session._get_jsurl (pilot) self._resource_sandbox = self._session._get_resource_sandbox(pilot) self._session_sandbox = self._session._get_session_sandbox (pilot) self._pilot_sandbox = self._session._get_pilot_sandbox (pilot) self._client_sandbox = self._session._get_client_sandbox() # we need to expand plaaceholders in the sandboxes # FIXME: this code is a duplication from the pilot launcher code expand = dict() for k,v in pilot['description'].items(): if v is None: v = '' expand['pd.%s' % k] = v if isinstance(v, str): expand['pd.%s' % k.upper()] = v.upper() expand['pd.%s' % k.lower()] = v.lower() else: expand['pd.%s' % k.upper()] = v expand['pd.%s' % k.lower()] = v self._resource_sandbox.path = self._resource_sandbox.path % expand self._session_sandbox .path = self._session_sandbox .path % expand self._pilot_sandbox .path = self._pilot_sandbox .path % expand # -------------------------------------------------------------------------- # def __repr__(self): return str(self) # -------------------------------------------------------------------------- # def __str__(self): return str([self.uid, self.resource, self.state]) # -------------------------------------------------------------------------- # def _default_state_cb(self, pilot, state=None): uid = self.uid state = self.state self._log.info("[Callback]: pilot %s state: %s.", uid, state) if state == rps.FAILED and self._exit_on_error: self._log.error("[Callback]: pilot '%s' failed (exit)", uid) # There are different ways to tell main... ru.cancel_main_thread('int') # raise RuntimeError('pilot %s failed - fatal!' % self.uid) # os.kill(os.getpid()) # sys.exit() # -------------------------------------------------------------------------- # def _update(self, pilot_dict): ''' This will update the facade object after state changes etc, and is invoked by whatever component receiving that updated information. Return True if state changed, False otherwise ''' self._log.debug('update %s', pilot_dict['uid']) if pilot_dict['uid'] != self.uid: self._log.error('invalid uid: %s / %s', pilot_dict['uid'], self.uid) assert(pilot_dict['uid'] == self.uid), 'update called on wrong instance' # NOTE: this method relies on state updates to arrive in order and # without gaps. current = self.state target = pilot_dict['state'] if target not in [rps.FAILED, rps.CANCELED]: # ensure valid state transition state_diff = rps._pilot_state_value(target) - \ rps._pilot_state_value(current) if state_diff != 1: raise RuntimeError('%s: invalid state transition %s -> %s', self.uid, current, target) self._state = target # keep all information around self._pilot_dict = copy.deepcopy(pilot_dict) # invoke pilot specific callbacks # FIXME: this iteration needs to be thread-locked! for _,cb_val in self._callbacks[rpc.PILOT_STATE].items(): cb = cb_val['cb'] cb_data = cb_val['cb_data'] self._log.debug('call %s', cb) self._log.debug('%s calls cb %s', self.uid, cb) if cb_data: cb([self], cb_data) else : cb([self]) # ask pmgr to invoke any global callbacks self._pmgr._call_pilot_callbacks(self) # -------------------------------------------------------------------------- # def as_dict(self): ''' Returns a Python dictionary representation of the object. 
''' ret = {'session': self.session.uid, 'pmgr': self.pmgr.uid, 'uid': self.uid, 'type': 'pilot', 'state': self.state, 'log': self.log, 'stdout': self.stdout, 'stderr': self.stderr, 'resource': self.resource, 'resource_sandbox': str(self._resource_sandbox), 'session_sandbox': str(self._session_sandbox), 'pilot_sandbox': str(self._pilot_sandbox), 'client_sandbox': str(self._client_sandbox), 'js_url': str(self._pilot_jsurl), 'js_hop': str(self._pilot_jshop), 'description': self.description, # this is a deep copy 'resource_details': self.resource_details } return ret # -------------------------------------------------------------------------- # @property def session(self): ''' Returns the pilot's session. **Returns:** * A :class:`Session`. ''' return self._session # -------------------------------------------------------------------------- # @property def pmgr(self): ''' Returns the pilot's manager. **Returns:** * A :class:`PilotManager`. ''' return self._pmgr # ------------------------------------------------------------------------- # @property def resource_details(self): ''' Returns agent level resource information ''' return self._pilot_dict.get('resource_details') # -------------------------------------------------------------------------- # @property def uid(self): ''' Returns the pilot's unique identifier. The uid identifies the pilot within a :class:`PilotManager`. **Returns:** * A unique identifier (string). ''' return self._uid # -------------------------------------------------------------------------- # @property def state(self): ''' Returns the current state of the pilot. **Returns:** * state (string enum) ''' return self._state # -------------------------------------------------------------------------- # @property def log(self): ''' Returns a list of human readable [timestamp, string] tuples describing various events during the pilot's lifetime. Those strings are not normative, only informative! **Returns:** * log (list of [timestamp, string] tuples) ''' return self._pilot_dict.get('log') # -------------------------------------------------------------------------- # @property def stdout(self): ''' Returns a snapshot of the pilot's STDOUT stream. If this property is queried before the pilot has reached 'DONE' or 'FAILED' state it will return None. .. warning: This can be inefficient. Output may be incomplete and/or filtered. **Returns:** * stdout (string) ''' return self._pilot_dict.get('stdout') # -------------------------------------------------------------------------- # @property def stderr(self): ''' Returns a snapshot of the pilot's STDERR stream. If this property is queried before the pilot has reached 'DONE' or 'FAILED' state it will return None. .. warning: This can be inefficient. Output may be incomplete and/or filtered. **Returns:** * stderr (string) ''' return self._pilot_dict.get('stderr') # -------------------------------------------------------------------------- # @property def resource(self): ''' Returns the resource tag of this pilot. **Returns:** * A resource tag (string) ''' return self._descr.get('resource') # -------------------------------------------------------------------------- # @property def pilot_sandbox(self): ''' Returns the full sandbox URL of this pilot, if that is already known, or 'None' otherwise. **Returns:** * A string ''' # NOTE: The pilot has a sandbox property, containing the full sandbox # path, which is used by the pmgr to stage data back and forth. 
#                However, the full path as visible from the pmgr side might not
        #                be what the agent is seeing, specifically in the case of
        #                non-shared filesystems (OSG).  The agent thus uses
        #                `$PWD` as sandbox, with the assumption that this will
        #                get mapped to whatever is here returned as sandbox URL.
        #
        #                There is thus implicit knowledge shared between the RP client
        #                and the RP agent that `$PWD` *is* the sandbox!  The same
        #                implicitly also holds for the staging area, which is relative
        #                to the pilot sandbox.

        if self._pilot_sandbox:
            return str(self._pilot_sandbox)

    @property
    def resource_sandbox(self):
        return self._resource_sandbox

    @property
    def session_sandbox(self):
        return self._session_sandbox

    @property
    def client_sandbox(self):
        return self._client_sandbox

    # --------------------------------------------------------------------------
    #
    @property
    def description(self):
        '''
        Returns the description the pilot was started with, as a dictionary.

        **Returns:**
            * description (dict)
        '''

        return copy.deepcopy(self._descr)

    # --------------------------------------------------------------------------
    #
    def register_callback(self, cb, metric=rpc.PILOT_STATE, cb_data=None):
        '''
        Registers a callback function that is triggered every time the
        pilot's state changes.

        All callback functions need to have the same signature::

            def cb(obj, state)

        where ``object`` is a handle to the object that triggered the callback
        and ``state`` is the new state of that object.  If 'cb_data' is given,
        then the 'cb' signature changes to

            def cb(obj, state, cb_data)

        and 'cb_data' are passed along.
        '''

        if metric not in rpc.PMGR_METRICS :
            raise ValueError ("invalid pmgr metric '%s'" % metric)

        with self._cb_lock:
            cb_name = cb.__name__
            self._callbacks[metric][cb_name] = {'cb'      : cb,
                                                'cb_data' : cb_data}

    # --------------------------------------------------------------------------
    #
    def unregister_callback(self, cb, metric=rpc.PILOT_STATE):

        if metric and metric not in rpc.UMGR_METRICS :
            raise ValueError ("invalid pmgr metric '%s'" % metric)

        if not metric                    : metrics = rpc.PMGR_METRICS
        elif not isinstance(metric, list): metrics = [metric]
        else                             : metrics =  metric

        with self._cb_lock:

            for metric in metrics:

                if cb:
                    to_delete = [cb.__name__]
                else:
                    to_delete = list(self._callbacks[metric].keys())

                for cb_name in to_delete:

                    if cb_name not in self._callbacks[metric]:
                        raise ValueError("unknown callback '%s'" % cb_name)

                    del(self._callbacks[metric][cb_name])

    # --------------------------------------------------------------------------
    #
    def wait(self, state=None, timeout=None):
        '''
        Returns when the pilot reaches a specific state or when an optional
        timeout is reached.

        **Arguments:**

            * **state** [`list of strings`]
              The state(s) that pilot has to reach in order for the call
              to return.

              By default `wait` waits for the pilot to reach a **final**
              state, which can be one of the following:

              * :data:`radical.pilot.states.DONE`
              * :data:`radical.pilot.states.FAILED`
              * :data:`radical.pilot.states.CANCELED`

            * **timeout** [`float`]
              Optional timeout in seconds before the call returns regardless
              whether the pilot has reached the desired state or not.  The
              default value **None** never times out.
        '''

        if not state                    : states = rps.FINAL
        elif not isinstance(state, list): states = [state]
        else                            : states =  state

        if self.state in rps.FINAL:
            # we will never see another state progression.  Raise an error
            # (unless we waited for this)
            if self.state in states:
                return

            # FIXME: do we want a raise here, really?  This introduces a race,
            #        really, on application level
            # raise RuntimeError("can't wait on a pilot in final state")

            return self.state

        start_wait = time.time()
        while self.state not in states:
            time.sleep(0.1)

            if timeout and (timeout <= (time.time() - start_wait)):
                break

            if self._pmgr._terminate.is_set():
                break

        return self.state

    # --------------------------------------------------------------------------
    #
    def cancel(self):
        '''
        Cancel the pilot.
        '''

        # clean connection cache
        try:
            for key in self._cache:
                self._cache[key].close()
            self._cache = dict()
        except:
            pass

        self._pmgr.cancel_pilots(self.uid)

    # --------------------------------------------------------------------------
    #
    def stage_in(self, directives):
        '''
        Stages the content of the staging directive into the pilot's
        staging area
        '''

        # This staging request is actually served by the pmgr *launching*
        # component, because that already has a channel open to the target
        # resource which we can reuse.  We might eventually implement or
        # interface to a dedicated data movement service though.

        # send the staging request to the pmg launcher
        self._pmgr._pilot_staging_input(self.as_dict(), directives)

    # --------------------------------------------------------------------------
    #
    def stage_out(self):
        '''
        fetch `staging_output.tgz` from the pilot sandbox, and store in $PWD
        '''

        try:
            psbox = self._session.get_fs_dir(self._pilot_sandbox)
            psbox.copy('staging_output.tgz', self._client_sandbox)
        except Exception:
            self._log.exception('output staging failed')
            raise


# ------------------------------------------------------------------------------
[ "copy.deepcopy", "radical.utils.cancel_main_thread", "radical.utils.generate_id", "time.time", "radical.utils.Url", "time.sleep", "radical.utils.RLock" ]
[((2113, 2191), 'radical.utils.generate_id', 'ru.generate_id', (['"""pilot.%(item_counter)04d"""', 'ru.ID_CUSTOM'], {'ns': 'self._session.uid'}), "('pilot.%(item_counter)04d', ru.ID_CUSTOM, ns=self._session.uid)\n", (2127, 2191), True, 'import radical.utils as ru\n'), ((2510, 2520), 'radical.utils.RLock', 'ru.RLock', ([], {}), '()\n', (2518, 2520), True, 'import radical.utils as ru\n'), ((3154, 3162), 'radical.utils.Url', 'ru.Url', ([], {}), '()\n', (3160, 3162), True, 'import radical.utils as ru\n'), ((3196, 3204), 'radical.utils.Url', 'ru.Url', ([], {}), '()\n', (3202, 3204), True, 'import radical.utils as ru\n'), ((3238, 3246), 'radical.utils.Url', 'ru.Url', ([], {}), '()\n', (3244, 3246), True, 'import radical.utils as ru\n'), ((3280, 3288), 'radical.utils.Url', 'ru.Url', ([], {}), '()\n', (3286, 3288), True, 'import radical.utils as ru\n'), ((3322, 3330), 'radical.utils.Url', 'ru.Url', ([], {}), '()\n', (3328, 3330), True, 'import radical.utils as ru\n'), ((3364, 3372), 'radical.utils.Url', 'ru.Url', ([], {}), '()\n', (3370, 3372), True, 'import radical.utils as ru\n'), ((6806, 6831), 'copy.deepcopy', 'copy.deepcopy', (['pilot_dict'], {}), '(pilot_dict)\n', (6819, 6831), False, 'import copy\n'), ((13516, 13542), 'copy.deepcopy', 'copy.deepcopy', (['self._descr'], {}), '(self._descr)\n', (13529, 13542), False, 'import copy\n'), ((17022, 17033), 'time.time', 'time.time', ([], {}), '()\n', (17031, 17033), False, 'import time\n'), ((5381, 5409), 'radical.utils.cancel_main_thread', 'ru.cancel_main_thread', (['"""int"""'], {}), "('int')\n", (5402, 5409), True, 'import radical.utils as ru\n'), ((17087, 17102), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (17097, 17102), False, 'import time\n'), ((17143, 17154), 'time.time', 'time.time', ([], {}), '()\n', (17152, 17154), False, 'import time\n')]
# Oracle OCI - Instance report script
# Version: 2.0 19-February 2020
# Written by: <EMAIL>
#
# This script will create a CSV report for all compute and DB instances (including ADW and ATP)
# in your OCI account, including predefined tags
#
# Instructions:
# - you need to specify two variables and that is Tenancy and User and change the value according to your Tenancy.
# - this script uses Cloud Shell Delegation Token, so you don't need any private key or fingerprint.

import oci
import json
import shapes
import logging

# Script configuation ###################################################################################
Tenancy = ""
User = ""

AllPredefinedTags = True # use only predefined tags from root compartment or include all compartment tags as well
NoValueString = "n/a" # what value should be used when no data is available
FieldSeperator = "," # what value should be used as field seperator
ReportFile = "instances.csv"
EndLine = "\n"
# #######################################################################################################


def DisplayInstances(instances, compartmentName, instancetype, regionname):
    for instance in instances:
        privateips = ""
        publicips = ""
        instancetypename = ""
        tagtxt = ""
        OS = ""
        LicenseIncluded = ""

        # Handle details for Compute Instances
        if instancetype=="Compute":
            OCPU, MEM, SSD = shapes.ComputeShape(instance.shape)
            response = ComputeClient.list_vnic_attachments(compartment_id = instance.compartment_id, instance_id = instance.id)
            vnics = response.data
            try:
                for vnic in vnics:
                    responsenic = NetworkClient.get_vnic(vnic_id=vnic.vnic_id)
                    nicinfo = responsenic.data
                    privateips = privateips + nicinfo.private_ip + " "
                    publicips = publicips + nicinfo.public_ip + " "
            except:
                privateips = NoValueString
                publicips = NoValueString
            instancetypename = "Compute"
            version = NoValueString
            namespaces = instance.defined_tags
            shape = instance.shape

            # Get OS Details
            try:
                response = ComputeClient.get_image(instance.source_details.image_id)
                imagedetails = response.data
                OS = imagedetails.display_name
            except:
                OS = NoValueString
            prefix,AD = instance.availability_domain.split(":")
            LicenseIncluded = "BYOL"

        # Handle details for Database Instances
        if instancetype=="DB":
            OCPU, MEM, SSD = shapes.ComputeShape(instance.shape)
            OCPU = instance.cpu_core_count
            response = databaseClient.list_db_nodes(compartment_id = instance.compartment_id, db_system_id = instance.id)
            dbnodes = response.data
            try:
                for dbnode in dbnodes:
                    responsenic = NetworkClient.get_vnic(vnic_id=dbnode.vnic_id)
                    nicinfo = responsenic.data
                    privateips = privateips + nicinfo.private_ip + " "
                    publicips = publicips + nicinfo.public_ip + " "
            except:
                privateips = NoValueString
                publicips = NoValueString
            if instance.license_model == "LICENSE_INCLUDED":
                LicenseIncluded = "YES"
            else:
                LicenseIncluded = "BYOL"
            instancetypename= "DB " + instance.database_edition
            version = instance.version
            OS = "Oracle Linux 6.8"
            shape = instance.shape
            prefix,AD = instance.availability_domain.split(":")

        # Handle details for Autonomous Database (ATP)
        if instancetype == "ATP":
            OCPU = instance.cpu_core_count
            MEM = NoValueString
            SSD = instance.data_storage_size_in_tbs
            instancetypename = "ATP"
            version = NoValueString
            OS = NoValueString
            shape = "ATP"
            AD = regionname.upper()
            privateips = NoValueString
            publicips = NoValueString
            if instance.license_model == "LICENSE_INCLUDED":
                LicenseIncluded = "YES"
            else:
                LicenseIncluded = "BYOL"

        # Handle details for Autonomous Database (ADW)
        if instancetype == "ADW":
            OCPU = instance.cpu_core_count
            MEM = NoValueString
            SSD = instance.data_storage_size_in_tbs
            instancetypename = "ADW"
            version = NoValueString
            OS = NoValueString
            shape = "ADW"
            AD = regionname.upper()
            privateips = NoValueString
            publicips = NoValueString
            if instance.license_model == "LICENSE_INCLUDED":
                LicenseIncluded = "YES"
            else:
                LicenseIncluded = "BYOL"

        try:
            namespaces = instance.defined_tags
            for customertag in customertags:
                try:
                    tagtxt = tagtxt + FieldSeperator + namespaces[customertag[0]][customertag[1]]
                except:
                    tagtxt = tagtxt + FieldSeperator + NoValueString
        except:
            tagtxt = ""

        line = "{}{}".format( instance.display_name, FieldSeperator)
        line = "{}{}{}".format(line, instance.lifecycle_state, FieldSeperator)
        line = "{}{}{}".format(line, instancetypename, FieldSeperator)
        line = "{}{}{}".format(line, LicenseIncluded, FieldSeperator)
        line = "{}{}{}".format(line, version, FieldSeperator)
        line = "{}{}{}".format(line, OS, FieldSeperator)
        line = "{}{}{}".format(line, shape, FieldSeperator)
        line = "{}{}{}".format(line, OCPU, FieldSeperator)
        line = "{}{}{}".format(line, MEM, FieldSeperator)
        line = "{}{}{}".format(line, SSD, FieldSeperator)
        line = "{}{}{}".format(line, compartmentName, FieldSeperator)
        line = "{}{}{}".format(line, AD, FieldSeperator)
        line = "{}{}{}".format(line, privateips, FieldSeperator)
        line = "{}{}".format(line, publicips)
        line = "{}{}".format(line, tagtxt)
        print (line)
        report.write(line + EndLine)

#Do only once
report = open(ReportFile,'w')
customertags = []
header = "Name,State,Service,Licensed,Version,OS,Shape,OCPU,MEMORY,SSD TB,Compartment,AD,PrivateIP,PublicIP".replace(",", FieldSeperator)

delegation_token = open('/etc/oci/delegation_token', 'r').read()
signer = oci.auth.signers.InstancePrincipalsDelegationTokenSigner(delegation_token=delegation_token)
identity = oci.identity.IdentityClient({}, signer=signer)
user = identity.get_user(User).data
RootCompartmentID = user.compartment_id

print ("Querying Enabled Regions:")
response = identity.list_region_subscriptions(Tenancy)
regions = response.data
for region in regions:
    if region.is_home_region:
        home = "Home region"
    else:
        home = ""
    print ("- {} ({}) {}".format(region.region_name, region.status, home))

# Get all the predefined tags, so the initial header line can be created.
response = identity.list_tag_namespaces(RootCompartmentID) tags_namespaces = response.data for namespace in tags_namespaces: tagresponse = identity.list_tags(namespace.id) tags = tagresponse.data for tag in tags: customertags.append([namespace.name,tag.name]) header = header + FieldSeperator + "{}.{}".format(namespace.name,tag.name) if AllPredefinedTags: response = identity.list_compartments(RootCompartmentID) compartments = response.data for compartment in compartments: response = identity.list_tag_namespaces(compartment.id) tags_namespaces = response.data for namespace in tags_namespaces: tagresponse = identity.list_tags(namespace.id) tags = tagresponse.data for tag in tags: customertags.append([namespace.name,tag.name]) header = header + FieldSeperator + "{}.{}".format(namespace.name,tag.name) print (header) report.write(header+EndLine) #Retrieve all instances for region in regions: delegation_token = open('/etc/oci/delegation_token', 'r').read() signer = oci.auth.signers.InstancePrincipalsDelegationTokenSigner(delegation_token=delegation_token) identity = oci.identity.IdentityClient({}, signer=signer) user = identity.get_user(User).data RootCompartmentID = user.compartment_id ComputeClient = oci.core.ComputeClient({}, signer=signer) NetworkClient = oci.core.VirtualNetworkClient({}, signer=signer) # Check instances for all the underlaying Compartments response = oci.pagination.list_call_get_all_results(identity.list_compartments,RootCompartmentID,compartment_id_in_subtree=True) compartments = response.data # Insert (on top) the root compartment RootCompartment = oci.identity.models.Compartment() RootCompartment.id = RootCompartmentID RootCompartment.name = "root" RootCompartment.lifecycle_state = "ACTIVE" compartments.insert(0, RootCompartment) for compartment in compartments: compartmentName = compartment.name if compartment.lifecycle_state == "ACTIVE": print ("process Compartment:" + compartmentName) compartmentID = compartment.id try: response = oci.pagination.list_call_get_all_results(ComputeClient.list_instances,compartment_id=compartmentID) if len(response.data) > 0: DisplayInstances(response.data, compartmentName, "Compute", region.region_name) except: print ("Error?") databaseClient = oci.database.DatabaseClient({}, signer=signer) try: response = oci.pagination.list_call_get_all_results(databaseClient.list_db_systems,compartment_id=compartmentID) if len(response.data) > 0: DisplayInstances(response.data, compartmentName, "DB", region.region_name) except: print ("Error?") try: response = oci.pagination.list_call_get_all_results(databaseClient.list_autonomous_data_warehouses,compartment_id=compartmentID) if len(response.data) > 0: DisplayInstances(response.data, compartmentName, "ADW", region.region_name) except: print ("Error?") try: response = oci.pagination.list_call_get_all_results(databaseClient.list_autonomous_databases,compartment_id=compartmentID) if len(response.data) > 0: DisplayInstances(response.data, compartmentName, "ATP", region.region_name) except: print ("Error?") print (" ") print ("Done, report written to: {}".format(ReportFile)) report.close()
[ "oci.identity.IdentityClient", "oci.identity.models.Compartment", "shapes.ComputeShape", "oci.core.ComputeClient", "oci.core.VirtualNetworkClient", "oci.database.DatabaseClient", "oci.pagination.list_call_get_all_results", "oci.auth.signers.InstancePrincipalsDelegationTokenSigner" ]
[((6383, 6479), 'oci.auth.signers.InstancePrincipalsDelegationTokenSigner', 'oci.auth.signers.InstancePrincipalsDelegationTokenSigner', ([], {'delegation_token': 'delegation_token'}), '(delegation_token=\n delegation_token)\n', (6439, 6479), False, 'import oci\n'), ((6491, 6537), 'oci.identity.IdentityClient', 'oci.identity.IdentityClient', (['{}'], {'signer': 'signer'}), '({}, signer=signer)\n', (6518, 6537), False, 'import oci\n'), ((8080, 8176), 'oci.auth.signers.InstancePrincipalsDelegationTokenSigner', 'oci.auth.signers.InstancePrincipalsDelegationTokenSigner', ([], {'delegation_token': 'delegation_token'}), '(delegation_token=\n delegation_token)\n', (8136, 8176), False, 'import oci\n'), ((8190, 8236), 'oci.identity.IdentityClient', 'oci.identity.IdentityClient', (['{}'], {'signer': 'signer'}), '({}, signer=signer)\n', (8217, 8236), False, 'import oci\n'), ((8341, 8382), 'oci.core.ComputeClient', 'oci.core.ComputeClient', (['{}'], {'signer': 'signer'}), '({}, signer=signer)\n', (8363, 8382), False, 'import oci\n'), ((8402, 8450), 'oci.core.VirtualNetworkClient', 'oci.core.VirtualNetworkClient', (['{}'], {'signer': 'signer'}), '({}, signer=signer)\n', (8431, 8450), False, 'import oci\n'), ((8534, 8657), 'oci.pagination.list_call_get_all_results', 'oci.pagination.list_call_get_all_results', (['identity.list_compartments', 'RootCompartmentID'], {'compartment_id_in_subtree': '(True)'}), '(identity.list_compartments,\n RootCompartmentID, compartment_id_in_subtree=True)\n', (8574, 8657), False, 'import oci\n'), ((8749, 8782), 'oci.identity.models.Compartment', 'oci.identity.models.Compartment', ([], {}), '()\n', (8780, 8782), False, 'import oci\n'), ((1467, 1502), 'shapes.ComputeShape', 'shapes.ComputeShape', (['instance.shape'], {}), '(instance.shape)\n', (1486, 1502), False, 'import shapes\n'), ((2596, 2631), 'shapes.ComputeShape', 'shapes.ComputeShape', (['instance.shape'], {}), '(instance.shape)\n', (2615, 2631), False, 'import shapes\n'), ((9520, 9566), 'oci.database.DatabaseClient', 'oci.database.DatabaseClient', (['{}'], {'signer': 'signer'}), '({}, signer=signer)\n', (9547, 9566), False, 'import oci\n'), ((9216, 9320), 'oci.pagination.list_call_get_all_results', 'oci.pagination.list_call_get_all_results', (['ComputeClient.list_instances'], {'compartment_id': 'compartmentID'}), '(ComputeClient.list_instances,\n compartment_id=compartmentID)\n', (9256, 9320), False, 'import oci\n'), ((9599, 9705), 'oci.pagination.list_call_get_all_results', 'oci.pagination.list_call_get_all_results', (['databaseClient.list_db_systems'], {'compartment_id': 'compartmentID'}), '(databaseClient.list_db_systems,\n compartment_id=compartmentID)\n', (9639, 9705), False, 'import oci\n'), ((9898, 10021), 'oci.pagination.list_call_get_all_results', 'oci.pagination.list_call_get_all_results', (['databaseClient.list_autonomous_data_warehouses'], {'compartment_id': 'compartmentID'}), '(databaseClient.\n list_autonomous_data_warehouses, compartment_id=compartmentID)\n', (9938, 10021), False, 'import oci\n'), ((10214, 10331), 'oci.pagination.list_call_get_all_results', 'oci.pagination.list_call_get_all_results', (['databaseClient.list_autonomous_databases'], {'compartment_id': 'compartmentID'}), '(databaseClient.\n list_autonomous_databases, compartment_id=compartmentID)\n', (10254, 10331), False, 'import oci\n')]
r""" Graded rings of modular forms for Hecke triangle groups AUTHORS: - <NAME> (2013): initial version """ from __future__ import absolute_import #***************************************************************************** # Copyright (C) 2013-2014 <NAME> <<EMAIL>> # # Distributed under the terms of the GNU General Public License (GPL) # as published by the Free Software Foundation; either version 2 of # the License, or (at your option) any later version. # http://www.gnu.org/licenses/ #***************************************************************************** from sage.rings.all import ZZ, QQ, infinity from sage.rings.ring import CommutativeAlgebra from sage.categories.all import CommutativeAlgebras from sage.structure.unique_representation import UniqueRepresentation from sage.misc.cachefunc import cached_method from .hecke_triangle_groups import HeckeTriangleGroup from .abstract_ring import FormsRing_abstract def canonical_parameters(group, base_ring, red_hom, n=None): r""" Return a canonical version of the parameters. EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import canonical_parameters sage: canonical_parameters(4, ZZ, 1) (Hecke triangle group for n = 4, Integer Ring, True, 4) sage: canonical_parameters(infinity, RR, 0) (Hecke triangle group for n = +Infinity, Real Field with 53 bits of precision, False, +Infinity) """ if not (n is None): group = n if (group == infinity): group = HeckeTriangleGroup(infinity) else: try: group = HeckeTriangleGroup(ZZ(group)) except TypeError: group = HeckeTriangleGroup(group.n()) red_hom = bool(red_hom) n = group.n() return (group, base_ring, red_hom, n) class QuasiMeromorphicModularFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation): r""" Graded ring of (Hecke) quasi meromorphic modular forms for the given group and base ring. """ @staticmethod def __classcall__(cls, group = HeckeTriangleGroup(3), base_ring = ZZ, red_hom = False, n=None): r""" Return a (cached) instance with canonical parameters. EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import (canonical_parameters, QuasiMeromorphicModularFormsRing) sage: (group, base_ring, red_hom, n) = canonical_parameters(4, ZZ, 1) sage: QuasiMeromorphicModularFormsRing(4, ZZ, 1) == QuasiMeromorphicModularFormsRing(group, base_ring, red_hom, n) True """ (group, base_ring, red_hom, n) = canonical_parameters(group, base_ring, red_hom, n) return super(FormsRing_abstract,cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n) def __init__(self, group, base_ring, red_hom, n): r""" Return the graded ring of (Hecke) quasi meromorphic modular forms for the given ``group`` and ``base_ring``. INPUT: - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``) - ``base_ring`` -- The base_ring (default: ``ZZ``). - ``red_hom`` -- If True then results of binary operations are considered homogeneous whenever it makes sense (default: False). This is mainly used by the spaces of homogeneous elements. OUTPUT: The corresponding graded ring of (Hecke) quasi meromorphic modular forms for the given ``group`` and ``base_ring``. 
EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import QuasiMeromorphicModularFormsRing sage: MR = QuasiMeromorphicModularFormsRing(4, ZZ, 1) sage: MR QuasiMeromorphicModularFormsRing(n=4) over Integer Ring sage: MR.analytic_type() quasi meromorphic modular sage: MR.category() Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Integer Ring sage: QuasiMeromorphicModularFormsRing(n=infinity) QuasiMeromorphicModularFormsRing(n=+Infinity) over Integer Ring """ FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n) CommutativeAlgebra.__init__(self, base_ring=self.coeff_ring(), category=CommutativeAlgebras(self.coeff_ring())) self._analytic_type = self.AT(["quasi", "mero"]) class QuasiWeakModularFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation): r""" Graded ring of (Hecke) quasi weakly holomorphic modular forms for the given group and base ring. """ @staticmethod def __classcall__(cls, group = HeckeTriangleGroup(3), base_ring = ZZ, red_hom = False, n=None): r""" Return a (cached) instance with canonical parameters. EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import (canonical_parameters, QuasiWeakModularFormsRing) sage: (group, base_ring, red_hom, n) = canonical_parameters(5, CC, 0) sage: QuasiWeakModularFormsRing(5, CC, 0) == QuasiWeakModularFormsRing(group, base_ring, red_hom, n) True """ (group, base_ring, red_hom, n) = canonical_parameters(group, base_ring, red_hom, n) return super(FormsRing_abstract,cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n) def __init__(self, group, base_ring, red_hom, n): r""" Return the graded ring of (Hecke) quasi weakly holomorphic modular forms for the given ``group`` and ``base_ring``. INPUT: - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``) - ``base_ring`` -- The base_ring (default: ``ZZ``). - ``red_hom`` -- If True then results of binary operations are considered homogeneous whenever it makes sense (default: False). This is mainly used by the spaces of homogeneous elements. OUTPUT: The corresponding graded ring of (Hecke) quasi weakly holomorphic modular forms for the given ``group`` and ``base_ring``. EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import QuasiWeakModularFormsRing sage: MR = QuasiWeakModularFormsRing(5, CC, 0) sage: MR QuasiWeakModularFormsRing(n=5) over Complex Field with 53 bits of precision sage: MR.analytic_type() quasi weakly holomorphic modular sage: MR.category() Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Complex Field with 53 bits of precision """ FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n) CommutativeAlgebra.__init__(self, base_ring=self.coeff_ring(), category=CommutativeAlgebras(self.coeff_ring())) self._analytic_type = self.AT(["quasi", "weak"]) class QuasiModularFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation): r""" Graded ring of (Hecke) quasi modular forms for the given group and base ring """ @staticmethod def __classcall__(cls, group = HeckeTriangleGroup(3), base_ring = ZZ, red_hom = False, n=None): r""" Return a (cached) instance with canonical parameters. 
EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import (canonical_parameters, QuasiModularFormsRing) sage: (group, base_ring, red_hom, n) = canonical_parameters(6, ZZ, True) sage: QuasiModularFormsRing(6, ZZ, True) == QuasiModularFormsRing(group, base_ring, red_hom, n) True """ (group, base_ring, red_hom, n) = canonical_parameters(group, base_ring, red_hom, n) return super(FormsRing_abstract,cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n) def __init__(self, group, base_ring, red_hom, n): r""" Return the graded ring of (Hecke) quasi modular forms for the given ``group`` and ``base_ring``. INPUT: - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``) - ``base_ring`` -- The base_ring (default: ``ZZ``). - ``red_hom`` -- If True then results of binary operations are considered homogeneous whenever it makes sense (default: False). This is mainly used by the spaces of homogeneous elements. OUTPUT: The corresponding graded ring of (Hecke) quasi modular forms for the given ``group`` and ``base_ring``. EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import QuasiModularFormsRing sage: MR = QuasiModularFormsRing(6, ZZ, True) sage: MR QuasiModularFormsRing(n=6) over Integer Ring sage: MR.analytic_type() quasi modular sage: MR.category() Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Integer Ring """ FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n) CommutativeAlgebra.__init__(self, base_ring=self.coeff_ring(), category=CommutativeAlgebras(self.coeff_ring())) self._analytic_type = self.AT(["quasi", "holo"]) class QuasiCuspFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation): r""" Graded ring of (Hecke) quasi cusp forms for the given group and base ring. """ @staticmethod def __classcall__(cls, group = HeckeTriangleGroup(3), base_ring = ZZ, red_hom = False, n=None): r""" Return a (cached) instance with canonical parameters. EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import (canonical_parameters, QuasiCuspFormsRing) sage: (group, base_ring, red_hom, n) = canonical_parameters(7, ZZ, 1) sage: QuasiCuspFormsRing(7, ZZ, 1) == QuasiCuspFormsRing(group, base_ring, red_hom, n) True """ (group, base_ring, red_hom, n) = canonical_parameters(group, base_ring, red_hom, n) return super(FormsRing_abstract,cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n) def __init__(self, group, base_ring, red_hom, n): r""" Return the graded ring of (Hecke) quasi cusp forms for the given ``group`` and ``base_ring``. INPUT: - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``) - ``base_ring`` -- The base_ring (default: ``ZZ``). - ``red_hom`` -- If True then results of binary operations are considered homogeneous whenever it makes sense (default: False). This is mainly used by the spaces of homogeneous elements. OUTPUT: The corresponding graded ring of (Hecke) quasi cusp forms for the given ``group`` and ``base_ring``. 
EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import QuasiCuspFormsRing sage: MR = QuasiCuspFormsRing(7, ZZ, 1) sage: MR QuasiCuspFormsRing(n=7) over Integer Ring sage: MR.analytic_type() quasi cuspidal sage: MR.category() Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Integer Ring """ FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n) CommutativeAlgebra.__init__(self, base_ring=self.coeff_ring(), category=CommutativeAlgebras(self.coeff_ring())) self._analytic_type = self.AT(["quasi", "cusp"]) class MeromorphicModularFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation): r""" Graded ring of (Hecke) meromorphic modular forms for the given group and base ring """ @staticmethod def __classcall__(cls, group = HeckeTriangleGroup(3), base_ring = ZZ, red_hom = False, n=None): r""" Return a (cached) instance with canonical parameters. EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import (canonical_parameters, MeromorphicModularFormsRing) sage: (group, base_ring, red_hom, n) = canonical_parameters(4, ZZ, 1) sage: MeromorphicModularFormsRing(4, ZZ, 1) == MeromorphicModularFormsRing(group, base_ring, red_hom, n) True """ (group, base_ring, red_hom, n) = canonical_parameters(group, base_ring, red_hom, n) return super(FormsRing_abstract,cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n) def __init__(self, group, base_ring, red_hom, n): r""" Return the graded ring of (Hecke) meromorphic modular forms for the given ``group`` and ``base_ring``. INPUT: - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``) - ``base_ring`` -- The base_ring (default: ``ZZ``). - ``red_hom`` -- If True then results of binary operations are considered homogeneous whenever it makes sense (default: False). This is mainly used by the spaces of homogeneous elements. OUTPUT: The corresponding graded ring of (Hecke) meromorphic modular forms for the given ``group`` and ``base_ring``. EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import MeromorphicModularFormsRing sage: MR = MeromorphicModularFormsRing(4, ZZ, 1) sage: MR MeromorphicModularFormsRing(n=4) over Integer Ring sage: MR.analytic_type() meromorphic modular sage: MR.category() Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Integer Ring """ FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n) CommutativeAlgebra.__init__(self, base_ring=self.coeff_ring(), category=CommutativeAlgebras(self.coeff_ring())) self._analytic_type = self.AT(["mero"]) class WeakModularFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation): r""" Graded ring of (Hecke) weakly holomorphic modular forms for the given group and base ring """ @staticmethod def __classcall__(cls, group = HeckeTriangleGroup(3), base_ring = ZZ, red_hom = False, n=None): r""" Return a (cached) instance with canonical parameters. 
EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import (canonical_parameters, WeakModularFormsRing) sage: (group, base_ring, red_hom, n) = canonical_parameters(5, ZZ, 0) sage: WeakModularFormsRing(5, ZZ, 0) == WeakModularFormsRing(group, base_ring, red_hom, n) True """ (group, base_ring, red_hom, n) = canonical_parameters(group, base_ring, red_hom, n) return super(FormsRing_abstract,cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n) def __init__(self, group, base_ring, red_hom, n): r""" Return the graded ring of (Hecke) weakly holomorphic modular forms for the given ``group`` and ``base_ring``. INPUT: - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``) - ``base_ring`` -- The base_ring (default: ``ZZ``). - ``red_hom`` -- If True then results of binary operations are considered homogeneous whenever it makes sense (default: False). This is mainly used by the spaces of homogeneous elements. OUTPUT: The corresponding graded ring of (Hecke) weakly holomorphic modular forms for the given ``group`` and ``base_ring``. EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import WeakModularFormsRing sage: MR = WeakModularFormsRing(5, ZZ, 0) sage: MR WeakModularFormsRing(n=5) over Integer Ring sage: MR.analytic_type() weakly holomorphic modular sage: MR.category() Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Integer Ring """ FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n) CommutativeAlgebra.__init__(self, base_ring=self.coeff_ring(), category=CommutativeAlgebras(self.coeff_ring())) self._analytic_type = self.AT(["weak"]) class ModularFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation): r""" Graded ring of (Hecke) modular forms for the given group and base ring """ @staticmethod def __classcall__(cls, group = HeckeTriangleGroup(3), base_ring = ZZ, red_hom = False, n=None): r""" Return a (cached) instance with canonical parameters. EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import ModularFormsRing sage: ModularFormsRing(3, ZZ, 0) == ModularFormsRing() True """ (group, base_ring, red_hom, n) = canonical_parameters(group, base_ring, red_hom, n) return super(FormsRing_abstract,cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n) def __init__(self, group, base_ring, red_hom, n): r""" Return the graded ring of (Hecke) modular forms for the given ``group`` and ``base_ring``. INPUT: - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``) - ``base_ring`` -- The base_ring (default: ``ZZ``). - ``red_hom`` -- If True then results of binary operations are considered homogeneous whenever it makes sense (default: False). This is mainly used by the spaces of homogeneous elements. OUTPUT: The corresponding graded ring of (Hecke) modular forms for the given ``group`` and ``base_ring``. 
EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import ModularFormsRing sage: MR = ModularFormsRing() sage: MR ModularFormsRing(n=3) over Integer Ring sage: MR.analytic_type() modular sage: MR.category() Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Integer Ring """ FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n) CommutativeAlgebra.__init__(self, base_ring=self.coeff_ring(), category=CommutativeAlgebras(self.coeff_ring())) self._analytic_type = self.AT(["holo"]) class CuspFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation): r""" Graded ring of (Hecke) cusp forms for the given group and base ring """ @staticmethod def __classcall__(cls, group = HeckeTriangleGroup(3), base_ring = ZZ, red_hom = False, n=None): r""" Return a (cached) instance with canonical parameters. EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import (canonical_parameters, CuspFormsRing) sage: (group, base_ring, red_hom, n) = canonical_parameters(5, CC, True) sage: CuspFormsRing(5, CC, True) == CuspFormsRing(group, base_ring, red_hom, n) True """ (group, base_ring, red_hom, n) = canonical_parameters(group, base_ring, red_hom, n) return super(FormsRing_abstract,cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n) def __init__(self, group, base_ring, red_hom, n): r""" Return the graded ring of (Hecke) cusp forms for the given ``group`` and ``base_ring``. INPUT: - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``) - ``base_ring`` -- The base_ring (default: ``ZZ``). - ``red_hom`` -- If True then results of binary operations are considered homogeneous whenever it makes sense (default: False). This is mainly used by the spaces of homogeneous elements. OUTPUT: The corresponding graded ring of (Hecke) cusp forms for the given ``group`` and ``base_ring``. EXAMPLES:: sage: from sage.modular.modform_hecketriangle.graded_ring import CuspFormsRing sage: MR = CuspFormsRing(5, CC, True) sage: MR CuspFormsRing(n=5) over Complex Field with 53 bits of precision sage: MR.analytic_type() cuspidal sage: MR.category() Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Complex Field with 53 bits of precision sage: CuspFormsRing(n=infinity, base_ring=CC, red_hom=True) CuspFormsRing(n=+Infinity) over Complex Field with 53 bits of precision """ FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n) CommutativeAlgebra.__init__(self, base_ring=self.coeff_ring(), category=CommutativeAlgebras(self.coeff_ring())) self._analytic_type = self.AT(["cusp"])
[ "sage.rings.all.ZZ" ]
[((1650, 1659), 'sage.rings.all.ZZ', 'ZZ', (['group'], {}), '(group)\n', (1652, 1659), False, 'from sage.rings.all import ZZ, QQ, infinity\n')]
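The last element of every tuple in these blobs looks like the import statement the call depends on. Under that assumption (inferred from the rows, not documented), the imports needed to replay a record's calls can be collected as below; imports_for_record is my name for the helper, not something the dataset defines.

import ast

def imports_for_record(extract_api_blob):
    # Deduplicate while keeping first-seen order; several calls in one
    # record usually share the same import line.
    entries = ast.literal_eval(extract_api_blob)
    seen = []
    for entry in entries:
        imp = entry[-1].strip()
        if imp not in seen:
            seen.append(imp)
    return seen

Applied to the single-entry row above, this would return just 'from sage.rings.all import ZZ, QQ, infinity'.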
import torch
import torch.nn as nn

from model.backbone.conv import conv_block, ConvEncoder
from model.backbone.resnet import resnet12 as resnet


def deconv_block(in_channels, out_channels, kernel_size=2, stride=2, padding=0, output_padding=0):
    return nn.Sequential(
        nn.ConvTranspose2d(in_channels, out_channels,
                           kernel_size=(kernel_size, kernel_size),
                           padding=(padding, padding),
                           stride=(stride, stride),
                           output_padding=(output_padding, output_padding)),
        nn.BatchNorm2d(out_channels),
        nn.ReLU())


def decoder(in_channels: int, hid_channels: int, code_channels: int, fc_channels: list):
    assert len(fc_channels) == 3, "can only have 3 dims"
    decoder_fc1 = nn.Sequential(
        nn.Linear(in_features=fc_channels[0], out_features=fc_channels[1]),
        nn.ReLU()
    )
    decoder_fc2 = nn.Sequential(
        nn.Linear(in_features=fc_channels[1], out_features=fc_channels[2]),
        nn.ReLU()
    )
    # feature map size (code_channels, 5, 5)
    unflatten = nn.Unflatten(dim=1, unflattened_size=(code_channels, 5, 5))
    deconv1 = deconv_block(code_channels, hid_channels)
    deconv2 = deconv_block(hid_channels, hid_channels)
    deconv3 = deconv_block(hid_channels, hid_channels)
    deconv4 = deconv_block(hid_channels, in_channels)
    conv_sigmoid = conv_block(in_channels, in_channels, kernel_size=3, stride=1, padding=1, if_relu=False, if_max_pool=False)
    layers = [decoder_fc1, decoder_fc2, unflatten, deconv1, deconv2, deconv3, deconv4, conv_sigmoid]
    return nn.Sequential(*layers)


def classifier(fc_channels: list):
    assert len(fc_channels) == 4, "can only have 4 dims"
    fc1 = nn.Linear(in_features=fc_channels[0], out_features=fc_channels[1])
    fc2 = nn.Linear(in_features=fc_channels[1], out_features=fc_channels[2])
    softmax = nn.Linear(in_features=fc_channels[2], out_features=fc_channels[3])
    return nn.Sequential(fc1, fc2, softmax)


# backbone: Conv4
class ConvAutoEncoder(nn.Module):
    def __init__(self, in_channels, hid_channels, code_channels, num_classes=None):
        super(ConvAutoEncoder, self).__init__()
        if isinstance(num_classes, int):
            self.is_pretrain = True
        else:
            self.is_pretrain = False
        # encoder
        self.encoder = ConvEncoder(in_channels, hid_channels, code_channels)
        # classifier
        if self.is_pretrain:
            self.classifier = classifier([1600, 512, 32, num_classes])
        # decoder
        self.decoder = decoder(in_channels, hid_channels, code_channels, [1600, 512, 1600])

    def forward(self, x):
        code = self.encode(x)
        if self.is_pretrain:
            out = self.classify(code)
        else:
            out = code
        recon_image = self.decode(code)
        return out, recon_image

    def encode(self, x):
        return self.encoder(x)

    def decode(self, code):
        return self.decoder(code)

    def classify(self, code):
        return self.classifier(code)


# backbone: ResNet
class ResAutoEncoder(nn.Module):
    def __init__(self, num_classes=None):
        super(ResAutoEncoder, self).__init__()
        if isinstance(num_classes, int):
            self.is_pretrain = True
        else:
            self.is_pretrain = False
        # encoder
        self.res_encoder = resnet()
        # classifier
        if self.is_pretrain:
            self.classifier = classifier([640, 256, 128, num_classes])
        # decoder
        self.decoder = decoder(in_channels=3, hid_channels=32, code_channels=640, fc_channels=[640, 4068, 16000])

    def forward(self, x):
        code = self.encode(x)
        if self.is_pretrain:
            output = self.classify(code)
        else:
            output = code
        recon_image = self.decode(code)
        return output, recon_image

    def encode(self, x):
        return self.res_encoder(x)

    def decode(self, code):
        return self.decoder(code)

    def classify(self, code):
        return self.classifier(code)


if __name__ == '__main__':
    # model = ConvAutoEncoder(in_channels=3, hid_channels=64, code_channels=64)
    model = ResAutoEncoder()
    data = torch.randn(5, 3, 80, 80)
    feature_map = model(data)
    print(feature_map)
[ "model.backbone.conv.conv_block", "torch.nn.Unflatten", "torch.nn.ConvTranspose2d", "torch.nn.ReLU", "torch.nn.Sequential", "torch.randn", "model.backbone.conv.ConvEncoder", "torch.nn.BatchNorm2d", "model.backbone.resnet.resnet12", "torch.nn.Linear" ]
[((1148, 1207), 'torch.nn.Unflatten', 'nn.Unflatten', ([], {'dim': '(1)', 'unflattened_size': '(code_channels, 5, 5)'}), '(dim=1, unflattened_size=(code_channels, 5, 5))\n', (1160, 1207), True, 'import torch.nn as nn\n'), ((1447, 1557), 'model.backbone.conv.conv_block', 'conv_block', (['in_channels', 'in_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'if_relu': '(False)', 'if_max_pool': '(False)'}), '(in_channels, in_channels, kernel_size=3, stride=1, padding=1,\n    if_relu=False, if_max_pool=False)\n', (1457, 1557), False, 'from model.backbone.conv import conv_block, ConvEncoder\n'), ((1711, 1733), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (1724, 1733), True, 'import torch.nn as nn\n'), ((1838, 1904), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'fc_channels[0]', 'out_features': 'fc_channels[1]'}), '(in_features=fc_channels[0], out_features=fc_channels[1])\n', (1847, 1904), True, 'import torch.nn as nn\n'), ((1915, 1981), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'fc_channels[1]', 'out_features': 'fc_channels[2]'}), '(in_features=fc_channels[1], out_features=fc_channels[2])\n', (1924, 1981), True, 'import torch.nn as nn\n'), ((1996, 2062), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'fc_channels[2]', 'out_features': 'fc_channels[3]'}), '(in_features=fc_channels[2], out_features=fc_channels[3])\n', (2005, 2062), True, 'import torch.nn as nn\n'), ((2074, 2106), 'torch.nn.Sequential', 'nn.Sequential', (['fc1', 'fc2', 'softmax'], {}), '(fc1, fc2, softmax)\n', (2087, 2106), True, 'import torch.nn as nn\n'), ((4401, 4426), 'torch.randn', 'torch.randn', (['(5)', '(3)', '(80)', '(80)'], {}), '(5, 3, 80, 80)\n', (4412, 4426), False, 'import torch\n'), ((279, 474), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['in_channels', 'out_channels'], {'kernel_size': '(kernel_size, kernel_size)', 'padding': '(padding, padding)', 'stride': '(stride, stride)', 'output_padding': '(output_padding, output_padding)'}), '(in_channels, out_channels, kernel_size=(kernel_size,\n    kernel_size), padding=(padding, padding), stride=(stride, stride),\n    output_padding=(output_padding, output_padding))\n', (297, 474), True, 'import torch.nn as nn\n'), ((611, 639), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (625, 639), True, 'import torch.nn as nn\n'), ((649, 658), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (656, 658), True, 'import torch.nn as nn\n'), ((862, 928), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'fc_channels[0]', 'out_features': 'fc_channels[1]'}), '(in_features=fc_channels[0], out_features=fc_channels[1])\n', (871, 928), True, 'import torch.nn as nn\n'), ((938, 947), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (945, 947), True, 'import torch.nn as nn\n'), ((995, 1061), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'fc_channels[1]', 'out_features': 'fc_channels[2]'}), '(in_features=fc_channels[1], out_features=fc_channels[2])\n', (1004, 1061), True, 'import torch.nn as nn\n'), ((1071, 1080), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1078, 1080), True, 'import torch.nn as nn\n'), ((2463, 2516), 'model.backbone.conv.ConvEncoder', 'ConvEncoder', (['in_channels', 'hid_channels', 'code_channels'], {}), '(in_channels, hid_channels, code_channels)\n', (2474, 2516), False, 'from model.backbone.conv import conv_block, ConvEncoder\n'), ((3521, 3529), 'model.backbone.resnet.resnet12', 'resnet', ([], {}), '()\n', (3527, 3529), True, 'from model.backbone.resnet import resnet12 as resnet\n')]
""" Clean all Docker containers, volumes etc. from using the Docker backend. """ import click from dcos_e2e_cli._vendor import vertigo_py from dcos_e2e_cli.common.options import verbosity_option from dcos_e2e_cli.dcos_vagrant.commands.destroy import destroy_cluster from ._common import vm_names_by_cluster @click.command('clean') @click.option( '--destroy-running-clusters', is_flag=True, default=False, show_default=True, help='Destroy running clusters.', ) @verbosity_option def clean(destroy_running_clusters: bool) -> None: """ Remove VMs created by this tool. This is useful in removing paused and aborted VMs. VMs are aborted when the host is shut down. """ running_clusters = vm_names_by_cluster(running_only=True) all_clusters = vm_names_by_cluster(running_only=False) not_running_cluster_names = set( all_clusters.keys() - running_clusters.keys(), ) if destroy_running_clusters: for cluster_id in running_clusters.keys(): destroy_cluster(cluster_id=cluster_id) for cluster_id in not_running_cluster_names: for vm_name in all_clusters[cluster_id]: virtualbox_vm = vertigo_py.VM(name=vm_name) # type: ignore virtualbox_vm.unregistervm(delete=True) print('not running ', vm_name)
[ "dcos_e2e_cli._vendor.vertigo_py.VM", "click.option", "dcos_e2e_cli.dcos_vagrant.commands.destroy.destroy_cluster", "click.command" ]
[((313, 335), 'click.command', 'click.command', (['"""clean"""'], {}), "('clean')\n", (326, 335), False, 'import click\n'), ((337, 465), 'click.option', 'click.option', (['"""--destroy-running-clusters"""'], {'is_flag': '(True)', 'default': '(False)', 'show_default': '(True)', 'help': '"""Destroy running clusters."""'}), "('--destroy-running-clusters', is_flag=True, default=False,\n show_default=True, help='Destroy running clusters.')\n", (349, 465), False, 'import click\n'), ((1027, 1065), 'dcos_e2e_cli.dcos_vagrant.commands.destroy.destroy_cluster', 'destroy_cluster', ([], {'cluster_id': 'cluster_id'}), '(cluster_id=cluster_id)\n', (1042, 1065), False, 'from dcos_e2e_cli.dcos_vagrant.commands.destroy import destroy_cluster\n'), ((1193, 1220), 'dcos_e2e_cli._vendor.vertigo_py.VM', 'vertigo_py.VM', ([], {'name': 'vm_name'}), '(name=vm_name)\n', (1206, 1220), False, 'from dcos_e2e_cli._vendor import vertigo_py\n')]
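A quick way to exercise a click command like clean without a shell is click's built-in test runner. The sketch below is illustrative, not part of the source: the import path is assumed from the module shown above, and actually invoking it requires the dcos_e2e_cli package and VirtualBox on the machine.

from click.testing import CliRunner

# Assumed import path, mirroring the module above.
from dcos_e2e_cli.dcos_vagrant.commands.clean import clean

runner = CliRunner()
result = runner.invoke(clean, ['--destroy-running-clusters'])
print(result.exit_code)
print(result.output)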
from __future__ import division, print_function import pickle import pdb import os import time from sklearn.cross_validation import StratifiedKFold from sklearn import svm from sklearn import metrics import gensim import random from learners import SK_SVM,SK_KNN,SK_MLP from tuner import DE_Tune_ML from model import PaperData from utility import study from results import results_process import numpy as np #import wget import zipfile from sklearn import neighbors from sklearn import metrics import threading from threading import Barrier import timeit import multiprocessing from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.lda import LDA from sklearn.decomposition import NMF, LatentDirichletAllocation from sklearn.neighbors import NearestNeighbors from sklearn.cluster import KMeans from sklearn.cluster import AffinityPropagation import collections from multiprocessing import Queue import pandas as pd import warnings from sklearn.neural_network import MLPClassifier def tune_learner(learner, train_X, train_Y, tune_X, tune_Y, goal, target_class=None): """ :param learner: :param train_X: :param train_Y: :param tune_X: :param tune_Y: :param goal: :param target_class: :return: """ if not target_class: target_class = goal clf = learner(train_X, train_Y, tune_X, tune_Y, goal) tuner = DE_Tune_ML(clf, clf.get_param(), goal, target_class) return tuner.Tune() def load_vec(d, data, use_pkl=False, file_name=None): if use_pkl: if os.path.isfile(file_name): with open(file_name, "rb") as my_pickle: return pickle.load(my_pickle) else: # print("call get_document_vec") return d.get_document_vec(data, file_name) def print_results(clfs,stop,start): file_name = time.strftime(os.path.sep.join([".", "results", "%Y%m%d_%H:%M:%S.txt"])) file_name = os.path.sep.join(["20171103.txt"]) content = "" for each in clfs: content += each.confusion print(content) print("Model training time: ", stop - start) with open(file_name, "w") as f: f.write(content) results_process.reports(file_name) def get_acc(cm): out = [] for i in range(4): out.append(cm[i][i] / 400) return out @study def run_tuning_SVM(word2vec_src, repeats=3, fold=10, tuning=True): """ :param word2vec_src:str, path of word2vec model :param repeats:int, number of repeats :param fold: int,number of folds :param tuning: boolean, tuning or not. 
:return: None """ print("# word2vec:", word2vec_src) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, file_name=False) print(train_pd) test_pd = load_vec(data, data.test_data, file_name=False) learner = [SK_SVM][0] goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F", 7: "Micro_F"}[6] print(goal) F = {} clfs = [] start = timeit.default_timer() for i in range(repeats): # repeat n times here kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold, shuffle=True) for train_index, tune_index in kf: print(train_pd) print(train_index) train_data = train_pd.ix[train_index] print(train_data) tune_data = train_pd.ix[tune_index] train_X = train_data.loc[:, "Output"].values train_Y = train_data.loc[:, "LinkTypeId"].values tune_X = tune_data.loc[:, "Output"].values tune_Y = tune_data.loc[:, "LinkTypeId"].values test_X = test_pd.loc[:, "Output"].values test_Y = test_pd.loc[:, "LinkTypeId"].values params, evaluation = tune_learner(learner, train_X, train_Y, tune_X, tune_Y, goal) if tuning else ({}, 0) clf = learner(train_X, train_Y, test_X, test_Y, goal) F = clf.learn(F, **params) clfs.append(clf) stop = timeit.default_timer() print("Model training time: ", stop - start) print_results(clfs,stop,start) @study def run_tuning_MLP(word2vec_src, repeats=1, fold=2, tuning=True): """ :param word2vec_src:str, path of word2vec model :param repeats:int, number of repeats :param fold: int,number of folds :param tuning: boolean, tuning or not. :return: None """ print("# word2vec:", word2vec_src) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, file_name=False) print(train_pd) test_pd = load_vec(data, data.test_data, file_name=False) learner = [SK_MLP][0] goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F", 7: "Micro_F"}[6] print(goal) F = {} clfs = [] start = timeit.default_timer() for i in range(repeats): # repeat n times here kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold, shuffle=True) for train_index, tune_index in kf: print(train_pd) print(train_index) train_data = train_pd.ix[train_index] print(train_data) tune_data = train_pd.ix[tune_index] train_X = train_data.loc[:, "Output"].values train_Y = train_data.loc[:, "LinkTypeId"].values tune_X = tune_data.loc[:, "Output"].values tune_Y = tune_data.loc[:, "LinkTypeId"].values test_X = test_pd.loc[:, "Output"].values test_Y = test_pd.loc[:, "LinkTypeId"].values params, evaluation = tune_learner(learner, train_X, train_Y, tune_X, tune_Y, goal) if tuning else ({}, 0) clf = learner(train_X, train_Y, test_X, test_Y, goal) F = clf.learn(F, **params) clfs.append(clf) stop = timeit.default_timer() print("Model training time: ", stop - start) print_results(clfs,stop,start) @study def run_tuning_KNN(word2vec_src, repeats=6, fold=10, tuning=True): """ :param word2vec_src:str, path of word2vec model :param repeats:int, number of repeats :param fold: int,number of folds :param tuning: boolean, tuning or not. 
    :return: None
    """
    print("# word2vec:", word2vec_src)
    word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=word2vec_model)
    train_pd = load_vec(data, data.train_data, file_name=False)
    test_pd = load_vec(data, data.test_data, file_name=False)
    learner = [SK_KNN][0]
    goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F",
            7: "Micro_F"}[6]
    F = {}
    clfs = []
    start = timeit.default_timer()
    for i in range(repeats):  # repeat n times here
        kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold,
                             shuffle=True)
        for train_index, tune_index in kf:
            train_data = train_pd.ix[train_index]
            tune_data = train_pd.ix[tune_index]
            train_X = train_data.loc[:, "Output"].values
            train_Y = train_data.loc[:, "LinkTypeId"].values
            tune_X = tune_data.loc[:, "Output"].values
            tune_Y = tune_data.loc[:, "LinkTypeId"].values
            test_X = test_pd.loc[:, "Output"].values
            test_Y = test_pd.loc[:, "LinkTypeId"].values
            params, evaluation = tune_learner(learner, train_X, train_Y, tune_X,
                                              tune_Y, goal) if tuning else ({}, 0)
            clf = learner(train_X, train_Y, test_X, test_Y, goal)
            F = clf.learn(F, **params)
            clfs.append(clf)
    stop = timeit.default_timer()
    print("Model training time: ", stop - start)
    print_results(clfs, stop, start)


@study
def run_SVM_baseline(word2vec_src):
    """
    Run SVM+word embedding experiment!
    This is the baseline method.
    :return:None
    """
    # Create a subplot with 1 row and 2 columns
    print("# word2vec:", word2vec_src)
    clf = svm.SVC(kernel="rbf", gamma=0.005)
    word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=word2vec_model)
    train_pd = load_vec(data, data.train_data, use_pkl=False)
    test_pd = load_vec(data, data.test_data, use_pkl=False)
    train_X = train_pd.loc[:, "Output"].tolist()
    train_Y = train_pd.loc[:, "LinkTypeId"].tolist()
    test_X = test_pd.loc[:, "Output"].tolist()
    test_Y = test_pd.loc[:, "LinkTypeId"].tolist()
    start = timeit.default_timer()
    clf.fit(train_X, train_Y)
    stop = timeit.default_timer()
    predicted = clf.predict(test_X)
    print(metrics.classification_report(test_Y, predicted,
                                        labels=["1", "2", "3", "4"], digits=3))
    cm = metrics.confusion_matrix(test_Y, predicted, labels=["1", "2", "3", "4"])
    print("accuracy ", get_acc(cm))
    print("Model training time: ", stop - start)


@study
def run_KNN_baseline(word2vec_src):
    """
    Run KNN+word embedding experiment!
    This is the baseline method.
    :return:None
    """
    # Create a subplot with 1 row and 2 columns
    print("# word2vec:", word2vec_src)
    clf = neighbors.KNeighborsClassifier(n_neighbors=5)
    word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=word2vec_model)
    train_pd = load_vec(data, data.train_data, use_pkl=False)
    test_pd = load_vec(data, data.test_data, use_pkl=False)
    train_X = train_pd.loc[:, "Output"].tolist()
    train_Y = train_pd.loc[:, "LinkTypeId"].tolist()
    test_X = test_pd.loc[:, "Output"].tolist()
    test_Y = test_pd.loc[:, "LinkTypeId"].tolist()
    start = timeit.default_timer()
    clf.fit(train_X, train_Y)
    stop = timeit.default_timer()
    predicted = clf.predict(test_X)
    print(metrics.classification_report(test_Y, predicted,
                                        labels=["1", "2", "3", "4"], digits=3))
    cm = metrics.confusion_matrix(test_Y, predicted, labels=["1", "2", "3", "4"])
    print("accuracy ", get_acc(cm))
    print("Model training time: ", stop - start)


@study
def run_MLP(word2vec_src):
    """
    Run MLP+word embedding experiment!
    This is the baseline method.
:return:None """ # Create a subplot with 1 row and 2 columns print("# word2vec:", word2vec_src) clf = MLPClassifier() word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, use_pkl=False) test_pd = load_vec(data, data.test_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() train_Y = train_pd.loc[:, "LinkTypeId"].tolist() test_X = test_pd.loc[:, "Output"].tolist() test_Y = test_pd.loc[:, "LinkTypeId"].tolist() start = timeit.default_timer() clf.fit(train_X, train_Y) stop = timeit.default_timer() predicted = clf.predict(test_X) print(metrics.classification_report(test_Y, predicted, labels=["1", "2", "3", "4"], digits=3)) cm=metrics.confusion_matrix(test_Y, predicted, labels=["1", "2", "3", "4"]) print("accuracy ", get_acc(cm)) print("Model training time: ", stop - start) #################Katie's Code +++++++++++++++++++++++++++++++ # returns the svm model def run_SVM_C(word2vec_src, train_pd, queue, l, test_pd_n): clf = svm.SVC(kernel="rbf", gamma=0.005) clfs = [] # word2vec_model = gensim.models.Word2Vec.load(word2vec_src) # data = PaperData(word2vec=word2vec_model) # print("Train data: " + str(train_pd.shape)) # if train_pd is None: train_pd = load_vec( # data, data.train_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() train_Y = train_pd.loc[:, "LinkTypeId"].tolist() start = timeit.default_timer() clf.fit(train_X, train_Y) stop = timeit.default_timer() print("SVM Model Train Time", (stop-start)) clfs.append(clf) clfs.append(l) queue.put(clfs) return clf def run_KNN_C(word2vec_src, train_pd, queue, l, test_pd_n): clf = neighbors.KNeighborsClassifier(n_neighbors = 5) clfs = [] # word2vec_model = gensim.models.Word2Vec.load(word2vec_src) # data = PaperData(word2vec=word2vec_model) # print("Train data: " + str(train_pd.shape)) # if train_pd is None: train_pd = load_vec( # data, data.train_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() train_Y = train_pd.loc[:, "LinkTypeId"].tolist() start = timeit.default_timer() clf.fit(train_X, train_Y) stop = timeit.default_timer() print("Th", l) print("KNN Model Train Time", (stop-start)) clfs.append(clf) clfs.append(l) queue.put(clfs) return clf @study def run_tuning_SVM_C(word2vec_src,train_pd_c,queue,l,test_pd_c,repeats=1, fold=2, tuning=True): """ :param word2vec_src:str, path of word2vec model :param repeats:int, number of repeats :param fold: int,number of folds :param tuning: boolean, tuning or not. 
    :return: None
    """
    #print("# word2vec:", word2vec_src)
    #word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    #data = PaperData(word2vec=word2vec_model)
    train_pd_c = train_pd_c.reset_index()
    train_pd = train_pd_c
    test_pd = test_pd_c
    learner = [SK_SVM][0]
    goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F",
            7: "Micro_F"}[6]
    F = {}
    clfs = []
    for i in range(repeats):  # repeat n times here
        kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold,
                             shuffle=True)
        for train_index, tune_index in kf:
            train_data = train_pd.ix[train_index]
            tune_data = train_pd.ix[tune_index]
            train_X = train_data.loc[:, "Output"].values
            train_Y = train_data.loc[:, "LinkTypeId"].values
            tune_X = tune_data.loc[:, "Output"].values
            tune_Y = tune_data.loc[:, "LinkTypeId"].values
            test_X = test_pd.loc[:, "Output"].values
            test_Y = test_pd.loc[:, "LinkTypeId"].values
            params, evaluation = tune_learner(learner, train_X, train_Y, tune_X,
                                              tune_Y, goal) if tuning else ({}, 0)
            clf = learner(train_X, train_Y, test_X, test_Y, goal)
            F = clf.learn(F, **params)
            clfs.append(clf)
    clfs.append(l)
    queue.put(clfs)
    return clfs


@study
def run_tuning_KNN_C(word2vec_src, train_pd_c, queue, l, test_pd_c,
                     repeats=10, fold=10, tuning=True):
    """
    :param word2vec_src:str, path of word2vec model
    :param repeats:int, number of repeats
    :param fold: int, number of folds
    :param tuning: boolean, tuning or not.
    :return: None
    """
    #print("# word2vec:", word2vec_src)
    #word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    #data = PaperData(word2vec=word2vec_model)
    train_pd_c = train_pd_c.reset_index()
    train_pd = train_pd_c
    test_pd = test_pd_c
    learner = [SK_KNN][0]
    goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F",
            7: "Micro_F"}[6]
    F = {}
    clfs = []
    for i in range(repeats):  # repeat n times here
        kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold,
                             shuffle=True)
        for train_index, tune_index in kf:
            train_data = train_pd.ix[train_index]
            tune_data = train_pd.ix[tune_index]
            train_X = train_data.loc[:, "Output"].values
            train_Y = train_data.loc[:, "LinkTypeId"].values
            tune_X = tune_data.loc[:, "Output"].values
            tune_Y = tune_data.loc[:, "LinkTypeId"].values
            test_X = test_pd.loc[:, "Output"].values
            test_Y = test_pd.loc[:, "LinkTypeId"].values
            params, evaluation = tune_learner(learner, train_X, train_Y, tune_X,
                                              tune_Y, goal) if tuning else ({}, 0)
            clf = learner(train_X, train_Y, test_X, test_Y, goal)
            F = clf.learn(F, **params)
            clfs.append(clf)
    clfs.append(l)
    queue.put(clfs)
    return clfs


# parses and returns a given svm in the format of dictionary -
# [class](precision, recall, f1score, support)
def results_SVM(clf, test_X, test_Y):
    predicted = clf.predict(test_X)
    # labels: ["Duplicates", "DirectLink","IndirectLink", "Isolated"]
    report_gen = metrics.classification_report(
        test_Y, predicted, labels=["1", "2", "3", "4"], digits=3)
    parsed_report = parse_classification_report(report_gen)
    return parsed_report
    #cm = metrics.confusion_matrix(test_Y, predicted, labels=["1", "2", "3", "4"])
    #print("accuracy ", get_acc(cm))


def results_SVM_C(predicted, test_Y):
    #predicted = clf.predict(test_X)
    # labels: ["Duplicates", "DirectLink","IndirectLink", "Isolated"]
    report_gen = metrics.classification_report(
        test_Y, predicted, labels=["1", "2", "3", "4"], digits=3)
    print(report_gen)
    classification_report_csv(report_gen)
    parsed_report = parse_classification_report(report_gen)
    return parsed_report


def classification_report_csv(report):
    report_data = []
    lines = report.split('\n')
    for line in lines[2:-3]:
        row = {}
        row_data = line.split('      ')
        row['class'] = row_data[2]
        row['precision'] = float(row_data[3].strip())
        row['recall'] = float(row_data[4])
        row['f1_score'] = float(row_data[5])
        row['support'] = float(row_data[6].strip())
        report_data.append(row)
    dataframe = pd.DataFrame.from_dict(report_data)
    dataframe.to_csv('classification_report.csv', mode='a', index=False)


def total_summary(result_set, num_rows, start0, start1, stop0, stop1, start, stop):
    weightedAvgs = [0, 0, 0]
    for l in result_set:
        avg_list = l['avg']
        for i in range(3):
            support_count = avg_list[3]
            weightedAvgs[i] += (avg_list[i] * support_count) / num_rows
    result = {}
    result['precision'] = weightedAvgs[0]
    result['recall'] = weightedAvgs[1]
    result['f1'] = weightedAvgs[2]
    #print(result)
    print("GAP statistics Time:", (stop - start))
    print("1st Model training time: ", (stop0 - start0))
    print("layer 2 Models training time: ", (stop1 - start1))
    print("Total Model training time: ", (stop1 - start1))
    print("Total training time: ", (stop1 - start))


def run_kmeans(word2vec_src):
    print("# word2vec:", word2vec_src)
    word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=word2vec_model)
    train_pd = load_vec(data, data.train_data, use_pkl=False)
    test_pd = load_vec(data, data.test_data, use_pkl=False)
    train_X = train_pd.loc[:, "Output"].tolist()
    queue = Queue()
    start = timeit.default_timer()
    numClusters = optimalK(pd.DataFrame(train_X))
    stop = timeit.default_timer()
    #numClusters = 5
    print("Found optimal k: " + str(numClusters))
    clf = KMeans(n_clusters=numClusters, init='k-means++', max_iter=200, n_init=1)
    start0 = timeit.default_timer()
    clf.fit(train_X)
    stop0 = timeit.default_timer()
    svm_models = []  # maintain a list of svms
    s1 = timeit.default_timer()
    data.train_data['clabel'] = clf.labels_
    s2 = timeit.default_timer()
    print("Inter - ", (s2 - s1))
    start1 = timeit.default_timer()
    #b = Barrier(numClusters)
    #Change the target here as this will be used result validation purpose
    target_model = run_tuning_KNN_C
    threads = []  # worker threads, one per cluster
    for l in range(numClusters):
        cluster = data.train_data.loc[data.train_data['clabel'] == l]
        print("Thread No", l)
        t = threading.Thread(target=run_tuning_KNN_C,
                             args=[word2vec_src, cluster, queue, l, test_pd])
        threads.append(t)
        t.start()
        response = queue.get()
        svm_models.append(response)
        #b.wait()
    for thread in threads:
        thread.join()
    stop1 = timeit.default_timer()
    print("Done all models - ", (stop1 - start0))
    svm_results = []  # maintain a list of svm results
    test_X = test_pd.loc[:, "Output"].tolist()
    predicted = clf.predict(test_X)
    data.test_data['clabel'] = predicted
    total_predicted = []
    total_cluster_Y = []
    avg_predicted = []
    avg_cluster_Y = []
    for i in range(len(svm_models[l]) - 1):
        total_predicted = []
        total_cluster_Y = []
        for l in range(numClusters):
            cluster = data.test_data.loc[data.test_data['clabel'] == l]
            svm_model = svm_models[l][i]
            cluster_X = cluster.loc[:, "Output"].tolist()
            cluster_Y = cluster.loc[:, "LinkTypeId"].tolist()
            total_cluster_Y = np.append(total_cluster_Y, cluster_Y)
            avg_cluster_Y = np.append(avg_cluster_Y, cluster_Y)
            if target_model == run_tuning_SVM_C or target_model == run_tuning_KNN_C:
                predicted_C = svm_model.learner.predict(cluster_X)
            else:
                predicted_C = svm_model.predict(cluster_X)
            total_predicted = np.append(total_predicted, predicted_C)
            avg_predicted = np.append(avg_predicted, predicted_C)
        svm_results.append(results_SVM_C(total_predicted, total_cluster_Y))  # store all the SVM result report in a dictionary
    svm_results.append(results_SVM_C(avg_predicted, avg_cluster_Y))
    # call the helper method to summarize the svm results
total_summary(svm_results, test_pd.shape[0],start0,start1,stop0,stop1,start,stop) def run_kmeans_m(word2vec_src): print("# word2vec:", word2vec_src) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, use_pkl=False) test_pd = load_vec(data, data.test_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() queue = Queue() start = timeit.default_timer() numClusters = optimalK(pd.DataFrame(train_X)) stop = timeit.default_timer() #numClusters = 5 print("Found optimal k: " + str(numClusters)) clf = KMeans(n_clusters=numClusters, init='k-means++', max_iter=200, n_init=1) start0 = timeit.default_timer() clf.fit(train_X) stop0 = timeit.default_timer() svm_models = [] # maintain a list of svms s1 = timeit.default_timer() data.train_data['clabel'] = clf.labels_ s2 = timeit.default_timer() print("Inter - ", (s2-s1)) start1 = timeit.default_timer() #Change the target here as this will be used result validation purpose target_model = run_tuning_SVM_C for l in range(numClusters): cluster = data.train_data.loc[data.train_data['clabel'] == l] print("Thread No", l) #result.append(pool.apply_async(run_KNN_C, args = (word2vec_src,cluster,queue,))) t = threading.Thread(target = run_tuning_SVM_C, args = [word2vec_src,cluster,queue,l,test_pd]) threads.append(t) for th in threads: th.start() for th in threads: response = queue.get() svm_models.append(response) svm_models = sorted(svm_models, key = lambda th: th[-1] ) stop1 = timeit.default_timer() svm_results = [] # maintain a list of svm results test_X = test_pd.loc[:, "Output"].tolist() predicted = clf.predict(test_X) data.test_data['clabel'] = predicted total_predicted = [] total_cluster_Y = [] avg_predicted = [] avg_cluster_Y = [] print(len(svm_models[l])-1) for i in range(len(svm_models[l])-1): total_predicted = [] total_cluster_Y = [] for l in range(numClusters): cluster = data.test_data.loc[data.test_data['clabel'] == l] svm_model = svm_models[l][i] cluster_X = cluster.loc[:, "Output"].tolist() cluster_Y = cluster.loc[:, "LinkTypeId"].tolist() total_cluster_Y = np.append(total_cluster_Y,cluster_Y) avg_cluster_Y = np.append(avg_cluster_Y,cluster_Y) if target_model == run_tuning_SVM_C or target_model == run_tuning_KNN_C: predicted_C = svm_model.learner.predict(cluster_X) else: predicted_C = svm_model.predict(cluster_X) total_predicted = np.append(total_predicted,predicted_C) avg_predicted = np.append(avg_predicted,predicted_C) svm_results.append(results_SVM_C(total_predicted, total_cluster_Y))# store all the SVM result report in a dictionary svm_results.append(results_SVM_C(avg_predicted, avg_cluster_Y)) # call the helper method to summarize the svm results total_summary(svm_results, test_pd.shape[0],start0,start1,stop0,stop1,start,stop) def run_kmeans_mp(word2vec_src): print("# word2vec:", word2vec_src) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, use_pkl=False) test_pd = load_vec(data, data.test_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() queue = Queue() pool = multiprocessing.Pool() processes = [] start = timeit.default_timer() numClusters = optimalK(pd.DataFrame(train_X)) stop = timeit.default_timer() #numClusters = 5 print("Found optimal k: " + str(numClusters)) clf = KMeans(n_clusters=numClusters, init='k-means++', max_iter=200, n_init=1) start0 = timeit.default_timer() clf.fit(train_X) stop0 = timeit.default_timer() svm_models = [] 
# maintain a list of svms s1 = timeit.default_timer() data.train_data['clabel'] = clf.labels_ s2 = timeit.default_timer() print("Inter - ", (s2-s1)) start1 = timeit.default_timer() #Change the target here as this will be used result validation purpose target_model = run_tuning_KNN_C for l in range(numClusters): cluster = data.train_data.loc[data.train_data['clabel'] == l] print("Thread No", l) pool.apply_async(run_tuning_KNN_C, (word2vec_src,cluster,queue,l,test_pd,)) #t = threading.Thread(target = run_tuning_SVM_C, args = [word2vec_src,cluster,queue,l,test_pd]) # for pr in processes: # pr.start() for pr in range(numClusters): response = queue.get() svm_models.append(response) print(svm_models) svm_models = sorted(svm_models, key = lambda th: th[-1] ) stop1 = timeit.default_timer() print(svm_models) svm_results = [] # maintain a list of svm results test_X = test_pd.loc[:, "Output"].tolist() predicted = clf.predict(test_X) data.test_data['clabel'] = predicted total_predicted = [] total_cluster_Y = [] avg_predicted = [] avg_cluster_Y = [] for i in range(len(svm_models[l])-1): total_predicted = [] total_cluster_Y = [] for l in range(numClusters): cluster = data.test_data.loc[data.test_data['clabel'] == l] svm_model = svm_models[l][i] cluster_X = cluster.loc[:, "Output"].tolist() cluster_Y = cluster.loc[:, "LinkTypeId"].tolist() total_cluster_Y = np.append(total_cluster_Y,cluster_Y) avg_cluster_Y = np.append(avg_cluster_Y,cluster_Y) if target_model == run_tuning_SVM_C or target_model == run_tuning_KNN_C: predicted_C = svm_model.learner.predict(cluster_X) else: predicted_C = svm_model.predict(cluster_X) total_predicted = np.append(total_predicted,predicted_C) avg_predicted = np.append(avg_predicted,predicted_C) svm_results.append(results_SVM_C(total_predicted, total_cluster_Y))# store all the SVM result report in a dictionary svm_results.append(results_SVM_C(avg_predicted, avg_cluster_Y)) # call the helper method to summarize the svm results total_summary(svm_results, test_pd.shape[0],start0,start1,stop0,stop1,start,stop) # Source: https://anaconda.org/milesgranger/gap-statistic/notebook def optimalK(data, nrefs=3, maxClusters=15): """ Calculates KMeans optimal K using Gap Statistic from Tibshirani, Walther, Hastie Params: data: ndarry of shape (n_samples, n_features) nrefs: number of sample reference datasets to create maxClusters: Maximum number of clusters to test for Returns: (gaps, optimalK) """ gaps = np.zeros((len(range(1, maxClusters)),)) resultsdf = pd.DataFrame({'clusterCount': [], 'gap': []}) for gap_index, k in enumerate(range(1, maxClusters)): # Holder for reference dispersion results refDisps = np.zeros(nrefs) # For n references, generate random sample and perform kmeans getting resulting dispersion of each loop for i in range(nrefs): # Create new random reference set randomReference = np.random.random_sample(size=data.shape) # Fit to it km = KMeans(n_clusters=k, init='k-means++', max_iter=200, n_init=1) km.fit(randomReference) refDisp = km.inertia_ refDisps[i] = refDisp # Fit cluster to original data and create dispersion km = KMeans(k) km.fit(data) origDisp = km.inertia_ # print(str(i+1) + ": " + str(origDisp)) # Calculate gap statistic gap = np.log(np.mean(refDisps)) - np.log(origDisp) # Assign this loop's gap statistic to gaps gaps[gap_index] = gap resultsdf = resultsdf.append( {'clusterCount': k, 'gap': gap}, ignore_index=True) # return (gaps.argmax() + 1, resultsdf) # Plus 1 because index of 0 means 1 cluster is optimal, index 2 = 3 clusters are optimal return gaps.argmax() 
# Not used, but wanted to put this code somewhere
def results_kmeans(clf, train_X, train_Y, test_X, test_Y):
    predicted = clf.predict(test_X)
    print("Homogeneity: %0.3f" % metrics.homogeneity_score(train_Y, clf.labels_))
    print("Completeness: %0.3f" % metrics.completeness_score(train_Y, clf.labels_))
    print("V-measure: %0.3f" % metrics.v_measure_score(train_Y, clf.labels_))
    print("Adjusted Rand-Index: %.3f" % metrics.adjusted_rand_score(train_Y, clf.labels_))
    print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(train_X, clf.labels_,
                                                                      sample_size=1000))


"""
Parse a sklearn classification report into a dict keyed by class name
and containing a tuple (precision, recall, fscore, support) for each class
Reference: https://gist.github.com/julienr/6b9b9a03bd8224db7b4f
"""


def parse_classification_report(clfreport):
    lines = clfreport.split('\n')
    # Remove empty lines
    lines = list(filter(lambda l: not len(l.strip()) == 0, lines))
    # Starts with a header, then score for each class and finally an average
    header = lines[0]
    cls_lines = lines[1:-1]
    avg_line = lines[-1]
    assert header.split() == ['precision', 'recall', 'f1-score', 'support']
    assert avg_line.split()[0] == 'avg'
    # class names can have spaces - figure the width of the class field
    # using indentation of the precision header
    cls_field_width = len(header) - len(header.lstrip())

    # Now, collect all the class names and scores in a dict
    def parse_line(l):
        """Parse a line of classification_report"""
        cls_name = l[:cls_field_width].strip()
        precision, recall, fscore, support = l[cls_field_width:].split()
        precision = float(precision)
        recall = float(recall)
        fscore = float(fscore)
        support = int(support)
        return (cls_name, precision, recall, fscore, support)

    data = collections.OrderedDict()
    for l in cls_lines:
        ret = parse_line(l)
        cls_name = ret[0]
        scores = ret[1:]
        data[cls_name] = scores
    data['avg'] = parse_line(avg_line)[1:]  # average
    return data
#################Katie's Code +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++


def prepare_word2vec():
    print("Downloading pretrained word2vec models")
    url = "https://zenodo.org/record/807727/files/word2vecs_models.zip"
    import wget  # imported lazily so the module loads even when wget is absent
    file_name = wget.download(url)
    with zipfile.ZipFile(file_name, "r") as zip_ref:
        zip_ref.extractall()


if __name__ == "__main__":
    word_src = "word2vecs_models"
    threads = []
    warnings.filterwarnings("ignore")
    if not os.path.exists(word_src):
        prepare_word2vec()
    elif len(os.listdir(word_src)) == 0:
        os.rmdir(word_src)
        prepare_word2vec()
    for x in range(1):
        random.seed(x)
        np.random.seed(x)
        myword2vecs = [os.path.join(word_src, i) for i in os.listdir(word_src)
                       if "syn" not in i]
        #run_MLP(myword2vecs[x])
        run_tuning_MLP(myword2vecs[x])
        #run_KNN_baseline(myword2vecs[x])
        #run_SVM_baseline(myword2vecs[x])
        #print("Run completed for baseline model--------------------------------------------------")
        #run_tuning_SVM(myword2vecs[x])
        #run_tuning_KNN(myword2vecs[x])
        #print("Run completed for DE model--------------------------------------------------")
[ "numpy.random.seed", "numpy.random.random_sample", "sklearn.metrics.v_measure_score", "sklearn.metrics.classification_report", "os.path.isfile", "pickle.load", "numpy.mean", "sklearn.neural_network.MLPClassifier", "sklearn.svm.SVC", "multiprocessing.Queue", "sklearn.metrics.adjusted_rand_score", "gensim.models.Word2Vec.load", "os.path.join", "os.path.sep.join", "pandas.DataFrame", "sklearn.cluster.KMeans", "os.path.exists", "numpy.append", "random.seed", "results.results_process.reports", "threading.Thread", "pandas.DataFrame.from_dict", "model.PaperData", "sklearn.metrics.silhouette_score", "sklearn.metrics.homogeneity_score", "multiprocessing.Pool", "os.rmdir", "os.listdir", "sklearn.metrics.completeness_score", "zipfile.ZipFile", "numpy.log", "warnings.filterwarnings", "timeit.default_timer", "numpy.zeros", "sklearn.neighbors.KNeighborsClassifier", "collections.OrderedDict", "sklearn.metrics.confusion_matrix", "sklearn.cross_validation.StratifiedKFold" ]
[((1916, 1950), 'os.path.sep.join', 'os.path.sep.join', (["['20171103.txt']"], {}), "(['20171103.txt'])\n", (1932, 1950), False, 'import os\n'), ((2141, 2175), 'results.results_process.reports', 'results_process.reports', (['file_name'], {}), '(file_name)\n', (2164, 2175), False, 'from results import results_process\n'), ((2635, 2676), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (2662, 2676), False, 'import gensim\n'), ((2686, 2720), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (2695, 2720), False, 'from model import PaperData\n'), ((3036, 3058), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (3056, 3058), False, 'import timeit\n'), ((3998, 4020), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4018, 4020), False, 'import timeit\n'), ((4473, 4514), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (4500, 4514), False, 'import gensim\n'), ((4524, 4558), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (4533, 4558), False, 'from model import PaperData\n'), ((4874, 4896), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4894, 4896), False, 'import timeit\n'), ((5836, 5858), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (5856, 5858), False, 'import timeit\n'), ((6312, 6353), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (6339, 6353), False, 'import gensim\n'), ((6363, 6397), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (6372, 6397), False, 'from model import PaperData\n'), ((6681, 6703), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (6701, 6703), False, 'import timeit\n'), ((7570, 7592), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7590, 7592), False, 'import timeit\n'), ((7900, 7934), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""rbf"""', 'gamma': '(0.005)'}), "(kernel='rbf', gamma=0.005)\n", (7907, 7934), False, 'from sklearn import svm\n'), ((7954, 7995), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (7981, 7995), False, 'import gensim\n'), ((8005, 8039), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (8014, 8039), False, 'from model import PaperData\n'), ((8360, 8382), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (8380, 8382), False, 'import timeit\n'), ((8420, 8442), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (8440, 8442), False, 'import timeit\n'), ((8655, 8727), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['test_Y', 'predicted'], {'labels': "['1', '2', '3', '4']"}), "(test_Y, predicted, labels=['1', '2', '3', '4'])\n", (8679, 8727), False, 'from sklearn import metrics\n'), ((9041, 9086), 'sklearn.neighbors.KNeighborsClassifier', 'neighbors.KNeighborsClassifier', ([], {'n_neighbors': '(5)'}), '(n_neighbors=5)\n', (9071, 9086), False, 'from sklearn import neighbors\n'), ((9108, 9149), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (9135, 9149), False, 'import gensim\n'), ((9159, 9193), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (9168, 9193), 
False, 'from model import PaperData\n'), ((9514, 9536), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (9534, 9536), False, 'import timeit\n'), ((9574, 9596), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (9594, 9596), False, 'import timeit\n'), ((9809, 9881), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['test_Y', 'predicted'], {'labels': "['1', '2', '3', '4']"}), "(test_Y, predicted, labels=['1', '2', '3', '4'])\n", (9833, 9881), False, 'from sklearn import metrics\n'), ((10188, 10203), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {}), '()\n', (10201, 10203), False, 'from sklearn.neural_network import MLPClassifier\n'), ((10223, 10264), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (10250, 10264), False, 'import gensim\n'), ((10274, 10308), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (10283, 10308), False, 'from model import PaperData\n'), ((10629, 10651), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (10649, 10651), False, 'import timeit\n'), ((10689, 10711), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (10709, 10711), False, 'import timeit\n'), ((10924, 10996), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['test_Y', 'predicted'], {'labels': "['1', '2', '3', '4']"}), "(test_Y, predicted, labels=['1', '2', '3', '4'])\n", (10948, 10996), False, 'from sklearn import metrics\n'), ((11234, 11268), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""rbf"""', 'gamma': '(0.005)'}), "(kernel='rbf', gamma=0.005)\n", (11241, 11268), False, 'from sklearn import svm\n'), ((11633, 11655), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (11653, 11655), False, 'import timeit\n'), ((11693, 11715), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (11713, 11715), False, 'import timeit\n'), ((11898, 11943), 'sklearn.neighbors.KNeighborsClassifier', 'neighbors.KNeighborsClassifier', ([], {'n_neighbors': '(5)'}), '(n_neighbors=5)\n', (11928, 11943), False, 'from sklearn import neighbors\n'), ((12310, 12332), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (12330, 12332), False, 'import timeit\n'), ((12370, 12392), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (12390, 12392), False, 'import timeit\n'), ((16050, 16142), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['test_Y', 'predicted'], {'labels': "['1', '2', '3', '4']", 'digits': '(3)'}), "(test_Y, predicted, labels=['1', '2', '3', '4'\n ], digits=3)\n", (16079, 16142), False, 'from sklearn import metrics\n'), ((16497, 16589), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['test_Y', 'predicted'], {'labels': "['1', '2', '3', '4']", 'digits': '(3)'}), "(test_Y, predicted, labels=['1', '2', '3', '4'\n ], digits=3)\n", (16526, 16589), False, 'from sklearn import metrics\n'), ((17161, 17196), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['report_data'], {}), '(report_data)\n', (17183, 17196), True, 'import pandas as pd\n'), ((18049, 18090), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (18076, 18090), False, 'import gensim\n'), ((18100, 18134), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (18109, 18134), False, 'from model import PaperData\n'), ((18310, 
18317), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (18315, 18317), False, 'from multiprocessing import Queue\n'), ((18329, 18351), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18349, 18351), False, 'import timeit\n'), ((18409, 18431), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18429, 18431), False, 'import timeit\n'), ((18507, 18579), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'numClusters', 'init': '"""k-means++"""', 'max_iter': '(200)', 'n_init': '(1)'}), "(n_clusters=numClusters, init='k-means++', max_iter=200, n_init=1)\n", (18513, 18579), False, 'from sklearn.cluster import KMeans\n'), ((18609, 18631), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18629, 18631), False, 'import timeit\n'), ((18661, 18683), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18681, 18683), False, 'import timeit\n'), ((18737, 18759), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18757, 18759), False, 'import timeit\n'), ((18809, 18831), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18829, 18831), False, 'import timeit\n'), ((18872, 18894), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18892, 18894), False, 'import timeit\n'), ((19407, 19429), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (19427, 19429), False, 'import timeit\n'), ((20938, 20979), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (20965, 20979), False, 'import gensim\n'), ((20989, 21023), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (20998, 21023), False, 'from model import PaperData\n'), ((21199, 21206), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (21204, 21206), False, 'from multiprocessing import Queue\n'), ((21218, 21240), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21238, 21240), False, 'import timeit\n'), ((21298, 21320), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21318, 21320), False, 'import timeit\n'), ((21396, 21468), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'numClusters', 'init': '"""k-means++"""', 'max_iter': '(200)', 'n_init': '(1)'}), "(n_clusters=numClusters, init='k-means++', max_iter=200, n_init=1)\n", (21402, 21468), False, 'from sklearn.cluster import KMeans\n'), ((21498, 21520), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21518, 21520), False, 'import timeit\n'), ((21550, 21572), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21570, 21572), False, 'import timeit\n'), ((21626, 21648), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21646, 21648), False, 'import timeit\n'), ((21698, 21720), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21718, 21720), False, 'import timeit\n'), ((21761, 21783), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21781, 21783), False, 'import timeit\n'), ((22422, 22444), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (22442, 22444), False, 'import timeit\n'), ((23939, 23980), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (23966, 23980), False, 'import gensim\n'), ((23990, 24024), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (23999, 24024), False, 'from model import PaperData\n'), ((24200, 24207), 
'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (24205, 24207), False, 'from multiprocessing import Queue\n'), ((24217, 24239), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (24237, 24239), False, 'import multiprocessing\n'), ((24268, 24290), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24288, 24290), False, 'import timeit\n'), ((24348, 24370), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24368, 24370), False, 'import timeit\n'), ((24446, 24518), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'numClusters', 'init': '"""k-means++"""', 'max_iter': '(200)', 'n_init': '(1)'}), "(n_clusters=numClusters, init='k-means++', max_iter=200, n_init=1)\n", (24452, 24518), False, 'from sklearn.cluster import KMeans\n'), ((24548, 24570), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24568, 24570), False, 'import timeit\n'), ((24600, 24622), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24620, 24622), False, 'import timeit\n'), ((24676, 24698), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24696, 24698), False, 'import timeit\n'), ((24748, 24770), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24768, 24770), False, 'import timeit\n'), ((24811, 24833), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24831, 24833), False, 'import timeit\n'), ((25482, 25504), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (25502, 25504), False, 'import timeit\n'), ((27373, 27418), 'pandas.DataFrame', 'pd.DataFrame', (["{'clusterCount': [], 'gap': []}"], {}), "({'clusterCount': [], 'gap': []})\n", (27385, 27418), True, 'import pandas as pd\n'), ((30446, 30471), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (30469, 30471), False, 'import collections\n'), ((31128, 31161), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (31151, 31161), False, 'import warnings\n'), ((1527, 1552), 'os.path.isfile', 'os.path.isfile', (['file_name'], {}), '(file_name)\n', (1541, 1552), False, 'import os\n'), ((1797, 1854), 'os.path.sep.join', 'os.path.sep.join', (["['.', 'results', '%Y%m%d_%H:%M:%S.txt']"], {}), "(['.', 'results', '%Y%m%d_%H:%M:%S.txt'])\n", (1813, 1854), False, 'import os\n'), ((3118, 3191), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (["train_pd.loc[:, 'LinkTypeId'].values", 'fold'], {'shuffle': '(True)'}), "(train_pd.loc[:, 'LinkTypeId'].values, fold, shuffle=True)\n", (3133, 3191), False, 'from sklearn.cross_validation import StratifiedKFold\n'), ((4956, 5029), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (["train_pd.loc[:, 'LinkTypeId'].values", 'fold'], {'shuffle': '(True)'}), "(train_pd.loc[:, 'LinkTypeId'].values, fold, shuffle=True)\n", (4971, 5029), False, 'from sklearn.cross_validation import StratifiedKFold\n'), ((6763, 6836), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (["train_pd.loc[:, 'LinkTypeId'].values", 'fold'], {'shuffle': '(True)'}), "(train_pd.loc[:, 'LinkTypeId'].values, fold, shuffle=True)\n", (6778, 6836), False, 'from sklearn.cross_validation import StratifiedKFold\n'), ((8485, 8577), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['test_Y', 'predicted'], {'labels': "['1', '2', '3', '4']", 'digits': '(3)'}), "(test_Y, predicted, labels=['1', '2', '3', '4'\n ], digits=3)\n", (8514, 8577), False, 'from sklearn import metrics\n'), ((9639, 9731), 
'sklearn.metrics.classification_report', 'metrics.classification_report', (['test_Y', 'predicted'], {'labels': "['1', '2', '3', '4']", 'digits': '(3)'}), "(test_Y, predicted, labels=['1', '2', '3', '4'\n ], digits=3)\n", (9668, 9731), False, 'from sklearn import metrics\n'), ((10754, 10846), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['test_Y', 'predicted'], {'labels': "['1', '2', '3', '4']", 'digits': '(3)'}), "(test_Y, predicted, labels=['1', '2', '3', '4'\n ], digits=3)\n", (10783, 10846), False, 'from sklearn import metrics\n'), ((13300, 13373), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (["train_pd.loc[:, 'LinkTypeId'].values", 'fold'], {'shuffle': '(True)'}), "(train_pd.loc[:, 'LinkTypeId'].values, fold, shuffle=True)\n", (13315, 13373), False, 'from sklearn.cross_validation import StratifiedKFold\n'), ((14931, 15004), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (["train_pd.loc[:, 'LinkTypeId'].values", 'fold'], {'shuffle': '(True)'}), "(train_pd.loc[:, 'LinkTypeId'].values, fold, shuffle=True)\n", (14946, 15004), False, 'from sklearn.cross_validation import StratifiedKFold\n'), ((18377, 18398), 'pandas.DataFrame', 'pd.DataFrame', (['train_X'], {}), '(train_X)\n', (18389, 18398), True, 'import pandas as pd\n'), ((19162, 19247), 'threading.Thread', 'threading.Thread', ([], {'target': 'run_tuning_KNN_C', 'args': '[word2vec_src, cluster, queue, l]'}), '(target=run_tuning_KNN_C, args=[word2vec_src, cluster,\n queue, l])\n', (19178, 19247), False, 'import threading\n'), ((21266, 21287), 'pandas.DataFrame', 'pd.DataFrame', (['train_X'], {}), '(train_X)\n', (21278, 21287), True, 'import pandas as pd\n'), ((22109, 22203), 'threading.Thread', 'threading.Thread', ([], {'target': 'run_tuning_SVM_C', 'args': '[word2vec_src, cluster, queue, l, test_pd]'}), '(target=run_tuning_SVM_C, args=[word2vec_src, cluster,\n queue, l, test_pd])\n', (22125, 22203), False, 'import threading\n'), ((24316, 24337), 'pandas.DataFrame', 'pd.DataFrame', (['train_X'], {}), '(train_X)\n', (24328, 24337), True, 'import pandas as pd\n'), ((27537, 27552), 'numpy.zeros', 'np.zeros', (['nrefs'], {}), '(nrefs)\n', (27545, 27552), True, 'import numpy as np\n'), ((28044, 28053), 'sklearn.cluster.KMeans', 'KMeans', (['k'], {}), '(k)\n', (28050, 28053), False, 'from sklearn.cluster import KMeans\n'), ((30981, 31012), 'zipfile.ZipFile', 'zipfile.ZipFile', (['file_name', '"""r"""'], {}), "(file_name, 'r')\n", (30996, 31012), False, 'import zipfile\n'), ((31171, 31195), 'os.path.exists', 'os.path.exists', (['word_src'], {}), '(word_src)\n', (31185, 31195), False, 'import os\n'), ((31330, 31344), 'random.seed', 'random.seed', (['x'], {}), '(x)\n', (31341, 31344), False, 'import random\n'), ((31349, 31366), 'numpy.random.seed', 'np.random.seed', (['x'], {}), '(x)\n', (31363, 31366), True, 'import numpy as np\n'), ((20093, 20130), 'numpy.append', 'np.append', (['total_cluster_Y', 'cluster_Y'], {}), '(total_cluster_Y, cluster_Y)\n', (20102, 20130), True, 'import numpy as np\n'), ((20152, 20187), 'numpy.append', 'np.append', (['avg_cluster_Y', 'cluster_Y'], {}), '(avg_cluster_Y, cluster_Y)\n', (20161, 20187), True, 'import numpy as np\n'), ((20416, 20455), 'numpy.append', 'np.append', (['total_predicted', 'predicted_C'], {}), '(total_predicted, predicted_C)\n', (20425, 20455), True, 'import numpy as np\n'), ((20477, 20514), 'numpy.append', 'np.append', (['avg_predicted', 'predicted_C'], {}), '(avg_predicted, predicted_C)\n', (20486, 20514), True, 'import numpy as 
np\n'), ((23091, 23128), 'numpy.append', 'np.append', (['total_cluster_Y', 'cluster_Y'], {}), '(total_cluster_Y, cluster_Y)\n', (23100, 23128), True, 'import numpy as np\n'), ((23150, 23185), 'numpy.append', 'np.append', (['avg_cluster_Y', 'cluster_Y'], {}), '(avg_cluster_Y, cluster_Y)\n', (23159, 23185), True, 'import numpy as np\n'), ((23414, 23453), 'numpy.append', 'np.append', (['total_predicted', 'predicted_C'], {}), '(total_predicted, predicted_C)\n', (23423, 23453), True, 'import numpy as np\n'), ((23475, 23512), 'numpy.append', 'np.append', (['avg_predicted', 'predicted_C'], {}), '(avg_predicted, predicted_C)\n', (23484, 23512), True, 'import numpy as np\n'), ((26144, 26181), 'numpy.append', 'np.append', (['total_cluster_Y', 'cluster_Y'], {}), '(total_cluster_Y, cluster_Y)\n', (26153, 26181), True, 'import numpy as np\n'), ((26203, 26238), 'numpy.append', 'np.append', (['avg_cluster_Y', 'cluster_Y'], {}), '(avg_cluster_Y, cluster_Y)\n', (26212, 26238), True, 'import numpy as np\n'), ((26467, 26506), 'numpy.append', 'np.append', (['total_predicted', 'predicted_C'], {}), '(total_predicted, predicted_C)\n', (26476, 26506), True, 'import numpy as np\n'), ((26528, 26565), 'numpy.append', 'np.append', (['avg_predicted', 'predicted_C'], {}), '(avg_predicted, predicted_C)\n', (26537, 26565), True, 'import numpy as np\n'), ((27756, 27796), 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': 'data.shape'}), '(size=data.shape)\n', (27779, 27796), True, 'import numpy as np\n'), ((27827, 27889), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k', 'init': '"""k-means++"""', 'max_iter': '(200)', 'n_init': '(1)'}), "(n_clusters=k, init='k-means++', max_iter=200, n_init=1)\n", (27833, 27889), False, 'from sklearn.cluster import KMeans\n'), ((28209, 28225), 'numpy.log', 'np.log', (['origDisp'], {}), '(origDisp)\n', (28215, 28225), True, 'import numpy as np\n'), ((28722, 28769), 'sklearn.metrics.homogeneity_score', 'metrics.homogeneity_score', (['train_Y', 'clf.labels_'], {}), '(train_Y, clf.labels_)\n', (28747, 28769), False, 'from sklearn import metrics\n'), ((28811, 28859), 'sklearn.metrics.completeness_score', 'metrics.completeness_score', (['train_Y', 'clf.labels_'], {}), '(train_Y, clf.labels_)\n', (28837, 28859), False, 'from sklearn import metrics\n'), ((28890, 28935), 'sklearn.metrics.v_measure_score', 'metrics.v_measure_score', (['train_Y', 'clf.labels_'], {}), '(train_Y, clf.labels_)\n', (28913, 28935), False, 'from sklearn import metrics\n'), ((28983, 29032), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['train_Y', 'clf.labels_'], {}), '(train_Y, clf.labels_)\n', (29010, 29032), False, 'from sklearn import metrics\n'), ((29084, 29148), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['train_X', 'clf.labels_'], {'sample_size': '(1000)'}), '(train_X, clf.labels_, sample_size=1000)\n', (29108, 29148), False, 'from sklearn import metrics\n'), ((31263, 31281), 'os.rmdir', 'os.rmdir', (['word_src'], {}), '(word_src)\n', (31271, 31281), False, 'import os\n'), ((31386, 31411), 'os.path.join', 'os.path.join', (['word_src', 'i'], {}), '(word_src, i)\n', (31398, 31411), False, 'import os\n'), ((1616, 1638), 'pickle.load', 'pickle.load', (['my_pickle'], {}), '(my_pickle)\n', (1627, 1638), False, 'import pickle\n'), ((28188, 28205), 'numpy.mean', 'np.mean', (['refDisps'], {}), '(refDisps)\n', (28195, 28205), True, 'import numpy as np\n'), ((31231, 31251), 'os.listdir', 'os.listdir', (['word_src'], {}), '(word_src)\n', (31241, 
31251), False, 'import os\n'), ((31421, 31441), 'os.listdir', 'os.listdir', (['word_src'], {}), '(word_src)\n', (31431, 31441), False, 'import os\n')]
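The heart of the clustering pipeline above is optimalK, a gap-statistic search for the number of clusters. A self-contained re-run of the same idea on synthetic data, with illustrative sample sizes and reference counts:

import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=300, centers=4, random_state=0)

def gap_statistic(data, nrefs=3, max_k=8):
    gaps = np.zeros(max_k - 1)
    for idx, k in enumerate(range(1, max_k)):
        # Dispersion of k-means on nrefs uniform random reference datasets.
        ref_disps = np.zeros(nrefs)
        for i in range(nrefs):
            ref = np.random.random_sample(size=data.shape)
            ref_disps[i] = KMeans(n_clusters=k, n_init=1).fit(ref).inertia_
        # Dispersion of k-means on the real data.
        orig_disp = KMeans(n_clusters=k, n_init=1).fit(data).inertia_
        gaps[idx] = np.log(np.mean(ref_disps)) - np.log(orig_disp)
    return int(gaps.argmax()) + 1  # index 0 corresponds to k=1

print("estimated k:", gap_statistic(X))

As in optimalK above, the chosen k is the one where the log-dispersion gap between the uniform references and the real data peaks.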
def transform_scalars(dataset): """ Normalize tilt series so that each tilt image has the same total intensity. """ from tomviz import utils import numpy as np data = utils.get_array(dataset) # Get data as numpy array if data is None: # Check if data exists raise RuntimeError("No data array found!") data = data.astype(np.float32) # Change tilt series type to float. # Calculate average intensity of tilt series. intensity = np.sum(np.average(data, 2)) for i in range(0, data.shape[2]): # Normalize each tilt image. data[:, :, i] = data[:, :, i] / np.sum(data[:, :, i]) * intensity utils.set_array(dataset, data)
[ "numpy.average", "tomviz.utils.get_array", "numpy.sum", "tomviz.utils.set_array" ]
[((193, 217), 'tomviz.utils.get_array', 'utils.get_array', (['dataset'], {}), '(dataset)\n', (208, 217), False, 'from tomviz import utils\n'), ((662, 692), 'tomviz.utils.set_array', 'utils.set_array', (['dataset', 'data'], {}), '(dataset, data)\n', (677, 692), False, 'from tomviz import utils\n'), ((486, 505), 'numpy.average', 'np.average', (['data', '(2)'], {}), '(data, 2)\n', (496, 505), True, 'import numpy as np\n'), ((623, 644), 'numpy.sum', 'np.sum', (['data[:, :, i]'], {}), '(data[:, :, i])\n', (629, 644), True, 'import numpy as np\n')]
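The arithmetic in this snippet is easy to sanity-check: np.sum(np.average(data, 2)) is the mean per-image total intensity, and dividing each tilt image by its own sum before multiplying by that mean forces every image to the same total. A quick numeric check on a random stack (shapes are illustrative):

import numpy as np

data = np.random.rand(64, 64, 10).astype(np.float32)
intensity = np.sum(np.average(data, 2))  # mean total intensity per tilt image
for i in range(data.shape[2]):
    data[:, :, i] = data[:, :, i] / np.sum(data[:, :, i]) * intensity
# Every tilt image now sums to the same value.
print(np.allclose([data[:, :, i].sum() for i in range(data.shape[2])],
                  intensity, rtol=1e-4))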
from digi.xbee.devices import DigiPointDevice, RemoteDigiPointDevice, XBee64BitAddress

#transmitting XBee should be in coordinator mode
#should be set to API mode as well
#receiver should be in router mode
#can be either API or AT depending on if using XCTU or python API
#make sure to scan for device, using portscan.sh script to get the right /dev/...

device = DigiPointDevice("/dev/serial0", 9600)
device.open()

remote_device = RemoteDigiPointDevice(device, XBee64BitAddress.from_hex_string("13A20041C7BFFC")) #MAC address of other XBee

while True:
    device.send_data(remote_device, "test data")
[ "digi.xbee.devices.DigiPointDevice", "digi.xbee.devices.XBee64BitAddress.from_hex_string" ]
[((368, 405), 'digi.xbee.devices.DigiPointDevice', 'DigiPointDevice', (['"""/dev/serial0"""', '(9600)'], {}), "('/dev/serial0', 9600)\n", (383, 405), False, 'from digi.xbee.devices import DigiPointDevice, RemoteDigiPointDevice, XBee64BitAddress\n'), ((467, 517), 'digi.xbee.devices.XBee64BitAddress.from_hex_string', 'XBee64BitAddress.from_hex_string', (['"""13A20041C7BFFC"""'], {}), "('13A20041C7BFFC')\n", (499, 517), False, 'from digi.xbee.devices import DigiPointDevice, RemoteDigiPointDevice, XBee64BitAddress\n')]
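For completeness, the router side of this link needs a matching receive loop. Below is a hypothetical receiver-side sketch (not in the source) using the digi-xbee callback API; the serial port and baud rate are assumptions that must match the actual hardware.

from digi.xbee.devices import DigiPointDevice

device = DigiPointDevice("/dev/serial0", 9600)  # assumed port/baud
device.open()

def data_received(xbee_message):
    # xbee_message.data is a bytearray; decode it for printing
    print(xbee_message.remote_device.get_64bit_addr(), xbee_message.data.decode())

device.add_data_received_callback(data_received)
input("Listening for incoming frames; press Enter to stop\n")
device.close()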
import imp import inspect import os import sys import uuid from conans.client.generators import registered_generators from conans.client.loader_txt import ConanFileTextLoader from conans.client.tools.files import chdir from conans.errors import ConanException, NotFoundException from conans.model.conan_file import ConanFile from conans.model.conan_generator import Generator from conans.model.options import OptionsValues from conans.model.ref import ConanFileReference from conans.model.settings import Settings from conans.model.values import Values from conans.util.files import load class ProcessedProfile(object): def __init__(self, profile, create_reference=None): self._settings = profile.processed_settings self._user_options = profile.options.copy() self._package_settings = profile.package_settings_values self._env_values = profile.env_values # Make sure the paths are normalized first, so env_values can be just a copy self._dev_reference = create_reference class ConanFileLoader(object): def __init__(self, runner, output, python_requires): self._runner = runner self._output = output self._python_requires = python_requires sys.modules["conans"].python_requires = python_requires self.cached_conanfiles = {} def load_class(self, conanfile_path): try: return self.cached_conanfiles[conanfile_path] except KeyError: self._python_requires.valid = True _, conanfile = parse_conanfile(conanfile_path, self._python_requires) self._python_requires.valid = False self.cached_conanfiles[conanfile_path] = conanfile return conanfile def load_export(self, conanfile_path, name, version, user, channel): conanfile = self.load_class(conanfile_path) # Export does a check on existing name & version if "name" in conanfile.__dict__: if name and name != conanfile.name: raise ConanException("Package recipe exported with name %s!=%s" % (name, conanfile.name)) elif not name: raise ConanException("conanfile didn't specify name") else: conanfile.name = name if "version" in conanfile.__dict__: if version and version != conanfile.version: raise ConanException("Package recipe exported with version %s!=%s" % (version, conanfile.version)) elif not version: raise ConanException("conanfile didn't specify version") else: conanfile.version = version ref = ConanFileReference(conanfile.name, conanfile.version, user, channel) return conanfile(self._output, self._runner, str(ref), user, channel) @staticmethod def _initialize_conanfile(conanfile, processed_profile): # Prepare the settings for the loaded conanfile # Mixing the global settings with the specified for that name if exist tmp_settings = processed_profile._settings.copy() if (processed_profile._package_settings and conanfile.name in processed_profile._package_settings): # Update the values, keeping old ones (confusing assign) values_tuple = processed_profile._package_settings[conanfile.name] tmp_settings.values = Values.from_list(values_tuple) conanfile.initialize(tmp_settings, processed_profile._env_values) def load_consumer(self, conanfile_path, processed_profile, name=None, version=None, user=None, channel=None, test=None): conanfile_class = self.load_class(conanfile_path) conanfile_class.name = name or conanfile_class.name conanfile_class.version = version or conanfile_class.version if test: display_name = "%s (test package)" % test else: ref = ConanFileReference(conanfile_class.name, conanfile_class.version, user, channel, validate=False) if ref.name or ref.version or ref.user or ref.channel: display_name = "%s (%s)" % (os.path.basename(conanfile_path), ref) else: 
display_name = os.path.basename(conanfile_path) conanfile = conanfile_class(self._output, self._runner, display_name, user, channel) conanfile.in_local_cache = False try: self._initialize_conanfile(conanfile, processed_profile) # The consumer specific conanfile.develop = True processed_profile._user_options.descope_options(conanfile.name) conanfile.options.initialize_upstream(processed_profile._user_options, name=conanfile.name) processed_profile._user_options.clear_unscoped_options() return conanfile except Exception as e: # re-raise with file name raise ConanException("%s: %s" % (conanfile_path, str(e))) def load_conanfile(self, conanfile_path, processed_profile, ref): conanfile_class = self.load_class(conanfile_path) conanfile_class.name = ref.name conanfile_class.version = ref.version conanfile = conanfile_class(self._output, self._runner, str(ref), ref.user, ref.channel) if processed_profile._dev_reference and processed_profile._dev_reference == ref: conanfile.develop = True try: self._initialize_conanfile(conanfile, processed_profile) return conanfile except Exception as e: # re-raise with file name raise ConanException("%s: %s" % (conanfile_path, str(e))) def load_conanfile_txt(self, conan_txt_path, processed_profile): if not os.path.exists(conan_txt_path): raise NotFoundException("Conanfile not found!") contents = load(conan_txt_path) path, basename = os.path.split(conan_txt_path) conanfile = self._parse_conan_txt(contents, path, basename, processed_profile) return conanfile def _parse_conan_txt(self, contents, path, display_name, processed_profile): conanfile = ConanFile(self._output, self._runner, display_name) conanfile.initialize(Settings(), processed_profile._env_values) # It is necessary to copy the settings, because the above is only a constraint of # conanfile settings, and a txt doesn't define settings. Necessary for generators, # as cmake_multi, that check build_type. 
conanfile.settings = processed_profile._settings.copy_values()

        try:
            parser = ConanFileTextLoader(contents)
        except Exception as e:
            raise ConanException("%s:\n%s" % (path, str(e)))
        for reference in parser.requirements:
            ConanFileReference.loads(reference)  # Raise if invalid
            conanfile.requires.add(reference)
        for build_reference in parser.build_requirements:
            ConanFileReference.loads(build_reference)
            if not hasattr(conanfile, "build_requires"):
                conanfile.build_requires = []
            conanfile.build_requires.append(build_reference)

        conanfile.generators = parser.generators

        options = OptionsValues.loads(parser.options)
        conanfile.options.values = options
        conanfile.options.initialize_upstream(processed_profile._user_options)

        # imports method
        conanfile.imports = parser.imports_method(conanfile)
        conanfile._conan_env_values.update(processed_profile._env_values)
        return conanfile

    def load_virtual(self, references, processed_profile, scope_options=True,
                     build_requires_options=None):
        # If the user doesn't specify a namespace in options, assume it is
        # for the reference (keep compatibility)
        conanfile = ConanFile(self._output, self._runner, display_name="virtual")
        conanfile.initialize(processed_profile._settings.copy(),
                             processed_profile._env_values)
        conanfile.settings = processed_profile._settings.copy_values()

        for reference in references:
            conanfile.requires.add(reference.full_repr())  # Convert to string necessary
        # Allows options without package namespace in conan install commands:
        #   conan install zlib/1.2.8@lasote/stable -o shared=True
        if scope_options:
            assert len(references) == 1
            processed_profile._user_options.scope_options(references[0].name)
        if build_requires_options:
            conanfile.options.initialize_upstream(build_requires_options)
        else:
            conanfile.options.initialize_upstream(processed_profile._user_options)

        conanfile.generators = []  # remove the default txt generator
        return conanfile


def _parse_module(conanfile_module, module_id):
    """ Parses a python in-memory module, to extract the classes, mainly the main
    class defining the Recipe, but also processes possible existing generators
    @param conanfile_module: the module to be processed
    @return: the main ConanFile class from the module
    """
    result = None
    for name, attr in conanfile_module.__dict__.items():
        if (name.startswith("_") or not inspect.isclass(attr) or
                attr.__dict__.get("__module__") != module_id):
            continue

        if issubclass(attr, ConanFile) and attr != ConanFile:
            if result is None:
                result = attr
            else:
                raise ConanException("More than 1 conanfile in the file")
        elif issubclass(attr, Generator) and attr != Generator:
            registered_generators.add(attr.__name__, attr)

    if result is None:
        raise ConanException("No subclass of ConanFile")

    return result


def parse_conanfile(conanfile_path, python_requires):
    with python_requires.capture_requires() as py_requires:
        module, filename = _parse_conanfile(conanfile_path)
        try:
            conanfile = _parse_module(module, filename)
            conanfile.python_requires = py_requires
            return module, conanfile
        except Exception as e:  # re-raise with file name
            raise ConanException("%s: %s" % (conanfile_path, str(e)))


def _parse_conanfile(conan_file_path):
    """ From a given path, obtain the in-memory python import module
    """

    if not os.path.exists(conan_file_path):
        raise NotFoundException("%s not found!"
% conan_file_path) module_id = str(uuid.uuid1()) current_dir = os.path.dirname(conan_file_path) sys.path.insert(0, current_dir) try: old_modules = list(sys.modules.keys()) with chdir(current_dir): sys.dont_write_bytecode = True loaded = imp.load_source(module_id, conan_file_path) sys.dont_write_bytecode = False # These lines are necessary, otherwise local conanfile imports with same name # collide, but no error, and overwrite other packages imports!! added_modules = set(sys.modules).difference(old_modules) for added in added_modules: module = sys.modules[added] if module: try: folder = os.path.dirname(module.__file__) except AttributeError: # some module doesn't have __file__ pass else: if folder.startswith(current_dir): module = sys.modules.pop(added) sys.modules["%s.%s" % (module_id, added)] = module except Exception: import traceback trace = traceback.format_exc().split('\n') raise ConanException("Unable to load conanfile in %s\n%s" % (conan_file_path, '\n'.join(trace[3:]))) finally: sys.path.pop(0) return loaded, module_id
[ "sys.path.pop", "conans.errors.NotFoundException", "conans.client.tools.files.chdir", "conans.model.ref.ConanFileReference.loads", "inspect.isclass", "os.path.dirname", "conans.client.loader_txt.ConanFileTextLoader", "conans.model.options.OptionsValues.loads", "os.path.exists", "traceback.format_exc", "conans.model.conan_file.ConanFile", "conans.util.files.load", "os.path.basename", "conans.errors.ConanException", "uuid.uuid1", "imp.load_source", "sys.modules.pop", "conans.client.generators.registered_generators.add", "conans.model.values.Values.from_list", "sys.modules.keys", "sys.path.insert", "conans.model.ref.ConanFileReference", "conans.model.settings.Settings", "os.path.split" ]
[((10629, 10661), 'os.path.dirname', 'os.path.dirname', (['conan_file_path'], {}), '(conan_file_path)\n', (10644, 10661), False, 'import os\n'), ((10666, 10697), 'sys.path.insert', 'sys.path.insert', (['(0)', 'current_dir'], {}), '(0, current_dir)\n', (10681, 10697), False, 'import sys\n'), ((2698, 2766), 'conans.model.ref.ConanFileReference', 'ConanFileReference', (['conanfile.name', 'conanfile.version', 'user', 'channel'], {}), '(conanfile.name, conanfile.version, user, channel)\n', (2716, 2766), False, 'from conans.model.ref import ConanFileReference\n'), ((5956, 5976), 'conans.util.files.load', 'load', (['conan_txt_path'], {}), '(conan_txt_path)\n', (5960, 5976), False, 'from conans.util.files import load\n'), ((6002, 6031), 'os.path.split', 'os.path.split', (['conan_txt_path'], {}), '(conan_txt_path)\n', (6015, 6031), False, 'import os\n'), ((6246, 6297), 'conans.model.conan_file.ConanFile', 'ConanFile', (['self._output', 'self._runner', 'display_name'], {}), '(self._output, self._runner, display_name)\n', (6255, 6297), False, 'from conans.model.conan_file import ConanFile\n'), ((7333, 7368), 'conans.model.options.OptionsValues.loads', 'OptionsValues.loads', (['parser.options'], {}), '(parser.options)\n', (7352, 7368), False, 'from conans.model.options import OptionsValues\n'), ((7948, 8009), 'conans.model.conan_file.ConanFile', 'ConanFile', (['self._output', 'self._runner'], {'display_name': '"""virtual"""'}), "(self._output, self._runner, display_name='virtual')\n", (7957, 8009), False, 'from conans.model.conan_file import ConanFile\n'), ((9822, 9864), 'conans.errors.ConanException', 'ConanException', (['"""No subclass of ConanFile"""'], {}), "('No subclass of ConanFile')\n", (9836, 9864), False, 'from conans.errors import ConanException, NotFoundException\n'), ((10476, 10507), 'os.path.exists', 'os.path.exists', (['conan_file_path'], {}), '(conan_file_path)\n', (10490, 10507), False, 'import os\n'), ((10523, 10575), 'conans.errors.NotFoundException', 'NotFoundException', (["('%s not found!' % conan_file_path)"], {}), "('%s not found!' 
% conan_file_path)\n", (10540, 10575), False, 'from conans.errors import ConanException, NotFoundException\n'), ((10597, 10609), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (10607, 10609), False, 'import uuid\n'), ((11951, 11966), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (11963, 11966), False, 'import sys\n'), ((3424, 3454), 'conans.model.values.Values.from_list', 'Values.from_list', (['values_tuple'], {}), '(values_tuple)\n', (3440, 3454), False, 'from conans.model.values import Values\n'), ((3969, 4069), 'conans.model.ref.ConanFileReference', 'ConanFileReference', (['conanfile_class.name', 'conanfile_class.version', 'user', 'channel'], {'validate': '(False)'}), '(conanfile_class.name, conanfile_class.version, user,\n channel, validate=False)\n', (3987, 4069), False, 'from conans.model.ref import ConanFileReference\n'), ((5844, 5874), 'os.path.exists', 'os.path.exists', (['conan_txt_path'], {}), '(conan_txt_path)\n', (5858, 5874), False, 'import os\n'), ((5894, 5935), 'conans.errors.NotFoundException', 'NotFoundException', (['"""Conanfile not found!"""'], {}), "('Conanfile not found!')\n", (5911, 5935), False, 'from conans.errors import ConanException, NotFoundException\n'), ((6327, 6337), 'conans.model.settings.Settings', 'Settings', ([], {}), '()\n', (6335, 6337), False, 'from conans.model.settings import Settings\n'), ((6706, 6735), 'conans.client.loader_txt.ConanFileTextLoader', 'ConanFileTextLoader', (['contents'], {}), '(contents)\n', (6725, 6735), False, 'from conans.client.loader_txt import ConanFileTextLoader\n'), ((6886, 6921), 'conans.model.ref.ConanFileReference.loads', 'ConanFileReference.loads', (['reference'], {}), '(reference)\n', (6910, 6921), False, 'from conans.model.ref import ConanFileReference\n'), ((7058, 7099), 'conans.model.ref.ConanFileReference.loads', 'ConanFileReference.loads', (['build_reference'], {}), '(build_reference)\n', (7082, 7099), False, 'from conans.model.ref import ConanFileReference\n'), ((10734, 10752), 'sys.modules.keys', 'sys.modules.keys', ([], {}), '()\n', (10750, 10752), False, 'import sys\n'), ((10767, 10785), 'conans.client.tools.files.chdir', 'chdir', (['current_dir'], {}), '(current_dir)\n', (10772, 10785), False, 'from conans.client.tools.files import chdir\n'), ((10851, 10894), 'imp.load_source', 'imp.load_source', (['module_id', 'conan_file_path'], {}), '(module_id, conan_file_path)\n', (10866, 10894), False, 'import imp\n'), ((2023, 2110), 'conans.errors.ConanException', 'ConanException', (["('Package recipe exported with name %s!=%s' % (name, conanfile.name))"], {}), "('Package recipe exported with name %s!=%s' % (name,\n conanfile.name))\n", (2037, 2110), False, 'from conans.errors import ConanException, NotFoundException\n'), ((2185, 2232), 'conans.errors.ConanException', 'ConanException', (['"""conanfile didn\'t specify name"""'], {}), '("conanfile didn\'t specify name")\n', (2199, 2232), False, 'from conans.errors import ConanException, NotFoundException\n'), ((2405, 2501), 'conans.errors.ConanException', 'ConanException', (["('Package recipe exported with version %s!=%s' % (version, conanfile.version))"], {}), "('Package recipe exported with version %s!=%s' % (version,\n conanfile.version))\n", (2419, 2501), False, 'from conans.errors import ConanException, NotFoundException\n'), ((2579, 2629), 'conans.errors.ConanException', 'ConanException', (['"""conanfile didn\'t specify version"""'], {}), '("conanfile didn\'t specify version")\n', (2593, 2629), False, 'from conans.errors import ConanException, 
NotFoundException\n'), ((4302, 4334), 'os.path.basename', 'os.path.basename', (['conanfile_path'], {}), '(conanfile_path)\n', (4318, 4334), False, 'import os\n'), ((9336, 9357), 'inspect.isclass', 'inspect.isclass', (['attr'], {}), '(attr)\n', (9351, 9357), False, 'import inspect\n'), ((9609, 9660), 'conans.errors.ConanException', 'ConanException', (['"""More than 1 conanfile in the file"""'], {}), "('More than 1 conanfile in the file')\n", (9623, 9660), False, 'from conans.errors import ConanException, NotFoundException\n'), ((9737, 9783), 'conans.client.generators.registered_generators.add', 'registered_generators.add', (['attr.__name__', 'attr'], {}), '(attr.__name__, attr)\n', (9762, 9783), False, 'from conans.client.generators import registered_generators\n'), ((11312, 11344), 'os.path.dirname', 'os.path.dirname', (['module.__file__'], {}), '(module.__file__)\n', (11327, 11344), False, 'import os\n'), ((11717, 11739), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (11737, 11739), False, 'import traceback\n'), ((4214, 4246), 'os.path.basename', 'os.path.basename', (['conanfile_path'], {}), '(conanfile_path)\n', (4230, 4246), False, 'import os\n'), ((11556, 11578), 'sys.modules.pop', 'sys.modules.pop', (['added'], {}), '(added)\n', (11571, 11578), False, 'import sys\n')]
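The `_parse_conanfile`/`_parse_module` pair above is a dynamic-loading pattern: execute a recipe file under a unique module name, then scan the resulting module for exactly one `ConanFile` subclass. A self-contained sketch of the same technique using `importlib` instead of the deprecated `imp` module (an assumption for modern Python, not conan's actual code path):

import importlib.util
import inspect
import uuid


def load_module(path):
    # Execute the file under a unique module name, as _parse_conanfile does.
    module_id = str(uuid.uuid1())
    spec = importlib.util.spec_from_file_location(module_id, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module, module_id


def find_single_subclass(module, module_id, base):
    # Scan the module namespace for exactly one subclass of `base`,
    # skipping private names and classes imported from other modules.
    result = None
    for name, attr in module.__dict__.items():
        if name.startswith("_") or not inspect.isclass(attr):
            continue
        if attr.__dict__.get("__module__") != module_id:
            continue
        if issubclass(attr, base) and attr is not base:
            if result is not None:
                raise RuntimeError("more than one matching class in the file")
            result = attr
    return result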
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch
from matplotlib import pyplot as plt
from torch import nn as nn
from torch.nn import functional as F

from habitat_baselines.slambased.utils import generate_2dgrid


def safe_roi_2d(array2d, ymin, ymax, xmin, xmax):
    (h, w) = array2d.shape
    return max(0, ymin), min(ymax, h), max(0, xmin), min(xmax, w)


def f2ind(ten, i):
    # Float to index
    return torch.round(ten[i]).long()


def init_neights_to_channels(ks=3):
    r"""Convolutional kernel, which maps neighborhood into channels
    """
    weights = np.zeros((ks * ks, 1, ks, ks), dtype=np.float32)
    for y in range(ks):
        for x in range(ks):
            weights[x * ks + y, 0, y, x] = 1.0
    return weights


class SoftArgMin(nn.Module):
    def __init__(self, beta=5):
        super(SoftArgMin, self).__init__()
        self.beta = beta
        return

    def forward(self, x, coords2d=None):
        bx_sm = F.softmax(self.beta * (-x).view(1, -1), dim=1)
        if coords2d is None:
            coords2d = generate_2dgrid(x.size(2), x.size(3), False)
        coords2d_flat = coords2d.view(2, -1)
        return (bx_sm.expand_as(coords2d_flat) * coords2d_flat).sum(
            dim=1
        ) / bx_sm.sum(dim=1)


class HardArgMin(nn.Module):
    def __init__(self):
        super(HardArgMin, self).__init__()
        return

    def forward(self, x, coords2d=None):
        val, idx = x.view(-1).min(dim=0)
        if coords2d is None:
            coords2d = generate_2dgrid(x.size(2), x.size(3), False)
        coords2d_flat = coords2d.view(2, -1)
        return coords2d_flat[:, idx].view(2)


class DifferentiableStarPlanner(nn.Module):
    def __init__(
        self,
        max_steps=500,
        visualize=False,
        preprocess=False,
        beta=100,
        connectivity="eight",
        device=torch.device("cpu"),  # noqa: B008
        **kwargs
    ):
        super(DifferentiableStarPlanner, self).__init__()
        self.eps = 1e-12
        self.max_steps = max_steps
        self.visualize = visualize
        self.inf = 1e7
        self.ob_cost = 10000.0
        self.device = device
        self.beta = beta
        self.preprocess = preprocess
        # self.argmin = SoftArgMin(beta)
        self.argmin = HardArgMin()
        self.neights2channels = nn.Conv2d(1, 9, kernel_size=(3, 3), bias=False)
        self.neights2channels.weight.data = torch.from_numpy(
            init_neights_to_channels(3)
        )
        self.neights2channels.to(device)
        self.preprocessNet = nn.Conv2d(
            1, 1, kernel_size=(3, 3), padding=1, bias=False
        )
        self.preprocessNet.weight.data = torch.from_numpy(
            np.array(
                [
                    [
                        [
                            [0.00001, 0.0001, 0.00001],
                            [0.0001, 1, 0.0001],
                            [0.00001, 0.0001, 0.00001],
                        ]
                    ]
                ],
                dtype=np.float32,
            )
        )
        self.preprocessNet.to(device)
        if connectivity == "eight":
            self.gx_to_right = nn.Conv2d(1, 1, kernel_size=(1, 3), bias=False)
            self.gx_to_right.weight.data = torch.from_numpy(
                np.array([[[[0, 1, -1]]]], dtype=np.float32)
            )
            self.gx_to_right.to(device)

            self.gx_to_left = nn.Conv2d(1, 1, kernel_size=(1, 3), bias=False)
            self.gx_to_left.weight.data = torch.from_numpy(
                np.array([[[[-1, 1, 0]]]], dtype=np.float32)
            )
            self.gx_to_left.to(device)

            self.gy_to_up = nn.Conv2d(1, 1, kernel_size=(3, 1), bias=False)
            self.gy_to_up.weight.data = torch.from_numpy(
                np.array([[[[0], [1], [-1]]]], dtype=np.float32)
            )
            self.gy_to_up.to(device)

            self.gy_to_down = nn.Conv2d(1, 1, kernel_size=(3, 1), bias=False)
            self.gy_to_down.weight.data = torch.from_numpy(
                np.array([[[[-1], [1], [0]]]], dtype=np.float32)
            )
            self.gy_to_down.to(device)
        else:
            raise ValueError('Only "eight" connectivity now supported')
        return

    def preprocess_obstacle_map(self, obstacle_map):
        if self.preprocess:
            return self.preprocessNet(obstacle_map)
        return obstacle_map

    def coords2grid(self, node_coords,
h, w): grid = node_coords.squeeze() - torch.FloatTensor( (h / 2.0, w / 2.0) ).to(self.device) grid = grid / torch.FloatTensor((h / 2.0, w / 2.0)).to(self.device) return grid.view(1, 1, 1, 2).flip(3) def init_closelistmap(self): return torch.zeros_like(self.start_map).float() def init_openlistmap(self): return self.start_map.clone() def init_g_map(self): return torch.clamp( self.inf * (torch.ones_like(self.start_map) - self.start_map.clone()), min=0, max=self.inf, ) def safe_roi_2d(self, ymin, ymax, xmin, xmax): return ( int(max(0, torch.round(ymin).item())), int(min(torch.round(ymax).item(), self.height)), int(max(0, torch.round(xmin).item())), int(min(torch.round(xmax).item(), self.width)), ) def forward( self, obstacles, coords, start_map, goal_map, non_obstacle_cost_map=None, additional_steps=50, return_path=True, ): self.trav_init_time = 0 self.trav_mask_time = 0 self.trav_soft_time = 0 self.conv_time = 0 self.close_time = 0 self.obstacles = self.preprocess_obstacle_map( obstacles.to(self.device) ) self.start_map = start_map.to(self.device) self.been_there = torch.zeros_like(self.start_map).to( torch.device("cpu") ) self.coords = coords.to(self.device) self.goal_map = goal_map.to(self.device) self.been_there = torch.zeros_like(self.goal_map).to(self.device) self.height = obstacles.size(2) self.width = obstacles.size(3) m, goal_idx = torch.max(self.goal_map.view(-1), 0) c_map = self.calculate_local_path_costs(non_obstacle_cost_map) # c_map might be non persistent in map update self.g_map = self.init_g_map() self.close_list_map = self.init_closelistmap() self.open_list_map = self.init_openlistmap() not_done = False step = 0 stopped_by_max_iter = False if self.visualize: self.fig, self.ax = plt.subplots(1, 1) self.image = self.ax.imshow( self.g_map.squeeze().cpu().detach().numpy().astype(np.float32), animated=True, ) self.fig.canvas.draw() not_done = (self.close_list_map.view(-1)[goal_idx].item() < 1.0) or ( self.g_map.view(-1)[goal_idx].item() >= 0.9 * self.ob_cost ) rad = 1 self.start_coords = ( (self.coords * self.start_map.expand_as(self.coords)) .sum(dim=2) .sum(dim=2) .squeeze() ) node_coords = self.start_coords self.goal_coords = ( (self.coords * self.goal_map.expand_as(self.coords)) .sum(dim=2) .sum(dim=2) .squeeze() ) self.max_steps = 4 * int( torch.sqrt( ((self.start_coords - self.goal_coords) ** 2).sum() + 1e-6 ).item() ) while not_done: ymin, ymax, xmin, xmax = self.safe_roi_2d( node_coords[0] - rad, node_coords[0] + rad + 1, node_coords[1] - rad, node_coords[1] + rad + 1, ) if ( (ymin - 1 > 0) and (xmin - 1 > 0) and (ymax + 1 < self.height) and (xmax + 1 < self.width) ): n2c = self.neights2channels( self.g_map[:, :, ymin - 1 : ymax + 1, xmin - 1 : xmax + 1] ) self.g_map[:, :, ymin:ymax, xmin:xmax] = torch.min( self.g_map[:, :, ymin:ymax, xmin:xmax].clone(), (n2c + c_map[:, :, ymin:ymax, xmin:xmax]).min( dim=1, keepdim=True )[0], ) self.close_list_map[:, :, ymin:ymax, xmin:xmax] = torch.max( self.close_list_map[:, :, ymin:ymax, xmin:xmax], self.open_list_map[:, :, ymin:ymax, xmin:xmax], ) self.open_list_map[:, :, ymin:ymax, xmin:xmax] = F.relu( F.max_pool2d( self.open_list_map[ :, :, ymin - 1 : ymax + 1, xmin - 1 : xmax + 1 ], 3, stride=1, padding=0, ) - self.close_list_map[:, :, ymin:ymax, xmin:xmax] - self.obstacles[:, :, ymin:ymax, xmin:xmax] ) else: self.g_map = torch.min( self.g_map, ( self.neights2channels( F.pad(self.g_map, (1, 1, 1, 1), "replicate") ) + c_map ).min(dim=1, keepdim=True)[0], ) self.close_list_map = torch.max( self.close_list_map, self.open_list_map ) 
                self.open_list_map = F.relu(
                    F.max_pool2d(self.open_list_map, 3, stride=1, padding=1)
                    - self.close_list_map
                    - self.obstacles
                )
            step += 1
            if step >= self.max_steps:
                stopped_by_max_iter = True
                break
            not_done = (
                self.close_list_map.view(-1)[goal_idx].item() < 1.0
            ) or (self.g_map.view(-1)[goal_idx].item() >= 0.1 * self.inf)
            rad += 1
        if not stopped_by_max_iter:
            for _ in range(additional_steps):
                # now propagating beyond the start point
                self.g_map = torch.min(
                    self.g_map,
                    (
                        self.neights2channels(
                            F.pad(self.g_map, (1, 1, 1, 1), "replicate")
                        )
                        + c_map
                    ).min(dim=1, keepdim=True)[0],
                )
                self.close_list_map = torch.max(
                    self.close_list_map, self.open_list_map
                )
                self.open_list_map = F.relu(
                    F.max_pool2d(self.open_list_map, 3, stride=1, padding=1)
                    - self.close_list_map
                    - self.obstacles
                )
        if return_path:
            out_path, cost = self.reconstruct_path()
            return out_path, cost
        return None

    def calculate_local_path_costs(self, non_obstacle_cost_map=None):
        coords = self.coords
        h = coords.size(2)
        w = coords.size(3)
        obstacles_pd = F.pad(self.obstacles, (1, 1, 1, 1), "replicate")
        if non_obstacle_cost_map is None:
            learned_bias = torch.ones_like(self.obstacles).to(
                obstacles_pd.device
            )
        else:
            learned_bias = non_obstacle_cost_map.to(obstacles_pd.device)
        left_diff_sq = (
            self.gx_to_left(
                F.pad(coords[:, 1:2, :, :], (1, 1, 0, 0), "replicate")
            )
            ** 2
        )
        right_diff_sq = (
            self.gx_to_right(
                F.pad(coords[:, 1:2, :, :], (1, 1, 0, 0), "replicate")
            )
            ** 2
        )
        up_diff_sq = (
            self.gy_to_up(
                F.pad(coords[:, 0:1, :, :], (0, 0, 1, 1), "replicate")
            )
            ** 2
        )
        down_diff_sq = (
            self.gy_to_down(
                F.pad(coords[:, 0:1, :, :], (0, 0, 1, 1), "replicate")
            )
            ** 2
        )
        out = torch.cat(
            [
                # Order in from up to down, from left to right
                # hopefully same as in PyTorch
                torch.sqrt(left_diff_sq + up_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 0:h, 0:w],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
                torch.sqrt(left_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 0:h, 1 : w + 1],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
                torch.sqrt(left_diff_sq + down_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 2 : h + 2, 0:w],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
                torch.sqrt(up_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 0:h, 1 : w + 1],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
                0 * right_diff_sq
                + self.ob_cost
                * obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],  # current center
                torch.sqrt(down_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 2 : h + 2, 1 : w + 1],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
                torch.sqrt(right_diff_sq + up_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 0:h, 2 : w + 2],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
                torch.sqrt(right_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 1 : h + 1, 2 : w + 2],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
                torch.sqrt(right_diff_sq + down_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 2 : h + 2, 2 : w + 2],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
            ],
            dim=1,
        )
        return out + torch.clamp(
            learned_bias.expand_as(out), min=0, max=self.ob_cost
        )

    def propagate_traversal(self, node_coords, close, g, coords):
        ymin, ymax, xmin, xmax = self.safe_roi_2d(
            node_coords[0] - 1,
            node_coords[0] + 2,
            node_coords[1] - 1,
            node_coords[1] + 2,
        )
        mask = close[:, :, ymin:ymax, xmin:xmax] > 0
        mask[
            :, :, f2ind(node_coords, 0) - ymin, f2ind(node_coords, 1) - xmin
        ] = 0
        mask = mask > 0
        current_g_cost = g[:, :, ymin:ymax, xmin:xmax][mask].clone()
        if len(current_g_cost.view(-1)) == 0:
            # we are kind of surrounded by obstacles,
            # but still need to output something
            mask = torch.relu(
                1.0 - self.been_there[:, :, ymin:ymax, xmin:xmax]
            )
            mask[
                :,
                :,
                f2ind(node_coords, 0) - ymin,
                f2ind(node_coords, 1) - xmin,
            ] = 0
            mask = mask > 0
            current_g_cost = g[:, :, ymin:ymax, xmin:xmax][mask].clone()
        if len(current_g_cost.view(-1)) > 1:
            current_g_cost = current_g_cost - torch.min(current_g_cost).item()
            current_g_cost = (
                current_g_cost
                + 0.41
                * torch.randperm(
                    len(current_g_cost),
                    dtype=torch.float32,
                    device=torch.device("cpu"),
                )
                / (len(current_g_cost))
            )
        coords_roi = coords[:, :, ymin:ymax, xmin:xmax]
        out = self.argmin(
            current_g_cost, coords_roi[mask.expand_as(coords_roi)]
        )
        return out

    def get_clean_costmap_and_goodmask(self):
        good_mask = 1 - F.max_pool2d(self.obstacles, 3, stride=1, padding=1)
        costmap = self.g_map
        obstacle_cost_corrected = 10000.0
        sampling_map = torch.clamp(costmap, min=0, max=obstacle_cost_corrected)
        return sampling_map, good_mask

    def reconstruct_path(self):
        out_path = []
        goal_coords = self.goal_coords.cpu()
        start_coords = self.start_coords.cpu()

        cost = self.g_map[:, :, f2ind(goal_coords, 0), f2ind(goal_coords, 1)]

        # Traversing
        done = False
        node_coords = goal_coords.cpu()
        out_path.append(node_coords)
        self.been_there = 0 * self.been_there.cpu()
        self.been_there[
            :, :, f2ind(node_coords, 0), f2ind(node_coords, 1)
        ] = 1.0
        self.close_list_map = self.close_list_map.cpu()
        self.g_map = self.g_map.cpu()
        self.coords = self.coords.cpu()
        count1 = 0
        while not done:
            node_coords = self.propagate_traversal(
                node_coords, self.close_list_map, self.g_map, self.coords
            )
            self.been_there[
                :, :, f2ind(node_coords, 0), f2ind(node_coords, 1)
            ] = 1.0
            if torch.norm(node_coords - out_path[-1], 2).item() < 0.3:
                y = node_coords.flatten()[0].long()
                x = node_coords.flatten()[1].long()
                print(self.g_map[0, 0, y - 2 : y + 3, x - 2 : x + 3])
                print("loop in out_path", node_coords)
                raise ValueError("loop in out_path")
            out_path.append(node_coords)
            done = torch.norm(node_coords - start_coords.cpu(), 2).item() < 0.3
            count1 += 1
            if count1 > 250:
                break
        return out_path, cost
[ "torch.ones_like", "torch.relu", "torch.zeros_like", "torch.sqrt", "torch.norm", "torch.nn.Conv2d", "numpy.zeros", "torch.FloatTensor", "torch.clamp", "numpy.array", "torch.nn.functional.max_pool2d", "torch.max", "torch.device", "torch.min", "matplotlib.pyplot.subplots", "torch.round", "torch.nn.functional.pad" ]
[((752, 800), 'numpy.zeros', 'np.zeros', (['(ks * ks, 1, ks, ks)'], {'dtype': 'np.float32'}), '((ks * ks, 1, ks, ks), dtype=np.float32)\n', (760, 800), True, 'import numpy as np\n'), ((2026, 2045), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2038, 2045), False, 'import torch\n'), ((2491, 2538), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(9)'], {'kernel_size': '(3, 3)', 'bias': '(False)'}), '(1, 9, kernel_size=(3, 3), bias=False)\n', (2500, 2538), True, 'from torch import nn as nn\n'), ((2721, 2779), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(1)'], {'kernel_size': '(3, 3)', 'padding': '(1)', 'bias': '(False)'}), '(1, 1, kernel_size=(3, 3), padding=1, bias=False)\n', (2730, 2779), True, 'from torch import nn as nn\n'), ((11596, 11644), 'torch.nn.functional.pad', 'F.pad', (['self.obstacles', '(1, 1, 1, 1)', '"""replicate"""'], {}), "(self.obstacles, (1, 1, 1, 1), 'replicate')\n", (11601, 11644), True, 'from torch.nn import functional as F\n'), ((16938, 16994), 'torch.clamp', 'torch.clamp', (['costmap'], {'min': '(0)', 'max': 'obstacle_cost_corrected'}), '(costmap, min=0, max=obstacle_cost_corrected)\n', (16949, 16994), False, 'import torch\n'), ((594, 613), 'torch.round', 'torch.round', (['ten[i]'], {}), '(ten[i])\n', (605, 613), False, 'import torch\n'), ((2873, 2979), 'numpy.array', 'np.array', (['[[[[1e-05, 0.0001, 1e-05], [0.0001, 1, 0.0001], [1e-05, 0.0001, 1e-05]]]]'], {'dtype': 'np.float32'}), '([[[[1e-05, 0.0001, 1e-05], [0.0001, 1, 0.0001], [1e-05, 0.0001, \n 1e-05]]]], dtype=np.float32)\n', (2881, 2979), True, 'import numpy as np\n'), ((3340, 3387), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(1)'], {'kernel_size': '(1, 3)', 'bias': '(False)'}), '(1, 1, kernel_size=(1, 3), bias=False)\n', (3349, 3387), True, 'from torch import nn as nn\n'), ((3595, 3642), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(1)'], {'kernel_size': '(1, 3)', 'bias': '(False)'}), '(1, 1, kernel_size=(1, 3), bias=False)\n', (3604, 3642), True, 'from torch import nn as nn\n'), ((3846, 3893), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(1)'], {'kernel_size': '(3, 1)', 'bias': '(False)'}), '(1, 1, kernel_size=(3, 1), bias=False)\n', (3855, 3893), True, 'from torch import nn as nn\n'), ((4099, 4146), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(1)'], {'kernel_size': '(3, 1)', 'bias': '(False)'}), '(1, 1, kernel_size=(3, 1), bias=False)\n', (4108, 4146), True, 'from torch import nn as nn\n'), ((6122, 6141), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6134, 6141), False, 'import torch\n'), ((6867, 6885), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (6879, 6885), True, 'from matplotlib import pyplot as plt\n'), ((15727, 15788), 'torch.relu', 'torch.relu', (['(1.0 - self.been_there[:, :, ymin:ymax, xmin:xmax])'], {}), '(1.0 - self.been_there[:, :, ymin:ymax, xmin:xmax])\n', (15737, 15788), False, 'import torch\n'), ((16791, 16843), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['self.obstacles', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(self.obstacles, 3, stride=1, padding=1)\n', (16803, 16843), True, 'from torch.nn import functional as F\n'), ((3465, 3509), 'numpy.array', 'np.array', (['[[[[0, 1, -1]]]]'], {'dtype': 'np.float32'}), '([[[[0, 1, -1]]]], dtype=np.float32)\n', (3473, 3509), True, 'import numpy as np\n'), ((3719, 3763), 'numpy.array', 'np.array', (['[[[[-1, 1, 0]]]]'], {'dtype': 'np.float32'}), '([[[[-1, 1, 0]]]], dtype=np.float32)\n', (3727, 3763), True, 'import numpy as np\n'), ((3968, 4016), 'numpy.array', 'np.array', 
(['[[[[0], [1], [-1]]]]'], {'dtype': 'np.float32'}), '([[[[0], [1], [-1]]]], dtype=np.float32)\n', (3976, 4016), True, 'import numpy as np\n'), ((4223, 4271), 'numpy.array', 'np.array', (['[[[[-1], [1], [0]]]]'], {'dtype': 'np.float32'}), '([[[[-1], [1], [0]]]], dtype=np.float32)\n', (4231, 4271), True, 'import numpy as np\n'), ((4920, 4952), 'torch.zeros_like', 'torch.zeros_like', (['self.start_map'], {}), '(self.start_map)\n', (4936, 4952), False, 'import torch\n'), ((6073, 6105), 'torch.zeros_like', 'torch.zeros_like', (['self.start_map'], {}), '(self.start_map)\n', (6089, 6105), False, 'import torch\n'), ((6272, 6303), 'torch.zeros_like', 'torch.zeros_like', (['self.goal_map'], {}), '(self.goal_map)\n', (6288, 6303), False, 'import torch\n'), ((8757, 8868), 'torch.max', 'torch.max', (['self.close_list_map[:, :, ymin:ymax, xmin:xmax]', 'self.open_list_map[:, :, ymin:ymax, xmin:xmax]'], {}), '(self.close_list_map[:, :, ymin:ymax, xmin:xmax], self.\n open_list_map[:, :, ymin:ymax, xmin:xmax])\n', (8766, 8868), False, 'import torch\n'), ((9844, 9894), 'torch.max', 'torch.max', (['self.close_list_map', 'self.open_list_map'], {}), '(self.close_list_map, self.open_list_map)\n', (9853, 9894), False, 'import torch\n'), ((10980, 11030), 'torch.max', 'torch.max', (['self.close_list_map', 'self.open_list_map'], {}), '(self.close_list_map, self.open_list_map)\n', (10989, 11030), False, 'import torch\n'), ((11957, 12011), 'torch.nn.functional.pad', 'F.pad', (['coords[:, 1:2, :, :]', '(1, 1, 0, 0)', '"""replicate"""'], {}), "(coords[:, 1:2, :, :], (1, 1, 0, 0), 'replicate')\n", (11962, 12011), True, 'from torch.nn import functional as F\n'), ((12125, 12179), 'torch.nn.functional.pad', 'F.pad', (['coords[:, 1:2, :, :]', '(1, 1, 0, 0)', '"""replicate"""'], {}), "(coords[:, 1:2, :, :], (1, 1, 0, 0), 'replicate')\n", (12130, 12179), True, 'from torch.nn import functional as F\n'), ((12287, 12341), 'torch.nn.functional.pad', 'F.pad', (['coords[:, 0:1, :, :]', '(0, 0, 1, 1)', '"""replicate"""'], {}), "(coords[:, 0:1, :, :], (0, 0, 1, 1), 'replicate')\n", (12292, 12341), True, 'from torch.nn import functional as F\n'), ((12453, 12507), 'torch.nn.functional.pad', 'F.pad', (['coords[:, 0:1, :, :]', '(0, 0, 1, 1)', '"""replicate"""'], {}), "(coords[:, 0:1, :, :], (0, 0, 1, 1), 'replicate')\n", (12458, 12507), True, 'from torch.nn import functional as F\n'), ((4674, 4711), 'torch.FloatTensor', 'torch.FloatTensor', (['(h / 2.0, w / 2.0)'], {}), '((h / 2.0, w / 2.0))\n', (4691, 4711), False, 'import torch\n'), ((4772, 4809), 'torch.FloatTensor', 'torch.FloatTensor', (['(h / 2.0, w / 2.0)'], {}), '((h / 2.0, w / 2.0))\n', (4789, 4809), False, 'import torch\n'), ((5123, 5154), 'torch.ones_like', 'torch.ones_like', (['self.start_map'], {}), '(self.start_map)\n', (5138, 5154), False, 'import torch\n'), ((11714, 11745), 'torch.ones_like', 'torch.ones_like', (['self.obstacles'], {}), '(self.obstacles)\n', (11729, 11745), False, 'import torch\n'), ((12714, 12762), 'torch.sqrt', 'torch.sqrt', (['(left_diff_sq + up_diff_sq + self.eps)'], {}), '(left_diff_sq + up_diff_sq + self.eps)\n', (12724, 12762), False, 'import torch\n'), ((12970, 13005), 'torch.sqrt', 'torch.sqrt', (['(left_diff_sq + self.eps)'], {}), '(left_diff_sq + self.eps)\n', (12980, 13005), False, 'import torch\n'), ((13219, 13269), 'torch.sqrt', 'torch.sqrt', (['(left_diff_sq + down_diff_sq + self.eps)'], {}), '(left_diff_sq + down_diff_sq + self.eps)\n', (13229, 13269), False, 'import torch\n'), ((13483, 13516), 'torch.sqrt', 'torch.sqrt', (['(up_diff_sq + 
self.eps)'], {}), '(up_diff_sq + self.eps)\n', (13493, 13516), False, 'import torch\n'), ((13873, 13908), 'torch.sqrt', 'torch.sqrt', (['(down_diff_sq + self.eps)'], {}), '(down_diff_sq + self.eps)\n', (13883, 13908), False, 'import torch\n'), ((14128, 14177), 'torch.sqrt', 'torch.sqrt', (['(right_diff_sq + up_diff_sq + self.eps)'], {}), '(right_diff_sq + up_diff_sq + self.eps)\n', (14138, 14177), False, 'import torch\n'), ((14391, 14427), 'torch.sqrt', 'torch.sqrt', (['(right_diff_sq + self.eps)'], {}), '(right_diff_sq + self.eps)\n', (14401, 14427), False, 'import torch\n'), ((14647, 14698), 'torch.sqrt', 'torch.sqrt', (['(right_diff_sq + down_diff_sq + self.eps)'], {}), '(right_diff_sq + down_diff_sq + self.eps)\n', (14657, 14698), False, 'import torch\n'), ((12812, 12889), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 0:h, 0:w]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 0:h, 0:w], obstacles_pd[:, :, 1:h + 1, 1:w + 1])\n', (12821, 12889), False, 'import torch\n'), ((13055, 13140), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 0:h, 1:w + 1]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 0:h, 1:w + 1], obstacles_pd[:, :, 1:h + 1, 1:w +\n 1])\n', (13064, 13140), False, 'import torch\n'), ((13319, 13404), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 2:h + 2, 0:w]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 2:h + 2, 0:w], obstacles_pd[:, :, 1:h + 1, 1:w +\n 1])\n', (13328, 13404), False, 'import torch\n'), ((13566, 13651), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 0:h, 1:w + 1]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 0:h, 1:w + 1], obstacles_pd[:, :, 1:h + 1, 1:w +\n 1])\n', (13575, 13651), False, 'import torch\n'), ((13958, 14047), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 2:h + 2, 1:w + 1]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 2:h + 2, 1:w + 1], obstacles_pd[:, :, 1:h + 1,\n 1:w + 1])\n', (13967, 14047), False, 'import torch\n'), ((14227, 14312), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 0:h, 2:w + 2]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 0:h, 2:w + 2], obstacles_pd[:, :, 1:h + 1, 1:w +\n 1])\n', (14236, 14312), False, 'import torch\n'), ((14477, 14566), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 1:h + 1, 2:w + 2]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 1:h + 1, 2:w + 2], obstacles_pd[:, :, 1:h + 1,\n 1:w + 1])\n', (14486, 14566), False, 'import torch\n'), ((14748, 14837), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 2:h + 2, 2:w + 2]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 2:h + 2, 2:w + 2], obstacles_pd[:, :, 1:h + 1,\n 1:w + 1])\n', (14757, 14837), False, 'import torch\n'), ((16177, 16202), 'torch.min', 'torch.min', (['current_g_cost'], {}), '(current_g_cost)\n', (16186, 16202), False, 'import torch\n'), ((17983, 18024), 'torch.norm', 'torch.norm', (['(node_coords - out_path[-1])', '(2)'], {}), '(node_coords - out_path[-1], 2)\n', (17993, 18024), False, 'import torch\n'), ((5329, 5346), 'torch.round', 'torch.round', (['ymin'], {}), '(ymin)\n', (5340, 5346), False, 'import torch\n'), ((5377, 5394), 'torch.round', 'torch.round', (['ymax'], {}), '(ymax)\n', (5388, 5394), False, 'import torch\n'), ((5441, 5458), 'torch.round', 'torch.round', (['xmin'], {}), '(xmin)\n', (5452, 5458), False, 'import torch\n'), ((5489, 5506), 'torch.round', 'torch.round', (['xmax'], {}), '(xmax)\n', (5500, 5506), False, 'import torch\n'), ((9016, 9120), 
'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['self.open_list_map[:, :, ymin - 1:ymax + 1, xmin - 1:xmax + 1]', '(3)'], {'stride': '(1)', 'padding': '(0)'}), '(self.open_list_map[:, :, ymin - 1:ymax + 1, xmin - 1:xmax + 1],\n 3, stride=1, padding=0)\n', (9028, 9120), True, 'from torch.nn import functional as F\n'), ((9998, 10054), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['self.open_list_map', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(self.open_list_map, 3, stride=1, padding=1)\n', (10010, 10054), True, 'from torch.nn import functional as F\n'), ((11134, 11190), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['self.open_list_map', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(self.open_list_map, 3, stride=1, padding=1)\n', (11146, 11190), True, 'from torch.nn import functional as F\n'), ((16438, 16457), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (16450, 16457), False, 'import torch\n'), ((9634, 9678), 'torch.nn.functional.pad', 'F.pad', (['self.g_map', '(1, 1, 1, 1)', '"""replicate"""'], {}), "(self.g_map, (1, 1, 1, 1), 'replicate')\n", (9639, 9678), True, 'from torch.nn import functional as F\n'), ((10770, 10814), 'torch.nn.functional.pad', 'F.pad', (['self.g_map', '(1, 1, 1, 1)', '"""replicate"""'], {}), "(self.g_map, (1, 1, 1, 1), 'replicate')\n", (10775, 10814), True, 'from torch.nn import functional as F\n')]
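The planner above relaxes a cost map by repeatedly taking, per cell, the minimum over its 3x3 neighborhood of `g + step_cost`, implemented as a fixed 9-channel convolution. A toy, self-contained version of that propagation step (the grid size and the unit step cost are fabricated for illustration):

import torch
import torch.nn.functional as F

# 3x3 identity kernels: output channel k carries the value of neighbor k,
# mirroring init_neights_to_channels above.
kernel = torch.zeros(9, 1, 3, 3)
for y in range(3):
    for x in range(3):
        kernel[x * 3 + y, 0, y, x] = 1.0

inf = 1e7
g_map = torch.full((1, 1, 8, 8), inf)
g_map[0, 0, 0, 0] = 0.0                 # start cell has zero cost
step_cost = torch.ones(1, 9, 8, 8)     # unit cost for every move

for _ in range(16):                     # enough sweeps to cover the toy grid
    padded = F.pad(g_map, (1, 1, 1, 1), mode="replicate")
    candidates = F.conv2d(padded, kernel) + step_cost
    g_map = torch.min(g_map, candidates.min(dim=1, keepdim=True)[0])

print(g_map[0, 0])                      # Chebyshev-like distances from the start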
#!/usr/bin/env python from __future__ import print_function def analyze_match_table(path): # Extract the instruction table. data = open(path).read() start = data.index("static const MatchEntry MatchTable") end = data.index("\n};\n", start) lines = data[start:end].split("\n")[1:] # Parse the instructions. insns = [] for ln in lines: ln = ln.split("{", 1)[1] ln = ln.rsplit("}", 1)[0] a,bc = ln.split("{", 1) b,c = bc.split("}", 1) code, string, converter, _ = [s.strip() for s in a.split(",")] items = [s.strip() for s in b.split(",")] _,features = [s.strip() for s in c.split(",")] assert string[0] == string[-1] == '"' string = string[1:-1] insns.append((code,string,converter,items,features)) # For every mnemonic, compute whether or not it can have a carry setting # operand and whether or not it can have a predication code. mnemonic_flags = {} for insn in insns: mnemonic = insn[1] items = insn[3] flags = mnemonic_flags[mnemonic] = mnemonic_flags.get(mnemonic, set()) flags.update(items) mnemonics = set(mnemonic_flags) ccout_mnemonics = set(m for m in mnemonics if 'MCK_CCOut' in mnemonic_flags[m]) condcode_mnemonics = set(m for m in mnemonics if 'MCK_CondCode' in mnemonic_flags[m]) noncondcode_mnemonics = mnemonics - condcode_mnemonics print(' || '.join('Mnemonic == "%s"' % m for m in ccout_mnemonics)) print(' || '.join('Mnemonic == "%s"' % m for m in noncondcode_mnemonics)) def main(): import sys if len(sys.argv) == 1: import os from lit.Util import capture llvm_obj_root = capture(["llvm-config", "--obj-root"]) file = os.path.join(llvm_obj_root, "lib/Target/ARM/ARMGenAsmMatcher.inc") elif len(sys.argv) == 2: file = sys.argv[1] else: raise NotImplementedError analyze_match_table(file) if __name__ == '__main__': main()
[ "lit.Util.capture", "os.path.join" ]
[((1851, 1889), 'lit.Util.capture', 'capture', (["['llvm-config', '--obj-root']"], {}), "(['llvm-config', '--obj-root'])\n", (1858, 1889), False, 'from lit.Util import capture\n'), ((1905, 1971), 'os.path.join', 'os.path.join', (['llvm_obj_root', '"""lib/Target/ARM/ARMGenAsmMatcher.inc"""'], {}), "(llvm_obj_root, 'lib/Target/ARM/ARMGenAsmMatcher.inc')\n", (1917, 1971), False, 'import os\n')]
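The heart of the script is the per-mnemonic aggregation of operand classes; a tiny illustration on fabricated rows shaped like the `(code, string, converter, items, features)` tuples built above:

insns = [
    ("0", "add", "cvt", ["MCK_GPR", "MCK_CCOut"], "f"),
    ("1", "add", "cvt", ["MCK_GPR", "MCK_CondCode"], "f"),
    ("2", "b", "cvt", ["MCK_CondCode"], "f"),
]
mnemonic_flags = {}
for insn in insns:
    # Union every instruction's operand classes into its mnemonic's flag set.
    mnemonic_flags.setdefault(insn[1], set()).update(insn[3])

ccout_mnemonics = set(m for m in mnemonic_flags if "MCK_CCOut" in mnemonic_flags[m])
print(ccout_mnemonics)  # -> {'add'}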
import pytest from unittest import TestCase from pyflamegpu import * import random as rand TEST_LEN = 256 INT8_MAX = 127 INT16_MAX = 32767 INT32_MAX = 2147483647 INT64_MAX = 9223372036854775807 class step_func_max(pyflamegpu.HostFunctionCallback): def __init__(self, Type, variable): super().__init__() self.Type = Type self.variable = variable self.max = 0 def run(self, FLAMEGPU): agent = FLAMEGPU.agent("agent") max_func = getattr(agent, f"max{self.Type}") self.max = max_func(self.variable) def assert_max(self, expected): assert self.max == expected class MiniSim(): def __init__(self, Type, variable, range_max=INT16_MAX): self.model = pyflamegpu.ModelDescription("model") self.agent = self.model.newAgent("agent") # add agent variable new_var_func = getattr(self.agent, f"newVariable{Type}") new_var_func(variable) # create a count step function self.step = step_func_max(Type, variable) self.model.addStepFunctionCallback(self.step) # create a population and set random values to be counted self.population = pyflamegpu.AgentVector(self.agent, TEST_LEN) rand.seed() # Seed does not matter self.expected_max = 0; for instance in self.population: value = rand.randint(0, range_max) if (value > self.expected_max): self.expected_max = value # set instance value (will be cast to correct type) set_var_func = getattr(instance, f"setVariable{Type}") set_var_func(variable, value) def run(self): self.cudaSimulation = pyflamegpu.CUDASimulation(self.model) self.cudaSimulation.SimulationConfig().steps = 1 self.cudaSimulation.setPopulationData(self.population) self.cudaSimulation.simulate() self.cudaSimulation.getPopulationData(self.population) # check assertions self.step.assert_max(self.expected_max) class HostReductionTest(TestCase): def test_MaxFloat(self): ms = MiniSim("Float", "float") ms.run() def test_MaxDouble(self): ms = MiniSim("Double", "double") ms.run() def test_MaxInt8(self): ms = MiniSim("Int8", "int8", INT8_MAX) ms.run() def test_MaxUInt8(self): ms = MiniSim("UInt8", "uint8", INT8_MAX) ms.run() def test_MaxInt16(self): ms = MiniSim("Int16", "int16", INT16_MAX) ms.run() def test_MaxUInt16(self): ms = MiniSim("UInt16", "uint16", INT16_MAX) ms.run() def test_MaxInt32(self): ms = MiniSim("Int32", "int32") ms.run() def test_MaxUInt32(self): ms = MiniSim("UInt32", "uint32") ms.run() def test_MaxInt64(self): ms = MiniSim("Int64", "int64") ms.run() def test_MaxUInt64(self): ms = MiniSim("UInt64", "uint64") ms.run()
[ "random.seed", "random.randint" ]
[((1305, 1316), 'random.seed', 'rand.seed', ([], {}), '()\n', (1314, 1316), True, 'import random as rand\n'), ((1433, 1459), 'random.randint', 'rand.randint', (['(0)', 'range_max'], {}), '(0, range_max)\n', (1445, 1459), True, 'import random as rand\n')]
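The test above relies on `getattr`-based dispatch to exercise one template across every integer width. The same pattern in plain Python, with a stand-in object instead of FLAME GPU's host API (names here are illustrative):

import random


class FakeAgent:
    """Stand-in for the host agent exposing max<Type>() reductions."""

    def __init__(self, values):
        self._values = values

    def maxInt16(self, _variable):
        return max(self._values)


random.seed(0)
values = [random.randint(0, 32767) for _ in range(256)]
agent = FakeAgent(values)
max_func = getattr(agent, "max" + "Int16")  # same dispatch as step_func_max
assert max_func("int16") == max(values)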
#
# JiWER - Jitsi Word Error Rate
#
# Copyright @ 2018 - present 8x8, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This file implements methods for calculating a number of similarity error
measures between a ground-truth sentence and a hypothesis sentence, which are
commonly used to measure the performance of an automatic speech recognition
(ASR) system.

The following measures are implemented:

- Word Error Rate (WER), which is where this library got its name from. This
  has long been (and arguably still is) the de facto standard for computing
  ASR performance.
- Match Error Rate (MER)
- Word Information Lost (WIL)
- Word Information Preserved (WIP)
"""

import Levenshtein

from typing import List, Mapping, Tuple, Union

import jiwer.transforms as tr

__all__ = ["wer", "mer", "wil", "wip", "compute_measures", "ops"]

################################################################################
# Implementation of the WER method, exposed publicly

_default_transform = tr.Compose(
    [
        tr.RemoveMultipleSpaces(),
        tr.Strip(),
        tr.SentencesToListOfWords(),
        tr.RemoveEmptyStrings(),
    ]
)

_standardize_transform = tr.Compose(
    [
        tr.ToLowerCase(),
        tr.ExpandCommonEnglishContractions(),
        tr.RemoveKaldiNonWords(),
        tr.RemoveWhiteSpace(replace_by_space=True),
    ]
)


def ops(
    truth: Union[str, List[str]],
    hypothesis: Union[str, List[str]],
    truth_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
    hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
    **kwargs
):
    """
    Return the edit operations needed to transform the truth into the
    hypothesis. See `get_operations` for details on the arguments.
    """
    all_ops = get_operations(
        truth, hypothesis, truth_transform, hypothesis_transform, **kwargs
    )

    return all_ops


def wer(
    truth: Union[str, List[str]],
    hypothesis: Union[str, List[str]],
    truth_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
    hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
    **kwargs
) -> float:
    """
    Calculate word error rate (WER) between a set of ground-truth sentences and
    a set of hypothesis sentences.

    See `compute_measures` for details on the arguments.

    :return: WER as a floating point number
    """
    measures = compute_measures(
        truth, hypothesis, truth_transform, hypothesis_transform, **kwargs
    )
    return measures["wer"]


def mer(
    truth: Union[str, List[str]],
    hypothesis: Union[str, List[str]],
    truth_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
    hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
    **kwargs
) -> float:
    """
    Calculate match error rate (MER) between a set of ground-truth sentences and
    a set of hypothesis sentences.

    See `compute_measures` for details on the arguments.
:return: MER as a floating point number """ measures = compute_measures( truth, hypothesis, truth_transform, hypothesis_transform, **kwargs ) return measures["mer"] def wip( truth: Union[str, List[str]], hypothesis: Union[str, List[str]], truth_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform, hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform, **kwargs ) -> float: """ Calculate Word Information Preserved (WIP) between a set of ground-truth sentences and a set of hypothesis sentences. See `compute_measures` for details on the arguments. :return: WIP as a floating point number """ measures = compute_measures( truth, hypothesis, truth_transform, hypothesis_transform, **kwargs ) return measures["wip"] def wil( truth: Union[str, List[str]], hypothesis: Union[str, List[str]], truth_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform, hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform, **kwargs ) -> float: """ Calculate Word Information Lost (WIL) between a set of ground-truth sentences and a set of hypothesis sentences. See `compute_measures` for details on the arguments. :return: WIL as a floating point number """ measures = compute_measures( truth, hypothesis, truth_transform, hypothesis_transform, **kwargs ) return measures["wil"] def compute_measures( truth: Union[str, List[str]], hypothesis: Union[str, List[str]], truth_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform, hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform, **kwargs ) -> Mapping[str, float]: """ Calculate error measures between a set of ground-truth sentences and a set of hypothesis sentences. The set of sentences can be given as a string or a list of strings. A string input is assumed to be a single sentence. A list of strings is assumed to be multiple sentences. Each word in a sentence is separated by one or more spaces. A sentence is not expected to end with a specific token (such as a `.`). If the ASR does delimit sentences it is expected that these tokens are filtered out. The optional `transforms` arguments can be used to apply pre-processing to respectively the ground truth and hypotheses input. Note that the transform should ALWAYS include `SentencesToListOfWords`, as that is the expected input. 
    :param truth: the ground-truth sentence(s) as a string or list of strings
    :param hypothesis: the hypothesis sentence(s) as a string or list of strings
    :param truth_transform: the transformation to apply on the truth input
    :param hypothesis_transform: the transformation to apply on the hypothesis input
    :return: a dict with WER, MER, WIP and WIL measures as floating point numbers
    """
    # deal with old API
    if "standardize" in kwargs:
        truth = _standardize_transform(truth)
        hypothesis = _standardize_transform(hypothesis)
    if "words_to_filter" in kwargs:
        t = tr.RemoveSpecificWords(kwargs["words_to_filter"])
        truth = t(truth)
        hypothesis = t(hypothesis)

    # Preprocess truth and hypothesis
    truth, hypothesis = _preprocess(
        truth, hypothesis, truth_transform, hypothesis_transform
    )

    # Get the operation counts (#hits, #substitutions, #deletions, #insertions)
    H, S, D, I = _get_operation_counts(truth, hypothesis)

    # Compute Word Error Rate
    wer = float(S + D + I) / float(H + S + D)

    # Compute Match Error Rate
    mer = float(S + D + I) / float(H + S + D + I)

    # Compute Word Information Preserved
    wip = (float(H) / len(truth)) * (float(H) / len(hypothesis)) if hypothesis else 0

    # Compute Word Information Lost
    wil = 1 - wip

    return {
        "wer": wer,
        "mer": mer,
        "wil": wil,
        "wip": wip,
    }


def get_operations(
    truth: Union[str, List[str]],
    hypothesis: Union[str, List[str]],
    truth_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
    hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
    **kwargs
):
    """
    Return the Levenshtein edit operations needed to turn the truth into the
    hypothesis, after both have been preprocessed.
    """
    # deal with old API
    if "standardize" in kwargs:
        truth = _standardize_transform(truth)
        hypothesis = _standardize_transform(hypothesis)
    if "words_to_filter" in kwargs:
        t = tr.RemoveSpecificWords(kwargs["words_to_filter"])
        truth = t(truth)
        hypothesis = t(hypothesis)

    # Preprocess truth and hypothesis
    truth, hypothesis = _preprocess(
        truth, hypothesis, truth_transform, hypothesis_transform
    )

    # Get the edit operations (insert / delete / replace)
    operations = _get_editops(truth, hypothesis)

    return operations


################################################################################
# Implementation of helper methods


def _preprocess(
    truth: Union[str, List[str]],
    hypothesis: Union[str, List[str]],
    truth_transform: Union[tr.Compose, tr.AbstractTransform],
    hypothesis_transform: Union[tr.Compose, tr.AbstractTransform],
) -> Tuple[str, str]:
    """
    Pre-process the truth and hypothesis into a form that Levenshtein can handle.

    :param truth: the ground-truth sentence(s) as a string or list of strings
    :param hypothesis: the hypothesis sentence(s) as a string or list of strings
    :param truth_transform: the transformation to apply on the truth input
    :param hypothesis_transform: the transformation to apply on the hypothesis input
    :return: the preprocessed truth and hypothesis
    """
    # Apply transforms. By default, it collapses input to a list of words
    truth = truth_transform(truth)
    hypothesis = hypothesis_transform(hypothesis)

    # raise an error if the ground truth is empty
    if len(truth) == 0:
        raise ValueError("the ground truth cannot be empty")

    # tokenize each word into an integer
    vocabulary = set(truth + hypothesis)
    word2char = dict(zip(vocabulary, range(len(vocabulary))))

    truth_chars = [chr(word2char[w]) for w in truth]
    hypothesis_chars = [chr(word2char[w]) for w in hypothesis]

    truth_str = "".join(truth_chars)
    hypothesis_str = "".join(hypothesis_chars)

    return truth_str, hypothesis_str


def _get_operation_counts(
    source_string: str, destination_string: str
) -> Tuple[int, int, int, int]:
    """
    Check how many edit operations (delete, insert, replace) are required to
    transform the source string into the destination string. The number of hits
    can be given by subtracting the number of deletes and substitutions from the
    total length of the source string.

    :param source_string: the source string to transform into the destination string
    :param destination_string: the destination to transform the source string into
    :return: a tuple of #hits, #substitutions, #deletions, #insertions
    """
    editops = Levenshtein.editops(source_string, destination_string)

    substitutions = sum(1 if op[0] == "replace" else 0 for op in editops)
    deletions = sum(1 if op[0] == "delete" else 0 for op in editops)
    insertions = sum(1 if op[0] == "insert" else 0 for op in editops)
    hits = len(source_string) - (substitutions + deletions)

    return hits, substitutions, deletions, insertions


def _get_editops(
    source_string: str, destination_string: str
):
    editops = Levenshtein.editops(source_string, destination_string)
    # type(editops)
    # substitutions = sum(1 if op[0] == "replace" else 0 for op in editops)
    # deletions = sum(1 if op[0] == "delete" else 0 for op in editops)
    # insertions = sum(1 if op[0] == "insert" else 0 for op in editops)
    # hits = len(source_string) - (substitutions + deletions)

    return editops
[ "jiwer.transforms.Strip", "jiwer.transforms.SentencesToListOfWords", "jiwer.transforms.RemoveMultipleSpaces", "jiwer.transforms.RemoveSpecificWords", "Levenshtein.editops", "jiwer.transforms.RemoveKaldiNonWords", "jiwer.transforms.RemoveWhiteSpace", "jiwer.transforms.ToLowerCase", "jiwer.transforms.RemoveEmptyStrings", "jiwer.transforms.ExpandCommonEnglishContractions" ]
[((10573, 10627), 'Levenshtein.editops', 'Levenshtein.editops', (['source_string', 'destination_string'], {}), '(source_string, destination_string)\n', (10592, 10627), False, 'import Levenshtein\n'), ((11047, 11101), 'Levenshtein.editops', 'Levenshtein.editops', (['source_string', 'destination_string'], {}), '(source_string, destination_string)\n', (11066, 11101), False, 'import Levenshtein\n'), ((1532, 1557), 'jiwer.transforms.RemoveMultipleSpaces', 'tr.RemoveMultipleSpaces', ([], {}), '()\n', (1555, 1557), True, 'import jiwer.transforms as tr\n'), ((1567, 1577), 'jiwer.transforms.Strip', 'tr.Strip', ([], {}), '()\n', (1575, 1577), True, 'import jiwer.transforms as tr\n'), ((1587, 1614), 'jiwer.transforms.SentencesToListOfWords', 'tr.SentencesToListOfWords', ([], {}), '()\n', (1612, 1614), True, 'import jiwer.transforms as tr\n'), ((1624, 1647), 'jiwer.transforms.RemoveEmptyStrings', 'tr.RemoveEmptyStrings', ([], {}), '()\n', (1645, 1647), True, 'import jiwer.transforms as tr\n'), ((1709, 1725), 'jiwer.transforms.ToLowerCase', 'tr.ToLowerCase', ([], {}), '()\n', (1723, 1725), True, 'import jiwer.transforms as tr\n'), ((1735, 1771), 'jiwer.transforms.ExpandCommonEnglishContractions', 'tr.ExpandCommonEnglishContractions', ([], {}), '()\n', (1769, 1771), True, 'import jiwer.transforms as tr\n'), ((1781, 1805), 'jiwer.transforms.RemoveKaldiNonWords', 'tr.RemoveKaldiNonWords', ([], {}), '()\n', (1803, 1805), True, 'import jiwer.transforms as tr\n'), ((1815, 1857), 'jiwer.transforms.RemoveWhiteSpace', 'tr.RemoveWhiteSpace', ([], {'replace_by_space': '(True)'}), '(replace_by_space=True)\n', (1834, 1857), True, 'import jiwer.transforms as tr\n'), ((6638, 6687), 'jiwer.transforms.RemoveSpecificWords', 'tr.RemoveSpecificWords', (["kwargs['words_to_filter']"], {}), "(kwargs['words_to_filter'])\n", (6660, 6687), True, 'import jiwer.transforms as tr\n'), ((7990, 8039), 'jiwer.transforms.RemoveSpecificWords', 'tr.RemoveSpecificWords', (["kwargs['words_to_filter']"], {}), "(kwargs['words_to_filter'])\n", (8012, 8039), True, 'import jiwer.transforms as tr\n')]
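A worked instance of the four formulas in `compute_measures` above; the alignment counts are hand-derived for a fabricated sentence pair:

# truth = "the cat sat" (3 words), hypothesis = "the bat sat down" (4 words)
# alignment: the->the (hit), cat->bat (substitution), sat->sat (hit), +down (insertion)
H, S, D, I = 2, 1, 0, 1

wer = (S + D + I) / (H + S + D)      # 2/3 ~= 0.667
mer = (S + D + I) / (H + S + D + I)  # 2/4  = 0.5
wip = (H / 3) * (H / 4)              # 1/3 ~= 0.333
wil = 1 - wip                        # 2/3 ~= 0.667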
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/logging.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database

# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor.FileDescriptor(
    name="google/api/logging.proto",
    package="google.api",
    syntax="proto3",
    serialized_options=b"\n\016com.google.apiB\014LoggingProtoP\001ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\242\002\004GAPI",
    create_key=_descriptor._internal_create_key,
    serialized_pb=b'\n\x18google/api/logging.proto\x12\ngoogle.api"\xd7\x01\n\x07Logging\x12\x45\n\x15producer_destinations\x18\x01 \x03(\x0b\x32&.google.api.Logging.LoggingDestination\x12\x45\n\x15\x63onsumer_destinations\x18\x02 \x03(\x0b\x32&.google.api.Logging.LoggingDestination\x1a>\n\x12LoggingDestination\x12\x1a\n\x12monitored_resource\x18\x03 \x01(\t\x12\x0c\n\x04logs\x18\x01 \x03(\tBn\n\x0e\x63om.google.apiB\x0cLoggingProtoP\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\xa2\x02\x04GAPIb\x06proto3',
)


_LOGGING_LOGGINGDESTINATION = _descriptor.Descriptor(
    name="LoggingDestination",
    full_name="google.api.Logging.LoggingDestination",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="monitored_resource",
            full_name="google.api.Logging.LoggingDestination.monitored_resource",
            index=0, number=3, type=9, cpp_type=9, label=1,
            has_default_value=False, default_value=b"".decode("utf-8"),
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None, serialized_options=None,
            file=DESCRIPTOR, create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="logs",
            full_name="google.api.Logging.LoggingDestination.logs",
            index=1, number=1, type=9, cpp_type=9, label=3,
            has_default_value=False, default_value=[],
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None, serialized_options=None,
            file=DESCRIPTOR, create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=194,
    serialized_end=256,
)

_LOGGING = _descriptor.Descriptor(
    name="Logging",
    full_name="google.api.Logging",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="producer_destinations",
            full_name="google.api.Logging.producer_destinations",
            index=0, number=1, type=11, cpp_type=10, label=3,
            has_default_value=False, default_value=[],
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None, serialized_options=None,
            file=DESCRIPTOR, create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="consumer_destinations",
            full_name="google.api.Logging.consumer_destinations",
            index=1, number=2, type=11, cpp_type=10, label=3,
            has_default_value=False, default_value=[],
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None, serialized_options=None,
            file=DESCRIPTOR, create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[_LOGGING_LOGGINGDESTINATION],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=41,
    serialized_end=256,
)

_LOGGING_LOGGINGDESTINATION.containing_type = _LOGGING
_LOGGING.fields_by_name[
    "producer_destinations"
].message_type = _LOGGING_LOGGINGDESTINATION
_LOGGING.fields_by_name[
    "consumer_destinations"
].message_type = _LOGGING_LOGGINGDESTINATION
DESCRIPTOR.message_types_by_name["Logging"] = _LOGGING
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

Logging = _reflection.GeneratedProtocolMessageType(
    "Logging",
    (_message.Message,),
    {
        "LoggingDestination": _reflection.GeneratedProtocolMessageType(
            "LoggingDestination",
            (_message.Message,),
            {
                "DESCRIPTOR": _LOGGING_LOGGINGDESTINATION,
                "__module__": "google.api.logging_pb2"
                # @@protoc_insertion_point(class_scope:google.api.Logging.LoggingDestination)
            },
        ),
        "DESCRIPTOR": _LOGGING,
        "__module__": "google.api.logging_pb2"
        # @@protoc_insertion_point(class_scope:google.api.Logging)
    },
)
_sym_db.RegisterMessage(Logging)
_sym_db.RegisterMessage(Logging.LoggingDestination)


DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
[ "google.protobuf.symbol_database.Default", "google.protobuf.descriptor.FieldDescriptor", "google.protobuf.reflection.GeneratedProtocolMessageType", "google.protobuf.descriptor.FileDescriptor" ]
[((1000, 1026), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (1024, 1026), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((1042, 1883), 'google.protobuf.descriptor.FileDescriptor', '_descriptor.FileDescriptor', ([], {'name': '"""google/api/logging.proto"""', 'package': '"""google.api"""', 'syntax': '"""proto3"""', 'serialized_options': "b'\\n\\x0ecom.google.apiB\\x0cLoggingProtoP\\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\\xa2\\x02\\x04GAPI'", 'create_key': '_descriptor._internal_create_key', 'serialized_pb': 'b\'\\n\\x18google/api/logging.proto\\x12\\ngoogle.api"\\xd7\\x01\\n\\x07Logging\\x12E\\n\\x15producer_destinations\\x18\\x01 \\x03(\\x0b2&.google.api.Logging.LoggingDestination\\x12E\\n\\x15consumer_destinations\\x18\\x02 \\x03(\\x0b2&.google.api.Logging.LoggingDestination\\x1a>\\n\\x12LoggingDestination\\x12\\x1a\\n\\x12monitored_resource\\x18\\x03 \\x01(\\t\\x12\\x0c\\n\\x04logs\\x18\\x01 \\x03(\\tBn\\n\\x0ecom.google.apiB\\x0cLoggingProtoP\\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\\xa2\\x02\\x04GAPIb\\x06proto3\''}), '(name=\'google/api/logging.proto\', package=\n \'google.api\', syntax=\'proto3\', serialized_options=\n b\'\\n\\x0ecom.google.apiB\\x0cLoggingProtoP\\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\\xa2\\x02\\x04GAPI\'\n , create_key=_descriptor._internal_create_key, serialized_pb=\n b\'\\n\\x18google/api/logging.proto\\x12\\ngoogle.api"\\xd7\\x01\\n\\x07Logging\\x12E\\n\\x15producer_destinations\\x18\\x01 \\x03(\\x0b2&.google.api.Logging.LoggingDestination\\x12E\\n\\x15consumer_destinations\\x18\\x02 \\x03(\\x0b2&.google.api.Logging.LoggingDestination\\x1a>\\n\\x12LoggingDestination\\x12\\x1a\\n\\x12monitored_resource\\x18\\x03 \\x01(\\t\\x12\\x0c\\n\\x04logs\\x18\\x01 \\x03(\\tBn\\n\\x0ecom.google.apiB\\x0cLoggingProtoP\\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\\xa2\\x02\\x04GAPIb\\x06proto3\'\n )\n', (1068, 1883), True, 'from google.protobuf import descriptor as _descriptor\n'), ((5836, 6013), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""LoggingDestination"""', '(_message.Message,)', "{'DESCRIPTOR': _LOGGING_LOGGINGDESTINATION, '__module__':\n 'google.api.logging_pb2'}"], {}), "('LoggingDestination', (_message.\n Message,), {'DESCRIPTOR': _LOGGING_LOGGINGDESTINATION, '__module__':\n 'google.api.logging_pb2'})\n", (5876, 6013), True, 'from google.protobuf import reflection as _reflection\n'), ((2825, 3220), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""logs"""', 'full_name': '"""google.api.Logging.LoggingDestination.logs"""', 'index': '(1)', 'number': '(1)', 'type': '(9)', 'cpp_type': '(9)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='logs', full_name=\n 'google.api.Logging.LoggingDestination.logs', index=1, number=1, type=9,\n cpp_type=9, label=3, has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR,\n create_key=_descriptor._internal_create_key)\n", (2852, 
3220), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3876, 4288), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""producer_destinations"""', 'full_name': '"""google.api.Logging.producer_destinations"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='producer_destinations', full_name=\n 'google.api.Logging.producer_destinations', index=0, number=1, type=11,\n cpp_type=10, label=3, has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR,\n create_key=_descriptor._internal_create_key)\n", (3903, 4288), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4491, 4903), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""consumer_destinations"""', 'full_name': '"""google.api.Logging.consumer_destinations"""', 'index': '(1)', 'number': '(2)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='consumer_destinations', full_name=\n 'google.api.Logging.consumer_destinations', index=1, number=2, type=11,\n cpp_type=10, label=3, has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR,\n create_key=_descriptor._internal_create_key)\n", (4518, 4903), True, 'from google.protobuf import descriptor as _descriptor\n')]
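The generated module above only defines descriptors and message classes; a minimal usage sketch (assuming it is importable as `google.api.logging_pb2`, e.g. from `googleapis-common-protos`; the resource and log names are invented):

from google.api import logging_pb2

# Build a Logging config with one consumer destination.
dest = logging_pb2.Logging.LoggingDestination(
    monitored_resource="library.googleapis.com/branch",
    logs=["activity_history"],
)
cfg = logging_pb2.Logging(consumer_destinations=[dest])
data = cfg.SerializeToString()                 # wire-format bytes
roundtrip = logging_pb2.Logging.FromString(data)
assert roundtrip.consumer_destinations[0].logs == ["activity_history"]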
#!/usr/bin/env python

##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import json
import os
import re
import shlex
import shutil
import time

from six import string_types

from platforms.platform_base import PlatformBase
from utils.custom_logger import getLogger
from utils.utilities import getRunStatus, setRunStatus
from profilers.profilers import getProfilerByUsage


class AndroidPlatform(PlatformBase):
    def __init__(self, tempdir, adb, args):
        super(AndroidPlatform, self).__init__(
            tempdir, args.android_dir, adb, args.hash_platform_mapping)
        self.args = args
        platform = adb.shell(
            ['getprop', 'ro.product.model'], default="")[0].strip() + \
            '-' + \
            adb.shell(
                ['getprop', 'ro.build.version.release'], default="")[0].strip() + \
            '-' + \
            adb.shell(['getprop', 'ro.build.version.sdk'], default="")[0].strip()
        self.type = "android"
        self.setPlatform(platform)
        self.setPlatformHash(adb.device)
        self._setLogCatSize()
        self.app = None
        if self.args.set_freq:
            self.util.setFrequency(self.args.set_freq)

    def _setLogCatSize(self):
        repeat = True
        size = 131072
        while (repeat and size > 256):
            repeat = False
            # We know this command may fail. Avoid propagating this
            # failure to the upstream
            success = getRunStatus()
            ret = self.util.logcat("-G", str(size) + "K")
            setRunStatus(success, overwrite=True)
            if len(ret) > 0 and ret[0].find("failed to") >= 0:
                repeat = True
                size = int(size / 2)

    def fileExistsOnPlatform(self, files):
        if isinstance(files, string_types):
            exists = self.util.shell(
                "test -e {} && echo True || echo False".format(files).split(" "))
            if "True" not in exists:
                return False
            return True
        elif isinstance(files, list):
            for f in files:
                if not self.fileExistsOnPlatform(f):
                    return False
            return True
        raise TypeError(
            "fileExistsOnPlatform takes either a string or list of strings.")

    def preprocess(self, *args, **kwargs):
        assert "programs" in kwargs, "Must have programs specified"
        programs = kwargs["programs"]
        benchmark = kwargs["benchmark"]
        # find the first zipped app file
        assert "program" in programs, "program is not specified"
        if "platform" in benchmark["model"] and \
                benchmark["model"]["platform"].startswith("android"):
            if "app" in benchmark["model"]:
                self.app = benchmark["model"]["app"]
        if not self.app:
            if "intent.txt" in programs:
                # temporary to rename the program with adb suffix
                with open(programs["intent.txt"], "r") as f:
                    self.app = json.load(f)
            else:
                return
        # Uninstall if it exists
        package = self.util.shell(
            ["pm", "list", "packages", self.app["package"]])
        if len(package) > 0 and \
                package[0].strip() == "package:" + self.app["package"]:
            self.util.shell(["pm", "uninstall", self.app["package"]])
        # temporary fix to allow install apk files
        if not programs["program"].endswith(".apk"):
            new_name = programs["program"] + ".apk"
            shutil.copyfile(programs["program"], new_name)
            programs["program"] = new_name
        self.util.run(["install", programs["program"]])
        del programs["program"]

    def rebootDevice(self):
        self.util.reboot()
        self.waitForDevice(180)

        # Need to wait a bit more after the device is rebooted
        time.sleep(20)

        # may need to set log size again after reboot
        self._setLogCatSize()
        if self.args.set_freq:
            self.util.setFrequency(self.args.set_freq)

    def runBenchmark(self, cmd, *args, **kwargs):
        if not isinstance(cmd, list):
            cmd = shlex.split(cmd)
        # meta is used to store any data about the benchmark run
        # that is not the output of the command
        meta = {}

        # We know this command may fail. Avoid propagating this
        # failure to the upstream
        success = getRunStatus()
        self.util.logcat('-b', 'all', '-c')
        setRunStatus(success, overwrite=True)

        if self.app:
            log, meta = self.runAppBenchmark(cmd, *args, **kwargs)
        else:
            log, meta = self.runBinaryBenchmark(cmd, *args, **kwargs)
        return log, meta

    def runAppBenchmark(self, cmd, *args, **kwargs):
        arguments = self.getPairedArguments(cmd)
        argument_filename = os.path.join(self.tempdir, "benchmark.json")
        arguments_json = json.dumps(arguments, indent=2, sort_keys=True)
        with open(argument_filename, "w") as f:
            f.write(arguments_json)
        tgt_argument_filename = os.path.join(self.tgt_dir, "benchmark.json")
        activity = os.path.join(self.app["package"], self.app["activity"])
        self.util.push(argument_filename, tgt_argument_filename)
        platform_args = {}
        meta = {}  # runBenchmark unpacks (log, meta); always return a pair
        if "platform_args" in kwargs:
            platform_args = kwargs["platform_args"]
            if "power" in platform_args and platform_args["power"]:
                platform_args["non_blocking"] = True
                self.util.shell(["am", "start", "-S", activity])
                return [], meta
            if platform_args.get("enable_profiling", False):
                getLogger().warn(
                    "Profiling for app benchmarks is not implemented.")
        patterns = []
        pattern = re.compile(
            r".*{}.*{}.*BENCHMARK_DONE".format(self.app["package"],
                                               self.app["activity"]))
        patterns.append(pattern)
        pattern = re.compile(
            r".*ActivityManager: Killing .*{}".format(self.app["package"]))
        patterns.append(pattern)
        platform_args["patterns"] = patterns
        self.util.shell(["am", "start", "-S", "-W", activity])
        log_logcat = self.util.run(["logcat"], **platform_args)
        self.util.shell(["am", "force-stop", self.app["package"]])
        return log_logcat, meta

    def runBinaryBenchmark(self, cmd, *args, **kwargs):
        log_to_screen_only = 'log_to_screen_only' in kwargs and \
            kwargs['log_to_screen_only']
        platform_args = {}
        meta = {}
        if "platform_args" in kwargs:
            platform_args = kwargs["platform_args"]
            if "taskset" in platform_args:
                taskset = platform_args["taskset"]
                cmd = ["taskset", taskset] + cmd
                del platform_args["taskset"]
            if "sleep_before_run" in platform_args:
                sleep_before_run = str(platform_args["sleep_before_run"])
                cmd = ["sleep", sleep_before_run, "&&"] + cmd
                del platform_args["sleep_before_run"]
            if "power" in platform_args and platform_args["power"]:
                # launch settings page to prevent the phone
                # from going into sleep mode
                self.util.shell(["am", "start", "-a",
                                 "android.settings.SETTINGS"])
                time.sleep(1)
                cmd = ["nohup"] + ["sh", "-c", "'" + " ".join(cmd) + "'"] + \
                    [">", "/dev/null", "2>&1"]
                platform_args["non_blocking"] = True
                del platform_args["power"]
            if platform_args.get("enable_profiling", False):
                # attempt to run with profiling, else fall back to a
                # standard run
                success = getRunStatus()
                try:
                    simpleperf = getProfilerByUsage(
                        "android",
                        None,
                        platform=self,
                        model_name=platform_args.get("model_name", None),
                        cmd=cmd)
                    if simpleperf:
                        f = simpleperf.start()
                        output, meta = f.result()
                        log_logcat = []
                        if not log_to_screen_only:
                            log_logcat = self.util.logcat('-d')
                        return output + log_logcat, meta
                # if this has not succeeded for some reason, reset the run
                # status and run without profiling.
                except RuntimeError as ex:
                    getLogger().error(
                        "An error occurred when running Simpleperf profiler. "
                        "{}".format(ex))
                    setRunStatus(success, overwrite=True)
                except FileNotFoundError as ex:
                    getLogger().error(
                        "An error occurred when running Simpleperf profiler. "
                        "{}".format(ex))
                    setRunStatus(success, overwrite=True)
                except Exception:
                    getLogger().exception(
                        "An error has occurred when running Simpleperf "
                        "profiler.")
                    setRunStatus(success, overwrite=True)
        log_screen = self.util.shell(cmd, **platform_args)
        log_logcat = []
        if not log_to_screen_only:
            log_logcat = self.util.logcat('-d')
        return log_screen + log_logcat, meta

    def collectMetaData(self, info):
        meta = super(AndroidPlatform, self).collectMetaData(info)
        meta['platform_hash'] = self.platform_hash
        return meta

    def killProgram(self, program):
        basename = os.path.basename(program)
        # if the program doesn't exist, the grep may fail;
        # do not update the status code
        success = getRunStatus()
        res = self.util.shell(["ps", "|", "grep", basename])
        setRunStatus(success, overwrite=True)
        if len(res) == 0:
            return
        results = res[0].split("\n")
        pattern = re.compile(r"^shell\s+(\d+)\s+")
        for result in results:
            match = pattern.match(result)
            if match:
                pid = match.group(1)
                self.util.shell(["kill", pid])

    def waitForDevice(self, timeout):
        period = int(timeout / 20) + 1
        num = int(timeout / period)
        count = 0
        ls = []
        while len(ls) == 0 and count < num:
            ls = self.util.shell(['ls', self.tgt_dir])
            time.sleep(period)
            count += 1  # count attempts so the loop honors the timeout
        if len(ls) == 0:
            getLogger().error("Cannot reach device {} ({}) after {}.".
                               format(self.platform, self.platform_hash,
                                      timeout))
[ "utils.utilities.setRunStatus", "utils.utilities.getRunStatus", "json.load", "os.path.basename", "shlex.split", "json.dumps", "time.sleep", "shutil.copyfile", "utils.custom_logger.getLogger", "os.path.join", "re.compile" ]
[((4308, 4322), 'time.sleep', 'time.sleep', (['(20)'], {}), '(20)\n', (4318, 4322), False, 'import time\n'), ((4866, 4880), 'utils.utilities.getRunStatus', 'getRunStatus', ([], {}), '()\n', (4878, 4880), False, 'from utils.utilities import getRunStatus, setRunStatus\n'), ((4933, 4970), 'utils.utilities.setRunStatus', 'setRunStatus', (['success'], {'overwrite': '(True)'}), '(success, overwrite=True)\n', (4945, 4970), False, 'from utils.utilities import getRunStatus, setRunStatus\n'), ((5299, 5343), 'os.path.join', 'os.path.join', (['self.tempdir', '"""benchmark.json"""'], {}), "(self.tempdir, 'benchmark.json')\n", (5311, 5343), False, 'import os\n'), ((5369, 5416), 'json.dumps', 'json.dumps', (['arguments'], {'indent': '(2)', 'sort_keys': '(True)'}), '(arguments, indent=2, sort_keys=True)\n', (5379, 5416), False, 'import json\n'), ((5533, 5577), 'os.path.join', 'os.path.join', (['self.tgt_dir', '"""benchmark.json"""'], {}), "(self.tgt_dir, 'benchmark.json')\n", (5545, 5577), False, 'import os\n'), ((5597, 5652), 'os.path.join', 'os.path.join', (["self.app['package']", "self.app['activity']"], {}), "(self.app['package'], self.app['activity'])\n", (5609, 5652), False, 'import os\n'), ((9932, 9957), 'os.path.basename', 'os.path.basename', (['program'], {}), '(program)\n', (9948, 9957), False, 'import os\n'), ((10070, 10084), 'utils.utilities.getRunStatus', 'getRunStatus', ([], {}), '()\n', (10082, 10084), False, 'from utils.utilities import getRunStatus, setRunStatus\n'), ((10154, 10191), 'utils.utilities.setRunStatus', 'setRunStatus', (['success'], {'overwrite': '(True)'}), '(success, overwrite=True)\n', (10166, 10191), False, 'from utils.utilities import getRunStatus, setRunStatus\n'), ((10292, 10326), 're.compile', 're.compile', (['"""^shell\\\\s+(\\\\d+)\\\\s+"""'], {}), "('^shell\\\\s+(\\\\d+)\\\\s+')\n", (10302, 10326), False, 'import re\n'), ((1884, 1898), 'utils.utilities.getRunStatus', 'getRunStatus', ([], {}), '()\n', (1896, 1898), False, 'from utils.utilities import getRunStatus, setRunStatus\n'), ((1969, 2006), 'utils.utilities.setRunStatus', 'setRunStatus', (['success'], {'overwrite': '(True)'}), '(success, overwrite=True)\n', (1981, 2006), False, 'from utils.utilities import getRunStatus, setRunStatus\n'), ((3969, 4015), 'shutil.copyfile', 'shutil.copyfile', (["programs['program']", 'new_name'], {}), "(programs['program'], new_name)\n", (3984, 4015), False, 'import shutil\n'), ((4600, 4616), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (4611, 4616), False, 'import shlex\n'), ((10763, 10781), 'time.sleep', 'time.sleep', (['period'], {}), '(period)\n', (10773, 10781), False, 'import time\n'), ((7849, 7862), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7859, 7862), False, 'import time\n'), ((8250, 8264), 'utils.utilities.getRunStatus', 'getRunStatus', ([], {}), '()\n', (8262, 8264), False, 'from utils.utilities import getRunStatus, setRunStatus\n'), ((3423, 3435), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3432, 3435), False, 'import json\n'), ((10819, 10830), 'utils.custom_logger.getLogger', 'getLogger', ([], {}), '()\n', (10828, 10830), False, 'from utils.custom_logger import getLogger\n'), ((6123, 6134), 'utils.custom_logger.getLogger', 'getLogger', ([], {}), '()\n', (6132, 6134), False, 'from utils.custom_logger import getLogger\n'), ((9046, 9083), 'utils.utilities.setRunStatus', 'setRunStatus', (['success'], {'overwrite': '(True)'}), '(success, overwrite=True)\n', (9058, 9083), False, 'from utils.utilities import getRunStatus, setRunStatus\n'), ((9259, 
9296), 'utils.utilities.setRunStatus', 'setRunStatus', (['success'], {'overwrite': '(True)'}), '(success, overwrite=True)\n', (9271, 9296), False, 'from utils.utilities import getRunStatus, setRunStatus\n'), ((9452, 9489), 'utils.utilities.setRunStatus', 'setRunStatus', (['success'], {'overwrite': '(True)'}), '(success, overwrite=True)\n', (9464, 9489), False, 'from utils.utilities import getRunStatus, setRunStatus\n'), ((8939, 8950), 'utils.custom_logger.getLogger', 'getLogger', ([], {}), '()\n', (8948, 8950), False, 'from utils.custom_logger import getLogger\n'), ((9152, 9163), 'utils.custom_logger.getLogger', 'getLogger', ([], {}), '()\n', (9161, 9163), False, 'from utils.custom_logger import getLogger\n'), ((9351, 9362), 'utils.custom_logger.getLogger', 'getLogger', ([], {}), '()\n', (9360, 9362), False, 'from utils.custom_logger import getLogger\n')]
""" Copyright (c) 2021, NVIDIA CORPORATION. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import argparse import os import sys sys.path.append('../../../') # where to find plugin import sparse_operation_kit as sok import utils import tensorflow as tf import numpy as np import horovod.tensorflow as hvd class SOKDenseDemo(tf.keras.models.Model): def __init__(self, max_vocabulary_size_per_gpu, embedding_vec_size, slot_num, nnz_per_slot, num_dense_layers, num_dense_units, **kwargs): super(SOKDenseDemo, self).__init__(**kwargs) self.max_vocabulary_size_per_gpu = max_vocabulary_size_per_gpu self.slot_num = slot_num self.nnz_per_slot = nnz_per_slot self.num_dense_layers = num_dense_layers self.embedding_vec_size = embedding_vec_size self.embedding_layer = sok.All2AllDenseEmbedding(max_vocabulary_size_per_gpu=self.max_vocabulary_size_per_gpu, embedding_vec_size=self.embedding_vec_size, slot_num=self.slot_num, nnz_per_slot=self.nnz_per_slot) self.dense_layers = [] for _ in range(self.num_dense_layers): self.layer = tf.keras.layers.Dense(units=num_dense_units, activation='relu') self.dense_layers.append(self.layer) self.out_layer = tf.keras.layers.Dense(units=1, activation=None, kernel_initializer='ones', bias_initializer='zeros') def call(self, inputs, training=True): # [batchsize, slot_num, nnz_per_slot, embedding_vec_size] embedding_vector = self.embedding_layer(inputs, training=training) # [batchsize, slot_num * nnz_per_slot * embedding_vec_size] embedding_vector = tf.reshape(embedding_vector, shape=[-1, self.slot_num * self.nnz_per_slot * self.embedding_vec_size]) hidden = embedding_vector for layer in self.dense_layers: hidden = layer(hidden) # [batchsize, 1] logit = self.out_layer(hidden) return logit class TfDenseDemo(tf.keras.models.Model): def __init__(self, global_batch_size, vocabulary_size, slot_num, nnz_per_slot, num_dense_layers, num_dense_units, embedding_vec_size, **kwargs): super(TfDenseDemo, self).__init__(**kwargs) self.global_batch_size = global_batch_size self.vocabulary_size = vocabulary_size self.slot_num = slot_num self.nnz_per_slot = nnz_per_slot self.num_dense_layers = num_dense_layers self.embedding_vec_size = embedding_vec_size self.params = self.add_weight(shape=(self.vocabulary_size, self.embedding_vec_size), dtype=tf.float32, name='embedding_table', initializer='glorot_normal') self.dense_layers = [] for _ in range(self.num_dense_layers): self.layer = tf.keras.layers.Dense(units=num_dense_units, activation='relu') self.dense_layers.append(self.layer) self.out_layer = tf.keras.layers.Dense(units=1, activation=None, kernel_initializer='ones', bias_initializer='zeros') def call(self, inputs, training=True): # [batchsize * slot_num * nnz_per_slot, embedding_vec_size] embedding_vector = tf.nn.embedding_lookup(params=self.params, ids=inputs) # [batchsize, slot_num * nnz_per_slot * embedding_vec_size] embedding_vector = tf.reshape(embedding_vector, shape=[self.global_batch_size, self.slot_num * self.nnz_per_slot * self.embedding_vec_size]) hidden = embedding_vector for layer in self.dense_layers: hidden = 
layer(hidden) # [batchsize, 1] logit = self.out_layer(hidden) return logit def generate_dense_variables(input_channel, units): w, b = [], [] for i in range(len(units)): if i == 0: w.append(tf.random.normal([input_channel, units[i]])) else: w.append(tf.random.normal([units[i-1], units[i]])) b.append(tf.random.normal([units[i]])) return w, b def generate_vocabulary_table(vocabulary_size_per_gpu, embedding_vec_size, num_of_gpu): tensors = [tf.random.normal([vocabulary_size_per_gpu, embedding_vec_size]) for _ in range(num_of_gpu)] return tensors def generate_data(args): dense_variables = generate_dense_variables(args.slot_num * args.nnz_per_slot * args.embedding_vec_size, [args.num_dense_units for _ in range(args.num_dense_layers)]) vocabulary_tensors = generate_vocabulary_table(args.max_vocabulary_size_per_gpu, args.embedding_vec_size, hvd.size()) samples, labels = utils.generate_random_samples(num_of_samples=args.global_batch_size, vocabulary_size=args.max_vocabulary_size_per_gpu * hvd.size(), slot_num=args.slot_num, max_nnz=args.nnz_per_slot, use_sparse_mask=False) samples, labels = tf.convert_to_tensor(samples), tf.convert_to_tensor(labels) for i in range(args.num_dense_layers): # dense_variables[0] means weight, dense_variables[1] means bias dense_variables[0][i] = hvd.broadcast(dense_variables[0][i], root_rank=0) dense_variables[1][i] = hvd.broadcast(dense_variables[1][i], root_rank=0) for i in range(hvd.size()): vocabulary_tensors[i] = hvd.broadcast(vocabulary_tensors[i], root_rank=0) samples = hvd.broadcast(samples, root_rank=0) labels = hvd.broadcast(labels, root_rank=0) return dense_variables, vocabulary_tensors, samples, labels def run_sok_model(args, dense_variables, vocabulary_tensors, samples, labels): # split sample and labels assert(args.global_batch_size % hvd.size() == 0) local_batch_size = args.global_batch_size // hvd.size() local_id = hvd.local_rank() samples = samples[local_id*local_batch_size : (local_id+1)*local_batch_size] labels = labels[local_id*local_batch_size : (local_id+1)*local_batch_size] sok.Init(global_batch_size=args.global_batch_size) model = SOKDenseDemo(max_vocabulary_size_per_gpu=args.max_vocabulary_size_per_gpu, embedding_vec_size=args.embedding_vec_size, slot_num=args.slot_num, nnz_per_slot=args.nnz_per_slot, num_dense_layers=args.num_dense_layers, num_dense_units=args.num_dense_units) #model.build(input_shape=(local_batch_size, args.slot_num * args.nnz_per_slot * args.embedding_vec_size)) model(samples, training=False) for i in range(args.num_dense_layers): model.dense_layers[i].trainable_variables[0].assign(dense_variables[0][i]) model.dense_layers[i].trainable_variables[1].assign(dense_variables[1][i]) sok_saver = sok.Saver() init_tensors = [tensor.numpy() for tensor in vocabulary_tensors] sok_saver.load_embedding_values(model.embedding_layer.embedding_variable, init_tensors) embedding_optimizer = utils.get_embedding_optimizer(args.optimizer)(learning_rate=0.1) dense_optimizer = utils.get_dense_optimizer(args.optimizer)(learning_rate=0.1) loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) def _replica_loss(labels, logits): loss = loss_fn(labels, logits) return tf.nn.compute_average_loss(loss, global_batch_size=args.global_batch_size) @tf.function def _train_step(inputs, labels, first_batch): with tf.GradientTape() as tape, tf.GradientTape() as emb_tape: logit = model(inputs, training=True) replica_loss = _replica_loss(labels, logit) tape = hvd.DistributedGradientTape(tape) emb_variable, 
other_variable = sok.split_embedding_variable_from_others(model.trainable_variables) emb_grads = emb_tape.gradient(replica_loss, emb_variable) grads = tape.gradient(replica_loss, other_variable) if 'plugin' not in args.optimizer: with sok.OptimizerScope(emb_variable): embedding_optimizer.apply_gradients(zip(emb_grads, emb_variable), experimental_aggregate_gradients=False) else: embedding_optimizer.apply_gradients(zip(emb_grads, emb_variable), experimental_aggregate_gradients=False) dense_optimizer.apply_gradients(zip(grads, other_variable)) # Note: broadcast should be done after the first gradient step to ensure optimizer initialization. if first_batch: hvd.broadcast_variables(other_variable, root_rank=0) hvd.broadcast_variables(dense_optimizer.variables(), root_rank=0) return replica_loss loss_list = [] for i in range(args.iter_num): loss = _train_step(samples, labels, i == 0) loss_list.append(loss) print("[INFO]: Iteration: {}, loss={}".format(i, loss)) return loss_list def run_tf_model(args, dense_variables, vocabulary_tensors, samples, labels): model = TfDenseDemo(global_batch_size=args.global_batch_size, vocabulary_size=args.max_vocabulary_size_per_gpu * hvd.size(), slot_num=args.slot_num, nnz_per_slot=args.nnz_per_slot, num_dense_layers=args.num_dense_layers, num_dense_units=args.num_dense_units, embedding_vec_size=args.embedding_vec_size) #model.build(input_shape=(args.global_batch_size, args.slot_num * args.nnz_per_slot * args.embedding_vec_size)) model(samples, training=False) for i in range(args.num_dense_layers): model.dense_layers[i].trainable_variables[0].assign(dense_variables[0][i]) model.dense_layers[i].trainable_variables[1].assign(dense_variables[1][i]) vocabulary_table = tf.concat(vocabulary_tensors, axis=0) for i in range(hvd.size()): model.params.assign(vocabulary_table) optimizer = utils.get_dense_optimizer(args.optimizer)(learning_rate=0.1) loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True) @tf.function def _train_step(inputs, labels): with tf.GradientTape() as tape: logit = model(inputs, training=True) loss = loss_fn(labels, logit) grads = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) return loss loss_list = [] for i in range(args.iter_num): loss = _train_step(samples, labels) loss_list.append(loss) print("[INFO]: Iteration: {}, loss={}".format(i, loss)) return loss_list def get_args(): parser = argparse.ArgumentParser(description='test demo model with horovod.') parser.add_argument('--iter_num', type=int, help='the number of testing iterations.', required=False, default=100) parser.add_argument('--max_vocabulary_size_per_gpu', type=int, required=False, default=8192) parser.add_argument('--slot_num', type=int, help='the number of feature fields', required=False, default=100) parser.add_argument('--nnz_per_slot', type=int, help='the number of keys in each slot', required=False, default=10) parser.add_argument('--embedding_vec_size', type=int, help='the dimention of embedding vector', required=False, default=4) parser.add_argument('--global_batch_size', type=int, required=False, default=1024) parser.add_argument('--optimizer', type=str, help="use what optimizer", required=False, default='adam', choices=['plugin_adam', 'adam', 'sgd']) parser.add_argument('--num_dense_layers', type=int, required=False, default=6) parser.add_argument('--num_dense_units', type=int, required=False, default=1024) args = parser.parse_args() return args if __name__ == '__main__': args = get_args() local_rank = 
os.getenv("OMPI_COMM_WORLD_RANK") os.environ["CUDA_VISIBLE_DEVICES"] = str(local_rank) hvd.init() dense_variables, vocabulary_tensors, samples, labels = generate_data(args) sok_loss_list = run_sok_model(args, dense_variables, vocabulary_tensors, samples, labels) # compute_average_loss for i in range(args.iter_num): sok_loss_list[i] = hvd.allreduce(sok_loss_list[i]) if hvd.local_rank() == 0: tf_loss_list = run_tf_model(args, dense_variables, vocabulary_tensors, samples, labels) if hvd.local_rank() == 0: for i in range(args.iter_num): print('Iteration: {}, sok={}, tf={}'.format(i, sok_loss_list[i], tf_loss_list[i]))
[ "tensorflow.nn.compute_average_loss", "argparse.ArgumentParser", "tensorflow.keras.layers.Dense", "horovod.tensorflow.size", "sparse_operation_kit.OptimizerScope", "tensorflow.reshape", "utils.get_dense_optimizer", "sparse_operation_kit.Init", "sparse_operation_kit.Saver", "sys.path.append", "horovod.tensorflow.broadcast_variables", "horovod.tensorflow.allreduce", "tensorflow.concat", "horovod.tensorflow.local_rank", "horovod.tensorflow.broadcast", "horovod.tensorflow.init", "tensorflow.random.normal", "tensorflow.nn.embedding_lookup", "horovod.tensorflow.DistributedGradientTape", "sparse_operation_kit.split_embedding_variable_from_others", "sparse_operation_kit.All2AllDenseEmbedding", "utils.get_embedding_optimizer", "os.getenv", "tensorflow.convert_to_tensor", "tensorflow.keras.losses.BinaryCrossentropy", "tensorflow.GradientTape" ]
[((622, 650), 'sys.path.append', 'sys.path.append', (['"""../../../"""'], {}), "('../../../')\n", (637, 650), False, 'import sys\n'), ((6704, 6739), 'horovod.tensorflow.broadcast', 'hvd.broadcast', (['samples'], {'root_rank': '(0)'}), '(samples, root_rank=0)\n', (6717, 6739), True, 'import horovod.tensorflow as hvd\n'), ((6753, 6787), 'horovod.tensorflow.broadcast', 'hvd.broadcast', (['labels'], {'root_rank': '(0)'}), '(labels, root_rank=0)\n', (6766, 6787), True, 'import horovod.tensorflow as hvd\n'), ((7091, 7107), 'horovod.tensorflow.local_rank', 'hvd.local_rank', ([], {}), '()\n', (7105, 7107), True, 'import horovod.tensorflow as hvd\n'), ((7273, 7323), 'sparse_operation_kit.Init', 'sok.Init', ([], {'global_batch_size': 'args.global_batch_size'}), '(global_batch_size=args.global_batch_size)\n', (7281, 7323), True, 'import sparse_operation_kit as sok\n'), ((8092, 8103), 'sparse_operation_kit.Saver', 'sok.Saver', ([], {}), '()\n', (8101, 8103), True, 'import sparse_operation_kit as sok\n'), ((8459, 8558), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), '(from_logits=True, reduction=tf.keras.\n losses.Reduction.NONE)\n', (8493, 8558), True, 'import tensorflow as tf\n'), ((11225, 11262), 'tensorflow.concat', 'tf.concat', (['vocabulary_tensors'], {'axis': '(0)'}), '(vocabulary_tensors, axis=0)\n', (11234, 11262), True, 'import tensorflow as tf\n'), ((11433, 11485), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (11467, 11485), True, 'import tensorflow as tf\n'), ((12077, 12145), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""test demo model with horovod."""'}), "(description='test demo model with horovod.')\n", (12100, 12145), False, 'import argparse\n'), ((13530, 13563), 'os.getenv', 'os.getenv', (['"""OMPI_COMM_WORLD_RANK"""'], {}), "('OMPI_COMM_WORLD_RANK')\n", (13539, 13563), False, 'import os\n'), ((13626, 13636), 'horovod.tensorflow.init', 'hvd.init', ([], {}), '()\n', (13634, 13636), True, 'import horovod.tensorflow as hvd\n'), ((1444, 1640), 'sparse_operation_kit.All2AllDenseEmbedding', 'sok.All2AllDenseEmbedding', ([], {'max_vocabulary_size_per_gpu': 'self.max_vocabulary_size_per_gpu', 'embedding_vec_size': 'self.embedding_vec_size', 'slot_num': 'self.slot_num', 'nnz_per_slot': 'self.nnz_per_slot'}), '(max_vocabulary_size_per_gpu=self.\n max_vocabulary_size_per_gpu, embedding_vec_size=self.embedding_vec_size,\n slot_num=self.slot_num, nnz_per_slot=self.nnz_per_slot)\n', (1469, 1640), True, 'import sparse_operation_kit as sok\n'), ((2054, 2158), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)', 'activation': 'None', 'kernel_initializer': '"""ones"""', 'bias_initializer': '"""zeros"""'}), "(units=1, activation=None, kernel_initializer='ones',\n bias_initializer='zeros')\n", (2075, 2158), True, 'import tensorflow as tf\n'), ((2533, 2638), 'tensorflow.reshape', 'tf.reshape', (['embedding_vector'], {'shape': '[-1, self.slot_num * self.nnz_per_slot * self.embedding_vec_size]'}), '(embedding_vector, shape=[-1, self.slot_num * self.nnz_per_slot *\n self.embedding_vec_size])\n', (2543, 2638), True, 'import tensorflow as tf\n'), ((3986, 4090), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)', 'activation': 'None', 'kernel_initializer': '"""ones"""', 'bias_initializer': '"""zeros"""'}), 
"(units=1, activation=None, kernel_initializer='ones',\n bias_initializer='zeros')\n", (4007, 4090), True, 'import tensorflow as tf\n'), ((4324, 4378), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', ([], {'params': 'self.params', 'ids': 'inputs'}), '(params=self.params, ids=inputs)\n', (4346, 4378), True, 'import tensorflow as tf\n'), ((4525, 4650), 'tensorflow.reshape', 'tf.reshape', (['embedding_vector'], {'shape': '[self.global_batch_size, self.slot_num * self.nnz_per_slot * self.\n embedding_vec_size]'}), '(embedding_vector, shape=[self.global_batch_size, self.slot_num *\n self.nnz_per_slot * self.embedding_vec_size])\n', (4535, 4650), True, 'import tensorflow as tf\n'), ((5296, 5359), 'tensorflow.random.normal', 'tf.random.normal', (['[vocabulary_size_per_gpu, embedding_vec_size]'], {}), '([vocabulary_size_per_gpu, embedding_vec_size])\n', (5312, 5359), True, 'import tensorflow as tf\n'), ((5761, 5771), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (5769, 5771), True, 'import horovod.tensorflow as hvd\n'), ((6235, 6264), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['samples'], {}), '(samples)\n', (6255, 6264), True, 'import tensorflow as tf\n'), ((6266, 6294), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['labels'], {}), '(labels)\n', (6286, 6294), True, 'import tensorflow as tf\n'), ((6444, 6493), 'horovod.tensorflow.broadcast', 'hvd.broadcast', (['dense_variables[0][i]'], {'root_rank': '(0)'}), '(dense_variables[0][i], root_rank=0)\n', (6457, 6493), True, 'import horovod.tensorflow as hvd\n'), ((6526, 6575), 'horovod.tensorflow.broadcast', 'hvd.broadcast', (['dense_variables[1][i]'], {'root_rank': '(0)'}), '(dense_variables[1][i], root_rank=0)\n', (6539, 6575), True, 'import horovod.tensorflow as hvd\n'), ((6595, 6605), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (6603, 6605), True, 'import horovod.tensorflow as hvd\n'), ((6640, 6689), 'horovod.tensorflow.broadcast', 'hvd.broadcast', (['vocabulary_tensors[i]'], {'root_rank': '(0)'}), '(vocabulary_tensors[i], root_rank=0)\n', (6653, 6689), True, 'import horovod.tensorflow as hvd\n'), ((7065, 7075), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (7073, 7075), True, 'import horovod.tensorflow as hvd\n'), ((8296, 8341), 'utils.get_embedding_optimizer', 'utils.get_embedding_optimizer', (['args.optimizer'], {}), '(args.optimizer)\n', (8325, 8341), False, 'import utils\n'), ((8383, 8424), 'utils.get_dense_optimizer', 'utils.get_dense_optimizer', (['args.optimizer'], {}), '(args.optimizer)\n', (8408, 8424), False, 'import utils\n'), ((8647, 8721), 'tensorflow.nn.compute_average_loss', 'tf.nn.compute_average_loss', (['loss'], {'global_batch_size': 'args.global_batch_size'}), '(loss, global_batch_size=args.global_batch_size)\n', (8673, 8721), True, 'import tensorflow as tf\n'), ((8994, 9027), 'horovod.tensorflow.DistributedGradientTape', 'hvd.DistributedGradientTape', (['tape'], {}), '(tape)\n', (9021, 9027), True, 'import horovod.tensorflow as hvd\n'), ((9068, 9135), 'sparse_operation_kit.split_embedding_variable_from_others', 'sok.split_embedding_variable_from_others', (['model.trainable_variables'], {}), '(model.trainable_variables)\n', (9108, 9135), True, 'import sparse_operation_kit as sok\n'), ((11282, 11292), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (11290, 11292), True, 'import horovod.tensorflow as hvd\n'), ((11358, 11399), 'utils.get_dense_optimizer', 'utils.get_dense_optimizer', (['args.optimizer'], {}), '(args.optimizer)\n', (11383, 11399), 
False, 'import utils\n'), ((13901, 13932), 'horovod.tensorflow.allreduce', 'hvd.allreduce', (['sok_loss_list[i]'], {}), '(sok_loss_list[i])\n', (13914, 13932), True, 'import horovod.tensorflow as hvd\n'), ((13941, 13957), 'horovod.tensorflow.local_rank', 'hvd.local_rank', ([], {}), '()\n', (13955, 13957), True, 'import horovod.tensorflow as hvd\n'), ((14068, 14084), 'horovod.tensorflow.local_rank', 'hvd.local_rank', ([], {}), '()\n', (14082, 14084), True, 'import horovod.tensorflow as hvd\n'), ((1915, 1978), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'num_dense_units', 'activation': '"""relu"""'}), "(units=num_dense_units, activation='relu')\n", (1936, 1978), True, 'import tensorflow as tf\n'), ((3847, 3910), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'num_dense_units', 'activation': '"""relu"""'}), "(units=num_dense_units, activation='relu')\n", (3868, 3910), True, 'import tensorflow as tf\n'), ((5146, 5174), 'tensorflow.random.normal', 'tf.random.normal', (['[units[i]]'], {}), '([units[i]])\n', (5162, 5174), True, 'import tensorflow as tf\n'), ((6999, 7009), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (7007, 7009), True, 'import horovod.tensorflow as hvd\n'), ((8807, 8824), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8822, 8824), True, 'import tensorflow as tf\n'), ((8834, 8851), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8849, 8851), True, 'import tensorflow as tf\n'), ((9923, 9975), 'horovod.tensorflow.broadcast_variables', 'hvd.broadcast_variables', (['other_variable'], {'root_rank': '(0)'}), '(other_variable, root_rank=0)\n', (9946, 9975), True, 'import horovod.tensorflow as hvd\n'), ((11554, 11571), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (11569, 11571), True, 'import tensorflow as tf\n'), ((5007, 5050), 'tensorflow.random.normal', 'tf.random.normal', (['[input_channel, units[i]]'], {}), '([input_channel, units[i]])\n', (5023, 5050), True, 'import tensorflow as tf\n'), ((5087, 5129), 'tensorflow.random.normal', 'tf.random.normal', (['[units[i - 1], units[i]]'], {}), '([units[i - 1], units[i]])\n', (5103, 5129), True, 'import tensorflow as tf\n'), ((5968, 5978), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (5976, 5978), True, 'import horovod.tensorflow as hvd\n'), ((9323, 9355), 'sparse_operation_kit.OptimizerScope', 'sok.OptimizerScope', (['emb_variable'], {}), '(emb_variable)\n', (9341, 9355), True, 'import sparse_operation_kit as sok\n'), ((10530, 10540), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (10538, 10540), True, 'import horovod.tensorflow as hvd\n')]
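The `_train_step` in this record combines a Horovod-averaged gradient tape with a variable broadcast deferred to the first step. A minimal sketch of that pattern in plain TensorFlow + Horovod, independent of SOK (the variable and loss are toy placeholders):

import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()

w = tf.Variable(1.0)
opt = tf.keras.optimizers.SGD(learning_rate=0.1)

@tf.function
def step(first_batch):
    with tf.GradientTape() as tape:
        loss = (w - float(hvd.rank())) ** 2          # toy per-worker loss
    tape = hvd.DistributedGradientTape(tape)           # allreduce-averages grads
    opt.apply_gradients([(tape.gradient(loss, w), w)])
    if first_batch:
        # broadcast only after the first apply, once optimizer slots exist
        hvd.broadcast_variables([w], root_rank=0)
        hvd.broadcast_variables(opt.variables(), root_rank=0)
    return loss

Broadcasting after step 0 matches the comment in the record: optimizer slot variables are created lazily by the first apply_gradients, so an earlier broadcast would miss them.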
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Builds the C3D network.

Implements the inference pattern for model building.

inference_c3d(): Builds the model as far as is required for running the
network forward to make predictions.
"""

import tensorflow as tf


class C3DModel(object):
    """C3D (3-D convolutional) network for video clip classification."""

    # The UCF-101 dataset has 101 classes
    NUM_CLASSES = 101

    # Images are cropped to (CROP_SIZE, CROP_SIZE)
    CROP_SIZE = 112
    CHANNELS = 3

    # Number of frames per video clip
    NUM_FRAMES_PER_CLIP = 16

    DROPOUT_VALUES = 0.5
    BATCH_SIZE = 16

    def __init__(self, batch_size=16, num_classes=101, test=False):
        """Constructor for C3D"""
        if test:
            self.DROPOUT_VALUES = 1
        self.BATCH_SIZE = batch_size
        self.NUM_CLASSES = num_classes
        with tf.variable_scope('var_name') as var_scope:
            self.weights = {
                'wc1': self._variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.04, 0.00),
                'wc2': self._variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.04, 0.00),
                'wc3a': self._variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256], 0.04, 0.00),
                'wc3b': self._variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256], 0.04, 0.00),
                'wc4a': self._variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512], 0.04, 0.00),
                'wc4b': self._variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512], 0.04, 0.00),
                'wc5a': self._variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512], 0.04, 0.00),
                'wc5b': self._variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.04, 0.00),
                'wd1': self._variable_with_weight_decay('wd1', [8192, 4096], 0.04, 0.001),
                'wd2': self._variable_with_weight_decay('wd2', [4096, 4096], 0.04, 0.002),
                'out': self._variable_with_weight_decay('wout', [4096, self.NUM_CLASSES], 0.04, 0.005)
            }
            self.biases = {
                'bc1': self._variable_with_weight_decay('bc1', [64], 0.04, 0.0),
                'bc2': self._variable_with_weight_decay('bc2', [128], 0.04, 0.0),
                'bc3a': self._variable_with_weight_decay('bc3a', [256], 0.04, 0.0),
                'bc3b': self._variable_with_weight_decay('bc3b', [256], 0.04, 0.0),
                'bc4a': self._variable_with_weight_decay('bc4a', [512], 0.04, 0.0),
                'bc4b': self._variable_with_weight_decay('bc4b', [512], 0.04, 0.0),
                'bc5a': self._variable_with_weight_decay('bc5a', [512], 0.04, 0.0),
                'bc5b': self._variable_with_weight_decay('bc5b', [512], 0.04, 0.0),
                'bd1': self._variable_with_weight_decay('bd1', [4096], 0.04, 0.0),
                'bd2': self._variable_with_weight_decay('bd2', [4096], 0.04, 0.0),
                'out': self._variable_with_weight_decay('bout', [self.NUM_CLASSES], 0.04, 0.0),
            }

    def placeholder_inputs(self):
        """Generate placeholder variables to represent the input tensors.

        These placeholders are used as inputs by the rest of the model
        building code and will be fed from the downloaded data in the
        .run() loop, below.

        Returns:
          images_placeholder: Images placeholder.
          labels_placeholder: Labels placeholder.
        """
        # Note that the shapes of the placeholders match the shapes of the
        # full image and label tensors, except the first dimension is now
        # batch_size rather than the full size of the train or test data
        # sets.
        images_placeholder = tf.placeholder(tf.float32,
                                            shape=(self.BATCH_SIZE,
                                                   self.NUM_FRAMES_PER_CLIP,
                                                   self.CROP_SIZE,
                                                   self.CROP_SIZE,
                                                   self.CHANNELS))
        labels_placeholder = tf.placeholder(tf.int64, shape=(self.BATCH_SIZE))
        return images_placeholder, labels_placeholder

    def _variable_on_cpu(self, name, shape, initializer):
        # with tf.device('/cpu:%d' % cpu_id):
        with tf.device('/cpu:0'):
            var = tf.get_variable(name, shape, initializer=initializer)
        return var

    def _variable_with_weight_decay(self, name, shape, stddev, wd):
        var = self._variable_on_cpu(name, shape,
                                    tf.truncated_normal_initializer(stddev=stddev))
        if wd is not None:
            weight_decay = tf.nn.l2_loss(var) * wd
            tf.add_to_collection('losses', weight_decay)
        return var

    def conv3d(self, name, l_input, w, b):
        return tf.nn.bias_add(
            tf.nn.conv3d(l_input, w, strides=[1, 1, 1, 1, 1], padding='SAME'),
            b
        )

    def max_pool(self, name, l_input, k):
        return tf.nn.max_pool3d(l_input, ksize=[1, k, 2, 2, 1],
                                strides=[1, k, 2, 2, 1], padding='SAME',
                                name=name)

    def inference_c3d(self, _X):
        end_points = {}
        # Convolution Layer
        conv1 = self.conv3d('conv1', _X, self.weights['wc1'], self.biases['bc1'])
        conv1 = tf.nn.relu(conv1, 'relu1')
        pool1 = self.max_pool('pool1', conv1, k=1)

        # Convolution Layer
        conv2 = self.conv3d('conv2', pool1, self.weights['wc2'], self.biases['bc2'])
        conv2 = tf.nn.relu(conv2, 'relu2')
        pool2 = self.max_pool('pool2', conv2, k=2)

        # Convolution Layer
        conv3 = self.conv3d('conv3a', pool2, self.weights['wc3a'], self.biases['bc3a'])
        conv3 = tf.nn.relu(conv3, 'relu3a')
        conv3 = self.conv3d('conv3b', conv3, self.weights['wc3b'], self.biases['bc3b'])
        conv3 = tf.nn.relu(conv3, 'relu3b')
        pool3 = self.max_pool('pool3', conv3, k=2)

        # Convolution Layer
        conv4 = self.conv3d('conv4a', pool3, self.weights['wc4a'], self.biases['bc4a'])
        conv4 = tf.nn.relu(conv4, 'relu4a')
        conv4 = self.conv3d('conv4b', conv4, self.weights['wc4b'], self.biases['bc4b'])
        conv4 = tf.nn.relu(conv4, 'relu4b')
        pool4 = self.max_pool('pool4', conv4, k=2)

        # Convolution Layer
        conv5 = self.conv3d('conv5a', pool4, self.weights['wc5a'], self.biases['bc5a'])
        conv5 = tf.nn.relu(conv5, 'relu5a')
        conv5 = self.conv3d('conv5b', conv5, self.weights['wc5b'], self.biases['bc5b'])
        conv5 = tf.nn.relu(conv5, 'relu5b')
        pool5 = self.max_pool('pool5', conv5, k=2)

        # Fully connected layer
        # pool5 = tf.transpose(pool5, perm=[0, 1, 4, 2, 3])
        dense1 = tf.reshape(pool5,
                            [self.BATCH_SIZE,
                             self.weights['wd1'].get_shape().as_list()[0]])  # Reshape pool5 output to fit dense layer input
        dense1 = tf.matmul(dense1, self.weights['wd1']) + self.biases['bd1']

        dense1 = tf.nn.relu(dense1, name='fc1')  # Relu activation
        dense1 = tf.nn.dropout(dense1, self.DROPOUT_VALUES)
        end_points['fc_6'] = dense1
        dense2 = tf.nn.relu(tf.matmul(dense1, self.weights['wd2']) + self.biases['bd2'],
                            name='fc2')  # Relu activation
        dense2 = tf.nn.dropout(dense2, self.DROPOUT_VALUES)
        end_points['fc_7'] = dense2
        # Output: class prediction
        out = tf.matmul(dense2, self.weights['out']) + self.biases['out']

        return out, end_points


def main():
    c3d = C3DModel(batch_size=1)
    images_placeholder, labels_placeholder = c3d.placeholder_inputs()
    print(c3d.inference_c3d(images_placeholder))


if __name__ == '__main__':
    main()
[ "tensorflow.nn.relu", "tensorflow.nn.max_pool3d", "tensorflow.device", "tensorflow.variable_scope", "tensorflow.add_to_collection", "tensorflow.nn.conv3d", "tensorflow.placeholder", "tensorflow.matmul", "tensorflow.nn.l2_loss", "tensorflow.truncated_normal_initializer", "tensorflow.nn.dropout", "tensorflow.get_variable" ]
[((4302, 4430), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(self.BATCH_SIZE, self.NUM_FRAMES_PER_CLIP, self.CROP_SIZE, self.CROP_SIZE,\n self.CHANNELS)'}), '(tf.float32, shape=(self.BATCH_SIZE, self.NUM_FRAMES_PER_CLIP,\n self.CROP_SIZE, self.CROP_SIZE, self.CHANNELS))\n', (4316, 4430), True, 'import tensorflow as tf\n'), ((4708, 4755), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': 'self.BATCH_SIZE'}), '(tf.int64, shape=self.BATCH_SIZE)\n', (4722, 4755), True, 'import tensorflow as tf\n'), ((5598, 5702), 'tensorflow.nn.max_pool3d', 'tf.nn.max_pool3d', (['l_input'], {'ksize': '[1, k, 2, 2, 1]', 'strides': '[1, k, 2, 2, 1]', 'padding': '"""SAME"""', 'name': 'name'}), "(l_input, ksize=[1, k, 2, 2, 1], strides=[1, k, 2, 2, 1],\n padding='SAME', name=name)\n", (5614, 5702), True, 'import tensorflow as tf\n'), ((5883, 5909), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv1', '"""relu1"""'], {}), "(conv1, 'relu1')\n", (5893, 5909), True, 'import tensorflow as tf\n'), ((6091, 6117), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv2', '"""relu2"""'], {}), "(conv2, 'relu2')\n", (6101, 6117), True, 'import tensorflow as tf\n'), ((6302, 6329), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv3', '"""relu3a"""'], {}), "(conv3, 'relu3a')\n", (6312, 6329), True, 'import tensorflow as tf\n'), ((6434, 6461), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv3', '"""relu3b"""'], {}), "(conv3, 'relu3b')\n", (6444, 6461), True, 'import tensorflow as tf\n'), ((6646, 6673), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv4', '"""relu4a"""'], {}), "(conv4, 'relu4a')\n", (6656, 6673), True, 'import tensorflow as tf\n'), ((6778, 6805), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv4', '"""relu4b"""'], {}), "(conv4, 'relu4b')\n", (6788, 6805), True, 'import tensorflow as tf\n'), ((6990, 7017), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv5', '"""relu5a"""'], {}), "(conv5, 'relu5a')\n", (7000, 7017), True, 'import tensorflow as tf\n'), ((7122, 7149), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv5', '"""relu5b"""'], {}), "(conv5, 'relu5b')\n", (7132, 7149), True, 'import tensorflow as tf\n'), ((7551, 7581), 'tensorflow.nn.relu', 'tf.nn.relu', (['dense1'], {'name': '"""fc1"""'}), "(dense1, name='fc1')\n", (7561, 7581), True, 'import tensorflow as tf\n'), ((7618, 7660), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['dense1', 'self.DROPOUT_VALUES'], {}), '(dense1, self.DROPOUT_VALUES)\n', (7631, 7660), True, 'import tensorflow as tf\n'), ((7833, 7875), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['dense2', 'self.DROPOUT_VALUES'], {}), '(dense2, self.DROPOUT_VALUES)\n', (7846, 7875), True, 'import tensorflow as tf\n'), ((1430, 1459), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""var_name"""'], {}), "('var_name')\n", (1447, 1459), True, 'import tensorflow as tf\n'), ((4930, 4949), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (4939, 4949), True, 'import tensorflow as tf\n'), ((4969, 5022), 'tensorflow.get_variable', 'tf.get_variable', (['name', 'shape'], {'initializer': 'initializer'}), '(name, shape, initializer=initializer)\n', (4984, 5022), True, 'import tensorflow as tf\n'), ((5160, 5206), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': 'stddev'}), '(stddev=stddev)\n', (5191, 5206), True, 'import tensorflow as tf\n'), ((5298, 5342), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""losses"""', 'weight_decay'], {}), "('losses', weight_decay)\n", (5318, 5342), True, 'import tensorflow as tf\n'), ((5449, 5514), 
'tensorflow.nn.conv3d', 'tf.nn.conv3d', (['l_input', 'w'], {'strides': '[1, 1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(l_input, w, strides=[1, 1, 1, 1, 1], padding='SAME')\n", (5461, 5514), True, 'import tensorflow as tf\n'), ((7473, 7511), 'tensorflow.matmul', 'tf.matmul', (['dense1', "self.weights['wd1']"], {}), "(dense1, self.weights['wd1'])\n", (7482, 7511), True, 'import tensorflow as tf\n'), ((7961, 7999), 'tensorflow.matmul', 'tf.matmul', (['dense2', "self.weights['out']"], {}), "(dense2, self.weights['out'])\n", (7970, 7999), True, 'import tensorflow as tf\n'), ((5262, 5280), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['var'], {}), '(var)\n', (5275, 5280), True, 'import tensorflow as tf\n'), ((7724, 7762), 'tensorflow.matmul', 'tf.matmul', (['dense1', "self.weights['wd2']"], {}), "(dense1, self.weights['wd2'])\n", (7733, 7762), True, 'import tensorflow as tf\n')]
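The 8192 input size of `wd1` follows from the pooling schedule above: pool1 uses k=1 (spatial-only halving), pool2-pool5 use k=2 (halving time and space), and 'SAME' padding rounds 7 up to 4 on the final stage. A quick arithmetic check:

import math

frames, size, channels = 16, 112, 512   # input clip dims and conv5 channels
for k in (1, 2, 2, 2, 2):                # pool1..pool5 temporal strides
    frames = math.ceil(frames / k)       # time halves only when k == 2
    size = math.ceil(size / 2)           # space halves at every pool
print(frames * size * size * channels)   # 1 * 4 * 4 * 512 = 8192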
import pytest
from packaging import version
import qcodes as qc
from numpy.testing import assert_array_equal

from plottr.data.datadict import DataDict
from plottr.node.tools import linearFlowchart
from plottr.data.qcodes_dataset import QCodesDSLoader
from plottr.node.data_selector import DataSelector
from plottr.node.grid import DataGridder, GridOption
from plottr.node.scaleunits import ScaleUnits


@pytest.mark.skipif(version.parse(qc.__version__) < version.parse("0.20.0"),
                    reason="Requires QCoDes 0.20.0 or later")
def test_qcodes_flow_shaped_data(qtbot, dataset_with_shape):
    fc = linearFlowchart(
        ('Data loader', QCodesDSLoader),
        ('Data selection', DataSelector),
        ('Grid', DataGridder),
        ('Scale Units', ScaleUnits)
    )
    loader = fc.nodes()['Data loader']
    selector = fc.nodes()['Data selection']
    selector.selectedData = 'z_0'
    gridder = fc.nodes()['Grid']
    gridder.grid = (GridOption.metadataShape, {})

    loader.pathAndId = dataset_with_shape.path_to_db, dataset_with_shape.run_id
    loader.update()

    expected_shape = dataset_with_shape.description.shapes['z_0']
    datadict = fc.output()['dataOut']
    for key in ('x', 'y', 'z_0'):
        assert datadict[key]['values'].shape == expected_shape
        assert datadict.shapes()[key] == expected_shape
        assert_array_equal(
            datadict[key]['values'],
            dataset_with_shape.get_parameter_data()['z_0'][key]
        )
    assert datadict.shape() == expected_shape
[ "packaging.version.parse", "plottr.node.tools.linearFlowchart" ]
[((635, 773), 'plottr.node.tools.linearFlowchart', 'linearFlowchart', (["('Data loader', QCodesDSLoader)", "('Data selection', DataSelector)", "('Grid', DataGridder)", "('Scale Units', ScaleUnits)"], {}), "(('Data loader', QCodesDSLoader), ('Data selection',\n DataSelector), ('Grid', DataGridder), ('Scale Units', ScaleUnits))\n", (650, 773), False, 'from plottr.node.tools import linearFlowchart\n'), ((425, 454), 'packaging.version.parse', 'version.parse', (['qc.__version__'], {}), '(qc.__version__)\n', (438, 454), False, 'from packaging import version\n'), ((477, 500), 'packaging.version.parse', 'version.parse', (['"""0.20.0"""'], {}), "('0.20.0')\n", (490, 500), False, 'from packaging import version\n')]
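The `skipif` guard in this record relies on `packaging`'s PEP 440 version comparison rather than string comparison, which matters once a minor version reaches two digits; a small sketch:

from packaging import version

# PEP 440 comparison understands numeric components, unlike plain string
# comparison: "0.8.1" > "0.20.0" lexicographically, but 0.8.1 < 0.20.0.
assert version.parse("0.8.1") < version.parse("0.20.0")
assert version.parse("0.21.0") >= version.parse("0.20.0")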
from setuptools import setup

setup(
    name = 'aiohttp_dynamic',
    packages = ['aiohttp_dynamic'],
    version = '1.3.0',
    license = 'Apache License 2.0',
    description = 'aiohttp extension for creating and modifying dynamic routes in runtime',
    author = 'bitrate16',
    author_email = '<EMAIL>',
    url = 'https://github.com/bitrate16/aiohttp-dynamic',
    download_url = 'https://github.com/bitrate16/aiohttp-dynamic/archive/1.2.0.tar.gz',
    keywords = ['aiohttp', 'dynamic', 'routing', 'mutable', 'aiohttp-server'],
    install_requires = [
        'aiohttp',
        'yarl'
    ],
    classifiers = [
        # Choose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: 3.11'
    ]
)
[ "setuptools.setup" ]
[((30, 1073), 'setuptools.setup', 'setup', ([], {'name': '"""aiohttp_dynamic"""', 'packages': "['aiohttp_dynamic']", 'version': '"""1.3.0"""', 'license': '"""Apache License 2.0"""', 'description': '"""aiohttp extension for creating and modifying dynamic routes in runtime"""', 'author': '"""bitrate16"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/bitrate16/aiohttp-dynamic"""', 'download_url': '"""https://github.com/bitrate16/aiohttp-dynamic/archive/1.3.0.tar.gz"""', 'keywords': "['aiohttp', 'dynamic', 'routing', 'mutable', 'aiohttp-server']", 'install_requires': "['aiohttp', 'yarl']", 'classifiers': "['Development Status :: 4 - Beta', 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3.11']"}), "(name='aiohttp_dynamic', packages=['aiohttp_dynamic'], version='1.3.0',\n    license='Apache License 2.0', description=\n    'aiohttp extension for creating and modifying dynamic routes in runtime',\n    author='bitrate16', author_email='<EMAIL>', url=\n    'https://github.com/bitrate16/aiohttp-dynamic', download_url=\n    'https://github.com/bitrate16/aiohttp-dynamic/archive/1.3.0.tar.gz',\n    keywords=['aiohttp', 'dynamic', 'routing', 'mutable', 'aiohttp-server'],\n    install_requires=['aiohttp', 'yarl'], classifiers=[\n    'Development Status :: 4 - Beta', 'Intended Audience :: Developers',\n    'Topic :: Software Development :: Libraries :: Python Modules',\n    'License :: OSI Approved :: Apache Software License',\n    'Programming Language :: Python :: 3.5',\n    'Programming Language :: Python :: 3.6',\n    'Programming Language :: Python :: 3.7',\n    'Programming Language :: Python :: 3.8',\n    'Programming Language :: Python :: 3.9',\n    'Programming Language :: Python :: 3.10',\n    'Programming Language :: Python :: 3.11'])\n", (35, 1073), False, 'from setuptools import setup\n')]
# -*- coding: utf-8 -*- # Generated by Django 1.9.5 on 2016-04-17 18:40 import json from django.db import migrations def reset_actions_and_quests(apps, schema_editor): for hero in apps.get_model("heroes", "Hero").objects.all(): actions = json.loads(hero.actions) actions['actions'] = [actions['actions'][0]] hero.actions = json.dumps(actions, ensure_ascii=False) hero.save() class Migration(migrations.Migration): dependencies = [ ('heroes', '0011_make_id_equal_to_account_id'), ] operations = [ migrations.RunPython(reset_actions_and_quests) ]
[ "django.db.migrations.RunPython", "json.loads", "json.dumps" ]
[((256, 280), 'json.loads', 'json.loads', (['hero.actions'], {}), '(hero.actions)\n', (266, 280), False, 'import json\n'), ((359, 398), 'json.dumps', 'json.dumps', (['actions'], {'ensure_ascii': '(False)'}), '(actions, ensure_ascii=False)\n', (369, 398), False, 'import json\n'), ((573, 619), 'django.db.migrations.RunPython', 'migrations.RunPython', (['reset_actions_and_quests'], {}), '(reset_actions_and_quests)\n', (593, 619), False, 'from django.db import migrations\n')]
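# RunPython above has no reverse function, so the migration cannot be rolled
# back. A common pattern (a sketch; the forward step is unchanged) is to pass
# RunPython.noop as the reverse so `migrate heroes 0011` still works:
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('heroes', '0011_make_id_equal_to_account_id'),
    ]

    operations = [
        # reset_actions_and_quests as defined in the migration above
        migrations.RunPython(reset_actions_and_quests,
                             reverse_code=migrations.RunPython.noop),
    ]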
from django.contrib import admin from .models import Setting, Profile, Inbox, Dislike, Match, UserPhoto, Like admin.site.register(Like) admin.site.register(Inbox) admin.site.register(Match) admin.site.register(UserPhoto) admin.site.register(Setting) admin.site.register(Dislike) admin.site.register(Profile) # Register your models here.
[ "django.contrib.admin.site.register" ]
[((111, 136), 'django.contrib.admin.site.register', 'admin.site.register', (['Like'], {}), '(Like)\n', (130, 136), False, 'from django.contrib import admin\n'), ((137, 163), 'django.contrib.admin.site.register', 'admin.site.register', (['Inbox'], {}), '(Inbox)\n', (156, 163), False, 'from django.contrib import admin\n'), ((164, 190), 'django.contrib.admin.site.register', 'admin.site.register', (['Match'], {}), '(Match)\n', (183, 190), False, 'from django.contrib import admin\n'), ((191, 221), 'django.contrib.admin.site.register', 'admin.site.register', (['UserPhoto'], {}), '(UserPhoto)\n', (210, 221), False, 'from django.contrib import admin\n'), ((222, 250), 'django.contrib.admin.site.register', 'admin.site.register', (['Setting'], {}), '(Setting)\n', (241, 250), False, 'from django.contrib import admin\n'), ((251, 279), 'django.contrib.admin.site.register', 'admin.site.register', (['Dislike'], {}), '(Dislike)\n', (270, 279), False, 'from django.contrib import admin\n'), ((280, 308), 'django.contrib.admin.site.register', 'admin.site.register', (['Profile'], {}), '(Profile)\n', (299, 308), False, 'from django.contrib import admin\n')]
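# An equivalent, slightly more compact registration for the models above,
# using the decorator form for the one model that is likely to grow admin
# customisation (the ModelAdmin body here is an illustrative assumption):
from django.contrib import admin

from .models import Setting, Profile, Inbox, Dislike, Match, UserPhoto, Like


@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
    list_display = ('id',)  # 'id' exists on every model; real field names are assumptions


for model in (Setting, Inbox, Dislike, Match, UserPhoto, Like):
    admin.site.register(model)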
import math
import numpy as np
import re

class bitstream:
    """A growable FIFO bit buffer backed by a one-dimensional numpy uint array."""
    def __init__(self, array=None):
        if array is None:  # no backing array supplied: start with an empty uint8 buffer
            self.array_unit_size = 8
            self.array_type = 'uint'
            self.valid = True
            self.read_index = 0
            self.r_bit_index = 0
            self.write_index = 0
            self.w_bit_index = 0
            self.array = np.zeros((8),dtype = 'uint8')
            self.capacity = 8*self.array_unit_size
            self.size = 0
        else:
            self.array_unit_size = int(re.search(r'\d+',str(array.dtype)).group())
            self.array_type = re.findall('[a-zA-Z]+',str(array.dtype))[0]
            if(not(math.floor(math.log(self.array_unit_size,2)) == math.log(self.array_unit_size,2)) or not(self.array_type == 'uint')):
                print('Error : Array must be of valid dtype (uint) ',self.array_type,' ',math.log(self.array_unit_size,2))
                self.valid = False
                return
            if(len(array.shape)>1):
                print(array.shape)
                print('Error : Array must be one dimensional')
                self.valid = False
                return
            self.valid = True
            self.read_index = 0
            self.r_bit_index = 0
            array_size = 2**math.ceil(math.log(len(array)+1,2))
            self.array = np.zeros((array_size),dtype = array.dtype)
            self.array[0:len(array)] = array
            self.capacity = array_size * self.array_unit_size
            self.write_index = len(array)
            self.w_bit_index = 0
            self.size = len(array)*self.array_unit_size
    """
    def size(self):
        ri = self.read_index
        wi = self.write_index
        if(self.read_index> self.write_index):
            wi = wi + len(self.array)
        count = (wi - ri -1)*self.array_unit_size
        count = count + self.w_bit_index + (self.array_unit_size - self.r_bit_index + 1)
        return count
    """
    def get_next(self, number_of_bits):
        # Peek: same as read() but restores the read cursor and size afterwards.
        if ((not self.valid) or self.is_empty()):
            print('Error : Either stream doesnt have enough bits or stream does not contain valid data')
            return
        if ((number_of_bits > self.size)):
            number_of_bits = self.size
        rbi = self.r_bit_index
        ri = self.read_index
        s = self.size
        if (self.r_bit_index + number_of_bits - 1 < self.array_unit_size):
            mask = int(2 ** number_of_bits) - 1
            mask = mask << (self.r_bit_index)
            ans = (mask & self.array[self.read_index]) >> self.r_bit_index
            self.r_bit_index = (self.r_bit_index + number_of_bits) % self.array_unit_size
            if (self.r_bit_index == 0):
                self.read_index = (self.read_index + 1) % len(self.array)
        else:
            num_bits_frm_cur = self.array_unit_size - self.r_bit_index
            num_bits_frm_nxt = number_of_bits - num_bits_frm_cur
            ans1 = self.read(num_bits_frm_cur)
            if (not (self.r_bit_index == 0)):
                self.read_index = (self.read_index + 1) % len(self.array)
            ans2 = self.read(num_bits_frm_nxt)
            ans = (ans2 << (num_bits_frm_cur)) + ans1
        self.r_bit_index = rbi
        self.read_index = ri
        self.size = s
        return ans

    def read(self,number_of_bits):
        s = self.size
        if((not self.valid )or self.is_empty()):
            print('Error : Either stream doesnt have enough bits or stream does not contain valid data')
            return
        if((number_of_bits > self.size)):
            number_of_bits = self.size
        if(self.r_bit_index + number_of_bits-1 < self.array_unit_size):
            # Requested bits fit inside the current array word.
            mask = int(2**number_of_bits) - 1
            mask = mask<<(self.r_bit_index)
            ans = (mask & self.array[self.read_index])>>self.r_bit_index
            self.r_bit_index = (self.r_bit_index + number_of_bits)%self.array_unit_size
            if(self.r_bit_index == 0):
                self.read_index = (self.read_index +1)%len(self.array)
            self.size = self.size - number_of_bits
        else:
            # Split the read across the current and the following word.
            num_bits_frm_cur = self.array_unit_size - self.r_bit_index
num_bits_frm_nxt = number_of_bits - num_bits_frm_cur ans1 = self.read(num_bits_frm_cur) if(not(self.r_bit_index == 0)): self.read_index = (self.read_index +1)%len(self.array) ans2 = self.read(num_bits_frm_nxt) #ans = (ans2<<math.ceil(math.log(ans1+1,2))) + ans1 ans = (ans2<<(num_bits_frm_cur)) + ans1 s2 = self.size #print(s-s2,'removed : ',ans) return ans def write(self,number_of_bits,val): s = self.array_unit_size w = self.w_bit_index wi = self.write_index r = self.r_bit_index ri = self.read_index rb = ri*s + r wb = wi *s + w if(self.size+number_of_bits > self.capacity): a = self.array self.array = np.zeros((2*len(a)),dtype= a.dtype) self.capacity = 2*len(a)*s if(rb<wb): self.array[0:(wi-ri+1)] = a[r:(wi+1)] self.read_index = 0 self.write_index = wi-ri else: self.array[0:wi]=a[0:wi] self.array[-(len(a) - ri):] = a[ri:] self.read_index = len(self.array) -(len(a) - ri) if(number_of_bits + self.w_bit_index -1 < self.array_unit_size): x = self.array[self.write_index] val = val << self.w_bit_index mask = 2**number_of_bits - 1 mask = mask<<(self.w_bit_index) self.array[self.write_index]= (val & mask) + (x &(~mask)) self.w_bit_index = (self.w_bit_index + number_of_bits)%self.array_unit_size if(self.w_bit_index == 0): self.write_index = (self.write_index +1)%len(self.array) self.size = self.size + number_of_bits else: num_bits_in_cur = self.array_unit_size - self.w_bit_index num_bits_in_nxt = number_of_bits - num_bits_in_cur self.write(num_bits_in_cur, val) if(not(self.w_bit_index == 0)): self.write_index = (self.write_index +1)%len(self.array) val = val>>(num_bits_in_cur) self.write(num_bits_in_nxt,val ) def read_from_end(self, number_of_bits): s = self.size if ((not self.valid) or self.is_empty()): print('Error : Either stream doesnt have enough bits or stream does not contain valid data') return if ((number_of_bits > self.size)): number_of_bits = self.size if (self.r_bit_index + number_of_bits - 1 < self.array_unit_size): mask = int(2 ** number_of_bits) - 1 mask = mask << (self.r_bit_index) ans = (mask & self.array[self.read_index]) >> self.r_bit_index self.r_bit_index = (self.r_bit_index + number_of_bits) % self.array_unit_size if (self.r_bit_index == 0): self.read_index = (self.read_index + 1) % len(self.array) self.size = self.size - number_of_bits # print(self.read_index,' ',self.r_bit_index) # print(bin(ans)) else: num_bits_frm_cur = self.array_unit_size - self.r_bit_index num_bits_frm_nxt = number_of_bits - num_bits_frm_cur ans1 = self.read(num_bits_frm_cur) if (not (self.r_bit_index == 0)): self.read_index = (self.read_index + 1) % len(self.array) ans2 = self.read(num_bits_frm_nxt) # ans = (ans2<<math.ceil(math.log(ans1+1,2))) + ans1 ans = (ans2 << (num_bits_frm_cur)) + ans1 s2 = self.size # print(s-s2,'removed : ',ans) return ans def write_in_front(self, number_of_bits, val): s = self.array_unit_size w = self.w_bit_index wi = self.write_index r = self.r_bit_index ri = self.read_index rb = ri * s + r wb = wi * s + w if (self.size + number_of_bits > self.capacity): a = self.array self.array = np.zeros((2 * len(a)), dtype=a.dtype) self.capacity = 2 * len(a) * s if (rb < wb): self.array[0:(wi - ri + 1)] = a[r:(wi + 1)] self.read_index = 0 self.write_index = wi - ri else: self.array[0:wi] = a[0:wi] self.array[-(len(a) - ri):] = a[ri:] self.read_index = len(self.array) - (len(a) - ri) if (number_of_bits + self.w_bit_index - 1 < self.array_unit_size): x = self.array[self.write_index] val = val << self.w_bit_index mask = 2 ** number_of_bits - 1 mask = mask << (self.w_bit_index) 
self.array[self.write_index] = (val & mask) + (x & (~mask)) self.w_bit_index = (self.w_bit_index + number_of_bits) % self.array_unit_size if (self.w_bit_index == 0): self.write_index = (self.write_index + 1) % len(self.array) self.size = self.size + number_of_bits else: num_bits_in_cur = self.array_unit_size - self.w_bit_index num_bits_in_nxt = number_of_bits - num_bits_in_cur self.write(num_bits_in_cur, val) if (not (self.w_bit_index == 0)): self.write_index = (self.write_index + 1) % len(self.array) val = val >> (num_bits_in_cur) self.write(num_bits_in_nxt, val) def show(self): i = self.read_index while(True): x = self.array[i] if(i == self.read_index): x = x >> self.r_bit_index print(bin(x)) i=(i+1)%len(self.array) if((i == self.write_index)): x = self.array[self.write_index] if(not(self.w_bit_index == 0)): x = (int(2**(self.w_bit_index)) - 1) & x print(bin(x)) break def get_array(self): i = 0 size = math.ceil(self.size/self.array_unit_size) ans = np.zeros((size),dtype = (str(self.array_type)+str(self.array_unit_size))) ri = self.read_index = 0 rbi = self.r_bit_index = 0 wi = self.write_index = len(ans) wbi = self.w_bit_index for i in range(size-1): #print('unit size',self.array_unit_size) ans[i] = self.read(self.array_unit_size) #print(ans[i]) ans[size - 1] = self.read(self.size) #self.array[0:len(ans)] = ans self.read_index = ri self.r_bit_index = rbi self.write_index = wi self.w_bit_index = wbi self.size = size * self.array_unit_size return ans def is_empty(self): return (self.size == 0) """ a = np.array([1,2,4,5],dtype='uint8') bs = bitstream(a); bs.show() print(bs.read(3)) print(bs.read(3)) print(bs.read(3)) print("now") bs.show() ar = bs.get_array() print(ar) print("now") bs.show() """
[ "math.log", "numpy.zeros", "math.ceil" ]
[((11166, 11209), 'math.ceil', 'math.ceil', (['(self.size / self.array_unit_size)'], {}), '(self.size / self.array_unit_size)\n', (11175, 11209), False, 'import math\n'), ((438, 464), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': '"""uint8"""'}), "(8, dtype='uint8')\n", (446, 464), True, 'import numpy as np\n'), ((1447, 1486), 'numpy.zeros', 'np.zeros', (['array_size'], {'dtype': 'array.dtype'}), '(array_size, dtype=array.dtype)\n', (1455, 1486), True, 'import numpy as np\n'), ((951, 984), 'math.log', 'math.log', (['self.array_unit_size', '(2)'], {}), '(self.array_unit_size, 2)\n', (959, 984), False, 'import math\n'), ((791, 824), 'math.log', 'math.log', (['self.array_unit_size', '(2)'], {}), '(self.array_unit_size, 2)\n', (799, 824), False, 'import math\n'), ((754, 787), 'math.log', 'math.log', (['self.array_unit_size', '(2)'], {}), '(self.array_unit_size, 2)\n', (762, 787), False, 'import math\n')]
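# A short round-trip with the class above, showing that values written with
# write(number_of_bits, value) come back out of read() in FIFO order across
# word boundaries (uses only methods defined above):
bs = bitstream()
for nbits, val in ((3, 0b101), (7, 0b1100110), (4, 0b0110)):
    bs.write(nbits, val)
print(bs.size)          # 14 bits buffered
print(bin(bs.read(3)))  # 0b101
print(bin(bs.read(7)))  # 0b1100110 -- spans the first/second uint8 word
print(bin(bs.read(4)))  # 0b110
print(bs.is_empty())    # True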
import datetime import pytz from django.test import SimpleTestCase from .schedules import repeater, every_day_at, every_dow_at DENVER = pytz.timezone('America/Denver') def tztime(tz, *args, **kwargs): if not isinstance(tz, datetime.tzinfo): tz = pytz.timezone(tz) naive = datetime.datetime(*args, **kwargs) return tz.localize(naive) class TestSchedulers(SimpleTestCase): def test_repeater(self): now = tztime('UTC', 2019, 1, 1, 0, 0, 0) n = repeater(datetime.timedelta(seconds=5))(now) self.assertEqual(n, tztime('UTC', 2019, 1, 1, 0, 0, 5)) def test_every_day_at_before(self): now = tztime('UTC', 2019, 1, 1, 0, 0, 0) n = every_day_at(datetime.time(8), pytz.UTC)(now) self.assertEqual(n, tztime('UTC', 2019, 1, 1, 8)) def test_every_day_at_after(self): now = tztime('UTC', 2019, 1, 1, 9, 0, 0) n = every_day_at(datetime.time(8), pytz.UTC)(now) self.assertEqual(n, tztime('UTC', 2019, 1, 2, 8)) def test_every_day_dst_transition(self): now = tztime(DENVER, 2019, 11, 2, 8, 0, 0) n = every_day_at(datetime.time(8), DENVER)(now) self.assertEqual(n, tztime(DENVER, 2019, 11, 3, 8)) def test_every_day_different_time_zones(self): now = tztime('UTC', 2019, 11, 3, 0, 0, 0) n = every_day_at(datetime.time(22), pytz.FixedOffset(-180))(now) self.assertEqual(n, tztime(pytz.FixedOffset(-180), 2019, 11, 2, 22)) def test_every_dow_at_before(self): now = tztime(DENVER, 2019, 11, 2, 8, 0, 0) n = every_dow_at(6, datetime.time(8), DENVER)(now) self.assertEqual(n, tztime(DENVER, 2019, 11, 3, 8)) def test_every_dow_at_after(self): now = tztime(DENVER, 2019, 11, 3, 8, 0, 0) n = every_dow_at(2, datetime.time(8), DENVER)(now) self.assertEqual(n, tztime(DENVER, 2019, 11, 6, 8)) def test_every_dow_equal(self): now = tztime(DENVER, 2019, 11, 3, 8, 0, 0) n = every_dow_at(6, datetime.time(8), DENVER)(now) self.assertEqual(n, tztime(DENVER, 2019, 11, 10, 8)) def test_every_dow_different_time_zones(self): now = tztime('UTC', 2019, 11, 4, 0, 0, 0) n = every_dow_at(6, datetime.time(22), pytz.FixedOffset(-180))(now) self.assertEqual(n, tztime(pytz.FixedOffset(-180), 2019, 11, 3, 22))
[ "pytz.FixedOffset", "datetime.datetime", "datetime.timedelta", "pytz.timezone", "datetime.time" ]
[((140, 171), 'pytz.timezone', 'pytz.timezone', (['"""America/Denver"""'], {}), "('America/Denver')\n", (153, 171), False, 'import pytz\n'), ((294, 328), 'datetime.datetime', 'datetime.datetime', (['*args'], {}), '(*args, **kwargs)\n', (311, 328), False, 'import datetime\n'), ((264, 281), 'pytz.timezone', 'pytz.timezone', (['tz'], {}), '(tz)\n', (277, 281), False, 'import pytz\n'), ((498, 527), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(5)'}), '(seconds=5)\n', (516, 527), False, 'import datetime\n'), ((713, 729), 'datetime.time', 'datetime.time', (['(8)'], {}), '(8)\n', (726, 729), False, 'import datetime\n'), ((918, 934), 'datetime.time', 'datetime.time', (['(8)'], {}), '(8)\n', (931, 934), False, 'import datetime\n'), ((1131, 1147), 'datetime.time', 'datetime.time', (['(8)'], {}), '(8)\n', (1144, 1147), False, 'import datetime\n'), ((1349, 1366), 'datetime.time', 'datetime.time', (['(22)'], {}), '(22)\n', (1362, 1366), False, 'import datetime\n'), ((1368, 1390), 'pytz.FixedOffset', 'pytz.FixedOffset', (['(-180)'], {}), '(-180)\n', (1384, 1390), False, 'import pytz\n'), ((1432, 1454), 'pytz.FixedOffset', 'pytz.FixedOffset', (['(-180)'], {}), '(-180)\n', (1448, 1454), False, 'import pytz\n'), ((1594, 1610), 'datetime.time', 'datetime.time', (['(8)'], {}), '(8)\n', (1607, 1610), False, 'import datetime\n'), ((1804, 1820), 'datetime.time', 'datetime.time', (['(8)'], {}), '(8)\n', (1817, 1820), False, 'import datetime\n'), ((2011, 2027), 'datetime.time', 'datetime.time', (['(8)'], {}), '(8)\n', (2024, 2027), False, 'import datetime\n'), ((2233, 2250), 'datetime.time', 'datetime.time', (['(22)'], {}), '(22)\n', (2246, 2250), False, 'import datetime\n'), ((2252, 2274), 'pytz.FixedOffset', 'pytz.FixedOffset', (['(-180)'], {}), '(-180)\n', (2268, 2274), False, 'import pytz\n'), ((2316, 2338), 'pytz.FixedOffset', 'pytz.FixedOffset', (['(-180)'], {}), '(-180)\n', (2332, 2338), False, 'import pytz\n')]
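# The schedules module itself is not shown. Below is one possible
# implementation that is consistent with the tests above (an assumption, not
# the actual source): repeater() adds a fixed timedelta, and every_day_at()
# returns the next wall-clock occurrence of `t` in `tz`, strictly after
# `now`, relying on pytz-style localize() so DST transitions resolve
# correctly (every_dow_at would extend the same idea with a weekday check).
import datetime


def repeater(delta):
    return lambda now: now + delta


def every_day_at(t, tz):
    def next_run(now):
        local = now.astimezone(tz)
        candidate = tz.localize(datetime.datetime.combine(local.date(), t))
        if candidate <= now:
            candidate = tz.localize(
                datetime.datetime.combine(local.date() + datetime.timedelta(days=1), t))
        return candidate
    return next_run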
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# * * * * * * * * * * * * * * * * * * * *
# pokeyproxy: a simple TCP proxy
# Requires python3
#
# Help & Usage:
#   $ python3 pokeyproxy.py -h
#
# * * * * * * * * * * * * * * * * * * * *
#
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# * * * * * * * * * * * * * * * * * * * *

import signal
import socket
import threading
import sys
import argparse

if sys.version_info[0] < 3:
    # Python 3 required for command line execution
    raise AssertionError("Must use Python 3")

this = sys.modules[__name__]

def cli():
    parser = argparse.ArgumentParser()
    parser.add_argument('--local-port', type=int, help='Specify bind port (default = 8520)', default=8520)
    parser.add_argument('--remote-port', type=int, help='Specify the remote port')
    parser.add_argument('--remote-host', type=str, help='Specify the remote host')
    parser.add_argument('--receive-first', action='store_true', help='Connect and receive before sending data')
    parser.add_argument('--nocolor', action='store_true', help='Skip colors in output')
    parser.add_argument('--verbose', action='store_true', help='Enable verbose output')
    parser.add_argument('--timeout', type=float, help='Request timeout in s (Default=3s)', default=3)
    return parser.parse_args()

def cprint(val, col=None, verbose=False):
    if not args.verbose and verbose:
        return
    if col is None:
        msg = val
    else:
        msg = color_wrap(val, col)
    print(msg)

def color_wrap(val, col):
    if args.nocolor:
        return str(val)
    return ''.join([col, str(val), Color.END])

class InterruptHandler:
    ''' Interrupt Handler as context manager '''

    def __init__(self, sig=signal.SIGINT):
        self.sig = sig

    def __enter__(self):
        self.interrupted = False
        self.released = False
        self.sig_orig = signal.getsignal(self.sig)

        def handler(signum, frame):
            self.release()
            self.interrupted = True

        signal.signal(self.sig, handler)
        return self

    def __exit__(self, type, value, tb):
        self.release()

    def release(self):
        if self.released:
            return False
        signal.signal(self.sig, self.sig_orig)
        self.released = True
        return True

class Color:
    BLACK_ON_GREEN = '\x1b[1;30;42m'
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'
    MSG = '\x1b[1;32;44m'
    ERR = '\x1b[1;31;44m'
    TST = '\x1b[7;34;46m'

def hexdump(src, length=16):
    # Render a bytes buffer as offset / hex / printable-ASCII columns.
    result = []
    digits = 4
    for i in range(0, len(src), length):
        s = src[i:i+length]
        hexa = ' '.join('{0:0{1}X}'.format(b, digits) for b in s)
        text = ''.join(chr(b) if 0x20 <= b < 0x7F else '.' for b in s)
        result.append('{0:04X}   {1:<{2}}   {3}'.format(i, hexa, length*(digits+1), text))
    cprint('\n'.join(result), Color.BLUE)

def receive_from(cxn):
    buff = b''
    cprint(' - Setting timeout to {}s'.format(args.timeout), Color.BLUE, True)
    cxn.settimeout(args.timeout)
    failed = 0
    try:
        while True:
            data = cxn.recv(4096)  # read in 4 KiB chunks
            if not data:
                failed += 1
                if failed >= 3:
                    cprint(' - 3 empty cxn.recv() reads, exiting', Color.BLUE, True)
                    break
            else:
                failed = 0
                buff += data
    except socket.timeout:
        cprint(' - Socket timeout', Color.BLUE, True)
    except KeyboardInterrupt:
        pass
    except:
        raise
    return buff

def request_handler(buff):
    # Not yet implemented, packet modifications, etc can go here
    return buff

def response_handler(buff):
    # Not yet implemented, modify responses destined to localhost
    return buff

def proxy_handler(c_sock, remote_host, remote_port, recv_first):
    remote_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    remote_sock.connect((remote_host, remote_port))

    # Handle receive_first if True
    if recv_first:
        r_buff = receive_from(remote_sock)
        hexdump(r_buff)
        r_buff = response_handler(r_buff)

        if len(r_buff):
            cprint('[<] Sending {} bytes to localhost'.format(len(r_buff)), Color.GREEN)
            c_sock.send(r_buff)

    # Start proxy loop
    while True:
        try:
            l_buff = receive_from(c_sock)

            if len(l_buff):
                cprint('[>] Received {} bytes from localhost'.format(len(l_buff)), Color.GREEN)
                hexdump(l_buff)
                l_buff = request_handler(l_buff)  # localhost -> remote goes through the request hook
                remote_sock.send(l_buff)
                cprint('[>] Sent to remote', Color.GREEN)

            r_buff = receive_from(remote_sock)
            if len(r_buff):
                cprint('[<] Received {} bytes from remote'.format(len(r_buff)), Color.GREEN)
                hexdump(r_buff)
                r_buff = response_handler(r_buff)
                c_sock.send(r_buff)
                cprint('[<] Sent to localhost', Color.GREEN)

            if not len(l_buff) and not len(r_buff):
                safe_close(c_sock)
                safe_close(remote_sock)
                cprint('[!] No more data, closing connections', Color.GREEN)
                break
        except KeyboardInterrupt:
            safe_close(c_sock)
            safe_close(remote_sock)
            break

def safe_close(cxn):
    cxn.shutdown(1)
    cxn.close()

def server_loop(args):
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    try:
        server_sock.bind(('', args.local_port))
    except:
        cprint('[!] Failed to bind to localhost:{}'.format(args.local_port), Color.ERR)
        cprint(' - Check permissions and for active sockets', Color.BLUE, True)
        sys.exit(1)

    cprint('[*] Listening on port {}'.format(args.local_port), Color.GREEN)
    server_sock.listen(5)  # backlog of pending connections

    with InterruptHandler() as h:
        while not h.interrupted:
            client_sock, addr = server_sock.accept()
            cprint('[>] Received incoming connection from {}:{}'.format(*addr), Color.GREEN)
            proxy_thread = threading.Thread(target=proxy_handler,
                                            args=(client_sock, args.remote_host, args.remote_port, args.receive_first))
            proxy_thread.start()
            # The proxy thread owns client_sock from here on and closes it
            # via safe_close() when the connection is done.
    server_sock.close()

if __name__ == '__main__':
    this.args = cli()
    server_loop(args)
[ "threading.Thread", "argparse.ArgumentParser", "socket.socket", "signal.getsignal", "signal.signal", "sys.exit" ]
[((1677, 1702), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1700, 1702), False, 'import argparse\n'), ((5111, 5160), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (5124, 5160), False, 'import socket\n'), ((6715, 6764), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (6728, 6764), False, 'import socket\n'), ((2987, 3013), 'signal.getsignal', 'signal.getsignal', (['self.sig'], {}), '(self.sig)\n', (3003, 3013), False, 'import signal\n'), ((3123, 3155), 'signal.signal', 'signal.signal', (['self.sig', 'handler'], {}), '(self.sig, handler)\n', (3136, 3155), False, 'import signal\n'), ((3324, 3362), 'signal.signal', 'signal.signal', (['self.sig', 'self.sig_orig'], {}), '(self.sig, self.sig_orig)\n', (3337, 3362), False, 'import signal\n'), ((7026, 7037), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7034, 7037), False, 'import sys\n'), ((7391, 7509), 'threading.Thread', 'threading.Thread', ([], {'target': 'proxy_handler', 'args': '(client_sock, args.remote_host, args.remote_port, args.receive_first)'}), '(target=proxy_handler, args=(client_sock, args.remote_host,\n args.remote_port, args.receive_first))\n', (7407, 7509), False, 'import threading\n')]
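# Usage sketch for the InterruptHandler context manager defined above
# (assuming the class is importable): the loop exits cleanly on the first
# Ctrl+C instead of raising KeyboardInterrupt mid-iteration.
import time

with InterruptHandler() as h:
    while not h.interrupted:
        time.sleep(0.5)  # stand-in for blocking accept()/recv() work
print('interrupted, shutting down')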
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals from functools import partial from itertools import chain, takewhile from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound from .comparison import ExpressionMatcher from .compat import mock from .utils import ( build_identity_map, copy_and_update, get_item_attr, indexof, raiser, setattr_tmp, ) Call = type(mock.call) class UnorderedTuple(tuple): """ Same as tuple except in comparison order does not matter For example:: >>> UnorderedTuple((1, 2, 3)) == (3, 2, 1) True """ def __eq__(self, other): if len(self) != len(other): return False other = list(other) for i in self: try: other.remove(i) except ValueError: return False return True class UnorderedCall(Call): """ Same as Call except in comparison order of parameters does not matter For example:: >>> UnorderedCall(((1, 2, 3), {'hello': 'world'})) == Call(((3, 2, 1), {'hello': 'world'})) True """ def __eq__(self, other): _other = list(other) _other[-2] = UnorderedTuple(other[-2]) other = Call( tuple(_other), **{k.replace("_mock_", ""): v for k, v in vars(other).items()} ) return super(UnorderedCall, self).__eq__(other) def sqlalchemy_call(call, with_name=False, base_call=Call): """ Convert ``mock.call()`` into call with all parameters wrapped with ``ExpressionMatcher`` For example:: >>> args, kwargs = sqlalchemy_call(mock.call(5, foo='bar')) >>> isinstance(args[0], ExpressionMatcher) True >>> isinstance(kwargs['foo'], ExpressionMatcher) True """ try: args, kwargs = call except ValueError: name, args, kwargs = call else: name = "" args = tuple([ExpressionMatcher(i) for i in args]) kwargs = {k: ExpressionMatcher(v) for k, v in kwargs.items()} if with_name: return base_call((name, args, kwargs)) else: return base_call((args, kwargs), two=True) class AlchemyMagicMock(mock.MagicMock): """ MagicMock for SQLAlchemy which can compare alchemys expressions in assertions For example:: >>> from sqlalchemy import or_ >>> from sqlalchemy.sql.expression import column >>> c = column('column') >>> s = AlchemyMagicMock() >>> _ = s.filter(or_(c == 5, c == 10)) >>> _ = s.filter.assert_called_once_with(or_(c == 5, c == 10)) >>> _ = s.filter.assert_any_call(or_(c == 5, c == 10)) >>> _ = s.filter.assert_has_calls([mock.call(or_(c == 5, c == 10))]) >>> s.reset_mock() >>> _ = s.filter(c == 5) >>> _ = s.filter.assert_called_once_with(c == 10) Traceback (most recent call last): ... 
AssertionError: Expected call: filter(BinaryExpression(sql='"column" = :column_1', params={'column_1': 10})) Actual call: filter(BinaryExpression(sql='"column" = :column_1', params={'column_1': 5})) """ def __init__(self, *args, **kwargs): kwargs.setdefault("__name__", "Session") super(AlchemyMagicMock, self).__init__(*args, **kwargs) def _format_mock_call_signature(self, args, kwargs): name = self._mock_name or "mock" args, kwargs = sqlalchemy_call(mock.call(*args, **kwargs)) return mock._format_call_signature(name, args, kwargs) def assert_called_with(self, *args, **kwargs): args, kwargs = sqlalchemy_call(mock.call(*args, **kwargs)) return super(AlchemyMagicMock, self).assert_called_with(*args, **kwargs) def assert_any_call(self, *args, **kwargs): args, kwargs = sqlalchemy_call(mock.call(*args, **kwargs)) with setattr_tmp( self, "call_args_list", [sqlalchemy_call(i) for i in self.call_args_list], ): return super(AlchemyMagicMock, self).assert_any_call( *args, **kwargs ) def assert_has_calls(self, calls, any_order=False): calls = [sqlalchemy_call(i) for i in calls] with setattr_tmp( self, "mock_calls", type(self.mock_calls)( [sqlalchemy_call(i) for i in self.mock_calls] ), ): return super(AlchemyMagicMock, self).assert_has_calls( calls, any_order ) class UnifiedAlchemyMagicMock(AlchemyMagicMock): """ MagicMock which unifies common SQLALchemy session functions for easier assertions. For example:: >>> from sqlalchemy.sql.expression import column >>> c = column('column') >>> s = UnifiedAlchemyMagicMock() >>> s.query(None).filter(c == 'one').filter(c == 'two').all() [] >>> s.query(None).filter(c == 'three').filter(c == 'four').all() [] >>> s.filter.call_count 2 >>> s.filter.assert_any_call(c == 'one', c == 'two') >>> s.filter.assert_any_call(c == 'three', c == 'four') In addition, mock data be specified to stub real DB interactions. Result-sets are specified per filtering criteria so that unique data can be returned depending on query/filter/options criteria. Data is given as a list of ``(criteria, result)`` tuples where ``criteria`` is a list of calls. Reason for passing data as a list vs a dict is that calls and SQLAlchemy expressions are not hashable hence cannot be dict keys. For example:: >>> from sqlalchemy import Column, Integer, String >>> from sqlalchemy.ext.declarative import declarative_base >>> Base = declarative_base() >>> class SomeClass(Base): ... __tablename__ = 'some_table' ... pk1 = Column(Integer, primary_key=True) ... pk2 = Column(Integer, primary_key=True) ... name = Column(String(50)) ... def __repr__(self): ... return str(self.pk1) >>> s = UnifiedAlchemyMagicMock(data=[ ... ( ... [mock.call.query('foo'), ... mock.call.filter(c == 'one', c == 'two')], ... [SomeClass(pk1=1, pk2=1), SomeClass(pk1=2, pk2=2)] ... ), ... ( ... [mock.call.query('foo'), ... mock.call.filter(c == 'one', c == 'two'), ... mock.call.order_by(c)], ... [SomeClass(pk1=2, pk2=2), SomeClass(pk1=1, pk2=1)] ... ), ... ( ... [mock.call.filter(c == 'three')], ... [SomeClass(pk1=3, pk2=3)] ... ), ... 
]) # .all() >>> s.query('foo').filter(c == 'one').filter(c == 'two').all() [1, 2] >>> s.query('bar').filter(c == 'one').filter(c == 'two').all() [] >>> s.query('foo').filter(c == 'one').filter(c == 'two').order_by(c).all() [2, 1] >>> s.query('foo').filter(c == 'one').filter(c == 'three').order_by(c).all() [] >>> s.query('foo').filter(c == 'three').all() [3] >>> s.query(None).filter(c == 'four').all() [] # .iter() >>> list(s.query('foo').filter(c == 'two').filter(c == 'one')) [1, 2] # .count() >>> s.query('foo').filter(c == 'two').filter(c == 'one').count() 2 # .first() >>> s.query('foo').filter(c == 'one').filter(c == 'two').first() 1 >>> s.query('bar').filter(c == 'one').filter(c == 'two').first() # .one() >>> s.query('foo').filter(c == 'three').one() 3 >>> s.query('bar').filter(c == 'one').filter(c == 'two').one() Traceback (most recent call last): ... NoResultFound: No row was found for one() >>> s.query('foo').filter(c == 'one').filter(c == 'two').one() Traceback (most recent call last): ... MultipleResultsFound: Multiple rows were found for one() >>> s.query('bar').filter(c == 'one').filter(c == 'two').one_or_none() # .get() >>> s.query('foo').get((1, 1)) 1 >>> s.query('foo').get((4, 4)) >>> s.query('foo').filter(c == 'two').filter(c == 'one').get((1, 1)) 1 >>> s.query('foo').filter(c == 'three').get((1, 1)) 1 >>> s.query('foo').filter(c == 'three').get((4, 4)) # dynamic session >>> class Model(Base): ... __tablename__ = 'model_table' ... pk1 = Column(Integer, primary_key=True) ... name = Column(String) ... def __repr__(self): ... return str(self.pk1) >>> s = UnifiedAlchemyMagicMock() >>> s.add(SomeClass(pk1=1, pk2=1)) >>> s.add_all([SomeClass(pk1=2, pk2=2)]) >>> s.add_all([SomeClass(pk1=4, pk2=3)]) >>> s.add_all([Model(pk1=4, name='some_name')]) >>> s.query(SomeClass).all() [1, 2, 4] >>> s.query(SomeClass).get((1, 1)) 1 >>> s.query(SomeClass).get((2, 2)) 2 >>> s.query(SomeClass).get((3, 3)) >>> s.query(SomeClass).filter(c == 'one').all() [1, 2, 4] >>> s.query(SomeClass).get((4, 3)) 4 >>> s.query(SomeClass).get({"pk2": 3, "pk1": 4}) 4 >>> s.query(Model).get(4) 4 # .delete() >>> s = UnifiedAlchemyMagicMock(data=[ ... ( ... [mock.call.query('foo'), ... mock.call.filter(c == 'one', c == 'two')], ... [SomeClass(pk1=1, pk2=1), SomeClass(pk1=2, pk2=2)] ... ), ... ( ... [mock.call.query('foo'), ... mock.call.filter(c == 'one', c == 'two'), ... mock.call.order_by(c)], ... [SomeClass(pk1=2, pk2=2), SomeClass(pk1=1, pk2=1)] ... ), ... ( ... [mock.call.filter(c == 'three')], ... [SomeClass(pk1=3, pk2=3)] ... ), ... ( ... [mock.call.query('foo'), ... mock.call.filter(c == 'one', c == 'two', c == 'three')], ... [SomeClass(pk1=1, pk2=1), SomeClass(pk1=2, pk2=2), SomeClass(pk1=3, pk2=3)] ... ), ... ]) >>> s.query('foo').filter(c == 'three').all() [3] >>> s.query('foo').all() [] >>> s.query('foo').filter(c == 'three').delete() 1 >>> s.query('foo').filter(c == 'three').all() [] >>> s.query('foo').filter(c == 'one').filter(c == 'two').all() [1, 2] >>> s.query('foo').filter(c == 'one').filter(c == 'two').filter(c == 'three').all() [1, 2, 3] >>> s = UnifiedAlchemyMagicMock() >>> s.add(SomeClass(pk1=1, pk2=1)) >>> s.add_all([SomeClass(pk1=2, pk2=2)]) >>> s.query(SomeClass).all() [1, 2] >>> s.query(SomeClass).delete() 2 >>> s.query(SomeClass).all() [] >>> s = UnifiedAlchemyMagicMock() >>> s.add_all([SomeClass(pk1=2, pk2=2)]) >>> s.query(SomeClass).delete() 1 >>> s.query(SomeClass).delete() 0 Also note that only within same query functions are unified. 
After ``.all()`` is called or query is iterated over, future queries are not unified. """ boundary = { "all": lambda x: x, "__iter__": lambda x: iter(x), "count": lambda x: len(x), "first": lambda x: next(iter(x), None), "one": lambda x: ( x[0] if len(x) == 1 else raiser( MultipleResultsFound, "Multiple rows were found for one()" ) if x else raiser(NoResultFound, "No row was found for one()") ), "one_or_none": lambda x: ( x[0] if len(x) == 1 else raiser( MultipleResultsFound, "Multiple rows were found for one_or_none()", ) if x else None ), "get": lambda x, idmap: get_item_attr(build_identity_map(x), idmap), } unify = { "query": None, "add_columns": None, "join": None, "options": None, "group_by": None, "filter": UnorderedCall, "filter_by": UnorderedCall, "order_by": None, "limit": None, "distinct": None, } mutate = {"add", "add_all", "delete"} def __init__(self, *args, **kwargs): kwargs["_mock_default"] = kwargs.pop("default", []) kwargs["_mock_data"] = kwargs.pop("data", None) kwargs.update( { k: AlchemyMagicMock( side_effect=partial(self._get_data, _mock_name=k) ) for k in self.boundary } ) kwargs.update( { k: AlchemyMagicMock( return_value=self, side_effect=partial(self._unify, _mock_name=k), ) for k in self.unify } ) kwargs.update( { k: AlchemyMagicMock( return_value=None, side_effect=partial(self._mutate_data, _mock_name=k), ) for k in self.mutate } ) super(UnifiedAlchemyMagicMock, self).__init__(*args, **kwargs) def _get_previous_calls(self, calls): return iter( takewhile(lambda i: i[0] not in self.boundary, reversed(calls)) ) def _get_previous_call(self, name, calls): # get all previous session calls within same session query previous_calls = self._get_previous_calls(calls) # skip last call next(previous_calls) return next(iter(filter(lambda i: i[0] == name, previous_calls)), None) def _unify(self, *args, **kwargs): _mock_name = kwargs.pop("_mock_name") submock = getattr(self, _mock_name) previous_method_call = self._get_previous_call( _mock_name, self.method_calls ) previous_mock_call = self._get_previous_call( _mock_name, self.mock_calls ) if previous_mock_call is None: return submock.return_value # remove immediate call from both filter mock as well as the parent mock object # as it was already registered in self.__call__ before this side-effect is called submock.call_count -= 1 submock.call_args_list.pop() submock.mock_calls.pop() self.method_calls.pop() self.mock_calls.pop() # remove previous call since we will be inserting new call instead submock.call_args_list.pop() submock.mock_calls.pop() self.method_calls.pop(indexof(previous_method_call, self.method_calls)) self.mock_calls.pop(indexof(previous_mock_call, self.mock_calls)) name, pargs, pkwargs = previous_method_call args = pargs + args kwargs = copy_and_update(pkwargs, kwargs) submock.call_args = Call((args, kwargs), two=True) submock.call_args_list.append(Call((args, kwargs), two=True)) submock.mock_calls.append(Call(("", args, kwargs))) self.method_calls.append(Call((name, args, kwargs))) self.mock_calls.append(Call((name, args, kwargs))) return submock.return_value def _get_data(self, *args, **kwargs): _mock_name = kwargs.pop("_mock_name") _mock_default = self._mock_default _mock_data = self._mock_data if _mock_data is not None: previous_calls = [ sqlalchemy_call( i, with_name=True, base_call=self.unify.get(i[0]) or Call ) for i in self._get_previous_calls(self.mock_calls[:-1]) ] sorted_mock_data = sorted( _mock_data, key=lambda x: len(x[0]), reverse=True ) if _mock_name == "get": query_call = [c for c in 
previous_calls if c[0] == "query"][0] results = list( chain( *[ result for calls, result in sorted_mock_data if query_call in calls ] ) ) return self.boundary[_mock_name](results, *args, **kwargs) else: for calls, result in sorted_mock_data: calls = [ sqlalchemy_call( i, with_name=True, base_call=self.unify.get(i[0]) or Call, ) for i in calls ] if all(c in previous_calls for c in calls): return self.boundary[_mock_name]( result, *args, **kwargs ) return self.boundary[_mock_name](_mock_default, *args, **kwargs) def _mutate_data(self, *args, **kwargs): _mock_name = kwargs.get("_mock_name") _mock_data = self._mock_data = self._mock_data or [] if _mock_name == "add": to_add = args[0] query_call = mock.call.query(type(to_add)) mocked_data = next( iter(filter(lambda i: i[0] == [query_call], _mock_data)), None ) if mocked_data: mocked_data[1].append(to_add) else: _mock_data.append(([query_call], [to_add])) elif _mock_name == "add_all": to_add = args[0] _kwargs = kwargs.copy() _kwargs["_mock_name"] = "add" for i in to_add: self._mutate_data(i, *args[1:], **_kwargs) elif _mock_name == "delete": _kwargs = kwargs.copy() # pretend like all is being called to get data _kwargs["_mock_name"] = "all" # a list of deleted items to_delete = list(self._get_data(*args, **_kwargs)) num_deleted = len(to_delete) if to_delete: query_call = mock.call.query(type(to_delete[0])) mocked_data = next( iter(filter(lambda i: i[0] == [query_call], _mock_data)), None, ) if mocked_data: # remove objects based on the same instances num_deleted = len(to_delete) for row in to_delete: mocked_data[1].remove(row) # we delete the data from the specific query del to_delete return num_deleted
[ "functools.partial", "itertools.chain" ]
[((16429, 16506), 'itertools.chain', 'chain', (['*[result for calls, result in sorted_mock_data if query_call in calls]'], {}), '(*[result for calls, result in sorted_mock_data if query_call in calls])\n', (16434, 16506), False, 'from itertools import chain, takewhile\n'), ((12970, 13007), 'functools.partial', 'partial', (['self._get_data'], {'_mock_name': 'k'}), '(self._get_data, _mock_name=k)\n', (12977, 13007), False, 'from functools import partial\n'), ((13235, 13269), 'functools.partial', 'partial', (['self._unify'], {'_mock_name': 'k'}), '(self._unify, _mock_name=k)\n', (13242, 13269), False, 'from functools import partial\n'), ((13495, 13535), 'functools.partial', 'partial', (['self._mutate_data'], {'_mock_name': 'k'}), '(self._mutate_data, _mock_name=k)\n', (13502, 13535), False, 'from functools import partial\n')]
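# A hedged sketch of using UnifiedAlchemyMagicMock as a drop-in Session in a
# test. The model, the repository function and the import path
# (mock_alchemy.mocking) are illustrative assumptions.
from unittest import mock

from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))


def active_user_names(session):
    return [u.name for u in session.query(User).filter(User.name != None).all()]


def test_active_user_names():
    from mock_alchemy.mocking import UnifiedAlchemyMagicMock  # assumed import path
    session = UnifiedAlchemyMagicMock(data=[
        (
            [mock.call.query(User), mock.call.filter(User.name != None)],
            [User(id=1, name='ada'), User(id=2, name='lin')],
        ),
    ])
    assert active_user_names(session) == ['ada', 'lin']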
import embedtemplates
import permissions


async def Main(self, message, command, arguments):
    if arguments == "" or arguments is None or arguments == command:
        await message.channel.send(content="", embed=embedtemplates.help("Removes a Rank from the server."))
        return
    if not await permissions.is_guild_admin(self, message.guild.id, message.author.id):
        await message.channel.send(content="", embed=embedtemplates.failure("Permission Denied", "User does not have permission to use this!"))
        return
    ranks = self.database.get_ranks(message.guild.id)
    try:
        ranks["Dictionary"].pop(int(arguments))
    except (TypeError, IndexError, ValueError):  # bad index type, out-of-range ID, or non-numeric input
        await message.channel.send(content="", embed=embedtemplates.failure("Invalid Argument", "Please provide the ID of the rank!"))
        return
    for user in self.database.get_users_of_rank(message.guild.id, arguments):
        user["Rank"] = 0
        self.database.set_user(message.guild.id, user)
    for user in self.database.get_users_over_rank(message.guild.id, arguments):
        user["Rank"] = int(user["Rank"]) - 1
        self.database.set_user(message.guild.id, user)
    self.database.set_ranks(message.guild.id, ranks)
    await message.channel.send(content="", embed=embedtemplates.success("Rank Removed", "Rank " + arguments + " has been removed and all users' ranks have been adjusted accordingly."))
[ "permissions.is_guild_admin", "embedtemplates.failure", "embedtemplates.help" ]
[((304, 373), 'permissions.is_guild_admin', 'permissions.is_guild_admin', (['self', 'message.guild.id', 'message.author.id'], {}), '(self, message.guild.id, message.author.id)\n', (330, 373), False, 'import permissions\n'), ((216, 270), 'embedtemplates.help', 'embedtemplates.help', (['"""Removes a Rank from the server."""'], {}), "('Removes a Rank from the server.')\n", (235, 270), False, 'import embedtemplates\n'), ((428, 521), 'embedtemplates.failure', 'embedtemplates.failure', (['"""Permission Denied"""', '"""User does not have permission to use this!"""'], {}), "('Permission Denied',\n 'User does not have permission to use this!')\n", (450, 521), False, 'import embedtemplates\n'), ((810, 895), 'embedtemplates.failure', 'embedtemplates.failure', (['"""Invalid Argument"""', '"""Please provide the ID of the rank!"""'], {}), "('Invalid Argument', 'Please provide the ID of the rank!'\n )\n", (832, 895), False, 'import embedtemplates\n')]
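# Hypothetical sketch of the embedtemplates helpers used above -- that module
# is not shown, so the titles and colours here are pure assumptions; the only
# grounded fact is that each helper returns an embed suitable for
# message.channel.send(embed=...).
import discord


def help(description):
    return discord.Embed(title="Help", description=description)


def failure(title, description):
    return discord.Embed(title=title, description=description, colour=discord.Colour.red())


def success(title, description):
    return discord.Embed(title=title, description=description, colour=discord.Colour.green())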
import sys import csv import time import cvxopt import numpy as np import pandas as pd from svmutil import * import matplotlib.pyplot as plt from sklearn.metrics import f1_score from sklearn.metrics import confusion_matrix # reading data from csv files def get_data(data_path,issubset,digit1,digit2): train_data = np.array(pd.read_csv(data_path,header=None,dtype=float).values) train_output = np.array(train_data[:,784:785]) # True means we have to do binary classification between two digits only if issubset==True: train_data = train_data[np.ix_((train_data[:,784]==digit1) | (train_data[:,784]==digit2))] train_output = train_data[:,784:785] for i in range(len(train_data)): if train_output[i,0] == digit1: train_output[i,0] = 1 else: train_output[i,0] = -1 train_data = train_data/255 return (np.asmatrix(train_data[:,0:784]),np.asmatrix(train_output)) # plotting the confusion matrix def draw_confusion(confatrix): plt.imshow(confatrix) plt.title("Confusion Matrix") plt.colorbar() plt.set_cmap("Greens") plt.ylabel("True labels") plt.xlabel("Predicted label") plt.show() # Linear kernel using cvxopt for binary classification. Refer to doc attached and report for clarification def linear_kernel_cvxopt(train_data,train_output,penalty): m = len(train_data) X_Y = np.multiply(train_data,train_output) P = cvxopt.matrix(np.dot(X_Y,X_Y.transpose())) q = cvxopt.matrix(-1*np.ones((m,1))) A = cvxopt.matrix(train_output.transpose()) b = cvxopt.matrix(0.0) tmp1 = -1*np.identity(m) tmp2 = np.identity(m) G = cvxopt.matrix(np.vstack((tmp1,tmp2))) tmp1 = np.zeros((m,1)) tmp2 = penalty*np.ones((m,1)) h = cvxopt.matrix(np.vstack((tmp1,tmp2))) solution = cvxopt.solvers.qp(P,q,G,h,A,b) return solution # Calculating weights for linear kernel and storing them into files def calculate_linear_svm_params(kernel_soln,train_data,train_output,tolerance): nSV = 0 (m,n) = (train_data.shape[0],train_data.shape[1]) raveled = np.ravel(kernel_soln['x']) langrangian_params = np.arange(len(raveled)) [raveled>tolerance] weight_matrix = np.asmatrix(np.zeros((1,n),dtype=float)) for i in langrangian_params: for j in range(n): weight_matrix[0,j]+=(raveled[i]*train_data[i,j]*train_output[i,0]) nSV+=1 # writing indices of support vectors into text file print("Indices of support vectors have been stored in linear_support_vector_indices.txt") np.savetxt("linear_support_vector_indices.txt", langrangian_params , delimiter=', ',fmt='%d') # writing weight matrix into text file print("Weight matrix has been stored in weight_matrix.txt") with open('weight_matrix.txt','a') as f: for line in weight_matrix: np.savetxt(f, line, fmt='%.2f') b = 0 if nSV==0: print("No support vectors found for tolerance value of " + str(tolerance)) else: for sv_idx in langrangian_params: b+=(train_output[sv_idx,0] - np.dot(train_data[sv_idx,:],weight_matrix.transpose())[0,0]) b = b/(float(len(langrangian_params))) print(str(b) + " is the value of b") return (weight_matrix,b,nSV) # Calculates prediction over test_data def linear_kernel_svm_prediction(weight_matrix,b,test_data): predicted = np.asmatrix(np.zeros((len(test_data),1),dtype=int)) val = np.dot(test_data,weight_matrix.transpose()) + b predicted = 2*np.multiply((val>0),np.ones((len(test_data),1))) - 1 return predicted # Gaussian kernel using cvxopt for binary classification def gaussian_kernel_cvxopt(train_data,train_output,gamma,penalty): m = len(train_data) kernel = np.asmatrix(np.zeros((m,m),dtype=float)) X_XT = np.dot(train_data,train_data.transpose()) for i in range(m): for j in range(m): kernel[i,j] = 
float(X_XT[i,i] + X_XT[j,j] - 2*X_XT[i,j]) kernel = np.exp(-1*gamma*kernel) P = cvxopt.matrix(np.multiply(kernel,np.dot(train_output,train_output.transpose()))) q = cvxopt.matrix(-1*np.ones((m,1))) A = cvxopt.matrix(train_output.transpose()) b = cvxopt.matrix(0.0) tmp1 = -1*np.identity(m) tmp2 = np.identity(m) G = cvxopt.matrix(np.vstack((tmp1,tmp2))) tmp1 = np.zeros((m,1)) tmp2 = penalty*np.ones((m,1)) h = cvxopt.matrix(np.vstack((tmp1,tmp2))) solution = cvxopt.solvers.qp(P,q,G,h,A,b) return solution # Prediction using gaussian kernel. b depend on the test sample used; hence is calculated separately for each point def gaussian_prediction_cvxopt(kernel_soln,train_data,train_output,test_data,tolerance,gamma): (m,n) = (train_data.shape[0],train_data.shape[1]) raveled = np.ravel(kernel_soln['x']) nSV = 0 X_train = np.sum(np.multiply(train_data,train_data),axis=1) X_test = np.sum(np.multiply(test_data,test_data),axis=1) X_train_X_test = np.dot(train_data,test_data.transpose()) alpha_x_label = np.asmatrix(np.zeros((len(raveled),1),dtype=float)) for i in range(len(raveled)): if raveled[i]>tolerance: alpha_x_label[i,0] = train_output[i,0]*raveled[i] nSV+=1 langrangian_params = np.arange(len(raveled)) [raveled>tolerance] prediction = np.zeros((len(test_data),1),dtype=int) # writing indices of support vectors into text file print("Indices of support vectors have been saved in gaussian_support_vector_indices.txt") np.savetxt("gaussian_support_vector_indices.txt", langrangian_params , delimiter=', ',fmt='%d') if len(langrangian_params)<=0: print("No support vectors found for tolerance value= " + str(tolerance)) else: b = 0 for sv_idx in langrangian_params: b+=(train_output[sv_idx,0] - np.sum(np.multiply(alpha_x_label,np.exp(-1*gamma*np.sum(np.multiply(train_data-train_data[sv_idx,:],train_data-train_data[sv_idx,:]),axis=1))))) b = b/(float(len(langrangian_params))) print(str(b) + " is the value of b") for i in range(len(test_data)): prediction[i] = np.sign(np.sum(np.multiply(alpha_x_label,np.exp(-1*gamma*(X_train - 2*X_train_X_test[:,i] + X_test[i,0])))) + b) return (prediction,nSV) # Gaussian and linear kernel both using libsvm def libsvm_both(train_data,train_output,test_data,test_output,gamma,penalty): train_labels = [] train_input = train_data.tolist() for j in range(train_output.shape[0]): train_labels.append(train_output[j,0]) test_labels = [] test_input = test_data.tolist() for j in range(test_output.shape[0]): test_labels.append(test_output[j,0]) problem = svm_problem(train_labels,train_input) linear_param = svm_parameter("-s 0 -c 1 -t 0") linear_model = svm_train(problem,linear_param) linear_pred_lbl, linear_pred_acc, linear_pred_val = svm_predict(test_labels,test_input,linear_model) gaussian_param = svm_parameter("-s 0 -c " + str(penalty) + " -t 2 -g " + str(gamma)) gaussian_model = svm_train(problem,gaussian_param) gaussian_pred_lbl, gaussian_pred_acc, gaussian_pred_val = svm_predict(test_labels,test_input,gaussian_model) # ENDING OF BINARY CLASSIFICATION FUNCTIONS. BELOW CODE IS FOR MULTICLASS CLASSIFICATION # multiclass classification using cvxopt and 45 SVMs i.e. 
one-vs-one (pairwise) classification
def multiclass_svm_cvxopt(train_data_path,test_data_path,gamma,penalty,tolerance):
    svm_dict = {}
    num_max = 9  # digits 0-9 give 45 pairwise classifiers
    # learning parameters phase
    for i in range(1+num_max):
        for j in range(i):
            idx = str(i)+str(j)
            svm_dict[idx] = []
            (train_data,train_output) = get_data(train_data_path,True,i,j)
            kernel_soln = gaussian_kernel_cvxopt(train_data,train_output,gamma,penalty)
            svm_dict[idx] = np.ravel(kernel_soln['x']).tolist()
            print("lagrangian parameters for svm with index value " + idx + " computed")
    # prediction phase
    (test_data,test_output) = get_data(test_data_path,False,0,0)
    prediction_dict = {}
    for i in range(len(test_data)):
        prediction_dict[i] = [0,0,0,0,0,0,0,0,0,0]
    prediction = np.asmatrix(np.zeros((len(test_data),1),dtype=int))
    for i in range(1+num_max):
        for j in range(i):
            idx = str(i)+str(j)
            kernel_soln_x = svm_dict[idx]
            (train_data,train_output) = get_data(train_data_path,True,i,j)
            svm_prediction = gaussian_prediction_with_alphas(kernel_soln_x,train_data,train_output,test_data,tolerance,gamma)
            for k in range(len(svm_prediction)):
                if svm_prediction[k,0] == 1:
                    prediction_dict[k][i]+=1
                else:
                    prediction_dict[k][j]+=1
            print("predictions for svm with index value " + idx + " done")
    for i in range(len(test_data)):
        prediction[i] = np.argmax(prediction_dict[i])
    return (test_output,np.array(prediction))

# Helper function for multiclass classification using cvxopt
def gaussian_prediction_with_alphas(kernel_soln_x,train_data,train_output,test_data,tolerance,gamma):
    prediction = np.asmatrix(np.ones((len(test_data),1),dtype=int))
    raveled = np.asmatrix(kernel_soln_x).transpose()  # (m,1) column vector so the elementwise products below stay (m,1)
    X_train = np.sum(np.multiply(train_data,train_data),axis=1)
    X_test = np.sum(np.multiply(test_data,test_data),axis=1)
    X_train_X_test = np.dot(train_data,test_data.transpose())
    alpha_x_label = np.multiply(train_output,np.multiply(raveled,raveled>tolerance))
    langrangian_params = np.nonzero(raveled>tolerance)[0]
    if len(langrangian_params)==0:
        print("No support vectors found for tolerance value= " + str(tolerance))
    else:
        b = 0
        for sv_idx in langrangian_params:
            b+=(train_output[sv_idx,0] - np.sum(np.multiply(alpha_x_label,np.exp(-1*gamma*np.sum(np.multiply(train_data-train_data[sv_idx,:],train_data-train_data[sv_idx,:]),axis=1)))))
        b = b/(float(len(langrangian_params)))
        for i in range(len(test_data)):
            prediction[i,0] = np.sign(np.sum(np.multiply(alpha_x_label,np.exp(-1*gamma*(X_train - 2*X_train_X_test[:,i] + X_test[i,0])))) + b)
    return prediction

# multiclass classification using libsvm with 45 individual SVMs, i.e.
one-vs-one (pairwise) classification
def multiclass_svm_libsvm_45(train_data_path,test_data_path,gamma,penalty):
    svm_dict = {}
    prediction_dict = {}
    num_max = 9
    (test_data,test_output) = get_data(test_data_path,False,0,0)
    for i in range(len(test_data)):
        prediction_dict[i] = [0,0,0,0,0,0,0,0,0,0]
    prediction = np.asmatrix(np.zeros((len(test_data),1),dtype=int))
    # learning parameters phase (45 individual svms)
    for i in range(1+num_max):
        for j in range(i):
            (train_data,train_output) = get_data(train_data_path,True,i,j)
            idx = str(i)+str(j)
            train_labels = []
            train_input = train_data.tolist()
            for i1 in range(train_output.shape[0]):
                train_labels.append(train_output[i1,0])
            test_labels = []
            test_input = test_data.tolist()
            for j1 in range(test_output.shape[0]):
                test_labels.append(test_output[j1,0])
            problem = svm_problem(train_labels,train_input)
            gaussian_param = svm_parameter("-s 0 -c " + str(penalty) + " -t 2 -g " + str(gamma))
            gaussian_model = svm_train(problem,gaussian_param)
            svm_prediction_lbl,svm_prediction_acc,svm_prediction_val = svm_predict(test_labels,test_input,gaussian_model)
            for k in range(len(svm_prediction_lbl)):
                if svm_prediction_lbl[k] == 1:
                    prediction_dict[k][i]+=1
                else:
                    prediction_dict[k][j]+=1
            print("prediction using gaussian kernel in libsvm completed for " + idx)
    for i in range(len(test_data)):
        prediction[i] = np.argmax(prediction_dict[i])
    return (test_output,prediction)

# Multiclass classification for 10 classes 0-9
def multiclass_svm_libsvm(train_data_path,test_data_path,gamma,penalty):
    (train_data,train_output) = get_data(train_data_path,False,0,0)
    (test_data,test_output) = get_data(test_data_path,False,0,0)
    train_labels = []
    train_input = train_data.tolist()
    for i1 in range(train_output.shape[0]):
        train_labels.append(train_output[i1,0])
    test_labels = []
    test_input = test_data.tolist()
    for j1 in range(test_output.shape[0]):
        test_labels.append(test_output[j1,0])
    problem = svm_problem(train_labels,train_input)
    gaussian_param = svm_parameter("-s 0 -c " + str(penalty) + " -t 2 -g " + str(gamma))
    gaussian_model = svm_train(problem,gaussian_param)
    svm_prediction_lbl,svm_prediction_acc,svm_prediction_val = svm_predict(test_labels,test_input,gaussian_model)
    return (test_output,svm_prediction_lbl)

# MAIN FUNCTION
def main():
    train_data_path = sys.argv[1]
    test_data_path = sys.argv[2]
    classification = sys.argv[3]
    part = sys.argv[4]
    issubset = (classification=='0')
    if issubset==True:
        digit1 = 5
        digit2 = 6
        (train_data,train_output) = get_data(train_data_path,issubset,digit1,digit2)
        (test_data,test_output) = get_data(test_data_path,issubset,digit1,digit2)
        if part == 'a':
            tolerance = 1e-4
            penalty = 1
            print("tolerance,penalty for linear kernel for binary classification = " + str(tolerance) + "," + str(penalty))
            linear_kernel_soln = linear_kernel_cvxopt(train_data,train_output,penalty)
            (weight_matrix,b,nSV) = calculate_linear_svm_params(linear_kernel_soln,train_data,train_output,tolerance)
            print(str(nSV) + " support vectors")
            predicted = linear_kernel_svm_prediction(weight_matrix,b,test_data)
            confatrix = confusion_matrix(test_output,predicted)
            print("Confusion Matrix")
            print(confatrix)
            # draw_confusion(confatrix)
        elif part == 'b':
            gamma = 0.05
            penalty = 1
            tolerance = 1e-4
            print("tolerance,penalty,gamma for gaussian kernel for binary classification = " + str(tolerance) + "," + str(penalty) + "," + str(gamma))
            gaussian_kernel_soln = gaussian_kernel_cvxopt(train_data,train_output,gamma,penalty)
            (predicted,nSV) = gaussian_prediction_cvxopt(gaussian_kernel_soln,train_data,train_output,test_data,tolerance,gamma)
            print(str(nSV) + " support vectors")
            confatrix = confusion_matrix(test_output,predicted)
            print("Confusion Matrix")
            print(confatrix)
            # draw_confusion(confatrix)
        elif part == 'c':
            gamma = 0.05
            penalty = 1
            libsvm_both(train_data,train_output,test_data,test_output,gamma,penalty)
        else:
            print("No such part for binary classification")
    else:
        if part == 'a':
            gamma = 0.05
            penalty = 1
            tolerance = 1e-6
            print("tolerance value for gaussian kernel for multiclass classification= " + str(tolerance))
            (test_output,prediction) = multiclass_svm_cvxopt(train_data_path,test_data_path,gamma,penalty,tolerance)
            confatrix = confusion_matrix(test_output,prediction)
            print(confatrix)
        elif part == 'b':
            gamma = 0.05
            penalty = 1
            (test_output,prediction) = multiclass_svm_libsvm(train_data_path,test_data_path,gamma,penalty)
            confatrix = confusion_matrix(test_output,prediction)
            print(confatrix)
            # draw_confusion(confatrix)
        elif part == 'd':
            gamma = 0.05
            penalty_array = [0.00001,0.01,1,5,10]
            validation_set_accuracy = np.zeros(5,dtype=float)  # one accuracy slot per penalty value
            test_accuracy = np.zeros(5,dtype=float)
            (train_data,train_output) = get_data(train_data_path,False,0,0)
            (test_data,test_output) = get_data(test_data_path,False,0,0)
            validation_data_X = train_data[18000:20000,:]
            validation_output_Y = train_output[18000:20000,:]
            training_data_X = train_data[0:18000,:]
            training_output_Y = train_output[0:18000,:]
            for i in range(len(penalty_array)):
                penalty = penalty_array[i]
                train_labels = []
                train_input = training_data_X.tolist()
                for i1 in range(training_output_Y.shape[0]):
                    train_labels.append(training_output_Y[i1,0])
                validation_labels = []
                validation_input = validation_data_X.tolist()
                for i1 in range(validation_output_Y.shape[0]):
                    validation_labels.append(validation_output_Y[i1,0])
                test_labels = []
                test_input = test_data.tolist()
                for j1 in range(test_output.shape[0]):
                    test_labels.append(test_output[j1,0])
                problem = svm_problem(train_labels,train_input)
                gaussian_param = svm_parameter("-s 0 -c " + str(penalty) + " -t 2 -g " + str(gamma))
                gaussian_model = svm_train(problem,gaussian_param)
                svm_prediction_lbl,svm_prediction_acc,svm_prediction_val = svm_predict(test_labels,test_input,gaussian_model)
                test_accuracy[i] = svm_prediction_acc[0]
                svm_prediction_lbl,svm_prediction_acc,svm_prediction_val = svm_predict(validation_labels,validation_input,gaussian_model)
                validation_set_accuracy[i] = svm_prediction_acc[0]
            print("Validation Set Accuracy")
            print(validation_set_accuracy)
            print("Test set Accuracy")
            print(test_accuracy)
        else:
            print("No such part for multiclass classification")

if __name__ == "__main__":
    main()
[ "matplotlib.pyplot.title", "numpy.ravel", "numpy.argmax", "pandas.read_csv", "numpy.ones", "numpy.exp", "numpy.multiply", "matplotlib.pyplot.imshow", "numpy.savetxt", "numpy.identity", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.set_cmap", "cvxopt.solvers.qp", "matplotlib.pyplot.show", "cvxopt.matrix", "matplotlib.pyplot.ylabel", "numpy.vstack", "numpy.ix_", "numpy.zeros", "numpy.nonzero", "numpy.array", "numpy.asmatrix", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.xlabel" ]
[((396, 428), 'numpy.array', 'np.array', (['train_data[:, 784:785]'], {}), '(train_data[:, 784:785])\n', (404, 428), True, 'import numpy as np\n'), ((951, 972), 'matplotlib.pyplot.imshow', 'plt.imshow', (['confatrix'], {}), '(confatrix)\n', (961, 972), True, 'import matplotlib.pyplot as plt\n'), ((974, 1003), 'matplotlib.pyplot.title', 'plt.title', (['"""Confusion Matrix"""'], {}), "('Confusion Matrix')\n", (983, 1003), True, 'import matplotlib.pyplot as plt\n'), ((1005, 1019), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1017, 1019), True, 'import matplotlib.pyplot as plt\n'), ((1021, 1043), 'matplotlib.pyplot.set_cmap', 'plt.set_cmap', (['"""Greens"""'], {}), "('Greens')\n", (1033, 1043), True, 'import matplotlib.pyplot as plt\n'), ((1045, 1070), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True labels"""'], {}), "('True labels')\n", (1055, 1070), True, 'import matplotlib.pyplot as plt\n'), ((1072, 1101), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (1082, 1101), True, 'import matplotlib.pyplot as plt\n'), ((1103, 1113), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1111, 1113), True, 'import matplotlib.pyplot as plt\n'), ((1309, 1346), 'numpy.multiply', 'np.multiply', (['train_data', 'train_output'], {}), '(train_data, train_output)\n', (1320, 1346), True, 'import numpy as np\n'), ((1482, 1500), 'cvxopt.matrix', 'cvxopt.matrix', (['(0.0)'], {}), '(0.0)\n', (1495, 1500), False, 'import cvxopt\n'), ((1535, 1549), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (1546, 1549), True, 'import numpy as np\n'), ((1601, 1617), 'numpy.zeros', 'np.zeros', (['(m, 1)'], {}), '((m, 1))\n', (1609, 1617), True, 'import numpy as np\n'), ((1703, 1738), 'cvxopt.solvers.qp', 'cvxopt.solvers.qp', (['P', 'q', 'G', 'h', 'A', 'b'], {}), '(P, q, G, h, A, b)\n', (1720, 1738), False, 'import cvxopt\n'), ((1971, 1997), 'numpy.ravel', 'np.ravel', (["kernel_soln['x']"], {}), "(kernel_soln['x'])\n", (1979, 1997), True, 'import numpy as np\n'), ((2402, 2499), 'numpy.savetxt', 'np.savetxt', (['"""linear_support_vector_indices.txt"""', 'langrangian_params'], {'delimiter': '""", """', 'fmt': '"""%d"""'}), "('linear_support_vector_indices.txt', langrangian_params,\n delimiter=', ', fmt='%d')\n", (2412, 2499), True, 'import numpy as np\n'), ((3712, 3739), 'numpy.exp', 'np.exp', (['(-1 * gamma * kernel)'], {}), '(-1 * gamma * kernel)\n', (3718, 3739), True, 'import numpy as np\n'), ((3911, 3929), 'cvxopt.matrix', 'cvxopt.matrix', (['(0.0)'], {}), '(0.0)\n', (3924, 3929), False, 'import cvxopt\n'), ((3966, 3980), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (3977, 3980), True, 'import numpy as np\n'), ((4032, 4048), 'numpy.zeros', 'np.zeros', (['(m, 1)'], {}), '((m, 1))\n', (4040, 4048), True, 'import numpy as np\n'), ((4134, 4169), 'cvxopt.solvers.qp', 'cvxopt.solvers.qp', (['P', 'q', 'G', 'h', 'A', 'b'], {}), '(P, q, G, h, A, b)\n', (4151, 4169), False, 'import cvxopt\n'), ((4456, 4482), 'numpy.ravel', 'np.ravel', (["kernel_soln['x']"], {}), "(kernel_soln['x'])\n", (4464, 4482), True, 'import numpy as np\n'), ((5131, 5230), 'numpy.savetxt', 'np.savetxt', (['"""gaussian_support_vector_indices.txt"""', 'langrangian_params'], {'delimiter': '""", """', 'fmt': '"""%d"""'}), "('gaussian_support_vector_indices.txt', langrangian_params,\n delimiter=', ', fmt='%d')\n", (5141, 5230), True, 'import numpy as np\n'), ((8537, 8563), 'numpy.asmatrix', 'np.asmatrix', (['kernel_soln_x'], {}), '(kernel_soln_x)\n', (8548, 8563), True, 
'import numpy as np\n'), ((826, 859), 'numpy.asmatrix', 'np.asmatrix', (['train_data[:, 0:784]'], {}), '(train_data[:, 0:784])\n', (837, 859), True, 'import numpy as np\n'), ((859, 884), 'numpy.asmatrix', 'np.asmatrix', (['train_output'], {}), '(train_output)\n', (870, 884), True, 'import numpy as np\n'), ((1512, 1526), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (1523, 1526), True, 'import numpy as np\n'), ((1569, 1592), 'numpy.vstack', 'np.vstack', (['(tmp1, tmp2)'], {}), '((tmp1, tmp2))\n', (1578, 1592), True, 'import numpy as np\n'), ((1633, 1648), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (1640, 1648), True, 'import numpy as np\n'), ((1667, 1690), 'numpy.vstack', 'np.vstack', (['(tmp1, tmp2)'], {}), '((tmp1, tmp2))\n', (1676, 1690), True, 'import numpy as np\n'), ((2093, 2122), 'numpy.zeros', 'np.zeros', (['(1, n)'], {'dtype': 'float'}), '((1, n), dtype=float)\n', (2101, 2122), True, 'import numpy as np\n'), ((3522, 3551), 'numpy.zeros', 'np.zeros', (['(m, m)'], {'dtype': 'float'}), '((m, m), dtype=float)\n', (3530, 3551), True, 'import numpy as np\n'), ((3943, 3957), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (3954, 3957), True, 'import numpy as np\n'), ((4000, 4023), 'numpy.vstack', 'np.vstack', (['(tmp1, tmp2)'], {}), '((tmp1, tmp2))\n', (4009, 4023), True, 'import numpy as np\n'), ((4064, 4079), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (4071, 4079), True, 'import numpy as np\n'), ((4098, 4121), 'numpy.vstack', 'np.vstack', (['(tmp1, tmp2)'], {}), '((tmp1, tmp2))\n', (4107, 4121), True, 'import numpy as np\n'), ((4511, 4546), 'numpy.multiply', 'np.multiply', (['train_data', 'train_data'], {}), '(train_data, train_data)\n', (4522, 4546), True, 'import numpy as np\n'), ((4571, 4604), 'numpy.multiply', 'np.multiply', (['test_data', 'test_data'], {}), '(test_data, test_data)\n', (4582, 4604), True, 'import numpy as np\n'), ((8224, 8253), 'numpy.argmax', 'np.argmax', (['prediction_dict[i]'], {}), '(prediction_dict[i])\n', (8233, 8253), True, 'import numpy as np\n'), ((8275, 8295), 'numpy.array', 'np.array', (['prediction'], {}), '(prediction)\n', (8283, 8295), True, 'import numpy as np\n'), ((8584, 8619), 'numpy.multiply', 'np.multiply', (['train_data', 'train_data'], {}), '(train_data, train_data)\n', (8595, 8619), True, 'import numpy as np\n'), ((8644, 8677), 'numpy.multiply', 'np.multiply', (['test_data', 'test_data'], {}), '(test_data, test_data)\n', (8655, 8677), True, 'import numpy as np\n'), ((8787, 8828), 'numpy.multiply', 'np.multiply', (['raveled', '(raveled > tolerance)'], {}), '(raveled, raveled > tolerance)\n', (8798, 8828), True, 'import numpy as np\n'), ((8849, 8880), 'numpy.nonzero', 'np.nonzero', (['(raveled > tolerance)'], {}), '(raveled > tolerance)\n', (8859, 8880), True, 'import numpy as np\n'), ((10951, 10980), 'numpy.argmax', 'np.argmax', (['prediction_dict[i]'], {}), '(prediction_dict[i])\n', (10960, 10980), True, 'import numpy as np\n'), ((325, 373), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'header': 'None', 'dtype': 'float'}), '(data_path, header=None, dtype=float)\n', (336, 373), True, 'import pandas as pd\n'), ((549, 620), 'numpy.ix_', 'np.ix_', (['((train_data[:, 784] == digit1) | (train_data[:, 784] == digit2))'], {}), '((train_data[:, 784] == digit1) | (train_data[:, 784] == digit2))\n', (555, 620), True, 'import numpy as np\n'), ((1416, 1431), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (1423, 1431), True, 'import numpy as np\n'), ((2672, 2703), 'numpy.savetxt', 'np.savetxt', 
(['f', 'line'], {'fmt': '"""%.2f"""'}), "(f, line, fmt='%.2f')\n", (2682, 2703), True, 'import numpy as np\n'), ((3845, 3860), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (3852, 3860), True, 'import numpy as np\n'), ((12731, 12771), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_output', 'predicted'], {}), '(test_output, predicted)\n', (12747, 12771), False, 'from sklearn.metrics import confusion_matrix\n'), ((13929, 13970), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_output', 'prediction'], {}), '(test_output, prediction)\n', (13945, 13970), False, 'from sklearn.metrics import confusion_matrix\n'), ((13326, 13366), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_output', 'predicted'], {}), '(test_output, predicted)\n', (13342, 13366), False, 'from sklearn.metrics import confusion_matrix\n'), ((14153, 14194), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_output', 'prediction'], {}), '(test_output, prediction)\n', (14169, 14194), False, 'from sklearn.metrics import confusion_matrix\n'), ((7303, 7329), 'numpy.ravel', 'np.ravel', (["kernel_soln['x']"], {}), "(kernel_soln['x'])\n", (7311, 7329), True, 'import numpy as np\n'), ((14351, 14380), 'numpy.zeros', 'np.zeros', (['(1, 5)'], {'dtype': 'float'}), '((1, 5), dtype=float)\n', (14359, 14380), True, 'import numpy as np\n'), ((14398, 14427), 'numpy.zeros', 'np.zeros', (['(1, 5)'], {'dtype': 'float'}), '((1, 5), dtype=float)\n', (14406, 14427), True, 'import numpy as np\n'), ((5740, 5812), 'numpy.exp', 'np.exp', (['(-1 * gamma * (X_train - 2 * X_train_X_test[:, i] + X_test[i, 0]))'], {}), '(-1 * gamma * (X_train - 2 * X_train_X_test[:, i] + X_test[i, 0]))\n', (5746, 5812), True, 'import numpy as np\n'), ((9358, 9430), 'numpy.exp', 'np.exp', (['(-1 * gamma * (X_train - 2 * X_train_X_test[:, i] + X_test[i, 0]))'], {}), '(-1 * gamma * (X_train - 2 * X_train_X_test[:, i] + X_test[i, 0]))\n', (9364, 9430), True, 'import numpy as np\n'), ((5474, 5562), 'numpy.multiply', 'np.multiply', (['(train_data - train_data[sv_idx, :])', '(train_data - train_data[sv_idx, :])'], {}), '(train_data - train_data[sv_idx, :], train_data - train_data[\n sv_idx, :])\n', (5485, 5562), True, 'import numpy as np\n'), ((9129, 9217), 'numpy.multiply', 'np.multiply', (['(train_data - train_data[sv_idx, :])', '(train_data - train_data[sv_idx, :])'], {}), '(train_data - train_data[sv_idx, :], train_data - train_data[\n sv_idx, :])\n', (9140, 9217), True, 'import numpy as np\n')]
#
# Copyright (C) 2016-2020 by <NAME>, <NAME>, <NAME>, and contributors
#
# This file is part of Power Sequencer.
#
# Power Sequencer is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Power Sequencer is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with Power Sequencer. If
# not, see <https://www.gnu.org/licenses/>.
#
import bpy

from .utils.functions import find_sequences_after
from .utils.functions import convert_duration_to_frames
from .utils.global_settings import SequenceTypes
from .utils.doc import doc_name, doc_idname, doc_brief, doc_description


class POWER_SEQUENCER_OT_crossfade_add(bpy.types.Operator):
    """
    *brief* Adds cross fade between selected sequence and the closest sequence to its right

    Based on the active strip, finds the closest next sequence of a similar type, moves it so
    it overlaps the active strip, and adds a gamma cross effect between them. Works with MOVIE,
    IMAGE and META strips
    """

    doc = {
        "name": doc_name(__qualname__),
        "demo": "https://i.imgur.com/ZyEd0jD.gif",
        "description": doc_description(__doc__),
        "shortcuts": [
            ({"type": "C", "value": "PRESS", "ctrl": True, "alt": True}, {}, "Add Crossfade")
        ],
        "keymap": "Sequencer",
    }
    bl_idname = doc_idname(__qualname__)
    bl_label = doc["name"]
    bl_description = doc_brief(doc["description"])
    bl_options = {"REGISTER", "UNDO"}

    crossfade_duration: bpy.props.FloatProperty(
        name="Crossfade Duration",
        description="The duration of the crossfade",
        default=0.5,
        min=0,
    )
    auto_move_strip: bpy.props.BoolProperty(
        name="Auto Move Strip",
        description=(
            "When true, moves the second strip so the crossfade"
            " is of the length set in 'Crossfade Length'"
        ),
        default=True,
    )

    @classmethod
    def poll(cls, context):
        return context.selected_sequences

    def execute(self, context):
        sorted_selection = sorted(context.selected_sequences, key=lambda s: s.frame_final_start)
        for s in sorted_selection:
            s_next = self.get_next_sequence_after(context, s)
            s_to_offset = s_next.input_1 if hasattr(s_next, "input_1") else s_next
            if self.auto_move_strip:
                offset = s_to_offset.frame_final_start - s.frame_final_end
                s_to_offset.frame_start -= offset
            if s_to_offset.frame_final_start == s.frame_final_end:
                self.offset_sequence_handles(context, s, s_to_offset)
            self.apply_crossfade(context, s, s_next)
        return {"FINISHED"}

    def get_next_sequence_after(self, context, sequence):
        """
        Returns the first sequence after `sequence` by frame_final_start
        """
        next_sequence = None
        next_in_channel = [
            s for s in find_sequences_after(context, sequence) if s.channel == sequence.channel
        ]
        next_transitionable = (s for s in next_in_channel if s.type in SequenceTypes.TRANSITIONABLE)
        try:
            next_sequence = min(next_transitionable, key=lambda s: s.frame_final_start)
        except ValueError:
            pass
        return next_sequence

    def apply_crossfade(self, context, strip_from, strip_to):
        for s in bpy.context.selected_sequences:
            s.select = False
        strip_from.select = True
        strip_to.select = True
        context.scene.sequence_editor.active_strip = strip_to
        bpy.ops.sequencer.effect_strip_add(type="GAMMA_CROSS")

    def offset_sequence_handles(self, context, sequence_1, sequence_2):
        """
        Moves the handles of the two sequences before adding the crossfade
        """
        fade_duration = convert_duration_to_frames(context, self.crossfade_duration)
        fade_offset = fade_duration / 2
        if hasattr(sequence_1, "input_1"):
            sequence_1.input_1.frame_final_end -= fade_offset
        else:
            sequence_1.frame_final_end -= fade_offset
        if hasattr(sequence_2, "input_1"):
            sequence_2.input_1.frame_final_start += fade_offset
        else:
            sequence_2.frame_final_start += fade_offset
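
# Illustrative sketch, not part of the original add-on: how an operator like this is
# registered with Blender. bpy.utils.register_class is the standard entry point; once
# registered, the operator becomes callable through bpy.ops under its bl_idname. That
# string is generated by doc_idname() above, so the exact bpy.ops path shown in the
# comment is an assumption.
def _register_sketch():
    bpy.utils.register_class(POWER_SEQUENCER_OT_crossfade_add)
    # e.g. bpy.ops.power_sequencer.crossfade_add()  # assumed idname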
[ "bpy.props.BoolProperty", "bpy.ops.sequencer.effect_strip_add", "bpy.props.FloatProperty" ]
[((1882, 2002), 'bpy.props.FloatProperty', 'bpy.props.FloatProperty', ([], {'name': '"""Crossfade Duration"""', 'description': '"""The duration of the crossfade"""', 'default': '(0.5)', 'min': '(0)'}), "(name='Crossfade Duration', description=\n 'The duration of the crossfade', default=0.5, min=0)\n", (1905, 2002), False, 'import bpy\n'), ((2033, 2212), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'name': '"""Auto Move Strip"""', 'description': '"""When true, moves the second strip so the crossfade is of the length set in \'Crossfade Length\'"""', 'default': '(True)'}), '(name=\'Auto Move Strip\', description=\n "When true, moves the second strip so the crossfade is of the length set in \'Crossfade Length\'"\n , default=True)\n', (2055, 2212), False, 'import bpy\n'), ((3923, 3977), 'bpy.ops.sequencer.effect_strip_add', 'bpy.ops.sequencer.effect_strip_add', ([], {'type': '"""GAMMA_CROSS"""'}), "(type='GAMMA_CROSS')\n", (3957, 3977), False, 'import bpy\n')]
# -*- coding:utf-8 -*-
import logging
import subprocess
from time import gmtime, strftime
import datetime

from apscheduler.schedulers.blocking import BlockingScheduler


def clear():
    command = "python clear.py"
    subprocess.call(command.split())


def once():
    command = "python once.py"
    subprocess.call(command.split())


def main():
    command = "python main.py"
    subprocess.call(command.split())


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    sched = BlockingScheduler()
    sched.add_job(clear, 'cron', hour="0", minute="30")
    sched.add_job(once, 'cron', hour="5", minute="20")
    sched.add_job(main, 'cron', hour="8-23", minute="59")
    sched.start()
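
# Illustrative sketch, not part of the original script: the 'cron' trigger strings
# above follow APScheduler's cron field syntax, so hour="8-23", minute="59" fires at
# minute 59 of every hour from 08:00 through 23:00. The same schedule can be written
# with an explicit CronTrigger from the same library (shown as comments because
# sched.start() above blocks):
#
#   from apscheduler.triggers.cron import CronTrigger
#   sched.add_job(main, CronTrigger(hour="8-23", minute="59"))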
[ "logging.basicConfig", "apscheduler.schedulers.blocking.BlockingScheduler" ]
[((458, 497), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (477, 497), False, 'import logging\n'), ((510, 529), 'apscheduler.schedulers.blocking.BlockingScheduler', 'BlockingScheduler', ([], {}), '()\n', (527, 529), False, 'from apscheduler.schedulers.blocking import BlockingScheduler\n')]
import os
import io
import csv
import ast
import sys
import math
import struct
from enum import Enum
from exceptions import CSVError, SchemaError

csv.field_size_limit(sys.maxsize)  # Don't limit the size of user input fields.


class Type(Enum):
    UNKNOWN = 0
    BOOL = 1
    DOUBLE = 2
    FLOAT = 2  # alias to DOUBLE
    STRING = 3
    LONG = 4
    INT = 4  # alias to LONG
    INTEGER = 4  # alias to LONG
    ARRAY = 5
    ID = 6
    START_ID = 7
    END_ID = 8
    IGNORE = 9


def convert_schema_type(in_type):
    try:
        return Type[in_type]
    except KeyError:
        # Handling for ID namespaces
        # TODO think of better alternatives
        if in_type.startswith('ID('):
            return Type.ID
        elif in_type.startswith('START_ID('):
            return Type.START_ID
        elif in_type.startswith('END_ID('):
            return Type.END_ID
        else:
            raise SchemaError("Encountered invalid field type '%s'" % in_type)


def array_prop_to_binary(format_str, prop_val):
    # Evaluate the array to convert its elements.
    # (This allows us to handle nested arrays.)
    array_val = ast.literal_eval(prop_val)
    # Send array length as a long.
    array_to_send = struct.pack(format_str + "q", Type.ARRAY.value, len(array_val))
    # Recursively send each array element as a string.
    for elem in array_val:
        array_to_send += inferred_prop_to_binary(str(elem))
    # Return the full array struct.
    return array_to_send


# Convert a property field with an enforced type into a binary stream.
# Supported property types are string, integer, float, and boolean.
def typed_prop_to_binary(prop_val, prop_type):
    # All format strings start with an unsigned char to represent our prop_type enum
    format_str = "=B"

    # Remove leading and trailing whitespace
    prop_val = prop_val.strip()

    # TODO allow ID type specification
    if prop_type == Type.ID or prop_type == Type.LONG:
        try:
            numeric_prop = int(prop_val)
            return struct.pack(format_str + "q", Type.LONG.value, numeric_prop)
        except (ValueError, struct.error):
            # TODO ugly, rethink
            if prop_type == Type.LONG:
                raise SchemaError("Could not parse '%s' as a long" % prop_val)
    elif prop_type == Type.ID or prop_type == Type.DOUBLE:
        try:
            numeric_prop = float(prop_val)
            if not math.isnan(numeric_prop) and not math.isinf(numeric_prop):  # Don't accept non-finite values.
                return struct.pack(format_str + "d", Type.DOUBLE.value, numeric_prop)
        except (ValueError, struct.error):
            # TODO ugly, rethink
            if prop_type == Type.DOUBLE:
                raise SchemaError("Could not parse '%s' as a double" % prop_val)
    elif prop_type == Type.BOOL:
        # If field is 'false' or 'true', it is a boolean
        if prop_val.lower() == 'false':
            return struct.pack(format_str + '?', Type.BOOL.value, False)
        elif prop_val.lower() == 'true':
            return struct.pack(format_str + '?', Type.BOOL.value, True)
        else:
            raise SchemaError("Could not parse '%s' as a boolean" % prop_val)
    elif prop_type == Type.STRING:
        # If we've reached this point, the property is a string
        encoded_str = str.encode(prop_val)  # struct.pack requires bytes objects as arguments
        # Encoding len+1 adds a null terminator to the string
        format_str += "%ds" % (len(encoded_str) + 1)
        return struct.pack(format_str, Type.STRING.value, encoded_str)
    elif prop_type == Type.ARRAY:
        if prop_val[0] != '[' or prop_val[-1] != ']':
            raise SchemaError("Could not parse '%s' as an array" % prop_val)
        return array_prop_to_binary(format_str, prop_val)

    # If it hasn't returned by this point, it is trying to set it to a type that it can't adopt
    raise Exception("unable to parse [" + prop_val + "] with type [" + repr(prop_type) + "]")


# Convert a single CSV property field with an inferred type into a binary stream.
# Supported property types are string, integer, float, boolean, and (erroneously) null.
def inferred_prop_to_binary(prop_val):
    # All format strings start with an unsigned char to represent our prop_type enum
    format_str = "=B"

    if prop_val == "":
        # An empty string indicates a NULL property.
        # TODO This is not allowed in Cypher, consider how to handle it here rather than in-module.
        return struct.pack(format_str, 0)

    # Remove leading and trailing whitespace
    prop_val = prop_val.strip()

    # Try to parse value as an integer.
    try:
        numeric_prop = int(prop_val)
        return struct.pack(format_str + "q", Type.LONG.value, numeric_prop)
    except (ValueError, struct.error):
        pass

    # Try to parse value as a float.
    try:
        numeric_prop = float(prop_val)
        if not math.isnan(numeric_prop) and not math.isinf(numeric_prop):  # Don't accept non-finite values.
            return struct.pack(format_str + "d", Type.DOUBLE.value, numeric_prop)
    except (ValueError, struct.error):
        pass

    # If field is 'false' or 'true', it is a boolean.
    if prop_val.lower() == 'false':
        return struct.pack(format_str + '?', Type.BOOL.value, False)
    elif prop_val.lower() == 'true':
        return struct.pack(format_str + '?', Type.BOOL.value, True)

    # If the property string is bracket-interpolated, it is an array.
    if prop_val[0] == '[' and prop_val[-1] == ']':
        return array_prop_to_binary(format_str, prop_val)

    # If we've reached this point, the property is a string.
    encoded_str = str.encode(prop_val)  # struct.pack requires bytes objects as arguments
    # Encoding len+1 adds a null terminator to the string
    format_str += "%ds" % (len(encoded_str) + 1)
    return struct.pack(format_str, Type.STRING.value, encoded_str)


class EntityFile(object):
    """Superclass for Label and RelationType classes"""

    def __init__(self, filename, label, config):
        # The configurations for this run.
        self.config = config

        # The label or relation type string is the basename of the file
        if label:
            self.entity_str = label
        else:
            self.entity_str = os.path.splitext(os.path.basename(filename))[0]

        # Input file handling
        self.infile = io.open(filename, 'rt')

        # Initialize CSV reader that ignores leading whitespace in each field
        # and does not modify input quote characters
        self.reader = csv.reader(self.infile, delimiter=config.separator,
                                 skipinitialspace=True, quoting=config.quoting, escapechar='\\')

        self.packed_header = b''
        self.binary_entities = []
        self.binary_size = 0  # size of binary token

        self.convert_header()  # Extract data from header row.
        self.count_entities()  # Count number of entities/row in file.
        next(self.reader)  # Skip the header row.

    # Count number of rows in file.
    def count_entities(self):
        self.entities_count = 0
        self.entities_count = sum(1 for line in self.infile)
        # seek back
        self.infile.seek(0)
        return self.entities_count

    # Simple input validations for each row of a CSV file
    def validate_row(self, row):
        # Each row should have the same number of fields
        if len(row) != self.column_count:
            raise CSVError("%s:%d Expected %d columns, encountered %d ('%s')"
                           % (self.infile.name, self.reader.line_num, self.column_count,
                              len(row), self.config.separator.join(row)))

    # If part of a CSV file was sent to Redis, delete the processed entities and update the binary size
    def reset_partial_binary(self):
        self.binary_entities = []
        self.binary_size = len(self.packed_header)

    # Convert property keys from a CSV file header into a binary string
    def pack_header(self):
        # String format
        entity_bytes = self.entity_str.encode()
        fmt = "=%dsI" % (len(entity_bytes) + 1)  # Unaligned native, entity name, count of properties
        args = [entity_bytes, self.prop_count]
        for idx in range(self.column_count):
            if not self.column_names[idx]:
                continue
            prop = self.column_names[idx].encode()
            fmt += "%ds" % (len(prop) + 1)  # encode string with a null terminator
            args.append(prop)
        return struct.pack(fmt, *args)

    def convert_header_with_schema(self, header):
        self.types = [None] * self.column_count  # Value type of every column.
        for idx, field in enumerate(header):
            pair = field.split(':')

            # Multiple colons found in column name, emit error.
            # TODO might need to check for backtick escapes
            if len(pair) > 2:
                raise CSVError("Field '%s' had %d colons" % (field, len(pair) - 1))

            # Convert the column type.
            col_type = convert_schema_type(pair[1].upper().strip())

            # If the column did not have a name but the type requires one, emit an error.
            if len(pair[0]) == 0 and col_type not in (Type.ID, Type.START_ID, Type.END_ID, Type.IGNORE):
                raise SchemaError("Each property in the header should be a colon-separated pair")
            else:
                # We have a column name and a type.
                # Only store the name if the column's values should be added as properties.
                if len(pair[0]) > 0 and col_type not in (Type.START_ID, Type.END_ID, Type.IGNORE):
                    column_name = pair[0].strip()
                    self.column_names[idx] = column_name

            # Store the column type.
            self.types[idx] = col_type

    def convert_header(self):
        header = next(self.reader)
        self.column_count = len(header)
        self.column_names = [None] * self.column_count  # Property names of every column; None if column does not update graph.

        if self.config.enforce_schema:
            # Use generic logic to convert the header with schema.
            self.convert_header_with_schema(header)
            # The subclass will perform post-processing.
            self.post_process_header_with_schema(header)
        else:
            # The subclass will process the header itself
            self.process_schemaless_header(header)

        # The number of properties is equal to the number of non-skipped columns.
        self.prop_count = self.column_count - self.column_names.count(None)

        self.packed_header = self.pack_header()
        self.binary_size += len(self.packed_header)

    # Convert a list of properties into a binary string
    def pack_props(self, line):
        props = []
        for idx, field in enumerate(line):
            if not self.column_names[idx]:
                continue
            if self.config.enforce_schema:
                props.append(typed_prop_to_binary(field, self.types[idx]))
            else:
                props.append(inferred_prop_to_binary(field))

        return b''.join(p for p in props)

    def to_binary(self):
        return self.packed_header + b''.join(self.binary_entities)
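
# Illustrative self-check sketch, not part of the original module, showing the wire
# format that inferred_prop_to_binary produces. Every token starts with the one-byte
# Type enum value, followed by the payload: "q" for longs, "d" for doubles, "?" for
# booleans, or a null-terminated string.
def _prop_binary_sketch():
    assert inferred_prop_to_binary("12") == struct.pack("=Bq", Type.LONG.value, 12)
    assert inferred_prop_to_binary("true") == struct.pack("=B?", Type.BOOL.value, True)
    assert inferred_prop_to_binary("ab") == struct.pack("=B3s", Type.STRING.value, b"ab")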
[ "exceptions.SchemaError", "math.isnan", "math.isinf", "csv.reader", "os.path.basename", "csv.field_size_limit", "struct.pack", "io.open", "ast.literal_eval" ]
[((147, 180), 'csv.field_size_limit', 'csv.field_size_limit', (['sys.maxsize'], {}), '(sys.maxsize)\n', (167, 180), False, 'import csv\n'), ((1152, 1178), 'ast.literal_eval', 'ast.literal_eval', (['prop_val'], {}), '(prop_val)\n', (1168, 1178), False, 'import ast\n'), ((5883, 5938), 'struct.pack', 'struct.pack', (['format_str', 'Type.STRING.value', 'encoded_str'], {}), '(format_str, Type.STRING.value, encoded_str)\n', (5894, 5938), False, 'import struct\n'), ((4521, 4547), 'struct.pack', 'struct.pack', (['format_str', '(0)'], {}), '(format_str, 0)\n', (4532, 4547), False, 'import struct\n'), ((4728, 4788), 'struct.pack', 'struct.pack', (["(format_str + 'q')", 'Type.LONG.value', 'numeric_prop'], {}), "(format_str + 'q', Type.LONG.value, numeric_prop)\n", (4739, 4788), False, 'import struct\n'), ((5275, 5328), 'struct.pack', 'struct.pack', (["(format_str + '?')", 'Type.BOOL.value', '(False)'], {}), "(format_str + '?', Type.BOOL.value, False)\n", (5286, 5328), False, 'import struct\n'), ((6415, 6438), 'io.open', 'io.open', (['filename', '"""rt"""'], {}), "(filename, 'rt')\n", (6422, 6438), False, 'import io\n'), ((6593, 6712), 'csv.reader', 'csv.reader', (['self.infile'], {'delimiter': 'config.separator', 'skipinitialspace': '(True)', 'quoting': 'config.quoting', 'escapechar': '"""\\\\"""'}), "(self.infile, delimiter=config.separator, skipinitialspace=True,\n quoting=config.quoting, escapechar='\\\\')\n", (6603, 6712), False, 'import csv\n'), ((8493, 8516), 'struct.pack', 'struct.pack', (['fmt', '*args'], {}), '(fmt, *args)\n', (8504, 8516), False, 'import struct\n'), ((2046, 2106), 'struct.pack', 'struct.pack', (["(format_str + 'q')", 'Type.LONG.value', 'numeric_prop'], {}), "(format_str + 'q', Type.LONG.value, numeric_prop)\n", (2057, 2106), False, 'import struct\n'), ((5054, 5116), 'struct.pack', 'struct.pack', (["(format_str + 'd')", 'Type.DOUBLE.value', 'numeric_prop'], {}), "(format_str + 'd', Type.DOUBLE.value, numeric_prop)\n", (5065, 5116), False, 'import struct\n'), ((5381, 5433), 'struct.pack', 'struct.pack', (["(format_str + '?')", 'Type.BOOL.value', '(True)'], {}), "(format_str + '?', Type.BOOL.value, True)\n", (5392, 5433), False, 'import struct\n'), ((4942, 4966), 'math.isnan', 'math.isnan', (['numeric_prop'], {}), '(numeric_prop)\n', (4952, 4966), False, 'import math\n'), ((4975, 4999), 'math.isinf', 'math.isinf', (['numeric_prop'], {}), '(numeric_prop)\n', (4985, 4999), False, 'import math\n'), ((9287, 9362), 'exceptions.SchemaError', 'SchemaError', (['"""Each property in the header should be a colon-separated pair"""'], {}), "('Each property in the header should be a colon-separated pair')\n", (9298, 9362), False, 'from exceptions import CSVError, SchemaError\n'), ((2244, 2300), 'exceptions.SchemaError', 'SchemaError', (['("Could not parse \'%s\' as a long" % prop_val)'], {}), '("Could not parse \'%s\' as a long" % prop_val)\n', (2255, 2300), False, 'from exceptions import CSVError, SchemaError\n'), ((2552, 2614), 'struct.pack', 'struct.pack', (["(format_str + 'd')", 'Type.DOUBLE.value', 'numeric_prop'], {}), "(format_str + 'd', Type.DOUBLE.value, numeric_prop)\n", (2563, 2614), False, 'import struct\n'), ((2963, 3016), 'struct.pack', 'struct.pack', (["(format_str + '?')", 'Type.BOOL.value', '(False)'], {}), "(format_str + '?', Type.BOOL.value, False)\n", (2974, 3016), False, 'import struct\n'), ((3545, 3600), 'struct.pack', 'struct.pack', (['format_str', 'Type.STRING.value', 'encoded_str'], {}), '(format_str, Type.STRING.value, encoded_str)\n', (3556, 3600), False, 'import 
struct\n'), ((6332, 6358), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (6348, 6358), False, 'import os\n'), ((927, 987), 'exceptions.SchemaError', 'SchemaError', (['("Encountered invalid field type \'%s\'" % in_type)'], {}), '("Encountered invalid field type \'%s\'" % in_type)\n', (938, 987), False, 'from exceptions import CSVError, SchemaError\n'), ((2436, 2460), 'math.isnan', 'math.isnan', (['numeric_prop'], {}), '(numeric_prop)\n', (2446, 2460), False, 'import math\n'), ((2469, 2493), 'math.isinf', 'math.isinf', (['numeric_prop'], {}), '(numeric_prop)\n', (2479, 2493), False, 'import math\n'), ((2754, 2812), 'exceptions.SchemaError', 'SchemaError', (['("Could not parse \'%s\' as a double" % prop_val)'], {}), '("Could not parse \'%s\' as a double" % prop_val)\n', (2765, 2812), False, 'from exceptions import CSVError, SchemaError\n'), ((3077, 3129), 'struct.pack', 'struct.pack', (["(format_str + '?')", 'Type.BOOL.value', '(True)'], {}), "(format_str + '?', Type.BOOL.value, True)\n", (3088, 3129), False, 'import struct\n'), ((3162, 3221), 'exceptions.SchemaError', 'SchemaError', (['("Could not parse \'%s\' as a boolean" % prop_val)'], {}), '("Could not parse \'%s\' as a boolean" % prop_val)\n', (3173, 3221), False, 'from exceptions import CSVError, SchemaError\n'), ((3708, 3766), 'exceptions.SchemaError', 'SchemaError', (['("Could not parse \'%s\' as an array" % prop_val)'], {}), '("Could not parse \'%s\' as an array" % prop_val)\n', (3719, 3766), False, 'from exceptions import CSVError, SchemaError\n')]
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
'''Correction for gaseous absorption based on SMAC method (Rahman and Dedieu, 1994)
'''
from math import *
import numpy as np


# =============================================================================================
def PdeZ(Z):
    """
    PdeZ : Atmospheric pressure (in hPa) as a function of altitude (in meters)
    """
    p = 1013.25 * pow(1 - 0.0065 * Z / 288.15, 5.31)
    return (p)


# =============================================================================================
class coeff:
    '''
    Library for atmospheric correction using the SMAC method (Rahman and Dedieu, 1994)

    Contains :
        smac_inv : inverse SMAC model for atmospheric correction TOA ==> Surface
        smac_dir : direct SMAC model Surface ==> TOA
        coeff    : reads the SMAC coefficients
        PdeZ     : atmospheric pressure (in hPa) as a function of altitude (in meters)

    Written by <NAME>, from the original SMAC C routine
    =============================================================================================
    '''

    def __init__(self, smac_filename):
        with open(smac_filename) as f:
            lines = f.readlines()
        # H2O
        temp = lines[0].strip().split()
        self.ah2o = float(temp[0])
        self.nh2o = float(temp[1])
        # O3
        temp = lines[1].strip().split()
        self.ao3 = float(temp[0])
        self.no3 = float(temp[1])
        # O2
        temp = lines[2].strip().split()
        self.ao2 = float(temp[0])
        self.no2 = float(temp[1])
        self.po2 = float(temp[2])
        # CO2
        temp = lines[3].strip().split()
        self.aco2 = float(temp[0])
        self.nco2 = float(temp[1])
        self.pco2 = float(temp[2])
        # CH4
        temp = lines[4].strip().split()
        self.ach4 = float(temp[0])
        self.nch4 = float(temp[1])
        self.pch4 = float(temp[2])
        # NO2
        temp = lines[5].strip().split()
        self.ano2 = float(temp[0])
        self.nno2 = float(temp[1])
        self.pno2 = float(temp[2])
        # CO
        temp = lines[6].strip().split()
        self.aco = float(temp[0])
        self.nco = float(temp[1])
        self.pco = float(temp[2])
        # rayleigh and aerosol scattering
        temp = lines[7].strip().split()
        self.a0s = float(temp[0])
        self.a1s = float(temp[1])
        self.a2s = float(temp[2])
        self.a3s = float(temp[3])
        temp = lines[8].strip().split()
        self.a0T = float(temp[0])
        self.a1T = float(temp[1])
        self.a2T = float(temp[2])
        self.a3T = float(temp[3])
        temp = lines[9].strip().split()
        self.taur = float(temp[0])
        self.sr = float(temp[0])
        temp = lines[10].strip().split()
        self.a0taup = float(temp[0])
        self.a1taup = float(temp[1])
        temp = lines[11].strip().split()
        self.wo = float(temp[0])
        self.gc = float(temp[1])
        temp = lines[12].strip().split()
        self.a0P = float(temp[0])
        self.a1P = float(temp[1])
        self.a2P = float(temp[2])
        temp = lines[13].strip().split()
        self.a3P = float(temp[0])
        self.a4P = float(temp[1])
        temp = lines[14].strip().split()
        self.Rest1 = float(temp[0])
        self.Rest2 = float(temp[1])
        temp = lines[15].strip().split()
        self.Rest3 = float(temp[0])
        self.Rest4 = float(temp[1])
        temp = lines[16].strip().split()
        self.Resr1 = float(temp[0])
        self.Resr2 = float(temp[1])
        self.Resr3 = float(temp[2])
        temp = lines[17].strip().split()
        self.Resa1 = float(temp[0])
        self.Resa2 = float(temp[1])
        temp = lines[18].strip().split()
        self.Resa3 = float(temp[0])
        self.Resa4 = float(temp[1])


# ======================================================================
def smac_inv(r_toa, tetas, phis, tetav, phiv, pressure, taup550, uo3, uh2o, coef):
    """
    r_surf = smac_inv(r_toa, tetas, phis, tetav, phiv, pressure, taup550, uo3, uh2o, coef)
    Atmospheric correction (TOA reflectance ==> surface reflectance)
    """
    ah2o = coef.ah2o
    nh2o = coef.nh2o
    ao3 = coef.ao3
    no3 = coef.no3
    ao2 = coef.ao2
    no2 = coef.no2
    po2 = coef.po2
    aco2 = coef.aco2
    nco2 = coef.nco2
    pco2 = coef.pco2
    ach4 = coef.ach4
    nch4 = coef.nch4
    pch4 = coef.pch4
    ano2 = coef.ano2
    nno2 = coef.nno2
    pno2 = coef.pno2
    aco = coef.aco
    nco = coef.nco
    pco = coef.pco
    a0s = coef.a0s
    a1s = coef.a1s
    a2s = coef.a2s
    a3s = coef.a3s
    a0T = coef.a0T
    a1T = coef.a1T
    a2T = coef.a2T
    a3T = coef.a3T
    taur = coef.taur
    sr = coef.sr
    a0taup = coef.a0taup
    a1taup = coef.a1taup
    wo = coef.wo
    gc = coef.gc
    a0P = coef.a0P
    a1P = coef.a1P
    a2P = coef.a2P
    a3P = coef.a3P
    a4P = coef.a4P
    Rest1 = coef.Rest1
    Rest2 = coef.Rest2
    Rest3 = coef.Rest3
    Rest4 = coef.Rest4
    Resr1 = coef.Resr1
    Resr2 = coef.Resr2
    Resr3 = coef.Resr3
    Resa1 = coef.Resa1
    Resa2 = coef.Resa2
    Resa3 = coef.Resa3
    Resa4 = coef.Resa4

    cdr = pi / 180
    crd = 180 / pi

    # /*------: compute the SMAC surface reflectance :--------*/
    us = cos(tetas * cdr)
    uv = cos(tetav * cdr)
    Peq = pressure / 1013.25

    # /*------: 1) air mass :--------*/
    m = 1 / us + 1 / uv

    # /*------: 2) aerosol optical depth in the spectral band, taup :--------*/
    taup = (a0taup) + (a1taup) * taup550

    # /*------: 3) gaseous transmissions (downward and upward paths) :--------*/
    to3 = 1.
    th2o = 1.
    to2 = 1.
    tco2 = 1.
    tch4 = 1.

    uo2 = (Peq ** (po2))
    uco2 = (Peq ** (pco2))
    uch4 = (Peq ** (pch4))
    uno2 = (Peq ** (pno2))
    uco = (Peq ** (pco))

    # /*------: 4) if uh2o <= 0 and uo3 <= 0 no gaseous absorption is computed :--------*/
    to3 = exp((ao3) * ((uo3 * m) ** (no3)))
    th2o = exp((ah2o) * ((uh2o * m) ** (nh2o)))
    to2 = exp((ao2) * ((uo2 * m) ** (no2)))
    tco2 = exp((aco2) * ((uco2 * m) ** (nco2)))
    tch4 = exp((ach4) * ((uch4 * m) ** (nch4)))
    tno2 = exp((ano2) * ((uno2 * m) ** (nno2)))
    tco = exp((aco) * ((uco * m) ** (nco)))
    tg = th2o * to3 * to2 * tco2 * tch4 * tco * tno2

    # /*------: 5) Total scattering transmission :--------*/
    ttetas = (a0T) + (a1T) * taup550 / us + ((a2T) * Peq + (a3T)) / (1. + us)  # /* downward */
    ttetav = (a0T) + (a1T) * taup550 / uv + ((a2T) * Peq + (a3T)) / (1. + uv)  # /* upward */

    # /*------: 6) spherical albedo of the atmosphere :--------*/
    s = (a0s) * Peq + (a3s) + (a1s) * taup550 + (a2s) * (taup550 ** 2)

    # /*------: 7) scattering angle cosine :--------*/
    cksi = - ((us * uv) + (sqrt(1. - us * us) * sqrt(1. - uv * uv) * cos((phis - phiv) * cdr)))
    if (cksi < -1):
        cksi = -1.0

    # /*------: 8) scattering angle in degree :--------*/
    ksiD = crd * acos(cksi)

    # /*------: 9) rayleigh atmospheric reflectance :--------*/
    ray_phase = 0.7190443 * (1. + (cksi * cksi)) + 0.0412742
    ray_ref = (taur * ray_phase) / (4 * us * uv)
    ray_ref = ray_ref * pressure / 1013.25
    taurz = (taur) * Peq

    # /*------: 10) Rayleigh residual :--------*/
    Res_ray = Resr1 + Resr2 * taur * ray_phase / (us * uv) + Resr3 * ((taur * ray_phase / (us * uv)) ** 2)

    # /*------: 11) aerosol atmospheric reflectance :--------*/
    aer_phase = a0P + a1P * ksiD + a2P * ksiD * ksiD + a3P * (ksiD ** 3) + a4P * (ksiD ** 4)

    ak2 = (1. - wo) * (3. - wo * 3 * gc)
    ak = sqrt(ak2)
    e = -3 * us * us * wo / (4 * (1. - ak2 * us * us))
    f = -(1. - wo) * 3 * gc * us * us * wo / (4 * (1. - ak2 * us * us))
    dp = e / (3 * us) + us * f
    d = e + f
    b = 2 * ak / (3. - wo * 3 * gc)
    delta = np.exp(ak * taup) * (1. + b) * (1. + b) - np.exp(-ak * taup) * (1. - b) * (1. - b)
    ww = wo / 4.
    ss = us / (1. - ak2 * us * us)
    q1 = 2. + 3 * us + (1. - wo) * 3 * gc * us * (1. + 2 * us)
    q2 = 2. - 3 * us - (1. - wo) * 3 * gc * us * (1. - 2 * us)
    q3 = q2 * np.exp(-taup / us)
    c1 = ((ww * ss) / delta) * (q1 * np.exp(ak * taup) * (1. + b) + q3 * (1. - b))
    c2 = -((ww * ss) / delta) * (q1 * np.exp(-ak * taup) * (1. - b) + q3 * (1. + b))
    cp1 = c1 * ak / (3. - wo * 3 * gc)
    cp2 = -c2 * ak / (3. - wo * 3 * gc)
    z = d - wo * 3 * gc * uv * dp + wo * aer_phase / 4.
    x = c1 - wo * 3 * gc * uv * cp1
    y = c2 - wo * 3 * gc * uv * cp2
    aa1 = uv / (1. + ak * uv)
    aa2 = uv / (1. - ak * uv)
    aa3 = us * uv / (us + uv)
    aer_ref = x * aa1 * (1. - np.exp(-taup / aa1))
    aer_ref = aer_ref + y * aa2 * (1. - np.exp(-taup / aa2))
    aer_ref = aer_ref + z * aa3 * (1. - np.exp(-taup / aa3))
    aer_ref = aer_ref / (us * uv)

    # /*------: 12) aerosol residual :--------*/
    Res_aer = (Resa1 + Resa2 * (taup * m * cksi) + Resa3 * ((taup * m * cksi) ** 2)) + Resa4 * ((taup * m * cksi) ** 3)

    # /*------: 13) molecule / aerosol coupling term :--------*/
    tautot = taup + taurz
    Res_6s = (Rest1 + Rest2 * (tautot * m * cksi) + Rest3 * ((tautot * m * cksi) ** 2)) + Rest4 * ((tautot * m * cksi) ** 3)

    # /*------: 14) total atmospheric reflectance :--------*/
    atm_ref = ray_ref - Res_ray + aer_ref - Res_aer + Res_6s

    # /*------: 15) Surface reflectance :--------*/
    r_surf = r_toa - (atm_ref * tg)
    r_surf = r_surf / ((tg * ttetas * ttetav) + (r_surf * s))

    return r_surf


# =======================================================================================================
def smac_dir(r_surf, tetas, phis, tetav, phiv, pressure, taup550, uo3, uh2o, coef):
    """
    r_toa = smac_dir(r_surf, tetas, phis, tetav, phiv, pressure, taup550, uo3, uh2o, coef)
    Applies the atmospheric effects (surface reflectance ==> TOA reflectance)
    """
    ah2o = coef.ah2o
    nh2o = coef.nh2o
    ao3 = coef.ao3
    no3 = coef.no3
    ao2 = coef.ao2
    no2 = coef.no2
    po2 = coef.po2
    aco2 = coef.aco2
    nco2 = coef.nco2
    pco2 = coef.pco2
    ach4 = coef.ach4
    nch4 = coef.nch4
    pch4 = coef.pch4
    ano2 = coef.ano2
    nno2 = coef.nno2
    pno2 = coef.pno2
    aco = coef.aco
    nco = coef.nco
    pco = coef.pco
    a0s = coef.a0s
    a1s = coef.a1s
    a2s = coef.a2s
    a3s = coef.a3s
    a0T = coef.a0T
    a1T = coef.a1T
    a2T = coef.a2T
    a3T = coef.a3T
    taur = coef.taur
    sr = coef.sr
    a0taup = coef.a0taup
    a1taup = coef.a1taup
    wo = coef.wo
    gc = coef.gc
    a0P = coef.a0P
    a1P = coef.a1P
    a2P = coef.a2P
    a3P = coef.a3P
    a4P = coef.a4P
    Rest1 = coef.Rest1
    Rest2 = coef.Rest2
    Rest3 = coef.Rest3
    Rest4 = coef.Rest4
    Resr1 = coef.Resr1
    Resr2 = coef.Resr2
    Resr3 = coef.Resr3
    Resa1 = coef.Resa1
    Resa2 = coef.Resa2
    Resa3 = coef.Resa3
    Resa4 = coef.Resa4

    cdr = pi / 180
    crd = 180 / pi

    # /*------: compute the SMAC TOA reflectance :--------*/
    us = cos(tetas * cdr)
    uv = cos(tetav * cdr)
    Peq = pressure / 1013.25

    # /*------: 1) air mass :--------*/
    m = 1 / us + 1 / uv

    # /*------: 2) aerosol optical depth in the spectral band, taup :--------*/
    taup = (a0taup) + (a1taup) * taup550

    # /*------: 3) gaseous transmissions (downward and upward paths) :--------*/
    to3 = 1.
    th2o = 1.
    to2 = 1.
    tco2 = 1.
    tch4 = 1.

    uo2 = (Peq ** (po2))
    uco2 = (Peq ** (pco2))
    uch4 = (Peq ** (pch4))
    uno2 = (Peq ** (pno2))
    uco = (Peq ** (pco))

    # /*------: 4) if uh2o <= 0 and uo3 <= 0 no gaseous absorption is computed :--------*/
    to3 = exp((ao3) * ((uo3 * m) ** (no3)))
    th2o = exp((ah2o) * ((uh2o * m) ** (nh2o)))
    to2 = exp((ao2) * ((uo2 * m) ** (no2)))
    tco2 = exp((aco2) * ((uco2 * m) ** (nco2)))
    tch4 = exp((ach4) * ((uch4 * m) ** (nch4)))
    tno2 = exp((ano2) * ((uno2 * m) ** (nno2)))
    tco = exp((aco) * ((uco * m) ** (nco)))
    tg = th2o * to3 * to2 * tco2 * tch4 * tco * tno2

    # /*------: 5) Total scattering transmission :--------*/
    ttetas = (a0T) + (a1T) * taup550 / us + ((a2T) * Peq + (a3T)) / (1. + us)  # /* downward */
    ttetav = (a0T) + (a1T) * taup550 / uv + ((a2T) * Peq + (a3T)) / (1. + uv)  # /* upward */

    # /*------: 6) spherical albedo of the atmosphere :--------*/
    s = (a0s) * Peq + (a3s) + (a1s) * taup550 + (a2s) * (taup550 ** 2)

    # /*------: 7) scattering angle cosine :--------*/
    cksi = - ((us * uv) + (sqrt(1. - us * us) * sqrt(1. - uv * uv) * cos((phis - phiv - 360) * cdr)))
    if (cksi < -1):
        cksi = -1.0

    # /*------: 8) scattering angle in degree :--------*/
    ksiD = crd * acos(cksi)

    # /*------: 9) rayleigh atmospheric reflectance :--------*/
    ray_phase = 0.7190443 * (1. + (cksi * cksi)) + 0.0412742
    ray_ref = (taur * ray_phase) / (4 * us * uv)
    ray_ref = ray_ref * pressure / 1013.25
    taurz = (taur) * Peq

    # /*------: 10) Rayleigh residual :--------*/
    Res_ray = Resr1 + Resr2 * taur * ray_phase / (us * uv) + Resr3 * ((taur * ray_phase / (us * uv)) ** 2)

    # /*------: 11) aerosol atmospheric reflectance :--------*/
    aer_phase = a0P + a1P * ksiD + a2P * ksiD * ksiD + a3P * (ksiD ** 3) + a4P * (ksiD ** 4)

    ak2 = (1. - wo) * (3. - wo * 3 * gc)
    ak = sqrt(ak2)
    e = -3 * us * us * wo / (4 * (1. - ak2 * us * us))
    f = -(1. - wo) * 3 * gc * us * us * wo / (4 * (1. - ak2 * us * us))
    dp = e / (3 * us) + us * f
    d = e + f
    b = 2 * ak / (3. - wo * 3 * gc)
    delta = np.exp(ak * taup) * (1. + b) * (1. + b) - np.exp(-ak * taup) * (1. - b) * (1. - b)
    ww = wo / 4.
    ss = us / (1. - ak2 * us * us)
    q1 = 2. + 3 * us + (1. - wo) * 3 * gc * us * (1. + 2 * us)
    q2 = 2. - 3 * us - (1. - wo) * 3 * gc * us * (1. - 2 * us)
    q3 = q2 * np.exp(-taup / us)
    c1 = ((ww * ss) / delta) * (q1 * np.exp(ak * taup) * (1. + b) + q3 * (1. - b))
    c2 = -((ww * ss) / delta) * (q1 * np.exp(-ak * taup) * (1. - b) + q3 * (1. + b))
    cp1 = c1 * ak / (3. - wo * 3 * gc)
    cp2 = -c2 * ak / (3. - wo * 3 * gc)
    z = d - wo * 3 * gc * uv * dp + wo * aer_phase / 4.
    x = c1 - wo * 3 * gc * uv * cp1
    y = c2 - wo * 3 * gc * uv * cp2
    aa1 = uv / (1. + ak * uv)
    aa2 = uv / (1. - ak * uv)
    aa3 = us * uv / (us + uv)
    aer_ref = x * aa1 * (1. - np.exp(-taup / aa1))
    aer_ref = aer_ref + y * aa2 * (1. - np.exp(-taup / aa2))
    aer_ref = aer_ref + z * aa3 * (1. - np.exp(-taup / aa3))
    aer_ref = aer_ref / (us * uv)

    # /*------: 12) aerosol residual :--------*/
    Res_aer = (Resa1 + Resa2 * (taup * m * cksi) + Resa3 * ((taup * m * cksi) ** 2)) + Resa4 * ((taup * m * cksi) ** 3)

    # /*------: 13) molecule / aerosol coupling term :--------*/
    tautot = taup + taurz
    Res_6s = (Rest1 + Rest2 * (tautot * m * cksi) + Rest3 * ((tautot * m * cksi) ** 2)) + Rest4 * ((tautot * m * cksi) ** 3)

    # /*------: 14) total atmospheric reflectance :--------*/
    atm_ref = ray_ref - Res_ray + aer_ref - Res_aer + Res_6s

    # /*------: 15) TOA reflectance :--------*/
    r_toa = r_surf * tg * ttetas * ttetav / (1 - r_surf * s) + (atm_ref * tg)

    return r_toa


# =============================================================================
if __name__ == "__main__":
    # example
    theta_s = 45
    theta_v = 5
    phi_s = 200
    phi_v = -160
    r_toa = 0.2

    ###################################### read the SMAC coefficients
    nom_smac = 'COEFS/coef_FORMOSAT2_B1_CONT.dat'
    coefs = coeff(nom_smac)
    bd = 1
    r_surf = smac_inv(r_toa, theta_s, phi_s, theta_v, phi_v, 1013, 0.1, 0.3, 0.3, coefs)
    r_toa2 = smac_dir(r_surf, theta_s, phi_s, theta_v, phi_v, 1013, 0.1, 0.3, 0.3, coefs)
    print(r_toa, r_surf, r_toa2)
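
# Illustrative numeric sanity check, not part of the original module. PdeZ implements
# the standard-atmosphere barometric formula; with the 5.31 exponent used above it
# returns exactly 1013.25 hPa at sea level and roughly 897.6 hPa at 1000 m.
def _pdez_sketch():
    assert abs(PdeZ(0) - 1013.25) < 1e-9
    assert 890.0 < PdeZ(1000) < 900.0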
[ "numpy.exp" ]
[((8207, 8225), 'numpy.exp', 'np.exp', (['(-taup / us)'], {}), '(-taup / us)\n', (8213, 8225), True, 'import numpy as np\n'), ((14032, 14050), 'numpy.exp', 'np.exp', (['(-taup / us)'], {}), '(-taup / us)\n', (14038, 14050), True, 'import numpy as np\n'), ((8722, 8741), 'numpy.exp', 'np.exp', (['(-taup / aa1)'], {}), '(-taup / aa1)\n', (8728, 8741), True, 'import numpy as np\n'), ((14547, 14566), 'numpy.exp', 'np.exp', (['(-taup / aa1)'], {}), '(-taup / aa1)\n', (14553, 14566), True, 'import numpy as np\n'), ((7932, 7949), 'numpy.exp', 'np.exp', (['(ak * taup)'], {}), '(ak * taup)\n', (7938, 7949), True, 'import numpy as np\n'), ((7974, 7992), 'numpy.exp', 'np.exp', (['(-ak * taup)'], {}), '(-ak * taup)\n', (7980, 7992), True, 'import numpy as np\n'), ((8783, 8802), 'numpy.exp', 'np.exp', (['(-taup / aa2)'], {}), '(-taup / aa2)\n', (8789, 8802), True, 'import numpy as np\n'), ((8844, 8863), 'numpy.exp', 'np.exp', (['(-taup / aa3)'], {}), '(-taup / aa3)\n', (8850, 8863), True, 'import numpy as np\n'), ((13757, 13774), 'numpy.exp', 'np.exp', (['(ak * taup)'], {}), '(ak * taup)\n', (13763, 13774), True, 'import numpy as np\n'), ((13799, 13817), 'numpy.exp', 'np.exp', (['(-ak * taup)'], {}), '(-ak * taup)\n', (13805, 13817), True, 'import numpy as np\n'), ((14608, 14627), 'numpy.exp', 'np.exp', (['(-taup / aa2)'], {}), '(-taup / aa2)\n', (14614, 14627), True, 'import numpy as np\n'), ((14669, 14688), 'numpy.exp', 'np.exp', (['(-taup / aa3)'], {}), '(-taup / aa3)\n', (14675, 14688), True, 'import numpy as np\n'), ((8263, 8280), 'numpy.exp', 'np.exp', (['(ak * taup)'], {}), '(ak * taup)\n', (8269, 8280), True, 'import numpy as np\n'), ((8347, 8365), 'numpy.exp', 'np.exp', (['(-ak * taup)'], {}), '(-ak * taup)\n', (8353, 8365), True, 'import numpy as np\n'), ((14088, 14105), 'numpy.exp', 'np.exp', (['(ak * taup)'], {}), '(ak * taup)\n', (14094, 14105), True, 'import numpy as np\n'), ((14172, 14190), 'numpy.exp', 'np.exp', (['(-ak * taup)'], {}), '(-ak * taup)\n', (14178, 14190), True, 'import numpy as np\n')]
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import, division, print_function
__metaclass__ = type

# Copyright 2018 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: panos_bgp_conditional_advertisement
short_description: Configures a BGP conditional advertisement.
description:
    - Use BGP to publish and consume routes from disparate networks.
    - In the PAN-OS GUI, this resource cannot be created without also creating at least one
      non-exist filter and one advertise filter. The API behaves a little differently; you can
      create the conditional advertisement itself, but the API will start throwing errors if
      you try to update it and there is not at least one non-exist filter and one advertise filter.
    - In order for a conditional advertisement to be valid, you must specify at least one
      non-exist and one advertise filter.
    - When modifying a BGP conditional advertisement, any filters attached are left as-is,
      unless I(advertise_filter) or I(non_exist_filter) are specified.
author:
    - <NAME> (@freakinhippie)
    - <NAME> (@shinmog)
version_added: "2.8"
requirements:
    - pan-python can be obtained from PyPI U(https://pypi.python.org/pypi/pan-python)
    - pandevice can be obtained from PyPI U(https://pypi.python.org/pypi/pandevice)
notes:
    - Checkmode is supported.
    - Panorama is supported.
extends_documentation_fragment:
    - paloaltonetworks.panos.fragments.transitional_provider
    - paloaltonetworks.panos.fragments.full_template_support
    - paloaltonetworks.panos.fragments.state
options:
    commit:
        description:
            - Commit configuration if changed.
        default: False
        type: bool
    vr_name:
        description:
            - Name of the virtual router; it must already exist and have BGP configured.
            - See M(panos_virtual_router).
        default: default
    advertise_filter:
        description:
            - B(Deprecated)
            - Use M(panos_bgp_policy_filter) to define filters after creation.
            - HORIZONTALLINE
            - Advertisement filter object returned by M(panos_bgp_policy_filter).
    non_exist_filter:
        description:
            - B(Deprecated)
            - Use M(panos_bgp_policy_filter) to define filters after creation.
            - HORIZONTALLINE
            - Non-Exist filter object returned by M(panos_bgp_policy_filter).
    enable:
        description:
            - Enable this policy.
        type: bool
    name:
        description:
            - Name of Conditional Advertisement policy.
        required: True
    used_by:
        description:
            - List of Peer Groups using this policy.
        type: list
'''

EXAMPLES = '''
- name: Create BGP Conditional Advertisement Rule
  panos_bgp_conditional_advertisement:
    provider: '{{ provider }}'
    name: 'cond-rule-01'
    enable: true
    non_exist_filter: '{{ non_exist.panos_obj }}'
    advertise_filter: '{{ advertise.panos_obj }}'
'''

RETURN = '''
# Default return values
'''

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.paloaltonetworks.panos.plugins.module_utils.panos import get_connection

try:
    from pandevice.errors import PanDeviceError
    from pandevice.network import VirtualRouter
    from pandevice.network import Bgp
    from pandevice.network import BgpPolicyConditionalAdvertisement
except ImportError:
    pass


def setup_args():
    return dict(
        commit=dict(
            type='bool', default=False,
            help='Commit configuration if changed'),
        vr_name=dict(
            default='default',
            help='Name of the virtual router; it must already exist; see panos_virtual_router'),
        non_exist_filter=dict(
            type='str',
            help='Non-Exist filter object returned by panos_bgp_policy_filter; only needed on creation'),
        advertise_filter=dict(
            type='str',
            help='Advertise filter object returned by panos_bgp_policy_filter; only needed on creation'),
        name=dict(
            type='str', required=True,
            help='Name of Conditional Advertisement policy'),
        enable=dict(
            type='bool',
            help='Enable this policy'),
        used_by=dict(
            type='list',
            help='List of Peer Groups using this policy'),
    )


def main():
    helper = get_connection(
        template=True,
        template_stack=True,
        with_state=True,
        with_classic_provider_spec=True,
        argument_spec=setup_args(),
    )

    module = AnsibleModule(
        argument_spec=helper.argument_spec,
        supports_check_mode=True,
        required_one_of=helper.required_one_of,
    )

    parent = helper.get_pandevice_parent(module)

    vr = VirtualRouter(module.params['vr_name'])
    parent.add(vr)
    try:
        vr.refresh()
    except PanDeviceError as e:
        module.fail_json(msg='Failed refresh: {0}'.format(e))

    bgp = vr.find('', Bgp)
    if bgp is None:
        module.fail_json(msg='BGP is not configured on virtual router {0}'.format(vr.name))

    listing = bgp.findall(BgpPolicyConditionalAdvertisement)
    spec = {
        'name': module.params['name'],
        'enable': module.params['enable'],
        'used_by': module.params['used_by'],
    }
    obj = BgpPolicyConditionalAdvertisement(**spec)
    bgp.add(obj)

    # TODO(gfreeman) - Remove this in 2.12.
    for ansible_param in ('non_exist_filter', 'advertise_filter'):
        val = module.params[ansible_param]
        if val is not None:
            import pickle
            from base64 import b64decode
            module.deprecate('Param {0} is deprecated'.format(ansible_param), '2.12')
            filter_obj = pickle.loads(b64decode(val))
            obj.add(filter_obj)

    changed, diff = helper.apply_state(obj, listing, module)
    if changed and module.params['commit']:
        helper.commit(module)

    module.exit_json(changed=changed, diff=diff, msg='done')


if __name__ == '__main__':
    main()
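
# Illustrative sketch, not part of the original module: the deprecated
# non_exist_filter/advertise_filter parameters carry a base64-encoded pickle.
# This helper is the inverse of the pickle.loads(b64decode(val)) call performed
# in main() above; the function name is made up for illustration.
def _legacy_filter_payload_sketch(filter_obj):
    import pickle
    from base64 import b64encode
    return b64encode(pickle.dumps(filter_obj))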
[ "ansible.module_utils.basic.AnsibleModule", "base64.b64decode", "pandevice.network.VirtualRouter", "pandevice.network.BgpPolicyConditionalAdvertisement" ]
[((5318, 5437), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'helper.argument_spec', 'supports_check_mode': '(True)', 'required_one_of': 'helper.required_one_of'}), '(argument_spec=helper.argument_spec, supports_check_mode=True,\n required_one_of=helper.required_one_of)\n', (5331, 5437), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((5525, 5564), 'pandevice.network.VirtualRouter', 'VirtualRouter', (["module.params['vr_name']"], {}), "(module.params['vr_name'])\n", (5538, 5564), False, 'from pandevice.network import VirtualRouter\n'), ((6067, 6108), 'pandevice.network.BgpPolicyConditionalAdvertisement', 'BgpPolicyConditionalAdvertisement', ([], {}), '(**spec)\n', (6100, 6108), False, 'from pandevice.network import BgpPolicyConditionalAdvertisement\n'), ((6500, 6514), 'base64.b64decode', 'b64decode', (['val'], {}), '(val)\n', (6509, 6514), False, 'from base64 import b64decode\n')]
#!/usr/bin/env python
######################################################################################
## Copyright (c) 2010-2011 The Department of Arts and Culture,                      ##
## The Government of the Republic of South Africa.                                  ##
##                                                                                  ##
## Contributors: Meraka Institute, CSIR, South Africa.                              ##
##                                                                                  ##
## Permission is hereby granted, free of charge, to any person obtaining a copy     ##
## of this software and associated documentation files (the "Software"), to deal    ##
## in the Software without restriction, including without limitation the rights     ##
## to use, copy, modify, merge, publish, distribute, sublicense, and#or sell        ##
## copies of the Software, and to permit persons to whom the Software is            ##
## furnished to do so, subject to the following conditions:                         ##
## The above copyright notice and this permission notice shall be included in       ##
## all copies or substantial portions of the Software.                              ##
##                                                                                  ##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR       ##
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,         ##
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE      ##
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER           ##
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,    ##
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN        ##
## THE SOFTWARE.                                                                    ##
######################################################################################
##                                                                                  ##
## AUTHOR : <NAME>, <NAME>                                                          ##
## DATE : October 2010                                                              ##
##                                                                                  ##
######################################################################################
##                                                                                  ##
## A simple rule rewrites implementation for syllabification.                       ##
##                                                                                  ##
######################################################################################

### PYTHON2 ###
from __future__ import unicode_literals, division, print_function
import codecs
open = codecs.open
### PYTHON2 ###

import re

import speect.modules.rewrites as rewrites

left_context = 0
graphemes = 1
right_context = 2
phone = 3


class Syllab_Rewrites(rewrites.Rewrites):
    """ Class to contain and implement the application of rewrite rules to
        predict pronunciations of isolated words...

        ruleset is a dict of lists where each list contains all RewriteRules
        associated with a specific grapheme...
    """
    WHITESPACE_CHAR = "#"

    def __init__(self, rules, sets=None):
        super(Syllab_Rewrites, self).__init__(sets)
        for fest_rule in rules:
            lc = fest_rule[left_context].strip()
            LC = list()
            for i in lc.split(' '):
                if i != '':
                    LC.append(i)
            g = fest_rule[graphemes].strip()
            G = list()
            for i in g.split(' '):
                if i != '':
                    G.append(i)
            rc = fest_rule[right_context].strip()
            RC = list()
            for i in rc.split(' '):
                if i != '':
                    RC.append(i)
            p = fest_rule[phone].strip()
            P = list()
            for i in p.split(' '):
                if i != '':
                    P.append(i)
            rule = rewrites.RewriteRule(LC, G, RC, P, 0)
            self.addRule(G[0], rule)

    def syllabify(self, word, phones):
        """ Predict the syllable structure of the given phone sequence...
        """
        # word is a Speect item, not used for now
        phones.append("#")
        phones.reverse()
        phones.append("#")
        phones.reverse()
        # find matching rule and thus phoneme for each grapheme..
        tmp = self.rewrite(phones)
        syl = list()
        syllables = list()
        for i in tmp:
            if i != '-':
                syl.append(i)
            else:
                syllables.append(syl)
                syl = list()
        if len(syl) != 0:
            syllables.append(syl)
        return syllables
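
# Illustrative standalone sketch, not part of the original module, of the
# boundary-splitting step at the end of syllabify(): the rewritten phone sequence
# is cut on '-' boundary markers, so a rule output like the one below yields two
# syllables. The phone values are made up.
def _split_on_boundaries_sketch():
    tmp = ['t', 'a', '-', 'b', 'a']
    syllables = list()
    syl = list()
    for i in tmp:
        if i != '-':
            syl.append(i)
        else:
            syllables.append(syl)
            syl = list()
    if len(syl) != 0:
        syllables.append(syl)
    return syllables  # [['t', 'a'], ['b', 'a']]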
[ "speect.modules.rewrites.RewriteRule" ]
[((4463, 4500), 'speect.modules.rewrites.RewriteRule', 'rewrites.RewriteRule', (['LC', 'G', 'RC', 'P', '(0)'], {}), '(LC, G, RC, P, 0)\n', (4483, 4500), True, 'import speect.modules.rewrites as rewrites\n')]
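The `Syllab_Rewrites` constructor above tokenizes each festival-style rule field — left context, graphemes, right context, phone — by splitting on single spaces and dropping empty tokens before building a `RewriteRule`. A minimal standalone sketch of that tokenization; the rule tuple below is hypothetical, not taken from a real rule set:

# Standalone sketch of the field tokenization done in Syllab_Rewrites.__init__.
# The rule tuple is invented for illustration only.
fest_rule = ("# b", "a", "d #", "a -")  # (left_context, graphemes, right_context, phone)

def tokenize(field):
    # Split on spaces and drop empty tokens, exactly as the constructor does.
    return [tok for tok in field.strip().split(' ') if tok != '']

LC, G, RC, P = (tokenize(f) for f in fest_rule)
print(LC, G, RC, P)  # ['#', 'b'] ['a'] ['d', '#'] ['a', '-']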
#!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import setup with open('README.rst') as readme_file: readme = readme_file.read() with open('HISTORY.rst') as history_file: history = history_file.read() setup( name='flake8-pytest-mark', version='1.0.0', description='A flake8 plugin that helps check the presence of a PyTest mark', long_description=readme + '\n\n' + history, author='rpc-automation', author_email='<EMAIL>', url='https://github.com/rcbops/flake8-pytest-mark', entry_points={ 'flake8.extension': [ 'M = flake8_pytest_mark:MarkChecker', ], }, packages=['flake8_pytest_mark'], include_package_data=True, install_requires=[ 'flake8>=3.5.0', ], python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', license='Apache Software License 2.0', zip_safe=False, keywords='flake8 flake8-pytest-mark', classifiers=[ 'Development Status :: 4 - Beta', 'Framework :: Flake8', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Natural Language :: English', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Topic :: Software Development :: Libraries :: Python Modules' ], )
[ "setuptools.setup" ]
[((227, 1365), 'setuptools.setup', 'setup', ([], {'name': '"""flake8-pytest-mark"""', 'version': '"""1.0.0"""', 'description': '"""A flake8 plugin that helps check the presence of a PyTest mark"""', 'long_description': "(readme + '\\n\\n' + history)", 'author': '"""rpc-automation"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/rcbops/flake8-pytest-mark"""', 'entry_points': "{'flake8.extension': ['M = flake8_pytest_mark:MarkChecker']}", 'packages': "['flake8_pytest_mark']", 'include_package_data': '(True)', 'install_requires': "['flake8>=3.5.0']", 'python_requires': '""">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"""', 'license': '"""Apache Software License 2.0"""', 'zip_safe': '(False)', 'keywords': '"""flake8 flake8-pytest-mark"""', 'classifiers': "['Development Status :: 4 - Beta', 'Framework :: Flake8',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English', 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries :: Python Modules']"}), "(name='flake8-pytest-mark', version='1.0.0', description=\n 'A flake8 plugin that helps check the presence of a PyTest mark',\n long_description=readme + '\\n\\n' + history, author='rpc-automation',\n author_email='<EMAIL>', url=\n 'https://github.com/rcbops/flake8-pytest-mark', entry_points={\n 'flake8.extension': ['M = flake8_pytest_mark:MarkChecker']}, packages=[\n 'flake8_pytest_mark'], include_package_data=True, install_requires=[\n 'flake8>=3.5.0'], python_requires=\n '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', license=\n 'Apache Software License 2.0', zip_safe=False, keywords=\n 'flake8 flake8-pytest-mark', classifiers=[\n 'Development Status :: 4 - Beta', 'Framework :: Flake8',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English', 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries :: Python Modules'])\n", (232, 1365), False, 'from setuptools import setup\n')]
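The `entry_points` mapping in the setup script above is what lets flake8 discover the plugin: once installed, flake8 scans the `flake8.extension` group and routes `M`-prefixed error codes to `MarkChecker`. A sketch of inspecting that group with the standard library (assumes Python 3.8+ and that the package — or any flake8 plugin — is installed; otherwise nothing is printed):

# List flake8 extension plugins registered via setuptools entry points.
from importlib.metadata import entry_points

eps = entry_points()
# entry_points() returns a group->entries mapping on 3.8-3.9 and an object
# with .select(group=...) on 3.10+; handle both shapes.
group = eps.select(group="flake8.extension") if hasattr(eps, "select") else eps.get("flake8.extension", [])
for ep in group:
    print(ep.name, "->", ep.value)  # e.g. "M -> flake8_pytest_mark:MarkChecker"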
import re import requests import os import json import zipfile import files # load config file config = None with open("bundler.json") as f: config = json.load(f) OUTPUT_DIR = "lib" GITHUB_API = "https://api.github.com/" if not os.path.exists(OUTPUT_DIR): os.makedirs(OUTPUT_DIR) for package in config: print("\nRunning on {}".format(package["repo"])) # get release info r = requests.get(GITHUB_API + "repos/{}/releases".format(package['repo'].replace('\\', '/'))) r.raise_for_status() releases = r.json() # find latest compatible version # this assumes that GH API returns a list of releases sorted most recent last. release = None for rl in releases: if re.match(package["tagRegex"], rl["tag_name"]): release = rl break if release is None: print(f"No release compatible with {package['majorVersion']}.x.x for {package['repo']}") continue print("Found compatible release {}".format(release["tag_name"])) # find target asset asset_url = None asset_name = None for asset in release["assets"]: if re.match(package["assetNameRegex"], asset["name"]): print(f"Found asset {asset['name']}") asset_url = asset["browser_download_url"] asset_name = ".".join(asset["name"].split(".")[:-1]) break if asset_url is None: print(f"No asset matching '{package['assetNameRegex']}' for {package['repo']}") continue # download target asset print("Downloading", asset_url) local = files.download_file(asset_url) # extract files with zipfile.ZipFile(local) as zf: for file in package["files"]: out = os.path.join(OUTPUT_DIR, file["output"].replace("/", os.path.sep)) dr = os.path.sep.join(out.split(os.path.sep)[:-1]) if not os.path.exists(dr): os.makedirs(dr) src = file["source"].format(asset_name) print(f"Extracting {src} to {out}") with open(out, "wb") as out_file, zf.open(src) as src_file: si = src_file.read(4096) while si != b"": out_file.write(si) si = src_file.read(4096) os.remove(local)
[ "os.remove", "json.load", "zipfile.ZipFile", "os.makedirs", "os.path.exists", "files.download_file", "re.match" ]
[((156, 168), 'json.load', 'json.load', (['f'], {}), '(f)\n', (165, 168), False, 'import json\n'), ((236, 262), 'os.path.exists', 'os.path.exists', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (250, 262), False, 'import os\n'), ((268, 291), 'os.makedirs', 'os.makedirs', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (279, 291), False, 'import os\n'), ((1578, 1608), 'files.download_file', 'files.download_file', (['asset_url'], {}), '(asset_url)\n', (1597, 1608), False, 'import files\n'), ((2281, 2297), 'os.remove', 'os.remove', (['local'], {}), '(local)\n', (2290, 2297), False, 'import os\n'), ((716, 761), 're.match', 're.match', (["package['tagRegex']", "rl['tag_name']"], {}), "(package['tagRegex'], rl['tag_name'])\n", (724, 761), False, 'import re\n'), ((1130, 1180), 're.match', 're.match', (["package['assetNameRegex']", "asset['name']"], {}), "(package['assetNameRegex'], asset['name'])\n", (1138, 1180), False, 'import re\n'), ((1639, 1661), 'zipfile.ZipFile', 'zipfile.ZipFile', (['local'], {}), '(local)\n', (1654, 1661), False, 'import zipfile\n'), ((1889, 1907), 'os.path.exists', 'os.path.exists', (['dr'], {}), '(dr)\n', (1903, 1907), False, 'import os\n'), ((1925, 1940), 'os.makedirs', 'os.makedirs', (['dr'], {}), '(dr)\n', (1936, 1940), False, 'import os\n')]
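The release-selection loop in the bundler above depends on the GitHub API returning releases newest-first and takes the first tag matching `tagRegex`. The same matching, isolated against a mocked payload; the tags and regex below are invented for illustration:

import re

# Mocked /releases payload, newest first, as the script assumes.
releases = [
    {"tag_name": "v3.0.0-beta1"},
    {"tag_name": "v2.4.1"},
    {"tag_name": "v2.3.0"},
]
tag_regex = r"^v2\.\d+\.\d+$"  # e.g. "only stable 2.x releases"

release = next((rl for rl in releases if re.match(tag_regex, rl["tag_name"])), None)
print(release)  # {'tag_name': 'v2.4.1'}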
from django.db import models class Company(models.Model): name = models.CharField(blank=True, max_length=300) ticker = models.CharField(blank=True, max_length=300) def __str__(self): return f"({self.ticker}) {self.name}"
[ "django.db.models.CharField" ]
[((71, 115), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(300)'}), '(blank=True, max_length=300)\n', (87, 115), False, 'from django.db import models\n'), ((129, 173), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(300)'}), '(blank=True, max_length=300)\n', (145, 173), False, 'from django.db import models\n')]
# Copyright (c) 2009, 2010, 2011 Google Inc. All rights reserved. # Copyright (c) 2009 Apple Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import re def _first_non_empty_line_after_index(lines, index=0): first_non_empty_line = index for line in lines[index:]: if re.match("^\s*$", line): first_non_empty_line += 1 else: break return first_non_empty_line class CommitMessage: def __init__(self, message): self.message_lines = message[_first_non_empty_line_after_index(message, 0):] def body(self, lstrip=False): lines = self.message_lines[_first_non_empty_line_after_index(self.message_lines, 1):] if lstrip: lines = [line.lstrip() for line in lines] return "\n".join(lines) + "\n" def description(self, lstrip=False, strip_url=False): line = self.message_lines[0] if lstrip: line = line.lstrip() if strip_url: line = re.sub("^(\s*)<.+> ", "\1", line) return line def message(self): return "\n".join(self.message_lines) + "\n"
[ "re.sub", "re.match" ]
[((1737, 1761), 're.match', 're.match', (['"""^\\\\s*$"""', 'line'], {}), "('^\\\\s*$', line)\n", (1745, 1761), False, 'import re\n'), ((2435, 2471), 're.sub', 're.sub', (['"""^(\\\\s*)<.+> """', '"""\x01"""', 'line'], {}), "('^(\\\\s*)<.+> ', '\\x01', line)\n", (2441, 2471), False, 'import re\n')]
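One subtlety in the `CommitMessage` record above: in `description(strip_url=True)` the replacement string `"\1"` is an octal escape for the control character `\x01`, not a backreference — the extracted call above even records it as `'\x01'`. A raw string `r"\1"` would keep the captured leading whitespace as presumably intended. A quick demonstration (the input line is hypothetical):

import re

line = "    <https://example.org/change> Fix crash in parser"
# As written above, "\1" is chr(1): the captured indentation is replaced by \x01.
print(repr(re.sub(r"^(\s*)<.+> ", "\1", line)))   # '\x01Fix crash in parser'
# With a raw replacement string the backreference restores the indentation.
print(repr(re.sub(r"^(\s*)<.+> ", r"\1", line)))  # '    Fix crash in parser'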
#!/usr/bin/env python import json import os import sys import glob from cloudpickle import dumps, loads import numpy as np import dask.array as da import dask.dataframe as ddf import argparse from random import randint from pathlib import Path from dask_ml.wrappers import ParallelPostFit from dask_ml.ensemble import BlockwiseVotingClassifier from dask_ml.compose import ColumnTransformer from sklearnex import patch_sklearn patch_sklearn() from sklearn.ensemble import HistGradientBoostingClassifier from dask_ml.preprocessing import MinMaxScaler from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score from sklearn.linear_model import LogisticRegression labels = ['adware', 'flooder', 'ransomware', 'dropper', 'spyware', 'packed', 'crypto_miner', 'file_infector', 'installer', 'worm', 'downloader'] features = [f"feature_{x}" for x in range(2381)] prefix="/opt/ml" def log_container_info(): print('------------------- environment variables -------------------') print(os.environ) print('------------------- environment variables -------------------') print('------------------- arguments -------------------') print(sys.argv) print('------------------- arguments -------------------') print('------------------- filesystem -------------------') for filename in glob.iglob(prefix + '**/**', recursive=True): print(filename) print('------------------- filesystem -------------------') print('------------------- config -------------------') for filename in glob.iglob(prefix + '**/**', recursive=True): if filename.endswith(".json"): print(f'------------------- {filename} -------------------') with open(filename, "r") as f: print(f.read()) print(f'------------------- {filename} -------------------') print('------------------- config -------------------') def train(args, hyperparameters): print('Starting the training.') print("load k best") kbest = set() for label in labels: with open(f"{prefix}/input/data/support/scores-{label}.pkl", 'rb') as f: scores = loads(f.read()) columns = [x[0] for x in list(sorted(scores, key=lambda i: i[1], reverse=True))[:args.k]] kbest.update(columns) kbest=list(sorted(list(kbest), key=lambda x: int(x.split('_')[1]))) print(f"k best = {kbest}") print(f"k best length = {len(kbest)}") print("load train and test dataset") X_train = ddf.read_parquet(f"{prefix}/input/data/train/x/", compression="snappy", columns=kbest) X_test = ddf.read_parquet(f"{prefix}/input/data/test/x/" , compression="snappy", columns=kbest) y_train = ddf.read_parquet(f"{prefix}/input/data/train/y/", compression="snappy", columns=labels) y_test = ddf.read_parquet(f"{prefix}/input/data/test/y/" , compression="snappy", columns=labels) print("sample train") if args.sample < 1.0: X_train = X_train.sample(frac=args.sample, replace=True, random_state=args.random_state) y_train = y_train.sample(frac=args.sample, replace=True, random_state=args.random_state) print("X train to array") X_train = X_train.to_dask_array(lengths=True) print("X test to array") X_test = X_test.to_dask_array(lengths=True) print("training hgbc models") training_true = dict() test_true = dict() training_pred = dict() test_pred = dict() for label in labels: clf = train_hgbc(args, hyperparameters, label, X_train, y_train) training_true[label] = y_train[label].to_dask_array(lengths=True).compute() test_true[label] = y_test[label].to_dask_array(lengths=True).compute() training_pred[label] = clf.predict(X_train).compute() test_pred[label] = clf.predict(X_test).compute() for label in labels: 
y_training_true = training_true[label] y_test_true = test_true[label] y_training_pred = training_pred[label] y_test_pred = test_pred[label] print(f"----------------------------------{label}----------------------------------") print(f"{label}_training_accuracy_score={accuracy_score(y_training_true, y_training_pred):.5f};") print(f"{label}_training_balanced_accuracy_score={balanced_accuracy_score(y_training_true, y_training_pred, adjusted=True):.5f};") print(f"{label}_training_precision_score={precision_score(y_training_true, y_training_pred):.5f};") print(f"{label}_training_recall_score={recall_score(y_training_true, y_training_pred):.5f};") print(f"{label}_training_f1_score={f1_score(y_training_true, y_training_pred):.5f};") print(f"{label}_test_accuracy_score={accuracy_score(y_test_true, y_test_pred):.5f};") print(f"{label}_test_balanced_accuracy_score={balanced_accuracy_score(y_test_true, y_test_pred, adjusted=True):.5f};") print(f"{label}_test_precision_score={precision_score(y_test_true, y_test_pred):.5f};") print(f"{label}_test_recall_score={recall_score(y_test_true, y_test_pred):.5f};") print(f"{label}_test_f1_score={f1_score(y_test_true, y_test_pred):.5f};") y_training_true = da.concatenate([training_true[label] for label in labels]) y_test_true = da.concatenate([test_true[label] for label in labels]) y_training_pred = da.concatenate([training_pred[label] for label in labels]) y_test_pred = da.concatenate([test_pred[label] for label in labels]) print(f"----------------------------------General----------------------------------") print(f"training_accuracy_score={accuracy_score(y_training_true, y_training_pred):.5f};") print(f"training_balanced_accuracy_score={balanced_accuracy_score(y_training_true, y_training_pred, adjusted=True):.5f};") print(f"training_precision_score={precision_score(y_training_true, y_training_pred):.5f};") print(f"training_recall_score={recall_score(y_training_true, y_training_pred):.5f};") print(f"training_f1_score={f1_score(y_training_true, y_training_pred):.5f};") print(f"test_accuracy_score={accuracy_score(y_test_true, y_test_pred):.5f};") print(f"test_balanced_accuracy_score={balanced_accuracy_score(y_test_true, y_test_pred, adjusted=True):.5f};") print(f"test_precision_score={precision_score(y_test_true, y_test_pred):.5f};") print(f"test_recall_score={recall_score(y_test_true, y_test_pred):.5f};") print(f"test_f1_score={f1_score(y_test_true, y_test_pred):.5f};") print('Finish training') def train_hgbc(args, hyperparameters, label, X_train, y_train): print(f"training model for {label}") modelname = f"{prefix}/output/model-{label}-{args.sample}-{args.k}.pkl" if Path(modelname).is_file(): print(f"loading {modelname} from disk") with open(modelname, "rb") as f: return loads(f.read()) y_training_true = y_train[label].to_dask_array(lengths=True) clf = ParallelPostFit(estimator=HistGradientBoostingClassifier(**hyperparameters)) clf.fit(X_train, y_training_true) print("save model") with open(modelname, "wb") as f: f.write(dumps(clf)) return clf def cast(hyperparameters, name, type): if name in hyperparameters: hyperparameters[name] = type(hyperparameters[name]) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("-k", type=int, default=100) parser.add_argument("-sample", type=float, default=0.1) parser.add_argument("-random-state", type=int, default=randint(0, 2**32)) with open('/opt/ml/input/config/hyperparameters.json', 'r') as f: hyperparameters = json.load(f) cast(hyperparameters, "random_state", int) cast(hyperparameters, 
"max_iter", int) cast(hyperparameters, "max_leaf_nodes", int) cast(hyperparameters, "verbose", int) cast(hyperparameters, "n_jobs", int) log_container_info() for sample in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: for k in [100, 200, 300]: print(f"-----------------------------k={k}-sample-{sample}-----------------------------") args, unknown = parser.parse_known_args(['-k', str(k), '-sample', str(sample)]) train(args, hyperparameters) print(f"-----------------------------k={k}-sample-{sample}-----------------------------") sys.exit(0)
[ "json.load", "sklearn.ensemble.HistGradientBoostingClassifier", "argparse.ArgumentParser", "random.randint", "cloudpickle.dumps", "sklearn.metrics.accuracy_score", "sklearn.metrics.balanced_accuracy_score", "sklearn.metrics.recall_score", "pathlib.Path", "sklearn.metrics.f1_score", "dask.array.concatenate", "sklearn.metrics.precision_score", "sklearnex.patch_sklearn", "glob.iglob", "dask.dataframe.read_parquet", "sys.exit" ]
[((432, 447), 'sklearnex.patch_sklearn', 'patch_sklearn', ([], {}), '()\n', (445, 447), False, 'from sklearnex import patch_sklearn\n'), ((1377, 1421), 'glob.iglob', 'glob.iglob', (["(prefix + '**/**')"], {'recursive': '(True)'}), "(prefix + '**/**', recursive=True)\n", (1387, 1421), False, 'import glob\n'), ((1591, 1635), 'glob.iglob', 'glob.iglob', (["(prefix + '**/**')"], {'recursive': '(True)'}), "(prefix + '**/**', recursive=True)\n", (1601, 1635), False, 'import glob\n'), ((2555, 2645), 'dask.dataframe.read_parquet', 'ddf.read_parquet', (['f"""{prefix}/input/data/train/x/"""'], {'compression': '"""snappy"""', 'columns': 'kbest'}), "(f'{prefix}/input/data/train/x/', compression='snappy',\n columns=kbest)\n", (2571, 2645), True, 'import dask.dataframe as ddf\n'), ((2656, 2745), 'dask.dataframe.read_parquet', 'ddf.read_parquet', (['f"""{prefix}/input/data/test/x/"""'], {'compression': '"""snappy"""', 'columns': 'kbest'}), "(f'{prefix}/input/data/test/x/', compression='snappy',\n columns=kbest)\n", (2672, 2745), True, 'import dask.dataframe as ddf\n'), ((2757, 2848), 'dask.dataframe.read_parquet', 'ddf.read_parquet', (['f"""{prefix}/input/data/train/y/"""'], {'compression': '"""snappy"""', 'columns': 'labels'}), "(f'{prefix}/input/data/train/y/', compression='snappy',\n columns=labels)\n", (2773, 2848), True, 'import dask.dataframe as ddf\n'), ((2859, 2949), 'dask.dataframe.read_parquet', 'ddf.read_parquet', (['f"""{prefix}/input/data/test/y/"""'], {'compression': '"""snappy"""', 'columns': 'labels'}), "(f'{prefix}/input/data/test/y/', compression='snappy',\n columns=labels)\n", (2875, 2949), True, 'import dask.dataframe as ddf\n'), ((5246, 5304), 'dask.array.concatenate', 'da.concatenate', (['[training_true[label] for label in labels]'], {}), '([training_true[label] for label in labels])\n', (5260, 5304), True, 'import dask.array as da\n'), ((5327, 5381), 'dask.array.concatenate', 'da.concatenate', (['[test_true[label] for label in labels]'], {}), '([test_true[label] for label in labels])\n', (5341, 5381), True, 'import dask.array as da\n'), ((5405, 5463), 'dask.array.concatenate', 'da.concatenate', (['[training_pred[label] for label in labels]'], {}), '([training_pred[label] for label in labels])\n', (5419, 5463), True, 'import dask.array as da\n'), ((5486, 5540), 'dask.array.concatenate', 'da.concatenate', (['[test_pred[label] for label in labels]'], {}), '([test_pred[label] for label in labels])\n', (5500, 5540), True, 'import dask.array as da\n'), ((7401, 7426), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7424, 7426), False, 'import argparse\n'), ((8440, 8451), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8448, 8451), False, 'import sys\n'), ((7714, 7726), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7723, 7726), False, 'import json\n'), ((6774, 6789), 'pathlib.Path', 'Path', (['modelname'], {}), '(modelname)\n', (6778, 6789), False, 'from pathlib import Path\n'), ((7028, 7077), 'sklearn.ensemble.HistGradientBoostingClassifier', 'HistGradientBoostingClassifier', ([], {}), '(**hyperparameters)\n', (7058, 7077), False, 'from sklearn.ensemble import HistGradientBoostingClassifier\n'), ((7199, 7209), 'cloudpickle.dumps', 'dumps', (['clf'], {}), '(clf)\n', (7204, 7209), False, 'from cloudpickle import dumps, loads\n'), ((7599, 7618), 'random.randint', 'randint', (['(0)', '(2 ** 32)'], {}), '(0, 2 ** 32)\n', (7606, 7618), False, 'from random import randint\n'), ((5669, 5717), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_training_true', 
'y_training_pred'], {}), '(y_training_true, y_training_pred)\n', (5683, 5717), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((5772, 5844), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['y_training_true', 'y_training_pred'], {'adjusted': '(True)'}), '(y_training_true, y_training_pred, adjusted=True)\n', (5795, 5844), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((5891, 5940), 'sklearn.metrics.precision_score', 'precision_score', (['y_training_true', 'y_training_pred'], {}), '(y_training_true, y_training_pred)\n', (5906, 5940), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((5984, 6030), 'sklearn.metrics.recall_score', 'recall_score', (['y_training_true', 'y_training_pred'], {}), '(y_training_true, y_training_pred)\n', (5996, 6030), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((6070, 6112), 'sklearn.metrics.f1_score', 'f1_score', (['y_training_true', 'y_training_pred'], {}), '(y_training_true, y_training_pred)\n', (6078, 6112), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((6155, 6195), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test_true', 'y_test_pred'], {}), '(y_test_true, y_test_pred)\n', (6169, 6195), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((6246, 6310), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['y_test_true', 'y_test_pred'], {'adjusted': '(True)'}), '(y_test_true, y_test_pred, adjusted=True)\n', (6269, 6310), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((6353, 6394), 'sklearn.metrics.precision_score', 'precision_score', (['y_test_true', 'y_test_pred'], {}), '(y_test_true, y_test_pred)\n', (6368, 6394), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((6434, 6472), 'sklearn.metrics.recall_score', 'recall_score', (['y_test_true', 'y_test_pred'], {}), '(y_test_true, y_test_pred)\n', (6446, 6472), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((6508, 6542), 'sklearn.metrics.f1_score', 'f1_score', (['y_test_true', 'y_test_pred'], {}), '(y_test_true, y_test_pred)\n', (6516, 6542), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((4232, 4280), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_training_true', 'y_training_pred'], {}), '(y_training_true, y_training_pred)\n', (4246, 4280), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((4347, 4419), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['y_training_true', 'y_training_pred'], {'adjusted': '(True)'}), '(y_training_true, y_training_pred, adjusted=True)\n', (4370, 4419), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, 
precision_score, recall_score, f1_score, roc_auc_score\n'), ((4478, 4527), 'sklearn.metrics.precision_score', 'precision_score', (['y_training_true', 'y_training_pred'], {}), '(y_training_true, y_training_pred)\n', (4493, 4527), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((4583, 4629), 'sklearn.metrics.recall_score', 'recall_score', (['y_training_true', 'y_training_pred'], {}), '(y_training_true, y_training_pred)\n', (4595, 4629), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((4681, 4723), 'sklearn.metrics.f1_score', 'f1_score', (['y_training_true', 'y_training_pred'], {}), '(y_training_true, y_training_pred)\n', (4689, 4723), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((4778, 4818), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test_true', 'y_test_pred'], {}), '(y_test_true, y_test_pred)\n', (4792, 4818), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((4881, 4945), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['y_test_true', 'y_test_pred'], {'adjusted': '(True)'}), '(y_test_true, y_test_pred, adjusted=True)\n', (4904, 4945), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((5000, 5041), 'sklearn.metrics.precision_score', 'precision_score', (['y_test_true', 'y_test_pred'], {}), '(y_test_true, y_test_pred)\n', (5015, 5041), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((5093, 5131), 'sklearn.metrics.recall_score', 'recall_score', (['y_test_true', 'y_test_pred'], {}), '(y_test_true, y_test_pred)\n', (5105, 5131), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n'), ((5179, 5213), 'sklearn.metrics.f1_score', 'f1_score', (['y_test_true', 'y_test_pred'], {}), '(y_test_true, y_test_pred)\n', (5187, 5213), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n')]
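The k-best feature selection at the top of the training script above unions the top-k feature names per label, then sorts by the numeric suffix so the column order matches the parquet schema. The same logic isolated, with invented score pairs standing in for the pickled per-label scores:

# Mocked per-label (feature, score) pairs; values are invented for illustration.
scores_by_label = {
    "adware":     [("feature_3", 0.9), ("feature_1", 0.7), ("feature_12", 0.2)],
    "ransomware": [("feature_12", 0.8), ("feature_3", 0.6), ("feature_5", 0.1)],
}
k = 2

kbest = set()
for label, scores in scores_by_label.items():
    top = [name for name, _ in sorted(scores, key=lambda i: i[1], reverse=True)[:k]]
    kbest.update(top)

# Sort by the numeric suffix, as the training script does.
kbest = sorted(kbest, key=lambda x: int(x.split("_")[1]))
print(kbest)  # ['feature_1', 'feature_3', 'feature_12']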
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2017, 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. ''' Simply utility functions to improve QOL of QM developers and QM users ''' import logging import re import sys import traceback import warnings import logging import sys import os import pandas as pd import numpy as np from scipy.spatial import distance from typing import Tuple, TYPE_CHECKING from copy import deepcopy from qiskit_metal.draw import Vector from numpy.linalg import norm if TYPE_CHECKING: from qiskit_metal import logger __all__ = [ 'copy_update', 'dict_start_with', 'data_frame_empty_typed', 'clean_name', 'enable_warning_traceback', 'get_traceback', 'print_traceback_easy', 'log_error_easy', 'monkey_patch', 'can_write_to_path', 'can_write_to_path_with_warning', 'toggle_numbers', 'bad_fillet_idxs', 'compress_vertex_list', 'get_range_of_vertex_to_not_fillet' ] #################################################################################### # Dictionary related def copy_update(options, *args, deep_copy=True, **kwargs): ''' Utility funciton to merge two dictionaries. Args: options (object): Options deep_copy (bool): True to do a deep copy kwargs (dict): Dictionary of parameters Returns: dict: Merged dictionary ''' if deep_copy: options = deepcopy(options) options.update(*args, **kwargs) else: options = options.copy() options.update(*args, **kwargs) return options def dict_start_with(my_dict, start_with, as_=list): ''' Case sensitive https://stackoverflow.com/questions/17106819/accessing-python-dict-values-with-the-key-start-characters Args: my_dict (dict): The dictionary starts_with (str): String to check of as_ (type): A list of dict. Defaults to list. Returns: list or dict: Parts of the dictionary with keys starting with the given text .. code-block:: python my_dict = {'name': 'Klauss', 'age': 26, 'Date of birth': '15th july'} dict_start_with(my_dict, 'Date') ''' if as_ == list: # start_with in k] return [v for k, v in my_dict.items() if k.startswith(start_with)] elif as_ == dict: return {k: v for k, v in my_dict.items() if k.startswith(start_with)} # def display_options(*ops_names, options=None, find_dot_keys=True, do_display=True): # ''' # Print html display of options dictionary by default `DEFAULT_OPTIONS` # Example use: # --------------- # display_options('make_transmon_pocket_v1', 'make_transmon_connector_v1') # or # dfs, html = display_options(Metal_Transmon_Pocket.__name__, do_display=False) # ''' # # IDEA: display also ._hfss and ._gds etc. for those that have it and add to plugins # if options is None: # from .. import DEFAULT_OPTIONS # options = DEFAULT_OPTIONS # res = [] # for keyname in ops_names: # if find_dot_keys: # names = list(filter(lambda x, match=keyname: x is match or # x.startswith(match+'.'), DEFAULT_OPTIONS.keys())) # names.sort() # for name in names: # res += [pd.Series(options[name], name=name).to_frame()] # else: # res += [pd.Series(options[keyname], name=keyname).to_frame()] # from pyEPR.toolbox import display_dfs # res_html = display_dfs(*res, do_display=do_display) #why not just directly call the function DataFrame_display_side_by_side(*args) ? 
# return res, res_html def data_frame_empty_typed(column_types: dict): """Creates and empty DataFrame with dtypes for each column given by the dictionary. Arguments: column_types (dict): A key, dtype pairs Returns: DataFrame: An empty dataframe with the typed columns """ df = pd.DataFrame() for name, dtype in column_types.items(): df[name] = pd.Series(dtype=dtype) return df def clean_name(text: str): """Clean a string to a proper variable name in python. Arguments: text (str): Original string Returns: str: Corrected string .. code-block:: python clean_name('32v2 g #Gmw845h$W b53wi ') *Output* `'_32v2_g__Gmw845h_W_b53wi_'` See https://stackoverflow.com/questions/3303312/how-do-i-convert-a-string-to-a-valid-variable-name-in-python """ return re.sub('\W|^(?=\d)', '_', text) #################################################################################### # Tracebacks _old_warn = None def enable_warning_traceback(): """ Show ow traceback on warning. """ global _old_warn _old_warn = warnings.warn def warn(*args, **kwargs): ''' Warn user with traceback to warning. ''' tb = traceback.extract_stack() _old_warn(*args, **kwargs) print("".join(traceback.format_list(tb)[:-1])) warnings.warn = warn def get_traceback(): ''' Returns traceback string. Format each frame in the traceback as a string. Returns: str: Traceback string ''' trace_back = traceback.extract_stack() return "".join(traceback.format_list(trace_back)[:-1]) def print_traceback_easy(start=26): ''' Utility function to print traceback for debug. Will report in series the string version of the frames that we are currently in. Args: start (int): Starting position of the traceback frame. Defaults to 26. Assumes runs from Jupyter notebooks. In general set to zero. ''' print(f"\n") print('\n'.join(map(repr, traceback.extract_stack()[start:]))) print('\n') def log_error_easy(logger: logging.Logger, pre_text='', post_text='', do_print=False): """ Print log message. Arguments: logger (logging.Logger): The logger. pre_text (str): Initial text to write. Defaults to ''. post_text (str): End text to write. Defaults to ''. do_print (bool): True to do the printing, False otherwise. Defaults to False. Test use: .. code-block:: python import traceback from qiskit_metal import logger from qiskit_metal.toolbox_python.utility_functions import log_error_easy def xx(): exc_info = sys.exc_info() try: raise TypeError("Oups!") except Exception as err: try: raise TypeError("Again !?!") except: pass # exc_type, exc_value, exc_tb = sys.exc_info() # error = traceback.format_exception(exc_type, exc_value, exc_tb) # logger.error('\\n\\n'+'\\n'.join(error)+'\\n') log_error_easy(metal.logger) xx() """ exc_type, exc_value, exc_tb = sys.exc_info() error = traceback.format_exception(exc_type, exc_value, exc_tb) text = f'{pre_text}\n\n' + '\n'.join(error) + f'\n{post_text}' logger.error(text) if do_print: print(text) ##################################################################################### def monkey_patch(self, func, func_name=None): ''' Monkey patch a method into a class at runtime. Use descriptor protocol when adding method as an attribute. For a method on a class, when you do a.some_method, python actually does: a.some_method.__get__(a, type(a)) So we're just reproducing that call sequence here explicitly. See: https://stackoverflow.com/questions/38485123/monkey-patching-bound-methods-in-python Args: func (function): function func_name (str): name of the function. Defaults to None. 
''' func_name = func_name or func.__name__ setattr(self, func_name, func.__get__(self, self.__class__)) # what happens if we reload the class or swap in real time? #################################################################################### # Used to detect and denote potential short segments, when fillet is used. # # Keep this method until the class QCheckLength will be fully tested. # def which_vertex_has_potential_fillet_errors(coords: list, a_fillet: float, fillet_comparison_precision: int) -> list: # """Iterate through the vertex and check using critea. # 1. If a start or end segment, is the length smaller than a_fillet. # 2. If segment in side of LineString, is the lenght smaller than,FILLET_SCALAR times a_fillet. # Note, there is a rounding error issues. So when the lenght of the segment is calculated, # it is rounded by using fillet_comparison_precision. # Args: # coords (list): List of tuples in (x,y) format. Each tuple represents a vertex on a LineSegment. # a_fillet (float): The radius to fillet a vertex. # fillet_comparison_precision (int): There are rounding issues when comparing to (fillet * scalar). # Use this when calculating length of line-segment. # Returns: # list: List of idexes. Each index corresponds to a vertex in coords, that would not fillet well. # """ # vertex_of_bad = list() # len_coords = len(coords) # if len_coords <= 1: # return vertex_of_bad # # When determining the critera to fillet, scale the fillet value by FILLET_SCALAR. # FILLET_SCALAR = 2.0 # scaled_fillet = a_fillet * FILLET_SCALAR # for index, xy in enumerate(coords): # # Skip the first vertex. # if index > 0: # xy_previous = coords[index-1] # seg_length = np.round( # distance.euclidean(xy_previous, xy), fillet_comparison_precision) # # If at first or last segment, use just the fillet value to check, otherwise, use FILLET_SCALAR. # # Need to not fillet index-1 to index line segment. # if index == 1 or index == len_coords-1: # if seg_length < a_fillet: # vertex_of_bad.extend([index-1, index]) # else: # if seg_length < scaled_fillet: # vertex_of_bad.extend([index-1, index]) # # As precaution, remove duplicates from list. # vertex_of_bad = list(set(vertex_of_bad)) # return vertex_of_bad def toggle_numbers(numbers: list, totlength: int) -> list: """ Given a list of integers called 'numbers', return the toggle of them from zero to totlength - 1. Args: numbers (list): Integers in the original list, in sorted order. totlength (int): Number of elements in complete list. Ex: [0, 1, 2, 3, ..., n - 1] has totlength n. Returns: list: A sorted list of all integers between 0 and totlength - 1, inclusive, not found in numbers. """ complement = [] if totlength: if not numbers: return [i for i in range(totlength)] j = 0 for i in range(totlength): if i < numbers[j]: complement.append(i) else: j += 1 if j >= len(numbers): return complement + [k for k in range(i + 1, totlength)] return complement def bad_fillet_idxs(coords: list, fradius: float, precision: int = 9, isclosed: bool = False) -> list: """ Get list of vertex indices in a linestring (isclosed = False) or polygon (isclosed = True) that cannot be filleted based on proximity to neighbors. By default, this list excludes the first and last vertices if the shape is a linestring. Args: coords (list): Ordered list of tuples of vertex coordinates. fradius (float): User-specified fillet radius from QGeometry table. precision (int, optional): Digits of precision used for round(). Defaults to 9. 
isclosed (bool, optional): Boolean denoting whether the shape is a linestring or polygon. Defaults to False. Returns: list: List of indices of vertices too close to their neighbors to be filleted. """ length = len(coords) get_dist = Vector.get_distance if isclosed: return [ i for i in range(length) if min(get_dist(coords[i - 1], coords[i], precision), get_dist(coords[i], coords[(i + 1) % length], precision)) < 2 * fradius ] if length < 3: return [] if length == 3: return [] if min(get_dist(coords[0], coords[1], precision), get_dist(coords[1], coords[2], precision)) >= fradius else [1] if (get_dist(coords[0], coords[1], precision) < fradius) or (get_dist( coords[1], coords[2], precision) < 2 * fradius): badlist = [1] else: badlist = [] for i in range(2, length - 2): if min(get_dist(coords[i - 1], coords[i], precision), get_dist(coords[i], coords[i + 1], precision)) < 2 * fradius: badlist.append(i) if (get_dist(coords[length - 3], coords[length - 2], precision) < 2 * fradius) or (get_dist(coords[length - 2], coords[length - 1], precision) < fradius): badlist.append(length - 2) return badlist def get_range_of_vertex_to_not_fillet(coords: list, fradius: float, precision: int = 9, add_endpoints: bool = True) -> list: """ Provide a list of tuples for a list of integers that correspond to coords. Each tuple corresponds to a range of indexes within coords. A range denotes vertexes that are too short to be fillet'd. If the range is just one point, meaning, not a segment, the tuple will contain the same index for start and end. Args: coords (list): Ordered list of tuples of vertex coordinates. fradius (float): User-specified fillet radius from QGeometry table. precision (int, optional): Digits of precision used for round(). Defaults to 9. add_endpoints (bool): Default is True. If the second to endpoint is in list, add the endpoint to list. Used for GDS, not add_qgeometry. Returns: list: A compressed list of tuples. So, it combines adjacent vertexes into a longer one. """ length = len(coords) # isclosed=False is for LineString unique_vertex = bad_fillet_idxs(coords, fradius, precision, isclosed=False) if add_endpoints: # The endpoints of LineString are never fillet'd. If the second vertex or second to last vertex # should not be fillet's, then don't fillet the endpoints. This is used for warning for add_qgeometry. # Also used in QGDSRenderer when breaking the LineString. if (1 in unique_vertex) and (0 not in unique_vertex): unique_vertex.append(0) # second to last vertex in unique_vertex if (length - 2 in unique_vertex) and (length - 1 not in unique_vertex): unique_vertex.append(length - 1) compressed_vertex = compress_vertex_list(unique_vertex) return compressed_vertex def compress_vertex_list(individual_vertex: list) -> list: """ Given a list of vertices that should not be fillet'd, search for a range and make them one compressed list. If the vertex is a point and not a line segment, the returned tuple's start and end are the same index. Args: individual_vertex (list): List of UNIQUE ints. Each int refers to an index of a LineString. Returns: list: A compressed list of tuples. So, it combines adjacent vertices into a longer one. """ reduced_idx = list() sorted_vertex = sorted(individual_vertex) len_vertex = len(sorted_vertex) if len_vertex > 0: # initialzie to unrealistic number. 
start = -1 end = -1 size_of_range = 0 for index, item in enumerate(sorted_vertex): if index == 0: start = item end = item else: if item == end + 1: end = item size_of_range += 1 else: if size_of_range == 0: # Only one vertex in range. reduced_idx.append((start, end)) start = item end = item else: # Two or more vertexes in range. reduced_idx.append((start, end)) size_of_range = 0 start = item end = item if index == len_vertex - 1: if size_of_range == 0: reduced_idx.append((start, end)) else: reduced_idx.append((start, end)) return reduced_idx else: return reduced_idx ####################################################################################### # File checking def can_write_to_path_with_warning(file: str) -> int: """Check if can write file. Args: file (str): Has the path and/or just the file name. Returns: int: 1 if access is allowed. Else returns 0, if access not given. """ a_logger = logger # If need to use lib pathlib. directory_name = os.path.dirname(os.path.abspath(file)) if os.access(directory_name, os.W_OK): return 1 else: a_logger.warning(f'Not able to write to directory.' f'File:"{file}" not written.' f' Checked directory:"{directory_name}".') return 0 def can_write_to_path(file: str) -> Tuple[int, str]: """ Check to see if path exists and file can be written. Args: file (str): Has the path and/or just the file name. Returns: Tuple[int, str]: int: 1 if access is allowed. Else returns 0, if access not given. str: Full path and file which was searched for. """ # If need to use lib pathlib. directory_name = os.path.dirname(os.path.abspath(file)) if os.access(directory_name, os.W_OK): return 1, directory_name else: return 0, directory_name
[ "pandas.DataFrame", "copy.deepcopy", "traceback.format_exception", "os.path.abspath", "qiskit_metal.logger.error", "traceback.extract_stack", "pandas.Series", "sys.exc_info", "traceback.format_list", "re.sub", "os.access" ]
[((4319, 4333), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4331, 4333), True, 'import pandas as pd\n'), ((4880, 4913), 're.sub', 're.sub', (['"""\\\\W|^(?=\\\\d)"""', '"""_"""', 'text'], {}), "('\\\\W|^(?=\\\\d)', '_', text)\n", (4886, 4913), False, 'import re\n'), ((5600, 5625), 'traceback.extract_stack', 'traceback.extract_stack', ([], {}), '()\n', (5623, 5625), False, 'import traceback\n'), ((7391, 7405), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7403, 7405), False, 'import sys\n'), ((7418, 7473), 'traceback.format_exception', 'traceback.format_exception', (['exc_type', 'exc_value', 'exc_tb'], {}), '(exc_type, exc_value, exc_tb)\n', (7444, 7473), False, 'import traceback\n'), ((7546, 7564), 'qiskit_metal.logger.error', 'logger.error', (['text'], {}), '(text)\n', (7558, 7564), False, 'from qiskit_metal import logger\n'), ((18031, 18065), 'os.access', 'os.access', (['directory_name', 'os.W_OK'], {}), '(directory_name, os.W_OK)\n', (18040, 18065), False, 'import os\n'), ((18759, 18793), 'os.access', 'os.access', (['directory_name', 'os.W_OK'], {}), '(directory_name, os.W_OK)\n', (18768, 18793), False, 'import os\n'), ((1775, 1792), 'copy.deepcopy', 'deepcopy', (['options'], {}), '(options)\n', (1783, 1792), False, 'from copy import deepcopy\n'), ((4398, 4420), 'pandas.Series', 'pd.Series', ([], {'dtype': 'dtype'}), '(dtype=dtype)\n', (4407, 4420), True, 'import pandas as pd\n'), ((5280, 5305), 'traceback.extract_stack', 'traceback.extract_stack', ([], {}), '()\n', (5303, 5305), False, 'import traceback\n'), ((18001, 18022), 'os.path.abspath', 'os.path.abspath', (['file'], {}), '(file)\n', (18016, 18022), False, 'import os\n'), ((18729, 18750), 'os.path.abspath', 'os.path.abspath', (['file'], {}), '(file)\n', (18744, 18750), False, 'import os\n'), ((5645, 5678), 'traceback.format_list', 'traceback.format_list', (['trace_back'], {}), '(trace_back)\n', (5666, 5678), False, 'import traceback\n'), ((5363, 5388), 'traceback.format_list', 'traceback.format_list', (['tb'], {}), '(tb)\n', (5384, 5388), False, 'import traceback\n'), ((6115, 6140), 'traceback.extract_stack', 'traceback.extract_stack', ([], {}), '()\n', (6138, 6140), False, 'import traceback\n')]
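`clean_name` in the utility module above is a single substitution — any non-word character, or the zero-width position before a leading digit, becomes `_`. Reproducing it standalone yields exactly the output quoted in its docstring:

import re

def clean_name(text):
    # Replace non-word chars, and the position before a leading digit, with "_".
    return re.sub(r"\W|^(?=\d)", "_", text)

print(clean_name("32v2 g #Gmw845h$W b53wi "))  # _32v2_g__Gmw845h_W_b53wi_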
######### # Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. from manager_rest.rest import rest_utils from manager_rest.rest.rest_utils import get_json_and_verify_params from aria.orchestrator.workflows.core import engine from aria.orchestrator import execution_preparer from aria.orchestrator.workflows.executor import process from ..... import manager_exceptions from .... import rest_decorators from .. import base class ARIAExecution(base.BaseARIAEndpoints): @rest_decorators.exceptions_handled def get(self, execution_id): """ Get Execution by id """ return self.model.execution.get(execution_id).to_dict() @rest_decorators.exceptions_handled def post(self, execution_id, **kwargs): """ Apply execution action (cancel, force-cancel) by id """ request_dict = get_json_and_verify_params({'action'}) action = request_dict['action'] valid_actions = ['cancel', 'force-cancel'] if action not in valid_actions: raise manager_exceptions.BadParametersError( 'Invalid action: {0}, Valid action values are: {1}'.format( action, valid_actions)) if action in ('cancel', 'force-cancel'): service = self.model.execution.get(execution_id) executor = process.ProcessExecutor(self.plugin_manager) compiler = execution_preparer.ExecutionPreparer( self.model, self.resource, self.plugin_manager, service, request_dict['workflow_name'] ) workflow_ctx = compiler.prepare(execution_id=execution_id) engine_ = engine.Engine(executor) engine_.cancel_execution(workflow_ctx) class ARIAExecutions(base.BaseARIAEndpoints): @rest_decorators.create_filters() def get(self, _include=None, filters=None, pagination=None, sort=None, **kwargs): """ Get an Execution list """ return self._respond_list( self.model.execution.list( include=_include, filters=filters, pagination=pagination, sort=sort, **kwargs ) ) def post(self, **kwargs): """ Start an execution """ request_dict = rest_utils.get_json_and_verify_params( dict( service_id={'type': int}, workflow_name={'type': basestring}, ) ) service = self.model.service.get(request_dict['service_id']) executor = process.ProcessExecutor(plugin_manager=self.plugin_manager) compiler = execution_preparer.ExecutionPreparer( self.model, self.resource, self.plugin_manager, service, request_dict['workflow_name'] ) workflow_ctx = compiler.prepare(executor=executor) engine_ = engine.Engine(executor) engine_.execute(workflow_ctx) return workflow_ctx.execution.to_dict( workflow_ctx.execution.fields() - {'created_at', 'started_at', 'ended_at'}), \ 201
[ "aria.orchestrator.workflows.core.engine.Engine", "manager_rest.rest.rest_utils.get_json_and_verify_params", "aria.orchestrator.workflows.executor.process.ProcessExecutor", "aria.orchestrator.execution_preparer.ExecutionPreparer" ]
[((1425, 1463), 'manager_rest.rest.rest_utils.get_json_and_verify_params', 'get_json_and_verify_params', (["{'action'}"], {}), "({'action'})\n", (1451, 1463), False, 'from manager_rest.rest.rest_utils import get_json_and_verify_params\n'), ((3232, 3291), 'aria.orchestrator.workflows.executor.process.ProcessExecutor', 'process.ProcessExecutor', ([], {'plugin_manager': 'self.plugin_manager'}), '(plugin_manager=self.plugin_manager)\n', (3255, 3291), False, 'from aria.orchestrator.workflows.executor import process\n'), ((3312, 3441), 'aria.orchestrator.execution_preparer.ExecutionPreparer', 'execution_preparer.ExecutionPreparer', (['self.model', 'self.resource', 'self.plugin_manager', 'service', "request_dict['workflow_name']"], {}), "(self.model, self.resource, self.\n plugin_manager, service, request_dict['workflow_name'])\n", (3348, 3441), False, 'from aria.orchestrator import execution_preparer\n'), ((3584, 3607), 'aria.orchestrator.workflows.core.engine.Engine', 'engine.Engine', (['executor'], {}), '(executor)\n', (3597, 3607), False, 'from aria.orchestrator.workflows.core import engine\n'), ((1908, 1952), 'aria.orchestrator.workflows.executor.process.ProcessExecutor', 'process.ProcessExecutor', (['self.plugin_manager'], {}), '(self.plugin_manager)\n', (1931, 1952), False, 'from aria.orchestrator.workflows.executor import process\n'), ((1977, 2106), 'aria.orchestrator.execution_preparer.ExecutionPreparer', 'execution_preparer.ExecutionPreparer', (['self.model', 'self.resource', 'self.plugin_manager', 'service', "request_dict['workflow_name']"], {}), "(self.model, self.resource, self.\n plugin_manager, service, request_dict['workflow_name'])\n", (2013, 2106), False, 'from aria.orchestrator import execution_preparer\n'), ((2289, 2312), 'aria.orchestrator.workflows.core.engine.Engine', 'engine.Engine', (['executor'], {}), '(executor)\n', (2302, 2312), False, 'from aria.orchestrator.workflows.core import engine\n')]
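The action handling in `ARIAExecution.post` above is a plain whitelist check before dispatching to the engine. A simplified standalone sketch of that validation — the real code raises `manager_exceptions.BadParametersError`; a `ValueError` stands in here:

def validate_action(action, valid_actions=("cancel", "force-cancel")):
    # Mirrors the whitelist check in ARIAExecution.post, with a plain ValueError.
    if action not in valid_actions:
        raise ValueError("Invalid action: {0}, Valid action values are: {1}".format(
            action, list(valid_actions)))
    return action

print(validate_action("cancel"))  # cancel
try:
    validate_action("pause")
except ValueError as err:
    print(err)  # Invalid action: pause, Valid action values are: ['cancel', 'force-cancel']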
import numpy as np import tensorflow as tf import math import os import glob import scipy.io #======================================================================================================================= #Helper functions to load pretrained weights #======================================================================================================================= def get_weight(weight_name, weight_dict): if weight_dict is None: print("Can't find weight") return None else: return weight_dict.get(weight_name) # returns None if name is not found in dictionary def load_weights(weight_dir): weight_path_all = glob.glob(os.path.join(weight_dir, "*.txt.npz")) pretrained_weight_dict = {} print(len(weight_path_all)) for path in weight_path_all: with np.load(path) as data: layer_name = os.path.basename(path).split('.')[0] print(layer_name) pretrained_weight_dict[layer_name] = data['arr_0'] print(data['arr_0'].shape) return pretrained_weight_dict def load_z_mapping_function(z, output_channel, weight, bias, scope, act=None): with tf.variable_scope(scope) as sc: w = tf.get_variable('w', initializer=weight, trainable=False) b = tf.get_variable('biases', initializer=bias, trainable=False) if act == "lrelu": print ("LRELU") out = lrelu(tf.matmul(z, w) + b) else: out = act(tf.matmul(z, w) + b) return out[:, :output_channel], out[:, output_channel:] def load_weights(weight_dir): weight_path_all = glob.glob(os.path.join(weight_dir, "*.txt.npz")) pretrained_weight_dict = {} print(len(weight_path_all)) for path in weight_path_all: with np.load(path) as data: layer_name = os.path.basename(path).split('.')[0] print(layer_name) pretrained_weight_dict[layer_name] = data['arr_0'] return pretrained_weight_dict #======================================================================================================================= def save_txt_file(pred, name, SAVE_DIR): with open(os.path.join(SAVE_DIR, "{0}.txt".format(name)), 'w') as fp: for i in pred: # print(tuple(point.tolist())) fp.write("{0}\n".format(i)) def transform_tensor_to_image (tensor): t = tf.transpose(tensor, [0 , 2, 1, 3]) return t[:,::-1, :, :] def transform_voxel_to_match_image(tensor): tensor = tf.transpose(tensor, [0, 2, 1, 3, 4]) tensor = tensor[:, ::-1, :, :, :] return tensor def transform_image_to_match_voxel(tensor): tensor = tf.transpose(tensor, [0, 2, 1, 3]) tensor = tensor[:, ::-1, :, :] return tensor def np_transform_tensor_to_image (tensor): t = np.transpose(tensor, [0, 2, 1, 3]) return t
[ "numpy.load", "os.path.basename", "numpy.transpose", "tensorflow.variable_scope", "tensorflow.transpose", "tensorflow.matmul", "os.path.join", "tensorflow.get_variable" ]
[((2376, 2410), 'tensorflow.transpose', 'tf.transpose', (['tensor', '[0, 2, 1, 3]'], {}), '(tensor, [0, 2, 1, 3])\n', (2388, 2410), True, 'import tensorflow as tf\n'), ((2497, 2534), 'tensorflow.transpose', 'tf.transpose', (['tensor', '[0, 2, 1, 3, 4]'], {}), '(tensor, [0, 2, 1, 3, 4])\n', (2509, 2534), True, 'import tensorflow as tf\n'), ((2649, 2683), 'tensorflow.transpose', 'tf.transpose', (['tensor', '[0, 2, 1, 3]'], {}), '(tensor, [0, 2, 1, 3])\n', (2661, 2683), True, 'import tensorflow as tf\n'), ((2789, 2823), 'numpy.transpose', 'np.transpose', (['tensor', '[0, 2, 1, 3]'], {}), '(tensor, [0, 2, 1, 3])\n', (2801, 2823), True, 'import numpy as np\n'), ((677, 714), 'os.path.join', 'os.path.join', (['weight_dir', '"""*.txt.npz"""'], {}), "(weight_dir, '*.txt.npz')\n", (689, 714), False, 'import os\n'), ((1166, 1190), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (1183, 1190), True, 'import tensorflow as tf\n'), ((1210, 1267), 'tensorflow.get_variable', 'tf.get_variable', (['"""w"""'], {'initializer': 'weight', 'trainable': '(False)'}), "('w', initializer=weight, trainable=False)\n", (1225, 1267), True, 'import tensorflow as tf\n'), ((1280, 1340), 'tensorflow.get_variable', 'tf.get_variable', (['"""biases"""'], {'initializer': 'bias', 'trainable': '(False)'}), "('biases', initializer=bias, trainable=False)\n", (1295, 1340), True, 'import tensorflow as tf\n'), ((1623, 1660), 'os.path.join', 'os.path.join', (['weight_dir', '"""*.txt.npz"""'], {}), "(weight_dir, '*.txt.npz')\n", (1635, 1660), False, 'import os\n'), ((826, 839), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (833, 839), True, 'import numpy as np\n'), ((1772, 1785), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1779, 1785), True, 'import numpy as np\n'), ((1420, 1435), 'tensorflow.matmul', 'tf.matmul', (['z', 'w'], {}), '(z, w)\n', (1429, 1435), True, 'import tensorflow as tf\n'), ((1475, 1490), 'tensorflow.matmul', 'tf.matmul', (['z', 'w'], {}), '(z, w)\n', (1484, 1490), True, 'import tensorflow as tf\n'), ((874, 896), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (890, 896), False, 'import os\n'), ((1820, 1842), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1836, 1842), False, 'import os\n')]
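The transpose helpers at the end of the module above are fixed axis permutations: `np_transform_tensor_to_image` swaps the two spatial axes of an NHWC batch, and the `tf` variants additionally flip one spatial axis. (As an aside, note the module defines `load_weights` twice — the second definition shadows the first — and calls `lrelu` without defining or importing it, so the "lrelu" branch of `load_z_mapping_function` would raise `NameError` as written.) A small NumPy check of the permutation:

import numpy as np

def np_transform_tensor_to_image(tensor):
    # Swap the H and W axes of an NHWC batch, as in the helper above.
    return np.transpose(tensor, [0, 2, 1, 3])

batch = np.arange(2 * 3 * 4 * 1).reshape(2, 3, 4, 1)  # (N, H, W, C)
out = np_transform_tensor_to_image(batch)
print(batch.shape, "->", out.shape)  # (2, 3, 4, 1) -> (2, 4, 3, 1)
assert np.array_equal(out[0, :, :, 0], batch[0, :, :, 0].T)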
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import matplotlib.pyplot as plt

tf.compat.v1.disable_eager_execution()

def plotImages(images_arr):
    fig, axes = plt.subplots(1, 5, figsize=(15, 15))
    axes = axes.flatten()
    for img, ax in zip(images_arr, axes):
        ax.imshow(img)
        ax.axis('off')
    plt.tight_layout()
    plt.show()

_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')

train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')

train_cats_dir = os.path.join(train_dir, 'cats')  # directory with our training cat pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')  # directory with our training dog pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')  # directory with our validation cat pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')  # directory with our validation dog pictures

# Total number of training and validation images
num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))

total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val

# Setting global variables so we don't have to keep changing them in functions
batch_size = 128
epochs = 15
IMG_HEIGHT = 150
IMG_WIDTH = 150

# Rescaling tensor values to values between 0 and 1
train_image_generator = ImageDataGenerator(rescale=1./255)
validation_image_generator = ImageDataGenerator(rescale=1./255)

train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size, directory=train_dir, shuffle=True, target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='binary')
val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size, directory=validation_dir, target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='binary')

sample_training_images, _ = next(train_data_gen)
#plotImages(sample_training_images[:5])

model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    MaxPooling2D(),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1)
])

model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])

model.summary()

history = model.fit(
    train_data_gen,
    steps_per_epoch=total_train // batch_size,
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=total_val // batch_size,
    verbose=2
)

model.save('netron_model_CNN.h5')

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(epochs)

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy', color='tab:orange')
plt.plot(epochs_range, val_acc, label='Validation Accuracy', color='m')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss', color='tab:orange')
plt.plot(epochs_range, val_loss, label='Validation Loss', color='m')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
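# --- Illustrative extension (not part of the original script) ---
# Dropout is imported above but never used. A minimal sketch, assuming the
# same input pipeline, of how Dropout layers could be slotted in after the
# pooling stages to curb the overfitting the curves above typically show.
# The 0.2 rate is an illustrative guess, not a tuned value.
model_with_dropout = Sequential([
    Conv2D(16, 3, padding='same', activation='relu',
           input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    MaxPooling2D(),
    Dropout(0.2),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Dropout(0.2),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1)
])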
[ "matplotlib.pyplot.title", "tensorflow.keras.preprocessing.image.ImageDataGenerator", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Dense", "tensorflow.compat.v1.disable_eager_execution", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "os.path.join", "tensorflow.keras.layers.Flatten", "os.path.dirname", "tensorflow.keras.utils.get_file", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "matplotlib.pyplot.legend", "os.listdir", "matplotlib.pyplot.subplot", "tensorflow.keras.layers.Conv2D", "matplotlib.pyplot.plot", "tensorflow.keras.losses.BinaryCrossentropy" ]
[((335, 373), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (371, 373), True, 'import tensorflow as tf\n'), ((721, 792), 'tensorflow.keras.utils.get_file', 'tf.keras.utils.get_file', (['"""cats_and_dogs.zip"""'], {'origin': '_URL', 'extract': '(True)'}), "('cats_and_dogs.zip', origin=_URL, extract=True)\n", (744, 792), True, 'import tensorflow as tf\n'), ((887, 914), 'os.path.join', 'os.path.join', (['PATH', '"""train"""'], {}), "(PATH, 'train')\n", (899, 914), False, 'import os\n'), ((933, 965), 'os.path.join', 'os.path.join', (['PATH', '"""validation"""'], {}), "(PATH, 'validation')\n", (945, 965), False, 'import os\n'), ((986, 1017), 'os.path.join', 'os.path.join', (['train_dir', '"""cats"""'], {}), "(train_dir, 'cats')\n", (998, 1017), False, 'import os\n'), ((1080, 1111), 'os.path.join', 'os.path.join', (['train_dir', '"""dogs"""'], {}), "(train_dir, 'dogs')\n", (1092, 1111), False, 'import os\n'), ((1179, 1215), 'os.path.join', 'os.path.join', (['validation_dir', '"""cats"""'], {}), "(validation_dir, 'cats')\n", (1191, 1215), False, 'import os\n'), ((1285, 1321), 'os.path.join', 'os.path.join', (['validation_dir', '"""dogs"""'], {}), "(validation_dir, 'dogs')\n", (1297, 1321), False, 'import os\n'), ((1929, 1966), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (1947, 1966), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((1994, 2031), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (2012, 2031), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((3867, 3893), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (3877, 3893), True, 'import matplotlib.pyplot as plt\n'), ((3895, 3915), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (3906, 3915), True, 'import matplotlib.pyplot as plt\n'), ((3917, 3991), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'acc'], {'label': '"""Training Accuracy"""', 'color': '"""tab:orange"""'}), "(epochs_range, acc, label='Training Accuracy', color='tab:orange')\n", (3925, 3991), True, 'import matplotlib.pyplot as plt\n'), ((3992, 4063), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_acc'], {'label': '"""Validation Accuracy"""', 'color': '"""m"""'}), "(epochs_range, val_acc, label='Validation Accuracy', color='m')\n", (4000, 4063), True, 'import matplotlib.pyplot as plt\n'), ((4064, 4093), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (4074, 4093), True, 'import matplotlib.pyplot as plt\n'), ((4095, 4140), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Accuracy"""'], {}), "('Training and Validation Accuracy')\n", (4104, 4140), True, 'import matplotlib.pyplot as plt\n'), ((4144, 4164), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (4155, 4164), True, 'import matplotlib.pyplot as plt\n'), ((4166, 4237), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'loss'], {'label': '"""Training Loss"""', 'color': '"""tab:orange"""'}), "(epochs_range, loss, label='Training Loss', color='tab:orange')\n", (4174, 4237), True, 'import matplotlib.pyplot as plt\n'), ((4238, 4306), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 
'val_loss'], {'label': '"""Validation Loss"""', 'color': '"""m"""'}), "(epochs_range, val_loss, label='Validation Loss', color='m')\n", (4246, 4306), True, 'import matplotlib.pyplot as plt\n'), ((4307, 4336), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (4317, 4336), True, 'import matplotlib.pyplot as plt\n'), ((4338, 4379), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Loss"""'], {}), "('Training and Validation Loss')\n", (4347, 4379), True, 'import matplotlib.pyplot as plt\n'), ((4381, 4391), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4389, 4391), True, 'import matplotlib.pyplot as plt\n'), ((422, 458), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(5)'], {'figsize': '(15, 15)'}), '(1, 5, figsize=(15, 15))\n', (434, 458), True, 'import matplotlib.pyplot as plt\n'), ((582, 600), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (598, 600), True, 'import matplotlib.pyplot as plt\n'), ((606, 616), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (614, 616), True, 'import matplotlib.pyplot as plt\n'), ((816, 844), 'os.path.dirname', 'os.path.dirname', (['path_to_zip'], {}), '(path_to_zip)\n', (831, 844), False, 'import os\n'), ((1437, 1463), 'os.listdir', 'os.listdir', (['train_cats_dir'], {}), '(train_cats_dir)\n', (1447, 1463), False, 'import os\n'), ((1484, 1510), 'os.listdir', 'os.listdir', (['train_dogs_dir'], {}), '(train_dogs_dir)\n', (1494, 1510), False, 'import os\n'), ((1534, 1565), 'os.listdir', 'os.listdir', (['validation_cats_dir'], {}), '(validation_cats_dir)\n', (1544, 1565), False, 'import os\n'), ((1587, 1618), 'os.listdir', 'os.listdir', (['validation_dogs_dir'], {}), '(validation_dogs_dir)\n', (1597, 1618), False, 'import os\n'), ((2934, 3026), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'input_shape': '(IMG_HEIGHT, IMG_WIDTH, 3)'}), "(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT,\n IMG_WIDTH, 3))\n", (2940, 3026), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3029, 3043), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (3041, 3043), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3050, 3098), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(32, 3, padding='same', activation='relu')\n", (3056, 3098), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3105, 3119), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (3117, 3119), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3126, 3174), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, 3, padding='same', activation='relu')\n", (3132, 3174), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3181, 3195), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (3193, 3195), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3202, 3211), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3209, 3211), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), 
((3218, 3247), 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (3223, 3247), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3254, 3262), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (3259, 3262), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3322, 3374), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (3356, 3374), True, 'import tensorflow as tf\n')]
# Copyright 2019 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import torch from torch import nn from torch.nn import functional as F class AE(nn.Module): def __init__(self, zsize, layer_count=3, channels=1): super(AE, self).__init__() d = 128 self.d = d self.zsize = zsize self.layer_count = layer_count mul = 1 inputs = channels for i in range(self.layer_count): setattr(self, "conv%d" % (i + 1), nn.Conv2d(inputs, d * mul, 4, 2, 1)) setattr(self, "conv%d_bn" % (i + 1), nn.BatchNorm2d(d * mul)) inputs = d * mul mul *= 2 self.d_max = inputs self.fc1 = nn.Linear(inputs * 4 * 4, zsize) #self.fc2 = nn.Linear(inputs * 4 * 4, zsize) self.d1 = nn.Linear(zsize, inputs * 4 * 4) mul = inputs // d // 2 for i in range(1, self.layer_count): setattr(self, "deconv%d" % (i + 1), nn.ConvTranspose2d(inputs, d * mul, 4, 2, 1)) setattr(self, "deconv%d_bn" % (i + 1), nn.BatchNorm2d(d * mul)) inputs = d * mul mul //= 2 setattr(self, "deconv%d" % (self.layer_count + 1), nn.ConvTranspose2d(inputs, channels, 4, 2, 1)) def encode(self, x): for i in range(self.layer_count): x = F.relu(getattr(self, "conv%d_bn" % (i + 1))(getattr(self, "conv%d" % (i + 1))(x))) x = x.view(x.shape[0], self.d_max * 4 * 4) enc = self.fc1(x) return enc # def reparameterize(self, mu, logvar): # if self.training: # std = torch.exp(0.5 * logvar) # eps = torch.randn_like(std) # return eps.mul(std).add_(mu) # else: # return mu def decode(self, x): x = x.view(x.shape[0], self.zsize) x = self.d1(x) x = x.view(x.shape[0], self.d_max, 4, 4) #x = self.deconv1_bn(x) x = F.leaky_relu(x, 0.2) for i in range(1, self.layer_count): x = F.leaky_relu(getattr(self, "deconv%d_bn" % (i + 1))(getattr(self, "deconv%d" % (i + 1))(x)), 0.2) x = torch.sigmoid(getattr(self, "deconv%d" % (self.layer_count + 1))(x)) return x def forward(self, x): enc = self.encode(x) # mu = mu.squeeze() # logvar = logvar.squeeze() #z = self.reparameterize(mu, logvar) return self.decode(enc.view(-1, self.zsize, 1, 1)) def weight_init(self, mean, std): for m in self._modules: normal_init(self._modules[m], mean, std) def normal_init(m, mean, std): if isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d): m.weight.data.normal_(mean, std) m.bias.data.zero_()
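# --- Usage sketch (assumption: not part of the original module) ---
# Shapes: with layer_count=3 the encoder halves the spatial size three
# times down to the 4x4 feature map fc1 expects, so 32x32 inputs fit.
if __name__ == "__main__":
    ae = AE(zsize=128, layer_count=3, channels=1)
    ae.weight_init(mean=0.0, std=0.02)
    x = torch.randn(4, 1, 32, 32)   # batch of 4 single-channel 32x32 images
    recon = ae(x)
    assert recon.shape == x.shape    # decoder mirrors the encoder exactly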
[ "torch.nn.ConvTranspose2d", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "torch.nn.functional.leaky_relu", "torch.nn.Linear" ]
[((1283, 1315), 'torch.nn.Linear', 'nn.Linear', (['(inputs * 4 * 4)', 'zsize'], {}), '(inputs * 4 * 4, zsize)\n', (1292, 1315), False, 'from torch import nn\n'), ((1388, 1420), 'torch.nn.Linear', 'nn.Linear', (['zsize', '(inputs * 4 * 4)'], {}), '(zsize, inputs * 4 * 4)\n', (1397, 1420), False, 'from torch import nn\n'), ((2519, 2539), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', '(0.2)'], {}), '(x, 0.2)\n', (2531, 2539), True, 'from torch.nn import functional as F\n'), ((1780, 1825), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['inputs', 'channels', '(4)', '(2)', '(1)'], {}), '(inputs, channels, 4, 2, 1)\n', (1798, 1825), False, 'from torch import nn\n'), ((1073, 1108), 'torch.nn.Conv2d', 'nn.Conv2d', (['inputs', '(d * mul)', '(4)', '(2)', '(1)'], {}), '(inputs, d * mul, 4, 2, 1)\n', (1082, 1108), False, 'from torch import nn\n'), ((1159, 1182), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(d * mul)'], {}), '(d * mul)\n', (1173, 1182), False, 'from torch import nn\n'), ((1547, 1591), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['inputs', '(d * mul)', '(4)', '(2)', '(1)'], {}), '(inputs, d * mul, 4, 2, 1)\n', (1565, 1591), False, 'from torch import nn\n'), ((1644, 1667), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(d * mul)'], {}), '(d * mul)\n', (1658, 1667), False, 'from torch import nn\n')]
import copy import pytest import numpy as np from cotk.dataloader import LanguageGeneration, MSCOCO from cotk.metric import MetricBase from cotk.wordvector.wordvector import WordVector from cotk.wordvector.gloves import Glove import logging def setup_module(): import random random.seed(0) import numpy as np np.random.seed(0) class TestWordVector(): def base_test_init(self, dl): assert isinstance(dl, WordVector) with pytest.raises(Exception): WordVector.load(None, None, None) WordVector.get_all_subclasses() assert WordVector.load_class('Glove') == Glove assert WordVector.load_class('not_subclass') == None def base_test_load(self, dl): vocab_list = ['the', 'of'] n_dims = 300 wordvec = dl.load(n_dims, vocab_list) assert isinstance(wordvec, np.ndarray) assert wordvec.shape == (len(vocab_list), n_dims) print(wordvec[1]) assert wordvec[1][0] == -0.076947 vocab_list = ['the', 'word_not_exist'] n_dims = 300 wordvec = dl.load(n_dims, vocab_list) assert isinstance(wordvec, np.ndarray) assert wordvec.shape == (len(vocab_list), n_dims) assert wordvec[0][0] == 0.04656 @pytest.fixture def load_glove(): def _load_glove(): return Glove("./tests/wordvector/dummy_glove") return _load_glove class TestGlove(TestWordVector): def test_init(self, load_glove): super().base_test_init(load_glove()) def test_load(self, load_glove): super().base_test_load(load_glove())
[ "numpy.random.seed", "cotk.wordvector.wordvector.WordVector.load_class", "pytest.raises", "cotk.wordvector.wordvector.WordVector.get_all_subclasses", "random.seed", "cotk.wordvector.gloves.Glove", "cotk.wordvector.wordvector.WordVector.load" ]
[((279, 293), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (290, 293), False, 'import random\n'), ((315, 332), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (329, 332), True, 'import numpy as np\n'), ((497, 528), 'cotk.wordvector.wordvector.WordVector.get_all_subclasses', 'WordVector.get_all_subclasses', ([], {}), '()\n', (526, 528), False, 'from cotk.wordvector.wordvector import WordVector\n'), ((1187, 1226), 'cotk.wordvector.gloves.Glove', 'Glove', (['"""./tests/wordvector/dummy_glove"""'], {}), "('./tests/wordvector/dummy_glove')\n", (1192, 1226), False, 'from cotk.wordvector.gloves import Glove\n'), ((432, 456), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (445, 456), False, 'import pytest\n'), ((461, 494), 'cotk.wordvector.wordvector.WordVector.load', 'WordVector.load', (['None', 'None', 'None'], {}), '(None, None, None)\n', (476, 494), False, 'from cotk.wordvector.wordvector import WordVector\n'), ((538, 568), 'cotk.wordvector.wordvector.WordVector.load_class', 'WordVector.load_class', (['"""Glove"""'], {}), "('Glove')\n", (559, 568), False, 'from cotk.wordvector.wordvector import WordVector\n'), ((587, 624), 'cotk.wordvector.wordvector.WordVector.load_class', 'WordVector.load_class', (['"""not_subclass"""'], {}), "('not_subclass')\n", (608, 624), False, 'from cotk.wordvector.wordvector import WordVector\n')]
#!/usr/bin/env python import inspect import time import kottos import kottos.modbus from kottos.modbus.client import Client from kottos.modbus.registers import MNS_REGISTER_TABLE c = Client("192.168.1.90", 502, MNS_REGISTER_TABLE) i = 0 while i < 10: results = c.scan() for (k, v) in results.items(): print('{}: {}'.format(k, v)) time.sleep(1) i = i + 1 print('Terminating')
[ "kottos.modbus.client.Client", "time.sleep" ]
[((184, 231), 'kottos.modbus.client.Client', 'Client', (['"""192.168.1.90"""', '(502)', 'MNS_REGISTER_TABLE'], {}), "('192.168.1.90', 502, MNS_REGISTER_TABLE)\n", (190, 231), False, 'from kottos.modbus.client import Client\n'), ((354, 367), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (364, 367), False, 'import time\n')]
#!/usr/bin/env python
# ---------------------------------------------------------
# IOU Tracker
# Copyright (c) 2017 TU Berlin, Communication Systems Group
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# ---------------------------------------------------------

from time import time
import argparse

from iou_tracker import track_iou
from util import load_mot, save_to_csv


def main(args):
    with open(args.seqmap) as fd:
        seqs = [line.rstrip('\n') for line in fd]

    for idx, seq in enumerate(seqs):
        if seq == "name" or seq == "":
            continue
        else:
            if "DPM" in seq:
                sigma_l = -0.5
                sigma_h = 0.5
                sigma_iou = 0.4
                t_min = 4
            elif "FRCNN" in seq:
                sigma_l = 0.0
                sigma_h = 0.9
                sigma_iou = 0.3
                t_min = 3
            elif "SDP" in seq:
                sigma_l = 0.4
                sigma_h = 0.5
                sigma_iou = 0.2
                t_min = 2
            else:
                print("No detector name found, this could happen with the wrong seqmap file. "
                      "Please use c10-train.txt, c10-test.txt or c10-all.txt")
                exit()

            det_path = args.benchmark_dir + "/" + seq + "/det/det.txt"
            out_path = args.res_dir + "/" + seq + ".txt"

            detections = load_mot(det_path)

            start = time()
            tracks = track_iou(detections, sigma_l, sigma_h, sigma_iou, t_min)
            end = time()

            num_frames = len(detections)
            print("finished " + seq + " at " + str(int(num_frames / (end - start))) + " fps!")

            save_to_csv(out_path, tracks)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="IOU Tracker MOT17 demo script. The best parameters for each detector "
                                                 "are hardcoded.")
    parser.add_argument('-m', '--seqmap', type=str, required=True,
                        help="full path to the seqmap file to evaluate")
    parser.add_argument('-o', '--res_dir', type=str, required=True,
                        help="path to the results directory")
    parser.add_argument('-b', '--benchmark_dir', type=str, required=True,
                        help="path to the sequence directory")
    args = parser.parse_args()

    main(args)
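# --- Hypothetical invocation (paths and script name are assumptions, not
# taken from this repository) ---
#
#   python demo_mot17.py -m c10-train.txt -o results/iou -b /data/MOT17/train
#
# Each sequence named in the seqmap then produces one <seq>.txt file in the
# -o directory, with the detector-specific thresholds selected by substring
# match on the sequence name ("DPM", "FRCNN" or "SDP").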
[ "util.save_to_csv", "argparse.ArgumentParser", "time.time", "iou_tracker.track_iou", "util.load_mot" ]
[((1821, 1953), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""IOU Tracker MOT17 demo script. The best parameters for each detector are hardcoded."""'}), "(description=\n 'IOU Tracker MOT17 demo script. The best parameters for each detector are hardcoded.'\n )\n", (1844, 1953), False, 'import argparse\n'), ((1448, 1466), 'util.load_mot', 'load_mot', (['det_path'], {}), '(det_path)\n', (1456, 1466), False, 'from util import load_mot, save_to_csv\n'), ((1488, 1494), 'time.time', 'time', ([], {}), '()\n', (1492, 1494), False, 'from time import time\n'), ((1516, 1573), 'iou_tracker.track_iou', 'track_iou', (['detections', 'sigma_l', 'sigma_h', 'sigma_iou', 't_min'], {}), '(detections, sigma_l, sigma_h, sigma_iou, t_min)\n', (1525, 1573), False, 'from iou_tracker import track_iou\n'), ((1592, 1598), 'time.time', 'time', ([], {}), '()\n', (1596, 1598), False, 'from time import time\n'), ((1749, 1778), 'util.save_to_csv', 'save_to_csv', (['out_path', 'tracks'], {}), '(out_path, tracks)\n', (1760, 1778), False, 'from util import load_mot, save_to_csv\n')]
#!/usr/bin/env python # This software was developed at the National Institute of Standards # and Technology by employees of the Federal Government in the course # of their official duties. Pursuant to title 17 Section 105 of the # United States Code this software is not subject to copyright # protection and is in the public domain. NIST assumes no # responsibility whatsoever for its use by other parties, and makes # no guarantees, expressed or implied, about its quality, # reliability, or any other characteristic. # # We would appreciate acknowledgement if the software is used. __version__ = "0.1.0" import logging import os _logger = logging.getLogger(os.path.basename(__file__)) import Objects def main(): fileobject_count = 0 for (event, obj) in Objects.iterparse(args.in_dfxml): if event != "end": continue if not isinstance(obj, Objects.FileObject): continue fileobject_count += 1 if "new" in obj.annos: raise ValueError("A new file was created in translation.") if "deleted" in obj.annos: raise ValueError("A file was lost in translation.") if len(obj.diffs) > 0: for diff in obj.diffs: _logger.info("%s: %r -> %r." % (diff, getattr(obj, diff), getattr(obj.original_fileobject, diff))) raise ValueError("Information changed translating between DFXML, CASE, and back.") if fileobject_count == 0: raise ValueError("No files emitted.") if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("-d", "--debug", action="store_true") parser.add_argument("in_dfxml") args = parser.parse_args() logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) main()
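# --- Hypothetical invocation (the script and DFXML file names are
# assumptions) ---
#
#   python check_roundtrip.py --debug roundtrip_output.dfxml
#
# The run raises ValueError (non-zero exit) if any file was added, lost, or
# had one of its compared properties change across the DFXML->CASE->DFXML
# round trip, and logs each differing property via the diffs loop above.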
[ "Objects.iterparse", "argparse.ArgumentParser", "logging.basicConfig", "os.path.basename" ]
[((664, 690), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (680, 690), False, 'import os\n'), ((770, 802), 'Objects.iterparse', 'Objects.iterparse', (['args.in_dfxml'], {}), '(args.in_dfxml)\n', (787, 802), False, 'import Objects\n'), ((1569, 1594), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1592, 1594), False, 'import argparse\n'), ((1728, 1800), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '(logging.DEBUG if args.debug else logging.INFO)'}), '(level=logging.DEBUG if args.debug else logging.INFO)\n', (1747, 1800), False, 'import logging\n')]
import asyncio
from typing import Any
from typing import AsyncGenerator
from typing import Callable
from typing import Dict
from typing import List

import gidgethub
from gidgethub.aiohttp import GitHubAPI

# List of mutually exclusive status labels
ISSUE_STATUS_LABELS = {
    "needs_reviewer",
    "awaiting_reviewer",
    "awaiting_changes",
    "needs_merger",
    "awaiting_merger",
}


def rate_limit_retry(wait_seconds: int) -> Callable[[Callable], Callable]:
    """Create a decorator that retries a request on rate limiting."""

    def decorator(function: Callable) -> Callable:
        async def wrapped(*args: Any, **kwargs: Any) -> Any:
            try:
                return await function(*args, **kwargs)
            except gidgethub.RateLimitExceeded:
                print(f"Rate limit exceeded. Retrying in {wait_seconds} seconds.")
                await asyncio.sleep(wait_seconds)
                return await wrapped(*args, **kwargs)

        return wrapped

    return decorator


async def request_review(
    pull_url: str, gh_login: str, gh: GitHubAPI, token: str
) -> None:
    """Request a review on a pull request by `gh_login`."""
    url = f"{pull_url}/requested_reviewers"
    await gh.post(url, data={"reviewers": [gh_login]}, oauth_token=token)


async def post_comment(gh: GitHubAPI, token: str, comments_url: str, body: str) -> None:
    """Post a new comment."""
    await gh.post(
        comments_url,
        data={"body": body},
        oauth_token=token,
    )


async def request_review_fallback(
    gh: GitHubAPI, token: str, pull_url: str, comments_url: str, gh_login: str
) -> None:
    """Request a review on a pull request, falling back to a comment.

    Attempts to request a review and @mentions the reviewer if GitHub doesn't
    allow us to do that (since the user is not a collaborator on the repo).
    """
    try:
        await request_review(pull_url, gh_login, gh, token)
    except gidgethub.InvalidField:
        print("Falling back to @mention.")
        await post_comment(gh, token, comments_url, f"@{gh_login} please review.")


@rate_limit_retry(60)  # rate limited at 30/minute
async def num_search_results(
    gh: GitHubAPI,
    token: str,
    query_parameters: List[str],
) -> int:
    """Search github issues and pull requests and return the number of results."""
    query = "+".join(query_parameters)
    result = await gh.getitem(
        f"https://api.github.com/search/issues?q={query}", oauth_token=token
    )
    return result["total_count"]


@rate_limit_retry(60)  # rate limited at 30/minute
def search_issues(
    gh: GitHubAPI,
    token: str,
    query_parameters: List[str],
) -> AsyncGenerator[Dict[str, Any], None]:
    """Search github issues and pull requests.

    As documented here:
    https://developer.github.com/v3/search/#search-issues-and-pull-requests

    A common query string is likely "repo:NixOS/nixpkgs".

    Returns an async iterator of issues, automatically handling pagination.
    """
    query = "+".join(query_parameters)
    return gh.getiter(
        f"https://api.github.com/search/issues?q={query}", oauth_token=token
    )


async def get_installation_repositories(
    gh: GitHubAPI, token: str
) -> List[Dict[str, Any]]:
    """Get the repositories the current installation is valid for.

    As documented here:
    https://developer.github.com/v3/apps/installations/#list-repositories-accessible-to-the-app-installation

    Infers the installation from the token.
    """
    # This should be getiter, but for some reason that doesn't work (returns an
    # iterator over the items in the json dict such as "total_count") and it's
    # unlikely enough that pagination is an issue here.
result = await gh.getitem( f"https://api.github.com/installation/repositories", accept="application/vnd.github.machine-man-preview+json", oauth_token=token, ) return result["repositories"] async def set_issue_status( issue: Dict[str, Any], status: str, gh: GitHubAPI, token: str ) -> None: """Sets the status of an issue while resetting other status labels""" assert status in ISSUE_STATUS_LABELS # depending on whether the issue is actually a pull request issue_url = issue.get("issue_url", issue["url"]) # Labels are mutually exclusive, so clear other labels first. labels = issue["labels"] label_names = {label["name"] for label in labels} # should never be more than one, but better to make it a set anyway status_labels = label_names.intersection(ISSUE_STATUS_LABELS) for label in status_labels: if label == status: # Don't touch the label we're supposed to set. continue await gh.delete(issue_url + "/labels/" + label, oauth_token=token) if status not in status_labels: await gh.post( issue_url + "/labels", data={"labels": [status]}, oauth_token=token, )
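# --- Usage sketch for the retry decorator above (hypothetical helper; gh
# and token come from the caller's gidgethub setup) ---
@rate_limit_retry(60)
async def count_open_prs(gh: GitHubAPI, token: str) -> int:
    """Count open nixpkgs PRs, sleeping 60 s and retrying on rate limits."""
    return await num_search_results(
        gh, token, ["repo:NixOS/nixpkgs", "is:pr", "is:open"]
    )
# Note the wrapper retries without a cap: a persistently rate-limited call
# keeps looping until it succeeds.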
[ "asyncio.sleep" ]
[((874, 901), 'asyncio.sleep', 'asyncio.sleep', (['wait_seconds'], {}), '(wait_seconds)\n', (887, 901), False, 'import asyncio\n')]
import datetime import pytest from deploy.pretty_printing import pprint_date @pytest.mark.parametrize("date_obj, now, expected_str", [ ("2020-01-01 12:00:00", "2020-01-01 12:00:01", "just now"), ("2020-01-01 12:00:00", "2020-01-01 12:02:00", "just now"), ("2020-01-01 12:00:00", "2020-01-01 12:02:02", "today @ 12:00 (2 min ago)"), ("2020-01-01 12:00:00", "2020-01-01 15:00:00", "today @ 12:00"), ("2020-01-01 23:59:00", "2020-01-02 00:02:00", "yesterday @ 23:59 (3 min ago)"), ("2020-01-01 12:00:00", "2020-01-02 12:00:00", "yesterday @ 12:00"), ("2019-01-01 12:00:00", "2020-01-02 12:00:00", "Tue 1 January 2019 @ 12:00"), ("2019-01-01 01:00:00", "2020-01-02 12:00:00", "Tue 1 January 2019 @ 01:00"), ("2019-01-20 12:00:00", "2020-01-02 12:00:00", "Sun 20 January 2019 @ 12:00"), ]) def test_pprint_date(date_obj, now, expected_str): date_obj = datetime.datetime.strptime(date_obj, "%Y-%m-%d %H:%M:%S") now = datetime.datetime.strptime(now, "%Y-%m-%d %H:%M:%S") assert pprint_date(date_obj, now=now) == expected_str
[ "pytest.mark.parametrize", "datetime.datetime.strptime", "deploy.pretty_printing.pprint_date" ]
[((82, 841), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""date_obj, now, expected_str"""', "[('2020-01-01 12:00:00', '2020-01-01 12:00:01', 'just now'), (\n '2020-01-01 12:00:00', '2020-01-01 12:02:00', 'just now'), (\n '2020-01-01 12:00:00', '2020-01-01 12:02:02',\n 'today @ 12:00 (2 min ago)'), ('2020-01-01 12:00:00',\n '2020-01-01 15:00:00', 'today @ 12:00'), ('2020-01-01 23:59:00',\n '2020-01-02 00:02:00', 'yesterday @ 23:59 (3 min ago)'), (\n '2020-01-01 12:00:00', '2020-01-02 12:00:00', 'yesterday @ 12:00'), (\n '2019-01-01 12:00:00', '2020-01-02 12:00:00',\n 'Tue 1 January 2019 @ 12:00'), ('2019-01-01 01:00:00',\n '2020-01-02 12:00:00', 'Tue 1 January 2019 @ 01:00'), (\n '2019-01-20 12:00:00', '2020-01-02 12:00:00',\n 'Sun 20 January 2019 @ 12:00')]"], {}), "('date_obj, now, expected_str', [(\n '2020-01-01 12:00:00', '2020-01-01 12:00:01', 'just now'), (\n '2020-01-01 12:00:00', '2020-01-01 12:02:00', 'just now'), (\n '2020-01-01 12:00:00', '2020-01-01 12:02:02',\n 'today @ 12:00 (2 min ago)'), ('2020-01-01 12:00:00',\n '2020-01-01 15:00:00', 'today @ 12:00'), ('2020-01-01 23:59:00',\n '2020-01-02 00:02:00', 'yesterday @ 23:59 (3 min ago)'), (\n '2020-01-01 12:00:00', '2020-01-02 12:00:00', 'yesterday @ 12:00'), (\n '2019-01-01 12:00:00', '2020-01-02 12:00:00',\n 'Tue 1 January 2019 @ 12:00'), ('2019-01-01 01:00:00',\n '2020-01-02 12:00:00', 'Tue 1 January 2019 @ 01:00'), (\n '2019-01-20 12:00:00', '2020-01-02 12:00:00',\n 'Sun 20 January 2019 @ 12:00')])\n", (105, 841), False, 'import pytest\n'), ((893, 950), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date_obj', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(date_obj, '%Y-%m-%d %H:%M:%S')\n", (919, 950), False, 'import datetime\n'), ((961, 1013), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['now', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(now, '%Y-%m-%d %H:%M:%S')\n", (987, 1013), False, 'import datetime\n'), ((1026, 1056), 'deploy.pretty_printing.pprint_date', 'pprint_date', (['date_obj'], {'now': 'now'}), '(date_obj, now=now)\n', (1037, 1056), False, 'from deploy.pretty_printing import pprint_date\n')]
# # mongotools: Database Migration Utility # Author: <NAME> # import sys from importdb import import_all from exportdb import export_all from config import CONFIG if len(sys.argv) < 2: print("USAGE: python mongotools.py option[import,export]") sys.exit(0) option = sys.argv[1] if option == 'import': print("IMPORTING FILES FROM", CONFIG['DATA_PATH']+"/import") import_all() elif option == 'export': print("EXPORTING FILES TO", CONFIG['DATA_PATH']+"/export") export_all()
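# --- Usage (restating the script's own USAGE line) ---
#   python mongotools.py import   # reads from CONFIG['DATA_PATH']/import
#   python mongotools.py export   # writes to CONFIG['DATA_PATH']/export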
[ "importdb.import_all", "exportdb.export_all", "sys.exit" ]
[((254, 265), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (262, 265), False, 'import sys\n'), ((381, 393), 'importdb.import_all', 'import_all', ([], {}), '()\n', (391, 393), False, 'from importdb import import_all\n'), ((486, 498), 'exportdb.export_all', 'export_all', ([], {}), '()\n', (496, 498), False, 'from exportdb import export_all\n')]
""" Main test configuration, used to fix fixture loading """ import pytest from pytest_ansible_docker import AnsibleDockerTestinfraBackend @pytest.fixture def TestinfraBackend(request): """ Entry point to boot and stop a docker image. """ return AnsibleDockerTestinfraBackend(request)
[ "pytest_ansible_docker.AnsibleDockerTestinfraBackend" ]
[((267, 305), 'pytest_ansible_docker.AnsibleDockerTestinfraBackend', 'AnsibleDockerTestinfraBackend', (['request'], {}), '(request)\n', (296, 305), False, 'from pytest_ansible_docker import AnsibleDockerTestinfraBackend\n')]
""" CEASIOMpy: Conceptual Aircraft Design Software. Developed for CFS ENGINEERING, 1015 Lausanne, Switzerland Functions to create the dictionnary of geometric variables needed for the optimnization routine. Python version: >=3.6 | Author : <NAME> | Creation: 2020-03-24 | Last modification: 2020-06-02 TODO ---- * Expand the geometric parameters * Add constrains between the parameters to disable multiple modifications of the same geometric aspect of the plane """ # ============================================================================= # IMPORTS # ============================================================================= from sys import exit import numpy as np import ceasiompy.utils.apmfunctions as apmf import ceasiompy.utils.cpacsfunctions as cpsf import ceasiompy.CPACSUpdater.cpacsupdater as cpud from ceasiompy.utils.ceasiomlogger import get_logger log = get_logger(__file__.split('.')[0]) # ============================================================================= # GLOBALS # ============================================================================= # Contains the geometric design variables geom_var_dict = {} XPATH = 'None' # ============================================================================= # FUNCTIONS # ============================================================================= def add_am_to_dict(optim_var_dict, am_dict): """Add aeromap values to variable dictionary. All values are dupplicated to reach the same number of value than the aeromap parameters and coefficient. This is done to add the aeromap points that are not taken into account by the driver, but still computed in one iteration. Args: optim_var_dict (dct): Variable dictionary. am_dict (dct): Dictionary with the entire aeromap. Returns: None. """ # Take a variable from the optim dict to compute the length to add var_in_dict = list(optim_var_dict.keys())[0] am_length = int(len(am_dict['cl'][1])/len(optim_var_dict[var_in_dict][1])) log.info("Adding the whole aeromap to the dictionary") for name, infos in optim_var_dict.items(): if name not in apmf.XSTATES+apmf.COEF_LIST: # Calling a new list instance else the clear method will also clean l l = list(infos[1]) infos[1].clear() infos[1].extend(np.repeat(l, am_length)) for name, infos in am_dict.items(): optim_var_dict[name] = infos def update_am_dict(tixi, aeromap_uid, am_dict): """Save the aeromap results. Appends the new aeromap results to a dictionary. Args: tixi (tixi3 handle): TIXI handle of the CPACS file. aeromap_uid (str): uID of the aeromap in use. am_dict (dct): Contains the results of old aeromap calculations. Returns None. """ Coef = apmf.get_aeromap(tixi, aeromap_uid) d = Coef.to_dict() for name , infos in am_dict.items(): infos[1].extend(d[name]) def update_dict(tixi, optim_var_dict): """Update dictionnary after one iteration. The dictionary containing all the problem variables (obj, des, const) is updated with the new values from the resulting CPACS file after one run through the all the modules that are contained in one iteration of the routine. Args: tixi (tixi3 handle) : TIXI handle of the CPACS file optim_var_dict (dict) : Variable dictionary. Returns: None. """ for name, infos in optim_var_dict.items(): if infos[5] in ['', '-']: if tixi.checkElement(infos[4]): new_val = tixi.getDoubleElement(infos[4]) infos[1].append(new_val) def create_var(var_name, init_value, getcmd, setcmd, lim=0.2): """Add design variable to the dictionary. 
    Add the parameters of one variable to the dictionary, which are saved in
    a tuple as (Name, initial value, lower bound, upper bound, setcommand,
    getcommand).

    Args:
        var_name (str) : Name of the variable.
        init_value (float) : Initial value of the variable.
        getcmd (str) : Command to retrieve a value in the CPACS file.
        setcmd (str) : Command to change a value in the CPACS file.
        lim (float) : Percentage of the initial value to define the upper and
            lower limit : init_value*(1-lim) < init_value < init_value*(1+lim)
            The default is 0.2.

    Returns:
        None.

    """

    if init_value > 0:
        lower_bound = init_value*(1-lim)
        upper_bound = init_value*(1+lim)
    elif init_value < 0:
        lower_bound = init_value*(1+lim)
        upper_bound = init_value*(1-lim)
    else:
        lower_bound = -lim
        upper_bound = lim

    geom_var_dict[var_name] = (var_name, [init_value], lower_bound,
                               upper_bound, setcmd, getcmd)


def init_elem_param(sec_name, section, elem_nb, scmd):
    """Create wing section element variables.

    Add design variables and constraints relative to the wing section
    elements to the dictionary.

    Args:
        sec_name (str) : Name of the wing section
        section (handle) : Handle of the wing section
        elem_nb (int) : Number of section elements
        scmd (str) : Command to get the section handle

    Returns:
        None.

    """

    for enb in range(1, elem_nb+1):
        cmd = scmd + 'get_section_element({}).get_ctigl_section_element().'.format(enb)
        el_name = sec_name + "_el" + str(enb)
        element = section.get_section_element(enb).get_ctigl_section_element()

        var_name = el_name + "_width"
        init_width = element.get_width()
        getcmd = cmd+'get_width()'
        setcmd = cmd+'set_width({})'.format(var_name)
        create_var(var_name, init_width, getcmd, setcmd)


def init_sec_param(name, wing, sec_nb, wcmd):
    """Create wing section variables.

    Add design variables and constraints relative to the wing sections to
    the dictionary.

    Args:
        name (str) : Name of the wing
        wing (handle) : Handle of the wing
        sec_nb (int) : Number of wing elements
        wcmd (str) : Command to get the wing handle

    Returns:
        None.

    """

    for s in range(1, sec_nb+1):
        cmd = wcmd + 'get_section({}).'.format(s)
        sec_name = name + "_sec" + str(s)
        section = wing.get_section(s)

        var_name = sec_name + "_Yrotation"
        init_rot = section.get_rotation().y
        getcmd = cmd+'get_rotation().y'
        setcmd = cmd+'set_rotation(geometry.CTiglPoint(0,{},0))'.format(var_name)
        create_var(var_name, init_rot, getcmd, setcmd)

        elem_nb = section.get_section_element_count()
        if elem_nb:
            init_elem_param(sec_name, section, elem_nb, cmd)


def init_wing_param(aircraft, wing_nb):
    """Create wing variables.

    Add design variables and constraints relative to the wings to the
    dictionary.

    Args:
        aircraft (handle) : Handle of the aircraft
        wing_nb (int) : Number of wings

    Returns:
        None.
""" wings = aircraft.get_wings() for w in range(1, wing_nb+1): cmd = 'wings.get_wing({}).'.format(w) name = "wing" + str(w) wing = wings.get_wing(w) var_name = name+"_span" init_span = wing.get_wing_half_span() getcmd = cmd+'get_wing_half_span()' setcmd = cmd+'set_half_span_keep_ar({})'.format(var_name) # keep_area create_var(var_name, init_span, getcmd, setcmd) var_name = name + "_aspect_ratio" init_AR = wing.get_aspect_ratio() getcmd = cmd+'get_aspect_ratio()' setcmd = cmd+'set_arkeep_area({})'.format(var_name)#keep_ar create_var(var_name, init_AR, getcmd, setcmd) var_name = name + "_area" init_area = wing.get_surface_area()/2 getcmd = cmd+'get_surface_area()' setcmd = cmd+'set_area_keep_ar({})'.format(var_name)#keep_span create_var(var_name, init_area, getcmd, setcmd) var_name = name+"_sweep" init_sweep = wing.get_sweep() getcmd = cmd+'get_sweep()' setcmd = cmd+'set_sweep({})'.format(var_name) create_var(var_name, init_sweep, getcmd, setcmd) var_name = name + "_Yrotation" init_rot = wing.get_rotation().y getcmd = cmd+'get_rotation().y' setcmd = cmd+'set_rotation(geometry.CTiglPoint(0,{},0))'.format(var_name) create_var(var_name, init_rot, getcmd, setcmd) #A tester.... sec_nb = wing.get_section_count() if sec_nb: init_sec_param(name, wing, sec_nb, cmd) def init_fuse_param(aircraft, fuse_nb): """Create fuselage variable Add design variables and constrains relative to the aircraft fuselages to the dictionnary. Args: aircraft (handle) : Handle of the aircraft fuse_nb (int) : Number of fuselages Returns: None. """ for f in range(1, fuse_nb+1): name = "fuse" + str(f) fuselage = aircraft.get_fuselage(f) var_name = name+"_length" init_length = fuselage.get_length() getcmd = 'fuselage.get_length()' setcmd = 'fuselage.set_length({})'.format(var_name) create_var(var_name, init_length, getcmd, setcmd) var_name = name+"_width" init_width = fuselage.get_maximal_width() getcmd = 'fuselage.get_maximal_width()' setcmd = 'fuselage.set_max_width({})'.format(var_name) create_var(var_name, init_width, getcmd, setcmd) # Modify a specific section width fnb = fuselage.get_section_count() if not isinstance(fnb, int): for secnb in fnb: var_name = name + "_sec" + str(secnb) init_sec_width = fuselage.get_maximal_width() getcmd = 'fuselage.get_maximal_width()' setcmd = 'fuselage.set_max_width({})'.format(var_name) create_var(var_name, init_sec_width, getcmd, setcmd) def init_geom_var_dict(tixi): """Create design variable dictionary Return the dictionary of the design variables using the TIGL library. Add design variables and constrains relative to the aircraft fuselages to the dictionnary. Args: tixi (handle) : Handle of the CPACS file Returns: geom_var_dict (dict) : dictionary with the geometric parameters of the routine. """ tigl = cpsf.open_tigl(tixi) aircraft = cpud.get_aircraft(tigl) fuse_nb = aircraft.get_fuselage_count() if fuse_nb: init_fuse_param(aircraft, fuse_nb) wing_nb = aircraft.get_wing_count() if wing_nb: init_wing_param(aircraft, wing_nb) return geom_var_dict if __name__ == "__main__": log.info("Launching dictionnary.py programm...") log.info("Not a standalone programm. Nothing will be executed !") exit()
[ "ceasiompy.utils.apmfunctions.get_aeromap", "ceasiompy.CPACSUpdater.cpacsupdater.get_aircraft", "ceasiompy.utils.cpacsfunctions.open_tigl", "sys.exit", "numpy.repeat" ]
[((2873, 2908), 'ceasiompy.utils.apmfunctions.get_aeromap', 'apmf.get_aeromap', (['tixi', 'aeromap_uid'], {}), '(tixi, aeromap_uid)\n', (2889, 2908), True, 'import ceasiompy.utils.apmfunctions as apmf\n'), ((10398, 10418), 'ceasiompy.utils.cpacsfunctions.open_tigl', 'cpsf.open_tigl', (['tixi'], {}), '(tixi)\n', (10412, 10418), True, 'import ceasiompy.utils.cpacsfunctions as cpsf\n'), ((10434, 10457), 'ceasiompy.CPACSUpdater.cpacsupdater.get_aircraft', 'cpud.get_aircraft', (['tigl'], {}), '(tigl)\n', (10451, 10457), True, 'import ceasiompy.CPACSUpdater.cpacsupdater as cpud\n'), ((10846, 10852), 'sys.exit', 'exit', ([], {}), '()\n', (10850, 10852), False, 'from sys import exit\n'), ((2388, 2411), 'numpy.repeat', 'np.repeat', (['l', 'am_length'], {}), '(l, am_length)\n', (2397, 2411), True, 'import numpy as np\n')]
OntCversion = '2.0.0' from ontology.builtins import print from ontology.libont import bytes2hexstring, hexstring2bytes, elt_in def VaasAssert(expr): if not expr: raise Exception("AssertError") def Main(): a = b'\x01\xef\xab\xcd\x23\x45\xff\xfe\xef\xed\xdc\xba\xa9\xf9\xe9\x9a\x9f\x9e\x99\x8e\x00\x01\x02\x10\x11\x1a\xa1\xff\xf1\xf0\xf0' res = bytes2hexstring(a, 1) VaasAssert(res == '01EFABCD2345FFFEEFEDDCBAA9F9E99A9F9E998E00010210111AA1FFF1F0F0_ADD') res_b = hexstring2bytes(res) VaasAssert(res_b == a) res = bytes2hexstring(a, 0) VaasAssert(res == '01efabcd2345fffeefeddcbaa9f9e99a9f9e998e00010210111aa1fff1f0f0_ADD') res_b = hexstring2bytes(res) VaasAssert(res_b == a) a = b'\x1c\x7f\x04\x4f\x56\x78\x65\x6e\x36\x8c\x6e\x84\xf0\x48\xc6\xc3\xfd\x69\x5c\x14' res = bytes2hexstring(a, 1) VaasAssert(res == '1C7F044F5678656E368C6E84F048C6C3FD695C14') res_b = hexstring2bytes(res) VaasAssert(res_b == a) res = bytes2hexstring(a, 0) VaasAssert(res == '1c7f044f5678656e368c6e84f048c6c3fd695c14') res_b = hexstring2bytes(res) VaasAssert(res_b == a) print(res)
[ "ontology.libont.bytes2hexstring", "ontology.libont.hexstring2bytes", "ontology.builtins.print" ]
[((365, 386), 'ontology.libont.bytes2hexstring', 'bytes2hexstring', (['a', '(1)'], {}), '(a, 1)\n', (380, 386), False, 'from ontology.libont import bytes2hexstring, hexstring2bytes, elt_in\n'), ((491, 511), 'ontology.libont.hexstring2bytes', 'hexstring2bytes', (['res'], {}), '(res)\n', (506, 511), False, 'from ontology.libont import bytes2hexstring, hexstring2bytes, elt_in\n'), ((550, 571), 'ontology.libont.bytes2hexstring', 'bytes2hexstring', (['a', '(0)'], {}), '(a, 0)\n', (565, 571), False, 'from ontology.libont import bytes2hexstring, hexstring2bytes, elt_in\n'), ((676, 696), 'ontology.libont.hexstring2bytes', 'hexstring2bytes', (['res'], {}), '(res)\n', (691, 696), False, 'from ontology.libont import bytes2hexstring, hexstring2bytes, elt_in\n'), ((827, 848), 'ontology.libont.bytes2hexstring', 'bytes2hexstring', (['a', '(1)'], {}), '(a, 1)\n', (842, 848), False, 'from ontology.libont import bytes2hexstring, hexstring2bytes, elt_in\n'), ((927, 947), 'ontology.libont.hexstring2bytes', 'hexstring2bytes', (['res'], {}), '(res)\n', (942, 947), False, 'from ontology.libont import bytes2hexstring, hexstring2bytes, elt_in\n'), ((986, 1007), 'ontology.libont.bytes2hexstring', 'bytes2hexstring', (['a', '(0)'], {}), '(a, 0)\n', (1001, 1007), False, 'from ontology.libont import bytes2hexstring, hexstring2bytes, elt_in\n'), ((1086, 1106), 'ontology.libont.hexstring2bytes', 'hexstring2bytes', (['res'], {}), '(res)\n', (1101, 1106), False, 'from ontology.libont import bytes2hexstring, hexstring2bytes, elt_in\n'), ((1138, 1148), 'ontology.builtins.print', 'print', (['res'], {}), '(res)\n', (1143, 1148), False, 'from ontology.builtins import print\n')]
from typing import Tuple from PyQt5 import QtCore, QtOpenGL, QtWidgets, QtGui from moderngl_window.context.base import BaseWindow from moderngl_window.context.pyqt5.keys import Keys class Window(BaseWindow): """ A basic window implementation using PyQt5 with the goal of creating an OpenGL context and handle keyboard and mouse input. This window bypasses Qt's own event loop to make things as flexible as possible. If you need to use the event loop and are using other features in Qt as well, this example can still be useful as a reference when creating your own window. """ #: Name of the window name = "pyqt5" #: PyQt5 specific key constants keys = Keys # PyQt supports mode buttons, but we are limited by other libraries _mouse_button_map = { 1: 1, 2: 2, 4: 3, } def __init__(self, **kwargs): super().__init__(**kwargs) # Specify OpenGL context parameters gl = QtOpenGL.QGLFormat() gl.setVersion(self.gl_version[0], self.gl_version[1]) gl.setProfile(QtOpenGL.QGLFormat.CoreProfile) gl.setDepthBufferSize(24) gl.setDoubleBuffer(True) gl.setSwapInterval(1 if self.vsync else 0) # Configure multisampling if needed if self.samples > 1: gl.setSampleBuffers(True) gl.setSamples(int(self.samples)) # We need an application object, but we are bypassing the library's # internal event loop to avoid unnecessary work self._app = QtWidgets.QApplication([]) # Create the OpenGL widget self._widget = QtOpenGL.QGLWidget(gl) self.title = self._title # If fullscreen we change the window to match the desktop on the primary screen if self.fullscreen: rect = QtWidgets.QDesktopWidget().screenGeometry() self._width = rect.width() self._height = rect.height() self._buffer_width = rect.width() * self._widget.devicePixelRatio() self._buffer_height = rect.height() * self._widget.devicePixelRatio() if self.resizable: # Ensure a valid resize policy when window is resizable size_policy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding, ) self._widget.setSizePolicy(size_policy) self._widget.resize(self.width, self.height) else: self._widget.setFixedSize(self.width, self.height) # Center the window on the screen if in window mode if not self.fullscreen: center_window_position = ( self.position[0] - self.width / 2, self.position[1] - self.height / 2, ) self._widget.move(*center_window_position) # Needs to be set before show() self._widget.resizeGL = self.resize self.cursor = self._cursor if self.fullscreen: self._widget.showFullScreen() else: self._widget.show() # We want mouse position events self._widget.setMouseTracking(True) # Override event functions in qt self._widget.keyPressEvent = self.key_pressed_event self._widget.keyReleaseEvent = self.key_release_event self._widget.mouseMoveEvent = self.mouse_move_event self._widget.mousePressEvent = self.mouse_press_event self._widget.mouseReleaseEvent = self.mouse_release_event self._widget.wheelEvent = self.mouse_wheel_event self._widget.closeEvent = self.close_event self._widget.showEvent = self.show_event self._widget.hideEvent = self.hide_event # Attach to the context self.init_mgl_context() # Ensure retina and 4k displays get the right viewport self._buffer_width = self._width * self._widget.devicePixelRatio() self._buffer_height = self._height * self._widget.devicePixelRatio() self.set_default_viewport() def _set_fullscreen(self, value: bool) -> None: if value: self._widget.showFullScreen() else: self._widget.showNormal() @property def size(self) -> Tuple[int, int]: """Tuple[int, int]: current window size. 
This property also support assignment:: # Resize the window to 1000 x 1000 window.size = 1000, 1000 """ return self._width, self._height @size.setter def size(self, value: Tuple[int, int]): pos = self.position self._widget.setGeometry(pos[0], pos[1], value[0], value[1]) @property def position(self) -> Tuple[int, int]: """Tuple[int, int]: The current window position. This property can also be set to move the window:: # Move window to 100, 100 window.position = 100, 100 """ geo = self._widget.geometry() return geo.x(), geo.y() @position.setter def position(self, value: Tuple[int, int]): self._widget.setGeometry(value[0], value[1], self._width, self._height) def swap_buffers(self) -> None: """Swap buffers, set viewport, trigger events and increment frame counter""" self._widget.swapBuffers() self.set_default_viewport() self._app.processEvents() self._frames += 1 @property def cursor(self) -> bool: """bool: Should the mouse cursor be visible inside the window? This property can also be assigned to:: # Disable cursor window.cursor = False """ return self._cursor @cursor.setter def cursor(self, value: bool): if value is True: self._widget.setCursor(QtCore.Qt.ArrowCursor) else: self._widget.setCursor(QtCore.Qt.BlankCursor) self._cursor = value @property def title(self) -> str: """str: Window title. This property can also be set:: window.title = "New Title" """ return self._title @title.setter def title(self, value: str): self._widget.setWindowTitle(value) self._title = value def resize(self, width: int, height: int) -> None: """Replacement for Qt's ``resizeGL`` method. Args: width: New window width height: New window height """ self._width = width // self._widget.devicePixelRatio() self._height = height // self._widget.devicePixelRatio() self._buffer_width = width self._buffer_height = height if self._ctx: self.set_default_viewport() # Make sure we notify the example about the resize super().resize(self._buffer_width, self._buffer_height) def _handle_modifiers(self, mods) -> None: """Update modifiers""" self._modifiers.shift = bool(mods & QtCore.Qt.ShiftModifier) self._modifiers.ctrl = bool(mods & QtCore.Qt.ControlModifier) self._modifiers.alt = bool(mods & QtCore.Qt.AltModifier) def _set_icon(self, icon_path: str) -> None: self._widget.setWindowIcon(QtGui.QIcon(icon_path)) def key_pressed_event(self, event) -> None: """Process Qt key press events forwarding them to standard methods Args: event: The qtevent instance """ if self._exit_key is not None and event.key() == self._exit_key: self.close() if self._fs_key is not None and event.key() == self._fs_key: self.fullscreen = not self.fullscreen self._handle_modifiers(event.modifiers()) self._key_pressed_map[event.key()] = True self._key_event_func(event.key(), self.keys.ACTION_PRESS, self._modifiers) text = event.text() if text.strip() or event.key() == self.keys.SPACE: self._unicode_char_entered_func(text) def key_release_event(self, event) -> None: """Process Qt key release events forwarding them to standard methods Args: event: The qtevent instance """ self._handle_modifiers(event.modifiers()) self._key_pressed_map[event.key()] = False self._key_event_func(event.key(), self.keys.ACTION_RELEASE, self._modifiers) def mouse_move_event(self, event) -> None: """Forward mouse cursor position events to standard methods Args: event: The qtevent instance """ x, y = event.x(), event.y() dx, dy = self._calc_mouse_delta(x, y) if self.mouse_states.any: self._mouse_drag_event_func(x, y, dx, dy) else: self._mouse_position_event_func(x, y, dx, dy) def 
mouse_press_event(self, event) -> None:
        """Forward mouse press events to standard methods

        Args:
            event: The qtevent instance
        """
        self._handle_modifiers(event.modifiers())
        button = self._mouse_button_map.get(event.button())
        if button is None:
            return

        self._handle_mouse_button_state_change(button, True)
        self._mouse_press_event_func(event.x(), event.y(), button)

    def mouse_release_event(self, event) -> None:
        """Forward mouse release events to standard methods

        Args:
            event: The qtevent instance
        """
        self._handle_modifiers(event.modifiers())
        button = self._mouse_button_map.get(event.button())
        if button is None:
            return

        self._handle_mouse_button_state_change(button, False)
        self._mouse_release_event_func(event.x(), event.y(), button)

    def mouse_wheel_event(self, event):
        """Forward mouse wheel events to standard methods.

        From Qt docs:

        Returns the distance that the wheel is rotated, in eighths of a degree.
        A positive value indicates that the wheel was rotated forwards away
        from the user; a negative value indicates that the wheel was rotated
        backwards toward the user.

        Most mouse types work in steps of 15 degrees, in which case the delta
        value is a multiple of 120; i.e., 120 units * 1/8 = 15 degrees.

        However, some mice have finer-resolution wheels and send delta values
        that are less than 120 units (less than 15 degrees). To support this
        possibility, you can either cumulatively add the delta values from
        events until the value of 120 is reached, then scroll the widget, or
        you can partially scroll the widget in response to each wheel event.

        Args:
            event (QWheelEvent): Mouse wheel event
        """
        self._handle_modifiers(event.modifiers())
        point = event.angleDelta()
        self._mouse_scroll_event_func(point.x() / 120.0, point.y() / 120.0)

    def close_event(self, event) -> None:
        """The standard PyQt close events

        Args:
            event: The qtevent instance
        """
        self.close()

    def close(self):
        """Close the window"""
        super().close()
        self._close_func()

    def show_event(self, event):
        """The standard Qt show event"""
        self._iconify_func(False)

    def hide_event(self, event):
        """The standard Qt hide event"""
        self._iconify_func(True)

    def destroy(self) -> None:
        """Quit the Qt application to exit the window gracefully"""
        QtCore.QCoreApplication.instance().quit()
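# --- Worked example of the angleDelta() scaling in mouse_wheel_event above
# (values illustrative) ---
# One notch of a standard 15-degree wheel reports point.y() == 120, which
# the handler converts to 120 / 120.0 == 1.0 scroll unit; a finer-resolution
# wheel reporting 30 yields 30 / 120.0 == 0.25, which a consumer can
# accumulate until a full unit is reached.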
[ "PyQt5.QtGui.QIcon", "PyQt5.QtWidgets.QSizePolicy", "PyQt5.QtCore.QCoreApplication.instance", "PyQt5.QtWidgets.QDesktopWidget", "PyQt5.QtOpenGL.QGLWidget", "PyQt5.QtWidgets.QApplication", "PyQt5.QtOpenGL.QGLFormat" ]
[((1022, 1042), 'PyQt5.QtOpenGL.QGLFormat', 'QtOpenGL.QGLFormat', ([], {}), '()\n', (1040, 1042), False, 'from PyQt5 import QtCore, QtOpenGL, QtWidgets, QtGui\n'), ((1601, 1627), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['[]'], {}), '([])\n', (1623, 1627), False, 'from PyQt5 import QtCore, QtOpenGL, QtWidgets, QtGui\n'), ((1690, 1712), 'PyQt5.QtOpenGL.QGLWidget', 'QtOpenGL.QGLWidget', (['gl'], {}), '(gl)\n', (1708, 1712), False, 'from PyQt5 import QtCore, QtOpenGL, QtWidgets, QtGui\n'), ((2303, 2395), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Expanding'], {}), '(QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Expanding)\n', (2324, 2395), False, 'from PyQt5 import QtCore, QtOpenGL, QtWidgets, QtGui\n'), ((7408, 7430), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['icon_path'], {}), '(icon_path)\n', (7419, 7430), False, 'from PyQt5 import QtCore, QtOpenGL, QtWidgets, QtGui\n'), ((11775, 11809), 'PyQt5.QtCore.QCoreApplication.instance', 'QtCore.QCoreApplication.instance', ([], {}), '()\n', (11807, 11809), False, 'from PyQt5 import QtCore, QtOpenGL, QtWidgets, QtGui\n'), ((1887, 1913), 'PyQt5.QtWidgets.QDesktopWidget', 'QtWidgets.QDesktopWidget', ([], {}), '()\n', (1911, 1913), False, 'from PyQt5 import QtCore, QtOpenGL, QtWidgets, QtGui\n')]
from setuptools import setup setup(name='perftool', version='0.1.4', description='The Performance Tool', url='https://github.com/Yajan/Perftool.git', author='Yajana', author_email='<EMAIL>', license='Apache License', packages=['perftool','perftool.ext','perftool.reporter'], zip_safe=False)
[ "setuptools.setup" ]
[((30, 305), 'setuptools.setup', 'setup', ([], {'name': '"""perftool"""', 'version': '"""0.1.4"""', 'description': '"""The Performance Tool"""', 'url': '"""https://github.com/Yajan/Perftool.git"""', 'author': '"""Yajana"""', 'author_email': '"""<EMAIL>"""', 'license': '"""Apache License"""', 'packages': "['perftool', 'perftool.ext', 'perftool.reporter']", 'zip_safe': '(False)'}), "(name='perftool', version='0.1.4', description='The Performance Tool',\n url='https://github.com/Yajan/Perftool.git', author='Yajana',\n author_email='<EMAIL>', license='Apache License', packages=['perftool',\n 'perftool.ext', 'perftool.reporter'], zip_safe=False)\n", (35, 305), False, 'from setuptools import setup\n')]
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.web.twcgi}. """ import sys import os import json from io import BytesIO from twisted.trial import unittest from twisted.internet import address, reactor, interfaces, error from twisted.internet.error import ConnectionLost from twisted.python import util, failure, log from twisted.web.http import NOT_FOUND, INTERNAL_SERVER_ERROR from twisted.web import client, http, twcgi, server, resource, http_headers from twisted.web.test._util import _render from twisted.web.test.requesthelper import DummyRequest, DummyChannel DUMMY_CGI = """\ print("Header: OK") print("") print("cgi output") """ DUAL_HEADER_CGI = """\ print("Header: spam") print("Header: eggs") print("") print("cgi output") """ BROKEN_HEADER_CGI = """\ print("XYZ") print("") print("cgi output") """ SPECIAL_HEADER_CGI = """\ print("Server: monkeys") print("Date: last year") print("") print("cgi output") """ READINPUT_CGI = """\ # This is an example of a correctly-written CGI script which reads a body # from stdin, which only reads env['CONTENT_LENGTH'] bytes. import os, sys body_length = int(os.environ.get('CONTENT_LENGTH',0)) indata = sys.stdin.read(body_length) print("Header: OK") print("") print("readinput ok") """ READALLINPUT_CGI = """\ # This is an example of the typical (incorrect) CGI script which expects # the server to close stdin when the body of the request is complete. # A correct CGI should only read env['CONTENT_LENGTH'] bytes. import sys indata = sys.stdin.read() print("Header: OK") print("") print("readallinput ok") """ NO_DUPLICATE_CONTENT_TYPE_HEADER_CGI = """\ print("content-type: text/cgi-duplicate-test") print("") print("cgi output") """ HEADER_OUTPUT_CGI = """\ import json import os print("") print("") vals = {x:y for x,y in os.environ.items() if x.startswith("HTTP_")} print(json.dumps(vals)) """ URL_PARAMETER_CGI = """\ import cgi fs = cgi.FieldStorage() param = fs.getvalue("param") print("Header: OK") print("") print(param) """ class PythonScript(twcgi.FilteredScript): filter = sys.executable class _StartServerAndTearDownMixin: def startServer(self, cgi): root = resource.Resource() cgipath = util.sibpath(__file__, cgi) root.putChild(b"cgi", PythonScript(cgipath)) site = server.Site(root) self.p = reactor.listenTCP(0, site) return self.p.getHost().port def tearDown(self): if getattr(self, "p", None): return self.p.stopListening() def writeCGI(self, source): cgiFilename = os.path.abspath(self.mktemp()) with open(cgiFilename, "wt") as cgiFile: cgiFile.write(source) return cgiFilename class CGITests(_StartServerAndTearDownMixin, unittest.TestCase): """ Tests for L{twcgi.FilteredScript}. """ if not interfaces.IReactorProcess.providedBy(reactor): skip = "CGI tests require a functional reactor.spawnProcess()" def test_CGI(self): cgiFilename = self.writeCGI(DUMMY_CGI) portnum = self.startServer(cgiFilename) url = "http://localhost:%d/cgi" % (portnum,) url = url.encode("ascii") d = client.Agent(reactor).request(b"GET", url) d.addCallback(client.readBody) d.addCallback(self._testCGI_1) return d def _testCGI_1(self, res): self.assertEqual(res, b"cgi output" + os.linesep.encode("ascii")) def test_protectedServerAndDate(self): """ If the CGI script emits a I{Server} or I{Date} header, these are ignored. 
""" cgiFilename = self.writeCGI(SPECIAL_HEADER_CGI) portnum = self.startServer(cgiFilename) url = "http://localhost:%d/cgi" % (portnum,) url = url.encode("ascii") agent = client.Agent(reactor) d = agent.request(b"GET", url) d.addCallback(discardBody) def checkResponse(response): self.assertNotIn("monkeys", response.headers.getRawHeaders("server")) self.assertNotIn("last year", response.headers.getRawHeaders("date")) d.addCallback(checkResponse) return d def test_noDuplicateContentTypeHeaders(self): """ If the CGI script emits a I{content-type} header, make sure that the server doesn't add an additional (duplicate) one, as per ticket 4786. """ cgiFilename = self.writeCGI(NO_DUPLICATE_CONTENT_TYPE_HEADER_CGI) portnum = self.startServer(cgiFilename) url = "http://localhost:%d/cgi" % (portnum,) url = url.encode("ascii") agent = client.Agent(reactor) d = agent.request(b"GET", url) d.addCallback(discardBody) def checkResponse(response): self.assertEqual( response.headers.getRawHeaders("content-type"), ["text/cgi-duplicate-test"], ) return response d.addCallback(checkResponse) return d def test_noProxyPassthrough(self): """ The CGI script is never called with the Proxy header passed through. """ cgiFilename = self.writeCGI(HEADER_OUTPUT_CGI) portnum = self.startServer(cgiFilename) url = "http://localhost:%d/cgi" % (portnum,) url = url.encode("ascii") agent = client.Agent(reactor) headers = http_headers.Headers( {b"Proxy": [b"foo"], b"X-Innocent-Header": [b"bar"]} ) d = agent.request(b"GET", url, headers=headers) def checkResponse(response): headers = json.loads(response.decode("ascii")) self.assertEqual( set(headers.keys()), {"HTTP_HOST", "HTTP_CONNECTION", "HTTP_X_INNOCENT_HEADER"}, ) d.addCallback(client.readBody) d.addCallback(checkResponse) return d def test_duplicateHeaderCGI(self): """ If a CGI script emits two instances of the same header, both are sent in the response. 
""" cgiFilename = self.writeCGI(DUAL_HEADER_CGI) portnum = self.startServer(cgiFilename) url = "http://localhost:%d/cgi" % (portnum,) url = url.encode("ascii") agent = client.Agent(reactor) d = agent.request(b"GET", url) d.addCallback(discardBody) def checkResponse(response): self.assertEqual(response.headers.getRawHeaders("header"), ["spam", "eggs"]) d.addCallback(checkResponse) return d def test_malformedHeaderCGI(self): """ Check for the error message in the duplicated header """ cgiFilename = self.writeCGI(BROKEN_HEADER_CGI) portnum = self.startServer(cgiFilename) url = "http://localhost:%d/cgi" % (portnum,) url = url.encode("ascii") agent = client.Agent(reactor) d = agent.request(b"GET", url) d.addCallback(discardBody) loggedMessages = [] def addMessage(eventDict): loggedMessages.append(log.textFromEventDict(eventDict)) log.addObserver(addMessage) self.addCleanup(log.removeObserver, addMessage) def checkResponse(ignored): self.assertIn( "ignoring malformed CGI header: " + repr(b"XYZ"), loggedMessages ) d.addCallback(checkResponse) return d def test_ReadEmptyInput(self): cgiFilename = os.path.abspath(self.mktemp()) with open(cgiFilename, "wt") as cgiFile: cgiFile.write(READINPUT_CGI) portnum = self.startServer(cgiFilename) agent = client.Agent(reactor) url = "http://localhost:%d/cgi" % (portnum,) url = url.encode("ascii") d = agent.request(b"GET", url) d.addCallback(client.readBody) d.addCallback(self._test_ReadEmptyInput_1) return d test_ReadEmptyInput.timeout = 5 # type: ignore[attr-defined] def _test_ReadEmptyInput_1(self, res): expected = f"readinput ok{os.linesep}" expected = expected.encode("ascii") self.assertEqual(res, expected) def test_ReadInput(self): cgiFilename = os.path.abspath(self.mktemp()) with open(cgiFilename, "wt") as cgiFile: cgiFile.write(READINPUT_CGI) portnum = self.startServer(cgiFilename) agent = client.Agent(reactor) url = "http://localhost:%d/cgi" % (portnum,) url = url.encode("ascii") d = agent.request( uri=url, method=b"POST", bodyProducer=client.FileBodyProducer(BytesIO(b"Here is your stdin")), ) d.addCallback(client.readBody) d.addCallback(self._test_ReadInput_1) return d test_ReadInput.timeout = 5 # type: ignore[attr-defined] def _test_ReadInput_1(self, res): expected = f"readinput ok{os.linesep}" expected = expected.encode("ascii") self.assertEqual(res, expected) def test_ReadAllInput(self): cgiFilename = os.path.abspath(self.mktemp()) with open(cgiFilename, "wt") as cgiFile: cgiFile.write(READALLINPUT_CGI) portnum = self.startServer(cgiFilename) url = "http://localhost:%d/cgi" % (portnum,) url = url.encode("ascii") d = client.Agent(reactor).request( uri=url, method=b"POST", bodyProducer=client.FileBodyProducer(BytesIO(b"Here is your stdin")), ) d.addCallback(client.readBody) d.addCallback(self._test_ReadAllInput_1) return d test_ReadAllInput.timeout = 5 # type: ignore[attr-defined] def _test_ReadAllInput_1(self, res): expected = f"readallinput ok{os.linesep}" expected = expected.encode("ascii") self.assertEqual(res, expected) def test_useReactorArgument(self): """ L{twcgi.FilteredScript.runProcess} uses the reactor passed as an argument to the constructor. """ class FakeReactor: """ A fake reactor recording whether spawnProcess is called. """ called = False def spawnProcess(self, *args, **kwargs): """ Set the C{called} flag to C{True} if C{spawnProcess} is called. @param args: Positional arguments. @param kwargs: Keyword arguments. 
""" self.called = True fakeReactor = FakeReactor() request = DummyRequest(["a", "b"]) request.client = address.IPv4Address("TCP", "127.0.0.1", 12345) resource = twcgi.FilteredScript("dummy-file", reactor=fakeReactor) _render(resource, request) self.assertTrue(fakeReactor.called) class CGIScriptTests(_StartServerAndTearDownMixin, unittest.TestCase): """ Tests for L{twcgi.CGIScript}. """ def test_urlParameters(self): """ If the CGI script is passed URL parameters, do not fall over, as per ticket 9887. """ cgiFilename = self.writeCGI(URL_PARAMETER_CGI) portnum = self.startServer(cgiFilename) url = b"http://localhost:%d/cgi?param=1234" % (portnum,) agent = client.Agent(reactor) d = agent.request(b"GET", url) d.addCallback(client.readBody) d.addCallback(self._test_urlParameters_1) return d def _test_urlParameters_1(self, res): expected = f"1234{os.linesep}" expected = expected.encode("ascii") self.assertEqual(res, expected) def test_pathInfo(self): """ L{twcgi.CGIScript.render} sets the process environment I{PATH_INFO} from the request path. """ class FakeReactor: """ A fake reactor recording the environment passed to spawnProcess. """ def spawnProcess(self, process, filename, args, env, wdir): """ Store the C{env} L{dict} to an instance attribute. @param process: Ignored @param filename: Ignored @param args: Ignored @param env: The environment L{dict} which will be stored @param wdir: Ignored """ self.process_env = env _reactor = FakeReactor() resource = twcgi.CGIScript(self.mktemp(), reactor=_reactor) request = DummyRequest(["a", "b"]) request.client = address.IPv4Address("TCP", "127.0.0.1", 12345) _render(resource, request) self.assertEqual(_reactor.process_env["PATH_INFO"], "/a/b") class CGIDirectoryTests(unittest.TestCase): """ Tests for L{twcgi.CGIDirectory}. """ def test_render(self): """ L{twcgi.CGIDirectory.render} sets the HTTP response code to I{NOT FOUND}. """ resource = twcgi.CGIDirectory(self.mktemp()) request = DummyRequest([""]) d = _render(resource, request) def cbRendered(ignored): self.assertEqual(request.responseCode, NOT_FOUND) d.addCallback(cbRendered) return d def test_notFoundChild(self): """ L{twcgi.CGIDirectory.getChild} returns a resource which renders an response with the HTTP I{NOT FOUND} status code if the indicated child does not exist as an entry in the directory used to initialized the L{twcgi.CGIDirectory}. """ path = self.mktemp() os.makedirs(path) resource = twcgi.CGIDirectory(path) request = DummyRequest(["foo"]) child = resource.getChild("foo", request) d = _render(child, request) def cbRendered(ignored): self.assertEqual(request.responseCode, NOT_FOUND) d.addCallback(cbRendered) return d class CGIProcessProtocolTests(unittest.TestCase): """ Tests for L{twcgi.CGIProcessProtocol}. """ def test_prematureEndOfHeaders(self): """ If the process communicating with L{CGIProcessProtocol} ends before finishing writing out headers, the response has I{INTERNAL SERVER ERROR} as its status code. """ request = DummyRequest([""]) protocol = twcgi.CGIProcessProtocol(request) protocol.processEnded(failure.Failure(error.ProcessTerminated())) self.assertEqual(request.responseCode, INTERNAL_SERVER_ERROR) def test_connectionLost(self): """ Ensure that the CGI process ends cleanly when the request connection is lost. 
""" d = DummyChannel() request = http.Request(d, True) protocol = twcgi.CGIProcessProtocol(request) request.connectionLost(failure.Failure(ConnectionLost("Connection done"))) protocol.processEnded(failure.Failure(error.ProcessTerminated())) def discardBody(response): """ Discard the body of a HTTP response. @param response: The response. @return: The response. """ return client.readBody(response).addCallback(lambda _: response)
[ "twisted.web.http_headers.Headers", "twisted.internet.error.ConnectionLost", "twisted.python.util.sibpath", "twisted.web.client.readBody", "twisted.web.twcgi.CGIProcessProtocol", "twisted.web.test.requesthelper.DummyRequest", "twisted.web.test.requesthelper.DummyChannel", "twisted.web.server.Site", "twisted.web.twcgi.CGIDirectory", "io.BytesIO", "twisted.internet.reactor.listenTCP", "twisted.web.http.Request", "twisted.web.resource.Resource", "twisted.web.twcgi.FilteredScript", "twisted.internet.error.ProcessTerminated", "twisted.web.test._util._render", "twisted.python.log.addObserver", "os.makedirs", "twisted.internet.interfaces.IReactorProcess.providedBy", "twisted.web.client.Agent", "os.linesep.encode", "twisted.python.log.textFromEventDict", "twisted.internet.address.IPv4Address", "twisted.web.resource.getChild" ]
[((2208, 2227), 'twisted.web.resource.Resource', 'resource.Resource', ([], {}), '()\n', (2225, 2227), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((2246, 2273), 'twisted.python.util.sibpath', 'util.sibpath', (['__file__', 'cgi'], {}), '(__file__, cgi)\n', (2258, 2273), False, 'from twisted.python import util, failure, log\n'), ((2342, 2359), 'twisted.web.server.Site', 'server.Site', (['root'], {}), '(root)\n', (2353, 2359), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((2377, 2403), 'twisted.internet.reactor.listenTCP', 'reactor.listenTCP', (['(0)', 'site'], {}), '(0, site)\n', (2394, 2403), False, 'from twisted.internet import address, reactor, interfaces, error\n'), ((2875, 2921), 'twisted.internet.interfaces.IReactorProcess.providedBy', 'interfaces.IReactorProcess.providedBy', (['reactor'], {}), '(reactor)\n', (2912, 2921), False, 'from twisted.internet import address, reactor, interfaces, error\n'), ((3824, 3845), 'twisted.web.client.Agent', 'client.Agent', (['reactor'], {}), '(reactor)\n', (3836, 3845), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((4633, 4654), 'twisted.web.client.Agent', 'client.Agent', (['reactor'], {}), '(reactor)\n', (4645, 4654), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((5352, 5373), 'twisted.web.client.Agent', 'client.Agent', (['reactor'], {}), '(reactor)\n', (5364, 5373), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((5393, 5467), 'twisted.web.http_headers.Headers', 'http_headers.Headers', (["{b'Proxy': [b'foo'], b'X-Innocent-Header': [b'bar']}"], {}), "({b'Proxy': [b'foo'], b'X-Innocent-Header': [b'bar']})\n", (5413, 5467), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((6266, 6287), 'twisted.web.client.Agent', 'client.Agent', (['reactor'], {}), '(reactor)\n', (6278, 6287), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((6876, 6897), 'twisted.web.client.Agent', 'client.Agent', (['reactor'], {}), '(reactor)\n', (6888, 6897), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((7113, 7140), 'twisted.python.log.addObserver', 'log.addObserver', (['addMessage'], {}), '(addMessage)\n', (7128, 7140), False, 'from twisted.python import util, failure, log\n'), ((7655, 7676), 'twisted.web.client.Agent', 'client.Agent', (['reactor'], {}), '(reactor)\n', (7667, 7676), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((8391, 8412), 'twisted.web.client.Agent', 'client.Agent', (['reactor'], {}), '(reactor)\n', (8403, 8412), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((10545, 10569), 'twisted.web.test.requesthelper.DummyRequest', 'DummyRequest', (["['a', 'b']"], {}), "(['a', 'b'])\n", (10557, 10569), False, 'from twisted.web.test.requesthelper import DummyRequest, DummyChannel\n'), ((10595, 10641), 'twisted.internet.address.IPv4Address', 'address.IPv4Address', (['"""TCP"""', '"""127.0.0.1"""', '(12345)'], {}), "('TCP', '127.0.0.1', 12345)\n", (10614, 10641), False, 'from twisted.internet import address, reactor, interfaces, error\n'), ((10661, 10716), 'twisted.web.twcgi.FilteredScript', 'twcgi.FilteredScript', (['"""dummy-file"""'], {'reactor': 'fakeReactor'}), "('dummy-file', reactor=fakeReactor)\n", (10681, 10716), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((10725, 10751), 'twisted.web.test._util._render', '_render', (['resource', 'request'], {}), '(resource, request)\n', (10732, 10751), False, 'from twisted.web.test._util import _render\n'), ((11261, 11282), 'twisted.web.client.Agent', 'client.Agent', (['reactor'], {}), '(reactor)\n', (11273, 11282), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((12460, 12484), 'twisted.web.test.requesthelper.DummyRequest', 'DummyRequest', (["['a', 'b']"], {}), "(['a', 'b'])\n", (12472, 12484), False, 'from twisted.web.test.requesthelper import DummyRequest, DummyChannel\n'), ((12510, 12556), 'twisted.internet.address.IPv4Address', 'address.IPv4Address', (['"""TCP"""', '"""127.0.0.1"""', '(12345)'], {}), "('TCP', '127.0.0.1', 12345)\n", (12529, 12556), False, 'from twisted.internet import address, reactor, interfaces, error\n'), ((12565, 12591), 'twisted.web.test._util._render', '_render', (['resource', 'request'], {}), '(resource, request)\n', (12572, 12591), False, 'from twisted.web.test._util import _render\n'), ((12973, 12991), 'twisted.web.test.requesthelper.DummyRequest', 'DummyRequest', (["['']"], {}), "([''])\n", (12985, 12991), False, 'from twisted.web.test.requesthelper import DummyRequest, DummyChannel\n'), ((13004, 13030), 'twisted.web.test._util._render', '_render', (['resource', 'request'], {}), '(resource, request)\n', (13011, 13030), False, 'from twisted.web.test._util import _render\n'), ((13536, 13553), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (13547, 13553), False, 'import os\n'), ((13573, 13597), 'twisted.web.twcgi.CGIDirectory', 'twcgi.CGIDirectory', (['path'], {}), '(path)\n', (13591, 13597), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((13616, 13637), 'twisted.web.test.requesthelper.DummyRequest', 'DummyRequest', (["['foo']"], {}), "(['foo'])\n", (13628, 13637), False, 'from twisted.web.test.requesthelper import DummyRequest, DummyChannel\n'), ((13654, 13687), 'twisted.web.resource.getChild', 'resource.getChild', (['"""foo"""', 'request'], {}), "('foo', request)\n", (13671, 13687), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((13700, 13723), 'twisted.web.test._util._render', '_render', (['child', 'request'], {}), '(child, request)\n', (13707, 13723), False, 'from twisted.web.test._util import _render\n'), ((14253, 14271), 'twisted.web.test.requesthelper.DummyRequest', 'DummyRequest', (["['']"], {}), "([''])\n", (14265, 14271), False, 'from twisted.web.test.requesthelper import DummyRequest, DummyChannel\n'), ((14291, 14324), 'twisted.web.twcgi.CGIProcessProtocol', 'twcgi.CGIProcessProtocol', (['request'], {}), '(request)\n', (14315, 14324), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((14635, 14649), 'twisted.web.test.requesthelper.DummyChannel', 'DummyChannel', ([], {}), '()\n', (14647, 14649), False, 'from twisted.web.test.requesthelper import DummyRequest, DummyChannel\n'), ((14668, 14689), 'twisted.web.http.Request', 'http.Request', (['d', '(True)'], {}), '(d, True)\n', (14680, 14689), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((14709, 14742), 'twisted.web.twcgi.CGIProcessProtocol', 'twcgi.CGIProcessProtocol', (['request'], {}), '(request)\n', (14733, 14742), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((15061, 15086), 'twisted.web.client.readBody', 'client.readBody', (['response'], {}), '(response)\n', (15076, 15086), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((3214, 3235), 'twisted.web.client.Agent', 'client.Agent', (['reactor'], {}), '(reactor)\n', (3226, 3235), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((3430, 3456), 'os.linesep.encode', 'os.linesep.encode', (['"""ascii"""'], {}), "('ascii')\n", (3447, 3456), False, 'import os\n'), ((7070, 7102), 'twisted.python.log.textFromEventDict', 'log.textFromEventDict', (['eventDict'], {}), '(eventDict)\n', (7091, 7102), False, 'from twisted.python import util, failure, log\n'), ((9330, 9351), 'twisted.web.client.Agent', 'client.Agent', (['reactor'], {}), '(reactor)\n', (9342, 9351), False, 'from twisted.web import client, http, twcgi, server, resource, http_headers\n'), ((14371, 14396), 'twisted.internet.error.ProcessTerminated', 'error.ProcessTerminated', ([], {}), '()\n', (14394, 14396), False, 'from twisted.internet import address, reactor, interfaces, error\n'), ((14790, 14823), 'twisted.internet.error.ConnectionLost', 'ConnectionLost', (['"""Connection done"""'], {}), "('Connection done')\n", (14804, 14823), False, 'from twisted.internet.error import ConnectionLost\n'), ((14872, 14897), 'twisted.internet.error.ProcessTerminated', 'error.ProcessTerminated', ([], {}), '()\n', (14895, 14897), False, 'from twisted.internet import address, reactor, interfaces, error\n'), ((8625, 8655), 'io.BytesIO', 'BytesIO', (["b'Here is your stdin'"], {}), "(b'Here is your stdin')\n", (8632, 8655), False, 'from io import BytesIO\n'), ((9459, 9489), 'io.BytesIO', 'BytesIO', (["b'Here is your stdin'"], {}), "(b'Here is your stdin')\n", (9466, 9489), False, 'from io import BytesIO\n')]
#copybot.py
import pyautogui as pg
import time

################## LINE 1 ###################
#-Use the mouse to click at the position to copy from (the start of the line)
# x=1046, y=266
time.sleep(1) # wait 1 second
start_point = (1046,266)
pg.click(start_point)

#-Drag to the end of the line
time.sleep(1)
end_point = (1400,266)
pg.dragTo(end_point, duration=2)

#-Press Ctrl + C to copy
pg.hotkey('ctrl','c')

#-Move the mouse to the left
left_notepad = (800,266)
pg.click(left_notepad)

#-Press Ctrl + V to paste, then press Enter
pg.hotkey('ctrl','v')
pg.press('enter')

################## LINE 2 ###################
#-Use the mouse to click at the position to copy from (the start of the line)
# x=1046, y=266
time.sleep(1) # wait 1 second
start_point = (1046,308) # 266 + 42
pg.click(start_point)

#-Drag to the end of the line
time.sleep(1)
end_point = (1400,308)
pg.dragTo(end_point, duration=2)

#-Press Ctrl + C to copy
pg.hotkey('ctrl','c')

#-Move the mouse to the left
left_notepad = (800,266)
pg.click(left_notepad)

### ********
pg.press('down')

#-Press Ctrl + V to paste, then press Enter
pg.hotkey('ctrl','v')
pg.press('enter')

################## LINE 3 ###################
#-Use the mouse to click at the position to copy from (the start of the line)
# x=1046, y=266
time.sleep(1) # wait 1 second
start_point = (1046,350) # 308 + 42 (42 pixels is the spacing between lines)
pg.click(start_point)

#-Drag to the end of the line
time.sleep(1)
end_point = (1400,350)
pg.dragTo(end_point, duration=2)

#-Press Ctrl + C to copy
pg.hotkey('ctrl','c')

#-Move the mouse to the left
left_notepad = (800,266)
pg.click(left_notepad)

### ********
pg.press('down')
pg.press('down')

#-Press Ctrl + V to paste, then press Enter
pg.hotkey('ctrl','v')
pg.press('enter')
[ "pyautogui.hotkey", "pyautogui.press", "pyautogui.dragTo", "time.sleep", "pyautogui.click" ]
[((174, 187), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (184, 187), False, 'import time\n'), ((229, 250), 'pyautogui.click', 'pg.click', (['start_point'], {}), '(start_point)\n', (237, 250), True, 'import pyautogui as pg\n'), ((275, 288), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (285, 288), False, 'import time\n'), ((314, 346), 'pyautogui.dragTo', 'pg.dragTo', (['end_point'], {'duration': '(2)'}), '(end_point, duration=2)\n', (323, 346), True, 'import pyautogui as pg\n'), ((382, 404), 'pyautogui.hotkey', 'pg.hotkey', (['"""ctrl"""', '"""c"""'], {}), "('ctrl', 'c')\n", (391, 404), True, 'import pyautogui as pg\n'), ((457, 479), 'pyautogui.click', 'pg.click', (['left_notepad'], {}), '(left_notepad)\n', (465, 479), True, 'import pyautogui as pg\n'), ((518, 540), 'pyautogui.hotkey', 'pg.hotkey', (['"""ctrl"""', '"""v"""'], {}), "('ctrl', 'v')\n", (527, 540), True, 'import pyautogui as pg\n'), ((541, 558), 'pyautogui.press', 'pg.press', (['"""enter"""'], {}), "('enter')\n", (549, 558), True, 'import pyautogui as pg\n'), ((684, 697), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (694, 697), False, 'import time\n'), ((750, 771), 'pyautogui.click', 'pg.click', (['start_point'], {}), '(start_point)\n', (758, 771), True, 'import pyautogui as pg\n'), ((796, 809), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (806, 809), False, 'import time\n'), ((835, 867), 'pyautogui.dragTo', 'pg.dragTo', (['end_point'], {'duration': '(2)'}), '(end_point, duration=2)\n', (844, 867), True, 'import pyautogui as pg\n'), ((903, 925), 'pyautogui.hotkey', 'pg.hotkey', (['"""ctrl"""', '"""c"""'], {}), "('ctrl', 'c')\n", (912, 925), True, 'import pyautogui as pg\n'), ((978, 1000), 'pyautogui.click', 'pg.click', (['left_notepad'], {}), '(left_notepad)\n', (986, 1000), True, 'import pyautogui as pg\n'), ((1018, 1034), 'pyautogui.press', 'pg.press', (['"""down"""'], {}), "('down')\n", (1026, 1034), True, 'import pyautogui as pg\n'), ((1075, 1097), 'pyautogui.hotkey', 'pg.hotkey', (['"""ctrl"""', '"""v"""'], {}), "('ctrl', 'v')\n", (1084, 1097), True, 'import pyautogui as pg\n'), ((1098, 1115), 'pyautogui.press', 'pg.press', (['"""enter"""'], {}), "('enter')\n", (1106, 1115), True, 'import pyautogui as pg\n'), ((1239, 1252), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1249, 1252), False, 'import time\n'), ((1338, 1359), 'pyautogui.click', 'pg.click', (['start_point'], {}), '(start_point)\n', (1346, 1359), True, 'import pyautogui as pg\n'), ((1384, 1397), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1394, 1397), False, 'import time\n'), ((1423, 1455), 'pyautogui.dragTo', 'pg.dragTo', (['end_point'], {'duration': '(2)'}), '(end_point, duration=2)\n', (1432, 1455), True, 'import pyautogui as pg\n'), ((1491, 1513), 'pyautogui.hotkey', 'pg.hotkey', (['"""ctrl"""', '"""c"""'], {}), "('ctrl', 'c')\n", (1500, 1513), True, 'import pyautogui as pg\n'), ((1566, 1588), 'pyautogui.click', 'pg.click', (['left_notepad'], {}), '(left_notepad)\n', (1574, 1588), True, 'import pyautogui as pg\n'), ((1606, 1622), 'pyautogui.press', 'pg.press', (['"""down"""'], {}), "('down')\n", (1614, 1622), True, 'import pyautogui as pg\n'), ((1624, 1640), 'pyautogui.press', 'pg.press', (['"""down"""'], {}), "('down')\n", (1632, 1640), True, 'import pyautogui as pg\n'), ((1681, 1703), 'pyautogui.hotkey', 'pg.hotkey', (['"""ctrl"""', '"""v"""'], {}), "('ctrl', 'v')\n", (1690, 1703), True, 'import pyautogui as pg\n'), ((1704, 1721), 'pyautogui.press', 'pg.press', (['"""enter"""'], {}), "('enter')\n", (1712, 1721), True, 'import pyautogui as pg\n')]
# Copyright 2018 The CapsLayer Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==========================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.keras.datasets.cifar import load_batch
from capslayer.data.utils.TFRecordHelper import int64_feature, bytes_feature


URL = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
md5sum = 'eb9058c3a382ffc7106e4002c42a8d85'


def load_cifar100(split, path=None):
    if path is None:
        cache_path = os.path.join(os.path.expanduser('~'), ".capslayer")
        path = get_file('cifar-100-python', cache_dir=cache_path, file_hash=md5sum, origin=URL, untar=True)

    split = split.lower()
    if split == 'test':
        fpath = os.path.join(path, 'test')
        images, labels = load_batch(fpath, label_key='fine_labels')
    else:
        fpath = os.path.join(path, 'train')
        images, labels = load_batch(fpath, label_key='fine_labels')
        idx = np.arange(len(images))
        np.random.seed(201808)
        np.random.shuffle(idx)

        # hold out the last 5000 shuffled samples as the eval split
        labels = np.reshape(labels, (-1, ))
        images = images[idx[:45000]] if split == "train" else images[idx[45000:]]
        labels = labels[idx[:45000]] if split == "train" else labels[idx[45000:]]

    images = np.reshape(images.transpose(0, 2, 3, 1), (-1, 3072)).astype(np.float32)
    labels = np.reshape(labels, (-1, )).astype(np.int32)
    return(zip(images, labels))


def encode_and_write(dataset, filename):
    with tf.python_io.TFRecordWriter(filename) as writer:
        # serialize each (image, label) pair as a tf.train.Example record
        for image, label in dataset:
            image_raw = image.tostring()
            example = tf.train.Example(features=tf.train.Features(
                feature={'image': bytes_feature(image_raw),
                         'label': int64_feature(label)}))
            writer.write(example.SerializeToString())


def tfrecord_runner(path=None, force=True):
    train_set = load_cifar100(path=path, split='train')
    eval_set = load_cifar100(path=path, split='eval')
    test_set = load_cifar100(path=path, split='test')

    if path is None:
        path = os.path.join(os.path.expanduser('~'), ".capslayer", "datasets", "cifar100")
    if not os.path.exists(path):
        os.makedirs(path)
    train_set_outpath = os.path.join(path, "train_cifar100.tfrecord")
    eval_set_outpath = os.path.join(path, "eval_cifar100.tfrecord")
    test_set_outpath = os.path.join(path, "test_cifar100.tfrecord")

    if not os.path.exists(train_set_outpath) or force:
        encode_and_write(train_set, train_set_outpath)
    if not os.path.exists(eval_set_outpath) or force:
        encode_and_write(eval_set, eval_set_outpath)
    if not os.path.exists(test_set_outpath) or force:
        encode_and_write(test_set, test_set_outpath)


if __name__ == "__main__":
    data = load_cifar100(split='train')
    print(data)
[ "os.path.expanduser", "numpy.random.seed", "tensorflow.python_io.TFRecordWriter", "os.makedirs", "tensorflow.python.keras.utils.data_utils.get_file", "os.path.exists", "capslayer.data.utils.TFRecordHelper.bytes_feature", "tensorflow.python.keras.datasets.cifar.load_batch", "numpy.reshape", "capslayer.data.utils.TFRecordHelper.int64_feature", "os.path.join", "numpy.random.shuffle" ]
[((3092, 3137), 'os.path.join', 'os.path.join', (['path', '"""train_cifar100.tfrecord"""'], {}), "(path, 'train_cifar100.tfrecord')\n", (3104, 3137), False, 'import os\n'), ((3161, 3205), 'os.path.join', 'os.path.join', (['path', '"""eval_cifar100.tfrecord"""'], {}), "(path, 'eval_cifar100.tfrecord')\n", (3173, 3205), False, 'import os\n'), ((3229, 3273), 'os.path.join', 'os.path.join', (['path', '"""test_cifar100.tfrecord"""'], {}), "(path, 'test_cifar100.tfrecord')\n", (3241, 3273), False, 'import os\n'), ((1310, 1407), 'tensorflow.python.keras.utils.data_utils.get_file', 'get_file', (['"""cifar-100-python"""'], {'cache_dir': 'cache_path', 'file_hash': 'md5sum', 'origin': 'URL', 'untar': '(True)'}), "('cifar-100-python', cache_dir=cache_path, file_hash=md5sum, origin\n =URL, untar=True)\n", (1318, 1407), False, 'from tensorflow.python.keras.utils.data_utils import get_file\n'), ((1470, 1496), 'os.path.join', 'os.path.join', (['path', '"""test"""'], {}), "(path, 'test')\n", (1482, 1496), False, 'import os\n'), ((1522, 1564), 'tensorflow.python.keras.datasets.cifar.load_batch', 'load_batch', (['fpath'], {'label_key': '"""fine_labels"""'}), "(fpath, label_key='fine_labels')\n", (1532, 1564), False, 'from tensorflow.python.keras.datasets.cifar import load_batch\n'), ((1591, 1618), 'os.path.join', 'os.path.join', (['path', '"""train"""'], {}), "(path, 'train')\n", (1603, 1618), False, 'import os\n'), ((1644, 1686), 'tensorflow.python.keras.datasets.cifar.load_batch', 'load_batch', (['fpath'], {'label_key': '"""fine_labels"""'}), "(fpath, label_key='fine_labels')\n", (1654, 1686), False, 'from tensorflow.python.keras.datasets.cifar import load_batch\n'), ((1733, 1755), 'numpy.random.seed', 'np.random.seed', (['(201808)'], {}), '(201808)\n', (1747, 1755), True, 'import numpy as np\n'), ((1764, 1786), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (1781, 1786), True, 'import numpy as np\n'), ((1805, 1830), 'numpy.reshape', 'np.reshape', (['labels', '(-1,)'], {}), '(labels, (-1,))\n', (1815, 1830), True, 'import numpy as np\n'), ((2223, 2260), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['filename'], {}), '(filename)\n', (2250, 2260), True, 'import tensorflow as tf\n'), ((3019, 3039), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3033, 3039), False, 'import os\n'), ((3049, 3066), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (3060, 3066), False, 'import os\n'), ((1256, 1279), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1274, 1279), False, 'import os\n'), ((2094, 2119), 'numpy.reshape', 'np.reshape', (['labels', '(-1,)'], {}), '(labels, (-1,))\n', (2104, 2119), True, 'import numpy as np\n'), ((2945, 2968), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2963, 2968), False, 'import os\n'), ((3286, 3319), 'os.path.exists', 'os.path.exists', (['train_set_outpath'], {}), '(train_set_outpath)\n', (3300, 3319), False, 'import os\n'), ((3396, 3428), 'os.path.exists', 'os.path.exists', (['eval_set_outpath'], {}), '(eval_set_outpath)\n', (3410, 3428), False, 'import os\n'), ((3503, 3535), 'os.path.exists', 'os.path.exists', (['test_set_outpath'], {}), '(test_set_outpath)\n', (3517, 3535), False, 'import os\n'), ((2524, 2548), 'capslayer.data.utils.TFRecordHelper.bytes_feature', 'bytes_feature', (['image_raw'], {}), '(image_raw)\n', (2537, 2548), False, 'from capslayer.data.utils.TFRecordHelper import int64_feature, bytes_feature\n'), ((2607, 2627), 'capslayer.data.utils.TFRecordHelper.int64_feature', 'int64_feature', (['label'], {}), '(label)\n', (2620, 2627), False, 'from capslayer.data.utils.TFRecordHelper import int64_feature, bytes_feature\n')]
import torch # import torch.distributions as dist import os import shutil import argparse import torch.optim as optim from tqdm import tqdm import time from collections import defaultdict import pandas as pd from src import config from src.checkpoints import CheckpointIO from src.utils.io import export_pointcloud from src.utils.visualize import visualize_data import pickle parser = argparse.ArgumentParser( description='Extract meshes from occupancy process.' ) parser.add_argument('config', type=str, help='Path to config file.') parser.add_argument('--no-cuda', action='store_true', help='Do not use cuda.') parser.add_argument('--rotate-planes', action='store_true', help='To see if we need plane rotations') args = parser.parse_args() cfg = config.load_config(args.config, 'configs/default.yaml') is_cuda = (torch.cuda.is_available() and not args.no_cuda) rotate_planes = args.rotate_planes device = torch.device("cuda" if is_cuda else "cpu") out_dir = cfg['training']['out_dir'] generation_dir = os.path.join(out_dir, cfg['generation']['generation_dir']) out_time_file = os.path.join(generation_dir, 'time_generation_full.pkl') out_time_file_class = os.path.join(generation_dir, 'time_generation.pkl') DEGREES = cfg['degrees'] batch_size = cfg['generation']['batch_size'] input_type = cfg['data']['input_type'] vis_n_outputs = cfg['generation']['vis_n_outputs'] if vis_n_outputs is None: vis_n_outputs = -1 # Dataset dataset = config.get_dataset('test', cfg, return_idx=True) # Model model = config.get_model(cfg, device=device, dataset=dataset) checkpoint_io = CheckpointIO(out_dir, model=model) checkpoint_io.load(cfg['test']['model_file']) # Generator generator = config.get_generator(model, cfg, device=device) # Determine what to generate generate_mesh = cfg['generation']['generate_mesh'] generate_pointcloud = cfg['generation']['generate_pointcloud'] if generate_mesh and not hasattr(generator, 'generate_mesh'): generate_mesh = False print('Warning: generator does not support mesh generation.') if generate_pointcloud and not hasattr(generator, 'generate_pointcloud'): generate_pointcloud = False print('Warning: generator does not support pointcloud generation.') # Loader test_loader = torch.utils.data.DataLoader( dataset, batch_size=1, num_workers=0, shuffle=False) # Statistics time_dicts = [] # Generate model.eval() # Count how many models already created model_counter = defaultdict(int) for it, data in enumerate(tqdm(test_loader)): #Output folders mesh_dir = os.path.join(generation_dir, 'meshes') pointcloud_dir = os.path.join(generation_dir, 'pointcloud') in_dir = os.path.join(generation_dir, 'input') generation_vis_dir = os.path.join(generation_dir, 'vis', ) # Get index etc. 
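    # (the loader above uses batch_size=1, so data['idx'] holds exactly one dataset index)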
idx = data['idx'].item() try: model_dict = dataset.get_model_dict(idx) except AttributeError: model_dict = {'model': str(idx), 'category': 'n/a'} modelname = model_dict['model'] category_id = model_dict.get('category', 'n/a') try: category_name = dataset.metadata[category_id].get('name', 'n/a') except AttributeError: category_name = 'n/a' if category_id != 'n/a': mesh_dir = os.path.join(mesh_dir, str(category_id)) pointcloud_dir = os.path.join(pointcloud_dir, str(category_id)) in_dir = os.path.join(in_dir, str(category_id)) folder_name = str(category_id) if category_name != 'n/a': folder_name = str(folder_name) + '_' + category_name.split(',')[0] generation_vis_dir = os.path.join(generation_vis_dir, folder_name) # Create directories if necessary if vis_n_outputs >= 0 and not os.path.exists(generation_vis_dir): os.makedirs(generation_vis_dir) if generate_mesh and not os.path.exists(mesh_dir): os.makedirs(mesh_dir) if generate_pointcloud and not os.path.exists(pointcloud_dir): os.makedirs(pointcloud_dir) if not os.path.exists(in_dir): os.makedirs(in_dir) # Timing dict time_dict = { 'idx': idx, 'class id': category_id, 'class name': category_name, 'modelname': modelname, } time_dicts.append(time_dict) # Generate outputs out_file_dict = {} # Also copy ground truth if cfg['generation']['copy_groundtruth']: modelpath = os.path.join( dataset.dataset_folder, category_id, modelname, cfg['data']['watertight_file']) out_file_dict['gt'] = modelpath if generate_mesh: t0 = time.time() #Part of the code where we rotate objects out,_, rotation = generator.generate_rotated_mesh(data, DEGREES = DEGREES, save_rotation_tensor = rotate_planes) time_dict['mesh'] = time.time() - t0 # Get statistics try: mesh, stats_dict = out except TypeError: mesh, stats_dict = out, {} time_dict.update(stats_dict) # Write output mesh_out_file = os.path.join(mesh_dir, '%s.off' % modelname) mesh.export(mesh_out_file) rotation_out_file = os.path.join(mesh_dir, '%s_rotation.pckl' % modelname) with open(rotation_out_file, 'wb') as f: pickle.dump(rotation, f) out_file_dict['mesh'] = mesh_out_file if generate_pointcloud: t0 = time.time() pointcloud = generator.generate_pointcloud(data) time_dict['pcl'] = time.time() - t0 pointcloud_out_file = os.path.join( pointcloud_dir, '%s.ply' % modelname) export_pointcloud(pointcloud, pointcloud_out_file) out_file_dict['pointcloud'] = pointcloud_out_file if cfg['generation']['copy_input']: # Save inputs if input_type == 'pointcloud' or 'partial_pointcloud': inputs_path = os.path.join(in_dir, '%s.ply' % modelname) inputs = data['inputs'].squeeze(0).cpu().numpy() export_pointcloud(inputs, inputs_path, False) out_file_dict['in'] = inputs_path # Copy to visualization directory for first vis_n_output samples c_it = model_counter[category_id] if c_it < vis_n_outputs: # Save output files img_name = '%02d.off' % c_it for k, filepath in out_file_dict.items(): ext = os.path.splitext(filepath)[1] out_file = os.path.join(generation_vis_dir, '%02d_%s%s' % (c_it, k, ext)) shutil.copyfile(filepath, out_file) model_counter[category_id] += 1 # Create pandas dataframe and save time_df = pd.DataFrame(time_dicts) time_df.set_index(['idx'], inplace=True) time_df.to_pickle(out_time_file) # Create pickle files with main statistics time_df_class = time_df.groupby(by=['class name']).mean() time_df_class.to_pickle(out_time_file_class) # Print results time_df_class.loc['mean'] = time_df_class.mean() print('Timings [s]:') print(time_df_class)
[ "pickle.dump", "argparse.ArgumentParser", "src.config.get_dataset", "collections.defaultdict", "torch.device", "os.path.join", "pandas.DataFrame", "torch.utils.data.DataLoader", "src.checkpoints.CheckpointIO", "src.config.get_model", "os.path.exists", "shutil.copyfile", "tqdm.tqdm", "torch.cuda.is_available", "src.config.get_generator", "src.config.load_config", "os.makedirs", "src.utils.io.export_pointcloud", "time.time", "os.path.splitext" ]
[((386, 463), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract meshes from occupancy process."""'}), "(description='Extract meshes from occupancy process.')\n", (409, 463), False, 'import argparse\n'), ((754, 809), 'src.config.load_config', 'config.load_config', (['args.config', '"""configs/default.yaml"""'], {}), "(args.config, 'configs/default.yaml')\n", (772, 809), False, 'from src import config\n'), ((915, 957), 'torch.device', 'torch.device', (["('cuda' if is_cuda else 'cpu')"], {}), "('cuda' if is_cuda else 'cpu')\n", (927, 957), False, 'import torch\n'), ((1013, 1071), 'os.path.join', 'os.path.join', (['out_dir', "cfg['generation']['generation_dir']"], {}), "(out_dir, cfg['generation']['generation_dir'])\n", (1025, 1071), False, 'import os\n'), ((1088, 1144), 'os.path.join', 'os.path.join', (['generation_dir', '"""time_generation_full.pkl"""'], {}), "(generation_dir, 'time_generation_full.pkl')\n", (1100, 1144), False, 'import os\n'), ((1167, 1218), 'os.path.join', 'os.path.join', (['generation_dir', '"""time_generation.pkl"""'], {}), "(generation_dir, 'time_generation.pkl')\n", (1179, 1218), False, 'import os\n'), ((1453, 1501), 'src.config.get_dataset', 'config.get_dataset', (['"""test"""', 'cfg'], {'return_idx': '(True)'}), "('test', cfg, return_idx=True)\n", (1471, 1501), False, 'from src import config\n'), ((1519, 1572), 'src.config.get_model', 'config.get_model', (['cfg'], {'device': 'device', 'dataset': 'dataset'}), '(cfg, device=device, dataset=dataset)\n', (1535, 1572), False, 'from src import config\n'), ((1590, 1624), 'src.checkpoints.CheckpointIO', 'CheckpointIO', (['out_dir'], {'model': 'model'}), '(out_dir, model=model)\n', (1602, 1624), False, 'from src.checkpoints import CheckpointIO\n'), ((1696, 1743), 'src.config.get_generator', 'config.get_generator', (['model', 'cfg'], {'device': 'device'}), '(model, cfg, device=device)\n', (1716, 1743), False, 'from src import config\n'), ((2247, 2332), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(1)', 'num_workers': '(0)', 'shuffle': '(False)'}), '(dataset, batch_size=1, num_workers=0, shuffle=False\n )\n', (2274, 2332), False, 'import torch\n'), ((2449, 2465), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2460, 2465), False, 'from collections import defaultdict\n'), ((6620, 6644), 'pandas.DataFrame', 'pd.DataFrame', (['time_dicts'], {}), '(time_dicts)\n', (6632, 6644), True, 'import pandas as pd\n'), ((821, 846), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (844, 846), False, 'import torch\n'), ((2493, 2510), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (2497, 2510), False, 'from tqdm import tqdm\n'), ((2548, 2586), 'os.path.join', 'os.path.join', (['generation_dir', '"""meshes"""'], {}), "(generation_dir, 'meshes')\n", (2560, 2586), False, 'import os\n'), ((2608, 2650), 'os.path.join', 'os.path.join', (['generation_dir', '"""pointcloud"""'], {}), "(generation_dir, 'pointcloud')\n", (2620, 2650), False, 'import os\n'), ((2664, 2701), 'os.path.join', 'os.path.join', (['generation_dir', '"""input"""'], {}), "(generation_dir, 'input')\n", (2676, 2701), False, 'import os\n'), ((2727, 2762), 'os.path.join', 'os.path.join', (['generation_dir', '"""vis"""'], {}), "(generation_dir, 'vis')\n", (2739, 2762), False, 'import os\n'), ((3597, 3642), 'os.path.join', 'os.path.join', (['generation_vis_dir', 'folder_name'], {}), '(generation_vis_dir, folder_name)\n', (3609, 3642), False, 'import os\n'), ((3760, 3791), 'os.makedirs', 'os.makedirs', (['generation_vis_dir'], {}), '(generation_vis_dir)\n', (3771, 3791), False, 'import os\n'), ((3856, 3877), 'os.makedirs', 'os.makedirs', (['mesh_dir'], {}), '(mesh_dir)\n', (3867, 3877), False, 'import os\n'), ((3954, 3981), 'os.makedirs', 'os.makedirs', (['pointcloud_dir'], {}), '(pointcloud_dir)\n', (3965, 3981), False, 'import os\n'), ((4000, 4022), 'os.path.exists', 'os.path.exists', (['in_dir'], {}), '(in_dir)\n', (4014, 4022), False, 'import os\n'), ((4032, 4051), 'os.makedirs', 'os.makedirs', (['in_dir'], {}), '(in_dir)\n', (4043, 4051), False, 'import os\n'), ((4397, 4494), 'os.path.join', 'os.path.join', (['dataset.dataset_folder', 'category_id', 'modelname', "cfg['data']['watertight_file']"], {}), "(dataset.dataset_folder, category_id, modelname, cfg['data'][\n 'watertight_file'])\n", (4409, 4494), False, 'import os\n'), ((4592, 4603), 'time.time', 'time.time', ([], {}), '()\n', (4601, 4603), False, 'import time\n'), ((5044, 5088), 'os.path.join', 'os.path.join', (['mesh_dir', "('%s.off' % modelname)"], {}), "(mesh_dir, '%s.off' % modelname)\n", (5056, 5088), False, 'import os\n'), ((5153, 5207), 'os.path.join', 'os.path.join', (['mesh_dir', "('%s_rotation.pckl' % modelname)"], {}), "(mesh_dir, '%s_rotation.pckl' % modelname)\n", (5165, 5207), False, 'import os\n'), ((5382, 5393), 'time.time', 'time.time', ([], {}), '()\n', (5391, 5393), False, 'import time\n'), ((5525, 5575), 'os.path.join', 'os.path.join', (['pointcloud_dir', "('%s.ply' % modelname)"], {}), "(pointcloud_dir, '%s.ply' % modelname)\n", (5537, 5575), False, 'import os\n'), ((5597, 5647), 'src.utils.io.export_pointcloud', 'export_pointcloud', (['pointcloud', 'pointcloud_out_file'], {}), '(pointcloud, pointcloud_out_file)\n', (5614, 5647), False, 'from src.utils.io import export_pointcloud\n'), ((3716, 3750), 'os.path.exists', 'os.path.exists', (['generation_vis_dir'], {}), '(generation_vis_dir)\n', (3730, 3750), False, 'import os\n'), ((3822, 3846), 'os.path.exists', 'os.path.exists', (['mesh_dir'], {}), '(mesh_dir)\n', (3836, 3846), False, 'import os\n'), ((3914, 3944), 'os.path.exists', 'os.path.exists', (['pointcloud_dir'], {}), '(pointcloud_dir)\n', (3928, 3944), False, 'import os\n'), ((4803, 4814), 'time.time', 'time.time', ([], {}), '()\n', (4812, 4814), False, 'import time\n'), ((5269, 5293), 'pickle.dump', 'pickle.dump', (['rotation', 'f'], {}), '(rotation, f)\n', (5280, 5293), False, 'import pickle\n'), ((5478, 5489), 'time.time', 'time.time', ([], {}), '()\n', (5487, 5489), False, 'import time\n'), ((5859, 5901), 'os.path.join', 'os.path.join', (['in_dir', "('%s.ply' % modelname)"], {}), "(in_dir, '%s.ply' % modelname)\n", (5871, 5901), False, 'import os\n'), ((5975, 6020), 'src.utils.io.export_pointcloud', 'export_pointcloud', (['inputs', 'inputs_path', '(False)'], {}), '(inputs, inputs_path, False)\n', (5992, 6020), False, 'from src.utils.io import export_pointcloud\n'), ((6390, 6452), 'os.path.join', 'os.path.join', (['generation_vis_dir', "('%02d_%s%s' % (c_it, k, ext))"], {}), "(generation_vis_dir, '%02d_%s%s' % (c_it, k, ext))\n", (6402, 6452), False, 'import os\n'), ((6501, 6536), 'shutil.copyfile', 'shutil.copyfile', (['filepath', 'out_file'], {}), '(filepath, out_file)\n', (6516, 6536), False, 'import shutil\n'), ((6337, 6363), 'os.path.splitext', 'os.path.splitext', (['filepath'], {}), '(filepath)\n', (6353, 6363), False, 'import os\n')]
import torch import numpy as np import torch.nn as nn from math import ceil from torch.autograd import Variable from ptsemseg import caffe_pb2 from ptsemseg.models.utils import * from ptsemseg.loss import * pspnet_specs = { 'pascalvoc': { 'n_classes': 21, 'input_size': (473, 473), 'block_config': [3, 4, 23, 3], }, 'cityscapes': { 'n_classes': 19, 'input_size': (713, 713), 'block_config': [3, 4, 23, 3], }, 'ade20k': { 'n_classes': 150, 'input_size': (473, 473), 'block_config': [3, 4, 6, 3], }, } class pspnet(nn.Module): """ Pyramid Scene Parsing Network URL: https://arxiv.org/abs/1612.01105 References: 1) Original Author's code: https://github.com/hszhao/PSPNet 2) Chainer implementation by @mitmul: https://github.com/mitmul/chainer-pspnet 3) TensorFlow implementation by @hellochick: https://github.com/hellochick/PSPNet-tensorflow Visualization: http://dgschwend.github.io/netscope/#/gist/6bfb59e6a3cfcb4e2bb8d47f827c2928 """ def __init__(self, n_classes=21, block_config=[3, 4, 23, 3], input_size=(473,473), version=None): super(pspnet, self).__init__() self.block_config = pspnet_specs[version]['block_config'] if version is not None else block_config self.n_classes = pspnet_specs[version]['n_classes'] if version is not None else n_classes self.input_size = pspnet_specs[version]['input_size'] if version is not None else input_size # Encoder self.convbnrelu1_1 = conv2DBatchNormRelu(in_channels=3, k_size=3, n_filters=64, padding=1, stride=2, bias=False) self.convbnrelu1_2 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=64, padding=1, stride=1, bias=False) self.convbnrelu1_3 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=128, padding=1, stride=1, bias=False) # Vanilla Residual Blocks self.res_block2 = residualBlockPSP(self.block_config[0], 128, 64, 256, 1, 1) self.res_block3 = residualBlockPSP(self.block_config[1], 256, 128, 512, 2, 1) # Dilated Residual Blocks self.res_block4 = residualBlockPSP(self.block_config[2], 512, 256, 1024, 1, 2) self.res_block5 = residualBlockPSP(self.block_config[3], 1024, 512, 2048, 1, 4) # Pyramid Pooling Module self.pyramid_pooling = pyramidPooling(2048, [6, 3, 2, 1]) # Final conv layers self.cbr_final = conv2DBatchNormRelu(4096, 512, 3, 1, 1, False) self.dropout = nn.Dropout2d(p=0.1, inplace=True) self.classification = nn.Conv2d(512, self.n_classes, 1, 1, 0) # Auxiliary layers for training self.convbnrelu4_aux = conv2DBatchNormRelu(in_channels=1024, k_size=3, n_filters=256, padding=1, stride=1, bias=False) self.aux_cls = nn.Conv2d(256, self.n_classes, 1, 1, 0) # Define auxiliary loss function self.loss = multi_scale_cross_entropy2d def forward(self, x): inp_shape = x.shape[2:] # H, W -> H/2, W/2 x = self.convbnrelu1_1(x) x = self.convbnrelu1_2(x) x = self.convbnrelu1_3(x) # H/2, W/2 -> H/4, W/4 x = F.max_pool2d(x, 3, 2, 1) # H/4, W/4 -> H/8, W/8 x = self.res_block2(x) x = self.res_block3(x) x = self.res_block4(x) # Auxiliary layers for training x_aux = self.convbnrelu4_aux(x) x_aux = self.dropout(x_aux) x_aux = self.aux_cls(x_aux) x = self.res_block5(x) x = self.pyramid_pooling(x) x = self.cbr_final(x) x = self.dropout(x) x = self.classification(x) x = F.upsample(x, size=inp_shape, mode='bilinear') if self.training: return x_aux, x else: # eval mode return x def load_pretrained_model(self, model_path): """ Load weights from caffemodel w/o caffe dependency and plug them in corresponding modules """ # My eyes and my heart both hurt when writing this method # Only care about layer_types that have trainable parameters ltypes = ['BNData', 
'ConvolutionData', 'HoleConvolutionData'] def _get_layer_params(layer, ltype): if ltype == 'BNData': gamma = np.array(layer.blobs[0].data) beta = np.array(layer.blobs[1].data) mean = np.array(layer.blobs[2].data) var = np.array(layer.blobs[3].data) return [mean, var, gamma, beta] elif ltype in ['ConvolutionData', 'HoleConvolutionData']: is_bias = layer.convolution_param.bias_term weights = np.array(layer.blobs[0].data) bias = [] if is_bias: bias = np.array(layer.blobs[1].data) return [weights, bias] elif ltype == 'InnerProduct': raise Exception("Fully connected layers {}, not supported".format(ltype)) else: raise Exception("Unkown layer type {}".format(ltype)) net = caffe_pb2.NetParameter() with open(model_path, 'rb') as model_file: net.MergeFromString(model_file.read()) # dict formatted as -> key:<layer_name> :: value:<layer_type> layer_types = {} # dict formatted as -> key:<layer_name> :: value:[<list_of_params>] layer_params = {} for l in net.layer: lname = l.name ltype = l.type if ltype in ltypes: print("Processing layer {}".format(lname)) layer_types[lname] = ltype layer_params[lname] = _get_layer_params(l, ltype) # Set affine=False for all batchnorm modules def _no_affine_bn(module=None): if isinstance(module, nn.BatchNorm2d): module.affine = False if len([m for m in module.children()]) > 0: for child in module.children(): _no_affine_bn(child) #_no_affine_bn(self) def _transfer_conv(layer_name, module): weights, bias = layer_params[layer_name] w_shape = np.array(module.weight.size()) print("CONV {}: Original {} and trans weights {}".format(layer_name, w_shape, weights.shape)) module.weight.data.copy_(torch.from_numpy(weights).view_as(module.weight)) if len(bias) != 0: b_shape = np.array(module.bias.size()) print("CONV {}: Original {} and trans bias {}".format(layer_name, b_shape, bias.shape)) module.bias.data.copy_(torch.from_numpy(bias).view_as(module.bias)) def _transfer_conv_bn(conv_layer_name, mother_module): conv_module = mother_module[0] bn_module = mother_module[1] _transfer_conv(conv_layer_name, conv_module) mean, var, gamma, beta = layer_params[conv_layer_name+'/bn'] print("BN {}: Original {} and trans weights {}".format(conv_layer_name, bn_module.running_mean.size(), mean.shape)) bn_module.running_mean.copy_(torch.from_numpy(mean).view_as(bn_module.running_mean)) bn_module.running_var.copy_(torch.from_numpy(var).view_as(bn_module.running_var)) bn_module.weight.data.copy_(torch.from_numpy(gamma).view_as(bn_module.weight)) bn_module.bias.data.copy_(torch.from_numpy(beta).view_as(bn_module.bias)) def _transfer_residual(prefix, block): block_module, n_layers = block[0], block[1] bottleneck = block_module.layers[0] bottleneck_conv_bn_dic = {prefix + '_1_1x1_reduce': bottleneck.cbr1.cbr_unit, prefix + '_1_3x3': bottleneck.cbr2.cbr_unit, prefix + '_1_1x1_proj': bottleneck.cb4.cb_unit, prefix + '_1_1x1_increase': bottleneck.cb3.cb_unit,} for k, v in bottleneck_conv_bn_dic.items(): _transfer_conv_bn(k, v) for layer_idx in range(2, n_layers+1): residual_layer = block_module.layers[layer_idx-1] residual_conv_bn_dic = {'_'.join(map(str, [prefix, layer_idx, '1x1_reduce'])): residual_layer.cbr1.cbr_unit, '_'.join(map(str, [prefix, layer_idx, '3x3'])): residual_layer.cbr2.cbr_unit, '_'.join(map(str, [prefix, layer_idx, '1x1_increase'])): residual_layer.cb3.cb_unit,} for k, v in residual_conv_bn_dic.items(): _transfer_conv_bn(k, v) convbn_layer_mapping = {'conv1_1_3x3_s2': self.convbnrelu1_1.cbr_unit, 'conv1_2_3x3': self.convbnrelu1_2.cbr_unit, 'conv1_3_3x3': self.convbnrelu1_3.cbr_unit, 
'conv5_3_pool6_conv': self.pyramid_pooling.paths[0].cbr_unit, 'conv5_3_pool3_conv': self.pyramid_pooling.paths[1].cbr_unit, 'conv5_3_pool2_conv': self.pyramid_pooling.paths[2].cbr_unit, 'conv5_3_pool1_conv': self.pyramid_pooling.paths[3].cbr_unit, 'conv5_4': self.cbr_final.cbr_unit, 'conv4_' + str(self.block_config[2]+1): self.convbnrelu4_aux.cbr_unit,} # Auxiliary layers for training residual_layers = {'conv2': [self.res_block2, self.block_config[0]], 'conv3': [self.res_block3, self.block_config[1]], 'conv4': [self.res_block4, self.block_config[2]], 'conv5': [self.res_block5, self.block_config[3]],} # Transfer weights for all non-residual conv+bn layers for k, v in convbn_layer_mapping.items(): _transfer_conv_bn(k, v) # Transfer weights for final non-bn conv layer _transfer_conv('conv6', self.classification) _transfer_conv('conv6_1', self.aux_cls) # Transfer weights for all residual layers for k, v in residual_layers.items(): _transfer_residual(k, v) def tile_predict(self, imgs, include_flip_mode=True): """ Predict by takin overlapping tiles from the image. Strides are adaptively computed from the imgs shape and input size :param imgs: torch.Tensor with shape [N, C, H, W] in BGR format :param side: int with side length of model input :param n_classes: int with number of classes in seg output. """ side_x, side_y = self.input_size n_classes = self.n_classes n_samples, c, h, w = imgs.shape #n = int(max(h,w) / float(side) + 1) n_x = int(h / float(side_x) + 1) n_y = int(w / float(side_y) + 1) stride_x = ( h - side_x ) / float(n_x) stride_y = ( w - side_y ) / float(n_y) x_ends = [[int(i*stride_x), int(i*stride_x) + side_x] for i in range(n_x+1)] y_ends = [[int(i*stride_y), int(i*stride_y) + side_y] for i in range(n_y+1)] pred = np.zeros([n_samples, n_classes, h, w]) count = np.zeros([h, w]) slice_count = 0 for sx, ex in x_ends: for sy, ey in y_ends: slice_count += 1 imgs_slice = imgs[:, :, sx:ex, sy:ey] if include_flip_mode: imgs_slice_flip = torch.from_numpy(np.copy(imgs_slice.cpu().numpy()[:, :, :, ::-1])).float() is_model_on_cuda = next(self.parameters()).is_cuda inp = Variable(imgs_slice, volatile=True) if include_flip_mode: flp = Variable(imgs_slice_flip, volatile=True) if is_model_on_cuda: inp = inp.cuda() if include_flip_mode: flp = flp.cuda() psub1 = F.softmax(self.forward(inp), dim=1).data.cpu().numpy() if include_flip_mode: psub2 = F.softmax(self.forward(flp), dim=1).data.cpu().numpy() psub = (psub1 + psub2[:, :, :, ::-1]) / 2.0 else: psub = psub1 pred[:, :, sx:ex, sy:ey] = psub count[sx:ex, sy:ey] += 1.0 score = (pred / count[None, None, ...]).astype(np.float32) return score / np.expand_dims(score.sum(axis=1), axis=1) # For Testing Purposes only if __name__ == '__main__': cd = 0 import os from torch.autograd import Variable import matplotlib.pyplot as plt import scipy.misc as m from ptsemseg.loader.cityscapes_loader import cityscapesLoader as cl psp = pspnet(version='cityscapes') # Just need to do this one time caffemodel_dir_path = 'PATH_TO_PSPNET_DIR/evaluation/model' psp.load_pretrained_model(model_path=os.path.join(caffemodel_dir_path, 'pspnet101_cityscapes.caffemodel')) #psp.load_pretrained_model(model_path=os.path.join(caffemodel_dir_path, 'pspnet50_ADE20K.caffemodel')) #psp.load_pretrained_model(model_path=os.path.join(caffemodel_dir_path, 'pspnet101_VOC2012.caffemodel')) # psp.load_state_dict(torch.load('psp.pth')) psp.float() psp.cuda(cd) psp.eval() dataset_root_dir = 'PATH_TO_CITYSCAPES_DIR' dst = cl(root=dataset_root_dir) img = m.imread(os.path.join(dataset_root_dir, 
'leftImg8bit/demoVideo/stuttgart_00/stuttgart_00_000000_000010_leftImg8bit.png')) m.imsave('cropped.png', img) orig_size = img.shape[:-1] img = img.transpose(2, 0, 1) img = img.astype(np.float64) img -= np.array([123.68, 116.779, 103.939])[:, None, None] img = np.copy(img[::-1, :, :]) img = torch.from_numpy(img).float() # convert to torch tensor img = img.unsqueeze(0) out = psp.tile_predict(img) pred = np.argmax(out, axis=1)[0] decoded = dst.decode_segmap(pred) m.imsave('cityscapes_sttutgart_tiled.png', decoded) #m.imsave('cityscapes_sttutgart_tiled.png', pred) checkpoints_dir_path = 'checkpoints' if not os.path.exists(checkpoints_dir_path): os.mkdir(checkpoints_dir_path) psp = torch.nn.DataParallel(psp, device_ids=range(torch.cuda.device_count())) # append `module.` state = {'model_state': psp.state_dict()} torch.save(state, os.path.join(checkpoints_dir_path, "pspnet_101_cityscapes.pth")) #torch.save(state, os.path.join(checkpoints_dir_path, "pspnet_50_ade20k.pth")) #torch.save(state, os.path.join(checkpoints_dir_path, "pspnet_101_pascalvoc.pth")) print("Output Shape {} \t Input Shape {}".format(out.shape, img.shape))
[ "os.mkdir", "torch.nn.Dropout2d", "numpy.copy", "numpy.argmax", "torch.autograd.Variable", "torch.nn.Conv2d", "numpy.zeros", "ptsemseg.loader.cityscapes_loader.cityscapesLoader", "os.path.exists", "torch.cuda.device_count", "ptsemseg.caffe_pb2.NetParameter", "numpy.array", "scipy.misc.imsave", "os.path.join", "torch.from_numpy" ]
[((14241, 14266), 'ptsemseg.loader.cityscapes_loader.cityscapesLoader', 'cl', ([], {'root': 'dataset_root_dir'}), '(root=dataset_root_dir)\n', (14243, 14266), True, 'from ptsemseg.loader.cityscapes_loader import cityscapesLoader as cl\n'), ((14403, 14431), 'scipy.misc.imsave', 'm.imsave', (['"""cropped.png"""', 'img'], {}), "('cropped.png', img)\n", (14411, 14431), True, 'import scipy.misc as m\n'), ((14602, 14626), 'numpy.copy', 'np.copy', (['img[::-1, :, :]'], {}), '(img[::-1, :, :])\n', (14609, 14626), True, 'import numpy as np\n'), ((14832, 14883), 'scipy.misc.imsave', 'm.imsave', (['"""cityscapes_sttutgart_tiled.png"""', 'decoded'], {}), "('cityscapes_sttutgart_tiled.png', decoded)\n", (14840, 14883), True, 'import scipy.misc as m\n'), ((2843, 2876), 'torch.nn.Dropout2d', 'nn.Dropout2d', ([], {'p': '(0.1)', 'inplace': '(True)'}), '(p=0.1, inplace=True)\n', (2855, 2876), True, 'import torch.nn as nn\n'), ((2907, 2946), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', 'self.n_classes', '(1)', '(1)', '(0)'], {}), '(512, self.n_classes, 1, 1, 0)\n', (2916, 2946), True, 'import torch.nn as nn\n'), ((3138, 3177), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', 'self.n_classes', '(1)', '(1)', '(0)'], {}), '(256, self.n_classes, 1, 1, 0)\n', (3147, 3177), True, 'import torch.nn as nn\n'), ((5443, 5467), 'ptsemseg.caffe_pb2.NetParameter', 'caffe_pb2.NetParameter', ([], {}), '()\n', (5465, 5467), False, 'from ptsemseg import caffe_pb2\n'), ((11981, 12019), 'numpy.zeros', 'np.zeros', (['[n_samples, n_classes, h, w]'], {}), '([n_samples, n_classes, h, w])\n', (11989, 12019), True, 'import numpy as np\n'), ((12036, 12052), 'numpy.zeros', 'np.zeros', (['[h, w]'], {}), '([h, w])\n', (12044, 12052), True, 'import numpy as np\n'), ((14286, 14406), 'os.path.join', 'os.path.join', (['dataset_root_dir', '"""leftImg8bit/demoVideo/stuttgart_00/stuttgart_00_000000_000010_leftImg8bit.png"""'], {}), "(dataset_root_dir,\n 'leftImg8bit/demoVideo/stuttgart_00/stuttgart_00_000000_000010_leftImg8bit.png'\n )\n", (14298, 14406), False, 'import os\n'), ((14540, 14576), 'numpy.array', 'np.array', (['[123.68, 116.779, 103.939]'], {}), '([123.68, 116.779, 103.939])\n', (14548, 14576), True, 'import numpy as np\n'), ((14764, 14786), 'numpy.argmax', 'np.argmax', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (14773, 14786), True, 'import numpy as np\n'), ((14992, 15028), 'os.path.exists', 'os.path.exists', (['checkpoints_dir_path'], {}), '(checkpoints_dir_path)\n', (15006, 15028), False, 'import os\n'), ((15038, 15068), 'os.mkdir', 'os.mkdir', (['checkpoints_dir_path'], {}), '(checkpoints_dir_path)\n', (15046, 15068), False, 'import os\n'), ((15238, 15301), 'os.path.join', 'os.path.join', (['checkpoints_dir_path', '"""pspnet_101_cityscapes.pth"""'], {}), "(checkpoints_dir_path, 'pspnet_101_cityscapes.pth')\n", (15250, 15301), False, 'import os\n'), ((13793, 13861), 'os.path.join', 'os.path.join', (['caffemodel_dir_path', '"""pspnet101_cityscapes.caffemodel"""'], {}), "(caffemodel_dir_path, 'pspnet101_cityscapes.caffemodel')\n", (13805, 13861), False, 'import os\n'), ((14637, 14658), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (14653, 14658), False, 'import torch\n'), ((4619, 4648), 'numpy.array', 'np.array', (['layer.blobs[0].data'], {}), '(layer.blobs[0].data)\n', (4627, 4648), True, 'import numpy as np\n'), ((4672, 4701), 'numpy.array', 'np.array', (['layer.blobs[1].data'], {}), '(layer.blobs[1].data)\n', (4680, 4701), True, 'import numpy as np\n'), ((4725, 4754), 'numpy.array', 'np.array', 
(['layer.blobs[2].data'], {}), '(layer.blobs[2].data)\n', (4733, 4754), True, 'import numpy as np\n'), ((4778, 4807), 'numpy.array', 'np.array', (['layer.blobs[3].data'], {}), '(layer.blobs[3].data)\n', (4786, 4807), True, 'import numpy as np\n'), ((12484, 12519), 'torch.autograd.Variable', 'Variable', (['imgs_slice'], {'volatile': '(True)'}), '(imgs_slice, volatile=True)\n', (12492, 12519), False, 'from torch.autograd import Variable\n'), ((15123, 15148), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (15146, 15148), False, 'import torch\n'), ((5013, 5042), 'numpy.array', 'np.array', (['layer.blobs[0].data'], {}), '(layer.blobs[0].data)\n', (5021, 5042), True, 'import numpy as np\n'), ((12584, 12624), 'torch.autograd.Variable', 'Variable', (['imgs_slice_flip'], {'volatile': '(True)'}), '(imgs_slice_flip, volatile=True)\n', (12592, 12624), False, 'from torch.autograd import Variable\n'), ((5124, 5153), 'numpy.array', 'np.array', (['layer.blobs[1].data'], {}), '(layer.blobs[1].data)\n', (5132, 5153), True, 'import numpy as np\n'), ((6857, 6882), 'torch.from_numpy', 'torch.from_numpy', (['weights'], {}), '(weights)\n', (6873, 6882), False, 'import torch\n'), ((7930, 7952), 'torch.from_numpy', 'torch.from_numpy', (['mean'], {}), '(mean)\n', (7946, 7952), False, 'import torch\n'), ((8026, 8047), 'torch.from_numpy', 'torch.from_numpy', (['var'], {}), '(var)\n', (8042, 8047), False, 'import torch\n'), ((8120, 8143), 'torch.from_numpy', 'torch.from_numpy', (['gamma'], {}), '(gamma)\n', (8136, 8143), False, 'import torch\n'), ((8209, 8231), 'torch.from_numpy', 'torch.from_numpy', (['beta'], {}), '(beta)\n', (8225, 8231), False, 'import torch\n'), ((7277, 7299), 'torch.from_numpy', 'torch.from_numpy', (['bias'], {}), '(bias)\n', (7293, 7299), False, 'import torch\n')]
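# --- Illustrative sketch (not part of the file above): the overlapping-tile
# averaging scheme used by tile_predict, reduced to plain numpy with a stand-in
# `model` callable instead of a real network. `tile_average` is a hypothetical
# helper name, not an API of the original module.
import numpy as np

def tile_average(img, side, model):
    """Run `model` over overlapping side x side tiles and average the overlaps."""
    c, h, w = img.shape
    n_x, n_y = int(h / float(side) + 1), int(w / float(side) + 1)
    stride_x, stride_y = (h - side) / float(n_x), (w - side) / float(n_y)
    pred = np.zeros((h, w))
    count = np.zeros((h, w))
    for i in range(n_x + 1):
        for j in range(n_y + 1):
            sx, sy = int(i * stride_x), int(j * stride_y)
            pred[sx:sx + side, sy:sy + side] += model(img[:, sx:sx + side, sy:sy + side])
            count[sx:sx + side, sy:sy + side] += 1.0
    return pred / count  # every pixel is covered by at least one tile

if __name__ == '__main__':
    out = tile_average(np.random.rand(3, 100, 120), side=64,
                       model=lambda t: t.mean(axis=0))
    print(out.shape)  # (100, 120)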
#!/usr/bin/env python # -*- coding:utf-8 -*- import doctest import test doctest.testmod(test, optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS | doctest.REPORT_ONLY_FIRST_FAILURE )
[ "doctest.testmod" ]
[((80, 203), 'doctest.testmod', 'doctest.testmod', (['test'], {'optionflags': '(doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS | doctest.\n REPORT_ONLY_FIRST_FAILURE)'}), '(test, optionflags=doctest.NORMALIZE_WHITESPACE | doctest.\n ELLIPSIS | doctest.REPORT_ONLY_FIRST_FAILURE)\n', (95, 203), False, 'import doctest\n')]
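# --- Illustrative sketch (not part of the file above): the same testmod flags
# applied to examples embedded in a docstring. `add` is a throwaway function
# invented for the demonstration.
import doctest

def add(a, b):
    """Return a + b.

    >>> add(2, 3)
    5
    >>> add('a', 'b')
    'ab'
    """
    return a + b

if __name__ == '__main__':
    # NORMALIZE_WHITESPACE ignores whitespace differences, ELLIPSIS allows
    # "..." in expected output, REPORT_ONLY_FIRST_FAILURE stops after one failure
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE |
                    doctest.ELLIPSIS |
                    doctest.REPORT_ONLY_FIRST_FAILURE)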
import json

from requests import Response as RequestsResponse
from flask import Response as FlaskResponse


class Handler:
    @staticmethod
    def response(response: RequestsResponse) -> FlaskResponse:
        try:
            return FlaskResponse(response.text, response.status_code, headers=response.headers.items())
        except Exception as e:
            # The error payload must be serialized to a string; a raw dict
            # (or exception object) is not a valid Flask response body.
            return FlaskResponse(json.dumps({'text': str(e)}), 500, {'Content-Type': 'application/json'})

    @staticmethod
    def make_response(object_, status) -> FlaskResponse:
        if type(object_) in [dict, list]:
            return FlaskResponse(json.dumps(object_), status, {'Content-Type': 'application/json'})
        elif type(object_) in [str]:
            return FlaskResponse(object_, status, {'Content-Type': 'text/html'})
        else:
            return FlaskResponse(json.dumps({'text': 'Error in parsing the response object.'}), 500, {'Content-Type': 'text/html'})

    @staticmethod
    def freeze(object_):
        if isinstance(object_, dict):
            return frozenset((key, Handler.freeze(value)) for key, value in object_.items())
        elif isinstance(object_, list):
            return tuple(Handler.freeze(value) for value in object_)
        return object_
[ "flask.Response", "json.dumps" ]
[((373, 404), 'flask.Response', 'FlaskResponse', (["{'text': e}", '(500)'], {}), "({'text': e}, 500)\n", (386, 404), True, 'from flask import Response as FlaskResponse\n'), ((565, 584), 'json.dumps', 'json.dumps', (['object_'], {}), '(object_)\n', (575, 584), False, 'import json\n'), ((688, 749), 'flask.Response', 'FlaskResponse', (['object_', 'status', "{'Content-Type': 'text/html'}"], {}), "(object_, status, {'Content-Type': 'text/html'})\n", (701, 749), True, 'from flask import Response as FlaskResponse\n'), ((797, 858), 'json.dumps', 'json.dumps', (["{'text': 'Error in parsing the response object.'}"], {}), "({'text': 'Error in parsing the response object.'})\n", (807, 858), False, 'import json\n')]
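# --- Illustrative sketch (standalone re-statement, not part of the file above):
# why Handler.freeze is useful -- frozensets and tuples are hashable, so nested
# JSON-like payloads can be deduplicated or used as dict keys.
def freeze(obj):
    if isinstance(obj, dict):
        return frozenset((k, freeze(v)) for k, v in obj.items())
    if isinstance(obj, list):
        return tuple(freeze(v) for v in obj)
    return obj

seen = set()
for payload in [{'a': [1, 2]}, {'a': [1, 2]}, {'a': [2, 1]}]:
    seen.add(freeze(payload))
print(len(seen))  # 2 -- the first two payloads collapse to one entry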
#!/usr/bin/env python
# coding: utf-8

# In[8]:


import pandas as pd
from sklearn import tree


# In[9]:


# A raw string keeps the backslashes in the Windows path from being
# interpreted as escape sequences
df = pd.read_csv(r"E:\GIT\-CSE-0408-Summer-2021\Final\Decision\Tahmina.csv")


# In[10]:


x = df.iloc[:, :-1]


# In[11]:


x


# In[12]:


y = df.iloc[:, 3]


# In[13]:


y


# In[14]:


classify_ = tree.DecisionTreeClassifier()


# In[15]:


classify_ = classify_.fit(x, y)


# In[16]:


prediction_ = classify_.predict([[190, 70, 43]])


# In[17]:


prediction_


# In[ ]:
[ "pandas.read_csv", "sklearn.tree.DecisionTreeClassifier" ]
[((113, 188), 'pandas.read_csv', 'pd.read_csv', (['"""E:\\\\GIT\\\\-CSE-0408-Summer-2021\\\\Final\\\\Decision\\\\Tahmina.csv"""'], {}), "('E:\\\\GIT\\\\-CSE-0408-Summer-2021\\\\Final\\\\Decision\\\\Tahmina.csv')\n", (124, 188), True, 'import pandas as pd\n'), ((304, 333), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (331, 333), False, 'from sklearn import tree\n')]
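# --- Illustrative sketch (not part of the file above): a self-contained variant
# of the same fit/predict flow with an inline DataFrame instead of the local
# CSV. The column names are assumptions -- the real Tahmina.csv schema is not
# shown -- chosen to mirror the [190, 70, 43] query.
import pandas as pd
from sklearn import tree

df = pd.DataFrame({
    'height': [190, 160, 175, 150],
    'weight': [70, 50, 68, 45],
    'shoe':   [43, 37, 41, 36],
    'label':  ['male', 'female', 'male', 'female'],
})
x = df.iloc[:, :-1]   # all columns except the label
y = df.iloc[:, 3]     # the label column
clf = tree.DecisionTreeClassifier().fit(x, y)
print(clf.predict([[190, 70, 43]]))  # ['male'] -- matches a training row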
import random from link import Link from neuro import Neuro class Network: def __init__(self, *args): self.__nlayers = len(args) self.__neuros = args self.__layers = [] for i in range(self.__nlayers): self.__layers.append( [Neuro([],[]) for n in range(self.__neuros[i])] ) for i in range(self.__nlayers): for neuro in self.__layers[i]: list_in = 0 if i == 0 else [ Link(n_in, neuro, random.random()) for n_in in self.__layers[i-1] ] list_out = 0 if i == self.__nlayers-1 else [Link(neuro, n_out, random.random()) for n_out in self.__layers[i+1]] neuro.list_in = list_in neuro.list_out = list_out def run(self, v): for neuro, imp in zip(self.__layers[0], v): neuro.value = neuro.list_in = imp for i in range(1, self.__nlayers): for neuro in self.__layers[i]: v = [ (link.n_in.value*link.w) for link in neuro.list_in ] neuro.value = neuro.act( sum(v) ) def output(self): return [ neuro.value for neuro in self.__layers[-1] ]
[ "random.random", "neuro.Neuro" ]
[((279, 292), 'neuro.Neuro', 'Neuro', (['[]', '[]'], {}), '([], [])\n', (284, 292), False, 'from neuro import Neuro\n'), ((483, 498), 'random.random', 'random.random', ([], {}), '()\n', (496, 498), False, 'import random\n'), ((612, 627), 'random.random', 'random.random', ([], {}), '()\n', (625, 627), False, 'import random\n')]
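# --- Illustrative sketch (not part of the files above): the Link/Neuro classes
# live in separate modules, so this framework-free snippet re-creates the same
# fully connected forward pass -- random weights, an activation applied to each
# weighted sum. The sigmoid is an assumption; Neuro.act is not shown.
import math
import random

def forward(layer_sizes, inputs, act=lambda s: 1.0 / (1.0 + math.exp(-s))):
    values = list(inputs)
    for size in layer_sizes[1:]:
        # one weight per incoming value, for each neuron in this layer
        weights = [[random.random() for _ in values] for _ in range(size)]
        values = [act(sum(v * w for v, w in zip(values, ws))) for ws in weights]
    return values

random.seed(0)
print(forward([3, 4, 2], [0.5, -0.2, 0.8]))  # two output activations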
#---- Code Surveyor, Copyright 2020 <NAME>, MIT License ''' Surveyor Job Executes a measurement job against a folder tree, using jobworker processes to read files and delegate measurement tasks to Surveyor modules. ''' import os import time import multiprocessing from queue import Empty, Full from code_surveyor.framework import log # No relative path to share module globals from . import jobworker from . import jobout from . import folderwalk from . import utils # Prefixing files/folders to ignore with '.' is almost universal now DEFAULT_FOLDERS_TO_SKIP = ['.?*'] DEFAULT_FILES_TO_SKIP = ['.?*'] # Leave a core open for the main app and jobOut threads, which # running under the command shell process DEFAULT_NUM_WORKERS = max(1, multiprocessing.cpu_count()-1) # Seconds to wait at various points MAIN_PROCESSING_SLEEP = 0.2 WORKER_EXIT_TIMEOUT = 0.4 WORKER_EXIT_TRIES = 8 JOB_EXIT_TIMEOUT = 1.0 TASK_FULL_TIMEOUT = 0.4 # Max number of files and size in bytes that will be sent to a worker # Smaller values result in more multiprocessing overhead, while larger # values risk not providing a good distribution of files across cores # if file/folder sizes vary widely QUEUE_PACKAGE_MAX_ITEMS = 256 QUEUE_PACKAGE_MAX_BYTES = 256000 # Number of unfiltered files before sending a work package # This prevents a small work package from not being sent if # searching through a large number of files not being measured MAX_FILES_BEFORE_SEND = 256 class Options( object ): ''' Holds options that define the execution of a job ''' def __init__(self): self.pathsToMeasure = [] self.fileFilters = [] self.deltaPath = None self.includeFolders = [] self.skipFolders = DEFAULT_FOLDERS_TO_SKIP self.skipFiles = DEFAULT_FILES_TO_SKIP self.recursive = True self.numWorkers = DEFAULT_NUM_WORKERS self.breakOnError = False self.configInfoOnly = False self.profileName = None class Job( object ): ''' One Job object is created and run by the main app. The main thread walks the folder tree placing files in task queue. The out thread gets output from the output work queue and calls back to the application (display update and writing to the output file occurs on this output thread) ''' def __init__(self, configStack, options, file_measured_callback, status_callback): # Options define the life a job and cannot be modified self._options = options # All UI output is done through the status callback self._status_callback = status_callback # Keep track of (and allow access to) raw file metrics self.numFolders = 0 self.numUnfilteredFiles = 0 self.numFilteredFiles = 0 self.numFilesToProcess = 0 # Exceptions that occurred in workers are collected and displayed # Unlike errors, exceptions will not generate rows in output self.exceptions = [] # Queues to communicate with Workers, and the output thread self._taskQueue = multiprocessing.Queue() self._controlQueue = multiprocessing.Queue() self._outQueue = multiprocessing.Queue() self._outThread = jobout.OutThread( self._outQueue, self._controlQueue, self._options.profileName, file_measured_callback) # Create max number of workers (they will be started later as needed) assert self._options.numWorkers > 0, "Less than 1 worker requested!" 
context = (log.get_context(), self._options.profileName) self._workers = self.Workers( self._controlQueue, self._taskQueue, self._outQueue, context, self._options.numWorkers) log.msg(1, "Created {} workers".format(self._workers.num_max())) # Create our object for tracking state of folder walking self._pathsToMeasure = options.pathsToMeasure self._folderWalker = folderwalk.FolderWalker( options.deltaPath, configStack, options.recursive, options.includeFolders, options.skipFolders, options.fileFilters, options.skipFiles, self.add_folder_files) # Utility object for managing work packages; holds the state of the # work package that is being prepared for sending to queue self._workPackage = self.WorkPackage() # Other processing state self._continueProcessing = True self._taskPackagesSent = 0 self._filesSinceLastSend = 0 #------------------------------------------------------------------------- def add_folder_files(self, currentDir, deltaPath, filesAndConfigs, numUnfilteredFiles): ''' Callback from folderwalk that puts a set of filesAndConfigItems into one or more WorkPackages to send to jobs. At this point files have already been filtered against both job options and the config items. ''' self.numFolders += 1 self.numUnfilteredFiles += numUnfilteredFiles self._filesSinceLastSend += numUnfilteredFiles self.numFilteredFiles += len(filesAndConfigs) if self._options.configInfoOnly: self._config_info_display(currentDir, filesAndConfigs) else: self._put_files_in_queue(currentDir, deltaPath, filesAndConfigs) self._status_callback() return self._check_command() #------------------------------------------------------------------------- def run(self): self._outThread.start() self._fill_work_queue() self._wait_process_packages() self._wait_output_finish() self._wait_then_exit() def _fill_work_queue(self): log.cc(1, "Starting to fill task queue...") for pathToMeasure in self._pathsToMeasure: if self._check_command(): self._folderWalker.walk(pathToMeasure) if self._check_command() and self._workPackage.size_items() > 0: self._send_current_package() def _wait_process_packages(self): log.cc(1, "Task queue complete, waiting for workers to finish...") while self._check_command() and self._task_queue_size() > 0: time.sleep(MAIN_PROCESSING_SLEEP) self._status_callback() log.cc(2, "Task queue size: " + str(self._task_queue_size())) def _wait_output_finish(self): log.cc(1, "Workers finished, waiting for output to finish...") self._send_output_command('WORK_DONE') while self._check_command() or self._outThread.is_alive(): self._outThread.join(JOB_EXIT_TIMEOUT) self._status_callback() self._continueProcessing = not bool(self._controlQueue.empty()) def _wait_then_exit(self): log.cc(1, "Waiting to cleanup workers and output thread...") self._send_workers_command('EXIT') self._send_output_command('EXIT') for worker in self._workers(): tries = 0 while worker.is_alive() and tries < WORKER_EXIT_TRIES: self._status_callback() worker.join(WORKER_EXIT_TIMEOUT) log.cc(2, "Worker {} is_alive: {}".format( worker.name, worker.is_alive())) self._check_command() tries += 1 self._outThread.join(JOB_EXIT_TIMEOUT) self._close_queues() log.cc(1, "TERMINATING") #------------------------------------------------------------------------- # Work Package Processing class WorkPackage( object ): ''' A work package groups a set of files to be sent to a jobworker. The files and the config information necessary to process them are workItems. 
'''
        def __init__(self):
            self.reset()

        def reset(self):
            self.itemsToProcess = []
            self.byteSize = 0

        def add(self, workItem, byteSize):
            self.itemsToProcess.append(workItem)
            self.byteSize += byteSize

        def size_items(self):
            return len(self.itemsToProcess)

        def size_bytes(self):
            return self.byteSize

        def items(self):
            return self.itemsToProcess

        def ready_to_send(self):
            return (self.size_items() >= QUEUE_PACKAGE_MAX_ITEMS or
                    self.size_bytes() >= QUEUE_PACKAGE_MAX_BYTES)

    def _task_queue_size(self):
        remainingPackages = ( self._taskPackagesSent -
                self._outThread.taskPackagesReceived -
                len( self.exceptions ) )
        assert remainingPackages >= 0, "In/Out Queues out of sync"
        return remainingPackages

    def _put_files_in_queue(self, path, deltaPath, filesAndConfigs):
        ''' Package files from the path into workItems that are grouped
            into workPackages and placed into the task queue for jobworkers.
            Packages are broken up if the file count or total size exceeds
            thresholds to help evenly distribute load across cores '''
        if not filesAndConfigs:
            return
        for fileName, configEntrys in filesAndConfigs:

            # Expensive to check file size here, but worth it for parcelling widely
            # varying file sizes out to cores for CPU intensive jobs.
            # Profiling shows it is not worth caching this
            try:
                fileSize = utils.get_file_size(os.path.join(path, fileName))
            except Exception as e:
                # It is possible (at least in Windows) for a fileName to exist
                # in the file system but be invalid for Windows calls. This is
                # the first place the file is accessed through the file system;
                # if it blows up we don't want the job to fall apart, and this is
                # an unusual case, so don't bother with a pathway back to the main
                # application; just swallow it and provide debug
                log.msg(1, str(e))
                log.stack()
                continue

            log.cc(3, "WorkItem: {}, {}".format(fileSize, fileName))
            self.numFilesToProcess += 1
            workItem = (path, deltaPath, fileName, configEntrys,
                        self._options, len(filesAndConfigs))
            self._workPackage.add(workItem, fileSize)

            if self._workPackage.ready_to_send() or (
                    self._filesSinceLastSend > MAX_FILES_BEFORE_SEND):
                self._send_current_package()
            if not self._check_command():
                break

    def _send_current_package(self):
        ''' Place package of work on queue, and start a worker '''
        self._workers.start_next()
        log.cc(2, "PUT WorkPackage - files: {}, bytes: {}...".format(
                self._workPackage.size_items(), self._workPackage.size_bytes()))
        log.cc(4, list(self._workPackage.items()))
        try:
            self._taskQueue.put(list(self._workPackage.items()), True, TASK_FULL_TIMEOUT)
        except Full:
            raise utils.JobException("FATAL ERROR -- FULL TASK QUEUE")
        else:
            self._taskPackagesSent += 1
            self._filesSinceLastSend = 0
            self._workPackage.reset()

    def _config_info_display(self, currentDir, filesAndConfigs):
        ''' Provide support for the configInfo option, that displays in the
            UI what folders would be measured by what configEntries '''
        activeConfigs = set([])
        for fileName, configEntrys in filesAndConfigs:
            _root, fileExt = os.path.splitext(fileName)
            # TBD -- this won't work if there are RE or Exclude file types
            for configEntry in configEntrys:
                activeConfigs.add((fileExt, configEntry))
        if activeConfigs:
            displayStr = currentDir + "\n"
            for fileExt, configEntry in activeConfigs:
                displayStr += ("    " + str(fileExt) + " - " +
                        configEntry.config_str_no_fileext() + "\n")
            self._status_callback(displayStr)

    #-------------------------------------------------------------------------
    #  Command Queue management

    def _check_command(self):
        ''' Check command queue for any problems posted while running a job
            Exceptions received from the command queue
are unpackaged and thrown for main to handle ''' otherCommands = [] try: while self._continueProcessing: (target, command, payload) = self._controlQueue.get_nowait() log.cc(4, "command check: {}, {}".format(target, command)) if target == 'JOB': if 'ERROR' == command: # Error notifications in the control queue are only used to support # break on error -- the error info is handled by the output queue. log.cc(1, "COMMAND: ERROR for file: {}".format(payload)) if self._options.breakOnError: self._continueProcessing = False elif 'EXCEPTION' == command: # Exceptions are bundled up for display to user log.cc(1, "COMMAND: EXCEPTION RECEIVED") if self._options.breakOnError: self._continueProcessing = False self.exceptions.append( payload ) else: otherCommands.append((target, command, payload)) except Empty: log.cc(4, "command check: empty") finally: # Put any queue items removed back in the queue for (target, command, payload) in otherCommands: log.cc(4, "command replace: {}, {}".format(target, command)) self._controlQueue.put_nowait((target, command, payload)) return self._continueProcessing def _close_queues(self): # Make sure queues are flushed and closed to avoid errors in queue code queues = [self._taskQueue, self._outQueue, self._controlQueue] for queue in queues: try: while True: _ = queue.get_nowait() queue.close() except Empty: pass def _send_workers_command(self, command, payload=None): for worker in self._workers(): if worker.is_alive(): self._send_command(worker.name, command, payload) def _send_output_command(self, command, payload=None): self._send_command(self._outThread.name, command, payload) def _send_command(self, target, command, payload): log.cc(2, "COMMAND: {}, {} {}".format(target, command, payload)) self._controlQueue.put_nowait((target, command, payload)) #------------------------------------------------------------------------- class Workers( object ): ''' Subclass for managing group of workers that makes the construction of each Worker a bit cleaner and allows for easy lazy job starting and tracking of how many workers are active ''' def __init__(self, controlQueue, inQueue, outQueue, dbgContext, numWorkers): self._workers = [ jobworker.Worker(inQueue, outQueue, controlQueue, dbgContext, str(num+1)) for num in range(numWorkers) ] self._workerStartIter = self() self._workerStartDone = False self._startedWorkers = 0 def __call__(self): for worker in self._workers: yield worker def start_next(self): if not self._workerStartDone: try: next(self._workerStartIter).start() self._startedWorkers += 1 return True except StopIteration: self._workerStartDone = True return False def num_max(self): return len(self._workers) def num_started(self): return self._startedWorkers
[ "code_surveyor.framework.log.get_context", "code_surveyor.framework.log.stack", "code_surveyor.framework.log.cc", "time.sleep", "os.path.splitext", "multiprocessing.Queue", "os.path.join", "multiprocessing.cpu_count" ]
[((755, 782), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (780, 782), False, 'import multiprocessing\n'), ((3109, 3132), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (3130, 3132), False, 'import multiprocessing\n'), ((3162, 3185), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (3183, 3185), False, 'import multiprocessing\n'), ((3211, 3234), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (3232, 3234), False, 'import multiprocessing\n'), ((5848, 5891), 'code_surveyor.framework.log.cc', 'log.cc', (['(1)', '"""Starting to fill task queue..."""'], {}), "(1, 'Starting to fill task queue...')\n", (5854, 5891), False, 'from code_surveyor.framework import log\n'), ((6197, 6263), 'code_surveyor.framework.log.cc', 'log.cc', (['(1)', '"""Task queue complete, waiting for workers to finish..."""'], {}), "(1, 'Task queue complete, waiting for workers to finish...')\n", (6203, 6263), False, 'from code_surveyor.framework import log\n'), ((6533, 6595), 'code_surveyor.framework.log.cc', 'log.cc', (['(1)', '"""Workers finished, waiting for output to finish..."""'], {}), "(1, 'Workers finished, waiting for output to finish...')\n", (6539, 6595), False, 'from code_surveyor.framework import log\n'), ((6913, 6973), 'code_surveyor.framework.log.cc', 'log.cc', (['(1)', '"""Waiting to cleanup workers and output thread..."""'], {}), "(1, 'Waiting to cleanup workers and output thread...')\n", (6919, 6973), False, 'from code_surveyor.framework import log\n'), ((7541, 7565), 'code_surveyor.framework.log.cc', 'log.cc', (['(1)', '"""TERMINATING"""'], {}), "(1, 'TERMINATING')\n", (7547, 7565), False, 'from code_surveyor.framework import log\n'), ((3573, 3590), 'code_surveyor.framework.log.get_context', 'log.get_context', ([], {}), '()\n', (3588, 3590), False, 'from code_surveyor.framework import log\n'), ((6345, 6378), 'time.sleep', 'time.sleep', (['MAIN_PROCESSING_SLEEP'], {}), '(MAIN_PROCESSING_SLEEP)\n', (6355, 6378), False, 'import time\n'), ((11842, 11868), 'os.path.splitext', 'os.path.splitext', (['fileName'], {}), '(fileName)\n', (11858, 11868), False, 'import os\n'), ((13874, 13907), 'code_surveyor.framework.log.cc', 'log.cc', (['(4)', '"""command check: empty"""'], {}), "(4, 'command check: empty')\n", (13880, 13907), False, 'from code_surveyor.framework import log\n'), ((9574, 9602), 'os.path.join', 'os.path.join', (['path', 'fileName'], {}), '(path, fileName)\n', (9586, 9602), False, 'import os\n'), ((10155, 10166), 'code_surveyor.framework.log.stack', 'log.stack', ([], {}), '()\n', (10164, 10166), False, 'from code_surveyor.framework import log\n'), ((13534, 13574), 'code_surveyor.framework.log.cc', 'log.cc', (['(1)', '"""COMMAND: EXCEPTION RECEIVED"""'], {}), "(1, 'COMMAND: EXCEPTION RECEIVED')\n", (13540, 13574), False, 'from code_surveyor.framework import log\n')]
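# --- Illustrative sketch (not part of the file above): the producer/worker
# queue pattern the Job class builds, compressed to stdlib multiprocessing.
# All names are stand-ins; the real workers live in jobworker.py and receive
# packages of (path, config) work items rather than bare strings.
import multiprocessing

def worker(task_q, out_q):
    while True:
        package = task_q.get()
        if package is None:        # poison pill, analogous to the EXIT command
            break
        out_q.put([len(item) for item in package])

if __name__ == '__main__':
    task_q, out_q = multiprocessing.Queue(), multiprocessing.Queue()
    procs = [multiprocessing.Process(target=worker, args=(task_q, out_q))
             for _ in range(max(1, multiprocessing.cpu_count() - 1))]
    for p in procs:
        p.start()
    packages = [['a.py', 'b.py'], ['c.py']]    # stand-in work packages
    for package in packages:
        task_q.put(package)
    results = [out_q.get() for _ in packages]  # one result per package
    for _ in procs:
        task_q.put(None)
    for p in procs:
        p.join()
    print(results)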
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import tornado
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
import logging
from logging.handlers import RotatingFileHandler
from tornado import options
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from init import create_app, db

app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)

"""
Logging configuration
Define a RotatingFileHandler that keeps at most 5 backup log files,
each up to 10 MB
"""
if app.config["LOGS_START"]:
    basedir = os.path.abspath(os.path.dirname(__file__))
    logdir = os.path.join(basedir, 'logs/myapp.log')
    Rthandler = RotatingFileHandler(logdir, maxBytes=10*1024*1024, backupCount=5)
    Rthandler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(pathname)s %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
    Rthandler.setFormatter(formatter)
    app.logger.addHandler(Rthandler)

"""
Job configuration
"""
if app.config["JOBS_START"]:
    pass
    # Traffic monitoring job
    # from app.jobs.flow import stertFlowMonitoring
    #
    # t = threading.Thread(target=stertFlowMonitoring, name='getHaproxyDataThread')
    # t.start()
    # # Auto-scaling job
    # t2 = threading.Thread(target=stertTxss, name='stertTxssThread')
    # t2.start()
    # # #t.join()
    # from apscheduler.schedulers.blocking import BlockingScheduler
    # from app.jobs.flow import stertFlowMonitoring
    # sched = BlockingScheduler()
    # sched.add_job(stertFlowMonitoring, 'interval', seconds=5)
    # sched.start()

def make_shell_context():
    return dict(app=app, db=db)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)

COV = None
if os.environ.get('FLASK_COVERAGE'):
    import coverage
    COV = coverage.coverage(branch=True, include='app/*')
    COV.start()

@manager.command
def profile(length=25, profile_dir=None):
    """Start the application under the code profiler."""
    from werkzeug.contrib.profiler import ProfilerMiddleware
    app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],
                                      profile_dir=profile_dir)
    app.run()

@manager.command
def test(coverage=False):
    """Run the unit tests."""
    if coverage and not os.environ.get('FLASK_COVERAGE'):
        import sys
        os.environ['FLASK_COVERAGE'] = '1'
        os.execvp(sys.executable, [sys.executable] + sys.argv)
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
    if COV:
        COV.stop()
        COV.save()
        print('Coverage Summary:')
        COV.report()
        basedir = os.path.abspath(os.path.dirname(__file__))
        covdir = os.path.join(basedir, 'tmp/coverage')
        COV.html_report(directory=covdir)
        print('HTML version: file://%s/index.html' % covdir)
        COV.erase()

# @manager.command
# def test():
#     """Run the unit tests."""
#     import unittest
#     tests = unittest.TestLoader().discover('tests')
#     unittest.TextTestRunner(verbosity=2).run(tests)

if __name__ == '__main__':
    # Enable command-line log output
    logger = logging.getLogger()
    # Note: the module placeholder needs a leading % to be substituted
    fm = tornado.log.LogFormatter(fmt='[%(asctime)s]%(color)s[%(levelname)s]%(end_color)s[%(module)s:%(lineno)d] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    tornado.log.enable_pretty_logging(logger=logger)
    logger.handlers[0].setFormatter(fm)

    logging.info('web is starting...')
    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(5000)
    IOLoop.instance().start()
[ "tornado.ioloop.IOLoop.instance", "flask_script.Manager", "logging.Formatter", "unittest.TestLoader", "flask_script.Shell", "os.path.join", "tornado.wsgi.WSGIContainer", "os.path.dirname", "tornado.log.LogFormatter", "flask_migrate.Migrate", "werkzeug.contrib.profiler.ProfilerMiddleware", "os.getenv", "unittest.TextTestRunner", "coverage.coverage", "os.environ.get", "logging.info", "tornado.log.enable_pretty_logging", "os.execvp", "logging.handlers.RotatingFileHandler", "logging.getLogger" ]
[((478, 490), 'flask_script.Manager', 'Manager', (['app'], {}), '(app)\n', (485, 490), False, 'from flask_script import Manager, Shell\n'), ((501, 517), 'flask_migrate.Migrate', 'Migrate', (['app', 'db'], {}), '(app, db)\n', (508, 517), False, 'from flask_migrate import Migrate, MigrateCommand\n'), ((1820, 1852), 'os.environ.get', 'os.environ.get', (['"""FLASK_COVERAGE"""'], {}), "('FLASK_COVERAGE')\n", (1834, 1852), False, 'import os\n'), ((685, 724), 'os.path.join', 'os.path.join', (['basedir', '"""logs/myapp.log"""'], {}), "(basedir, 'logs/myapp.log')\n", (697, 724), False, 'import os\n'), ((741, 810), 'logging.handlers.RotatingFileHandler', 'RotatingFileHandler', (['logdir'], {'maxBytes': '(10 * 1024 * 1024)', 'backupCount': '(5)'}), '(logdir, maxBytes=10 * 1024 * 1024, backupCount=5)\n', (760, 810), False, 'from logging.handlers import RotatingFileHandler\n'), ((860, 971), 'logging.Formatter', 'logging.Formatter', (['"""%(pathname)s %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s"""'], {}), "(\n '%(pathname)s %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'\n )\n", (877, 971), False, 'import logging\n'), ((1720, 1758), 'flask_script.Shell', 'Shell', ([], {'make_context': 'make_shell_context'}), '(make_context=make_shell_context)\n', (1725, 1758), False, 'from flask_script import Manager, Shell\n'), ((1884, 1931), 'coverage.coverage', 'coverage.coverage', ([], {'branch': '(True)', 'include': '"""app/*"""'}), "(branch=True, include='app/*')\n", (1901, 1931), False, 'import coverage\n'), ((2147, 2232), 'werkzeug.contrib.profiler.ProfilerMiddleware', 'ProfilerMiddleware', (['app.wsgi_app'], {'restrictions': '[length]', 'profile_dir': 'profile_dir'}), '(app.wsgi_app, restrictions=[length], profile_dir=profile_dir\n )\n', (2165, 2232), False, 'from werkzeug.contrib.profiler import ProfilerMiddleware\n'), ((3267, 3286), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3284, 3286), False, 'import logging\n'), ((3296, 3451), 'tornado.log.LogFormatter', 'tornado.log.LogFormatter', ([], {'fmt': '"""[%(asctime)s]%(color)s[%(levelname)s]%(end_color)s[(module)s:%(lineno)d] %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(fmt=\n '[%(asctime)s]%(color)s[%(levelname)s]%(end_color)s[(module)s:%(lineno)d] %(message)s'\n , datefmt='%Y-%m-%d %H:%M:%S')\n", (3320, 3451), False, 'import tornado\n'), ((3447, 3495), 'tornado.log.enable_pretty_logging', 'tornado.log.enable_pretty_logging', ([], {'logger': 'logger'}), '(logger=logger)\n', (3480, 3495), False, 'import tornado\n'), ((3543, 3577), 'logging.info', 'logging.info', (['"""web is starting..."""'], {}), "('web is starting...')\n", (3555, 3577), False, 'import logging\n'), ((422, 447), 'os.getenv', 'os.getenv', (['"""FLASK_CONFIG"""'], {}), "('FLASK_CONFIG')\n", (431, 447), False, 'import os\n'), ((645, 670), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (660, 670), False, 'import os\n'), ((2484, 2538), 'os.execvp', 'os.execvp', (['sys.executable', '([sys.executable] + sys.argv)'], {}), '(sys.executable, [sys.executable] + sys.argv)\n', (2493, 2538), False, 'import os\n'), ((2847, 2884), 'os.path.join', 'os.path.join', (['basedir', '"""tmp/coverage"""'], {}), "(basedir, 'tmp/coverage')\n", (2859, 2884), False, 'import os\n'), ((3608, 3626), 'tornado.wsgi.WSGIContainer', 'WSGIContainer', (['app'], {}), '(app)\n', (3621, 3626), False, 'from tornado.wsgi import WSGIContainer\n'), ((2380, 2412), 'os.environ.get', 'os.environ.get', (['"""FLASK_COVERAGE"""'], {}), 
"('FLASK_COVERAGE')\n", (2394, 2412), False, 'import os\n'), ((2571, 2592), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (2590, 2592), False, 'import unittest\n'), ((2615, 2651), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (2638, 2651), False, 'import unittest\n'), ((2803, 2828), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2818, 2828), False, 'import os\n'), ((3661, 3678), 'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (3676, 3678), False, 'from tornado.ioloop import IOLoop\n')]
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################

import simplejson as json
import re
from functools import wraps

import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template, request, jsonify
from flask_babelex import gettext
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
    make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
from pgadmin.tools.schema_diff.node_registry import SchemaDiffRegistry
from pgadmin.tools.schema_diff.compare import SchemaDiffObjectCompare


class EventTriggerModule(CollectionNodeModule):
    """
    class EventTriggerModule(CollectionNodeModule)

        A module class for Event trigger node derived from
        CollectionNodeModule.

    Methods:
    -------
    * __init__(*args, **kwargs)
      - Method is used to initialize the EventTriggerModule and its base
        module.

    * get_nodes(gid, sid, did)
      - Method is used to generate the browser collection node.

    * script_load()
      - Load the module script for Event trigger, when any of the database
        node is initialized.
    """

    _NODE_TYPE = 'event_trigger'
    _COLLECTION_LABEL = gettext("Event Triggers")

    def __init__(self, *args, **kwargs):
        """
        Method is used to initialize the EventTriggerModule and its base
        module.

        Args:
            *args:
            **kwargs:
        """
        super(EventTriggerModule, self).__init__(*args, **kwargs)
        self.min_ver = 90300
        self.max_ver = None
        self.min_gpdbver = 1000000000

    def get_nodes(self, gid, sid, did):
        """
        Generate the event_trigger node
        """
        yield self.generate_browser_collection_node(sid)

    @property
    def node_inode(self):
        """
        Always returns False; it is a leaf node and does not have child
        nodes.
        """
        return False

    @property
    def script_load(self):
        """
        Load the module script for event_trigger, when any of the database
        node is initialized.
        """
        return database.DatabaseModule.node_type

    @property
    def module_use_template_javascript(self):
        """
        Returns whether Jinja2 template is used for generating the javascript
        module.
        """
        return False


blueprint = EventTriggerModule(__name__)


class EventTriggerView(PGChildNodeView, SchemaDiffObjectCompare):
    """
    class EventTriggerView(PGChildNodeView)

        A view class for event trigger node derived from PGChildNodeView.
        This class is responsible for all the stuff related to view like
        updating event trigger node, showing properties, showing sql
        in sql pane.

    Methods:
    -------
    * __init__(**kwargs)
      - Method is used to initialize the EventTriggerView and its base view.

    * check_precondition()
      - This function behaves as a decorator which checks the database
        connection before running the view; it also attaches the
        manager, conn & template_path properties to self

    * list()
      - Lists properties of all the nodes of type - event trigger.

    * nodes()
      - Creates all the child nodes of type - event trigger.

    * properties(gid, sid, did, etid)
      - Returns the properties of the given event trigger node

    * update(gid, sid, did, etid)
      - Updates the data for the given event trigger node.

    * msql(gid, sid, did, etid)
      - Return modified SQL for the given event trigger node based on the
        request data.
* get_sql(data, etid)
      - Generates the sql from model data

    * sql(gid, sid, did, etid):
      - Generates the reverse engineered query for the given event
        trigger node.

    * get_event_funcs(gid, sid, did, etid):
      - Returns the event functions available in that database.

    * dependents(gid, sid, did, etid):
      - Returns the dependents list for the given event trigger node.

    * dependencies(self, gid, sid, did, etid):
      - Returns the dependencies list for the given event trigger node.
    """

    node_type = blueprint.node_type
    node_icon = "icon-%s" % blueprint.node_type

    parent_ids = [
        {'type': 'int', 'id': 'gid'},
        {'type': 'int', 'id': 'sid'},
        {'type': 'int', 'id': 'did'}
    ]
    ids = [
        {'type': 'int', 'id': 'etid'}
    ]

    operations = dict({
        'obj': [
            {'get': 'properties', 'delete': 'delete', 'put': 'update'},
            {'get': 'list', 'post': 'create', 'delete': 'delete'}
        ],
        'nodes': [{'get': 'node'}, {'get': 'nodes'}],
        'children': [{'get': 'children'}],
        'sql': [{'get': 'sql'}],
        'msql': [{'get': 'msql'}, {'get': 'msql'}],
        'stats': [{'get': 'statistics'}],
        'dependency': [{'get': 'dependencies'}],
        'dependent': [{'get': 'dependents'}],
        'fopts': [{'get': 'get_event_funcs'}, {'get': 'get_event_funcs'}]
    })

    keys_to_ignore = ['oid', 'xmin', 'oid-2', 'eventfuncoid']

    def check_precondition(f):
        """
        This function behaves as a decorator which checks the database
        connection before running the view; it also attaches the
        manager, conn & template_path properties to self
        """

        @wraps(f)
        def wrap(*args, **kwargs):
            # Here - args[0] will always hold self & kwargs will hold gid,
            # sid, did
            self = args[0]
            self.manager = get_driver(
                PG_DEFAULT_DRIVER
            ).connection_manager(kwargs['sid'])
            self.conn = self.manager.connection(did=kwargs['did'])
            self.template_path = 'event_triggers/sql/9.3_plus'
            self.datlastsysoid = \
                self.manager.db_info[kwargs['did']]['datlastsysoid'] \
                if self.manager.db_info is not None and \
                kwargs['did'] in self.manager.db_info else 0
            return f(*args, **kwargs)

        return wrap

    @check_precondition
    def list(self, gid, sid, did):
        """
        This function is used to list all the event trigger nodes within that
        collection.

        Args:
            gid: Server Group ID
            sid: Server ID
            did: Database ID
            etid: Event trigger ID

        Returns:

        """
        sql = render_template("/".join([self.template_path,
                                      self._PROPERTIES_SQL]))
        status, res = self.conn.execute_dict(sql)

        if not status:
            return internal_server_error(errormsg=res)
        return ajax_response(
            response=res['rows'],
            status=200
        )

    @check_precondition
    def nodes(self, gid, sid, did):
        """
        This function is used to create all the child nodes within the
        collection. Here it will create all the event trigger nodes.

        Args:
            gid: Server Group ID
            sid: Server ID
            did: Database ID
            etid: Event trigger ID

        Returns:

        """
        result = []
        sql = render_template("/".join([self.template_path,
                                      self._NODES_SQL]))
        status, res = self.conn.execute_2darray(sql)
        if not status:
            return internal_server_error(errormsg=res)
        for row in res['rows']:
            result.append(
                self.blueprint.generate_browser_node(
                    row['oid'],
                    did,
                    row['name'],
                    self.node_icon
                ))

        return make_json_response(
            data=result,
            status=200
        )

    @check_precondition
    def node(self, gid, sid, did, etid):
        """
        This function will fetch the properties of the trigger node.
        Args:
            gid: Server Group ID
            sid: Server ID
            did: Database ID
            etid: Event trigger ID

        Returns:
            Json object of trigger node

        """
        sql = render_template("/".join([self.template_path, self._NODES_SQL]),
                              etid=etid)
        status, res = self.conn.execute_2darray(sql)
        if not status:
            return internal_server_error(errormsg=res)

        for row in res['rows']:
            return make_json_response(
                data=self.blueprint.generate_browser_node(
                    row['oid'],
                    did,
                    row['name'],
                    self.node_icon
                ),
                status=200
            )

        return gone(gettext("Could not find the specified event trigger."))

    def _formatter(self, result):
        """
        This function is used to parse security labels
        """
        seclabels = []
        if 'seclabels' in result and result['seclabels'] is not None:
            for sec in result['seclabels']:
                sec = re.search(r'([^=]+)=(.*$)', sec)
                seclabels.append({
                    'provider': sec.group(1),
                    'label': sec.group(2)
                })

        result['seclabels'] = seclabels
        return result

    @check_precondition
    def properties(self, gid, sid, did, etid):
        """
        This function is used to fetch the properties of the selected event
        trigger node.

        Args:
            gid: Server Group ID
            sid: Server ID
            did: Database ID
            etid: Event trigger ID

        Returns:

        """
        status, res = self._fetch_properties(did, etid)
        if not status:
            return res

        return ajax_response(
            response=res,
            status=200
        )

    def _fetch_properties(self, did, etid):
        """
        This function fetches the properties of the event trigger.
        :param did:
        :param etid:
        :return:
        """
        sql = render_template(
            "/".join([self.template_path, self._PROPERTIES_SQL]),
            etid=etid, conn=self.conn
        )
        status, res = self.conn.execute_dict(sql)
        if not status:
            return False, internal_server_error(errormsg=res)

        if len(res['rows']) == 0:
            return False, gone(
                gettext("Could not find the event trigger information."))

        result = res['rows'][0]
        result['is_sys_obj'] = (result['oid'] <= self.datlastsysoid)
        result = self._formatter(result)

        return True, result

    @check_precondition
    def create(self, gid, sid, did):
        """
        This function will create an event trigger object.

        Args:
            gid: Server Group ID
            sid: Server ID
            did: Database ID
            etid: Event trigger ID

        Returns:

        """
        data = request.form if request.form else json.loads(
            request.data, encoding='utf-8'
        )

        required_args = {
            'name': 'Name',
            'eventowner': 'Owner',
            'eventfunname': 'Trigger function',
            'enabled': 'Enabled status',
            'eventname': 'Events'
        }
        err = []
        for arg in required_args:
            if arg not in data:
                err.append(required_args.get(arg, arg))
        if err:
            return make_json_response(
                status=400,
                success=0,
                errormsg=gettext(
                    "Could not find the required parameter ({}).").format(err)
            )
        try:
            sql = render_template(
                "/".join([self.template_path, self._CREATE_SQL]),
                data=data, conn=self.conn
            )
            status, res = self.conn.execute_scalar(sql)
            if not status:
                return internal_server_error(errormsg=res)

            sql = render_template(
                "/".join([self.template_path, self._GRANT_SQL]),
                data=data, conn=self.conn
            )
            sql = sql.strip('\n').strip(' ')
            status, res = self.conn.execute_scalar(sql)
            if not status:
                return internal_server_error(errormsg=res)

            sql = render_template(
                "/".join([self.template_path, self._OID_SQL]),
                data=data
            )
            status, etid = self.conn.execute_scalar(sql)
            if not status:
                return internal_server_error(errormsg=etid)

            return jsonify(
                node=self.blueprint.generate_browser_node(
                    etid,
                    did,
                    data['name'],
                    self.node_icon
                )
            )
        except Exception as e:
            return internal_server_error(errormsg=str(e))

    @check_precondition
    def update(self, gid, sid, did, etid):
        """
        This function will update the data for the selected event trigger
        node.

Args: gid: Server Group ID sid: Server ID did: Database ID etid: Event trigger ID Returns: """ data = request.form if request.form else json.loads( request.data, encoding='utf-8' ) try: sql = self.get_sql(data, etid) # Most probably this is due to error if not isinstance(sql, str): return sql if sql != "": status, res = self.conn.execute_scalar(sql) if not status: return internal_server_error(errormsg=res) sql = render_template( "/".join([self.template_path, self._OID_SQL]), data=data ) status, etid = self.conn.execute_scalar(sql) return jsonify( node=self.blueprint.generate_browser_node( etid, did, data['name'], self.node_icon ) ) else: return make_json_response( success=1, info="Nothing to update", data={ 'id': etid, 'sid': sid, 'gid': gid, 'did': did } ) except Exception as e: return internal_server_error(errormsg=str(e)) @staticmethod def get_delete_data(cmd, etid, request_object): """ This function is used to get the data and cascade information. :param cmd: Command :param etid: Object ID :param request_object: request object :return: """ cascade = False # Below will decide if it's simple drop or drop with cascade call if cmd == 'delete': # This is a cascade operation cascade = True if etid is None: data = request_object.form if request_object.form else \ json.loads(request_object.data, encoding='utf-8') else: data = {'ids': [etid]} return cascade, data @check_precondition def delete(self, gid, sid, did, etid=None, only_sql=False): """ This function will delete an existing event trigger object. Args: gid: Server Group ID sid: Server ID did: Database ID etid: Event trigger ID only_sql: Returns: """ # get the value of cascade and data cascade, data = self.get_delete_data(self.cmd, etid, request) try: for etid in data['ids']: sql = render_template( "/".join([self.template_path, self._DELETE_SQL]), etid=etid ) status, name = self.conn.execute_scalar(sql) if not status: return internal_server_error(errormsg=name) if name is None: return make_json_response( status=410, success=0, errormsg=gettext( 'Error: Object not found.' ), info=gettext( 'The specified event trigger could not be found.\n' ) ) sql = render_template( "/".join([self.template_path, self._DELETE_SQL]), name=name, cascade=cascade ) # Used for schema diff tool if only_sql: return sql status, res = self.conn.execute_scalar(sql) if not status: return internal_server_error(errormsg=res) return make_json_response( success=1, info=gettext("Event trigger dropped") ) except Exception as e: return internal_server_error(errormsg=str(e)) @check_precondition def msql(self, gid, sid, did, etid=None): """ This function is used to return modified SQL for the selected event trigger node. Args: gid: Server Group ID sid: Server ID did: Database ID etid: Event trigger ID Returns: """ data = {} for k, v in request.args.items(): try: data[k] = json.loads(v, encoding='utf-8') except ValueError: data[k] = v try: sql = self.get_sql(data, etid) # Most probably this is due to error if not isinstance(sql, str): return sql sql = re.sub('\n{2,}', '\n\n', sql) if sql == '': sql = "--modified SQL" return make_json_response( data=sql, status=200 ) except Exception as e: return internal_server_error(errormsg=str(e)) def get_sql(self, data, etid=None): """ This function will generate sql from model data. Args: data: Contains the data of the selected event trigger node. 
etid: Event trigger ID Returns: """ required_args = [ 'name' ] if etid is not None: sql = render_template( "/".join([self.template_path, self._PROPERTIES_SQL]), etid=etid ) status, res = self.conn.execute_dict(sql) if not status: return internal_server_error(errormsg=res) if len(res['rows']) == 0: return gone( gettext("Could not find the event trigger information.") ) old_data = res['rows'][0] old_data = self._formatter(old_data) for arg in required_args: if arg not in data: data[arg] = old_data[arg] sql = render_template( "/".join([self.template_path, self._UPDATE_SQL]), data=data, o_data=old_data ) else: sql = self._get_create_with_grant_sql(data) return sql.strip('\n') def _get_create_with_grant_sql(self, data): required_args = { 'name': 'Name', 'eventowner': 'Owner', 'eventfunname': 'Trigger function', 'enabled': 'Enabled status', 'eventname': 'Events' } err = [] for arg in required_args: if arg not in data: err.append(required_args.get(arg, arg)) if err: return make_json_response( status=410, success=0, errormsg=gettext( "Could not find the required parameter ({})." ).format(arg) ) sql = render_template( "/".join([self.template_path, self._CREATE_SQL]), data=data ) sql += "\n" sql += render_template( "/".join([self.template_path, self._GRANT_SQL]), data=data ) return sql.strip('\n').strip(' ') @check_precondition def sql(self, gid, sid, did, etid, json_resp=True): """ This function will generate sql to show in the sql pane for the selected event trigger node. Args: gid: Server Group ID sid: Server ID did: Database ID etid: Event trigger ID json_resp: Returns: """ sql = render_template( "/".join([self.template_path, self._PROPERTIES_SQL]), etid=etid ) status, res = self.conn.execute_dict(sql) if not status: return internal_server_error(errormsg=res) if len(res['rows']) == 0: return gone( gettext( "Could not find the specified event trigger on the " "server.") ) result = res['rows'][0] result = self._formatter(result) sql = render_template( "/".join([self.template_path, self._CREATE_SQL]), data=result, conn=self.conn ) sql += "\n\n" sql += render_template( "/".join([self.template_path, self._GRANT_SQL]), data=result, conn=self.conn ) db_sql = render_template( "/".join([self.template_path, 'get_db.sql']), did=did ) status, db_name = self.conn.execute_scalar(db_sql) if not status: return internal_server_error(errormsg=db_name) sql_header = "-- Event Trigger: {0} on database {1}\n\n-- ".format( result['name'], db_name ) sql_header += render_template( "/".join([self.template_path, self._DELETE_SQL]), name=result['name'], ) sql_header += "\n" sql = sql_header + sql sql = re.sub('\n{2,}', '\n\n', sql) if not json_resp: return sql return ajax_response(response=sql) @check_precondition def get_event_funcs(self, gid, sid, did, etid=None): """ This function gets the event functions and returns an ajax response for the event trigger node. Args: gid: Server Group ID sid: Server ID did: Database ID etid: Event trigger ID Returns: """ res = [{'label': '', 'value': ''}] sql = render_template( "/".join([self.template_path, 'eventfunctions.sql']) ) status, rest = self.conn.execute_2darray(sql) if not status: return internal_server_error(errormsg=rest) for row in rest['rows']: res.append( {'label': row['tfname'], 'value': row['tfname']} ) return make_json_response( data=res, status=200 ) @check_precondition def dependents(self, gid, sid, did, etid=None): """ This function gets the dependents and returns an ajax response for the event trigger node. 
Args: gid: Server Group ID sid: Server ID did: Database ID etid: Event trigger ID """ dependents_result = self.get_dependents(self.conn, etid) return ajax_response( response=dependents_result, status=200 ) @check_precondition def dependencies(self, gid, sid, did, etid): """ This function gets the dependencies and returns an ajax response for the event trigger node. Args: gid: Server Group ID sid: Server ID did: Database ID etid: Event trigger ID """ dependencies_result = self.get_dependencies(self.conn, etid) return ajax_response( response=dependencies_result, status=200 ) @check_precondition def fetch_objects_to_compare(self, sid, did): """ This function will fetch the list of all the event triggers for specified database id. :param sid: Server Id :param did: Database Id :return: """ res = dict() last_system_oid = 0 if self.manager.db_info is not None and did in self.manager.db_info: last_system_oid = (self.manager.db_info[did])['datlastsysoid'] sql = render_template( "/".join([self.template_path, 'nodes.sql']), datlastsysoid=last_system_oid, showsysobj=self.blueprint.show_system_objects ) status, rset = self.conn.execute_2darray(sql) if not status: return internal_server_error(errormsg=rset) for row in rset['rows']: status, data = self._fetch_properties(did, row['oid']) if status: res[row['name']] = data return res def get_sql_from_diff(self, **kwargs): """ This function is used to get the DDL/DML statements. :param kwargs: :return: """ gid = kwargs.get('gid') sid = kwargs.get('sid') did = kwargs.get('did') oid = kwargs.get('oid') data = kwargs.get('data', None) drop_sql = kwargs.get('drop_sql', False) if data: sql = self.get_sql(data=data, etid=oid) else: if drop_sql: sql = self.delete(gid=gid, sid=sid, did=did, etid=oid, only_sql=True) else: sql = self.sql(gid=gid, sid=sid, did=did, etid=oid, json_resp=False) return sql SchemaDiffRegistry(blueprint.node_type, EventTriggerView, 'Database') EventTriggerView.register_node_view(blueprint)
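# --- Illustrative sketch (not part of the file above): the check_precondition
# idea -- a decorator that attaches shared state to `self` before each view
# method runs -- shown framework-free with stand-in values.
from functools import wraps

def check_precondition(f):
    @wraps(f)
    def wrap(self, *args, **kwargs):
        # stand-ins for the manager/connection lookup the real decorator does
        self.conn = 'fake-connection'
        self.template_path = 'event_triggers/sql/9.3_plus'
        return f(self, *args, **kwargs)
    return wrap

class View:
    @check_precondition
    def list(self):
        return 'listing via %s' % self.conn

print(View().list())  # the decorator ran first and attached self.conn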
[ "flask_babelex.gettext", "pgadmin.tools.schema_diff.node_registry.SchemaDiffRegistry", "pgadmin.utils.ajax.internal_server_error", "pgadmin.utils.ajax.make_json_response", "simplejson.loads", "pgadmin.utils.driver.get_driver", "functools.wraps", "re.search", "pgadmin.utils.ajax.make_response", "re.sub", "flask.request.args.items" ]
[((26430, 26499), 'pgadmin.tools.schema_diff.node_registry.SchemaDiffRegistry', 'SchemaDiffRegistry', (['blueprint.node_type', 'EventTriggerView', '"""Database"""'], {}), "(blueprint.node_type, EventTriggerView, 'Database')\n", (26448, 26499), False, 'from pgadmin.tools.schema_diff.node_registry import SchemaDiffRegistry\n'), ((1604, 1629), 'flask_babelex.gettext', 'gettext', (['"""Event Triggers"""'], {}), "('Event Triggers')\n", (1611, 1629), False, 'from flask_babelex import gettext\n'), ((5728, 5736), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (5733, 5736), False, 'from functools import wraps\n'), ((7036, 7083), 'pgadmin.utils.ajax.make_response', 'ajax_response', ([], {'response': "res['rows']", 'status': '(200)'}), "(response=res['rows'], status=200)\n", (7049, 7083), True, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((7997, 8040), 'pgadmin.utils.ajax.make_json_response', 'make_json_response', ([], {'data': 'result', 'status': '(200)'}), '(data=result, status=200)\n', (8015, 8040), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((10029, 10068), 'pgadmin.utils.ajax.make_response', 'ajax_response', ([], {'response': 'res', 'status': '(200)'}), '(response=res, status=200)\n', (10042, 10068), True, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((17923, 17943), 'flask.request.args.items', 'request.args.items', ([], {}), '()\n', (17941, 17943), False, 'from flask import render_template, request, jsonify\n'), ((22624, 22653), 're.sub', 're.sub', (['"""\n{2,}"""', '"""\n\n"""', 'sql'], {}), "('\\n{2,}', '\\n\\n', sql)\n", (22630, 22653), False, 'import re\n'), ((22720, 22747), 'pgadmin.utils.ajax.make_response', 'ajax_response', ([], {'response': 'sql'}), '(response=sql)\n', (22733, 22747), True, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((23549, 23589), 'pgadmin.utils.ajax.make_json_response', 'make_json_response', ([], {'data': 'res', 'status': '(200)'}), '(data=res, status=200)\n', (23567, 23589), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((24051, 24104), 'pgadmin.utils.ajax.make_response', 'ajax_response', ([], {'response': 'dependents_result', 'status': '(200)'}), '(response=dependents_result, status=200)\n', (24064, 24104), True, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((24569, 24624), 'pgadmin.utils.ajax.make_response', 'ajax_response', ([], {'response': 'dependencies_result', 'status': '(200)'}), '(response=dependencies_result, status=200)\n', (24582, 24624), True, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((6984, 7019), 'pgadmin.utils.ajax.internal_server_error', 'internal_server_error', ([], {'errormsg': 'res'}), '(errormsg=res)\n', (7005, 7019), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((7687, 7722), 'pgadmin.utils.ajax.internal_server_error', 'internal_server_error', ([], {'errormsg': 'res'}), '(errormsg=res)\n', (7708, 7722), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((8628, 8663), 'pgadmin.utils.ajax.internal_server_error', 'internal_server_error', ([], {'errormsg': 'res'}), '(errormsg=res)\n', (8649, 8663), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((9001, 9055), 'flask_babelex.gettext', 'gettext', (['"""Could not find the specified event trigger."""'], {}), "('Could not find the specified event trigger.')\n", (9008, 9055), False, 'from flask_babelex import gettext\n'), ((11231, 11273), 'simplejson.loads', 'json.loads', (['request.data'], {'encoding': '"""utf-8"""'}), "(request.data, encoding='utf-8')\n", (11241, 11273), True, 'import simplejson as json\n'), ((13542, 13584), 'simplejson.loads', 'json.loads', (['request.data'], {'encoding': '"""utf-8"""'}), "(request.data, encoding='utf-8')\n", (13552, 13584), True, 'import simplejson as json\n'), ((18271, 18300), 're.sub', 're.sub', (['"""\n{2,}"""', '"""\n\n"""', 'sql'], {}), "('\\n{2,}', '\\n\\n', sql)\n", (18277, 18300), False, 'import re\n'), ((18385, 18425), 'pgadmin.utils.ajax.make_json_response', 'make_json_response', ([], {'data': 'sql', 'status': '(200)'}), '(data=sql, status=200)\n', (18403, 18425), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((21405, 21440), 'pgadmin.utils.ajax.internal_server_error', 'internal_server_error', ([], {'errormsg': 'res'}), '(errormsg=res)\n', (21426, 21440), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((22251, 22290), 'pgadmin.utils.ajax.internal_server_error', 'internal_server_error', ([], {'errormsg': 'db_name'}), '(errormsg=db_name)\n', (22272, 22290), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((23361, 23397), 'pgadmin.utils.ajax.internal_server_error', 'internal_server_error', ([], {'errormsg': 'rest'}), '(errormsg=rest)\n', (23382, 23397), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((25439, 25475), 'pgadmin.utils.ajax.internal_server_error', 'internal_server_error', ([], {'errormsg': 'rset'}), '(errormsg=rset)\n', (25460, 25475), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((9329, 9360), 're.search', 're.search', (['"""([^=]+)=(.*$)"""', 'sec'], {}), "('([^=]+)=(.*$)', sec)\n", (9338, 9360), False, 'import re\n'), ((10539, 10574), 'pgadmin.utils.ajax.internal_server_error', 'internal_server_error', ([], {'errormsg': 'res'}), '(errormsg=res)\n', (10560, 10574), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((12171, 12206), 'pgadmin.utils.ajax.internal_server_error', 'internal_server_error', ([], {'errormsg': 'res'}), '(errormsg=res)\n', (12192, 12206), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((12515, 12550), 'pgadmin.utils.ajax.internal_server_error', 'internal_server_error', ([], {'errormsg': 'res'}), '(errormsg=res)\n', (12536, 12550), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((12797, 12833), 'pgadmin.utils.ajax.internal_server_error', 'internal_server_error', ([], {'errormsg': 'etid'}), '(errormsg=etid)\n', (12818, 12833), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((14491, 14605), 'pgadmin.utils.ajax.make_json_response', 'make_json_response', ([], {'success': '(1)', 'info': '"""Nothing to update"""', 'data': "{'id': etid, 'sid': sid, 'gid': gid, 'did': did}"}), "(success=1, info='Nothing to update', data={'id': etid,\n 'sid': sid, 'gid': gid, 'did': did})\n", (14509, 14605), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((15482, 15531), 'simplejson.loads', 'json.loads', (['request_object.data'], {'encoding': '"""utf-8"""'}), "(request_object.data, encoding='utf-8')\n", (15492, 15531), True, 'import simplejson as json\n'), ((17988, 18019), 'simplejson.loads', 'json.loads', (['v'], {'encoding': '"""utf-8"""'}), "(v, encoding='utf-8')\n", (17998, 18019), True, 'import simplejson as json\n'), ((19155, 19190), 'pgadmin.utils.ajax.internal_server_error', 'internal_server_error', ([], {'errormsg': 'res'}), '(errormsg=res)\n', (19176, 19190), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((21517, 21585), 'flask_babelex.gettext', 'gettext', (['"""Could not find the specified event trigger on the server."""'], {}), "('Could not find the specified event trigger on the server.')\n", (21524, 21585), False, 'from flask_babelex import gettext\n'), ((5925, 5954), 'pgadmin.utils.driver.get_driver', 'get_driver', (['PG_DEFAULT_DRIVER'], {}), '(PG_DEFAULT_DRIVER)\n', (5935, 5954), False, 'from pgadmin.utils.driver import get_driver\n'), ((10658, 10714), 'flask_babelex.gettext', 'gettext', (['"""Could not find the event trigger information."""'], {}), "('Could not find the event trigger information.')\n", (10665, 10714), False, 'from flask_babelex import gettext\n'), ((13926, 13961), 'pgadmin.utils.ajax.internal_server_error', 'internal_server_error', ([], {'errormsg': 'res'}), '(errormsg=res)\n', (13947, 13961), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((16403, 16439), 'pgadmin.utils.ajax.internal_server_error', 'internal_server_error', ([], {'errormsg': 'name'}), '(errormsg=name)\n', (16424, 16439), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((17281, 17316), 'pgadmin.utils.ajax.internal_server_error', 'internal_server_error', ([], {'errormsg': 'res'}), '(errormsg=res)\n', (17302, 17316), False, 'from pgadmin.utils.ajax import make_json_response, internal_server_error, make_response as ajax_response, gone\n'), ((17405, 17437), 'flask_babelex.gettext', 'gettext', (['"""Event trigger dropped"""'], {}), "('Event trigger dropped')\n", (17412, 17437), False, 'from flask_babelex import gettext\n'), ((19279, 19335), 'flask_babelex.gettext', 'gettext', (['"""Could not find the event trigger information."""'], {}), "('Could not find the event trigger information.')\n", (19286, 19335), False, 'from flask_babelex import gettext\n'), ((11793, 11847), 'flask_babelex.gettext', 'gettext', (['"""Could not find the required parameter ({})."""'], {}), "('Could not find the required parameter ({}).')\n", (11800, 11847), False, 'from flask_babelex import gettext\n'), ((16625, 16660), 'flask_babelex.gettext', 'gettext', (['"""Error: Object not found."""'], {}), "('Error: Object not found.')\n", (16632, 16660), False, 'from flask_babelex import gettext\n'), ((16745, 16805), 'flask_babelex.gettext', 'gettext', (['"""The specified event trigger could not be found.\n"""'], {}), "('The specified event trigger could not be found.\\n')\n", (16752, 16805), False, 'from flask_babelex import gettext\n'), ((20368, 20422), 'flask_babelex.gettext', 'gettext', (['"""Could not find the required parameter ({})."""'], {}), "('Could not find the required parameter ({}).')\n", (20375, 20422), False, 'from flask_babelex import gettext\n')]
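Each extract_api tuple pairs a call's character span in the source with its qualified name, local spelling, parsed arguments, the literal call text, the argument span, an attribute-call flag, and the import that binds the name. A minimal sketch of how such records could be produced with the standard library's ast module (the exact field layout used by this dataset's extractor is my assumption, not documented here):

import ast

def extract_calls(source):
    """Yield (location, call_text) for every call expression in `source`."""
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            # get_source_segment needs Python 3.8+; col offsets could be
            # converted to absolute character spans like the tuples above.
            text = ast.get_source_segment(source, node)
            yield ((node.lineno, node.col_offset), text)

for loc, text in extract_calls("import re\nre.sub('a', 'b', 'abc')\n"):
    print(loc, text)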
import pyOcean_cpu as ocean
import numpy as np
import pyOceanNumpy

a = np.arange(24).reshape([3,2,4])
print(a)

b = ocean.asTensor(a).reverseAxes2()
print(b)

b.fill(3)
b.sync()
print(a)
[ "numpy.arange", "pyOcean_cpu.asTensor" ]
[((72, 85), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (81, 85), True, 'import numpy as np\n'), ((116, 133), 'pyOcean_cpu.asTensor', 'ocean.asTensor', (['a'], {}), '(a)\n', (130, 133), True, 'import pyOcean_cpu as ocean\n')]
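The point of the snippet is aliasing: the reversed-axes tensor wraps the same buffer as the numpy array, so filling b also changes a. A numpy-only sketch of the same behaviour (assuming pyOcean's reverseAxes call is view-like, which is my reading rather than something stated here):

import numpy as np

a = np.arange(24).reshape(3, 2, 4)
b = a.transpose(2, 1, 0)  # axes reversed, still a view on a's buffer
b[...] = 3                # write through the view
assert (a == 3).all()     # the base array was modified too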
# This file is part of the Data Cleaning Library (openclean).
#
# Copyright (C) 2018-2021 New York University.
#
# openclean is released under the Revised BSD License. See file LICENSE for
# full license details.

"""Unit tests for the update operator in data processing pipelines."""

from openclean.function.eval.base import Col


def test_update_rows_in_stream(ds):
    """Test updating values in a column in a data stream."""
    col_a = list(ds.update('A', Col('B') + Col('C')).to_df()['A'])
    assert col_a == [9] * 10
[ "openclean.function.eval.base.Col" ]
[((462, 470), 'openclean.function.eval.base.Col', 'Col', (['"""B"""'], {}), "('B')\n", (465, 470), False, 'from openclean.function.eval.base import Col\n'), ((473, 481), 'openclean.function.eval.base.Col', 'Col', (['"""C"""'], {}), "('C')\n", (476, 481), False, 'from openclean.function.eval.base import Col\n')]
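For readers without openclean installed, the expected outcome is plain column arithmetic; a rough pandas equivalent of what the ds.update call should compute (the ds fixture itself is defined elsewhere in the test suite, so the concrete values here are my assumption):

import pandas as pd

df = pd.DataFrame({'A': [0] * 10, 'B': [4] * 10, 'C': [5] * 10})
df['A'] = df['B'] + df['C']  # Col('B') + Col('C') evaluated row-wise
assert list(df['A']) == [9] * 10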
import math
import random

num = random.random()
show = math.trunc(num)
print('O numero {}, inteiro fica {}'.format(num, show))
[ "random.random", "math.trunc" ]
[((32, 47), 'random.random', 'random.random', ([], {}), '()\n', (45, 47), False, 'import random\n'), ((56, 71), 'math.trunc', 'math.trunc', (['num'], {}), '(num)\n', (66, 71), False, 'import math\n')]
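Note that random.random() returns a float in [0.0, 1.0), so math.trunc always yields 0 here; truncation only diverges from flooring once values can be negative. A quick sketch of the difference:

import math

for x in (2.7, -2.7):
    print(x, math.trunc(x), math.floor(x), int(x))
# 2.7  -> trunc  2, floor  2, int  2
# -2.7 -> trunc -2, floor -3, int -2  (trunc and int round toward zero)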
"""7. The treachery of whales.""" from enum import Enum from statistics import mean, median def find_fuel_spend_using_simple_rule(horizontal_positions: str) -> int: """Finds the minimum fuel spend required to align the crab submarines.""" crab_positions = [int(p) for p in horizontal_positions.split(",")] median_position = median(crab_positions) return int(sum(abs(p - median_position) for p in crab_positions)) def find_fuel_spend_using_complex_rule(horizontal_positions: str) -> int: """Same as above but for the more complex rule. The constraint in part 2 leads to: Fuel = Sum_i[ 1/2 (p - xi)^2 + 1/2 |p - xi| ] where xi is position of the ith crab, and p is the optimal location for the crabs to align. Differentiating this and rearranging gives: p = Sum_i[xi] / N - Sum_i[ Sign[p - xi] / 2N] The first term on the RHS is the mean of the crab positions, and the second term is between -1/2 and 1/2. So p must satisfy Mean[xi] - 1 / 2 <= p <= Mean[xi] + 1/2 [n.b. I derived most of this but missed the last step to get the tighter bound!] """ crab_positions = [int(p) for p in horizontal_positions.split(",")] fuel_used = lambda p: sum(0.5 * ((p - c) ** 2 + abs(p - c)) for c in crab_positions) mean_crab_position = mean(crab_positions) p_candidates = range( int(mean_crab_position - 1 / 2), int(mean_crab_position + 1 / 2) + 1 ) return int(min(map(fuel_used, p_candidates))) if __name__ == "__main__": with open("input.txt", "r") as f: horizontal_positions = f.read() print(f"Part one: {find_fuel_spend_using_simple_rule(horizontal_positions)}") print(f"Part two: {find_fuel_spend_using_complex_rule(horizontal_positions)}")
[ "statistics.median", "statistics.mean" ]
[((340, 362), 'statistics.median', 'median', (['crab_positions'], {}), '(crab_positions)\n', (346, 362), False, 'from statistics import mean, median\n'), ((1319, 1339), 'statistics.mean', 'mean', (['crab_positions'], {}), '(crab_positions)\n', (1323, 1339), False, 'from statistics import mean, median\n')]
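The complex rule's per-crab cost ((p - x)^2 + |p - x|)/2 is just the triangular number 1 + 2 + ... + d for distance d = |p - x|; a tiny self-check of that identity (my addition, not part of the puzzle solution):

def fuel_naive(d):
    """Part-two fuel for distance d by direct summation: 1 + 2 + ... + d."""
    return sum(range(1, d + 1))

assert all(fuel_naive(d) == (d * d + d) // 2 for d in range(200))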
import tdameritrade as td
from ib_insync import *
import os
import asyncio
from util import *
import wrapper
from PyQt5.QtWidgets import QApplication
import PyQt5.QtWidgets as qt
from PyQt5.QtGui import QPalette
from PyQt5 import QtGui
from PyQt5.QtCore import Qt
from PyQt5 import QtCore


class TickerTable(qt.QTableWidget):
    headers = [
        'symbol', 'last', 'mark', 'adjOptValue', 'adj', 'size',
        'option_symbol', 'BUY', 'SELL'
    ]

    """
    TODO: Define settings
    delta - The minimum delta when buying long calls. Affects 'extra'
    extrinsic value of option and execution speed of option orders.
    """

    def __init__(self, parent=None):
        qt.QTableWidget.__init__(self, parent)
        self.conId2Row = {}
        self.setColumnCount(len(self.headers))
        self.setHorizontalHeaderLabels(self.headers)
        self.setAlternatingRowColors(True)

    def addTicker(self, ticker):
        row = self.rowCount()
        self.insertRow(row)
        # self.conId2Row get connection id by calling tdclient's equivalent
        # of ib's reqMktData
        # TODO: add qt.QPushButton('BUY') and qt.QPushButton('SE')
        for col in range(len(self.headers)):
            item = qt.QTableWidgetItem('-')
            self.setItem(row, col, item)
        item = self.item(row, 0)
        item.setText(ticker)
        item = self.item(row, 5)  # col 'size'
        item.setText('1')
        # Adding BUY and SELL buttons and connect corresponding slots/callback functions
        buyButton = qt.QPushButton('BUY')
        self.setCellWidget(row, len(self.headers)-2, buyButton)
        buyButton.clicked.connect(self.onBuyButtonClicked)
        sellButton = qt.QPushButton('SELL')
        self.setCellWidget(row, len(self.headers)-1, sellButton)
        sellButton.clicked.connect(self.onSellButtonClicked)
        self.resizeColumnsToContents()

    def clearTickers(self):
        self.setRowCount(0)
        self.conId2Row.clear()

    # @QtCore.pyqtSlot("QModelIndex")
    def onBuyButtonClicked(self, _):
        # col = 'symbol', row = ADD code to get row of Buy Button
        row = self.row(self.selectedItems()[0])  # row of Buy button clicked
        col = 0  # 'ticker' column
        print(row, col)
        item = self.item(row, col)
        print(item.text())
        ticker = item.text()

        #### if just the ticker is given:
        # automatically get option symbol for next Friday to input into placeIBTrade()
        option_symbol = getOptionSymbol(ticker)
        print (option_symbol)

        # convert PG_121319C108 to [expiry_date] [strike] [C] using regular expressions
        IB_list = []
        IB_list = tdapi_to_IB(option_symbol)
        print(IB_list)
        print('\n' + tdapi_to_tos(option_symbol) + '\n')

        #placeIBTrade(args.ticker, dateIB, strike, 'C', c.midpoint(option_symbol))
        # in ADP_120619C250 format(tdapi)
        if args.tdapi:
            c.placeOrder(args.order_type + stradd, args.size, option_symbol,
                         'LIMIT', c.midpoint(option_symbol, args.order_type))
        else:
            placeIBTrade(IB_list[0], IB_list[1], IB_list[2], IB_list[3],
                         c.midpoint(option_symbol, args.order_type))

        # return ToS option symbol Ex] .ADP191206C250 instead of (ADP_120619C250)
        # Copy and paste into ToS Watchlist
        print('\n' + tdapi_to_tos(option_symbol))
        return

    def onSellButtonClicked(self, _):
        return


class Window(qt.QWidget):
    def __init__(self, clientId, refreshToken):
        qt.QWidget.__init__(self)
        self.edit = qt.QLineEdit('', self)
        self.edit.editingFinished.connect(self.add)
        self.table = TickerTable()
        self.connectButton = qt.QPushButton('Connect')
        self.connectButton.clicked.connect(self.onConnectButtonClicked)
        layout = qt.QVBoxLayout(self)
        layout.addWidget(self.edit)
        layout.addWidget(self.table)
        layout.addWidget(self.connectButton)
        # Initialize TDClient which connects to the TD API
        self.connectInfo = (clientId, refreshToken)
        self.tdclient = td.TDClient(*self.connectInfo)

    def add(self, text=''):
        text = text or self.edit.text()
        if text:
            ticker = text
            self.table.addTicker(ticker)
            self.edit.setText(text)

    def onConnectButtonClicked(self, _):
        # check connection status if not connected connect.
        # tdclient = td.TDClient(*self.connectInfo)
        # if not self.tdclient.isConnected():
        if self.connectButton.text() == 'Disconnect':
            self.table.clearTickers()
            self.connectButton.setText('Connect')
            # exit()
            # print(tdclient.search('AAPL'))
        else:
            print(self.tdclient.accounts(positions=True))
            self.connectButton.setText('Disconnect')
            for symbol in ('AAPL', 'TSLA', 'VZ'):
                self.add(f"{symbol}")
            self.add("Stock('ORCL', 'SMART', 'USD')")
            self.edit.setText('')
            self.edit.setPlaceholderText('Enter Ticker Symbol . . .')
        return 0

    def closeEvent(self, ev):
        asyncio.get_event_loop().stop()
        return 0


if __name__ == '__main__':
    refreshToken = open(os.path.expanduser('~/.r_token'), 'r')
    clientId = os.getenv("TDAMERITRADE_CLIENT_ID")

    app = QApplication([])
    app.setStyle('Fusion')
    dark_palette = QPalette()
    dark_palette.setColor(QPalette.Window, QtGui.QColor(53, 53, 53))
    dark_palette.setColor(QPalette.WindowText, Qt.white)
    dark_palette.setColor(QPalette.Base, QtGui.QColor(25, 25, 25))
    dark_palette.setColor(QPalette.AlternateBase, QtGui.QColor(53, 53, 53))
    dark_palette.setColor(QPalette.ToolTipBase, Qt.white)
    dark_palette.setColor(QPalette.ToolTipText, Qt.white)
    dark_palette.setColor(QPalette.Text, Qt.white)
    dark_palette.setColor(QPalette.Button, QtGui.QColor(53, 53, 53))
    dark_palette.setColor(QPalette.ButtonText, Qt.white)
    dark_palette.setColor(QPalette.BrightText, Qt.red)
    dark_palette.setColor(QPalette.Link, QtGui.QColor(42, 130, 218))
    dark_palette.setColor(QPalette.Highlight, QtGui.QColor(42, 130, 218))
    dark_palette.setColor(QPalette.HighlightedText, Qt.black)
    app.setPalette(dark_palette)
    app.setStyleSheet("QToolTip { color: #ffffff; background-color: #2a82da; border: 1px solid white; }")

    util.patchAsyncio()
    util.useQt()
    window = Window(clientId, refreshToken)
    window.resize(600, 400)
    window.show()
    # tdapp.run()
    util.run()
    app.exec_()
[ "asyncio.get_event_loop", "PyQt5.QtGui.QColor", "PyQt5.QtWidgets.QPushButton", "PyQt5.QtWidgets.QLineEdit", "PyQt5.QtGui.QPalette", "PyQt5.QtWidgets.QTableWidget.__init__", "PyQt5.QtWidgets.QWidget.__init__", "PyQt5.QtWidgets.QVBoxLayout", "PyQt5.QtWidgets.QTableWidgetItem", "PyQt5.QtWidgets.QApplication", "tdameritrade.TDClient", "os.path.expanduser", "os.getenv" ]
[((5256, 5291), 'os.getenv', 'os.getenv', (['"""TDAMERITRADE_CLIENT_ID"""'], {}), "('TDAMERITRADE_CLIENT_ID')\n", (5265, 5291), False, 'import os\n'), ((5303, 5319), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['[]'], {}), '([])\n', (5315, 5319), False, 'from PyQt5.QtWidgets import QApplication\n'), ((5367, 5377), 'PyQt5.QtGui.QPalette', 'QPalette', ([], {}), '()\n', (5375, 5377), False, 'from PyQt5.QtGui import QPalette\n'), ((672, 710), 'PyQt5.QtWidgets.QTableWidget.__init__', 'qt.QTableWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (696, 710), True, 'import PyQt5.QtWidgets as qt\n'), ((1505, 1526), 'PyQt5.QtWidgets.QPushButton', 'qt.QPushButton', (['"""BUY"""'], {}), "('BUY')\n", (1519, 1526), True, 'import PyQt5.QtWidgets as qt\n'), ((1671, 1693), 'PyQt5.QtWidgets.QPushButton', 'qt.QPushButton', (['"""SELL"""'], {}), "('SELL')\n", (1685, 1693), True, 'import PyQt5.QtWidgets as qt\n'), ((3473, 3498), 'PyQt5.QtWidgets.QWidget.__init__', 'qt.QWidget.__init__', (['self'], {}), '(self)\n', (3492, 3498), True, 'import PyQt5.QtWidgets as qt\n'), ((3519, 3541), 'PyQt5.QtWidgets.QLineEdit', 'qt.QLineEdit', (['""""""', 'self'], {}), "('', self)\n", (3531, 3541), True, 'import PyQt5.QtWidgets as qt\n'), ((3658, 3683), 'PyQt5.QtWidgets.QPushButton', 'qt.QPushButton', (['"""Connect"""'], {}), "('Connect')\n", (3672, 3683), True, 'import PyQt5.QtWidgets as qt\n'), ((3773, 3793), 'PyQt5.QtWidgets.QVBoxLayout', 'qt.QVBoxLayout', (['self'], {}), '(self)\n', (3787, 3793), True, 'import PyQt5.QtWidgets as qt\n'), ((4048, 4078), 'tdameritrade.TDClient', 'td.TDClient', (['*self.connectInfo'], {}), '(*self.connectInfo)\n', (4059, 4078), True, 'import tdameritrade as td\n'), ((5202, 5234), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.r_token"""'], {}), "('~/.r_token')\n", (5220, 5234), False, 'import os\n'), ((5422, 5446), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(53)', '(53)', '(53)'], {}), '(53, 53, 53)\n', (5434, 5446), False, 'from PyQt5 import QtGui\n'), ((5546, 5570), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(25)', '(25)', '(25)'], {}), '(25, 25, 25)\n', (5558, 5570), False, 'from PyQt5 import QtGui\n'), ((5622, 5646), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(53)', '(53)', '(53)'], {}), '(53, 53, 53)\n', (5634, 5646), False, 'from PyQt5 import QtGui\n'), ((5858, 5882), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(53)', '(53)', '(53)'], {}), '(53, 53, 53)\n', (5870, 5882), False, 'from PyQt5 import QtGui\n'), ((6037, 6063), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(42)', '(130)', '(218)'], {}), '(42, 130, 218)\n', (6049, 6063), False, 'from PyQt5 import QtGui\n'), ((6111, 6137), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(42)', '(130)', '(218)'], {}), '(42, 130, 218)\n', (6123, 6137), False, 'from PyQt5 import QtGui\n'), ((1195, 1219), 'PyQt5.QtWidgets.QTableWidgetItem', 'qt.QTableWidgetItem', (['"""-"""'], {}), "('-')\n", (1214, 1219), True, 'import PyQt5.QtWidgets as qt\n'), ((5101, 5125), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (5123, 5125), False, 'import asyncio\n')]
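The self.selectedItems()[0] lookup in onBuyButtonClicked only works if a cell in the clicked row happens to be selected; a more robust pattern resolves the row from the button widget itself. A sketch of that slot using the standard Qt sender()/indexAt() idiom (the rest of the class is assumed unchanged):

def onBuyButtonClicked(self, _=None):
    button = self.sender()              # the QPushButton that emitted clicked
    index = self.indexAt(button.pos())  # cell widgets are parented to the
    row = index.row()                   # viewport, so pos() maps to an index
    ticker = self.item(row, 0).text()
    print(row, ticker)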
from mayavi import mlab
import itertools
# import rotanimate
from sklearn.decomposition import PCA
from collections import OrderedDict

# setting up figure
fig = mlab.figure('hitleys', bgcolor=(1,1,1))
fig.scene.disable_render = True
# fig = plt.figure(dpi=100)
# ax = fig.add_subplot(111, projection='3d')
# fig_2 = plt.figure(dpi=100)
# ax_2 = fig_2.add_subplot(111)

# setting up statistical tools from the sklearn package.
# This page explains the base of the technique and variants:
# http://scikit-learn.org/stable/modules/decomposition.html
pca_3 = PCA(n_components=3)
# pca_2 = PCA(n_components=2)

# left-lib hitleys:
hitleys = [[.487, 1-.737, .754, 1-.766, 'alysson'],
           [.462, .359, .714, .34, 'pupo'],
           [.494, 1-.833, .824, 1-.821, 'leite'],
           [.596, 1-.731, .795, 1-.83, 'valdez'],
           [.635, .314, .783, 1-.83, 'castanheira'],
           [.417, .442, .638, .446, 'souza'],
           [.34, .333, .791, .28, 'ramos'],
           [.891, 1-.859, .84, 1-.876, 'goncalo'],
           [.583, .314, .658, .222, 'stivelman'],
           [.314, .385, .843, .331, 'horsth'],
           [.788, .212, .815, 1-.867, 'santullo'],
           [1-.731, .314, .86, 1-.706, 'torres'],
           [.615, 1-.724, .797, 1-.766, 'ferreira'],
           [.462, 1-.705, .658, .448, 'almeida'],
           [.744, .218, .745, 1-.835, 'vasconcelos'],
           [.385, .276, .825, .267, 'veras'],
           [.462, 1-.718, .869, 1-.747, 'borgo'],
           [.705, 1-.808, .878, 1-.903, 'silva'],
           [.429, 1-.763, .853, 1-.747, 'menezes'],
           [.346, .314, .761, .326, 'porto'],
           [1-.756, 1-.769, .852, .345, 'castilho'],
           [.346, .365, .746, .34, 'giroto'],
           [.256, .244, .843, .368, 'azevedo'],
           [.846, 1-.795, .889, 1-.963, 'cesar'],
           [1-.905, 1-.902, .907, 1-.926, 'lucas'],
           [1-.833, 1-.756, .771, .377, 'azeredo'],
           [.5, .25, .836, 1-.733, 'marques'],
           [.449, .295, .814, .257, 'bezerra'],
           [1-.795, 1-.846, .932, 1-.706, 'gerent'],
           [.808, .327, .693, 1-.722, 'rodrigues'],
           [.872, 1-.756, .861, 1-.908, 'dias'],
           [.385, .231, .841, .299, 'albano'],
           [.647, 1-.712, .801, 1-.761, 'baltar'],
           [.571, 1-.724, .87, 1-.816, 'magliano'],
           [.353, .378, .804, .303, 'bobadilha'],
           [.494, .34, .728, .234, 'mascarenhas'],
           [.615, .333, .635, .225, 'claudino'],
           [.615, .269, .663, .236, 'maiandi'],
           [.481, 1-.801, .685, .308, 'pimentel'],
           [1-.763, .487, .568, .517, 'herrmann'],
           [.353, 1-.801, .818, 1-.775, 'corsino'],
           [.256, .231, .908, .257, 'sousa'],
           [.218, .269, .856, .358, 'goes']]
hitleys_d = OrderedDict((x[4], x[:4]) for x in hitleys)

references = [[.5, .5, .5, .5, 'perfect_center']]
references_d = OrderedDict((x[4], x[:4]) for x in references)

# actual work: first find the fit using the pca_x component and than use it to
# transform our 4-dimension data.
hitleys_3 = pca_3.fit(list(hitleys_d.values())).transform(list(hitleys_d.values()))
references_3 = pca_3.transform(list(references_d.values()))
# hitleys_2 = pca_2.fit(list(hitleys_d.values())).transform(list(hitleys_d.values()))

# Just unwrapping in a data display friendly way.
X_3, Y_3, Z_3 = zip(*hitleys_3)
RX_3, RY_3, RZ_3 = zip(*references_3)
# X_2, Y_2 = zip(*hitleys_2)

# plotting the scatter plots.
mlab.points3d(X_3, Y_3, Z_3, scale_factor=0.02, color=(0,0,1))
mlab.points3d(RX_3, RY_3, RZ_3, scale_factor=0.02, color=(1,0,0))
mlab.orientation_axes()
# ax.scatter3D(X_3, Y_3, Z_3, c='b')
# ax.scatter3D(RX_3, RY_3, RZ_3, c='r')
# ax_2.scatter(X_2, Y_2)

OFFSET = 0.01
for label, xyz_ in zip([x[4] for x in itertools.chain(hitleys, references)],
                       itertools.chain(hitleys_3, references_3)):
    mlab.text3d(xyz_[0] + OFFSET, xyz_[1] + OFFSET, xyz_[2] + OFFSET, label,
                scale=.02, color=(0,0,0))

fig.scene.disable_render = False
# rotanimate.rotanimate(ax, 100, 'hitleys.gif', delay=20)
[ "mayavi.mlab.text3d", "mayavi.mlab.figure", "mayavi.mlab.points3d", "sklearn.decomposition.PCA", "collections.OrderedDict", "itertools.chain", "mayavi.mlab.orientation_axes" ]
[((162, 203), 'mayavi.mlab.figure', 'mlab.figure', (['"""hitleys"""'], {'bgcolor': '(1, 1, 1)'}), "('hitleys', bgcolor=(1, 1, 1))\n", (173, 203), False, 'from mayavi import mlab\n'), ((556, 575), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(3)'}), '(n_components=3)\n', (559, 575), False, 'from sklearn.decomposition import PCA\n'), ((2772, 2815), 'collections.OrderedDict', 'OrderedDict', (['((x[4], x[:4]) for x in hitleys)'], {}), '((x[4], x[:4]) for x in hitleys)\n', (2783, 2815), False, 'from collections import OrderedDict\n'), ((2883, 2929), 'collections.OrderedDict', 'OrderedDict', (['((x[4], x[:4]) for x in references)'], {}), '((x[4], x[:4]) for x in references)\n', (2894, 2929), False, 'from collections import OrderedDict\n'), ((3457, 3521), 'mayavi.mlab.points3d', 'mlab.points3d', (['X_3', 'Y_3', 'Z_3'], {'scale_factor': '(0.02)', 'color': '(0, 0, 1)'}), '(X_3, Y_3, Z_3, scale_factor=0.02, color=(0, 0, 1))\n', (3470, 3521), False, 'from mayavi import mlab\n'), ((3520, 3587), 'mayavi.mlab.points3d', 'mlab.points3d', (['RX_3', 'RY_3', 'RZ_3'], {'scale_factor': '(0.02)', 'color': '(1, 0, 0)'}), '(RX_3, RY_3, RZ_3, scale_factor=0.02, color=(1, 0, 0))\n', (3533, 3587), False, 'from mayavi import mlab\n'), ((3586, 3609), 'mayavi.mlab.orientation_axes', 'mlab.orientation_axes', ([], {}), '()\n', (3607, 3609), False, 'from mayavi import mlab\n'), ((3828, 3868), 'itertools.chain', 'itertools.chain', (['hitleys_3', 'references_3'], {}), '(hitleys_3, references_3)\n', (3843, 3868), False, 'import itertools\n'), ((3873, 3978), 'mayavi.mlab.text3d', 'mlab.text3d', (['(xyz_[0] + OFFSET)', '(xyz_[1] + OFFSET)', '(xyz_[2] + OFFSET)', 'label'], {'scale': '(0.02)', 'color': '(0, 0, 0)'}), '(xyz_[0] + OFFSET, xyz_[1] + OFFSET, xyz_[2] + OFFSET, label,\n scale=0.02, color=(0, 0, 0))\n', (3884, 3978), False, 'from mayavi import mlab\n'), ((3764, 3800), 'itertools.chain', 'itertools.chain', (['hitleys', 'references'], {}), '(hitleys, references)\n', (3779, 3800), False, 'import itertools\n')]
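Because the 4-D scores are projected down to 3-D before plotting, it is worth checking how much variance the projection keeps; scikit-learn exposes this directly. A small diagnostic that could be run right after the fit:

# after pca_3.fit(...):
print(pca_3.explained_variance_ratio_)         # per-component share of variance
print(pca_3.explained_variance_ratio_.sum())   # total variance retained in 3-D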
import logging
import sys

logger = logging.getLogger("data")

# Configure the main logger for the data package
if not logger.handlers:
    handler = logging.StreamHandler(sys.stdout)
    formatting = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    handler.setFormatter(formatting)
    handler.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    logger.debug("Data logger registered.")
[ "logging.Formatter", "logging.StreamHandler", "logging.getLogger" ]
[((36, 61), 'logging.getLogger', 'logging.getLogger', (['"""data"""'], {}), "('data')\n", (53, 61), False, 'import logging\n'), ((150, 183), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (171, 183), False, 'import logging\n'), ((201, 263), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(message)s')\n", (218, 263), False, 'import logging\n')]
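The if not logger.handlers: guard is what makes this module safe to import more than once; without it, every import would attach another handler and duplicate each log line. A sketch demonstrating the guard's effect (the setup() wrapper is hypothetical, with the same body as above):

import logging, sys

def setup():
    logger = logging.getLogger("data")
    if not logger.handlers:  # skipped on the second call
        logger.addHandler(logging.StreamHandler(sys.stdout))
    return logger

setup(); setup()
assert len(logging.getLogger("data").handlers) == 1  # still a single handler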
import unittest

from troposphere import Parameter, Ref, NoValue
from troposphere.validators import boolean, integer, integer_range
from troposphere.validators import positive_integer, network_port
from troposphere.validators import tg_healthcheck_port
from troposphere.validators import s3_bucket_name, encoding, status
from troposphere.validators import iam_path, iam_names, iam_role_name
from troposphere.validators import iam_group_name, iam_user_name, elb_name
from troposphere.validators import backup_vault_name, check_required
from troposphere.validators import mutually_exclusive, notification_type
from troposphere.validators import notification_event, task_type
from troposphere.validators import compliance_level, operating_system
from troposphere.validators import one_of
from troposphere.validators import waf_action_type


class TestValidators(unittest.TestCase):

    def test_boolean(self):
        for x in [True, "True", "true", 1, "1"]:
            self.assertEqual(boolean(x), True, repr(x))
        for x in [False, "False", "false", 0, "0"]:
            self.assertEqual(boolean(x), False, repr(x))
        for x in ["000", "111", "abc"]:
            with self.assertRaises(ValueError):
                boolean(x)

    def test_integer(self):
        self.assertEqual(integer(-1), -1)
        self.assertEqual(integer("-1"), "-1")
        self.assertEqual(integer(0), 0)
        self.assertEqual(integer("0"), "0")
        self.assertEqual(integer(65535), 65535)
        self.assertEqual(integer("65535"), "65535")
        self.assertEqual(integer(1.0), 1.0)
        with self.assertRaises(ValueError):
            integer("string")
        with self.assertRaises(ValueError):
            integer(object)
        with self.assertRaises(ValueError):
            integer(None)

    def test_positive_integer(self):
        for x in [0, 1, 65535]:
            positive_integer(x)
        for x in [-1, -10]:
            with self.assertRaises(ValueError):
                positive_integer(x)

    def test_integer_range(self):
        between_ten_and_twenty = integer_range(10, 20)
        self.assertEqual(between_ten_and_twenty(10), 10)
        self.assertEqual(between_ten_and_twenty(15), 15)
        self.assertEqual(between_ten_and_twenty(20), 20)
        for i in (-1, 9, 21, 1111111):
            with self.assertRaises(ValueError):
                between_ten_and_twenty(i)

    def test_network_port(self):
        for x in [-1, 0, 1, 1024, 65535]:
            network_port(x)
        for x in [-2, -10, 65536, 100000]:
            with self.assertRaises(ValueError):
                network_port(x)

    def test_network_port_ref(self):
        p = Parameter('myport')
        network_port(Ref(p))

    def test_tg_healthcheck_port(self):
        for x in ["traffic-port"]:
            tg_healthcheck_port(x)
        for x in [-1, 0, 1, 1024, 65535]:
            tg_healthcheck_port(x)
        for x in [-2, -10, 65536, 100000]:
            with self.assertRaises(ValueError):
                tg_healthcheck_port(x)

    def test_tg_healthcheck_port_ref(self):
        p = Parameter('myport')
        tg_healthcheck_port(Ref(p))

    def test_s3_bucket_name(self):
        for b in ['a'*3, 'a'*63, 'wick3d-sweet.bucket']:
            s3_bucket_name(b)
        for b in ['a'*2, 'a'*64, 'invalid_bucket', 'InvalidBucket']:
            with self.assertRaises(ValueError):
                s3_bucket_name(b)
        for b in ['.invalid', 'invalid.', 'invalid..bucket']:
            with self.assertRaises(ValueError):
                s3_bucket_name(b)
        for b in ['1.2.3.4', '11.22.33.44', '111.222.333.444']:
            with self.assertRaises(ValueError):
                s3_bucket_name(b)

    def test_elb_name(self):
        for b in ['a', 'a-a', 'aaa', 'a'*32,
                  'wick3d-elb-name', 'Wick3d-ELB-Name']:
            elb_name(b)
        for b in ['a'*33, 'invalid_elb', '-invalid-elb', 'invalid-elb-',
                  '-elb-', '-a', 'a-']:
            with self.assertRaises(ValueError):
                elb_name(b)

    def test_encoding(self):
        for e in ['plain', 'base64']:
            encoding(e)
        for e in ['wrong_encdoing', 'base62']:
            with self.assertRaises(ValueError):
                encoding(e)

    def test_status(self):
        for s in ['Active', 'Inactive']:
            status(s)
        for s in ['active', 'idle']:
            with self.assertRaises(ValueError):
                status(s)

    def test_iam_names(self):
        for s in ['foobar.+=@-,', 'BARfoo789.+=@-,']:
            iam_names(s)
        for s in ['foo%', 'bar$']:
            with self.assertRaises(ValueError):
                iam_names(s)

    def test_iam_path(self):
        for s in ['/%s/' % ('a'*30), '/%s/' % ('a'*510)]:
            iam_path(s)
        for s in ['/%s/' % ('a'*511), '/%s/' % ('a'*1025)]:
            with self.assertRaises(ValueError):
                iam_path(s)

    def test_iam_role_name(self):
        for s in ['a'*30, 'a'*64]:
            iam_role_name(s)
        for s in ['a'*65, 'a'*128]:
            with self.assertRaises(ValueError):
                iam_role_name(s)

    def test_iam_group_name(self):
        for s in ['a'*64, 'a'*128]:
            iam_group_name(s)
        for s in ['a'*129, 'a'*256]:
            with self.assertRaises(ValueError):
                iam_group_name(s)

    def test_iam_user_name(self):
        for s in ['a', 'a'*64, 'A', 'Aa', 'A=,.@-']:
            iam_user_name(s)
        for s in ['', 'a'*65, 'a%', 'a#', 'A a']:
            with self.assertRaises(ValueError):
                iam_user_name(s)

    def test_backup_vault_name(self):
        for s in ['a', 'a'*50, 'A', 'Aa', 'A1', 'A-a', 'A_a', 'A.a']:
            backup_vault_name(s)
        for s in ['', 'a'*65, 'a%', 'a#', 'A a']:
            with self.assertRaises(ValueError):
                backup_vault_name(s)

    def test_check_required(self):
        class_name = "test_class"
        props = {
            'foo': 1,
            'bar': 2,
        }
        conditionals = {
            'foo',
            'bar',
        }
        check_required(class_name, props, conditionals)
        conditionals = {
            'foo',
            'bar',
            'baz',
        }
        with self.assertRaises(ValueError):
            check_required(class_name, props, conditionals)

    def test_one_of(self):
        conds = ['Bilbo', 'Frodo']
        one_of('hobbits', {"first": "Bilbo"}, "first", conds)
        one_of('hobbits', {"first": "Frodo"}, "first", conds)
        with self.assertRaises(ValueError):
            one_of('hobbits', {"first": "Gandalf"}, "first", conds)
            one_of('hobbits', {"first": "Gandalf"}, "second", conds)

    def test_mutually_exclusive(self):
        conds = ['a', 'b', 'c']
        mutually_exclusive('a', {"a": "apple"}, conds)
        mutually_exclusive('b', {"b": "banana"}, conds)
        mutually_exclusive('c', {"c": "carrot"}, conds)
        with self.assertRaises(ValueError):
            mutually_exclusive('ac', {"a": "apple", "c": "carrot"}, conds)
        with self.assertRaises(ValueError):
            mutually_exclusive(
                'abc', {"a": "apple", "b": "banana", "c": "carrot"}, conds
            )

    def test_mutually_exclusive_novalue(self):
        conds = ['a', 'b', 'c']
        properties = {
            'a': Ref("AWS::NoValue"),
            'b': NoValue,
            'c': "AWS::Region",
        }
        mutually_exclusive("a", properties, conds)

    def test_compliance_level(self):
        for s in ['CRITICAL', 'HIGH', 'MEDIUM', 'LOW', 'INFORMATIONAL',
                  'UNSPECIFIED']:
            compliance_level(s)
        for s in ['crit', '', '%%', 'FORMATIONAL']:
            with self.assertRaises(ValueError):
                compliance_level(s)

    def test_notification_event(self):
        for l in [['All', 'InProgress', 'Success', 'TimedOut', 'Cancelled',
                   'Failed'], ['InProgress', 'TimedOut']]:
            notification_event(l)
        for l in [['', 'timeout', '%'], ['Inprogress', '@ll']]:
            with self.assertRaises(ValueError):
                notification_event(l)

    def test_notification_type(self):
        for s in ['Command', 'Invocation']:
            notification_type(s)
        for s in ['foo', '', 'command', 'Iinvocation']:
            with self.assertRaises(ValueError):
                notification_type(s)

    def test_operating_system(self):
        for s in ['WINDOWS', 'AMAZON_LINUX', 'UBUNTU',
                  'REDHAT_ENTERPRISE_LINUX']:
            operating_system(s)
        for s in ['', 'bar', 'AMAZONLINUX', 'LINUX']:
            with self.assertRaises(ValueError):
                operating_system(s)

    def test_task_type(self):
        for s in ['RUN_COMMAND', 'AUTOMATION', 'LAMBDA', 'STEP_FUNCTION']:
            task_type(s)
        for s in ['', 'foo', 'a', 'l@mbda', 'STEPFUNCTION']:
            with self.assertRaises(ValueError):
                task_type(s)

    def test_waf_action_type(self):
        for s in ['ALLOW', 'BLOCK', 'COUNT']:
            waf_action_type(s)
        for s in ['', 'deny', 'UNBLOCK', 'COUNTER']:
            with self.assertRaises(ValueError):
                waf_action_type(s)


if __name__ == '__main__':
    unittest.main()
[ "troposphere.Ref", "troposphere.validators.iam_names", "troposphere.validators.positive_integer", "troposphere.validators.encoding", "troposphere.validators.waf_action_type", "troposphere.validators.operating_system", "unittest.main", "troposphere.validators.notification_type", "troposphere.validators.compliance_level", "troposphere.validators.iam_role_name", "troposphere.validators.iam_user_name", "troposphere.validators.backup_vault_name", "troposphere.validators.integer", "troposphere.validators.iam_group_name", "troposphere.validators.notification_event", "troposphere.validators.tg_healthcheck_port", "troposphere.validators.one_of", "troposphere.validators.s3_bucket_name", "troposphere.validators.iam_path", "troposphere.validators.elb_name", "troposphere.validators.boolean", "troposphere.validators.check_required", "troposphere.validators.integer_range", "troposphere.Parameter", "troposphere.validators.task_type", "troposphere.validators.status", "troposphere.validators.mutually_exclusive", "troposphere.validators.network_port" ]
[((9324, 9339), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9337, 9339), False, 'import unittest\n'), ((2079, 2100), 'troposphere.validators.integer_range', 'integer_range', (['(10)', '(20)'], {}), '(10, 20)\n', (2092, 2100), False, 'from troposphere.validators import boolean, integer, integer_range\n'), ((2678, 2697), 'troposphere.Parameter', 'Parameter', (['"""myport"""'], {}), "('myport')\n", (2687, 2697), False, 'from troposphere import Parameter, Ref, NoValue\n'), ((3102, 3121), 'troposphere.Parameter', 'Parameter', (['"""myport"""'], {}), "('myport')\n", (3111, 3121), False, 'from troposphere import Parameter, Ref, NoValue\n'), ((6139, 6186), 'troposphere.validators.check_required', 'check_required', (['class_name', 'props', 'conditionals'], {}), '(class_name, props, conditionals)\n', (6153, 6186), False, 'from troposphere.validators import backup_vault_name, check_required\n'), ((6454, 6507), 'troposphere.validators.one_of', 'one_of', (['"""hobbits"""', "{'first': 'Bilbo'}", '"""first"""', 'conds'], {}), "('hobbits', {'first': 'Bilbo'}, 'first', conds)\n", (6460, 6507), False, 'from troposphere.validators import one_of\n'), ((6516, 6569), 'troposphere.validators.one_of', 'one_of', (['"""hobbits"""', "{'first': 'Frodo'}", '"""first"""', 'conds'], {}), "('hobbits', {'first': 'Frodo'}, 'first', conds)\n", (6522, 6569), False, 'from troposphere.validators import one_of\n'), ((6831, 6877), 'troposphere.validators.mutually_exclusive', 'mutually_exclusive', (['"""a"""', "{'a': 'apple'}", 'conds'], {}), "('a', {'a': 'apple'}, conds)\n", (6849, 6877), False, 'from troposphere.validators import mutually_exclusive, notification_type\n'), ((6886, 6933), 'troposphere.validators.mutually_exclusive', 'mutually_exclusive', (['"""b"""', "{'b': 'banana'}", 'conds'], {}), "('b', {'b': 'banana'}, conds)\n", (6904, 6933), False, 'from troposphere.validators import mutually_exclusive, notification_type\n'), ((6942, 6989), 'troposphere.validators.mutually_exclusive', 'mutually_exclusive', (['"""c"""', "{'c': 'carrot'}", 'conds'], {}), "('c', {'c': 'carrot'}, conds)\n", (6960, 6989), False, 'from troposphere.validators import mutually_exclusive, notification_type\n'), ((7492, 7534), 'troposphere.validators.mutually_exclusive', 'mutually_exclusive', (['"""a"""', 'properties', 'conds'], {}), "('a', properties, conds)\n", (7510, 7534), False, 'from troposphere.validators import mutually_exclusive, notification_type\n'), ((1290, 1301), 'troposphere.validators.integer', 'integer', (['(-1)'], {}), '(-1)\n', (1297, 1301), False, 'from troposphere.validators import boolean, integer, integer_range\n'), ((1332, 1345), 'troposphere.validators.integer', 'integer', (['"""-1"""'], {}), "('-1')\n", (1339, 1345), False, 'from troposphere.validators import boolean, integer, integer_range\n'), ((1378, 1388), 'troposphere.validators.integer', 'integer', (['(0)'], {}), '(0)\n', (1385, 1388), False, 'from troposphere.validators import boolean, integer, integer_range\n'), ((1418, 1430), 'troposphere.validators.integer', 'integer', (['"""0"""'], {}), "('0')\n", (1425, 1430), False, 'from troposphere.validators import boolean, integer, integer_range\n'), ((1462, 1476), 'troposphere.validators.integer', 'integer', (['(65535)'], {}), '(65535)\n', (1469, 1476), False, 'from troposphere.validators import boolean, integer, integer_range\n'), ((1510, 1526), 'troposphere.validators.integer', 'integer', (['"""65535"""'], {}), "('65535')\n", (1517, 1526), False, 'from troposphere.validators import boolean, integer, integer_range\n'), ((1562, 1574), 'troposphere.validators.integer', 'integer', (['(1.0)'], {}), '(1.0)\n', (1569, 1574), False, 'from troposphere.validators import boolean, integer, integer_range\n'), ((1637, 1654), 'troposphere.validators.integer', 'integer', (['"""string"""'], {}), "('string')\n", (1644, 1654), False, 'from troposphere.validators import boolean, integer, integer_range\n'), ((1711, 1726), 'troposphere.validators.integer', 'integer', (['object'], {}), '(object)\n', (1718, 1726), False, 'from troposphere.validators import boolean, integer, integer_range\n'), ((1783, 1796), 'troposphere.validators.integer', 'integer', (['None'], {}), '(None)\n', (1790, 1796), False, 'from troposphere.validators import boolean, integer, integer_range\n'), ((1879, 1898), 'troposphere.validators.positive_integer', 'positive_integer', (['x'], {}), '(x)\n', (1895, 1898), False, 'from troposphere.validators import positive_integer, network_port\n'), ((2489, 2504), 'troposphere.validators.network_port', 'network_port', (['x'], {}), '(x)\n', (2501, 2504), False, 'from troposphere.validators import positive_integer, network_port\n'), ((2719, 2725), 'troposphere.Ref', 'Ref', (['p'], {}), '(p)\n', (2722, 2725), False, 'from troposphere import Parameter, Ref, NoValue\n'), ((2815, 2837), 'troposphere.validators.tg_healthcheck_port', 'tg_healthcheck_port', (['x'], {}), '(x)\n', (2834, 2837), False, 'from troposphere.validators import tg_healthcheck_port\n'), ((2892, 2914), 'troposphere.validators.tg_healthcheck_port', 'tg_healthcheck_port', (['x'], {}), '(x)\n', (2911, 2914), False, 'from troposphere.validators import tg_healthcheck_port\n'), ((3150, 3156), 'troposphere.Ref', 'Ref', (['p'], {}), '(p)\n', (3153, 3156), False, 'from troposphere import Parameter, Ref, NoValue\n'), ((3263, 3280), 'troposphere.validators.s3_bucket_name', 's3_bucket_name', (['b'], {}), '(b)\n', (3277, 3280), False, 'from troposphere.validators import s3_bucket_name, encoding, status\n'), ((3866, 3877), 'troposphere.validators.elb_name', 'elb_name', (['b'], {}), '(b)\n', (3874, 3877), False, 'from troposphere.validators import iam_group_name, iam_user_name, elb_name\n'), ((4147, 4158), 'troposphere.validators.encoding', 'encoding', (['e'], {}), '(e)\n', (4155, 4158), False, 'from troposphere.validators import s3_bucket_name, encoding, status\n'), ((4363, 4372), 'troposphere.validators.status', 'status', (['s'], {}), '(s)\n', (4369, 4372), False, 'from troposphere.validators import s3_bucket_name, encoding, status\n'), ((4581, 4593), 'troposphere.validators.iam_names', 'iam_names', (['s'], {}), '(s)\n', (4590, 4593), False, 'from troposphere.validators import iam_path, iam_names, iam_role_name\n'), ((4806, 4817), 'troposphere.validators.iam_path', 'iam_path', (['s'], {}), '(s)\n', (4814, 4817), False, 'from troposphere.validators import iam_path, iam_names, iam_role_name\n'), ((5036, 5052), 'troposphere.validators.iam_role_name', 'iam_role_name', (['s'], {}), '(s)\n', (5049, 5052), False, 'from troposphere.validators import iam_path, iam_names, iam_role_name\n'), ((5254, 5271), 'troposphere.validators.iam_group_name', 'iam_group_name', (['s'], {}), '(s)\n', (5268, 5271), False, 'from troposphere.validators import iam_group_name, iam_user_name, elb_name\n'), ((5491, 5507), 'troposphere.validators.iam_user_name', 'iam_user_name', (['s'], {}), '(s)\n', (5504, 5507), False, 'from troposphere.validators import iam_group_name, iam_user_name, elb_name\n'), ((5760, 5780), 'troposphere.validators.backup_vault_name', 'backup_vault_name', (['s'], {}), '(s)\n', (5777, 5780), False, 'from troposphere.validators import backup_vault_name, check_required\n'), ((6335, 6382), 'troposphere.validators.check_required', 'check_required', (['class_name', 'props', 'conditionals'], {}), '(class_name, props, conditionals)\n', (6349, 6382), False, 'from troposphere.validators import backup_vault_name, check_required\n'), ((6626, 6681), 'troposphere.validators.one_of', 'one_of', (['"""hobbits"""', "{'first': 'Gandalf'}", '"""first"""', 'conds'], {}), "('hobbits', {'first': 'Gandalf'}, 'first', conds)\n", (6632, 6681), False, 'from troposphere.validators import one_of\n'), ((6694, 6750), 'troposphere.validators.one_of', 'one_of', (['"""hobbits"""', "{'first': 'Gandalf'}", '"""second"""', 'conds'], {}), "('hobbits', {'first': 'Gandalf'}, 'second', conds)\n", (6700, 6750), False, 'from troposphere.validators import one_of\n'), ((7046, 7108), 'troposphere.validators.mutually_exclusive', 'mutually_exclusive', (['"""ac"""', "{'a': 'apple', 'c': 'carrot'}", 'conds'], {}), "('ac', {'a': 'apple', 'c': 'carrot'}, conds)\n", (7064, 7108), False, 'from troposphere.validators import mutually_exclusive, notification_type\n'), ((7165, 7243), 'troposphere.validators.mutually_exclusive', 'mutually_exclusive', (['"""abc"""', "{'a': 'apple', 'b': 'banana', 'c': 'carrot'}", 'conds'], {}), "('abc', {'a': 'apple', 'b': 'banana', 'c': 'carrot'}, conds)\n", (7183, 7243), False, 'from troposphere.validators import mutually_exclusive, notification_type\n'), ((7394, 7413), 'troposphere.Ref', 'Ref', (['"""AWS::NoValue"""'], {}), "('AWS::NoValue')\n", (7397, 7413), False, 'from troposphere import Parameter, Ref, NoValue\n'), ((7691, 7710), 'troposphere.validators.compliance_level', 'compliance_level', (['s'], {}), '(s)\n', (7707, 7710), False, 'from troposphere.validators import compliance_level, operating_system\n'), ((8034, 8055), 'troposphere.validators.notification_event', 'notification_event', (['l'], {}), '(l)\n', (8052, 8055), False, 'from troposphere.validators import notification_event, task_type\n'), ((8301, 8321), 'troposphere.validators.notification_type', 'notification_type', (['s'], {}), '(s)\n', (8318, 8321), False, 'from troposphere.validators import mutually_exclusive, notification_type\n'), ((8614, 8633), 'troposphere.validators.operating_system', 'operating_system', (['s'], {}), '(s)\n', (8630, 8633), False, 'from troposphere.validators import compliance_level, operating_system\n'), ((8890, 8902), 'troposphere.validators.task_type', 'task_type', (['s'], {}), '(s)\n', (8899, 8902), False, 'from troposphere.validators import notification_event, task_type\n'), ((9136, 9154), 'troposphere.validators.waf_action_type', 'waf_action_type', (['s'], {}), '(s)\n', (9151, 9154), False, 'from troposphere.validators import waf_action_type\n'), ((985, 995), 'troposphere.validators.boolean', 'boolean', (['x'], {}), '(x)\n', (992, 995), False, 'from troposphere.validators import boolean, integer, integer_range\n'), ((1093, 1103), 'troposphere.validators.boolean', 'boolean', (['x'], {}), '(x)\n', (1100, 1103), False, 'from troposphere.validators import boolean, integer, integer_range\n'), ((1225, 1235), 'troposphere.validators.boolean', 'boolean', (['x'], {}), '(x)\n', (1232, 1235), False, 'from troposphere.validators import boolean, integer, integer_range\n'), ((1991, 2010), 'troposphere.validators.positive_integer', 'positive_integer', (['x'], {}), '(x)\n', (2007, 2010), False, 'from troposphere.validators import positive_integer, network_port\n'), ((2612, 2627), 'troposphere.validators.network_port', 'network_port', (['x'], {}), '(x)\n', (2624, 2627), False, 'from troposphere.validators import positive_integer, network_port\n'), ((3022, 3044), 'troposphere.validators.tg_healthcheck_port', 'tg_healthcheck_port', (['x'], {}), '(x)\n', (3041, 3044), False, 'from troposphere.validators import tg_healthcheck_port\n'), ((3414, 3431), 'troposphere.validators.s3_bucket_name', 's3_bucket_name', (['b'], {}), '(b)\n', (3428, 3431), False, 'from troposphere.validators import s3_bucket_name, encoding, status\n'), ((3558, 3575), 'troposphere.validators.s3_bucket_name', 's3_bucket_name', (['b'], {}), '(b)\n', (3572, 3575), False, 'from troposphere.validators import s3_bucket_name, encoding, status\n'), ((3704, 3721), 'troposphere.validators.s3_bucket_name', 's3_bucket_name', (['b'], {}), '(b)\n', (3718, 3721), False, 'from troposphere.validators import s3_bucket_name, encoding, status\n'), ((4055, 4066), 'troposphere.validators.elb_name', 'elb_name', (['b'], {}), '(b)\n', (4063, 4066), False, 'from troposphere.validators import iam_group_name, iam_user_name, elb_name\n'), ((4270, 4281), 'troposphere.validators.encoding', 'encoding', (['e'], {}), '(e)\n', (4278, 4281), False, 'from troposphere.validators import s3_bucket_name, encoding, status\n'), ((4474, 4483), 'troposphere.validators.status', 'status', (['s'], {}), '(s)\n', (4480, 4483), False, 'from troposphere.validators import s3_bucket_name, encoding, status\n'), ((4693, 4705), 'troposphere.validators.iam_names', 'iam_names', (['s'], {}), '(s)\n', (4702, 4705), False, 'from troposphere.validators import iam_path, iam_names, iam_role_name\n'), ((4942, 4953), 'troposphere.validators.iam_path', 'iam_path', (['s'], {}), '(s)\n', (4950, 4953), False, 'from troposphere.validators import iam_path, iam_names, iam_role_name\n'), ((5153, 5169), 'troposphere.validators.iam_role_name', 'iam_role_name', (['s'], {}), '(s)\n', (5166, 5169), False, 'from troposphere.validators import iam_path, iam_names, iam_role_name\n'), ((5373, 5390), 'troposphere.validators.iam_group_name', 'iam_group_name', (['s'], {}), '(s)\n', (5387, 5390), False, 'from troposphere.validators import iam_group_name, iam_user_name, elb_name\n'), ((5622, 5638), 'troposphere.validators.iam_user_name', 'iam_user_name', (['s'], {}), '(s)\n', (5635, 5638), False, 'from troposphere.validators import iam_group_name, iam_user_name, elb_name\n'), ((5895, 5915), 'troposphere.validators.backup_vault_name', 'backup_vault_name', (['s'], {}), '(s)\n', (5912, 5915), False, 'from troposphere.validators import backup_vault_name, check_required\n'), ((7827, 7846), 'troposphere.validators.compliance_level', 'compliance_level', (['s'], {}), '(s)\n', (7843, 7846), False, 'from troposphere.validators import compliance_level, operating_system\n'), ((8184, 8205), 'troposphere.validators.notification_event', 'notification_event', (['l'], {}), '(l)\n', (8202, 8205), False, 'from troposphere.validators import notification_event, task_type\n'), ((8442, 8462), 'troposphere.validators.notification_type', 'notification_type', (['s'], {}), '(s)\n', (8459, 8462), False, 'from troposphere.validators import mutually_exclusive, notification_type\n'), ((8752, 8771), 'troposphere.validators.operating_system', 'operating_system', (['s'], {}), '(s)\n', (8768, 8771), False, 'from troposphere.validators import compliance_level, operating_system\n'), ((9028, 9040), 'troposphere.validators.task_type', 'task_type', (['s'], {}), '(s)\n', (9037, 9040), False, 'from troposphere.validators import notification_event, task_type\n'), ((9272, 9290), 'troposphere.validators.waf_action_type', 'waf_action_type', (['s'], {}), '(s)\n', (9287, 9290), False, 'from troposphere.validators import waf_action_type\n')]
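All of these validators follow the same contract the tests exercise: return the value on success, raise ValueError otherwise. A sketch of a custom validator written in the same style (my own example, not part of troposphere):

def cidr_prefix_length(x):
    """Accept an IPv4 prefix length 0-32, mirroring the validator contract."""
    i = int(x)  # raises ValueError for non-numeric input, like integer()
    if i < 0 or i > 32:
        raise ValueError('%r is not a valid prefix length' % x)
    return x

assert cidr_prefix_length(24) == 24
try:
    cidr_prefix_length(40)
except ValueError:
    pass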
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
# (c) 1998-2021 all rights reserved


def test():
    """
    Verify that the device base class is not exported
    """
    # access
    import journal

    # attempt to
    try:
        # access the device base class
        journal.Device()
        # which is not published
        assert False, "unreachable"
    # if it fails
    except AttributeError:
        # all good
        pass

    # all done
    return


# main
if __name__ == "__main__":
    # run the test
    test()


# end of file
[ "journal.Device" ]
[((295, 311), 'journal.Device', 'journal.Device', ([], {}), '()\n', (309, 311), False, 'import journal\n')]
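The same check reads a little tighter with pytest's context manager, if the suite ever moves off the hand-rolled try/except (a sketch, assuming pytest is available; the original stays framework-free on purpose):

import pytest

def test():
    import journal
    # the device base class must not be published on the package
    with pytest.raises(AttributeError):
        journal.Device()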
############################################## ##### Predicting EUR/USD pair using LSTM ##### ############################################## ################################### ### Part 1 - Data Preprocessing ### ################################### ### Importing the libraries ### import numpy as np import pandas as pd ### Importing the data set ### dataset = pd.read_csv('dataset.csv') ### Set basic parameters ### timesteps = 120 test_size = 0.2 # 0.2 = 20% of the dataset ### Set hyperparameters ### from keras.optimizers import Adam parameters = {'hidden_layers': [3, 6], 'units_per_layer': [50, 100, 200], 'dropout': [0.0, 0.2, 0.4], 'batch_size': [128, 256], 'epochs': [100], 'optimizer': [Adam(lr = 0.001)], 'loss': ['mean_squared_error'], 'metrics': ['accuracy']} ### Processing the specific dataset ### # The code an next assumes that the prediction(y) is the last column of the dataset. # If your dataset isn't ready, process it here. # Convert dates to days # 0 is Monday - 4 is Friday # Stock exchanges are closed on Weekends import datetime for i in range (0, dataset.shape[0]): dataset.iloc[i,4] = datetime.datetime.strptime(dataset.iloc[i,4], '%m/%d/%Y').weekday() # We don't need the 2 last columns and we have to make 'Price' column being the last column. # Swap 'Price' and "RSI' columns for i in range (0, dataset.shape[0]): dataset.iloc[i,16] = dataset.iloc[i,3] dataset.iloc[i,3] = dataset.iloc[i,15] dataset.iloc[i,15] = dataset.iloc[i,16] # Delete the unused columns dataset = dataset.iloc[:,:16] ### Feature Scaling - Normalization ### from sklearn.preprocessing import MinMaxScaler sc = MinMaxScaler(feature_range = (0, 1)) dataset_scaled = sc.fit_transform(dataset) ### Creating a 3D data structure with [timesteps] timesteps and one output ### # [Samples, Timesteps, Features] # x_train(Z) = [Features(Z-1)] # y_train(Z) = [Feature(Z)] x = [] y = [] for i in range(timesteps, dataset_scaled.shape[0]): x.append(dataset_scaled[i-timesteps:i, :dataset_scaled.shape[1]-1]) y.append(dataset_scaled[i, dataset_scaled.shape[1]-1]) x, y = np.array(x), np.array(y) y = np.reshape(y, (y.shape[0], 1)) ### Splitting the dataset into the Training set and Test set ### from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = test_size, random_state = 0) ################################## ### Part 2 - Building the LSTM ### ################################## ### Importing the Keras libraries and packages ### from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM from keras.layers import Dropout ### Build the regressor ### def build_regressor(hidden_layers, units_per_layer, dropout, optimizer, loss, metrics): # Initialising the LSTM regressor = Sequential() # Adding the first LSTM layer and some Dropout regularisation regressor.add(LSTM(units = units_per_layer, return_sequences = True, input_shape = (x_train.shape[1], x_train.shape[2]))) regressor.add(Dropout(dropout)) # Adding new LSTM hidden layers if needed for i in range(0, hidden_layers-1): regressor.add(LSTM(units = units_per_layer, return_sequences = True)) regressor.add(Dropout(dropout)) # Adding the pre-last LSTM layer regressor.add(LSTM(units = units_per_layer)) regressor.add(Dropout(dropout)) # Adding the output layer regressor.add(Dense(units = 1)) # Compiling the LSTM regressor.compile(optimizer = optimizer, loss = loss, metrics = metrics) return regressor ### Train the model ### def fit_regressor(epochs, batch_size): return 
regressor.fit(x_train, y_train, epochs = epochs, batch_size = batch_size, validation_data=(x_test, y_test), shuffle=True) ### Start Evaluating and Tuning our LSTM model ### import matplotlib.pyplot as plt results = [] best_parameters = [] best_loss = float("inf") best_model = Sequential() for layers in parameters["hidden_layers"]: for units_per_layer in parameters["units_per_layer"]: for dropout in parameters["dropout"]: for batch_size in parameters["batch_size"]: for epochs in parameters["epochs"]: for optimizer in parameters["optimizer"]: for loss in parameters["loss"]: for metrics in parameters["metrics"]: regressor = build_regressor(int(layers), units_per_layer, dropout, optimizer, loss, [metrics]) history = fit_regressor(epochs, batch_size) results.append([layers, units_per_layer, dropout, batch_size, epochs, optimizer, loss, metrics, float(history.history['loss'][0]), float(history.history['val_loss'][0])]) plt.plot(history.history['val_loss'][2:epochs], color = 'blue', label = 'Test') plt.plot(history.history['loss'][2:epochs], color = 'red', label = 'Train') plt.xlabel('Epochs') plt.ylabel('Error') plt.legend() plt.show() print('Layers:\t\t',layers,'\nUnits per layer:',units_per_layer,'\nDropout:\t',dropout,'\nBatch size:\t', batch_size, '\nEpochs:\t\t',epochs,'\nOptimizer:\t',optimizer,'\nLoss function:\t',loss,'\nMetrics:\t',metrics, '\nLoss (Train):\t',history.history['loss'][epochs-1],'\nLoss (Test):\t',history.history['val_loss'][epochs-1],'\n\n') # Keep the best model if float(history.history['loss'][epochs-1]) < best_loss: best_model = regressor best_loss = float(history.history['loss'][0]) best_parameters.clear() best_parameters.append([layers, units_per_layer, dropout, batch_size, epochs, optimizer, loss, metrics, float(history.history['loss'][0]), float(history.history['val_loss'][0]), float(history.history['acc'][0]), float(history.history['val_acc'][0])]) ### Show the best parameters ### print('************* Best parameters *************') print('* Layers:\t',best_parameters[0][0],'\n* Units:\t',best_parameters[0][1],'\n* Dropout:\t',best_parameters[0][2],'\n* Batch size:\t', best_parameters[0][3],'\n* Epochs:\t',best_parameters[0][4],'\n* Optimizer:\t',best_parameters[0][5],'\n* Loss function:',best_parameters[0][6], '\n* Metrics:\t',best_parameters[0][7],'\n* Loss (Train):\t',best_parameters[0][8],'\n* Loss (Test):\t',best_parameters[0][9]) print('\n*******************************************\n') ### Save the weights ### best_model.save_weights('./checkpoint') ########################################### ### Part 3 - Making a single prediction ### ########################################### ### INSERT HERE your timeseries in this array [Timesteps]x[Features]### for_predict = x_test[0,:] # For example, take the first timeseries of the Test set ### Reshape and predict ### # It will use the best trained regressor # for_predict = np.reshape(for_predict, (1,for_predict.shape[0], for_predict.shape[1])) predictions_scaled = best_model.predict(for_predict) ### Invert MinMax transform ### # Our scaler have used a specific array size. # We have to add some padding to be able to inverse the transform correctly. 
padding = np.zeros((for_predict.shape[0], dataset.shape[1] - 1))
predictions_scaled = np.append(padding, predictions_scaled, axis=1)
predictions_scaled = sc.inverse_transform(predictions_scaled)
predictions = predictions_scaled[:, dataset_scaled.shape[1] - 1]

### Calculate RMSE for the new predictions ###
# ADD HERE the actual values to the actual_values list (without normalization)
actual_values = [1.110]  # Just an example

# Calculate the RMSE
from math import sqrt
from sklearn.metrics import mean_squared_error
rmse = sqrt(mean_squared_error(predictions, actual_values))
print('Predictions RMSE: %.3f' % rmse)
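# --- Editor's note: illustrative sketch (helper name is hypothetical) ---
# The zero padding above exists because MinMaxScaler.inverse_transform expects
# the same number of columns the scaler was fitted on; only the last column
# (the target) is meaningful after inverting. The same idea as a reusable helper:
def invert_target_scaling(scaler, scaled_target, n_features):
    """Pad a scaled target column up to n_features columns, invert the
    transform, and return only the target column in original units."""
    pad = np.zeros((scaled_target.shape[0], n_features - 1))
    full = np.append(pad, scaled_target, axis=1)
    return scaler.inverse_transform(full)[:, -1]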
[ "pandas.read_csv", "sklearn.model_selection.train_test_split", "sklearn.preprocessing.MinMaxScaler", "numpy.append", "numpy.reshape", "sklearn.metrics.mean_squared_error", "matplotlib.pyplot.show", "keras.layers.Dropout", "matplotlib.pyplot.legend", "keras.optimizers.Adam", "datetime.datetime.strptime", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "keras.layers.LSTM", "numpy.zeros", "keras.layers.Dense", "numpy.array", "keras.models.Sequential", "matplotlib.pyplot.xlabel" ]
[((364, 390), 'pandas.read_csv', 'pd.read_csv', (['"""dataset.csv"""'], {}), "('dataset.csv')\n", (375, 390), True, 'import pandas as pd\n'), ((1746, 1780), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (1758, 1780), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2231, 2261), 'numpy.reshape', 'np.reshape', (['y', '(y.shape[0], 1)'], {}), '(y, (y.shape[0], 1))\n', (2241, 2261), True, 'import numpy as np\n'), ((2416, 2475), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': 'test_size', 'random_state': '(0)'}), '(x, y, test_size=test_size, random_state=0)\n', (2432, 2475), False, 'from sklearn.model_selection import train_test_split\n'), ((4078, 4090), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4088, 4090), False, 'from keras.models import Sequential\n'), ((7702, 7774), 'numpy.reshape', 'np.reshape', (['for_predict', '(1, for_predict.shape[0], for_predict.shape[1])'], {}), '(for_predict, (1, for_predict.shape[0], for_predict.shape[1]))\n', (7712, 7774), True, 'import numpy as np\n'), ((7993, 8047), 'numpy.zeros', 'np.zeros', (['(for_predict.shape[0], dataset.shape[1] - 1)'], {}), '((for_predict.shape[0], dataset.shape[1] - 1))\n', (8001, 8047), True, 'import numpy as np\n'), ((8066, 8112), 'numpy.append', 'np.append', (['padding', 'predictions_scaled'], {'axis': '(1)'}), '(padding, predictions_scaled, axis=1)\n', (8075, 8112), True, 'import numpy as np\n'), ((2202, 2213), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2210, 2213), True, 'import numpy as np\n'), ((2215, 2226), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2223, 2226), True, 'import numpy as np\n'), ((2939, 2951), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2949, 2951), False, 'from keras.models import Sequential\n'), ((8499, 8545), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['predictions', 'actual_values'], {}), '(predictions, actual_values)\n', (8517, 8545), False, 'from sklearn.metrics import mean_squared_error\n'), ((776, 790), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (780, 790), False, 'from keras.optimizers import Adam\n'), ((3040, 3145), 'keras.layers.LSTM', 'LSTM', ([], {'units': 'units_per_layer', 'return_sequences': '(True)', 'input_shape': '(x_train.shape[1], x_train.shape[2])'}), '(units=units_per_layer, return_sequences=True, input_shape=(x_train.\n shape[1], x_train.shape[2]))\n', (3044, 3145), False, 'from keras.layers import LSTM\n'), ((3166, 3182), 'keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (3173, 3182), False, 'from keras.layers import Dropout\n'), ((3447, 3474), 'keras.layers.LSTM', 'LSTM', ([], {'units': 'units_per_layer'}), '(units=units_per_layer)\n', (3451, 3474), False, 'from keras.layers import LSTM\n'), ((3496, 3512), 'keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (3503, 3512), False, 'from keras.layers import Dropout\n'), ((3566, 3580), 'keras.layers.Dense', 'Dense', ([], {'units': '(1)'}), '(units=1)\n', (3571, 3580), False, 'from keras.layers import Dense\n'), ((1228, 1286), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['dataset.iloc[i, 4]', '"""%m/%d/%Y"""'], {}), "(dataset.iloc[i, 4], '%m/%d/%Y')\n", (1254, 1286), False, 'import datetime\n'), ((3296, 3346), 'keras.layers.LSTM', 'LSTM', ([], {'units': 'units_per_layer', 'return_sequences': '(True)'}), '(units=units_per_layer, return_sequences=True)\n', (3300, 3346), 
False, 'from keras.layers import LSTM\n'), ((3374, 3390), 'keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (3381, 3390), False, 'from keras.layers import Dropout\n'), ((5051, 5126), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss'][2:epochs]"], {'color': '"""blue"""', 'label': '"""Test"""'}), "(history.history['val_loss'][2:epochs], color='blue', label='Test')\n", (5059, 5126), True, 'import matplotlib.pyplot as plt\n'), ((5163, 5234), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss'][2:epochs]"], {'color': '"""red"""', 'label': '"""Train"""'}), "(history.history['loss'][2:epochs], color='red', label='Train')\n", (5171, 5234), True, 'import matplotlib.pyplot as plt\n'), ((5271, 5291), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (5281, 5291), True, 'import matplotlib.pyplot as plt\n'), ((5324, 5343), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (5334, 5343), True, 'import matplotlib.pyplot as plt\n'), ((5376, 5388), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5386, 5388), True, 'import matplotlib.pyplot as plt\n'), ((5421, 5431), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5429, 5431), True, 'import matplotlib.pyplot as plt\n')]
import logging
from argparse import ArgumentParser
from typing import Any, List

from django.conf import settings
from django.db import transaction

from zerver.lib.logging_util import log_to_file
from zerver.lib.management import ZulipBaseCommand
from zerver.models import UserProfile
from zproject.backends import ZulipLDAPException, sync_user_from_ldap

## Setup ##
logger = logging.getLogger('zulip.sync_ldap_user_data')
log_to_file(logger, settings.LDAP_SYNC_LOG_PATH)

# Run this on a cronjob to pick up on name changes.
def sync_ldap_user_data(user_profiles: List[UserProfile], deactivation_protection: bool=True) -> None:
    logger.info("Starting update.")
    with transaction.atomic():
        realms = {u.realm.string_id for u in user_profiles}
        for u in user_profiles:
            # This will save the user if relevant, and will do nothing if the user
            # does not exist.
            try:
                sync_user_from_ldap(u, logger)
            except ZulipLDAPException as e:
                logger.error("Error attempting to update user %s:" % (u.delivery_email,))
                logger.error(e)

        if deactivation_protection:
            if not UserProfile.objects.filter(is_bot=False, is_active=True).exists():
                error_msg = ("LDAP sync would have deactivated all users. This is most likely due " +
                             "to a misconfiguration of LDAP settings. Rolling back...\n" +
                             "Use the --force option if the mass deactivation is intended.")
                logger.error(error_msg)
                # Raising an exception in this atomic block will rollback the transaction.
                raise Exception(error_msg)
            for string_id in realms:
                if not UserProfile.objects.filter(is_bot=False, is_active=True,
                                                   realm__string_id=string_id,
                                                   role__gte=UserProfile.ROLE_REALM_ADMINISTRATOR).exists():
                    error_msg = ("LDAP sync would have deactivated all administrators of realm %s. " +
                                 "This is most likely due " +
                                 "to a misconfiguration of LDAP settings. Rolling back...\n" +
                                 "Use the --force option if the mass deactivation is intended.")
                    error_msg = error_msg % (string_id,)
                    logger.error(error_msg)
                    raise Exception(error_msg)

    logger.info("Finished update.")

class Command(ZulipBaseCommand):
    def add_arguments(self, parser: ArgumentParser) -> None:
        parser.add_argument('-f', '--force',
                            dest='force',
                            action="store_true",
                            default=False,
                            help='Disable the protection against deactivating all users.')

        self.add_realm_args(parser)
        self.add_user_list_args(parser)

    def handle(self, *args: Any, **options: Any) -> None:
        if options.get('realm_id') is not None:
            realm = self.get_realm(options)
            user_profiles = self.get_users(options, realm, is_bot=False,
                                           include_deactivated=True)
        else:
            user_profiles = UserProfile.objects.select_related().filter(is_bot=False)
        sync_ldap_user_data(user_profiles, not options['force'])
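# --- Editor's note: illustrative sketch of the rollback pattern used above ---
# The deactivation guard relies on Django's transaction semantics: an exception
# raised inside a `transaction.atomic()` block rolls back every write made in
# it. Generic shape of the pattern (model and helper names are hypothetical):
#
#     from django.db import transaction
#     with transaction.atomic():
#         sync_accounts()                                    # tentative writes
#         if not Account.objects.filter(active=True).exists():
#             raise RuntimeError("refusing to deactivate everyone")  # rolls back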
[ "zerver.models.UserProfile.objects.filter", "zerver.models.UserProfile.objects.select_related", "zerver.lib.logging_util.log_to_file", "zproject.backends.sync_user_from_ldap", "django.db.transaction.atomic", "logging.getLogger" ]
[((378, 424), 'logging.getLogger', 'logging.getLogger', (['"""zulip.sync_ldap_user_data"""'], {}), "('zulip.sync_ldap_user_data')\n", (395, 424), False, 'import logging\n'), ((425, 473), 'zerver.lib.logging_util.log_to_file', 'log_to_file', (['logger', 'settings.LDAP_SYNC_LOG_PATH'], {}), '(logger, settings.LDAP_SYNC_LOG_PATH)\n', (436, 473), False, 'from zerver.lib.logging_util import log_to_file\n'), ((675, 695), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (693, 695), False, 'from django.db import transaction\n'), ((941, 971), 'zproject.backends.sync_user_from_ldap', 'sync_user_from_ldap', (['u', 'logger'], {}), '(u, logger)\n', (960, 971), False, 'from zproject.backends import ZulipLDAPException, sync_user_from_ldap\n'), ((3293, 3329), 'zerver.models.UserProfile.objects.select_related', 'UserProfile.objects.select_related', ([], {}), '()\n', (3327, 3329), False, 'from zerver.models import UserProfile\n'), ((1194, 1250), 'zerver.models.UserProfile.objects.filter', 'UserProfile.objects.filter', ([], {'is_bot': '(False)', 'is_active': '(True)'}), '(is_bot=False, is_active=True)\n', (1220, 1250), False, 'from zerver.models import UserProfile\n'), ((1781, 1918), 'zerver.models.UserProfile.objects.filter', 'UserProfile.objects.filter', ([], {'is_bot': '(False)', 'is_active': '(True)', 'realm__string_id': 'string_id', 'role__gte': 'UserProfile.ROLE_REALM_ADMINISTRATOR'}), '(is_bot=False, is_active=True, realm__string_id=\n string_id, role__gte=UserProfile.ROLE_REALM_ADMINISTRATOR)\n', (1807, 1918), False, 'from zerver.models import UserProfile\n')]
# coding=utf-8 # Copyright 2022 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Github API util tests.""" import contextlib import os import textwrap from unittest import mock from etils import epath import pytest from tensorflow_datasets.core.github_api import github_path _SKIP_NON_HERMETIC = False # Non hermetic tests are explicitly marked and skipped if `_SKIP_NON_HERMETIC` # is True. non_hermetic_test = pytest.mark.skipif( _SKIP_NON_HERMETIC, reason='Non-hermetic test skipped.', ) _original_query_github = github_path.GithubApi.query _AUTHOR_EXPECTED_CONTENT = textwrap.dedent("""\ # This is the list of TensorFlow Datasets authors for copyright purposes. # # This does not necessarily list everyone who has contributed code, since in # some cases, their employer may be the copyright holder. To see the full list # of contributors, see the revision history in source control. Google Inc. """) # Note: assert_no_api_call is globally applied on all tests (in conftest.py) @contextlib.contextmanager def enable_api_call(): """Contextmanager which locally re-enable API calls.""" with mock.patch.object(github_path.GithubApi, 'query', _original_query_github): yield def test_parse_github_path(): url = 'github://tensorflow/datasets/tree/master/docs/README.md' repo, branch, path = github_path._parse_github_path(url) assert repo == 'tensorflow/datasets' assert branch == 'master' assert path == 'docs/README.md' url = 'github://tensorflow/datasets/tree/master' repo, branch, path = github_path._parse_github_path(url) assert repo == 'tensorflow/datasets' assert branch == 'master' assert path == '' # pylint: disable=g-explicit-bool-comparison def test_github_path_registered_as_path(): uri = 'github://tensorflow/datasets/tree/master/docs/README.md' path = epath.Path(uri) assert isinstance(path, github_path.GithubPath) assert os.fspath(path) == uri def test_invalid_github_path(): # Path are lazily validated, so require explicit `_metadata` call. with pytest.raises(ValueError, match='Invalid github path'): _ = github_path.GithubPath()._metadata with pytest.raises(ValueError, match='Invalid github path'): _ = github_path.GithubPath('')._metadata with pytest.raises(ValueError, match='Invalid github path'): _ = github_path.GithubPath('github://not/a/path') with pytest.raises(ValueError, match='Invalid github path'): _ = github_path.GithubPath('github://tensorflow/tree/master/docs/README.md') # `blob` isn't accepted for consistency between paths. 
with pytest.raises(ValueError, match='/blob/` isn\'t accepted.'): _ = github_path.GithubPath( 'github://tensorflow/datasets/blob/master/docs/README.md') p = github_path.GithubPath( 'github://tensorflow/datasets/tree/master/docs/README.md') p = p.parent # /docs _ = p._metadata p = p.parent # / _ = p._metadata p = p.parent with pytest.raises(ValueError, match='Invalid github path'): _ = p._metadata def test_github_path_purepath(): """Tests that pathlib methods works as expected.""" p = github_path.GithubPath('github://tensorflow/datasets/tree/master') sub_p = p / 'some_folder' assert isinstance(sub_p, github_path.GithubPath) assert str(p) == 'github://tensorflow/datasets/tree/master' assert str(sub_p) == 'github://tensorflow/datasets/tree/master/some_folder' assert os.fspath(p) == 'github://tensorflow/datasets/tree/master' assert p == github_path.GithubPath.from_repo('tensorflow/datasets') def test_github_path_as_url(): p = github_path.GithubPath.from_repo('tensorflow/datasets', 'v3.1.0') p /= 'README.md' expected = 'https://raw.githubusercontent.com/tensorflow/datasets/v3.1.0/README.md' assert p.as_raw_url() == expected @non_hermetic_test def test_github_api_listdir(): """Test query github API.""" # PurePath ops do not trigger API calls p = github_path.GithubPath.from_repo('tensorflow/datasets', 'v3.1.0') p = p / 'tensorflow_datasets' / 'testing' with enable_api_call(): sub_dirs = sorted(p.iterdir()) # `listdir` call cache the filetype of all childs all_dir_names = [d.name for d in sub_dirs if d.is_dir()] all_file_names = [d.name for d in sub_dirs if d.is_file()] all_names = [d.name for d in sub_dirs] with pytest.raises(NotADirectoryError): list((p / '__init__.py').iterdir()) assert all_names == [ '__init__.py', 'dataset_builder_testing.py', 'dataset_builder_testing_test.py', 'fake_data_generation', 'fake_data_utils.py', 'generate_archives.sh', 'metadata', 'mocking.py', 'mocking_test.py', 'test_case.py', 'test_data', 'test_utils.py', 'test_utils_test.py', ] assert all_dir_names == [ 'fake_data_generation', 'metadata', 'test_data', ] assert all_file_names == [ '__init__.py', 'dataset_builder_testing.py', 'dataset_builder_testing_test.py', 'fake_data_utils.py', 'generate_archives.sh', 'mocking.py', 'mocking_test.py', 'test_case.py', 'test_utils.py', 'test_utils_test.py', ] @non_hermetic_test def test_github_api_exists(): """Test query github API.""" p = github_path.GithubPath.from_repo('tensorflow/datasets', 'v3.1.0') with enable_api_call(): assert p.exists() assert not (p / 'unknown_dir').exists() readme = p / 'README.md' core = p / 'tensorflow_datasets' / 'core' with enable_api_call(): assert readme.is_file() assert core.is_dir() # Data should have been cached (no API calls required) assert not readme.is_dir() assert not core.is_file() assert readme.exists() assert core.exists() # Recreating a new Path reuse the cache readme_recreated = core.parent.parent / 'README.md' assert readme_recreated.is_file() assert readme_recreated._metadata == readme._metadata @non_hermetic_test def test_github_api_read_bytes_text(): """Test query github API file content.""" p = github_path.GithubPath.from_repo('tensorflow/datasets', 'v3.1.0') # Note: This is not wrapped inside `enable_api_call` contextmanager as # users need to download files without setting up an API token. 
content = (p / 'AUTHORS').read_bytes() assert isinstance(content, bytes) assert content == _AUTHOR_EXPECTED_CONTENT.encode() content = (p / 'AUTHORS').read_text() assert isinstance(content, str) assert content == _AUTHOR_EXPECTED_CONTENT # Cannot read the content of a directory. with pytest.raises(FileNotFoundError, match='Request failed'): (p / 'tensorflow_datasets' / 'core').read_bytes() @non_hermetic_test def test_github_api_copy(tmp_path): p = github_path.GithubPath.from_repo('tensorflow/datasets', 'v3.1.0') src = p / 'AUTHORS' dst = tmp_path / 'AUTHORS' target = src.copy(dst) assert target == dst assert dst.read_text() == _AUTHOR_EXPECTED_CONTENT with pytest.raises(FileExistsError, match='Destination .* exists'): src.copy(dst) src.copy(dst, overwrite=True) def test_assert_no_api_call(): with pytest.raises(AssertionError, match='Forbidden API call'): github_path.GithubPath.from_repo('tensorflow/datasets', 'v1.0.0').exists() def test_get_tree(): tree = { 'tree': [ { 'path': 'code1.py', 'type': 'blob', }, { 'path': 'myfolder', 'type': 'tree', }, { 'path': 'myfolder/code2.py', 'type': 'blob', }, { 'path': 'myfolder/mysubfolder', 'type': 'tree', }, { 'path': 'myfolder/mysubfolder/code3.py', 'type': 'blob', }, ] } with mock.patch.object(github_path.GithubApi, 'query', return_value=tree): root = github_path.GithubPath.from_repo('tensorflow/datasets', 'v9.9.9') def gh_path(file: str) -> github_path.GithubPath: return github_path.GithubPath( f'github://tensorflow/datasets/tree/v9.9.9/{file}') def assert_is_file(file): assert file.is_file() assert not file.is_dir() assert file.exists() def assert_is_folder(folder, files): assert set(folder.iterdir()) == files assert folder.is_dir() assert not folder.is_file() assert folder.exists() myfolder = gh_path('myfolder') mysubfolder = gh_path('myfolder/mysubfolder') code1 = gh_path('code1.py') code2 = gh_path('myfolder/code2.py') code3 = gh_path('myfolder/mysubfolder/code3.py') assert_is_folder(root, {code1, myfolder}) assert_is_folder(myfolder, {code2, mysubfolder}) assert_is_folder(mysubfolder, {code3}) assert_is_file(code1) assert_is_file(code2) assert_is_file(code3)
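# --- Editor's note: generic shape of the hermetic-test pattern used above ---
# conftest.py globally patches GithubApi.query so that any accidental API call
# fails the suite, while `enable_api_call()` locally restores the real function
# for tests explicitly marked non-hermetic. With hypothetical names:
#
#   _real_request = some_module.Client.request      # captured before patching
#   # conftest.py: mock.patch.object(some_module.Client, 'request', _forbidden)
#   @contextlib.contextmanager
#   def allow_api():
#     with mock.patch.object(some_module.Client, 'request', _real_request):
#       yield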
[ "textwrap.dedent", "unittest.mock.patch.object", "tensorflow_datasets.core.github_api.github_path.GithubPath", "os.fspath", "pytest.mark.skipif", "pytest.raises", "tensorflow_datasets.core.github_api.github_path._parse_github_path", "tensorflow_datasets.core.github_api.github_path.GithubPath.from_repo", "etils.epath.Path" ]
[((951, 1026), 'pytest.mark.skipif', 'pytest.mark.skipif', (['_SKIP_NON_HERMETIC'], {'reason': '"""Non-hermetic test skipped."""'}), "(_SKIP_NON_HERMETIC, reason='Non-hermetic test skipped.')\n", (969, 1026), False, 'import pytest\n'), ((1120, 1490), 'textwrap.dedent', 'textwrap.dedent', (['""" # This is the list of TensorFlow Datasets authors for copyright purposes.\n #\n # This does not necessarily list everyone who has contributed code, since in\n # some cases, their employer may be the copyright holder. To see the full list\n # of contributors, see the revision history in source control.\n\n Google Inc.\n """'], {}), '(\n """ # This is the list of TensorFlow Datasets authors for copyright purposes.\n #\n # This does not necessarily list everyone who has contributed code, since in\n # some cases, their employer may be the copyright holder. To see the full list\n # of contributors, see the revision history in source control.\n\n Google Inc.\n """\n )\n', (1135, 1490), False, 'import textwrap\n'), ((1909, 1944), 'tensorflow_datasets.core.github_api.github_path._parse_github_path', 'github_path._parse_github_path', (['url'], {}), '(url)\n', (1939, 1944), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((2121, 2156), 'tensorflow_datasets.core.github_api.github_path._parse_github_path', 'github_path._parse_github_path', (['url'], {}), '(url)\n', (2151, 2156), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((2410, 2425), 'etils.epath.Path', 'epath.Path', (['uri'], {}), '(uri)\n', (2420, 2425), False, 'from etils import epath\n'), ((3322, 3408), 'tensorflow_datasets.core.github_api.github_path.GithubPath', 'github_path.GithubPath', (['"""github://tensorflow/datasets/tree/master/docs/README.md"""'], {}), "(\n 'github://tensorflow/datasets/tree/master/docs/README.md')\n", (3344, 3408), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((3684, 3750), 'tensorflow_datasets.core.github_api.github_path.GithubPath', 'github_path.GithubPath', (['"""github://tensorflow/datasets/tree/master"""'], {}), "('github://tensorflow/datasets/tree/master')\n", (3706, 3750), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((4147, 4212), 'tensorflow_datasets.core.github_api.github_path.GithubPath.from_repo', 'github_path.GithubPath.from_repo', (['"""tensorflow/datasets"""', '"""v3.1.0"""'], {}), "('tensorflow/datasets', 'v3.1.0')\n", (4179, 4212), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((4485, 4550), 'tensorflow_datasets.core.github_api.github_path.GithubPath.from_repo', 'github_path.GithubPath.from_repo', (['"""tensorflow/datasets"""', '"""v3.1.0"""'], {}), "('tensorflow/datasets', 'v3.1.0')\n", (4517, 4550), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((5818, 5883), 'tensorflow_datasets.core.github_api.github_path.GithubPath.from_repo', 'github_path.GithubPath.from_repo', (['"""tensorflow/datasets"""', '"""v3.1.0"""'], {}), "('tensorflow/datasets', 'v3.1.0')\n", (5850, 5883), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((6588, 6653), 'tensorflow_datasets.core.github_api.github_path.GithubPath.from_repo', 'github_path.GithubPath.from_repo', (['"""tensorflow/datasets"""', '"""v3.1.0"""'], {}), "('tensorflow/datasets', 'v3.1.0')\n", (6620, 6653), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((7273, 7338), 'tensorflow_datasets.core.github_api.github_path.GithubPath.from_repo', 
'github_path.GithubPath.from_repo', (['"""tensorflow/datasets"""', '"""v3.1.0"""'], {}), "('tensorflow/datasets', 'v3.1.0')\n", (7305, 7338), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((1678, 1751), 'unittest.mock.patch.object', 'mock.patch.object', (['github_path.GithubApi', '"""query"""', '_original_query_github'], {}), "(github_path.GithubApi, 'query', _original_query_github)\n", (1695, 1751), False, 'from unittest import mock\n'), ((2485, 2500), 'os.fspath', 'os.fspath', (['path'], {}), '(path)\n', (2494, 2500), False, 'import os\n'), ((2619, 2673), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Invalid github path"""'}), "(ValueError, match='Invalid github path')\n", (2632, 2673), False, 'import pytest\n'), ((2726, 2780), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Invalid github path"""'}), "(ValueError, match='Invalid github path')\n", (2739, 2780), False, 'import pytest\n'), ((2835, 2889), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Invalid github path"""'}), "(ValueError, match='Invalid github path')\n", (2848, 2889), False, 'import pytest\n'), ((2899, 2944), 'tensorflow_datasets.core.github_api.github_path.GithubPath', 'github_path.GithubPath', (['"""github://not/a/path"""'], {}), "('github://not/a/path')\n", (2921, 2944), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((2953, 3007), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Invalid github path"""'}), "(ValueError, match='Invalid github path')\n", (2966, 3007), False, 'import pytest\n'), ((3017, 3089), 'tensorflow_datasets.core.github_api.github_path.GithubPath', 'github_path.GithubPath', (['"""github://tensorflow/tree/master/docs/README.md"""'], {}), "('github://tensorflow/tree/master/docs/README.md')\n", (3039, 3089), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((3155, 3213), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""/blob/` isn\'t accepted."""'}), '(ValueError, match="/blob/` isn\'t accepted.")\n', (3168, 3213), False, 'import pytest\n'), ((3224, 3310), 'tensorflow_datasets.core.github_api.github_path.GithubPath', 'github_path.GithubPath', (['"""github://tensorflow/datasets/blob/master/docs/README.md"""'], {}), "(\n 'github://tensorflow/datasets/blob/master/docs/README.md')\n", (3246, 3310), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((3513, 3567), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Invalid github path"""'}), "(ValueError, match='Invalid github path')\n", (3526, 3567), False, 'import pytest\n'), ((3979, 3991), 'os.fspath', 'os.fspath', (['p'], {}), '(p)\n', (3988, 3991), False, 'import os\n'), ((4052, 4107), 'tensorflow_datasets.core.github_api.github_path.GithubPath.from_repo', 'github_path.GithubPath.from_repo', (['"""tensorflow/datasets"""'], {}), "('tensorflow/datasets')\n", (4084, 4107), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((4879, 4912), 'pytest.raises', 'pytest.raises', (['NotADirectoryError'], {}), '(NotADirectoryError)\n', (4892, 4912), False, 'import pytest\n'), ((7098, 7154), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {'match': '"""Request failed"""'}), "(FileNotFoundError, match='Request failed')\n", (7111, 7154), False, 'import pytest\n'), ((7500, 7561), 'pytest.raises', 'pytest.raises', (['FileExistsError'], {'match': '"""Destination .* exists"""'}), "(FileExistsError, match='Destination .* exists')\n", (7513, 
7561), False, 'import pytest\n'), ((7654, 7711), 'pytest.raises', 'pytest.raises', (['AssertionError'], {'match': '"""Forbidden API call"""'}), "(AssertionError, match='Forbidden API call')\n", (7667, 7711), False, 'import pytest\n'), ((8348, 8416), 'unittest.mock.patch.object', 'mock.patch.object', (['github_path.GithubApi', '"""query"""'], {'return_value': 'tree'}), "(github_path.GithubApi, 'query', return_value=tree)\n", (8365, 8416), False, 'from unittest import mock\n'), ((8429, 8494), 'tensorflow_datasets.core.github_api.github_path.GithubPath.from_repo', 'github_path.GithubPath.from_repo', (['"""tensorflow/datasets"""', '"""v9.9.9"""'], {}), "('tensorflow/datasets', 'v9.9.9')\n", (8461, 8494), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((2683, 2707), 'tensorflow_datasets.core.github_api.github_path.GithubPath', 'github_path.GithubPath', ([], {}), '()\n', (2705, 2707), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((2790, 2816), 'tensorflow_datasets.core.github_api.github_path.GithubPath', 'github_path.GithubPath', (['""""""'], {}), "('')\n", (2812, 2816), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((8563, 8637), 'tensorflow_datasets.core.github_api.github_path.GithubPath', 'github_path.GithubPath', (['f"""github://tensorflow/datasets/tree/v9.9.9/{file}"""'], {}), "(f'github://tensorflow/datasets/tree/v9.9.9/{file}')\n", (8585, 8637), False, 'from tensorflow_datasets.core.github_api import github_path\n'), ((7717, 7782), 'tensorflow_datasets.core.github_api.github_path.GithubPath.from_repo', 'github_path.GithubPath.from_repo', (['"""tensorflow/datasets"""', '"""v1.0.0"""'], {}), "('tensorflow/datasets', 'v1.0.0')\n", (7749, 7782), False, 'from tensorflow_datasets.core.github_api import github_path\n')]
""" Author: <NAME> Date: 10 April 2021 """ import logging import os from typing import List, Union import shutil from dataclasses import dataclass from concurrent.futures import ThreadPoolExecutor, as_completed, Future from threading import Lock import urllib.parse import json import glob from zipfile import ZipFile import pandas as pd from requests import Session from alive_progress import alive_bar from sla_cli.src.common.path import Path from sla_cli.src.common.config import inject_config, Config from sla_cli.src.download import inject_http_session, Downloader from sla_cli.src.download.isic.metadata import IsicMetadataDownloader, requires_isic_metadata logger = logging.getLogger(__name__) def make_batches(data: List[any], n: int): """ Yield successive n-sized chunks from data. :param data: The data to batch. :param n: The number of items per batch. :return: A batch of size n taken from data. """ for i in range(0, len(data), n): yield data[i:i + n] @dataclass class DownloadOptions: image_ids: List[str] title: str @dataclass class ResponseOptions: index: int download_path: str unzip: bool class IsicImageDownloader(Downloader): MAX_BATCH_SIZE = 300 def __init__(self, *args, **kwargs): """ Downloader class for the ISIC Archive API. """ super().__init__(*args, **kwargs) self.metadata: pd.DataFrame = self._get_metadata() self.download_path = self._create_download_path(force=self.force) self.batch_size = self.options.config.isic.batch_size self.max_workers = self.options.config.isic.max_workers @requires_isic_metadata def _get_metadata(self) -> pd.DataFrame: """ Returns a dataset with the metadata for only the given dataset name. :return: A filtered dataframe on the dataset name. """ df = pd.read_csv(Path.isic_metadata()) df = df[df["dataset"].str.upper() == convert(self.dataset_name)] return df def _create_download_path(self, force: bool = False) -> Union[str, None]: """ Returns the download path, create it if it does not already exist. :param force: Flag to force deletion. """ path = os.path.join(self.destination_directory, self.dataset_name) if os.path.exists(path) and force: logger.debug(f"'-f/--force' flag set, deleting directory: '{path}'") shutil.rmtree(path) logger.debug(f"Deletion successful.") elif os.path.exists(path) and not force: logger.warning(f"{self.dataset_name} already exists at the destination directory '{path}'") logger.warning(f"If you wish to re-download the dataset, try 'sla-cli download -f/--force <DATASET>'") logger.warning(f"Skipping...") return None # Make the download path. os.mkdir(path) logger.info(f"Created the download directory at: '{path}'") return path @property def image_ids(self) -> List[str]: """Returns the images ids for the given dataset.""" return list(self.metadata["isic_id"]) def download(self, **kwargs): """ Downloads the requested images from the ISIC archive. """ if self.download_path is None: return None self._download() self._verify_download() self._move_images() self._save_metadata() @property def _default_download_options(self): """Creates and returns the default download options.""" return DownloadOptions( image_ids=self.image_ids, title=f"[SLA] - INFO - - - Downloading {self.dataset_name}." ) @inject_http_session def _download(self, session: Session, **kwargs): """ Downloads the requested images from the ISIC archive. :param session: The HTTP session to the ISIC Archive API. 
""" options = kwargs.get("options", self._default_download_options) batches = list(make_batches(options.image_ids, n=self.batch_size)) with alive_bar(len(batches), title=options.title, enrich_print=False) as bar: with ThreadPoolExecutor(max_workers=self.max_workers) as executor: # Create a worker with a batch of image ids to request and download. futures_to_request = {executor.submit(self._make_request, session, batch): idx for idx, batch in enumerate(batches)} # As the requests complete, process the responses for index, future in enumerate(as_completed(futures_to_request)): try: options = ResponseOptions(index, self.download_path, self.unzip) # Process the downloaded batch. self._process_response(future, options) bar() except Exception as e: logger.warning(f"{e.__str__()}") def _make_request(self, session: Session, batch: List[str]): """ Request 300 images from the ISIC API. :param session: The HTTP session object. :param batch: The batch of images to download. :return: The HTTP response. """ url = self._make_url(image_ids=batch) return session.get(url) def _make_url(self, image_ids: List[str]) -> str: """ Creates a request for a series of 300 images. :param image_ids: The images to download, as a json list. :return: The request URL. """ image_ids = self._preprocess_image_ids(image_ids) return f"{self.url}/image/download?include=images&imageIds={image_ids}" @staticmethod def _preprocess_image_ids(image_ids: list) -> List[str]: """ Converts a python list of image IDs to a json array suitable for use with the ISIC API. :param image_ids: The image ids to convert. :return: The formatted image ids. """ image_ids = json.dumps(str(image_ids)) # Replace and switch quote notation for the API image_ids = image_ids.replace('"', "") image_ids = image_ids.replace("'", '"') # Quote all url strings. image_ids = urllib.parse.quote(image_ids) return image_ids @staticmethod def _process_response(future: Future, options: ResponseOptions): """ Saves the downloaded ISIC images to as ZIP archives and then unpacks them. :param future: The future to ask for the result off. :param options: The options to handle the response with. """ res = future.result() if not res: logger.error(f"Download content is empty.") raise ValueError("Issue downloading images.") else: # Save the downloaded data to a zip file. archive_file = os.path.join(options.download_path, f"download_{options.index}.zip") with open(archive_file, "wb") as stream: for chunk in res: stream.write(chunk) if options.unzip: # Unzip the archive to the save path. # Use threading lock to stop deadlocking on filesystem resources. with Lock(): logger.debug(f"Unzipping {archive_file} to {options.download_path}") IsicImageDownloader._unzip_archive(archive_file, options.download_path) logger.debug(f"Removing {archive_file}.") os.remove(archive_file) @staticmethod def _unzip_archive(archive: str, download_path: str) -> None: """ Unzip archive and place contents into output directory. :param archive: The archive to read data from. :param download_path: The path to unpack the archives to. 
""" with ZipFile(archive, 'r') as zip_ref: zip_ref.extractall(download_path) @property def isic_image_path(self) -> str: """Returns the isic image path.""" return os.path.join(self.download_path, "ISIC-images", convert(self.dataset_name)) @property def isic_images(self) -> List[str]: """Returns all the downloaded images, with non-image files removed.""" return [image.split(".")[0] for image in os.listdir(self.isic_image_path) if not image.endswith(".txt")] def _verify_download(self): """Verifies all images were correctly downloaded.""" # Get the metadata and images file names and compare them # to see if any images were missed. image_names = self.isic_images meta_names = list(self.metadata["image_name"]) missing_images = sorted(list(set(meta_names) ^ set(image_names))) if len(missing_images) > 0: self._download_missing_images(missing_images) self._verify_download() else: logger.info(f"All '{self.dataset_name}' images were downloaded successfully'") return True def _download_missing_images(self, missing_images: List[str]): """ Re-download missing images from the initial download. :param missing_images: A list of missing image names. """ df = self.metadata missing_ids = sorted(list(df[df["image_name"].isin(missing_images)]["isic_id"])) options = DownloadOptions( image_ids=missing_ids, title=f"[SLA] - INFO - - - Re-Downloading {len(missing_ids)} from {self.dataset_name}." ) self._download(options=options) @property def image_dst_directory(self) -> str: return os.path.join(self.download_path, "images") def _move_images(self): """Gather all images and move them to the root of the download folder.""" # Move all images to 'images' folder. shutil.move(self.isic_image_path, self.image_dst_directory) # Delete old parent folder. os.rmdir(os.path.join(self.download_path, "ISIC-images")) # Remove all .txt files. [os.remove(os.path.join(self.image_dst_directory, file)) for file in os.listdir(self.image_dst_directory) if file.endswith(".txt")] def _save_metadata(self): """Saves the datasets metadata to a file.""" # Save the metadata name as the dataset name. Handy for opening in excel for review. if self.options.metadata_as_name: save_name = self.dataset_name.lower().replace(" ", "_").replace("-", "_") + ".csv" self.metadata.to_csv(os.path.join(self.download_path, save_name)) # Save as "metadata.csv", easier to work with for ML input pipelines. else: self.metadata.to_csv(os.path.join(self.download_path, "metadata.csv")) def convert(dataset: str) -> str: """Translates the CLI argument name into the Metadata value for the ISIC archive.""" return { "bcn_20000": "BCN_20000", "bcn_2020_challenge": "BCN_2020_Challenge", "brisbane_isic_challenge_2020": "Brisbane ISIC Challenge 2020", "dermoscopedia_cc_by": "Dermoscopedia (CC-BY)", "ham10000": "HAM10000", "isic_2020_challenge_mskcc_contribution": "ISIC 2020 Challenge - MSKCC contribution", "isic_2020_vienna_part_1": "ISIC_2020_Vienna_part_1", "isic_2020_vienna_part_2": "ISIC_2020_Vienna_part2", "jid_editorial_images_2018": "2018 JID Editorial Images", "msk_1": "MSK-1", "msk_2": "MSK-2", "msk_3": "MSK-3", "msk_4": "MSK-4", "msk_5": "MSK-5", "sonic": "SONIC", "sydney_mia_smdc_2020_isic_challenge_contribution": "Sydney (MIA / SMDC) 2020 ISIC challenge contribution", "uda_1": "UDA-1", "uda_2": "UDA-2" }.get(dataset, dataset).upper() def name_converter(name: str) -> str: """ Returns the correct dataset name for datasets begining with numbers. 
    :param name: The name of the dataset to convert.
    :return: The converted dataset name if required; otherwise the passed-in name is returned.
    """
    return {
        "jid_editorial_images_2018": "2018 JID Editorial Images"
    }.get(name, name)
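# --- Editor's note: worked example of _preprocess_image_ids above ---
# For a hypothetical two-element batch, the encoding proceeds as follows:
#     ['abc', 'def']
#     str(...) + json.dumps + strip '"'  ->  ['abc', 'def']   (a plain string)
#     swap ' for "                       ->  ["abc", "def"]   (valid JSON array)
#     urllib.parse.quote                 ->  %5B%22abc%22%2C%20%22def%22%5D
# which is the form appended to the imageIds query parameter in _make_url.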
[ "os.mkdir", "os.remove", "zipfile.ZipFile", "os.path.exists", "sla_cli.src.common.path.Path.isic_metadata", "concurrent.futures.as_completed", "threading.Lock", "shutil.move", "shutil.rmtree", "concurrent.futures.ThreadPoolExecutor", "os.path.join", "os.listdir", "logging.getLogger" ]
[((687, 714), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (704, 714), False, 'import logging\n'), ((2283, 2342), 'os.path.join', 'os.path.join', (['self.destination_directory', 'self.dataset_name'], {}), '(self.destination_directory, self.dataset_name)\n', (2295, 2342), False, 'import os\n'), ((2931, 2945), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (2939, 2945), False, 'import os\n'), ((9723, 9765), 'os.path.join', 'os.path.join', (['self.download_path', '"""images"""'], {}), "(self.download_path, 'images')\n", (9735, 9765), False, 'import os\n'), ((9931, 9990), 'shutil.move', 'shutil.move', (['self.isic_image_path', 'self.image_dst_directory'], {}), '(self.isic_image_path, self.image_dst_directory)\n', (9942, 9990), False, 'import shutil\n'), ((1928, 1948), 'sla_cli.src.common.path.Path.isic_metadata', 'Path.isic_metadata', ([], {}), '()\n', (1946, 1948), False, 'from sla_cli.src.common.path import Path\n'), ((2354, 2374), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2368, 2374), False, 'import os\n'), ((2479, 2498), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (2492, 2498), False, 'import shutil\n'), ((6966, 7034), 'os.path.join', 'os.path.join', (['options.download_path', 'f"""download_{options.index}.zip"""'], {}), "(options.download_path, f'download_{options.index}.zip')\n", (6978, 7034), False, 'import os\n'), ((7954, 7975), 'zipfile.ZipFile', 'ZipFile', (['archive', '"""r"""'], {}), "(archive, 'r')\n", (7961, 7975), False, 'from zipfile import ZipFile\n'), ((10044, 10091), 'os.path.join', 'os.path.join', (['self.download_path', '"""ISIC-images"""'], {}), "(self.download_path, 'ISIC-images')\n", (10056, 10091), False, 'import os\n'), ((2562, 2582), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2576, 2582), False, 'import os\n'), ((4251, 4299), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'self.max_workers'}), '(max_workers=self.max_workers)\n', (4269, 4299), False, 'from concurrent.futures import ThreadPoolExecutor, as_completed, Future\n'), ((8404, 8436), 'os.listdir', 'os.listdir', (['self.isic_image_path'], {}), '(self.isic_image_path)\n', (8414, 8436), False, 'import os\n'), ((10145, 10189), 'os.path.join', 'os.path.join', (['self.image_dst_directory', 'file'], {}), '(self.image_dst_directory, file)\n', (10157, 10189), False, 'import os\n'), ((10203, 10239), 'os.listdir', 'os.listdir', (['self.image_dst_directory'], {}), '(self.image_dst_directory)\n', (10213, 10239), False, 'import os\n'), ((10614, 10657), 'os.path.join', 'os.path.join', (['self.download_path', 'save_name'], {}), '(self.download_path, save_name)\n', (10626, 10657), False, 'import os\n'), ((10785, 10833), 'os.path.join', 'os.path.join', (['self.download_path', '"""metadata.csv"""'], {}), "(self.download_path, 'metadata.csv')\n", (10797, 10833), False, 'import os\n'), ((4645, 4677), 'concurrent.futures.as_completed', 'as_completed', (['futures_to_request'], {}), '(futures_to_request)\n', (4657, 4677), False, 'from concurrent.futures import ThreadPoolExecutor, as_completed, Future\n'), ((7350, 7356), 'threading.Lock', 'Lock', ([], {}), '()\n', (7354, 7356), False, 'from threading import Lock\n'), ((7622, 7645), 'os.remove', 'os.remove', (['archive_file'], {}), '(archive_file)\n', (7631, 7645), False, 'import os\n')]
from django.test import TestCase
from django.contrib.auth import get_user_model


class ModelTests(TestCase):

    def test_create_user_with_email_successful(self):
        """Test creating a new user with an email is successful"""
        email = "<EMAIL>"
        password = "<PASSWORD>"
        user = get_user_model().objects.create_user(
            email=email,
            password=password,
        )

        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))

    def test_new_user_email_normalized(self):
        """Test email for new user is normalized"""
        email = "<EMAIL>"
        user = get_user_model().objects.create_user(email, "test123")

        self.assertEqual(user.email, email.lower())

    def test_new_user_invalid_email(self):
        """Test creating user with no email raises error"""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, "test123")

    def test_create_new_superuser(self):
        """Test creating a new superuser"""
        user = get_user_model().objects.create_superuser(
            "<EMAIL>",
            "test123",
        )

        self.assertTrue(user.is_superuser)
        self.assertTrue(user.is_staff)
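# --- Editor's note: illustrative sketch, not the project's actual manager ---
# The tests above assume a custom user model whose manager normalizes emails
# and rejects empty ones. A minimal manager satisfying them might look like
# this (all names below are hypothetical):
#
#     from django.contrib.auth.models import BaseUserManager
#
#     class UserManager(BaseUserManager):
#         def create_user(self, email, password=None, **extra_fields):
#             if not email:
#                 raise ValueError("Users must have an email address")
#             user = self.model(email=self.normalize_email(email), **extra_fields)
#             user.set_password(password)
#             user.save(using=self._db)
#             return user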
[ "django.contrib.auth.get_user_model" ]
[((345, 361), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (359, 361), False, 'from django.contrib.auth import get_user_model\n'), ((812, 828), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (826, 828), False, 'from django.contrib.auth import get_user_model\n'), ((1235, 1251), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1249, 1251), False, 'from django.contrib.auth import get_user_model\n'), ((1081, 1097), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1095, 1097), False, 'from django.contrib.auth import get_user_model\n')]
from __future__ import absolute_import from pyhwcomm.machine import CPU, GPU, Machine from pyhwcomm.link import QPI48, PCIe3x16 class Intel(Machine): def __init__(self): Machine.__init__(self) self.cpu0 = CPU(0) self.cpu1 = CPU(1) self.topology.add_edge(self.cpu0, self.cpu1, link=QPI48()) self.gpu0 = GPU(0) self.topology.add_edge(self.cpu1, self.gpu0, link=PCIe3x16())
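# --- Editor's note: sketch under assumptions ---
# `topology` appears to be a graph of devices whose edges carry a `link`
# attribute (networkx-style add_edge). If it is indeed a networkx.Graph, the
# route between two endpoints could be inspected roughly like this
# (hypothetical usage; depends on pyhwcomm's Machine implementation):
#
#     import networkx as nx
#     m = Intel()
#     nx.shortest_path(m.topology, m.cpu0, m.gpu0)   # -> [cpu0, cpu1, gpu0]
#     m.topology[m.cpu0][m.cpu1]['link']             # -> the QPI48 link object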
[ "pyhwcomm.machine.Machine.__init__", "pyhwcomm.machine.GPU", "pyhwcomm.link.QPI48", "pyhwcomm.machine.CPU", "pyhwcomm.link.PCIe3x16" ]
[((184, 206), 'pyhwcomm.machine.Machine.__init__', 'Machine.__init__', (['self'], {}), '(self)\n', (200, 206), False, 'from pyhwcomm.machine import CPU, GPU, Machine\n'), ((227, 233), 'pyhwcomm.machine.CPU', 'CPU', (['(0)'], {}), '(0)\n', (230, 233), False, 'from pyhwcomm.machine import CPU, GPU, Machine\n'), ((254, 260), 'pyhwcomm.machine.CPU', 'CPU', (['(1)'], {}), '(1)\n', (257, 260), False, 'from pyhwcomm.machine import CPU, GPU, Machine\n'), ((348, 354), 'pyhwcomm.machine.GPU', 'GPU', (['(0)'], {}), '(0)\n', (351, 354), False, 'from pyhwcomm.machine import CPU, GPU, Machine\n'), ((319, 326), 'pyhwcomm.link.QPI48', 'QPI48', ([], {}), '()\n', (324, 326), False, 'from pyhwcomm.link import QPI48, PCIe3x16\n'), ((413, 423), 'pyhwcomm.link.PCIe3x16', 'PCIe3x16', ([], {}), '()\n', (421, 423), False, 'from pyhwcomm.link import QPI48, PCIe3x16\n')]
#!/usr/bin/python3
import serial  # pip3 install pyserial
import argparse
import time
import scipy.signal
from rtlsdr import RtlSdr  # pip3 install pyrtlsdr
import numpy as np
import matplotlib.pyplot as plt
import csv


def isFloat(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


def set_pll(s, freq, on=1, power=1, wait_for_ok=True):
    s.flushInput()
    cmd = str(on) + ' ' + str(freq) + ' ' + str(power) + '\n'
    # print('sending ' + cmd)
    s.write(str.encode(cmd))
    # line = s.readline()
    # print(line)
    # line = s.readline()
    # print(line)
    # if wait_for_ok:
    #     ack_ok = False
    #     while not ack_ok:
    #         line = s.readline()
    #         #print(line)
    #         if line == b'ok\n':
    #             ack_ok = True


def sdr_get_power(sdr):
    """Measures the RMS power with an RTL-SDR."""
    samples = sdr.read_samples(1024 * 16)
    freq, psd = scipy.signal.welch(samples, sdr.sample_rate / 1e6, nperseg=8192,
                                 return_onesided=0, window='flattop')
    psd = 10 * np.log10(np.sqrt(psd ** 2))
    freq += sdr.center_freq / 1e6
    return freq, psd


def readCalibrationFile(path, index):
    if path is not None:
        cal_file = dict()
        with open(path, newline='') as csvfile:
            csvreader = csv.reader(csvfile, delimiter='\t')
            for row in csvreader:
                if not isFloat(row[0]):
                    continue
                f = float(row[0])
                power = float(row[index])
                cal_file[f] = power
        return cal_file
    return None


def sdr_init(index, freq, gain, sample_rate=2.4e6):
    sdr = RtlSdr(device_index=index)
    sdr.sample_rate = sample_rate  # was hard-coded to 2.4e6, ignoring the parameter
    sdr.center_freq = freq * 1e6
    sdr.gain = gain
    sdr.set_agc_mode(0)
    sdr_get_power(sdr)  # First read doesn't work; discard it
    return sdr


def sdr_measure(sdr, f, cal_val, f_range=1, nb_meas=5):
    sdr.center_freq = f * 1e6
    avg = []
    for j in range(nb_meas):
        freq, psd = sdr_get_power(sdr)
        max_p = np.min(psd)
        for i in range(len(freq)):
            max_p = psd[i] if (f - f_range < freq[i] < f + f_range and psd[i] > max_p) else max_p
        avg.append(max_p)
    avg = np.mean(avg)
    if cal_val is not None:
        avg -= cal_val[f]
    return avg


def main():
    parser = argparse.ArgumentParser(description='EMI mapping with 3D-printer and RTL-SDR.')
    parser.add_argument('-p', '--serial-port', type=str, help='serial port', default='/dev/ttyUSB0')
    parser.add_argument('-b', '--baud-rate', type=int, help='serial baud rate', default=9600)
    parser.add_argument('-l', '--frequency-lbound', type=float, help='sweep start frequency in MHz', default=1000)
    parser.add_argument('-s', '--frequency-step', type=float, help='sweep step in MHz', default=1)
    parser.add_argument('-r', '--frequency-span', type=float, help='sweep span in MHz', default=300)
    parser.add_argument('-g', '--gain', type=int, help='sets the SDR gain in 0.1dB', default=0)
    parser.add_argument('-t', '--thru', type=str, help='Input file of a thru measurement')
    parser.add_argument('-o', '--open', type=str, help='Input file of an open/short measurement')
    parser.add_argument('--invert-sdr', action='store_true', help='Swaps the S11 and S21 SDRs')
    args = parser.parse_args()

    # Args
    s11_listen = len(RtlSdr.get_device_serial_addresses()) > 1
    # if not s11_listen:
    #     print("-> Running in single device mode (S21 only)")
    # else:
    #     print("-> Running in dual device mode (S11 and S21)")

    # SDR stuff
    freq_lbound = args.frequency_lbound
    freq_range = args.frequency_span
    freq_ubound = freq_lbound + freq_range
    freq_step = args.frequency_step
    frequencies = np.arange(freq_lbound, freq_ubound, freq_step)

    # Open serial port
    s = serial.Serial(args.serial_port, args.baud_rate, timeout=1)
    time.sleep(2)  # Wait to boot

    # Calibration (Open/Short, (Load), Thru)
    cal_s11 = readCalibrationFile(args.open, 2)  # O/S  -> S11 = 0 dB
    cal_s21 = readCalibrationFile(args.thru, 1)  # Thru -> S21 = 0 dB

    # Open SDRs (sdr_init expects MHz; it multiplies by 1e6 itself)
    sdr_S21_index = 0 if not args.invert_sdr else 1
    sdr_S11_index = 1 if not args.invert_sdr else 0
    # Bug fix: the original passed freq_lbound * 1e6 here, which sdr_init then
    # multiplied by 1e6 again when setting center_freq.
    sdr_S21 = sdr_init(sdr_S21_index, freq_lbound, args.gain)
    if s11_listen:
        sdr_S11 = sdr_init(sdr_S11_index, freq_lbound, args.gain)

    s11 = []
    s21 = []
    print('Frequency\tS21\tS11')
    for f in frequencies:
        print(f, end="\t", flush=True)
        set_pll(s, f)

        # S21
        tmp = sdr_measure(sdr_S21, f, cal_s21)
        s21.append(tmp)
        print(tmp, end="\t", flush=True)

        # S11
        if s11_listen:
            tmp = sdr_measure(sdr_S11, f, cal_s11)
            s11.append(tmp)
            print(tmp, flush=True)
        else:
            print(0, flush=True)

    #s21 = s21 - np.max(s21)
    plt.plot(frequencies, s21, label="S21")
    if s11_listen:
        #s11 = s11 - np.max(s11)
        plt.plot(frequencies, s11, label="S11")
    plt.grid(True)
    plt.legend(loc='lower right')
    plt.xlim([freq_lbound, freq_ubound])
    #plt.ylim([None,0])
    plt.show()

    # Close resources
    set_pll(s, f, 0, 0)
    s.close()
    sdr_S21.close()
    if s11_listen:
        sdr_S11.close()

if __name__ == "__main__":
    main()
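# --- Editor's note ---
# sdr_measure() returns the peak PSD (in dB) found within +/- f_range MHz of f,
# averaged over nb_meas sweeps; subtracting the stored calibration value turns
# the raw reading into a normalized S-parameter. Illustrative numbers (not from
# a real measurement): a thru calibration of -32.0 dB at some frequency and a
# raw reading of -35.5 dB yield S21 = -35.5 - (-32.0) = -3.5 dB.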
[ "serial.Serial", "matplotlib.pyplot.xlim", "matplotlib.pyplot.show", "argparse.ArgumentParser", "matplotlib.pyplot.plot", "csv.reader", "rtlsdr.RtlSdr", "matplotlib.pyplot.legend", "time.sleep", "numpy.min", "numpy.mean", "numpy.arange", "rtlsdr.RtlSdr.get_device_serial_addresses", "matplotlib.pyplot.grid", "numpy.sqrt" ]
[((2287, 2366), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""EMI mapping with 3D-printer and RTL-SDR."""'}), "(description='EMI mapping with 3D-printer and RTL-SDR.')\n", (2310, 2366), False, 'import argparse\n'), ((3565, 3611), 'numpy.arange', 'np.arange', (['freq_lbound', 'freq_ubound', 'freq_step'], {}), '(freq_lbound, freq_ubound, freq_step)\n', (3574, 3611), True, 'import numpy as np\n'), ((3634, 3692), 'serial.Serial', 'serial.Serial', (['args.serial_port', 'args.baud_rate'], {'timeout': '(1)'}), '(args.serial_port, args.baud_rate, timeout=1)\n', (3647, 3692), False, 'import serial\n'), ((3693, 3706), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3703, 3706), False, 'import time\n'), ((4624, 4663), 'matplotlib.pyplot.plot', 'plt.plot', (['frequencies', 's21'], {'label': '"""S21"""'}), "(frequencies, s21, label='S21')\n", (4632, 4663), True, 'import matplotlib.pyplot as plt\n'), ((4752, 4766), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4760, 4766), True, 'import matplotlib.pyplot as plt\n'), ((4767, 4796), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (4777, 4796), True, 'import matplotlib.pyplot as plt\n'), ((4797, 4833), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[freq_lbound, freq_ubound]'], {}), '([freq_lbound, freq_ubound])\n', (4805, 4833), True, 'import matplotlib.pyplot as plt\n'), ((4853, 4863), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4861, 4863), True, 'import matplotlib.pyplot as plt\n'), ((1610, 1636), 'rtlsdr.RtlSdr', 'RtlSdr', ([], {'device_index': 'index'}), '(device_index=index)\n', (1616, 1636), False, 'from rtlsdr import RtlSdr\n'), ((2173, 2185), 'numpy.mean', 'np.mean', (['avg'], {}), '(avg)\n', (2180, 2185), True, 'import numpy as np\n'), ((4712, 4751), 'matplotlib.pyplot.plot', 'plt.plot', (['frequencies', 's11'], {'label': '"""S11"""'}), "(frequencies, s11, label='S11')\n", (4720, 4751), True, 'import matplotlib.pyplot as plt\n'), ((1995, 2006), 'numpy.min', 'np.min', (['psd'], {}), '(psd)\n', (2001, 2006), True, 'import numpy as np\n'), ((3207, 3243), 'rtlsdr.RtlSdr.get_device_serial_addresses', 'RtlSdr.get_device_serial_addresses', ([], {}), '()\n', (3241, 3243), False, 'from rtlsdr import RtlSdr\n'), ((1049, 1066), 'numpy.sqrt', 'np.sqrt', (['(psd ** 2)'], {}), '(psd ** 2)\n', (1056, 1066), True, 'import numpy as np\n'), ((1276, 1311), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '"""\t"""'}), "(csvfile, delimiter='\\t')\n", (1286, 1311), False, 'import csv\n')]