column                                      type            min        max
hexsha                                      stringlengths   40         40
size                                        int64           1          1.03M
ext                                         stringclasses   10 values
lang                                        stringclasses   1 value
max_stars_repo_path                         stringlengths   3          239
max_stars_repo_name                         stringlengths   5          130
max_stars_repo_head_hexsha                  stringlengths   40         78
max_stars_repo_licenses                     listlengths     1          10
max_stars_count                             int64           1          191k
max_stars_repo_stars_event_min_datetime     stringlengths   24         24
max_stars_repo_stars_event_max_datetime     stringlengths   24         24
max_issues_repo_path                        stringlengths   3          239
max_issues_repo_name                        stringlengths   5          130
max_issues_repo_head_hexsha                 stringlengths   40         78
max_issues_repo_licenses                    listlengths     1          10
max_issues_count                            int64           1          67k
max_issues_repo_issues_event_min_datetime   stringlengths   24         24
max_issues_repo_issues_event_max_datetime   stringlengths   24         24
max_forks_repo_path                         stringlengths   3          239
max_forks_repo_name                         stringlengths   5          130
max_forks_repo_head_hexsha                  stringlengths   40         78
max_forks_repo_licenses                     listlengths     1          10
max_forks_count                             int64           1          105k
max_forks_repo_forks_event_min_datetime     stringlengths   24         24
max_forks_repo_forks_event_max_datetime     stringlengths   24         24
content                                     stringlengths   1          1.03M
avg_line_length                             float64         1          958k
max_line_length                             int64           1          1.03M
alphanum_fraction                           float64         0          1
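For readers who want to work with records in this schema, the following is a minimal sketch of loading and filtering rows with pandas. The file name python_files.parquet is hypothetical; the actual storage format of this dump is not specified here, and the filter values are only illustrative.

import pandas as pd

# Hypothetical export path; the dump's real storage format is an assumption.
df = pd.read_parquet("python_files.parquet")

# Keep permissively licensed files below 10 kB with reasonably short lines.
small_files = df[
    (df["size"] < 10_000)
    & (df["max_line_length"] <= 120)
    & (df["max_stars_repo_licenses"].apply(
        lambda lics: "MIT" in lics or "Apache-2.0" in lics))
]

for _, row in small_files.iterrows():
    print(row["max_stars_repo_path"], row["max_stars_count"], len(row["content"]))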
79573eecb0d2490a477f7a9c44a6afdd183fbc15
1,260
py
Python
cinder/volume/drivers/lenovo/lenovo_fc.py
rackerlabs/cinder
4295ff0a64f781c3546f6c6e0816dbb8100133cb
[ "Apache-2.0" ]
1
2019-02-08T05:24:58.000Z
2019-02-08T05:24:58.000Z
cinder/volume/drivers/lenovo/lenovo_fc.py
rackerlabs/cinder
4295ff0a64f781c3546f6c6e0816dbb8100133cb
[ "Apache-2.0" ]
1
2021-03-21T11:38:29.000Z
2021-03-21T11:38:29.000Z
cinder/volume/drivers/lenovo/lenovo_fc.py
rackerlabs/cinder
4295ff0a64f781c3546f6c6e0816dbb8100133cb
[ "Apache-2.0" ]
15
2017-01-12T10:35:10.000Z
2019-04-19T08:22:10.000Z
# Copyright 2014 Objectif Libre
# Copyright 2015 DotHill Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from cinder.volume.drivers.dothill import dothill_fc
from cinder.volume.drivers.lenovo import lenovo_common


class LenovoFCDriver(dothill_fc.DotHillFCDriver):
    """OpenStack Fibre Channel cinder drivers for Lenovo Storage arrays.

    Version history:
        1.0 - Inheriting from DotHill cinder drivers.

    """

    VERSION = "1.0"

    def __init__(self, *args, **kwargs):
        super(LenovoFCDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(lenovo_common.common_opts)

    def _init_common(self):
        return lenovo_common.LenovoCommon(self.configuration)
34.054054
78
0.726984
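The three trailing numeric fields of each record (avg_line_length, max_line_length, alphanum_fraction) appear to be derived from the content field. A sketch of how such per-file statistics are presumably computed is shown below; the exact formulas used when building this dump are an assumption.

def file_stats(content: str) -> dict:
    # Line-length statistics and the share of alphanumeric characters,
    # matching the apparent meaning of the per-record fields above.
    lines = content.split("\n")
    lengths = [len(line) for line in lines]
    return {
        "avg_line_length": sum(lengths) / len(lengths),
        "max_line_length": max(lengths),
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(len(content), 1),
    }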
79573ef733be2952fb417d10e220321798c4beb8
439
py
Python
books_library/books/migrations/0012_book_liked.py
Ilyes-Hammadi/books-library
cba0df1ae95c3467b8d16caca16383c0af9a98cd
[ "MIT" ]
9
2017-07-20T15:25:51.000Z
2019-08-26T22:48:01.000Z
books_library/books/migrations/0012_book_liked.py
Ilyes-Hammadi/bookslib
cba0df1ae95c3467b8d16caca16383c0af9a98cd
[ "MIT" ]
null
null
null
books_library/books/migrations/0012_book_liked.py
Ilyes-Hammadi/bookslib
cba0df1ae95c3467b8d16caca16383c0af9a98cd
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-06 03:03
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('books', '0011_book_likes'),
    ]

    operations = [
        migrations.AddField(
            model_name='book',
            name='liked',
            field=models.BooleanField(default=False),
        ),
    ]
20.904762
53
0.605923
79573ff0145f6355352368c73ac7599142c1ba47
19,527
py
Python
salt/modules/mine.py
BnGx/salt
0a560791488d1585bd00b4cfd91a26bb0ac9b459
[ "Apache-2.0" ]
1
2021-09-06T00:14:04.000Z
2021-09-06T00:14:04.000Z
salt/modules/mine.py
BnGx/salt
0a560791488d1585bd00b4cfd91a26bb0ac9b459
[ "Apache-2.0" ]
2
2021-04-30T21:17:57.000Z
2021-12-13T20:40:23.000Z
salt/modules/mine.py
Kamatera/salt
ac960a3308617657d9d039dae9108e0045ab3929
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- """ The function cache system allows for data to be stored on the master so it can be easily read by other minions """ # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time import traceback # Import salt libs import salt.crypt import salt.payload import salt.transport.client import salt.utils.args import salt.utils.dictupdate import salt.utils.event import salt.utils.functools import salt.utils.mine import salt.utils.minions import salt.utils.network from salt.exceptions import SaltClientError # Import 3rd-party libs from salt.ext import six MINE_INTERNAL_KEYWORDS = frozenset( [ "__pub_user", "__pub_arg", "__pub_fun", "__pub_jid", "__pub_tgt", "__pub_tgt_type", "__pub_ret", ] ) __proxyenabled__ = ["*"] log = logging.getLogger(__name__) def _auth(): """ Return the auth object """ if "auth" not in __context__: try: __context__["auth"] = salt.crypt.SAuth(__opts__) except SaltClientError: log.error( "Could not authenticate with master." "Mine data will not be transmitted." ) return __context__["auth"] def _mine_function_available(func): if func not in __salt__: log.error("Function %s in mine_functions not available", func) return False return True def _mine_send(load, opts): eventer = salt.utils.event.MinionEvent(opts, listen=False) event_ret = eventer.fire_event(load, "_minion_mine") # We need to pause here to allow for the decoupled nature of # events time to allow the mine to propagate time.sleep(0.5) return event_ret def _mine_get(load, opts): if opts.get("transport", "") in ("zeromq", "tcp"): try: load["tok"] = _auth().gen_token(b"salt") except AttributeError: log.error( "Mine could not authenticate with master. " "Mine could not be retrieved." ) return False with salt.transport.client.ReqChannel.factory(opts) as channel: return channel.send(load) def _mine_store(mine_data, clear=False): """ Helper function to store the provided mine data. This will store either locally in the cache (for masterless setups), or in the master's cache. :param dict mine_data: Dictionary with function_name: function_data to store. :param bool clear: Whether or not to clear (`True`) the mine data for the function names present in ``mine_data``, or update it (`False`). """ # Store in the salt-minion's local cache if __opts__["file_client"] == "local": if not clear: old = __salt__["data.get"]("mine_cache") if isinstance(old, dict): old.update(mine_data) mine_data = old return __salt__["data.update"]("mine_cache", mine_data) # Store on the salt master load = { "cmd": "_mine", "data": mine_data, "id": __opts__["id"], "clear": clear, } return _mine_send(load, __opts__) def update(clear=False, mine_functions=None): """ Call the configured functions and send the data back up to the master. The functions to be called are merged from the master config, pillar and minion config under the option `mine_functions`: .. code-block:: yaml mine_functions: network.ip_addrs: - eth0 disk.usage: [] This function accepts the following arguments: :param bool clear: Default: ``False`` Specifies whether updating will clear the existing values (``True``), or whether it will update them (``False``). :param dict mine_functions: Update (or clear, see ``clear``) the mine data on these functions only. 
This will need to have the structure as defined on https://docs.saltstack.com/en/latest/topics/mine/index.html#mine-functions This feature can be used when updating the mine for functions that require a refresh at different intervals than the rest of the functions specified under `mine_functions` in the minion/master config or pillar. A potential use would be together with the `scheduler`, for example: .. code-block:: yaml schedule: lldp_mine_update: function: mine.update kwargs: mine_functions: net.lldp: [] hours: 12 In the example above, the mine for `net.lldp` would be refreshed every 12 hours, while `network.ip_addrs` would continue to be updated as specified in `mine_interval`. The function cache will be populated with information from executing these functions CLI Example: .. code-block:: bash salt '*' mine.update """ if not mine_functions: mine_functions = __salt__["config.merge"]("mine_functions", {}) # If we don't have any mine functions configured, then we should just bail out if not mine_functions: return elif isinstance(mine_functions, list): mine_functions = dict((fun, {}) for fun in mine_functions) elif isinstance(mine_functions, dict): pass else: return mine_data = {} for function_alias, function_data in six.iteritems(mine_functions): ( function_name, function_args, function_kwargs, minion_acl, ) = salt.utils.mine.parse_function_definition(function_data) if not _mine_function_available(function_name or function_alias): continue try: res = salt.utils.functools.call_function( __salt__[function_name or function_alias], *function_args, **function_kwargs ) except Exception: # pylint: disable=broad-except trace = traceback.format_exc() log.error( "Function %s in mine.update failed to execute", function_name or function_alias, ) log.debug("Error: %s", trace) continue if minion_acl.get("allow_tgt"): mine_data[function_alias] = salt.utils.mine.wrap_acl_structure( res, **minion_acl ) else: mine_data[function_alias] = res return _mine_store(mine_data, clear) def send(name, *args, **kwargs): """ Send a specific function and its result to the salt mine. This gets stored in either the local cache, or the salt master's cache. :param str name: Name of the function to add to the mine. The following pameters are extracted from kwargs if present: :param str mine_function: The name of the execution_module.function to run and whose value will be stored in the salt mine. Defaults to ``name``. :param str allow_tgt: Targeting specification for ACL. Specifies which minions are allowed to access this function. Please note both your master and minion need to be on, at least, version 3000 for this to work properly. :param str allow_tgt_type: Type of the targeting specification. This value will be ignored if ``allow_tgt`` is not specified. Please note both your master and minion need to be on, at least, version 3000 for this to work properly. Remaining args and kwargs will be passed on to the function to run. :rtype: bool :return: Whether executing the function and storing the information was succesful. .. versionchanged:: 3000 Added ``allow_tgt``- and ``allow_tgt_type``-parameters to specify which minions are allowed to access this function. See :ref:`targeting` for more information about targeting. CLI Example: .. 
code-block:: bash salt '*' mine.send network.ip_addrs eth0 salt '*' mine.send eth0_ip_addrs mine_function=network.ip_addrs eth0 salt '*' mine.send eth0_ip_addrs mine_function=network.ip_addrs eth0 allow_tgt='G@grain:value' allow_tgt_type=compound """ kwargs = salt.utils.args.clean_kwargs(**kwargs) mine_function = kwargs.pop("mine_function", None) allow_tgt = kwargs.pop("allow_tgt", None) allow_tgt_type = kwargs.pop("allow_tgt_type", None) mine_data = {} try: res = salt.utils.functools.call_function( __salt__[mine_function or name], *args, **kwargs ) except Exception as exc: # pylint: disable=broad-except trace = traceback.format_exc() log.error("Function %s in mine.send failed to execute", mine_function or name) log.debug("Error: %s", trace) return False if allow_tgt: mine_data[name] = salt.utils.mine.wrap_acl_structure( res, allow_tgt=allow_tgt, allow_tgt_type=allow_tgt_type ) else: mine_data[name] = res return _mine_store(mine_data) def get(tgt, fun, tgt_type="glob", exclude_minion=False): """ Get data from the mine. :param str tgt: Target whose mine data to get. :param fun: Function to get the mine data of. You can specify multiple functions to retrieve using either a list or a comma-separated string of functions. :type fun: str or list :param str tgt_type: Default ``glob``. Target type to use with ``tgt``. See :ref:`targeting` for more information. Note that all pillar matches, whether using the compound matching system or the pillar matching system, will be exact matches, with globbing disabled. :param bool exclude_minion: Excludes the current minion from the result set. CLI Example: .. code-block:: bash salt '*' mine.get '*' network.interfaces salt '*' mine.get 'os:Fedora' network.interfaces grain salt '*' mine.get 'G@os:Fedora and S@192.168.5.0/24' network.ipaddrs compound .. seealso:: Retrieving Mine data from Pillar and Orchestrate This execution module is intended to be executed on minions. Master-side operations such as Pillar or Orchestrate that require Mine data should use the :py:mod:`Mine Runner module <salt.runners.mine>` instead; it can be invoked from a Pillar SLS file using the :py:func:`saltutil.runner <salt.modules.saltutil.runner>` module. For example: .. code-block:: jinja {% set minion_ips = salt.saltutil.runner('mine.get', tgt='*', fun='network.ip_addrs', tgt_type='glob') %} """ # Load from local minion's cache if __opts__["file_client"] == "local": ret = {} is_target = { "glob": __salt__["match.glob"], "pcre": __salt__["match.pcre"], "list": __salt__["match.list"], "grain": __salt__["match.grain"], "grain_pcre": __salt__["match.grain_pcre"], "ipcidr": __salt__["match.ipcidr"], "compound": __salt__["match.compound"], "pillar": __salt__["match.pillar"], "pillar_pcre": __salt__["match.pillar_pcre"], }[tgt_type](tgt) if not is_target: return ret data = __salt__["data.get"]("mine_cache") if not isinstance(data, dict): return ret if isinstance(fun, six.string_types): functions = list(set(fun.split(","))) _ret_dict = len(functions) > 1 elif isinstance(fun, list): functions = fun _ret_dict = True else: return ret for function in functions: if function not in data: continue # If this is a mine item with minion_side_ACL, get its data if salt.utils.mine.MINE_ITEM_ACL_ID in data[function]: res = data[function][salt.utils.mine.MINE_ITEM_ACL_DATA] else: # Backwards compatibility with non-ACL mine data. 
res = data[function] if _ret_dict: ret.setdefault(function, {})[__opts__["id"]] = res else: ret[__opts__["id"]] = res return ret # Load from master load = { "cmd": "_mine_get", "id": __opts__["id"], "tgt": tgt, "fun": fun, "tgt_type": tgt_type, } ret = _mine_get(load, __opts__) if exclude_minion and __opts__["id"] in ret: del ret[__opts__["id"]] return ret def delete(fun): """ Remove specific function contents of minion. :param str fun: The name of the function. :rtype: bool :return: True on success. CLI Example: .. code-block:: bash salt '*' mine.delete 'network.interfaces' """ if __opts__["file_client"] == "local": data = __salt__["data.get"]("mine_cache") if isinstance(data, dict) and fun in data: del data[fun] return __salt__["data.update"]("mine_cache", data) load = { "cmd": "_mine_delete", "id": __opts__["id"], "fun": fun, } return _mine_send(load, __opts__) def flush(): """ Remove all mine contents of minion. :rtype: bool :return: True on success CLI Example: .. code-block:: bash salt '*' mine.flush """ if __opts__["file_client"] == "local": return __salt__["data.update"]("mine_cache", {}) load = { "cmd": "_mine_flush", "id": __opts__["id"], } return _mine_send(load, __opts__) def get_docker(interfaces=None, cidrs=None, with_container_id=False): """ .. versionchanged:: 2017.7.8,2018.3.3 When :conf_minion:`docker.update_mine` is set to ``False`` for a given minion, no mine data will be populated for that minion, and thus none will be returned for it. .. versionchanged:: 2019.2.0 :conf_minion:`docker.update_mine` now defaults to ``False`` Get all mine data for :py:func:`docker.ps <salt.modules.dockermod.ps_>` and run an aggregation routine. The ``interfaces`` parameter allows for specifying the network interfaces from which to select IP addresses. The ``cidrs`` parameter allows for specifying a list of subnets which the IP address must match. with_container_id Boolean, to expose container_id in the list of results .. versionadded:: 2015.8.2 CLI Example: .. 
code-block:: bash salt '*' mine.get_docker salt '*' mine.get_docker interfaces='eth0' salt '*' mine.get_docker interfaces='["eth0", "eth1"]' salt '*' mine.get_docker cidrs='107.170.147.0/24' salt '*' mine.get_docker cidrs='["107.170.147.0/24", "172.17.42.0/24"]' salt '*' mine.get_docker interfaces='["eth0", "eth1"]' cidrs='["107.170.147.0/24", "172.17.42.0/24"]' """ # Enforce that interface and cidr are lists if interfaces: interface_ = [] interface_.extend(interfaces if isinstance(interfaces, list) else [interfaces]) interfaces = interface_ if cidrs: cidr_ = [] cidr_.extend(cidrs if isinstance(cidrs, list) else [cidrs]) cidrs = cidr_ # Get docker info cmd = "docker.ps" docker_hosts = get("*", cmd) proxy_lists = {} # Process docker info for containers in six.itervalues(docker_hosts): host = containers.pop("host") host_ips = [] # Prepare host_ips list if not interfaces: for info in six.itervalues(host["interfaces"]): if "inet" in info: for ip_ in info["inet"]: host_ips.append(ip_["address"]) else: for interface in interfaces: if interface in host["interfaces"]: if "inet" in host["interfaces"][interface]: for item in host["interfaces"][interface]["inet"]: host_ips.append(item["address"]) host_ips = list(set(host_ips)) # Filter out ips from host_ips with cidrs if cidrs: good_ips = [] for cidr in cidrs: for ip_ in host_ips: if salt.utils.network.in_subnet(cidr, [ip_]): good_ips.append(ip_) host_ips = list(set(good_ips)) # Process each container for container in six.itervalues(containers): container_id = container["Info"]["Id"] if container["Image"] not in proxy_lists: proxy_lists[container["Image"]] = {} for dock_port in container["Ports"]: # IP exists only if port is exposed ip_address = dock_port.get("IP") # If port is 0.0.0.0, then we must get the docker host IP if ip_address == "0.0.0.0": for ip_ in host_ips: containers = ( proxy_lists[container["Image"]] .setdefault("ipv4", {}) .setdefault(dock_port["PrivatePort"], []) ) container_network_footprint = "{0}:{1}".format( ip_, dock_port["PublicPort"] ) if with_container_id: value = (container_network_footprint, container_id) else: value = container_network_footprint if value not in containers: containers.append(value) elif ip_address: containers = ( proxy_lists[container["Image"]] .setdefault("ipv4", {}) .setdefault(dock_port["PrivatePort"], []) ) container_network_footprint = "{0}:{1}".format( dock_port["IP"], dock_port["PublicPort"] ) if with_container_id: value = (container_network_footprint, container_id) else: value = container_network_footprint if value not in containers: containers.append(value) return proxy_lists def valid(): """ List valid entries in mine configuration. CLI Example: .. code-block:: bash salt '*' mine.valid """ mine_functions = __salt__["config.merge"]("mine_functions", {}) # If we don't have any mine functions configured, then we should just bail out if not mine_functions: return mine_data = {} for function_alias, function_data in six.iteritems(mine_functions): ( function_name, function_args, function_kwargs, minion_acl, ) = salt.utils.mine.parse_function_definition(function_data) if not _mine_function_available(function_name or function_alias): continue if function_name: mine_data[function_alias] = { function_name: function_args + [{key: value} for key, value in six.iteritems(function_kwargs)] } else: mine_data[function_alias] = function_data return mine_data
33.725389
126
0.594664
7957408d8bd25e393ad9198d501328d9036a9ca1
2,192
py
Python
ir_config_parameter_multi_company/tests/test_base.py
ShaheenHossain/itpp-labs-misc-addons13
bf62dc5bc1abdc18d78e9560a286babbe1d0e082
[ "MIT" ]
null
null
null
ir_config_parameter_multi_company/tests/test_base.py
ShaheenHossain/itpp-labs-misc-addons13
bf62dc5bc1abdc18d78e9560a286babbe1d0e082
[ "MIT" ]
null
null
null
ir_config_parameter_multi_company/tests/test_base.py
ShaheenHossain/itpp-labs-misc-addons13
bf62dc5bc1abdc18d78e9560a286babbe1d0e082
[ "MIT" ]
3
2020-08-25T01:57:59.000Z
2021-09-11T15:38:02.000Z
# -*- coding: utf-8 -*-
from odoo.tests import common


class TestBase(common.TransactionCase):

    at_install = False
    post_install = True

    def setUp(self):
        super(TestBase, self).setUp()
        self.config_param = self.env["ir.config_parameter"]
        self.main_company = self.env.user.company_id
        self.second_company = self.env["res.company"].create({"name": "Second company"})
        self.env.user.company_ids = [(4, self.second_company.id)]

    def test_cache(self):
        KEY = "test_key"
        VALUE1 = "value1"
        VALUE2 = "value2"
        # set value for first company
        self.config_param.set_param(KEY, VALUE1)
        # call get_param to cache the value
        self.assertEqual(
            self.config_param.get_param(KEY), VALUE1, "Value is not saved!"
        )
        # set value for second company
        self.env.user.company_id = self.second_company
        self.config_param.set_param(KEY, VALUE2)
        param = self.config_param.search([("key", "=", KEY)])
        # check without cache first
        self.assertEqual(param.value, VALUE2, "Value for second company is not saved!")
        # check cache
        self.assertEqual(
            self.config_param.get_param(KEY),
            VALUE2,
            "Cache gives value for wrong company!",
        )
        self.env.user.company_id = self.main_company
        self.assertEqual(self.config_param.get_param(KEY), VALUE1)

    def test_protected_param(self):
        KEY = "database.expiration_date"
        VALUE1 = "value1"
        VALUE2 = "value2"
        # first company
        self.config_param.set_param(KEY, VALUE1)
        self.assertEqual(
            self.config_param.get_param(KEY), VALUE1, "Value is not saved!"
        )
        # for second company
        self.env.user.company_id = self.second_company
        self.assertEqual(self.config_param.get_param(KEY), VALUE1)
        self.config_param.set_param(KEY, VALUE2)
        self.assertEqual(self.config_param.get_param(KEY), VALUE2)
        # switch back to first company
        self.env.user.company_id = self.main_company
        self.assertEqual(self.config_param.get_param(KEY), VALUE2)
33.212121
88
0.632755
795740ea289d3eb288c9b4f2752a7fd9b25f6bf6
1,877
py
Python
examples/zstd_dump.py
ostrovok-team/papi-sdk-python
04c2ddc2c6f843fff2fa6be6d3f82d6b50025255
[ "MIT" ]
1
2020-12-30T13:06:41.000Z
2020-12-30T13:06:41.000Z
examples/zstd_dump.py
EmergingTravel/papi-sdk-python
04c2ddc2c6f843fff2fa6be6d3f82d6b50025255
[ "MIT" ]
2
2021-01-18T07:57:29.000Z
2021-06-23T11:04:14.000Z
examples/zstd_dump.py
ostrovok-team/papi-sdk-python
04c2ddc2c6f843fff2fa6be6d3f82d6b50025255
[ "MIT" ]
3
2020-12-30T13:09:45.000Z
2020-12-30T13:42:33.000Z
""" ETG API provides hotel's static data dump in .zstd format. You can find more about the dump structure and the format in our documentation - https://docs.emergingtravel.com/#0b55c99a-7ef0-4a18-bbfe-fd1bdf35d08e Please note that uncompressed data could be more than 20GB. Below is an example of how to handle such large archive. For decompression, we will use the zstandard library which you can install using the command > pip install zstandard The script takes the path to the archive file, splits the whole file by 16MB chunks, extracts objects line by line (each line contains one hotel in JSON format), and converts them into Python dicts which you can use in your inner logic. """ from zstandard import ZstdDecompressor import json def parse_dump(filename: str) -> None: """ The sample of function that can parse a big zstd dump. :param filename: path to a zstd archive """ with open(filename, "rb") as fh: # make decompressor dctx = ZstdDecompressor() with dctx.stream_reader(fh) as reader: previous_line = "" while True: # we will read the file by 16mb chunk chunk = reader.read(2 ** 24) if not chunk: break raw_data = chunk.decode("utf-8") # all JSON files split by the new line char "\n" # try to read one by one lines = raw_data.split("\n") for i, line in enumerate(lines[:-1]): if i == 0: line = previous_line + line hotel_data = json.loads(line) # do stuff with the hotel print(f"current hotel is {hotel_data['name']}") previous_line = lines[-1] if __name__ == "__main__": parse_dump("partner_feed_en.json.zst")
35.415094
150
0.614811
795741b5ebc4badc7bfd5fbe224c7fa553eee65a
23,628
py
Python
examples/tests/test_offchain_error_cases.py
xli/client-sdk-python
1d0ec7f7b395bd827b778f1903001088e799fb05
[ "Apache-2.0" ]
1
2021-02-15T14:41:34.000Z
2021-02-15T14:41:34.000Z
examples/tests/test_offchain_error_cases.py
xli/client-sdk-python
1d0ec7f7b395bd827b778f1903001088e799fb05
[ "Apache-2.0" ]
null
null
null
examples/tests/test_offchain_error_cases.py
xli/client-sdk-python
1d0ec7f7b395bd827b778f1903001088e799fb05
[ "Apache-2.0" ]
null
null
null
# Copyright (c) The Diem Core Contributors # SPDX-License-Identifier: Apache-2.0 from diem.offchain import ( Status, Action, jws, http_header, CommandResponseObject, CommandResponseError, PaymentActionObject, ) from diem import LocalAccount, testnet from ..vasp.wallet import ActionResult import dataclasses, requests, json, copy, pytest, uuid AMOUNT = 1_000_000_000 def test_send_command_failed_by_invalid_jws_signature_and_retry_by_bg_job(monkeypatch, sender_app, receiver_app): intent_id = receiver_app.gen_intent_id("bar", AMOUNT) with monkeypatch.context() as m: m.setattr(sender_app, "compliance_key", LocalAccount.generate().compliance_key) sender_app.pay("foo", intent_id) assert len(sender_app.saved_commands) == 1 assert len(receiver_app.saved_commands) == 0 with pytest.raises(CommandResponseError) as err: sender_app.run_once_background_job() assert_response_command_error(err.value.resp, "invalid_jws_signature") assert len(sender_app.saved_commands) == 1 assert len(receiver_app.saved_commands) == 0 assert sender_app.run_once_background_job() == ActionResult.SEND_REQUEST_SUCCESS assert len(sender_app.saved_commands) == 1 assert len(receiver_app.saved_commands) == 1 # receiver_app continues the flow after error is recovered assert receiver_app.run_once_background_job() == ( Action.EVALUATE_KYC_DATA, ActionResult.PASS, ) def test_send_command_failed_by_server_internal_error_and_retry_by_bg_job(monkeypatch, sender_app, receiver_app): intent_id = receiver_app.gen_intent_id("bar", AMOUNT) with monkeypatch.context() as m: # receiver side save request failed, which causes 500 error to sender client m.setattr( receiver_app, "save_command", raise_error(Exception("simulate receiver app internal error")), ) reference_id = sender_app.pay("foo", intent_id) with pytest.raises(Exception): sender_app.run_once_background_job() assert len(sender_app.saved_commands) == 1 assert len(receiver_app.saved_commands) == 0 assert sender_app.run_once_background_job() == ActionResult.SEND_REQUEST_SUCCESS assert len(sender_app.saved_commands) == 1 assert len(receiver_app.saved_commands) == 1 # receiver continues the flow after error is recovered assert receiver_app.run_once_background_job() == ( Action.EVALUATE_KYC_DATA, ActionResult.PASS, ) assert receiver_app.run_once_background_job() == ActionResult.SEND_REQUEST_SUCCESS with monkeypatch.context() as m: # receiver side save request failed, which causes 500 error to sender client m.setattr( receiver_app, "save_command", raise_error(Exception("simulate server internal error")), ) # action success but send request should fail assert sender_app.run_once_background_job() == ( Action.EVALUATE_KYC_DATA, ActionResult.PASS, ) with pytest.raises(Exception): sender_app.run_once_background_job() assert sender_status(sender_app, reference_id) == Status.ready_for_settlement assert sender_status(receiver_app, reference_id) == Status.needs_kyc_data # retry failed again with pytest.raises(Exception): sender_app.run_once_background_job() assert sender_status(sender_app, reference_id) == Status.ready_for_settlement assert sender_status(receiver_app, reference_id) == Status.needs_kyc_data assert sender_app.run_once_background_job() == ActionResult.SEND_REQUEST_SUCCESS assert sender_status(sender_app, reference_id) == Status.ready_for_settlement assert sender_status(receiver_app, reference_id) == Status.ready_for_settlement def test_invalid_command_request_json(sender_app, receiver_app): resp = send_request("invalid_request_json", sender_app, receiver_app, "failure") assert resp.cid is 
None assert_response_command_error(resp, "invalid_object") def test_invalid_json(sender_app, receiver_app): resp = send_request_json("invalid_json", sender_app, receiver_app, "failure") assert resp.cid is None assert_response_command_error(resp, "invalid_json") def test_missing_required_fields(sender_app, receiver_app): request = minimum_required_fields_request_sample(sender_app, receiver_app) assert send_request(request, sender_app, receiver_app, "success") for field in find_all_fields(request): # ignore national_id which is optional, the id_value in national_id is required if field in ["command.payment.sender.kyc_data.national_id"]: continue new_req = copy.deepcopy(request) set_field(new_req, field, None) resp = send_request(new_req, sender_app, receiver_app, "failure") assert_response_command_error(resp, "missing_field", field) def test_unknown_fields(sender_app, receiver_app): request = minimum_required_fields_request_sample(sender_app, receiver_app) for field in find_all_fields(request): new_req = copy.deepcopy(request) unknown_field = field + "-unknown" set_field(new_req, unknown_field, "any") resp = send_request(new_req, sender_app, receiver_app, "failure") assert_response_command_error(resp, "unknown_field", unknown_field) def test_invalid_field_value(sender_app, receiver_app): request = minimum_required_fields_request_sample(sender_app, receiver_app) fields = [ "_ObjectType", "command_type", "command._ObjectType", "command.payment.action.action", "command.payment.receiver.status.status", "command.payment.receiver.address", "command.payment.sender.address", "command.payment.sender.status.status", "command.payment.sender.kyc_data.type", "command.payment.sender.kyc_data.payload_version", ] for field in fields: new_req = copy.deepcopy(request) set_field(new_req, field, "invalid-value") resp = send_request(new_req, sender_app, receiver_app, "failure") assert_response_command_error(resp, "invalid_field_value", field) def test_invalid_field_value_type(sender_app, receiver_app): request = minimum_required_fields_request_sample(sender_app, receiver_app) fields = [ "command.payment.sender.status", # status is object, status.status is status enum string "command.payment.sender.metadata", ] for field in fields: new_req = copy.deepcopy(request) set_field(new_req, field, "invalid-value-type") resp = send_request(new_req, sender_app, receiver_app, "failure") assert_response_command_error(resp, "invalid_field_value", field) def test_invalid_actor_metadata_item_type(sender_app, receiver_app): request = minimum_required_fields_request_sample(sender_app, receiver_app) field = "command.payment.sender.metadata" set_field(request, field, ["1", 2]) resp = send_request(request, sender_app, receiver_app, "failure") assert_response_command_error(resp, "invalid_field_value", field) def test_written_once_payment_actor_kyc_data(sender_app, receiver_app): assert_invalid_overwrite_error( sender_app, receiver_app, "payment.sender.kyc_data", lambda cmd: replace_command_sender(cmd, kyc_data=sender_app.users["user-x"].kyc_data()), ) def test_written_once_payment_actor_additional_kyc_data(sender_app, receiver_app): assert_invalid_overwrite_error( sender_app, receiver_app, "payment.sender.additional_kyc_data", lambda cmd: replace_command_sender(cmd, additional_kyc_data="random stuff"), ) def test_written_once_payment_actor_address(sender_app, receiver_app): new_sender_address = sender_app.gen_user_account_id("user-x") def update_cmd(cmd): cmd = replace_command_sender(cmd, address=new_sender_address) return 
dataclasses.replace(cmd, my_actor_address=new_sender_address) assert_invalid_overwrite_error(sender_app, receiver_app, "payment.sender.address", update_cmd) def test_written_once_payment_action(sender_app, receiver_app): assert_invalid_overwrite_error( sender_app, receiver_app, "payment.action", lambda cmd: replace_command_payment(cmd, action=PaymentActionObject(amount=AMOUNT, currency="XDX")), ) def test_written_once_payment_recipient_signature(sender_app, receiver_app): assert_invalid_overwrite_error( sender_app, receiver_app, "payment.recipient_signature", lambda cmd: replace_command_payment(cmd, recipient_signature="sig"), ) def test_written_once_payment_description(sender_app, receiver_app): assert_invalid_overwrite_error( sender_app, receiver_app, "payment.description", lambda cmd: replace_command_payment(cmd, description="want to change desc"), ) def test_written_once_payment_original_payment_reference_id(sender_app, receiver_app): assert_invalid_overwrite_error( sender_app, receiver_app, "payment.original_payment_reference_id", lambda cmd: replace_command_payment(cmd, original_payment_reference_id="4185027f-0574-6f55-2668-3a38fdb5de98"), original_payment_reference_id="6185027f-0574-6f55-2668-3a38fdb5de98", ) def test_written_once_payment_original_payment_reference_id_initial_only(sender_app, receiver_app): assert_invalid_overwrite_error( sender_app, receiver_app, "payment.original_payment_reference_id", lambda cmd: replace_command_payment(cmd, original_payment_reference_id="4185027f-0574-6f55-2668-3a38fdb5de98"), ) def test_update_opponent_actor_field_error(sender_app, receiver_app): assert_invalid_overwrite_error( sender_app, receiver_app, "payment.receiver", lambda cmd: replace_actor(cmd, "receiver", cmd.payment.receiver, additional_kyc_data="random stuff"), ) def test_resource_is_locked_error(sender_app, receiver_app): intent_id = receiver_app.gen_intent_id("bar", AMOUNT) ref_id = sender_app.pay("foo", intent_id) assert sender_app.run_once_background_job() == ActionResult.SEND_REQUEST_SUCCESS sender_app.lock(ref_id).acquire() assert receiver_app.run_once_background_job() == ( Action.EVALUATE_KYC_DATA, ActionResult.PASS, ) with pytest.raises(CommandResponseError) as err: receiver_app.run_once_background_job() assert_response_command_error(err.value.resp, "conflict") def test_travel_rule_limit_validation(sender_app, receiver_app): request = minimum_required_fields_request_sample(sender_app, receiver_app, amount=10) resp = send_request(request, sender_app, receiver_app, "failure") assert_response_command_error(resp, "no_kyc_needed", "command.payment.action.amount") def test_invalid_currency_code(sender_app, receiver_app): request = minimum_required_fields_request_sample(sender_app, receiver_app, currency="XXX") resp = send_request(request, sender_app, receiver_app, "failure") assert_response_command_error(resp, "invalid_field_value", "command.payment.action.currency") def test_unsupported_currency(sender_app, receiver_app): request = minimum_required_fields_request_sample(sender_app, receiver_app, currency="XDX") resp = send_request(request, sender_app, receiver_app, "failure") assert_response_command_error(resp, "unsupported_currency", "command.payment.action.currency") def test_cid_uuid(sender_app, receiver_app): request = minimum_required_fields_request_sample(sender_app, receiver_app) request["cid"] = "invalid uuid" resp = send_request(request, sender_app, receiver_app, "failure") assert_response_command_error(resp, "invalid_field_value", "cid") def 
test_reference_id_uuid(sender_app, receiver_app): request = minimum_required_fields_request_sample(sender_app, receiver_app) request["command"]["payment"]["reference_id"] = "invalid uuid" resp = send_request(request, sender_app, receiver_app, "failure") assert_response_command_error(resp, "invalid_field_value", "command.payment.reference_id") def test_unknown_address_could_not_find_request_receiver_account_id(sender_app, receiver_app): request = minimum_required_fields_request_sample(sender_app, receiver_app) request["command"]["payment"]["receiver"]["address"] = sender_app.offchain_client.my_compliance_key_account_id resp = send_request(request, sender_app, receiver_app, "failure") assert_response_command_error(resp, "unknown_address") def test_x_request_sender_address_must_one_of_actor_addresses(sender_app, receiver_app): request = minimum_required_fields_request_sample(sender_app, receiver_app) resp = send_request( request, sender_app, receiver_app, "failure", sender_address=sender_app.offchain_client.my_compliance_key_account_id, ) assert_response_command_error(resp, "invalid_http_header") def test_http_header_x_request_sender_address_missing(sender_app, receiver_app): request = minimum_required_fields_request_sample(sender_app, receiver_app) resp = send_request_json_with_headers( json.dumps(request), sender_app, receiver_app, "failure", {http_header.X_REQUEST_ID: str(uuid.uuid4())} ) assert_response_protocol_error(resp, "missing_http_header") def test_missing_x_request_id(sender_app, receiver_app): request = minimum_required_fields_request_sample(sender_app, receiver_app) resp = send_request_json_with_headers( json.dumps(request), sender_app, receiver_app, "failure", {http_header.X_REQUEST_SENDER_ADDRESS: "address"} ) assert_response_protocol_error(resp, "missing_http_header") def test_could_not_find_onchain_account_by_x_request_sender_address(sender_app, receiver_app): account = LocalAccount.generate() account_id = account.account_identifier() request = minimum_required_fields_request_sample(sender_app, receiver_app) request["command"]["payment"]["sender"]["address"] = account_id resp = send_request(request, sender_app, receiver_app, "failure", sender_address=account_id) assert_response_protocol_error(resp, "invalid_http_header") def test_could_not_find_compliance_key_of_x_request_sender_address(sender_app, receiver_app): account = testnet.gen_account(testnet.create_client()) account_id = account.account_identifier() request = minimum_required_fields_request_sample(sender_app, receiver_app) request["command"]["payment"]["sender"]["address"] = account_id resp = send_request(request, sender_app, receiver_app, "failure", sender_address=account_id) assert_response_protocol_error(resp, "invalid_http_header") def test_invalid_recipient_signature(sender_app, receiver_app): intent_id = receiver_app.gen_intent_id("bar", AMOUNT) ref_id = sender_app.pay("foo", intent_id) assert sender_app.run_once_background_job() == ActionResult.SEND_REQUEST_SUCCESS assert receiver_app.run_once_background_job() == ( Action.EVALUATE_KYC_DATA, ActionResult.PASS, ) cmd = receiver_app.saved_commands.get(ref_id) invalid_sig_cmd = replace_command_payment(cmd, recipient_signature=b"invalid-sig".hex()) with pytest.raises(CommandResponseError) as err: assert receiver_app._send_request(invalid_sig_cmd) assert_response_command_error(err.value.resp, "invalid_recipient_signature", "command.payment.recipient_signature") def test_receiver_actor_is_ready_for_settlement_but_recipient_signature_is_none(sender_app, 
receiver_app): intent_id = receiver_app.gen_intent_id("bar", AMOUNT) ref_id = sender_app.pay("foo", intent_id) assert sender_app.run_once_background_job() == ActionResult.SEND_REQUEST_SUCCESS assert receiver_app.run_once_background_job() == ( Action.EVALUATE_KYC_DATA, ActionResult.PASS, ) cmd = receiver_app.saved_commands.get(ref_id) missing_sig_cmd = replace_command_payment(cmd, recipient_signature=None) with pytest.raises(CommandResponseError) as err: assert receiver_app._send_request(missing_sig_cmd) assert_response_command_error(err.value.resp, "missing_field", "command.payment.recipient_signature") def test_invalid_recipient_signature_hex(sender_app, receiver_app): intent_id = receiver_app.gen_intent_id("bar", AMOUNT) ref_id = sender_app.pay("foo", intent_id) assert sender_app.run_once_background_job() == ActionResult.SEND_REQUEST_SUCCESS assert receiver_app.run_once_background_job() == ( Action.EVALUATE_KYC_DATA, ActionResult.PASS, ) cmd = receiver_app.saved_commands.get(ref_id) invalid_sig_cmd = replace_command_payment(cmd, recipient_signature="invalid-sig-hex") with pytest.raises(CommandResponseError) as err: assert receiver_app._send_request(invalid_sig_cmd) assert_response_command_error(err.value.resp, "invalid_recipient_signature", "command.payment.recipient_signature") def replace_actor(cmd, name, actor, **changes): new_actor = dataclasses.replace(actor, **changes) return replace_command_payment(cmd, **{name: new_actor}) def replace_command_sender(cmd, **changes): return replace_actor(cmd, "sender", cmd.payment.sender, **changes) def replace_command_payment(cmd, **changes): new_payment = dataclasses.replace(cmd.payment, **changes) return dataclasses.replace(cmd, payment=new_payment) def assert_invalid_overwrite_error(sender_app, receiver_app, field, update_cmd, original_payment_reference_id=None): # setup soft match for sender provide additional_kyc_data receiver_app.evaluate_kyc_data_result = {"foo": ActionResult.SOFT_MATCH} receiver_app.manual_review_result = {"foo": ActionResult.PASS} intent_id = receiver_app.gen_intent_id("bar", AMOUNT) ref_id = sender_app.pay( "foo", intent_id, original_payment_reference_id=original_payment_reference_id, desc="this is a good deal" ) assert sender_app.run_once_background_job() == ActionResult.SEND_REQUEST_SUCCESS assert receiver_app.run_once_background_job() == ( Action.EVALUATE_KYC_DATA, ActionResult.SOFT_MATCH, ) assert receiver_app.run_once_background_job() == ActionResult.SEND_REQUEST_SUCCESS assert sender_app.run_once_background_job() == ( Action.CLEAR_SOFT_MATCH, ActionResult.SENT_ADDITIONAL_KYC_DATA, ) assert sender_app.run_once_background_job() == ActionResult.SEND_REQUEST_SUCCESS assert receiver_app.run_once_background_job() == ( Action.REVIEW_KYC_DATA, ActionResult.PASS, ) assert receiver_app.run_once_background_job() == ActionResult.SEND_REQUEST_SUCCESS # sender provided kyc_data and additional_kyc_data # receiver provided signature and kyc_data assert len(sender_app.saved_commands) == 1 cmd = sender_app.saved_commands.get(ref_id) sender_app.saved_commands[ref_id] = update_cmd(cmd) assert sender_app.run_once_background_job() == ( Action.EVALUATE_KYC_DATA, ActionResult.PASS, ) with pytest.raises(CommandResponseError) as err: sender_app.run_once_background_job() assert_response_command_error(err.value.resp, "invalid_overwrite", field) def assert_response_command_error(resp, code, field=None): assert_response_error(resp, code, "command_error", field) def assert_response_protocol_error(resp, code, field=None): 
assert_response_error(resp, code, "protocol_error", field) def assert_response_error(resp, code, err_type, field=None): assert resp.error, resp assert resp.error.type == err_type, resp assert resp.error.code == code, resp assert resp.error.field == field, resp def set_field(dic, field, value): path = field.split(".") for f in path[0 : len(path) - 1]: if f not in dic: dic[f] = {} dic = dic[f] dic[path[len(path) - 1]] = value def find_all_fields(dic): ret = [] for key in dic: ret.append(key) if isinstance(dic[key], dict): sub_fields = find_all_fields(dic[key]) for sf in sub_fields: ret.append(".".join([key, sf])) return ret def send_request(request, sender_app, receiver_app, expected_resp_status, sender_address=None) -> CommandResponseObject: return send_request_json(json.dumps(request), sender_app, receiver_app, expected_resp_status, sender_address) def send_request_json( request_json, sender_app, receiver_app, expected_resp_status, sender_address=None ) -> CommandResponseObject: if sender_address is None: subaddresses = sender_app.users["foo"].subaddresses subaddress = subaddresses[len(subaddresses) - 1] if len(subaddresses) > 0 else None sender_address = sender_app._available_child_vasp().account_identifier(subaddress) return send_request_json_with_headers( request_json, sender_app, receiver_app, expected_resp_status, { http_header.X_REQUEST_ID: str(uuid.uuid4()), http_header.X_REQUEST_SENDER_ADDRESS: sender_address, }, ) def send_request_json_with_headers( request_json, sender_app, receiver_app, expected_resp_status, headers ) -> CommandResponseObject: session = requests.Session() resp = session.post( f"http://localhost:{receiver_app.offchain_service_port}/v2/command", data=jws.serialize_string(request_json, sender_app.compliance_key.sign), headers=headers, ) cmd_resp_obj = jws.deserialize( resp.content, CommandResponseObject, receiver_app.compliance_key.public_key().verify, ) assert cmd_resp_obj.status == expected_resp_status if expected_resp_status == "success": assert resp.status_code == 200 else: assert resp.status_code == 400 return cmd_resp_obj def sender_status(wallet, ref_id): command = wallet.saved_commands[ref_id] return command.payment.sender.status.status def raise_error(e: Exception): def fn(*args, **wargs): raise e return fn def minimum_required_fields_request_sample(sender_app, receiver_app, amount=AMOUNT, currency="XUS"): return { "_ObjectType": "CommandRequestObject", "cid": "3185027f-0574-6f55-2668-3a38fdb5de98", "command_type": "PaymentCommand", "command": { "_ObjectType": "PaymentCommand", "payment": { "reference_id": "4185027f-0574-6f55-2668-3a38fdb5de98", "sender": { "address": sender_app.gen_user_account_id("foo"), "status": {"status": "needs_kyc_data"}, "kyc_data": { "type": "individual", "payload_version": 1, "national_id": {"id_value": "332-323-4344"}, }, }, "receiver": { "address": receiver_app.gen_user_account_id("bar"), "status": {"status": "none"}, }, "action": { "amount": amount, "currency": currency, "action": "charge", "timestamp": 1604902048, }, }, }, }
38.23301
120
0.727781
795741f60e2bd257b13060349288c5251cf83a8a
939
py
Python
solr-synonyms-api/synonyms/endpoints/synonyms.py
JohnamLane/namex
365fb42e99c702a0e6618451dcc86c1955333fd7
[ "Apache-2.0" ]
null
null
null
solr-synonyms-api/synonyms/endpoints/synonyms.py
JohnamLane/namex
365fb42e99c702a0e6618451dcc86c1955333fd7
[ "Apache-2.0" ]
1
2019-05-02T07:10:52.000Z
2019-05-02T07:10:52.000Z
solr-synonyms-api/synonyms/endpoints/synonyms.py
JohnamLane/namex
365fb42e99c702a0e6618451dcc86c1955333fd7
[ "Apache-2.0" ]
null
null
null
import logging

import flask_restplus

import synonyms.models.synonym as synonym

__all__ = ['api']

api = flask_restplus.Namespace('Synonyms', description='Work with synonyms used in Solr')


@api.route('/<col>/<term>', methods=['GET'])
class _Synonyms(flask_restplus.Resource):
    @staticmethod
    def get(col, term):
        term = term.strip().lower()
        logging.debug('Doing {} search for "{}"'.format(col, term))

        results = synonym.Synonym.find(term, col)
        if not results:
            return {'message': 'Term \'{}\' not found in any synonyms list'.format(term)}, 404

        response_list = []
        for result in results:
            if col == 'synonyms_text':
                response_list.append(result.synonyms_text)
            # col == stems_text
            else:
                response_list.append(result.stems_text)

        print(response_list)
        return ('results', response_list), 200
26.083333
94
0.615548
7957420797900b4da9002c9a62d89d9fc58b153f
2,677
py
Python
tests/test_mlp.py
nlinc1905/evolutionary-reinforcement-learner
a1384426d0d47403abd7382a0c6b6ebe7d949aff
[ "MIT" ]
null
null
null
tests/test_mlp.py
nlinc1905/evolutionary-reinforcement-learner
a1384426d0d47403abd7382a0c6b6ebe7d949aff
[ "MIT" ]
null
null
null
tests/test_mlp.py
nlinc1905/evolutionary-reinforcement-learner
a1384426d0d47403abd7382a0c6b6ebe7d949aff
[ "MIT" ]
null
null
null
import numpy as np
import unittest

from models.mlp import softmax, relu, MLP


def test_softmax():
    # Assert that the output matches what is expected for a given input
    test_array = np.array([[0.2, 0.4, 0.6], [0.4, 0.6, 0.8]])
    expected_output = np.array([
        [0.2693075, 0.32893292, 0.40175958],
        [0.2693075, 0.32893292, 0.40175958],
    ])
    np.testing.assert_allclose(
        actual=softmax(test_array), desired=expected_output, rtol=1e-5
    )


def test_relu():
    # Assert that the output matches what is expected for a given input
    test_array = np.array([[-0.2, 0.4, 0.6], [0.4, -0.6, 0.8]])
    output_array = relu(test_array)
    expected_output_array = np.array([[0., 0.4, 0.6], [0.4, 0., 0.8]])
    np.testing.assert_equal(actual=output_array, desired=expected_output_array)


class MLPTestCase(unittest.TestCase):

    def setUp(self):
        self.seed = 14
        self.mlp = MLP(
            input_dim=8,
            hidden_units=50,
            nbr_classes=2,
            seed=self.seed,
            hidden_layer_activation_func=relu
        )
        self.expected_param_length = (
            (self.mlp.input_dim * self.mlp.hidden_units)
            + self.mlp.hidden_units
            + (self.mlp.hidden_units * self.mlp.output_dim)
            + self.mlp.output_dim
        )

    def test_init(self):
        # Assert that the parameters were initialized correctly
        assert self.mlp.input_dim == 8
        assert self.mlp.hidden_units == 50
        assert self.mlp.output_dim == 2
        np.testing.assert_equal(actual=self.mlp.b1, desired=np.zeros(50))
        np.testing.assert_equal(actual=self.mlp.b2, desired=np.zeros(2))
        np.testing.assert_equal(actual=self.mlp.expected_input_shape, desired=self.expected_param_length)

    def test_get_params(self):
        # Assert that the params returned have the right dimensionality
        test_params = self.mlp.get_params()
        assert len(test_params.shape) == 1
        assert test_params.shape[0] == self.expected_param_length

    def test_set_params(self):
        # Assert that the params can be set
        np.random.seed(self.seed)
        test_params = np.random.randn(self.expected_param_length,)
        self.mlp.set_params(params=test_params)
        output = self.mlp.get_params()
        np.testing.assert_allclose(actual=output, desired=test_params, rtol=1e-5)

    def test_sample_action(self):
        # Assert that the sample action returns an integer index
        np.random.seed(self.seed)
        test_array = np.random.randn(8, )
        test_action = self.mlp.sample_action(x=test_array)
        assert isinstance(test_action, np.int64)
36.175676
105
0.650728
795742a5a0d847ad9fbcaef0a0c2a4ab3b785294
3,888
py
Python
src/graph.py
xfontes42/hermes-simulation
152261f0070b6b1520f77c04bc5ffa655eee6a56
[ "MIT" ]
7
2019-07-11T16:07:55.000Z
2020-04-20T13:57:15.000Z
src/graph.py
xfontes42/hermes-simulation
152261f0070b6b1520f77c04bc5ffa655eee6a56
[ "MIT" ]
null
null
null
src/graph.py
xfontes42/hermes-simulation
152261f0070b6b1520f77c04bc5ffa655eee6a56
[ "MIT" ]
1
2019-11-12T15:46:54.000Z
2019-11-12T15:46:54.000Z
""" Graph representing a road network. Graph topology should allow for dynamic run-time changes (e.g. accidents and other phenomena that restrict or even block a given edge). """ from typing import List, Tuple from utils import congestion_time_estimate import networkx as nx class RoadGraph: graph: nx.DiGraph nstart: int nend: int def __init__(self): self.hardcoded_graph_2() def __print_edge_volumes(self): """Pretty print of the edges current volumes. Useful for debug purposes""" print("Volumes:") for e in self.graph.edges: print("\t(%i, %i) -> %i" % (e[0], e[1], self.graph.edges[e[0], e[1]]['volume'])) def add_vehicle(self, edge: (int, int)): """Add a vehicle to a given edge""" self.graph.edges[edge[0], edge[1]]['volume'] += 1 def remove_vehicle(self, edge: (int, int)): """Remove a vehicle from a given edge""" self.graph.edges[edge[0], edge[1]]['volume'] -= 1 def get_edge_data(self, edge: Tuple[int, int]) -> dict: """Get edge related data. ATIS data endpoint""" return self.graph.edges[edge[0], edge[1]] def get_possible_routes(self, src_node: int, dest_node: int): """Get all possible routes from the src_node to the destiny_node""" return list(nx.all_simple_paths(self.graph, src_node, dest_node)) def get_all_routes(self) -> List[List[int]]: # results in [[0, 1, 3], [0, 2, 1, 3], [0, 2, 3]] return self.get_possible_routes(self.nstart, self.nend) # this below doesn't work bc it forces to go through all nodes # return nx.all_topological_sorts(self.graph) def get_optimal_route_travel_time(self, route: List[int]) -> float: """Gets the estimated optimal travel time it takes to transverse a given route""" edges = list(zip(route, route[1:])) estimates = [self.graph.edges[e[0], e[1]]['free_flow_travel_time'] for e in edges] return sum(estimates) def get_edge_travel_time(self, edge: Tuple[int, int], volume: int) -> float: """Get the time it takes to transverse the edge, considering a given volume""" edge_data = self.get_edge_data(edge) return congestion_time_estimate(edge_data['free_flow_travel_time'], edge_data['capacity'], volume) def get_edge_real_travel_time(self, edge: Tuple[int, int]) -> float: """Get the real actual time it takes to transverse the edge (congestion included)""" return self.get_edge_travel_time(edge, self.get_edge_data(edge)['volume']) def hardcoded_graph_1(self): """Hardcoded deliverable 2 example graph for now""" self.graph = nx.DiGraph() self.graph.add_nodes_from(range(0, 4)) self.nstart = 0 self.nend = 3 self.graph.add_edges_from( [(0, 1), (0, 2), (2, 1), (1, 3), (2, 3)], volume=0, free_flow_travel_time=1, capacity=20) def hardcoded_graph_2(self): """A different hardcoded graph, see Xavier's EcoBook""" self.graph = nx.DiGraph() self.graph.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8]) self.nstart = 0 self.nend = 8 self.graph.add_edges_from( [(0, 1), (1, 3), (3, 6), (6, 8)], volume=0, free_flow_travel_time=0.85, capacity=50) # fastest path self.graph.add_edges_from( [(0, 2), (2, 5), (5, 7), (7, 8)], volume=0, free_flow_travel_time=1.17, capacity=50) # shortest path self.graph.add_edges_from( [(1, 4), (2, 4), (4, 6), (4, 7)], volume=0, free_flow_travel_time=0.92, capacity=50) # other recommended path
36.679245
92
0.587706
79574363d9c947cfe6b4fe10e6f9545acfd09dfd
4,115
py
Python
examples/tutorial_api_python/05_keypoints_from_images_multi_gpu.py
0x333333/openpose
363cd0b5d44ab127d0786ac1f3398e784933dd5d
[ "DOC" ]
3
2019-10-16T08:35:07.000Z
2019-10-23T08:34:52.000Z
examples/tutorial_api_python/05_keypoints_from_images_multi_gpu.py
adityadas8888/openpose
d89fe626505cde6604396aed77ad283c8f013473
[ "DOC" ]
null
null
null
examples/tutorial_api_python/05_keypoints_from_images_multi_gpu.py
adityadas8888/openpose
d89fe626505cde6604396aed77ad283c8f013473
[ "DOC" ]
null
null
null
# From Python
# It requires OpenCV installed for Python
import sys
import cv2
import os
from sys import platform
import argparse
import time

# Import Openpose (Windows/Ubuntu/OSX)
dir_path = os.path.dirname(os.path.realpath(__file__))
try:
    # Windows Import
    if platform == "win32":
        # Change these variables to point to the correct folder (Release/x64 etc.)
        sys.path.append(dir_path + '/../../python/openpose/Release');
        os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
        import pyopenpose as op
    else:
        # Change these variables to point to the correct folder (Release/x64 etc.)
        sys.path.append('../../python');
        # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
        # sys.path.append('/usr/local/python')
        from openpose import pyopenpose as op
except ImportError as e:
    print('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
    raise e

# Flags
parser = argparse.ArgumentParser()
parser.add_argument("--image_dir", default="../../../examples/media/", help="Process a directory of images. Read all standard formats (jpg, png, bmp, etc.).")
parser.add_argument("--no_display", default=False, help="Enable to disable the visual display.")
parser.add_argument("--num_gpu", default=op.get_gpu_number(), help="Number of GPUs.")
args = parser.parse_known_args()

# Custom Params (refer to include/openpose/flags.hpp for more parameters)
params = dict()
params["model_folder"] = "../../../models/"
params["num_gpu"] = int(vars(args[0])["num_gpu"])
numberGPUs = int(params["num_gpu"])

# Add others in path?
for i in range(0, len(args[1])):
    curr_item = args[1][i]
    if i != len(args[1])-1:
        next_item = args[1][i+1]
    else:
        next_item = "1"
    if "--" in curr_item and "--" in next_item:
        key = curr_item.replace('-','')
        if key not in params:
            params[key] = "1"
    elif "--" in curr_item and "--" not in next_item:
        key = curr_item.replace('-','')
        if key not in params:
            params[key] = next_item

# Construct it from system arguments
# op.init_argv(args[1])
# oppython = op.OpenposePython()

try:
    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    # Read frames on directory
    imagePaths = op.get_images_on_directory(args[0].image_dir);

    # Read number of GPUs in your system
    start = time.time()

    # Process and display images
    for imageBaseId in range(0, len(imagePaths), numberGPUs):
        # Create datums
        datums = []
        images = []

        # Read and push images into OpenPose wrapper
        for gpuId in range(0, numberGPUs):
            imageId = imageBaseId+gpuId
            if imageId < len(imagePaths):
                imagePath = imagePaths[imageBaseId+gpuId]
                datum = op.Datum()
                images.append(cv2.imread(imagePath))
                datum.cvInputData = images[-1]
                datums.append(datum)
                opWrapper.waitAndEmplace([datums[-1]])

        # Retrieve processed results from OpenPose wrapper
        for gpuId in range(0, numberGPUs):
            imageId = imageBaseId+gpuId
            if imageId < len(imagePaths):
                datum = datums[gpuId]
                opWrapper.waitAndPop([datum])

                print("Body keypoints: \n" + str(datum.poseKeypoints))

                if not args[0].no_display:
                    cv2.imshow("OpenPose 1.5.1 - Tutorial Python API", datum.cvOutputData)
                    key = cv2.waitKey(15)
                    if key == 27:
                        break

    end = time.time()
    print("OpenPose demo successfully finished. Total time: " + str(end - start) + " seconds")
except Exception as e:
    # print(e)
    sys.exit(-1)
37.072072
289
0.633779
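The record above is a complete OpenPose tutorial script whose core pattern is asynchronous multi-GPU batching: one `op.Datum` is emplaced per GPU with `waitAndEmplace`, then results are collected with `waitAndPop`. Below is a minimal sketch of that loop distilled from the record; it assumes `pyopenpose` is importable (i.e. OpenPose was built with `BUILD_PYTHON` and is on the Python path), and the `run_batched` helper name, the `models/` folder default, and the image-path list are illustrative, not taken from the record.

```python
import cv2
from openpose import pyopenpose as op  # assumes an installed OpenPose Python build on sys.path


def run_batched(image_paths, model_folder="models/", num_gpu=None):
    """Round-robin images across GPUs using OpenPose's async wrapper (sketch)."""
    num_gpu = num_gpu if num_gpu is not None else op.get_gpu_number()
    wrapper = op.WrapperPython()
    wrapper.configure({"model_folder": model_folder, "num_gpu": num_gpu})
    wrapper.start()

    keypoints = []
    for base in range(0, len(image_paths), num_gpu):
        batch = []
        # Emplace one datum per GPU so the batch is processed in parallel.
        for offset in range(num_gpu):
            if base + offset >= len(image_paths):
                break
            datum = op.Datum()
            datum.cvInputData = cv2.imread(image_paths[base + offset])
            batch.append(datum)
            wrapper.waitAndEmplace([datum])
        # Pop the results back in the same order they were emplaced.
        for datum in batch:
            wrapper.waitAndPop([datum])
            keypoints.append(datum.poseKeypoints)
    return keypoints
```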
79574391f2d5c01951282421727d7eeca67f0f53
20,152
py
Python
stats/nonparametric.py
DockyD/DvM
7fbc6aa33a88b288edab9948071849b02ac71469
[ "MIT" ]
7
2018-03-22T10:44:55.000Z
2021-08-25T01:18:17.000Z
stats/nonparametric.py
DockyD/DvM
7fbc6aa33a88b288edab9948071849b02ac71469
[ "MIT" ]
3
2018-03-19T14:24:08.000Z
2019-02-20T19:11:08.000Z
stats/nonparametric.py
DockyD/DvM
7fbc6aa33a88b288edab9948071849b02ac71469
[ "MIT" ]
7
2018-04-04T14:55:43.000Z
2021-11-15T15:40:15.000Z
""" NonParametric statistical tests Created by Dirk van Moorselaar on 27-02-2018. Copyright (c) 2018 DvM. All rights reserved. """ import cv2 import numpy as np from math import sqrt from scipy.stats import ttest_rel, ttest_ind, wilcoxon, ttest_1samp from IPython import embed def permutationTTest(X1, X2, nr_perm): ''' ''' # check whether X2 is a chance variable or a data array if isinstance(X2, (float, int)): X2 = np.tile(X2, X1.shape) X = X1 - X2 # calculate T statistic nr_obs = X.shape[0] nr_test = X.shape[1:] T_0 = X.mean(axis = 0)/(X.std(axis = 0)/sqrt(nr_obs)) # calculate surrogate T distribution surr = np.copy(X) T_p = np.stack([np.zeros(nr_test) for i in range(nr_perm)], axis = 0) for p in range(nr_perm): perms = np.array(np.random.randint(2,size = X.shape), dtype = bool) surr[perms] *= -1 T_p[p] = surr.mean(axis = 0)/(surr.std(axis = 0)/sqrt(nr_obs)) # check how often surrogate T exceeds real T value thresh = np.sum(np.array((T_p > T_0),dtype = float), axis = 0) p_value = thresh/nr_perm return p_value, T_0 def clusterBasedPermutation(X1, X2, p_val = 0.05, cl_p_val = 0.05, paired = True, tail = 'both', nr_perm = 1000, mask = None, conn = None): ''' Implements Maris, E., & Oostenveld, R. (2007). Nonparametric statistical testing of EEG- and MEG- data. Journal of Neurosience Methods, 164(1), 177?190. http://doi.org/10.1016/J.Jneumeth.2007.03.024 Arguments - - - - - X1 (array): subject X dim1 X dim2 (optional), where dim1 and dim2 can be any type of dimension (time, frequency, electrode, etc). Values in array represent some dependent measure (e.g classification accuracy or power) X2 (array | float): either a datamatrix with same dimensions as X1, or a single value against which X1 will be tested p_val (float): p_value used for inclusion into the cluster cl_p_val (float): p_value for evaluation overall cluster significance paired (bool): paired t testing (True) or independent t testing (False) tail (str): apply one- or two- tailed t testing nr_perm (int): number of permutations mask (array): dim1 X dim2 array. Can be used to restrict cluster based test to a specific region. conn (array): outlines which dim1 points are connected to other dim1 points. Usefull when doing a cluster based permutation test across electrodes Returns - - - - cl_p_vals (array): dim1 X dim2 with p-values < cl_p_val for significant clusters and 1's for all other clusters ''' # if no mask is provided include all datapoints in analysis if mask == None: mask = np.array(np.ones(X1.shape[1:]),dtype = bool) print('\nUsing all {} datapoints in cluster based permutation'.format(mask.size), end = '\r') elif mask.shape != X1[0].shape: print('\nMask does not have the same shape as X1. 
Adjust mask!') else: print('\nThere are {} out of {} datapoints in your mask during cluster based permutation'.format(int(mask.sum()), mask.size)) # check whether X2 is a chance variable or a data array if isinstance(X2, (float, int)): X2 = np.tile(X2, X1.shape) # compute observed cluster statistics pos_sizes, neg_sizes, pos_labels, neg_labels, sig_cl = computeClusterSizes(X1, X2, p_val, paired, tail, mask, conn) cl_p_vals = np.ones(sig_cl.shape) # iterate to determine how often permuted clusters exceed the observed cluster threshold c_pos_cl = np.zeros(np.max(np.unique(pos_labels))) c_neg_cl = np.zeros(np.max(np.unique(neg_labels))) # initiate random arrays X1_rand = np.zeros(X1.shape) X2_rand = np.zeros(X1.shape) for p in range(nr_perm): #print("\r{0}% of permutations".format((float(p)/nr_perm)*100),) # create random partitions if paired: # keep observations paired under permutation rand_idx = np.random.rand(X1.shape[0])<0.5 X1_rand[rand_idx,:] = X1[rand_idx,:] X1_rand[~rand_idx,:] = X2[~rand_idx,:] X2_rand[rand_idx,:] = X2[rand_idx,:] X2_rand[~rand_idx,:] = X1[~rand_idx,:] else: # fully randomize observations under permutation all_X = np.vstack((X1,X2)) all_X = all_X[np.random.permutation(all_X.shape[0]),:] X1_rand = all_X[:X1.shape[0],:] X2_rand = all_X[X1.shape[0]:,:] # compute cluster statistics under random permutation rand_pos_sizes, rand_neg_sizes, _, _, _ = computeClusterSizes(X1_rand, X2_rand, p_val, paired, tail, mask, conn) max_rand = np.max(np.hstack((rand_pos_sizes, rand_neg_sizes))) # count cluster p values c_pos_cl += max_rand > pos_sizes c_neg_cl += max_rand > neg_sizes # compute cluster p values p_pos = c_pos_cl / nr_perm p_neg = c_neg_cl / nr_perm # remove clusters that do not pass threshold if tail == 'both': for i, cl in enumerate(np.unique(pos_labels)[1:]): # 0 is not a cluster if p_pos[i] < cl_p_val/2: cl_p_vals[pos_labels == cl] = p_pos[i] else: pos_labels[pos_labels == cl] = 0 for i, cl in enumerate(np.unique(neg_labels)[1:]): # 0 is not a cluster if p_neg[i] < cl_p_val/2: cl_p_vals[neg_labels == cl] = p_neg[i] else: neg_labels[neg_labels == cl] = 0 elif tail == 'right': for i, cl in enumerate(np.unique(pos_labels)[1:]): # 0 is not a cluster if p_pos[i] < cl_p_val: cl_p_vals[pos_labels == cl] = p_pos[i] else: pos_labels[pos_labels == cl] = 0 elif tail == 'left': for i, cl in enumerate(np.unique(neg_labels)[1:]): # 0 is not a cluster if p_neg[i] < cl_p_val: cl_p_vals[neg_labels == cl] = p_neg[i] else: neg_labels[neg_labels == cl] = 0 # ADD FUNCTION TO GET return cl_p_vals def computeClusterSizes(X1, X2, p_val, paired, tail, mask, conn): ''' Helper function for clusterBasedPermutation (see documentation) NOTE!!! 
Add the moment only supports two tailed tests Add the moment does not support connectivity ''' # STEP 1: determine 'actual' p value # apply the mask to restrict the data X1_mask = X1[:,mask] X2_mask = X2[:,mask] p_vals = np.ones(mask.shape) t_vals = np.zeros(mask.shape) if paired: t_vals[mask], p_vals[mask] = ttest_rel(X1_mask, X2_mask) else: t_vals[mask], p_vals[mask] = ttest_ind(X1_mask, X2_mask) # initialize clusters and use mask to restrict relevant info sign_cl = np.mean(X1,0) - np.mean(X2,0) sign_cl[~mask] = 0 p_vals[~mask] = 1 # STEP 2: apply threshold and determine positive and negative clusters cl_mask = p_vals < p_val pos_cl = np.zeros(cl_mask.shape) neg_cl = np.zeros(cl_mask.shape) pos_cl[sign_cl > 0] = cl_mask[sign_cl > 0] neg_cl[sign_cl < 0] = cl_mask[sign_cl < 0] # STEP 3: label clusters if conn == None: nr_p, pos_labels = cv2.connectedComponents(np.uint8(pos_cl)) nr_n, neg_labels = cv2.connectedComponents(np.uint8(neg_cl)) pos_labels = np.squeeze(pos_labels) # hack to control for onedimensional data (CHECK whether correct) neg_labels = np.squeeze(neg_labels) else: print('Function does not yet support connectivity') # STEP 4: compute the sum of t stats in each cluster (pos and neg) pos_sizes, neg_sizes = np.zeros(nr_p - 1), np.zeros(nr_n - 1) for i, label in enumerate(np.unique(pos_labels)[1:]): pos_sizes[i] = np.sum(t_vals[pos_labels == label]) for i, label in enumerate(np.unique(neg_labels)[1:]): neg_sizes[i] = abs(np.sum(t_vals[neg_labels == label])) if sum(pos_sizes) == 0: pos_sizes = 0 if sum(neg_sizes) == 0: neg_sizes = 0 return pos_sizes, neg_sizes, pos_labels, neg_labels, p_vals def clusterMask(X1, X2, p_val, paired = True): ''' add docstring ''' # indicate significant clusters of individual timecourses sig_cl = clusterBasedPermutation(X1, X2, p_val = p_val, paired = paired) cluster_mask = ~np.array(sig_cl, dtype = bool) return cluster_mask def permTTest(X_real, X_perm, p_thresh = 0.05): ''' permTTest calculates p-values for the one-sample t-stat for each sample point across frequencies using a surrogate distribution generated with permuted data. The p-value is calculated by comparing the t distribution of the real and the permuted slope data across sample points. The t-stats for both distribution is calculated with t = (m - 0)/SEm , where m is the sample mean slope and SEm is the standard error of the mean slope (i.e. stddev/sqrt(n)). The p value is then derived by dividing the number of instances where the surrogate T value across permutations is larger then the real T value by the number of permutations. Arguments - - - - - X_real(array): subject X dim1 X dim2 (optional), where dim1 and dim2 can be any type of dimension (time, frequency, electrode, etc). Values in array represent some dependent measure (e.g classification accuracy or power) X_perm(array): subject X nr_permutation X dim1 X dim2 (optional) p_thresh (float): threshold for significance. All p values below this value are considered to be significant Returns - - - - p_val (array): array with p_values across frequencies and sample points sig (array): array with significance indices (i.e. 
0 or 1) across frequencies and sample points ''' # FUNCTION DOES NOT YET SUPPORT ONE DIMENSIONAL DATA # preallocate arrays nr_perm = X_perm.shape [1] nr_obs = X_real.shape[0] p_val = np.zeros(X_real.shape[1:]) sig = np.zeros(X_real.shape[1:]) # will be filled with 0s (non-significant) and 1s (significant) # calculate the real and the surrogate one-sample t-stats r_M = np.mean(X_real, axis = 0); p_M = np.mean(X_perm, axis = 0) r_SE = np.std(X_real, axis = 0)/sqrt(nr_obs); p_SE = np.std(X_perm, axis = 0)/sqrt(nr_obs) r_T = r_M/r_SE; p_T = p_M/p_SE # calculate p-values for f in range(X_real.shape[1]): for s in range(X_real.shape[2]): surr_T = p_T[f,s,:] p_val[f,s] = len(surr_T[surr_T>r_T[f,s]])/float(nr_perm) if p_val[f,s] < p_thresh: sig[f,s] = 1 return p_val, sig def FDR(p_vals, q = 0.05, method = 'pdep', adjust_p = False, report = True): ''' Functions controls the false discovery rate of a family of hypothesis tests. FDR is the expected proportion of rejected hypotheses that are mistakingly rejected (i.e., the null hypothesis is actually true for those tests). FDR is less conservative/more powerfull method for correcting for multiple comparisons than procedures like Bonferroni correction that provide strong control of the familiy-wise error rate (i.e. the probability that one or more null hypotheses are mistakingly rejected) Arguments - - - - - p_vals (array): an array (one or multi-demensional) containing the p_values of each individual test in a family f tests q (float): the desired false discovery rate method (str): If 'pdep' the original Bejnamini & Hochberg (1995) FDR procedure is used, which is guaranteed to be accurate if the individual tests are independent or positively dependent (e.g., Gaussian variables that are positively correlated or independent). If 'dep,' the FDR procedure described in Benjamini & Yekutieli (2001) that is guaranteed to be accurate for any test dependency structure (e.g.,Gaussian variables with any covariance matrix) is used. 'dep' is always appropriate to use but is less powerful than 'pdep.' adjust_p (bool): If True, adjusted p-values are computed (can be computationally intensive) report (bool): If True, a brief summary of FDR results is printed Returns - - - - h (array): a boolean matrix of the same size as the input p_vals, specifying whether the test that produced the corresponding p-value is significant crit_p (float): All uncorrected p-values less than or equal to crit_p are significant. If no p-values are significant, crit_p = 0 adj_ci_cvrg (float): he FCR-adjusted BH- or BY-selected confidence interval coverage. adj_p (array): All adjusted p-values less than or equal to q are significant. Note, adjusted p-values can be greater than 1 ''' orig = p_vals.shape # check whether p_vals contains valid input (i.e. 
between 0 and 1) if np.sum(p_vals > 1) or np.sum(p_vals < 0): print ('Input contains invalid p values') # sort p_values if p_vals.ndim > 1: p_vect = np.squeeze(np.reshape(p_vals,(1,-1))) else: p_vect = p_vals sort = np.argsort(p_vect) # for sorting rev_sort = np.argsort(sort) # to reverse sorting p_sorted = p_vect[sort] nr_tests = p_sorted.size tests = np.arange(1.0,nr_tests + 1) if method == 'pdep': # BH procedure for independence or positive independence if report: print('FDR/FCR procedure used is guaranteed valid for independent or positively dependent tests') thresh = tests * (q/nr_tests) wtd_p = nr_tests * p_sorted / tests elif method == 'dep': # BH procedure for any dependency structure if report: print('FDR/FCR procedure used is guaranteed valid for independent or dependent tests') denom = nr_tests * sum(1/tests) thresh = tests * (q/denom) wtd_p = denom * p_sorted / tests # Note this method can produce adjusted p values > 1 (Compute adjusted p values) # Chec whether p values need to be adjusted if adjust_p: adj_p = np.empty(nr_tests) * np.nan wtd_p_sortidx = np.argsort(wtd_p) wtd_p_sorted = wtd_p[wtd_p_sortidx] next_fill = 0 for i in range(nr_tests): if wtd_p_sortidx[i] >= next_fill: adj_p[next_fill:wtd_p_sortidx[i]+1] = wtd_p_sorted[i] next_fill = wtd_p_sortidx[i] + 1 if next_fill > nr_tests: break adj_p = np.reshape(adj_p[rev_sort], (orig)) else: adj_p = np.nan rej = np.where(p_sorted <= thresh)[0] if rej.size == 0: crit_p = 0 h = np.array(p_vals * 0, dtype = bool) adj_ci_cvrg = np.nan else: max_idx = rej[-1] # find greatest significant pvalue crit_p = p_sorted[max_idx] h = p_vals <= crit_p adj_ci_cvrg = 1 - thresh[max_idx] if report: nr_sig = np.sum(p_sorted <= crit_p) if nr_sig == 1: print('Out of {} tests, {} is significant using a false discovery rate of {}\n'.format(nr_tests,nr_sig,q)) else: print('Out of {} tests, {} are significant using a false discovery rate of {}\n'.format(nr_tests,nr_sig,q)) return h, crit_p, adj_ci_cvrg, adj_p def threshArray(X, chance, method = 'ttest', paired = True, p_value = 0.05): ''' Two step thresholding of a two dimensional data array. Step 1: use group level testing for each individual data point Step 2: apply clusterbased permutation on the thresholded data from step 1 Arguments - - - - - X (array): subject X dim1 X dim2, where dim1 and dim2 can be any type of dimension (time, frequency, electrode, etc). Values in array represent some dependent measure (e.g classification accuracy or power) chance (int | float): chance value. All non-significant values will be reset to this value method (str): statistical test used in first step of thresholding paired (bool): specifies whether ttest is a paired sampled test or not p_value (float) | p_value used for thresholding Returns - - - - X (array): thresholded data ''' X_ = np.copy(X) # make sure original data remains unchanged p_vals = signedRankArray(X_, chance, method) X_[:,p_vals > p_value] = chance p_vals = clusterBasedPermutation(X_,chance, paired = paired) X_ = X_.mean(axis = 0) X_[p_vals > p_value] = chance return X_ def signedRankArray(X, Y, method = 'ttest_rel'): ''' Arguments - - - - - X1 (array): subject X dim1 X dim2, where dim1 and dim2 can be any type of dimension (time, frequency, electrode, etc). 
Values in array represent some dependent measure (e.g classification accuracy or power) Y (array | float): either a datamatrix with same dimensions as X1, or a single value against which X1 will be tested method (str): type of test to calculate p values ''' # check whether X2 is a chance variable or a data array if isinstance(Y, (float, int)): Y = np.tile(Y, X.shape) p_vals = np.ones(X[0].shape) for i in range(p_vals.shape[0]): for j in range(p_vals.shape[1]): if method == 'wilcoxon': _, p_vals[i,j] = wilcoxon(X[:,i,j], Y[:,i,j]) elif method == 'ttest_rel': _, p_vals[i,j] = ttest_rel(X[:,i,j], Y[:,i,j]) elif method == 'ttest_1samp': _, p_vals[i,j] = ttest_1samp(X[:,i,j], Y[0,i,j]) return p_vals def bootstrap(X, b_iter = 1000): ''' bootstrap uses a bootstrap procedure to calculate standard error of data in X. Arguments - - - - - test Returns - - - - ''' nr_obs = X.shape[0] bootstrapped = np.zeros((b_iter,X.shape[1])) for b in range(b_iter): idx = np.random.choice(nr_obs,nr_obs,replace = True) # sample nr subjects observations from the slopes sample (with replacement) bootstrapped[b,:] = np.mean(X[idx,:],axis = 0) error = np.std(bootstrapped, axis = 0) mean = X.mean(axis = 0) return error, mean def jacklatency(x1, x2, thresh_1, thresh_2, times, info = False): ''' Helper function of jackknife. Calculates the latency difference between threshold crosses using linear interpolation Arguments - - - - - x1 (array): subject X time. Values in array represent some dependent measure. (e.g. ERP voltages) x2 (array): array with same dimensions as X1 thresh_1 (float): criterion value thresh_2 (float): criterion value times (array): timing of samples in X1 and X2 times (str): calculate onset or offset latency differences Returns - - - - D (float): latency difference ''' # get latency exceeding thresh idx_1 = np.where(x1 >= thresh_1)[0][0] lat_1 = times[idx_1 - 1] + (times[idx_1] - times[idx_1 - 1]) * \ (thresh_1 - x1[idx_1 - 1])/(x1[idx_1] - x1[idx_1-1]) idx_2 = np.where(x2 >= thresh_2)[0][0] lat_2 = times[idx_2 - 1] + (times[idx_2] - times[idx_2 - 1]) * \ (thresh_2 - x2[idx_2 - 1])/(x2[idx_2] - x2[idx_2-1]) D = lat_2 - lat_1 if info: print('Estimated onset latency X1 = {0:.2f} and X2: {1:.2f}'.format(lat_1, lat_2)) return D def jackknife(X1, X2, times, peak_window, percent_amp = 50, timing = 'onset'): ''' Implements Miller, J., Patterson, T., & Ulrich, R. (1998). Jackknife-based method for measuring LRP onset latency differences. Psychophysiology, 35(1), 99-115. Compares onset latencies between two grand-average waveforms. For each waveform a criterion is determined based on a set percentage of the grand average peak. The latency at which this criterion is first reached is then determined using linear interpolation. Next the jackknife estimate of the standard error of the difference is used, which is then used to calculate the t value corresponding to the null hypothesis of no differences in onset latencies Arguments - - - - - X1 (array): subject X time. Values in array represent some dependent measure. (e.g. 
ERP voltages) X2 (array): array with same dimensions as X1 times (array): timing of samples in X1 and X2 peak_window (tuple | list): time window that contains peak of interest percent_amp (int): used to calculate criterion value timing (str): calculate onset or offset latency differnces Returns - - - - onset (float): onset differnce between grand waveform of X1 and X2 t_value (float): corresponding to the null hypothesis of no differences in onset latencies ''' # set number of observations nr_sj = X1.shape[0] # flip arrays if necessary if timing == 'offset': X1 = np.fliplr(X1) X2 = np.fliplr(X2) times = np.flipud(times) # get time window of interest s,e = np.sort([np.argmin(abs(times - t)) for t in peak_window]) t = times[s:e] # slice data containing the peak average x1 = np.mean(X1[:,s:e], axis = 0) x2 = np.mean(X2[:,s:e], axis = 0) # get the criterion based on peak amplitude percentage c_1 = max(x1) * percent_amp/ 100.0 c_2 = max(x2) * percent_amp/ 100.0 onset = jacklatency(x1, x2, c_1, c_2, t, info = True) # repeat previous steps but exclude all data points once D = [] idx = np.arange(nr_sj) for i in range(nr_sj): x1 = np.mean(abs(X1[np.where(idx != i)[0],s:e]), axis = 0) x2 = np.mean(abs(X2[:,s:e]), axis = 0) c_1 = max(x1) * percent_amp/ 100.0 c_2 = max(x2) * percent_amp/ 100.0 D.append(jacklatency(x1, x2, c_1, c_2, t) ) # compute the jackknife estimate of the standard error of the differnce Sd = np.sqrt((nr_sj - 1.0)/ nr_sj * np.sum([(d - np.mean(D))**2 for d in np.array(D)])) t_value = onset/ Sd return onset, t_value
33.925926
139
0.699434
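The `FDR` function in the record above implements the Benjamini-Hochberg step-up procedure (its `'pdep'` branch). The following self-contained sketch shows only that core step-up logic with NumPy; the `fdr_bh` name and the example p-values are illustrative and not taken from the record.

```python
import numpy as np


def fdr_bh(p_vals, q=0.05):
    """Benjamini-Hochberg step-up FDR control for independent / positively dependent tests (sketch)."""
    p = np.asarray(p_vals, dtype=float).ravel()
    m = p.size
    order = np.argsort(p)
    thresh = np.arange(1, m + 1) * (q / m)      # per-rank critical values k/m * q
    below = np.where(p[order] <= thresh)[0]
    if below.size == 0:                          # nothing survives correction
        return np.zeros(np.shape(p_vals), dtype=bool), 0.0
    crit_p = p[order][below[-1]]                 # largest p-value still under its critical value
    return np.asarray(p_vals) <= crit_p, crit_p


# Usage: only the genuinely small p-values remain significant after correction.
h, crit = fdr_bh([0.001, 0.008, 0.039, 0.041, 0.042, 0.06, 0.074, 0.205, 0.212, 0.216], q=0.05)
```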
795743a772d220ca3f67a2a77409b2e357bf526b
2,586
py
Python
azure-mgmt-compute/azure/mgmt/compute/models/virtual_machine_scale_set_os_disk.py
Berryliao84/Python-Azure
a96ed6e8bbf4290372980a2919b31110da90b164
[ "MIT" ]
1
2017-10-29T15:14:35.000Z
2017-10-29T15:14:35.000Z
azure-mgmt-compute/azure/mgmt/compute/models/virtual_machine_scale_set_os_disk.py
Berryliao84/Python-Azure
a96ed6e8bbf4290372980a2919b31110da90b164
[ "MIT" ]
null
null
null
azure-mgmt-compute/azure/mgmt/compute/models/virtual_machine_scale_set_os_disk.py
Berryliao84/Python-Azure
a96ed6e8bbf4290372980a2919b31110da90b164
[ "MIT" ]
null
null
null
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class VirtualMachineScaleSetOSDisk(Model):
    """Describes a virtual machine scale set operating system disk.

    :param name: The disk name.
    :type name: str
    :param caching: The caching type. Possible values include: 'None',
     'ReadOnly', 'ReadWrite'
    :type caching: str or :class:`CachingTypes
     <azure.mgmt.compute.models.CachingTypes>`
    :param create_option: The create option. Possible values include:
     'fromImage', 'empty', 'attach'
    :type create_option: str or :class:`DiskCreateOptionTypes
     <azure.mgmt.compute.models.DiskCreateOptionTypes>`
    :param os_type: The Operating System type. Possible values include:
     'Windows', 'Linux'
    :type os_type: str or :class:`OperatingSystemTypes
     <azure.mgmt.compute.models.OperatingSystemTypes>`
    :param image: The Source User Image VirtualHardDisk. This VirtualHardDisk
     will be copied before using it to attach to the Virtual Machine. If
     SourceImage is provided, the destination VirtualHardDisk should not
     exist.
    :type image: :class:`VirtualHardDisk
     <azure.mgmt.compute.models.VirtualHardDisk>`
    :param vhd_containers: The list of virtual hard disk container uris.
    :type vhd_containers: list of str
    """

    _validation = {
        'name': {'required': True},
        'create_option': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'caching': {'key': 'caching', 'type': 'CachingTypes'},
        'create_option': {'key': 'createOption', 'type': 'DiskCreateOptionTypes'},
        'os_type': {'key': 'osType', 'type': 'OperatingSystemTypes'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'vhd_containers': {'key': 'vhdContainers', 'type': '[str]'},
    }

    def __init__(self, name, create_option, caching=None, os_type=None, image=None, vhd_containers=None):
        self.name = name
        self.caching = caching
        self.create_option = create_option
        self.os_type = os_type
        self.image = image
        self.vhd_containers = vhd_containers
41.709677
105
0.643078
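The model class in the record above is a generated msrest model, so it is normally just constructed with its required fields and passed to the scale-set API. A small construction sketch follows; the disk name, the placeholder blob URLs, and the `uri=` keyword on `VirtualHardDisk` are assumptions for illustration and are not taken from the record.

```python
from azure.mgmt.compute.models import VirtualMachineScaleSetOSDisk, VirtualHardDisk

# 'name' and 'create_option' are required by the model's _validation map;
# the remaining fields are optional.
os_disk = VirtualMachineScaleSetOSDisk(
    name="scaleset-os-disk",                     # placeholder disk name
    create_option="fromImage",                   # one of 'fromImage', 'empty', 'attach'
    caching="ReadWrite",                         # one of 'None', 'ReadOnly', 'ReadWrite'
    os_type="Linux",
    image=VirtualHardDisk(                       # assumed keyword: the source user image VHD URI
        uri="https://example.blob.core.windows.net/vhds/source.vhd"),
    vhd_containers=["https://example.blob.core.windows.net/vhds"],
)
```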
79574421f4a31ff8a90002ef3342c1199a9d7541
94
py
Python
pyccel/parser/syntax/__init__.py
toddrme2178/pyccel
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
[ "MIT" ]
null
null
null
pyccel/parser/syntax/__init__.py
toddrme2178/pyccel
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
[ "MIT" ]
null
null
null
pyccel/parser/syntax/__init__.py
toddrme2178/pyccel
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
[ "MIT" ]
null
null
null
# -*- coding: UTF-8 -*-

from .basic import *
from .openmp import *
from .openacc import *
15.666667
23
0.617021
7957442f56be2917126eb2dda27bc6904fa5101d
2,263
py
Python
test/acceptance/features/steps/olm.py
growi/service-binding-operator
f10f7f8838049b0c4e9fe04aa6dbce151296b908
[ "Apache-2.0" ]
null
null
null
test/acceptance/features/steps/olm.py
growi/service-binding-operator
f10f7f8838049b0c4e9fe04aa6dbce151296b908
[ "Apache-2.0" ]
null
null
null
test/acceptance/features/steps/olm.py
growi/service-binding-operator
f10f7f8838049b0c4e9fe04aa6dbce151296b908
[ "Apache-2.0" ]
null
null
null
import re

from command import Command
from openshift import Openshift


class Operator(object):
    openshift = Openshift()
    cmd = Command()

    pod_name_pattern = "{name}.*"
    name = ""
    operator_catalog_source_name = ""
    operator_catalog_image = ""
    operator_catalog_channel = ""
    operator_subscription_csv_version = None
    package_name = ""

    def is_running(self, wait=False):
        if wait:
            pod_name = self.openshift.wait_for_pod(self.pod_name_pattern.format(name=self.name), self.openshift.operators_namespace)
        else:
            pod_name = self.openshift.search_pod_in_namespace(self.pod_name_pattern.format(name=self.name), self.openshift.operators_namespace)
        if pod_name is not None:
            operator_pod_status = self.openshift.check_pod_status(pod_name, self.openshift.operators_namespace)
            print("The pod {} is running: {}".format(self.name, operator_pod_status))
            return operator_pod_status
        else:
            return False

    def install_catalog_source(self):
        if self.operator_catalog_image != "":
            install_src_output = self.openshift.create_catalog_source(self.operator_catalog_source_name, self.operator_catalog_image)
            if re.search(r'.*catalogsource.operators.coreos.com/%s\s(unchanged|created)' % self.operator_catalog_source_name, install_src_output) is None:
                print("Failed to create {} catalog source".format(self.operator_catalog_source_name))
                return False
        return self.openshift.wait_for_package_manifest(self.package_name, self.operator_catalog_source_name, self.operator_catalog_channel)

    def install_operator_subscription(self, csv_version=None):
        install_sub_output = self.openshift.create_operator_subscription(
            self.package_name, self.operator_catalog_source_name, self.operator_catalog_channel,
            self.operator_subscription_csv_version if csv_version is None else csv_version)
        if re.search(r'.*subscription.operators.coreos.com/%s\s(unchanged|created)' % self.package_name, install_sub_output) is None:
            print("Failed to create {} operator subscription".format(self.package_name))
            return False
        return True
47.145833
154
0.719841
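The `Operator` class in the record above is a base fixture whose class attributes are overridden per operator under test, and whose three methods are then called in order (catalog source, subscription, pod check). A hypothetical usage sketch follows; the subclass name, catalog image, and channel are placeholders, and it assumes the record's file is importable as `olm` alongside the project's `openshift` and `command` test helpers.

```python
from olm import Operator  # assumed module name for the file in the record


class ExampleOperator(Operator):
    # Placeholder values; a real test would point at an actual operator bundle.
    name = "example-operator"                        # used to match the operator pod name
    package_name = "example-operator"                # package manifest / subscription name
    pod_name_pattern = "example-operator.*"
    operator_catalog_source_name = "example-catalog"
    operator_catalog_image = "quay.io/example/example-operator-index:latest"
    operator_catalog_channel = "stable"


operator = ExampleOperator()
assert operator.install_catalog_source()             # CatalogSource + wait for the package manifest
assert operator.install_operator_subscription()      # Subscription (optionally pinning a CSV version)
assert operator.is_running(wait=True)                 # wait for the operator pod to report running
```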
79574461a39c134b1ebb8121bd822dbb0f1b7b7f
86,018
py
Python
0900-hp/hplip-3.21.12/ui5/devmgr5.py
rgfaber/dev-toolkit
b7e6b1e35a4bfb8ca9ba75e5556917cc49b88f7f
[ "Apache-2.0" ]
null
null
null
0900-hp/hplip-3.21.12/ui5/devmgr5.py
rgfaber/dev-toolkit
b7e6b1e35a4bfb8ca9ba75e5556917cc49b88f7f
[ "Apache-2.0" ]
null
null
null
0900-hp/hplip-3.21.12/ui5/devmgr5.py
rgfaber/dev-toolkit
b7e6b1e35a4bfb8ca9ba75e5556917cc49b88f7f
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # # (c) Copyright 2001-2015 HP Development Company, L.P. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Authors: Don Welch, Naga Samrat Chowdary Narla # #from __future__ import generators # Std Lib import sys import time import os import gzip import select import struct import signal from base.sixext.moves import configparser # Local from base.g import * from base import device, utils, pml, maint, models, pkit, os_utils from prnt import cups from base.sixext import PY3 from base.codes import * from .ui_utils import * from installer.core_install import * # Qt from PyQt5.QtCore import * from PyQt5.QtGui import * from PyQt5.QtWidgets import * import collections try: collectionsAbc = collections.abc except AttributeError: collectionsAbc = collections hpmudext = utils.import_ext('hpmudext') # dbus try: import dbus from dbus.mainloop.glib import DBusGMainLoop from dbus import lowlevel except ImportError: log.error("Unable to load DBus libraries. Please check your installation and try again.") if PY3: # Workaround due to incomplete Python3 support in Linux distros. log.error("Please upgrade your python installation to the latest available version.") sys.exit(1) import warnings # Ignore: .../dbus/connection.py:242: DeprecationWarning: object.__init__() takes no parameters # (occurring on Python 2.6/dBus 0.83/Ubuntu 9.04) warnings.simplefilter("ignore", DeprecationWarning) # Main form from .devmgr5_base import Ui_MainWindow from .devmgr_ext import Ui_MainWindow_Derived # Aux. dialogs from .faxsetupdialog import FaxSetupDialog from .plugindialog import PluginDialog from .firmwaredialog import FirmwareDialog from .aligndialog import AlignDialog from .printdialog import PrintDialog from .makecopiesdialog import MakeCopiesDialog from .sendfaxdialog import SendFaxDialog from .fabwindow import FABWindow from .devicesetupdialog import DeviceSetupDialog from .printtestpagedialog import PrintTestPageDialog from .infodialog import InfoDialog from .cleandialog import CleanDialog from .colorcaldialog import ColorCalDialog from .linefeedcaldialog import LineFeedCalDialog from .pqdiagdialog import PQDiagDialog from .nodevicesdialog import NoDevicesDialog from .aboutdialog import AboutDialog # Other forms and controls from .settingsdialog import SettingsDialog from .printsettingstoolbox import PrintSettingsToolbox from base import os_utils # all in seconds MIN_AUTO_REFRESH_RATE = 5 MAX_AUTO_REFRESH_RATE = 60 DEF_AUTO_REFRESH_RATE = 30 device_list = {} # { Device_URI : device.Device(), ... 
} model_obj = models.ModelData() # Used to convert dbus xformed data back to plain Python types # *********************************************************************************** # # ITEM/UTILITY UI CLASSES # # *********************************************************************************** class FuncViewItem(QListWidgetItem): def __init__(self, parent, text, pixmap, tooltip_text, cmd): QListWidgetItem.__init__(self, QIcon(pixmap), text, parent) self.tooltip_text = tooltip_text self.cmd = cmd # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX class DeviceViewItem(QListWidgetItem): def __init__(self, parent, text, pixmap, device_uri, is_avail=True): QListWidgetItem.__init__(self, QIcon(pixmap), text, parent) self.device_uri = device_uri self.is_avail = is_avail self.setTextAlignment(Qt.AlignHCenter) # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX class PluginInstall(QObject): def __init__(self, parent, plugin_type, plugin_installed): self.parent = parent self.plugin_type = plugin_type self.plugin_installed = plugin_installed def exec_(self): install_plugin = True if self.plugin_installed: install_plugin = QMessageBox.warning(self.parent, self.parent.windowTitle(), self.__tr("<b>The HPLIP plugin is already installed.</b><p>Do you want to continue and re-install it?"), QMessageBox.Yes, QMessageBox.No, QMessageBox.NoButton) == QMessageBox.Yes if install_plugin: ok, sudo_ok = pkit.run_plugin_command(self.plugin_type == PLUGIN_REQUIRED, self.parent.cur_device.mq['plugin-reason']) if not sudo_ok: QMessageBox.critical(self.parent, self.parent.windowTitle(), self.__tr("<b>Unable to find an appropriate su/sudo utility to run hp-plugin.</b><p>Install kdesu, gnomesu, or gksu.</p>"), QMessageBox.Ok, QMessageBox.NoButton, QMessageBox.NoButton) def __tr(self,s,c = None): return qApp.translate("DevMgr5",s,c) # *********************************************************************************** # # MAINWINDOW # # *********************************************************************************** ''' class Ui_MainWindow_Derived(Ui_MainWindow): def setupUi(self, MainWindow, latest_available_version, Is_autoInstaller_distro): super().setupUi(MainWindow) self.DiagnoseQueueAction = QAction(MainWindow) self.DiagnoseQueueAction.setObjectName("DiagnoseQueueAction") self.DiagnoseHPLIPAction = QAction(MainWindow) self.DiagnoseHPLIPAction.setObjectName("DiagnoseHPLIPAction") self.latest_available_version = latest_available_version self.Is_autoInstaller_distro = Is_autoInstaller_distro if self.latest_available_version is not "": self.tab_3 = QWidget() self.tab_3.setObjectName("tab_3") self.label = QLabel(self.tab_3) self.label.setGeometry(QRect(30, 45, 300, 17)) self.label.setObjectName("label") if self.Is_autoInstaller_distro: self.InstallLatestButton = QPushButton(self.tab_3) self.InstallLatestButton.setGeometry(QRect(351, 40, 96, 27)) self.InstallLatestButton.setObjectName("pushButton") else: self.ManualInstalllabel = QLabel(self.tab_3) self.ManualInstalllabel.setGeometry(QRect(30, 70,300, 45)) self.ManualInstalllabel.setObjectName("label") self.InstallLatestButton = QPushButton(self.tab_3) self.InstallLatestButton.setGeometry(QRect(295, 80, 110, 25)) self.InstallLatestButton.setObjectName("pushButton") self.Tabs.addTab(self.tab_3, "") # super().setupUi(MainWindow) def retranslateUi(self, MainWindow): super().retranslateUi(MainWindow) if self.latest_available_version is not "": self.label.setText(QtGui.QApplication.translate("MainWindow", "New 
version of HPLIP-%s is available"%self.latest_available_version, None)) self.Tabs.setTabText(self.Tabs.indexOf(self.tab_3), QtGui.QApplication.translate("MainWindow", "Upgrade", None)) if self.Is_autoInstaller_distro: self.InstallLatestButton.setText(QtGui.QApplication.translate("MainWindow", "Install now", None)) else: msg="Please install manually as mentioned in " self.ManualInstalllabel.setText(QtGui.QApplication.translate("MainWindow", msg, None)) self.InstallLatestButton.setText(QtGui.QApplication.translate("MainWindow", "HPLIP website", None)) ''' class DevMgr5(Ui_MainWindow_Derived, Ui_MainWindow, QMainWindow): def __init__(self, toolbox_version, initial_device_uri=None, dbus_loop=None, parent=None, name=None, fl=0): # QMainWindow.__init__(self, parent) super(DevMgr5, self).__init__(parent) log.debug("Initializing toolbox UI (Qt5)...") log.debug("HPLIP Version: %s" % prop.installed_version) self.toolbox_version = toolbox_version self.initial_device_uri = initial_device_uri self.device_vars = {} self.num_devices = 0 self.cur_device = None self.cur_printer = None self.updating = False self.init_failed = False self.service = None self.Is_autoInstaller_distro = False # True-->tier1(supports auto installation). False--> tier2(manual installation) # Distro insformation core = CoreInstall(MODE_CHECK) # core.init() self.Is_autoInstaller_distro = core.is_auto_installer_support() # User settings self.user_settings = UserSettings() self.user_settings.load() self.user_settings.debug() self.cur_device_uri = self.user_settings.last_used_device_uri installed_version=sys_conf.get('hplip','version') if not utils.Is_HPLIP_older_version( installed_version, self.user_settings.latest_available_version): self.setupUi(self,"",self.Is_autoInstaller_distro) else: self.setupUi(self, self.user_settings.latest_available_version,self.Is_autoInstaller_distro) # Other initialization self.initDBus() self.initPixmaps() self.initMisc() self.initUI() cups.setPasswordCallback(showPasswordUI) if not prop.doc_build: self.ContentsAction.setEnabled(False) self.allow_auto_refresh = True QTimer.singleShot(0, self.initalUpdate) # *********************************************************************************** # # INIT # # *********************************************************************************** # TODO: Make sbus init mandatory success, else exit def initDBus(self): self.dbus_loop = DBusGMainLoop(set_as_default=True) self.dbus_avail, self.service, self.session_bus = device.init_dbus(self.dbus_loop) if not self.dbus_avail: log.error("dBus initialization error. 
Exiting.") self.init_failed = True return # Receive events from the session bus self.session_bus.add_signal_receiver(self.handleSessionSignal, sender_keyword='sender', destination_keyword='dest', interface_keyword='interface', member_keyword='member', path_keyword='path') def initPixmaps(self): self.func_icons_cached = False self.func_icons = {} self.device_icons = {} # Application icon self.setWindowIcon(QIcon(load_pixmap('hp_logo', '128x128'))) self.fax_icon = load_pixmap("fax2", "other") def initUI(self): # Setup device icon list self.DeviceList.setSortingEnabled(True) self.DeviceList.setContextMenuPolicy(Qt.CustomContextMenu) self.setDeviceListViewMode(QListView.IconMode) self.ViewAsIconsAction.triggered.connect(lambda: self.setDeviceListViewMode(QListView.IconMode)) self.ViewAsListAction.triggered.connect(lambda: self.setDeviceListViewMode(QListView.ListMode)) self.DeviceList.customContextMenuRequested["const QPoint &"].connect(self.DeviceList_customContextMenuRequested) # Setup main menu self.DeviceRefreshAction.setIcon(QIcon(load_pixmap("refresh1", "16x16"))) self.DeviceRefreshAction.triggered.connect(self.DeviceRefreshAction_activated) self.RefreshAllAction.setIcon(QIcon(load_pixmap("refresh", "16x16"))) self.RefreshAllAction.triggered.connect(self.RefreshAllAction_activated) self.SetupDeviceAction.setIcon(QIcon(load_pixmap('list_add', '16x16'))) self.SetupDeviceAction.triggered.connect(self.SetupDeviceAction_activated) self.RemoveDeviceAction.setIcon(QIcon(load_pixmap('list_remove', '16x16'))) self.RemoveDeviceAction.triggered.connect(self.RemoveDeviceAction_activated) self.PreferencesAction.setIcon(QIcon(load_pixmap('settings', '16x16'))) self.PreferencesAction.triggered.connect(self.PreferencesAction_activated) self.DiagnoseQueueAction.setIcon(QIcon(load_pixmap('warning', '16x16'))) self.DiagnoseQueueAction.triggered.connect(self.DiagnoseQueueAction_activated) self.DiagnoseHPLIPAction.setIcon(QIcon(load_pixmap('troubleshoot', '16x16'))) self.DiagnoseHPLIPAction.triggered.connect(self.DiagnoseHPLIP_activated) self.ContentsAction.setIcon(QIcon(load_pixmap("help", "16x16"))) self.ContentsAction.triggered.connect(self.helpContents) self.QuitAction.setIcon(QIcon(load_pixmap("quit", "16x16"))) self.QuitAction.triggered.connect(self.quit) self.AboutAction.triggered.connect(self.helpAbout) self.PrintControlPrinterNameCombo.activated["const QString &"].connect(self.PrintControlPrinterNameCombo_activated) self.PrintSettingsPrinterNameCombo.activated["const QString &"].connect(self.PrintSettingsPrinterNameCombo_activated) signal.signal(signal.SIGINT, signal.SIG_IGN) # Init tabs/controls self.initActionsTab() self.initStatusTab() self.initSuppliesTab() self.initPrintSettingsTab() self.initPrintControlTab() self.Tabs.currentChanged[int].connect(self.Tabs_currentChanged) # Resize the splitter so that the device list starts as a single column self.splitter.setSizes([80, 600]) # Setup the Device List self.DeviceList.setIconSize(QSize(60, 60)) self.DeviceList.currentItemChanged["QListWidgetItem *", "QListWidgetItem *"].connect(self.DeviceList_currentChanged) def initMisc(self): self.TabIndex = { 0: self.updateActionsTab, 1: self.updateStatusTab, 2: self.updateSuppliesTab, 3: self.updatePrintSettingsTab, 4: self.updatePrintControlTab, 5:self.updateHPLIPupgrade, } # docs self.docs = "http://hplip.sf.net" if prop.doc_build: g = os.path.join(sys_conf.get('dirs', 'doc'), 'index.html') if os.path.exists(g): self.docs = "file://%s" % g # support self.support = "https://launchpad.net/hplip" def 
initalUpdate(self): if self.init_failed: self.close() return self.rescanDevices() cont = True if self.initial_device_uri is not None: if not self.activateDevice(self.initial_device_uri): log.error("Device %s not found" % self.initial_device_uri) cont = False if self.cur_printer: self.getPrinterState() if self.printer_state == cups.IPP_PRINTER_STATE_STOPPED: self.cur_device.sendEvent(EVENT_PRINTER_QUEUE_STOPPED, self.cur_printer) if not self.printer_accepting: self.cur_device.sendEvent(EVENT_PRINTER_QUEUE_REJECTING_JOBS, self.cur_printer) def activateDevice(self, device_uri): log.debug(log.bold("Activate: %s %s %s" % ("*"*20, device_uri, "*"*20))) index = 0 d = self.DeviceList.item(index) #firstItem() found = False while d is not None: if d.device_uri == device_uri: found = True self.DeviceList.setSelected(d, True) self.DeviceList.setCurrentItem(d) break index += 1 d = self.DeviceList.item(index) return found # *********************************************************************************** # # UPDATES/NOTIFICATIONS # # *********************************************************************************** def handleSessionSignal(self, *args, **kwds): if kwds['interface'] == 'com.hplip.Toolbox' and \ kwds['member'] == 'Event': log.debug("Handling event...") event = device.Event(*args[:6]) event.debug() if event.event_code < EVENT_MIN_USER_EVENT: pass elif event.event_code == EVENT_DEVICE_UPDATE_REPLY: log.debug("EVENT_DEVICE_UPDATE_REPLY (%s)" % event.device_uri) dev = self.findDeviceByURI(event.device_uri) if dev is not None: try: self.service.GetStatus(event.device_uri, reply_handler=self.handleStatusReply, error_handler=self.handleStatusError) except dbus.exceptions.DBusException as e: log.error("dbus call to GetStatus() failed.") elif event.event_code == EVENT_USER_CONFIGURATION_CHANGED: log.debug("EVENT_USER_CONFIGURATION_CHANGED") self.user_settings.load() elif event.event_code == EVENT_HISTORY_UPDATE: log.debug("EVENT_HISTORY_UPDATE (%s)" % event.device_uri) dev = self.findDeviceByURI(event.device_uri) if dev is not None: self.updateHistory(dev) elif event.event_code == EVENT_SYSTEMTRAY_EXIT: log.debug("EVENT_SYSTEMTRAY_EXIT") log.warn("HPLIP Status Service was closed. 
HPLIP Device Manager will now exit.") cups.releaseCupsInstance() self.close() elif event.event_code == EVENT_RAISE_DEVICE_MANAGER: log.debug("EVENT_RAISE_DEVICE_MANAGER") self.showNormal() self.setWindowState(self.windowState() & ~Qt.WindowMinimized | Qt.WindowActive) self.raise_() elif event.event_code in (EVENT_DEVICE_START_POLLING, EVENT_DEVICE_STOP_POLLING, EVENT_POLLING_REQUEST): pass else: log.error("Unhandled event: %d" % event.event_code) def handleStatusReply(self, device_uri, data): dev = self.findDeviceByURI(device_uri) if dev is not None: t = {} for key in data: value = model_obj.convert_data(str(key), str(data[key])) t.setdefault(key, value) dev.dq = t.copy() for d in dev.dq: dev.__dict__[d.replace('-','_')] = dev.dq[d] self.updateDevice(dev) def handleStatusError(self, e): log.error(str(e)) def updateHistory(self, dev=None): if dev is None: dev = self.cur_device try: self.service.GetHistory(dev.device_uri, reply_handler=self.handleHistoryReply, error_handler=self.handleHistoryError) except dbus.exceptions.DBusException as e: log.error("dbus call to GetHistory() failed.") def handleHistoryReply(self, device_uri, history): dev = self.findDeviceByURI(device_uri) if dev is not None: result = [] history.reverse() for h in history: result.append(device.Event(*tuple(h))) try: self.error_code = result[0].event_code except IndexError: self.error_code = STATUS_UNKNOWN dev.error_state = STATUS_TO_ERROR_STATE_MAP.get(self.error_code, ERROR_STATE_CLEAR) dev.hist = result self.updateDevice(dev) def handleHistoryError(self, e): log.error(str(e)) def sendMessage(self, device_uri, printer_name, event_code, username=prop.username, job_id=0, title=''): device.Event(device_uri, printer_name, event_code, username, job_id, title).send_via_dbus(self.session_bus) def timedRefresh(self): if not self.updating and self.user_settings.auto_refresh and self.allow_auto_refresh: log.debug("Refresh timer...") self.cleanupChildren() if self.user_settings.auto_refresh_type == 0: self.requestDeviceUpdate() else: self.rescanDevices() # *********************************************************************************** # # TAB/DEVICE CHANGE SLOTS # # *********************************************************************************** def Tabs_currentChanged(self, tab=0): """ Called when the active tab changes. Update newly displayed tab. 
""" if self.cur_device is not None: self.TabIndex[tab]() def updateAllTabs(self): for tab in self.TabIndex: self.TabIndex[tab]() def updateCurrentTab(self): log.debug("updateCurrentTab()") self.TabIndex[self.Tabs.currentIndex()]() # *********************************************************************************** # # DEVICE ICON LIST/DEVICE UPDATE(S) # # *********************************************************************************** def DeviceRefreshAction_activated(self): self.DeviceRefreshAction.setEnabled(False) self.requestDeviceUpdate() self.DeviceRefreshAction.setEnabled(True) def RefreshAllAction_activated(self): self.rescanDevices() def setDeviceListViewMode(self, mode): if mode == QListView.ListMode: self.DeviceList.setViewMode(QListView.ListMode) self.ViewAsListAction.setEnabled(False) self.ViewAsIconsAction.setEnabled(True) else: self.DeviceList.setViewMode(QListView.IconMode) self.ViewAsListAction.setEnabled(True) self.ViewAsIconsAction.setEnabled(False) def createDeviceIcon(self, dev=None): if dev is None: dev = self.cur_device try: dev.icon except AttributeError: dev.icon = "default_printer" try: self.device_icons[dev.icon] except: self.device_icons[dev.icon] = load_pixmap(dev.icon, 'devices') pix = self.device_icons[dev.icon] w, h = pix.width(), pix.height() error_state = dev.error_state icon = QPixmap(w, h) p = QPainter(icon) p.eraseRect(0, 0, icon.width(), icon.height()) p.drawPixmap(0, 0, pix) try: tech_type = dev.tech_type except AttributeError: tech_type = TECH_TYPE_NONE if dev.device_type == DEVICE_TYPE_FAX: p.drawPixmap(w - self.fax_icon.width(), 0, self.fax_icon) if error_state != ERROR_STATE_CLEAR: if tech_type in (TECH_TYPE_COLOR_INK, TECH_TYPE_MONO_INK): status_icon = getStatusOverlayIcon(error_state)[0] # ink else: status_icon = getStatusOverlayIcon(error_state)[1] # laser if status_icon is not None: p.drawPixmap(0, 0, status_icon) p.end() return icon def refreshDeviceList(self): global devices log.debug("Rescanning device list...") if 1: beginWaitCursor() self.updating = True self.setWindowTitle(self.__tr("Refreshing Device List - HP Device Manager")) self.statusBar().showMessage(self.__tr("Refreshing device list...")) self.cups_devices = device.getSupportedCUPSDevices(['hp', 'hpfax']) current = None try: adds = [] for d in self.cups_devices: if d not in device_list: adds.append(d) log.debug("Adds: %s" % ','.join(adds)) removals = [] for d in device_list: if d not in self.cups_devices: removals.append(d) log.debug("Removals (1): %s" % ','.join(removals)) updates = [] for d in device_list: if d not in adds and d not in removals: updates.append(d) log.debug("Updates: %s" % ','.join(updates)) for d in adds: log.debug("adding: %s" % d) # Note: Do not perform any I/O with this device. 
dev = device.Device(d, service=self.service, disable_dbus=False) if not dev.supported: log.debug("Unsupported model - removing device.") removals.append(d) continue icon = self.createDeviceIcon(dev) if dev.device_type == DEVICE_TYPE_FAX: DeviceViewItem(self.DeviceList, self.__tr("%s (Fax)"%dev.model_ui), icon, d) else: if dev.fax_type: DeviceViewItem(self.DeviceList, self.__tr("%s (Printer)"%dev.model_ui), icon, d) else: DeviceViewItem(self.DeviceList, dev.model_ui, icon, d) device_list[d] = dev log.debug("Removals (2): %s" % ','.join(removals)) removed_device=None for d in removals: removed_device = d index = self.DeviceList.count()-1 item = self.DeviceList.item(index) log.debug("removing: %s" % d) try: del device_list[d] except KeyError: pass while index >= 0 and item is not None: if item.device_uri == d: self.DeviceList.takeItem(index) break index -= 1 item = self.DeviceList.item(index) qApp.processEvents() self.DeviceList.updateGeometry() qApp.processEvents() if len(device_list): for tab in self.TabIndex: self.Tabs.setTabEnabled(tab, True) if self.cur_device_uri: index = 0 item = first_item = self.DeviceList.item(index) while item is not None: qApp.processEvents() if item.device_uri == self.cur_device_uri: current = item self.statusBar().showMessage(self.cur_device_uri) break index += 1 item = self.DeviceList.item(index) else: self.cur_device = None self.cur_device_uri = '' if self.cur_device is None: i = self.DeviceList.item(0) if i is not None: self.cur_device_uri = i.device_uri self.cur_device = device_list[self.cur_device_uri] current = i self.updatePrinterCombos() if self.cur_device_uri: #user_conf.set('last_used', 'device_uri',self.cur_device_uri) self.user_settings.last_used_device_uri = self.cur_device_uri self.user_settings.save() for d in updates + adds: if d not in removals: self.requestDeviceUpdate(device_list[d]) else: # no devices self.cur_device = None self.DeviceRefreshAction.setEnabled(False) self.RemoveDeviceAction.setEnabled(False) #self.DiagnoseQueueAction.setEnabled(False) self.updating = False self.statusBar().showMessage(self.__tr("Press F6 to refresh.")) for tab in self.TabIndex: self.Tabs.setTabEnabled(tab, False) endWaitCursor() dlg = NoDevicesDialog(self) dlg.exec_() finally: self.updating = False endWaitCursor() if current is not None: self.DeviceList.setCurrentItem(current) self.DeviceRefreshAction.setEnabled(True) if self.cur_device is not None: self.RemoveDeviceAction.setEnabled(True) #self.DiagnoseQueueAction.setEnabled(True) self.statusBar().showMessage(self.cur_device_uri) self.updateWindowTitle() def updateWindowTitle(self): if self.cur_device.device_type == DEVICE_TYPE_FAX: self.setWindowTitle(self.__tr("HP Device Manager - %s (Fax)"%self.cur_device.model_ui)) else: if self.cur_device.fax_type: self.setWindowTitle(self.__tr("HP Device Manager - %s (Printer)"%self.cur_device.model_ui)) else: self.setWindowTitle(self.__tr("HP Device Manager - %s"%self.cur_device.model_ui)) self.statusBar().showMessage(self.cur_device_uri) def updateDeviceByURI(self, device_uri): return self.updateDevice(self.findDeviceByURI(device_uri)) def updateDevice(self, dev=None, update_tab=True): """ Update the device icon and currently displayed tab. 
""" if dev is None: dev = self.cur_device log.debug("updateDevice(%s)" % dev.device_uri) item = self.findItem(dev) if item is not None: item.setIcon(QIcon(self.createDeviceIcon(dev))) if dev is self.cur_device and update_tab: self.updatePrinterCombos() self.updateCurrentTab() self.statusBar().showMessage(self.cur_device_uri) if self.cur_device.device_type == DEVICE_TYPE_PRINTER: self.Tabs.setTabText(self.Tabs.indexOf(self.Settings), QApplication.translate("MainWindow", "Print Settings", None)) self.Tabs.setTabText(self.Tabs.indexOf(self.Control), QApplication.translate("MainWindow", "Printer Control", None)) else: self.Tabs.setTabText(self.Tabs.indexOf(self.Settings), QApplication.translate("MainWindow", "Fax Settings", None)) self.Tabs.setTabText(self.Tabs.indexOf(self.Control), QApplication.translate("MainWindow", "Fax Control", None)) def DeviceList_currentChanged(self, i, j): if i is not None and not self.updating: self.cur_device_uri = self.DeviceList.currentItem().device_uri self.cur_device = device_list[self.cur_device_uri] #user_conf.set('last_used', 'device_uri', self.cur_device_uri) self.user_settings.last_used_device_uri = self.cur_device_uri self.user_settings.save() self.updateDevice() self.updateWindowTitle() def findItem(self, dev): if dev is None: dev = self.cur_device return self.findItemByURI(dev.device_uri) def findItemByURI(self, device_uri): index = 0 item = self.DeviceList.item(index) while item is not None: if item.device_uri == device_uri: return item index += 1 item = self.DeviceList.item(index) def findDeviceByURI(self, device_uri): try: return device_list[device_uri] except: return None def requestDeviceUpdate(self, dev=None, item=None): """ Submit device update request to update thread. """ if dev is None: dev = self.cur_device if dev is not None: dev.error_state = ERROR_STATE_REFRESHING self.updateDevice(dev, update_tab=False) self.sendMessage(dev.device_uri, '', EVENT_DEVICE_UPDATE_REQUESTED) def rescanDevices(self): """ Rescan and update all devices. 
""" if not self.updating: self.RefreshAllAction.setEnabled(False) try: self.refreshDeviceList() finally: self.RefreshAllAction.setEnabled(True) def callback(self): qApp.processEvents() # *********************************************************************************** # # DEVICE LIST RIGHT CLICK # # *********************************************************************************** def DeviceList_customContextMenuRequested(self, p): d = self.cur_device if d is not None: avail = d.device_state != DEVICE_STATE_NOT_FOUND and d.supported printer = d.device_type == DEVICE_TYPE_PRINTER and avail fax = d.fax_type > FAX_TYPE_NONE and prop.fax_build and d.device_type == DEVICE_TYPE_FAX and \ sys.hexversion >= 0x020300f0 and avail scan = d.scan_type > SCAN_TYPE_NONE and prop.scan_build and \ printer and self.user_settings.cmd_scan cpy = d.copy_type > COPY_TYPE_NONE and printer popup = QMenu(self) item = self.DeviceList.currentItem() if item is not None: if self.cur_device.error_state != ERROR_STATE_ERROR: if printer: popup.addAction(self.__tr("Print..."), lambda: self.contextMenuFunc(PrintDialog(self, self.cur_printer))) if scan: popup.addAction(self.__tr("Scan..."), lambda: self.contextMenuFunc(self.user_settings.cmd_scan)) #self.ScanButton_clicked) if cpy: popup.addAction(self.__tr("Make Copies..."), lambda: MakeCopiesDialog(self, self.cur_device_uri)) #self.MakeCopiesButton_clicked) else: # self.cur_device.device_type == DEVICE_TYPE_FAX: if fax: popup.addAction(self.__tr("Send Fax..."), lambda: self.contextMenuFunc(SendFaxDialog(self, self.cur_printer, self.cur_device_uri))) #self.SendFaxButton_clicked) popup.addSeparator() if not self.updating: popup.addAction(self.__tr("Refresh Device"), self.requestDeviceUpdate) #self.DeviceRefreshAction_activated) if not self.updating: popup.addAction(self.__tr("Refresh All"), self.rescanDevices) #self.RefreshAllAction_activated) popup.addSeparator() if self.DeviceList.viewMode() == QListView.IconMode: popup.addAction(self.__tr("View as List"), lambda: self.setDeviceListViewMode(QListView.ListMode)) else: popup.addAction(self.__tr("View as Icons"), lambda: self.setDeviceListViewMode(QListView.IconMode)) popup.exec_(self.DeviceList.mapToGlobal(p)) def contextMenuFunc(self, f): self.sendMessage('', '', EVENT_DEVICE_STOP_POLLING) try: try: f.exec_() # Dialog except AttributeError: beginWaitCursor() if f.split(':')[0] in ('http', 'https', 'file'): log.debug("Opening browser to: %s" % f) utils.openURL(f) else: self.runExternalCommand(f) QTimer.singleShot(1000, self.unlockClick) finally: self.sendMessage('', '', EVENT_DEVICE_START_POLLING) # *********************************************************************************** # # PRINTER NAME COMBOS # # *********************************************************************************** def updatePrinterCombos(self): self.PrintSettingsPrinterNameCombo.clear() self.PrintControlPrinterNameCombo.clear() if self.cur_device is not None and \ self.cur_device.supported: self.cur_device.updateCUPSPrinters() for c in self.cur_device.cups_printers: self.PrintSettingsPrinterNameCombo.insertItem(0, c) self.PrintControlPrinterNameCombo.insertItem(0, c) self.cur_printer = to_unicode(self.PrintSettingsPrinterNameCombo.currentText()) def PrintSettingsPrinterNameCombo_activated(self, s): self.cur_printer = to_unicode(s) self.updateCurrentTab() def PrintControlPrinterNameCombo_activated(self, s): self.cur_printer = to_unicode(s) self.updateCurrentTab() # 
*********************************************************************************** # # FUNCTIONS/ACTION TAB # # *********************************************************************************** def initActionsTab(self): self.click_lock = None self.ActionsList.setIconSize(QSize(32, 32)) self.ActionsList.itemClicked["QListWidgetItem *"].connect(self.ActionsList_clicked) self.ActionsList.itemDoubleClicked["QListWidgetItem *"].connect(self.ActionsList_clicked) def updateActionsTab(self): beginWaitCursor() try: self.ActionsList.clear() d = self.cur_device if d is not None: avail = d.device_state != DEVICE_STATE_NOT_FOUND and d.supported fax = d.fax_type > FAX_TYPE_NONE and prop.fax_build and d.device_type == DEVICE_TYPE_FAX and \ sys.hexversion >= 0x020300f0 and avail printer = d.device_type == DEVICE_TYPE_PRINTER and avail scan = d.scan_type > SCAN_TYPE_NONE and prop.scan_build and \ printer and self.user_settings.cmd_scan cpy = d.copy_type > COPY_TYPE_NONE and printer req_plugin = d.plugin == PLUGIN_REQUIRED opt_plugin = d.plugin == PLUGIN_OPTIONAL try: back_end, is_hp, bus, model, serial, dev_file, host, zc, port = \ device.parseDeviceURI(self.cur_device_uri) except Error: return hplip_conf = configparser.ConfigParser() fp = open("/etc/hp/hplip.conf", "r") hplip_conf.readfp(fp) fp.close() try: plugin_installed = utils.to_bool(hplip_conf.get("hplip", "plugin")) except configparser.NoOptionError: plugin_installed = False if d.plugin != PLUGIN_NONE: if req_plugin and plugin_installed: x = self.__tr("Download and install<br>required plugin (already installed).") elif req_plugin and not plugin_installed: x = self.__tr("Download and install<br>required plugin (needs installation).") elif opt_plugin and plugin_installed: x = self.__tr("Download and install<br>optional plugin (already installed).") elif opt_plugin and not plugin_installed: x = self.__tr("Download and install<br>optional plugin (needs installation).") else: x = '' # TODO: Cache this data structure # -- add a field that specifies if the icon should always show, or only when device is avail. 
# TODO: Tooltips # TODO: Right-click icon/list view menu self.ICONS = [ # PRINTER (lambda : printer, self.__tr("Print"), # Text "print", # Icon self.__tr("Print documents or files."), # Tooltip lambda : PrintDialog(self, self.cur_printer)), # command/action (lambda :scan, self.__tr("Scan"), "scan", self.__tr("Scan a document, image, or photograph.<br>"), self.user_settings.cmd_scan), (lambda : cpy, self.__tr("Make Copies"), "makecopies", self.__tr("Make copies on the device controlled by the PC.<br>"), lambda : MakeCopiesDialog(self, self.cur_device_uri)), # FAX (lambda: fax, self.__tr("Send Fax"), "fax", self.__tr("Send a fax from the PC."), lambda : SendFaxDialog(self, self.cur_printer, self.cur_device_uri)), (lambda: fax, self.__tr("Fax Setup"), "fax_setup", self.__tr("Fax support must be setup before you can send faxes."), lambda : FaxSetupDialog(self, self.cur_device_uri)), (lambda: fax and self.user_settings.cmd_fab, self.__tr("Fax Address Book"), "fab", self.__tr("Setup fax phone numbers to use when sending faxes from the PC."), self.user_settings.cmd_fab), # SETTINGS/TOOLS (lambda : d.power_settings != POWER_SETTINGS_NONE and avail, self.__tr("Device Settings"), "settings", self.__tr("Your device has special device settings.<br>You may alter these settings here."), lambda : DeviceSetupDialog(self, self.cur_device_uri)), (lambda : printer, self.__tr("Print Test Page"), "testpage", self.__tr("Print a test page to test the setup of your printer."), lambda : PrintTestPageDialog(self, self.cur_printer)), (lambda : True, self.__tr("View Printer and Device Information"), "cups", self.__tr("View information about the device and all its CUPS queues."), lambda : InfoDialog(self, self.cur_device_uri)), (lambda: printer and d.align_type != ALIGN_TYPE_NONE, self.__tr("Align Cartridges (Print Heads)"), "align", self.__tr("This will improve the quality of output when a new cartridge is installed."), lambda : AlignDialog(self, self.cur_device_uri)), (lambda: printer and d.clean_type != CLEAN_TYPE_NONE, self.__tr("Clean Printheads"), "clean", self.__tr("You only need to perform this action if you are<br>having problems with poor printout quality due to clogged ink nozzles."), lambda : CleanDialog(self, self.cur_device_uri)), (lambda: printer and d.color_cal_type != COLOR_CAL_TYPE_NONE and d.color_cal_type == COLOR_CAL_TYPE_TYPHOON, self.__tr("Color Calibration"), "colorcal", self.__tr("Use this procedure to optimimize your printer's color output<br>(requires glossy photo paper)."), lambda : ColorCalDialog(self, self.cur_device_uri)), (lambda: printer and d.color_cal_type != COLOR_CAL_TYPE_NONE and d.color_cal_type != COLOR_CAL_TYPE_TYPHOON, self.__tr("Color Calibration"), "colorcal", self.__tr("Use this procedure to optimimize your printer's color output."), lambda : ColorCalDialog(self, self.cur_device_uri)), (lambda: printer and d.linefeed_cal_type != LINEFEED_CAL_TYPE_NONE, self.__tr("Line Feed Calibration"), "linefeed_cal", self.__tr("Use line feed calibration to optimize print quality<br>(to remove gaps in the printed output)."), lambda : LineFeedCalDialog(self, self.cur_device_uri)), (lambda: printer and d.pq_diag_type != PQ_DIAG_TYPE_NONE, self.__tr("Print Diagnostic Page"), "pq_diag", self.__tr("Your printer can print a test page <br>to help diagnose print quality problems."), lambda : PQDiagDialog(self, self.cur_device_uri)), (lambda: printer and d.wifi_config >= WIFI_CONFIG_USB_XML and bus == 'usb', self.__tr("Wireless/wifi setup using USB"), "wireless", self.__tr("Configure your wireless 
capable printer using a temporary USB connection."), 'hp-wificonfig -d %s' % self.cur_device_uri), # FIRMWARE (lambda : printer and d.fw_download , self.__tr("Download Firmware"), "firmware", self.__tr("Download firmware to your printer <br>(required on some devices after each power-up)."), lambda : FirmwareDialog(self, self.cur_device_uri)), # PLUGIN (lambda : printer and req_plugin, self.__tr("Install Required Plugin"), "plugin", x, lambda : PluginInstall(self, d.plugin, plugin_installed)), (lambda : printer and opt_plugin, self.__tr("Install Optional Plugin"), "plugin", x, lambda : PluginInstall(self, d.plugin, plugin_installed)), # EWS (lambda : printer and d.embedded_server_type > EWS_NONE and bus == 'net', self.__tr("Open printer's web page in a browser"), "ews", self.__tr("The printer's web page has supply, status, and other information."), openEWS(host, zc)), # HELP/WEBSITE (lambda : True, self.__tr("Visit HPLIP Support Website"), "hp_logo", self.__tr("Visit HPLIP Support Website."), self.support), (lambda : True, self.__tr("Help"), "help", self.__tr("View HPLIP help."), self.docs), ] if not self.func_icons_cached: for filte, text, icon, tooltip, cmd in self.ICONS: self.func_icons[icon] = load_pixmap(icon, '32x32') self.func_icons_cached = True for fltr, text, icon, tooltip, cmd in self.ICONS: if fltr is not None: if not fltr(): continue FuncViewItem(self.ActionsList, text, self.func_icons[icon], tooltip, cmd) finally: endWaitCursor() def ActionsList_clicked(self, item): if item is not None and self.click_lock is not item: self.click_lock = item if item.cmd and isinstance(item.cmd, collectionsAbc.Callable): dlg = item.cmd() self.sendMessage('', '', EVENT_DEVICE_STOP_POLLING) try: dlg.exec_() finally: self.sendMessage('', '', EVENT_DEVICE_START_POLLING) else: beginWaitCursor() if item.cmd.split(':')[0] in ('http', 'https', 'file'): log.debug("Opening browser to: %s" % item.cmd) utils.openURL(item.cmd) else: self.runExternalCommand(str(item.cmd)) QTimer.singleShot(1000, self.unlockClick) def unlockClick(self): self.click_lock = None endWaitCursor() def ActionsList_customContextMenuRequested(self, p): print(p) #pass # *********************************************************************************** # # STATUS TAB # # *********************************************************************************** def initStatusTab(self): self.StatusTable.setColumnCount(0) self.status_headers = [self.__tr(""), self.__tr("Status"), self.__tr("Date and Time"), self.__tr("Code"), self.__tr("Job ID"), self.__tr("Description")] def updateStatusTab(self): self.updateStatusLCD() self.updateStatusTable() def updateStatusLCD(self): if self.cur_device is not None and \ self.cur_device.hist and \ self.cur_device.supported: dq = self.cur_device.dq if dq.get('panel', 0) == 1: line1 = dq.get('panel-line1', '') line2 = dq.get('panel-line2', '') else: try: line1 = device.queryString(self.cur_device.hist[0].event_code) except (AttributeError, TypeError): line1 = '' line2 = '' self.drawStatusLCD(line1, line2) else: if self.cur_device.status_type == STATUS_TYPE_NONE: self.drawStatusLCD(self.__tr("Status information not"), self.__tr("available for this device.")) elif not self.cur_device.supported: self.drawStatusLCD(self.__tr("Device not supported.")) elif not self.cur_device.hist: self.drawStatusLCD(self.__tr("No status history available.")) else: self.drawStatusLCD() def drawStatusLCD(self, line1='', line2=''): pm = load_pixmap('panel_lcd', 'other') p = QPainter() p.begin(pm) p.setPen(QColor(0, 0, 0)) 
p.setFont(self.font()) x, y_line1, y_line2 = 10, 17, 33 # TODO: Scroll long lines if line1: p.drawText(x, y_line1, line1) if line2: p.drawText(x, y_line2, line2) p.end() self.LCD.setPixmap(pm) def updateStatusTable(self): self.StatusTable.clear() flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled row = 0 hist = self.cur_device.hist[:] if hist: self.StatusTable.setRowCount(len(hist)) self.StatusTable.setColumnCount(len(self.status_headers)) self.StatusTable.setHorizontalHeaderLabels(self.status_headers) self.StatusTable.verticalHeader().hide() self.StatusTable.horizontalHeader().show() hist.reverse() row = len(hist)-1 for e in hist: if e is None: continue ess = device.queryString(e.event_code, 0) esl = device.queryString(e.event_code, 1) if row == 0: desc = self.__tr("(most recent)") else: desc = getTimeDeltaDesc(e.timedate) dt = QDateTime() dt.setTime_t(int(e.timedate)) #, Qt.LocalTime) # TODO: In Qt4.x, use QLocale.toString(date, format) tt = str("%s %s"%(dt.toString(),desc)) if e.job_id: job_id = to_unicode(e.job_id) else: job_id = to_unicode('') error_state = STATUS_TO_ERROR_STATE_MAP.get(e.event_code, ERROR_STATE_CLEAR) tech_type = self.cur_device.tech_type if tech_type in (TECH_TYPE_COLOR_INK, TECH_TYPE_MONO_INK): status_pix = getStatusListIcon(error_state)[0] # ink else: status_pix = getStatusListIcon(error_state)[1] # laser event_code = to_unicode(e.event_code) i = QTableWidgetItem(QIcon(status_pix), self.__tr("")) i.setFlags(flags) self.StatusTable.setItem(row, 0, i) for col, t in [(1, ess), (2, tt), (3, event_code), (4, job_id), (5, esl)]: i = QTableWidgetItem(str(t)) i.setFlags(flags) self.StatusTable.setItem(row, col, i) row -= 1 self.StatusTable.resizeColumnsToContents() self.StatusTable.setColumnWidth(0, 24) else: self.StatusTable.setRowCount(1) self.StatusTable.setColumnCount(2) self.StatusTable.setHorizontalHeaderLabels(["", ""]) self.StatusTable.verticalHeader().hide() self.StatusTable.horizontalHeader().hide() flags = Qt.ItemIsEnabled pixmap = getStatusListIcon(ERROR_STATE_ERROR)[0] i = QTableWidgetItem(QIcon(pixmap), self.__tr("")) i.setFlags(flags) self.StatusTable.setItem(row, 0, i) i = QTableWidgetItem(self.__tr("Status information not available for this device.")) i.setFlags(flags) self.StatusTable.setItem(0, 1, i) self.StatusTable.resizeColumnsToContents() self.StatusTable.setColumnWidth(0, 24) # *********************************************************************************** # # SUPPLIES TAB # # *********************************************************************************** def initSuppliesTab(self): self.pix_battery = load_pixmap('battery', '16x16') yellow = "#ffff00" light_yellow = "#ffffcc" cyan = "#00ffff" light_cyan = "#ccffff" magenta = "#ff00ff" light_magenta = "#ffccff" black = "#000000" blue = "#0000ff" gray = "#808080" dark_gray = "#a9a9a9" light_gray = "#c0c0c0" red = "#ff0000" self.TYPE_TO_PIX_MAP = { AGENT_TYPE_UNSPECIFIED : [black], AGENT_TYPE_BLACK: [black], AGENT_TYPE_MATTE_BLACK : [black], AGENT_TYPE_PHOTO_BLACK : [dark_gray], AGENT_TYPE_BLACK_B8800: [black], AGENT_TYPE_CMY: [cyan, magenta, yellow], AGENT_TYPE_KCM: [light_cyan, light_magenta, light_yellow], AGENT_TYPE_GGK: [dark_gray], AGENT_TYPE_YELLOW: [yellow], AGENT_TYPE_MAGENTA: [magenta], AGENT_TYPE_CYAN : [cyan], AGENT_TYPE_CYAN_LOW: [light_cyan], AGENT_TYPE_YELLOW_LOW: [light_yellow], AGENT_TYPE_MAGENTA_LOW: [light_magenta], AGENT_TYPE_BLUE: [blue], AGENT_TYPE_KCMY_CM: [yellow, cyan, magenta], AGENT_TYPE_LC_LM: [light_cyan, light_magenta], #AGENT_TYPE_Y_M: [yellow, magenta], 
#AGENT_TYPE_C_K: [black, cyan], AGENT_TYPE_LG_PK: [light_gray, dark_gray], AGENT_TYPE_LG: [light_gray], AGENT_TYPE_G: [gray], AGENT_TYPE_DG: [dark_gray], AGENT_TYPE_PG: [light_gray], AGENT_TYPE_C_M: [cyan, magenta], AGENT_TYPE_K_Y: [black, yellow], AGENT_TYPE_LC: [light_cyan], AGENT_TYPE_RED : [red], } self.supplies_headers = [self.__tr(""), self.__tr("Description"), self.__tr("HP Part No."), self.__tr("Approx. Level"), self.__tr("Status")] def updateSuppliesTab(self): beginWaitCursor() flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled try: self.SuppliesTable.clear() self.SuppliesTable.setRowCount(0) self.SuppliesTable.setColumnCount(0) if self.cur_device is not None and \ self.cur_device.supported and \ self.cur_device.status_type != STATUS_TYPE_NONE and \ self.cur_device.device_state != DEVICE_STATE_NOT_FOUND: self.cur_device.sorted_supplies = [] a = 1 while True: try: agent_type = int(self.cur_device.dq['agent%d-type' % a]) agent_kind = int(self.cur_device.dq['agent%d-kind' % a]) agent_sku = self.cur_device.dq['agent%d-sku' % a] except KeyError: break else: self.cur_device.sorted_supplies.append((a, agent_kind, agent_type, agent_sku)) a += 1 self.cur_device.sorted_supplies.sort(key=utils.cmp_to_key(utils.levelsCmp)) self.SuppliesTable.setRowCount(len(self.cur_device.sorted_supplies)) self.SuppliesTable.setColumnCount(len(self.supplies_headers)) self.SuppliesTable.setHorizontalHeaderLabels(self.supplies_headers) self.SuppliesTable.verticalHeader().hide() self.SuppliesTable.horizontalHeader().show() self.SuppliesTable.setIconSize(QSize(100, 18)) for row, x in enumerate(self.cur_device.sorted_supplies): a, agent_kind, agent_type, agent_sku = x try: agent_level = int(self.cur_device.dq['agent%d-level' % a]) agent_desc = self.cur_device.dq['agent%d-desc' % a] agent_health_desc = self.cur_device.dq['agent%d-health-desc' % a] except KeyError: break # Bar graph level level_pixmap = None if agent_kind in (AGENT_KIND_SUPPLY, #AGENT_KIND_HEAD, AGENT_KIND_HEAD_AND_SUPPLY, AGENT_KIND_TONER_CARTRIDGE, AGENT_KIND_MAINT_KIT, AGENT_KIND_ADF_KIT, AGENT_KIND_INT_BATTERY, AGENT_KIND_DRUM_KIT, ): level_pixmap = self.createStatusLevelGraphic(agent_level, agent_type) # Color icon pixmap = None if agent_kind in (AGENT_KIND_SUPPLY, AGENT_KIND_HEAD, AGENT_KIND_HEAD_AND_SUPPLY, AGENT_KIND_TONER_CARTRIDGE, #AGENT_KIND_MAINT_KIT, #AGENT_KIND_ADF_KIT, AGENT_KIND_INT_BATTERY, #AGENT_KIND_DRUM_KIT, ): pixmap = self.getStatusIcon(agent_kind, agent_type) if pixmap is not None: i = QTableWidgetItem(QIcon(pixmap), self.__tr("")) i.setFlags(flags) self.SuppliesTable.setItem(row, 0, i) for col, t in [(1, agent_desc), (2, agent_sku), (4, agent_health_desc)]: i = QTableWidgetItem(str(t)) i.setFlags(flags) self.SuppliesTable.setItem(row, col, i) if level_pixmap is not None: i = QTableWidgetItem(QIcon(level_pixmap), self.__tr("")) i.setFlags(flags) self.SuppliesTable.setItem(row, 3, i) self.SuppliesTable.resizeColumnsToContents() self.SuppliesTable.setColumnWidth(0, 24) self.SuppliesTable.setColumnWidth(3, 120) else: # No supplies info log.warning("Supplies information not available for this device.") flags = Qt.ItemIsEnabled self.SuppliesTable.setRowCount(1) self.SuppliesTable.setColumnCount(2) self.SuppliesTable.setHorizontalHeaderLabels(["", ""]) self.SuppliesTable.verticalHeader().hide() self.SuppliesTable.horizontalHeader().hide() i = QTableWidgetItem(self.__tr("Supplies information not available for this device.")) i.setFlags(flags) self.SuppliesTable.setItem(0, 1, i) pixmap = getStatusListIcon(ERROR_STATE_ERROR)[0] i = 
QTableWidgetItem(QIcon(pixmap), self.__tr("")) i.setFlags(flags) self.SuppliesTable.setItem(0, 0, i) self.SuppliesTable.resizeColumnsToContents() self.SuppliesTable.setColumnWidth(0, 24) finally: endWaitCursor() def getStatusIcon(self, agent_kind, agent_type): if agent_kind in (AGENT_KIND_SUPPLY, AGENT_KIND_HEAD, AGENT_KIND_HEAD_AND_SUPPLY, AGENT_KIND_TONER_CARTRIDGE): map = self.TYPE_TO_PIX_MAP[agent_type] if isinstance(map, list): map_len = len(map) pix = QPixmap(16, 16) pix.fill(QColor(0, 0, 0, 0)) p = QPainter() p.begin(pix) p.setRenderHint(QPainter.Antialiasing) if map_len == 1: p.setPen(QColor(map[0])) p.setBrush(QBrush(QColor(map[0]), Qt.SolidPattern)) p.drawPie(2, 2, 10, 10, 0, 5760) elif map_len == 2: p.setPen(QColor(map[0])) p.setBrush(QBrush(QColor(map[0]), Qt.SolidPattern)) p.drawPie(2, 4, 8, 8, 0, 5760) p.setPen(QColor(map[1])) p.setBrush(QBrush(QColor(map[1]), Qt.SolidPattern)) p.drawPie(6, 4, 8, 8, 0, 5760) elif map_len == 3: p.setPen(QColor(map[2])) p.setBrush(QBrush(QColor(map[2]), Qt.SolidPattern)) p.drawPie(6, 6, 8, 8, 0, 5760) p.setPen(QColor(map[1])) p.setBrush(QBrush(QColor(map[1]), Qt.SolidPattern)) p.drawPie(2, 6, 8, 8, 0, 5760) p.setPen(QColor(map[0])) p.setBrush(QBrush(QColor(map[0]), Qt.SolidPattern)) p.drawPie(4, 2, 8, 8, 0, 5760) p.end() return pix else: return map elif agent_kind == AGENT_KIND_INT_BATTERY: return self.pix_battery def createStatusLevelGraphic(self, percent, agent_type, w=100, h=18): if percent: fw = int(w/100*percent) else: fw = 0 px = QPixmap(w, h) px.fill(QColor(0, 0, 0, 0)) pp = QPainter() pp.begin(px) pp.setRenderHint(QPainter.Antialiasing) pp.setPen(Qt.black) map = self.TYPE_TO_PIX_MAP[agent_type] map_len = len(map) if map_len == 1 or map_len > 3: pp.fillRect(0, 0, fw, h, QBrush(QColor(map[0]))) elif map_len == 2: h2 = int(h / 2) pp.fillRect(0, 0, fw, h2, QBrush(QColor(map[0]))) pp.fillRect(0, h2, fw, h, QBrush(QColor(map[1]))) elif map_len == 3: h3 = int(h / 3) h23 = int(2 * h3) pp.fillRect(0, 0, fw, h3, QBrush(QColor(map[0]))) pp.fillRect(0, h3, fw, h23, QBrush(QColor(map[1]))) pp.fillRect(0, h23, fw, h, QBrush(QColor(map[2]))) # draw black frame pp.drawRect(0, 0, w, h) if percent > 75 and agent_type in \ (AGENT_TYPE_BLACK, AGENT_TYPE_UNSPECIFIED, AGENT_TYPE_BLUE): pp.setPen(Qt.white) # 75% ticks w1 = int(3 * w / 4) h6 = int(h / 6) pp.drawLine(w1, 0, w1, h6) pp.drawLine(w1, h, w1, h-h6) if percent > 50 and agent_type in \ (AGENT_TYPE_BLACK, AGENT_TYPE_UNSPECIFIED, AGENT_TYPE_BLUE): pp.setPen(Qt.white) # 50% ticks w2 = int(w / 2) h4 = int(h / 4) pp.drawLine(w2, 0, w2, h4) pp.drawLine(w2, h, w2, h-h4) if percent > 25 and agent_type in \ (AGENT_TYPE_BLACK, AGENT_TYPE_UNSPECIFIED, AGENT_TYPE_BLUE): pp.setPen(Qt.white) # 25% ticks w4 = int(w / 4) pp.drawLine(w4, 0, w4, h6) pp.drawLine(w4, h, w4, h-h6) pp.end() return px # *********************************************************************************** # # PRINTER SETTINGS TAB # # *********************************************************************************** def initPrintSettingsTab(self): pass def updatePrintSettingsTab(self): beginWaitCursor() try: if self.cur_device.device_type == DEVICE_TYPE_PRINTER: self.PrintSettingsPrinterNameLabel.setText(self.__tr("Printer Name:")) else: self.PrintSettingsPrinterNameLabel.setText(self.__tr("Fax Name:")) self.PrintSettingsToolbox.updateUi(self.cur_device, self.cur_printer) finally: endWaitCursor() # *********************************************************************************** # # PRINTER CONTROL TAB # # 
*********************************************************************************** def initPrintControlTab(self): self.JOB_STATES = { cups.IPP_JOB_PENDING : self.__tr("Pending"), cups.IPP_JOB_HELD : self.__tr("On hold"), cups.IPP_JOB_PROCESSING : self.__tr("Printing"), cups.IPP_JOB_STOPPED : self.__tr("Stopped"), cups.IPP_JOB_CANCELLED : self.__tr("Canceled"), cups.IPP_JOB_ABORTED : self.__tr("Aborted"), cups.IPP_JOB_COMPLETED : self.__tr("Completed"), } self.CancelJobButton.setIcon(QIcon(load_pixmap('cancel', '16x16'))) self.RefreshButton.setIcon(QIcon(load_pixmap('refresh', '16x16'))) self.JOB_STATE_ICONS = { cups.IPP_JOB_PENDING: QIcon(load_pixmap("busy", "16x16")), cups.IPP_JOB_HELD : QIcon(load_pixmap("busy", "16x16")), cups.IPP_JOB_PROCESSING : QIcon(load_pixmap("print", "16x16")), cups.IPP_JOB_STOPPED : QIcon(load_pixmap("warning", "16x16")), cups.IPP_JOB_CANCELLED : QIcon(load_pixmap("warning", "16x16")), cups.IPP_JOB_ABORTED : QIcon(load_pixmap("error", "16x16")), cups.IPP_JOB_COMPLETED : QIcon(load_pixmap("ok", "16x16")), } self.StartStopButton.clicked.connect(self.StartStopButton_clicked) self.AcceptRejectButton.clicked.connect(self.AcceptRejectButton_clicked) self.SetDefaultButton.clicked.connect(self.SetDefaultButton_clicked) self.CancelJobButton.clicked.connect(self.CancelJobButton_clicked) self.RefreshButton.clicked.connect(self.RefreshButton_clicked) self.job_headers = [self.__tr("Status"), self.__tr("Title/Description"), self.__tr("Job ID")] # TODO: Check queues at startup and send events if stopped or rejecting def initUpgradeTab(self): self.InstallLatestButton.clicked.connect(self.InstallLatestButton_clicked) self.InstallLatestButton_lock = False def InstallLatestButton_clicked(self): if self.InstallLatestButton_lock is True: return if self.Is_autoInstaller_distro: self.InstallLatestButton.setEnabled(False) terminal_cmd = utils.get_terminal() if terminal_cmd is not None and utils.which("hp-upgrade"): cmd = terminal_cmd + " 'hp-upgrade -w'" os_utils.execute(cmd) else: log.error("Failed to run hp-upgrade command from terminal =%s "%terminal_cmd) self.InstallLatestButton.setEnabled(True) else: self.InstallLatestButton_lock = True utils.openURL("http://hplipopensource.com/hplip-web/install/manual/index.html") QTimer.singleShot(1000, self.InstallLatestButton_unlock) def InstallLatestButton_unlock(self): self.InstallLatestButton_lock = False def CancelJobButton_clicked(self): item = self.JobTable.currentItem() if item is not None: job_id, ok = value_int(item.data(Qt.UserRole)) if ok and job_id: self.cur_device.cancelJob(job_id) QTimer.singleShot(1000, self.updatePrintControlTab) def RefreshButton_clicked(self): self.updatePrintControlTab() def updateHPLIPupgrade(self): self.initUpgradeTab() def updatePrintControlTab(self): if self.cur_device.device_type == DEVICE_TYPE_PRINTER: self.PrintControlPrinterNameLabel.setText(self.__tr("Printer Name:")) self.groupBox.setTitle(QApplication.translate("MainWindow", "Printer Queue Control", None)) else: self.PrintControlPrinterNameLabel.setText(self.__tr("Fax Name:")) self.groupBox.setTitle(QApplication.translate("MainWindow", "Fax Queue Control", None)) self.JobTable.clear() self.JobTable.setRowCount(0) self.JobTable.setColumnCount(0) self.updatePrintController() flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled jobs = cups.getJobs() num_jobs = 0 for j in jobs: if j.dest == self.cur_printer: num_jobs += 1 if num_jobs: self.CancelJobButton.setEnabled(True) self.JobTable.setRowCount(num_jobs) 
self.JobTable.setColumnCount(len(self.job_headers)) self.JobTable.setHorizontalHeaderLabels(self.job_headers) for row, j in enumerate(jobs): if j.dest == self.cur_printer: i = QTableWidgetItem(self.JOB_STATE_ICONS[j.state], self.JOB_STATES[j.state]) i.setData(Qt.UserRole, j.id) i.setFlags(flags) self.JobTable.setItem(row, 0, i) i = QTableWidgetItem(j.title) i.setFlags(flags) self.JobTable.setItem(row, 1, i) i = QTableWidgetItem(to_unicode(j.id)) i.setFlags(flags) self.JobTable.setItem(row, 2, i) self.JobTable.setCurrentCell(0, 0) self.JobTable.resizeColumnsToContents() else: self.CancelJobButton.setEnabled(False) def getPrinterState(self): self.printer_state = cups.IPP_PRINTER_STATE_IDLE self.printer_accepting = True cups_printers = cups.getPrinters() for p in cups_printers: if p.name == self.cur_printer: self.printer_state = p.state self.printer_accepting = p.accepting break def updatePrintController(self): # default printer self.SetDefaultButton.setText(self.__tr("Set as Default")) default_printer = cups.getDefaultPrinter() if self.cur_device.device_type == DEVICE_TYPE_PRINTER: device_string = "Printer" else: device_string = "Fax" if default_printer == self.cur_printer: self.SetDefaultLabel.setText(self.__tr("Default %s"%device_string)) self.SetDefaultIcon.setPixmap(load_pixmap("ok", "16x16")) self.SetDefaultButton.setEnabled(False) else: self.SetDefaultLabel.setText(self.__tr("Not Default %s"%device_string)) self.SetDefaultIcon.setPixmap(load_pixmap("info", "16x16")) self.SetDefaultButton.setEnabled(True) self.getPrinterState() # start/stop if self.printer_state == cups.IPP_PRINTER_STATE_IDLE: self.StartStopLabel.setText(self.__tr("Started/Idle")) self.StartStopIcon.setPixmap(load_pixmap("idle", "16x16")) self.StartStopButton.setText(self.__tr("Stop %s"%device_string)) elif self.printer_state == cups.IPP_PRINTER_STATE_PROCESSING: self.StartStopLabel.setText(self.__tr("Started/Processing")) self.StartStopIcon.setPixmap(load_pixmap("busy", "16x16")) self.StartStopButton.setText(self.__tr("Stop %s"%device_string)) else: self.StartStopLabel.setText(self.__tr("Stopped")) self.StartStopIcon.setPixmap(load_pixmap("warning", "16x16")) self.StartStopButton.setText(self.__tr("Start %s"%device_string)) # reject/accept if self.printer_accepting: self.AcceptRejectLabel.setText(self.__tr("Accepting Jobs")) self.AcceptRejectIcon.setPixmap(load_pixmap("idle", "16x16")) self.AcceptRejectButton.setText(self.__tr("Reject Jobs")) else: self.AcceptRejectLabel.setText(self.__tr("Rejecting Jobs")) self.AcceptRejectIcon.setPixmap(load_pixmap("warning", "16x16")) self.AcceptRejectButton.setText(self.__tr("Accept Jobs")) def StartStopButton_clicked(self): beginWaitCursor() try: if self.printer_state in (cups.IPP_PRINTER_STATE_IDLE, cups.IPP_PRINTER_STATE_PROCESSING): result, result_str = cups.cups_operation(cups.stop, GUI_MODE, 'qt4', self, self.cur_printer) if result == cups.IPP_OK: if self.cur_device.device_type == DEVICE_TYPE_PRINTER: e = EVENT_PRINTER_QUEUE_STOPPED else: e = EVENT_FAX_QUEUE_STOPPED else: result, result_str = cups.cups_operation(cups.start, GUI_MODE, 'qt4', self, self.cur_printer) if result == cups.IPP_OK: if self.cur_device.device_type == DEVICE_TYPE_PRINTER: e = EVENT_PRINTER_QUEUE_STARTED else: e = EVENT_FAX_QUEUE_STARTED if result == cups.IPP_OK: self.updatePrintController() self.cur_device.sendEvent(e, self.cur_printer) else: FailureUI(self, self.__tr("<b>Start/Stop printer queue operation fails. 
</b><p>Error : %s"%result_str)) cups.releaseCupsInstance() finally: endWaitCursor() def AcceptRejectButton_clicked(self): beginWaitCursor() try: if self.printer_accepting: result, result_str = cups.cups_operation(cups.reject, GUI_MODE, 'qt4', self, self.cur_printer) if result == cups.IPP_OK: if self.cur_device.device_type == DEVICE_TYPE_PRINTER: e = EVENT_PRINTER_QUEUE_REJECTING_JOBS else: e = EVENT_FAX_QUEUE_REJECTING_JOBS else: result, result_str = cups.cups_operation(cups.accept, GUI_MODE, 'qt4', self, self.cur_printer) if result == cups.IPP_OK: if self.cur_device.device_type == DEVICE_TYPE_PRINTER: e = EVENT_PRINTER_QUEUE_ACCEPTING_JOBS else: e = EVENT_FAX_QUEUE_ACCEPTING_JOBS if result == cups.IPP_OK: self.updatePrintController() self.cur_device.sendEvent(e, self.cur_printer) else: FailureUI(self, self.__tr("<b>Accept/Reject printer queue operation fails.</b><p>Error : %s"%result_str)) cups.releaseCupsInstance() finally: endWaitCursor() def SetDefaultButton_clicked(self): beginWaitCursor() try: result, result_str = cups.cups_operation(cups.setDefaultPrinter, GUI_MODE, 'qt4', self, self.cur_printer.encode('utf8')) if result != cups.IPP_OK: FailureUI(self, self.__tr("<b>Set printer queue as default operation fails. </b><p>Error : %s"%result_str)) cups.releaseCupsInstance() else: self.updatePrintController() if self.cur_device.device_type == DEVICE_TYPE_PRINTER: e = EVENT_PRINTER_QUEUE_SET_AS_DEFAULT else: e = EVENT_FAX_QUEUE_SET_AS_DEFAULT self.cur_device.sendEvent(e, self.cur_printer) finally: endWaitCursor() def cancelCheckedJobs(self): beginWaitCursor() try: item = self.JobTable.firstChild() while item is not None: if item.isOn(): self.cur_device.cancelJob(item.job_id) item = item.nextSibling() finally: endWaitCursor() self.updatePrintControlTab() # *********************************************************************************** # # EXIT/CHILD CLEANUP # # *********************************************************************************** def closeEvent(self, event): self.cleanup() event.accept() def cleanup(self): self.cleanupChildren() def cleanupChildren(self): log.debug("Cleaning up child processes.") try: os.waitpid(-1, os.WNOHANG) except OSError: pass def quit(self): self.cleanupChildren() cups.releaseCupsInstance() self.close() # *********************************************************************************** # # DEVICE SETTINGS PLUGIN # # *********************************************************************************** # *********************************************************************************** # # SETTINGS DIALOG # # *********************************************************************************** def PreferencesAction_activated(self, tab_to_show=0): dlg = SettingsDialog(self) dlg.TabWidget.setCurrentIndex(tab_to_show) if dlg.exec_() == QDialog.Accepted: self.user_settings.load() if self.cur_device is not None: self.cur_device.sendEvent(EVENT_USER_CONFIGURATION_CHANGED, self.cur_printer) # *********************************************************************************** # # SETUP/REMOVE # # *********************************************************************************** def SetupDeviceAction_activated(self): if utils.which('hp-setup'): cmd = 'hp-setup --gui' else: cmd = 'python ./setup.py --gui' log.debug(cmd) utils.run(cmd) self.rescanDevices() self.updatePrinterCombos() def RemoveDeviceAction_activated(self): if utils.which('hp-setup'): cmd = 'hp-setup --gui --remove' else: cmd = 'python ./setup.py --gui --remove' if self.cur_device_uri is not None: 
cmd += ' --device=%s' % self.cur_device_uri log.debug(cmd) utils.run(cmd) self.rescanDevices() self.updatePrinterCombos() def DiagnoseQueueAction_activated(self): if utils.which('hp-diagnose_queues'): cmd= 'hp-diagnose_queues --gui' else: cmd= 'python ./diagnose_queues.py --gui' log.debug(cmd) # ok, output = utils.run(cmd) os_utils.execute(cmd) def DiagnoseHPLIP_activated(self): if utils.which('hp-doctor'): cmd = 'hp-doctor -i -w' else: cmd = 'python ./doctor.py -i -w' terminal_cmd = utils.get_terminal() if terminal_cmd: cmd = terminal_cmd + " '%s'"%cmd os_utils.execute(cmd) # *********************************************************************************** # # MISC # # *********************************************************************************** def runExternalCommand(self, cmd, macro_char='%'): beginWaitCursor() try: if len(cmd) == 0: FailureUI(self,self.__tr("<p><b>Unable to run command. No command specified.</b><p>Use <pre>Configure...</pre> to specify a command to run.")) log.error("No command specified. Use settings to configure commands.") else: log.debug("Run: %s %s (%s) %s" % ("*"*20, cmd, self.cur_device_uri, "*"*20)) log.debug(cmd) try: cmd = ''.join([self.cur_device.device_vars.get(x, x) \ for x in cmd.split(macro_char)]) except AttributeError: pass log.debug(cmd) path = cmd.split()[0] args = cmd.split() log.debug(path) log.debug(args) self.cleanupChildren() os.spawnvp(os.P_NOWAIT, path, args) qApp.processEvents() finally: endWaitCursor() def helpContents(self): utils.openURL(self.docs) def helpAbout(self): dlg = AboutDialog(self, prop.version, self.toolbox_version + " (Qt4)") dlg.exec_() def __tr(self,s,c = None): return qApp.translate("DevMgr5",s,c) # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX class PasswordDialog(QDialog): def __init__(self, prompt, parent=None, name=None, modal=0, fl=0): QDialog.__init__(self, parent) self.prompt = prompt Layout= QGridLayout(self) Layout.setMargin(11) Layout.setSpacing(6) self.PromptTextLabel = QLabel(self) Layout.addWidget(self.PromptTextLabel,0,0,1,3) self.UsernameTextLabel = QLabel(self) Layout.addWidget(self.UsernameTextLabel,1,0) self.UsernameLineEdit = QLineEdit(self) self.UsernameLineEdit.setEchoMode(QLineEdit.Normal) Layout.addWidget(self.UsernameLineEdit,1,1,1,2) self.PasswordTextLabel = QLabel(self) Layout.addWidget(self.PasswordTextLabel,2,0) self.PasswordLineEdit = QLineEdit(self) self.PasswordLineEdit.setEchoMode(QLineEdit.Password) Layout.addWidget(self.PasswordLineEdit,2,1,1,2) self.OkPushButton = QPushButton(self) Layout.addWidget(self.OkPushButton,3,2) self.languageChange() self.resize(QSize(420,163).expandedTo(self.minimumSizeHint())) self.OkPushButton.clicked.connect(self.accept) self.PasswordLineEdit.returnPressed.connect(self.accept) def getUsername(self): return to_unicode(self.UsernameLineEdit.text()) def getPassword(self): return to_unicode(self.PasswordLineEdit.text()) def languageChange(self): self.setWindowTitle(self.__tr("HP Device Manager - Enter Username/Password")) self.PromptTextLabel.setText(self.__tr(self.prompt)) self.UsernameTextLabel.setText(self.__tr("Username:")) self.PasswordTextLabel.setText(self.__tr("Password:")) self.OkPushButton.setText(self.__tr("OK")) def __tr(self,s,c = None): return qApp.translate("DevMgr5",s,c) # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX def showPasswordUI(prompt): try: dlg = PasswordDialog(prompt, None) if dlg.exec_() == QDialog.Accepted: return (dlg.getUsername(), dlg.getPassword()) finally: 
pass return ("", "") def openEWS(host, zc): if zc: status, ip = hpmudext.get_zc_ip_address(zc) if status != hpmudext.HPMUD_R_OK: ip = "hplipopensource.com" else: ip = host return "http://%s" % ip
36.917597
189
0.545537
7957460ae567f9fa80966ca9f15901dadc7ae18d
5,973
py
Python
flow_models/merge.py
piotrjurkiewicz/flow_stats
cc97a8381275cb9dd23ed0c3432abffaf4198431
[ "MIT" ]
9
2019-07-08T09:53:22.000Z
2021-11-19T07:50:11.000Z
flow_models/merge.py
ElsevierSoftwareX/SOFTX-D-21-00003
cc97a8381275cb9dd23ed0c3432abffaf4198431
[ "MIT" ]
1
2021-02-23T16:01:21.000Z
2021-04-03T02:06:32.000Z
flow_models/merge.py
ElsevierSoftwareX/SOFTX-D-21-00003
cc97a8381275cb9dd23ed0c3432abffaf4198431
[ "MIT" ]
5
2019-09-27T14:52:54.000Z
2022-01-25T07:58:24.000Z
#!/usr/bin/python3
"""
Merges flows which were split across multiple records due to *active timeout*.
"""

import argparse
import warnings

from .lib.io import FlowValFields, io_parser, IN_FORMATS, OUT_FORMATS
from .lib.util import logmsg, prepare_file_list


class Flow:
    __slots__ = 'key', 'val'

    def __init__(self, key, first, first_ms, last, last_ms, packets, octets, aggs):
        self.key = key
        self.val = FlowValFields()
        self.val.first = first
        self.val.first_ms = first_ms
        self.val.last = last
        self.val.last_ms = last_ms
        self.val.packets = packets
        self.val.octets = octets
        self.val.aggs = aggs or 1

    def to_tuple(self):
        return self.key, self.val.first, self.val.first_ms, self.val.last, self.val.last_ms, \
               self.val.packets, self.val.octets, self.val.aggs


def merge(in_files, out_file, in_format='nfcapd', out_format='csv_flow', inactive_timeout=15.0, active_timeout=300.0):
    """
    Merge flows split due to timeout.

    :param list[os.PathLike] in_files: input files paths
    :param os.PathLike | _io.TextIOWrapper out_file: output file or directory path or stream
    :param in_format: format of input files
    :param out_format: format of output
    :param inactive_timeout: inactive timeout in seconds
    :param active_timeout: active timeout in seconds
    """
    inactive_s, inactive_ms = divmod(inactive_timeout, 1)
    inactive_s, inactive_ms = int(inactive_s), int(inactive_ms * 1000)
    active_time = active_timeout - inactive_timeout

    cache = {}

    reader, writer = IN_FORMATS[in_format], OUT_FORMATS[out_format]
    writer = writer(out_file)
    next(writer)

    written = 0
    merged = 0
    wrong = 0

    for file in in_files:
        for key, first, first_ms, last, last_ms, packets, octets, aggs in reader(file):
            new_flow = Flow(key, first, first_ms, last, last_ms, packets, octets, aggs)
            if key in cache:
                old_flow = cache[key]
                nfs, nfm = new_flow.val.first, new_flow.val.first_ms
                ols, olm = old_flow.val.last, old_flow.val.last_ms
                nls, nlm = new_flow.val.last, new_flow.val.last_ms
                ofs, ofm = old_flow.val.first, old_flow.val.first_ms
                if nfs > ols or nfs == ols and nfm > olm:
                    # new first > old last
                    # correct order
                    pass
                elif ofs > nls or ofs == nls and ofm > nlm:
                    # old first > new last
                    # reversed order
                    old_flow, new_flow = new_flow, old_flow
                    warnings.warn("Found a flow with the reversed order")
                else:
                    # error
                    wrong += 1
                    del cache[key]
                    continue
                delta_s = new_flow.val.first - old_flow.val.last
                delta_ms = new_flow.val.first_ms - old_flow.val.last_ms
                if delta_ms < 0:
                    delta_s -= 1
                    delta_ms = 1000 - delta_ms
                if delta_s < inactive_s or delta_s == inactive_s and delta_ms < inactive_ms:
                    # merge flows
                    merged += 1
                    old_flow.val.last = new_flow.val.last          # update last
                    old_flow.val.last_ms = new_flow.val.last_ms    # update last
                    old_flow.val.aggs += 1                         # add flow
                    old_flow.val.packets += new_flow.val.packets   # add packets
                    old_flow.val.octets += new_flow.val.octets     # add octets
                    if new_flow.val.last - new_flow.val.first < active_time:
                        # too short to merge
                        # dump it
                        del cache[key]
                        writer.send(old_flow.to_tuple())
                        written += 1
                else:
                    # dump old flow from cache
                    del cache[key]
                    writer.send(old_flow.to_tuple())
                    written += 1
                    # new flow
                    if new_flow.val.last - new_flow.val.first < active_time:
                        # too short to merge
                        # dump new flow too
                        writer.send(new_flow.to_tuple())
                        written += 1
                    else:
                        # candidate to merge
                        # add new flow to cache
                        cache[key] = new_flow
            else:
                # new flow
                if new_flow.val.last - new_flow.val.first < active_time:
                    # too short to merge
                    # dump it asap
                    writer.send(new_flow.to_tuple())
                    written += 1
                else:
                    # candidate to merge
                    # add it to cache
                    cache[key] = new_flow

        logmsg(f'Finished: {file} Cached: {len(cache)} Wrong: {wrong} Merged: {merged} Written: {written}')

    for flow in cache.values():
        # dump all remaining flows
        writer.send(flow.to_tuple())
        written += 1

    writer.close()

    logmsg(f'Finished all files. Wrong: {wrong} Merged: {merged} Written: {written}')


def parser():
    p = argparse.ArgumentParser(description=__doc__, parents=[io_parser])
    p.add_argument('-I', type=float, default=15.0, help='inactive timeout in seconds')
    p.add_argument('-A', type=float, default=300.0, help='active timeout in seconds')
    return p


def main():
    app_args = parser().parse_args()
    if app_args.i == 'binary':
        input_files = app_args.files
    else:
        input_files = prepare_file_list(app_args.files)
    merge(input_files, app_args.O, app_args.i, app_args.o, app_args.I, app_args.A)


if __name__ == '__main__':
    main()
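A minimal programmatic usage sketch for the merge() function above, added for illustration; the input file name is a placeholder, and importing the package as flow_models and writing CSV to stdout are assumptions rather than requirements of the original module:

import sys
from flow_models.merge import merge  # assumes the package is importable as flow_models

# Merge split records from one nfcapd capture (placeholder path) and stream the
# result as CSV text, keeping the module's default 15 s inactive / 300 s active timeouts.
merge(['capture.nfcapd'], sys.stdout, in_format='nfcapd', out_format='csv_flow',
      inactive_timeout=15.0, active_timeout=300.0)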
38.535484
118
0.542943
7957460c640ff0e84286b297906f8de227cfb6e2
8,839
py
Python
pyNastran/bdf/bdf_interface/card_groups.py
SaravananM21/pyNastran
acfe9da5567ca0477997973b4fe9f9053e39a4d9
[ "BSD-3-Clause" ]
1
2021-08-02T09:49:24.000Z
2021-08-02T09:49:24.000Z
pyNastran/bdf/bdf_interface/card_groups.py
zchlrnr/pyNastran
d89a59a918e8d2262e3d8e6abd01abdc37046cdf
[ "BSD-3-Clause" ]
null
null
null
pyNastran/bdf/bdf_interface/card_groups.py
zchlrnr/pyNastran
d89a59a918e8d2262e3d8e6abd01abdc37046cdf
[ "BSD-3-Clause" ]
null
null
null
acoustic = { # loads 'ACLOAD', 'ACSRCE', # element - property 'CHACBR', 'PACBAR', 'CHACAB', 'PACABS', 'CACINF3', 'CACINF4', 'PACINF', 'CSLOT3', 'CSLOT4', 'AXSLOT', 'GRIDS', 'CAABSF', 'PAABSF', } aero_geom = { # aero cards 'AECOMP', 'AECOMPL', ## aecomps 'AEFACT', ## aefacts 'AELINK', ## aelinks 'AELIST', ## aelists 'AEPARM', ## aeparams 'AESTAT', ## aestats 'AESURF', ## aesurf 'AESURFS', ## aesurfs 'CAERO1', 'CAERO2', 'CAERO3', 'CAERO4', 'CAERO5', ## caeros 'PAERO1', 'PAERO2', 'PAERO3', 'PAERO4', 'PAERO5', ## paeros 'MONPNT1', 'MONPNT2', 'MONPNT3', 'MONDSP1', ## monitor_points 'SPLINE1', 'SPLINE2', 'SPLINE3', 'SPLINE4', 'SPLINE5', ## splines 'SPLINE6', 'SPLINE7', } static_aero = { 'AEROS', ## aeros 'TRIM', 'TRIM2', ## trims 'CSSCHD', ## csschds 'DIVERG', ## divergs } dynamic_aero = { 'AERO', ## aero 'GUST', ## gusts 'FLUTTER', ## flutters 'FLFACT', ## flfacts 'MKAERO1', 'MKAERO2', ## mkaeros } basic = { '/', 'ECHOON', 'ECHOOFF', 'PARAM', 'GRID', 'GRDSET', 'SPOINT', ## coords 'CORD1R', 'CORD1C', 'CORD1S', 'CORD2R', 'CORD2C', 'CORD2S', #'GMCORD', } old_axisymmetric_structure = { # axixsymmetric 'CCONEAX', # element 'PCONEAX', # property 'AXIC', # axic 'AXIF', # axif 'FORCEAX', # loads } axisymmetric_structure = { 'CTRAX3', 'CTRAX6', 'CTRIAX', 'CTRIAX6', 'CQUADX', 'CQUADX4', 'CQUADX8', } axisymmetric_loads = {} structure = { ## masses 'CONM1', 'CONM2', 'CMASS1', 'CMASS2', 'CMASS3', 'CMASS4', ## elements # springs 'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4', # 'CELAS5', # bushings 'CBUSH', 'CBUSH1D', 'CBUSH2D', # dampers 'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CDAMP5', 'CFAST', 'CBAR', 'CBARAO', 'BAROR', 'CROD', 'CTUBE', 'CBEAM', 'CBEAM3', 'CONROD', 'CBEND', 'BEAMOR', 'CTRIA3', 'CTRIA6', 'CTRIAR', 'CQUAD4', 'CQUAD8', 'CQUADR', 'CQUAD', 'SNORM', 'CTETRA', 'CPYRAM', 'CPENTA', 'CHEXA', 'CSHEAR', 'CVISC', 'CRAC2D', 'CRAC3D', 'CGAP', 'GENEL', ## properties 'PMASS', 'PELAS', 'PGAP', 'PFAST', 'PLPLANE', 'PPLANE', 'PBUSH', 'PBUSH1D', 'PDAMP', 'PDAMP5', 'PROD', 'PBAR', 'PBARL', 'PBEAM', 'PTUBE', 'PBCOMP', 'PBRSECT', 'PBEND', 'PBEAML', 'PBMSECT', # not fully supported 'PBEAM3', # v1.3 'PSHELL', 'PCOMP', 'PCOMPG', 'PSHEAR', 'PTRSHL', 'PSOLID', 'PLSOLID', 'PVISC', 'PRAC2D', 'PRAC3D', 'PIHEX', 'PCOMPS', # PQUAD4 ## pdampt 'PDAMPT', ## pelast 'PELAST', ## pbusht 'PBUSHT', } #structure_loads = { #} modes = { #: methods 'EIGB', 'EIGR', 'EIGRL', #: cMethods 'EIGC', 'EIGP', } materials = { ## materials 'MAT1', 'MAT2', 'MAT3', 'MAT8', 'MAT9', 'MAT10', 'MAT11', 'MAT3D', 'MATG', 'MATHE', 'MATHP', 'MATEV', # 'MATHE' #'EQUIV', # testing only, should never be activated... ## thermal_materials 'MAT4', 'MAT5', } nonlinear_materials = { ## creep_materials 'CREEP', ## Material dependence - MATT1/MATT2/etc. 'MATT1', 'MATT2', 'MATT3', 'MATT4', 'MATT5', 'MATT8', 'MATT9', 'MATS1', #'MATS3', 'MATS8', ## tables 'TABLEM1', 'TABLEM2', 'TABLEM3', 'TABLEM4', # material tables - temperature # nonlinear elastic temperature dependent materials (e.g. 
creep) # sees TABLES1 'TABLEST', # material tables - stress (MATS1, CREEP, MATHP) 'TABLES1', } dynamic_loads = { ## dloads 'DLOAD', ## dload_entries 'ACSRCE', 'TLOAD1', 'TLOAD2', 'RLOAD1', 'RLOAD2', 'QVECT', 'RANDPS', 'RANDT1', # random # ---- dynamic cards ---- # 'DAREA', ## dareas 'DPHASE', ## dphases 'TF', ## transfer_functions 'TIC', ## initial conditions - sid (set ID) } transient = { 'DELAY', ## delays 'NLPARM', ## nlparms 'NLPCI', ## nlpcis 'TSTEP', ## tsteps 'TSTEPNL', 'TSTEP1', ## tstepnls } frequency = { ## frequencies 'FREQ', 'FREQ1', 'FREQ2', 'FREQ3', 'FREQ4', 'FREQ5', } static_loads = { ## loads 'LOAD', 'CLOAD', 'LSEQ', 'LOADCYN', 'LOADCYH', 'SLOAD', 'FORCE', 'FORCE1', 'FORCE2', 'MOMENT', 'MOMENT1', 'MOMENT2', 'GRAV', 'ACCEL', 'ACCEL1', 'PLOAD', 'PLOAD1', 'PLOAD2', 'PLOAD4', 'PLOADX1', 'RFORCE', 'RFORCE1', 'SPCD', 'DEFORM', # msgmesh #'GMLOAD', # axisymmetric 'PRESAX', #thermal 'QVOL', } static_thermal_loads = { # temperature cards 'TEMP', 'TEMPD', 'TEMPB3', 'TEMPAX', } dynamic_thermal_loads = { 'QBDY1', 'QBDY2', 'QBDY3', 'QHBDY', 'CHBDYE', 'CHBDYG', 'CHBDYP', 'PCONV', 'PCONVM', 'PHBDY', 'RADBC', 'CONV', 'RADM', 'VIEW', 'VIEW3D', # TODO: not validated 'RADCAV', ## radcavs } optimization = { # optimization cards 'DEQATN', 'DTABLE', 'DCONSTR', 'DESVAR', 'TOPVAR', 'DDVAL', 'DRESP1', 'DRESP2', 'DRESP3', 'DVCREL1', 'DVCREL2', 'DVPREL1', 'DVPREL2', 'DVMREL1', 'DVMREL2', 'DOPTPRM', 'DLINK', 'DCONADD', 'DVGRID', 'DSCREEN', } superelements = { # superelements 'SETREE', 'SENQSET', 'SEBULK', 'SEBNDRY', 'SEELT', 'SELOC', 'SEMPLN', 'SECONCT', 'SELABEL', 'SEEXCLD', 'CSUPER', 'CSUPEXT', 'SELOAD', 'RELEASE', # super-element sets 'SESET', ## se_sets 'SEBSET', 'SEBSET1', ## se_bsets 'SECSET', 'SECSET1', ## se_csets 'SEQSET', 'SEQSET1', ## se_qsets #'SEUSET', 'SEUSET1', ## se_usets 'SEQSEP', } sol_101 = basic + structure + materials + static_loads + static_thermal_loads # statics sol_103 = basic + structure + materials + modes # modes sol_105 = basic + structure + materials + static_loads # buckling sol_144 = basic + structure + materials + aero_geom + static_aero + static_loads # static aero sol_145 = basic + structure + materials + aero_geom + dynamic_aero + modes # flutter sol_146 = basic + structure + materials + aero_geom + dynamic_aero + modes + frequency + static_loads + dynamic_loads # gust sol_153 = basic + structure + materials # + static_loads sol_200 = optimization + sol_144 + sol_145 + sol_146 cards_to_read = [ ## nodes 'EPOINT', 'SEQGP', 'GRIDB', # points 'POINT', #'GRIDG' ## ringfl 'RINGFL', ## ringaxs 'RINGAX', 'POINTAX', ## nsms 'NSM', 'NSM1', 'NSML', 'NSML1', ## nsmadds 'NSMADD', 'CTRSHL', 'CPLSTN3', 'CPLSTN4', 'CPLSTN6', 'CPLSTN8', # plate strain 'CPLSTS3', 'CPLSTS4', 'CPLSTS6', 'CPLSTS8', # plate stress # acoustic 'CHACAB', 'CAABSF', 'CHACBR', 'PACABS', 'PAABSF', 'PACBAR', 'ACMODL', ## rigid_elements 'RBAR', 'RBAR1', 'RBE1', 'RBE2', 'RBE3', 'RROD', 'RSPLINE', 'RSSCON', ## plotels 'PLOTEL', ## nxstrats 'NXSTRAT', ## spcs 'SPC', 'SPCADD', 'SPC1', 'SPCAX', 'SPCOFF', 'SPCOFF1', 'GMSPC', ## mpcs 'MPC', 'MPCADD', ## suport/suport1/se_suport 'SUPORT', 'SUPORT1', 'SESUP', 'ROTORG', 'ROTORD', ## rotors # direct matrix input cards 'DMIG', 'DMIJ', 'DMIJI', 'DMIK', 'DMI', 'DTI', 'DMIAX', # sets 'SET1', 'SET3', ## sets 'ASET', 'ASET1', ## asets 'OMIT', 'OMIT1', ## omits 'BSET', 'BSET1', ## bsets 'CSET', 'CSET1', ## csets 'QSET', 'QSET1', ## qsets 'USET', 'USET1', ## usets 'RADSET', # radset #------------------------------------------------------------------ ## parametric 'PSET', 'PVAL', 
'GMCURV', 'GMSURF', 'FEEDGE', 'FEFACE', #------------------------------------------------------------------ ## tables 'TABLED1', 'TABLED2', 'TABLED3', 'TABLED4', # dynamic tables - freq/time loads ## modal damping table - tables_sdamping 'TABDMP1', ## random_tables # PSD=func(freq); used by RANDPS card 'TABRND1', # gust for aeroelastic response; used by RANDPS card 'TABRNDG', # ??? 'TABLEHT', 'TABLEH1', #------------------------------------------------------------------ # : modtrak 'MODTRAK', #: contact 'BCBODY', ## bcbody 'BCPARA', ## bcpara 'BCTPARA', ## bctpara 'BCRPARA', ## bcrpara 'BCTPARM', ## bctparm 'BGADD', ## bgadds 'BGSET', ## bgsets 'BCTADD', ## bctadds 'BCTSET', ## bctsets 'BSURF', ## bsurf 'BSURFS', ## bsurfs 'BCONP', ## bconp 'BLSEG', ## blseg 'BFRIC', ## bfric 'TEMPBC', #'RADMT', 'RADLST', 'RADMTX', #'RADBND', #'TEMPP1', 'TEMPRB', 'CONVM', ## ??? #'PANEL', 'SWLDPRM', #'CWELD', 'PWELD', 'PWSEAM', 'CWSEAM', 'CSEAM', 'PSEAM', 'DVSHAP', 'BNDGRID', #'CYSYM', 'CYJOIN', 'MODTRAK', 'DSCONS', 'DVAR', 'DVSET', 'DYNRED', #'BNDFIX', 'BNDFIX1', #'AEFORCE', 'UXVEC', 'GUST2', # cyclic 'CYJOIN', 'CYAX', # other 'INCLUDE', # '=' 'ENDDATA', ]
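The sol_* groups near the end of this module are built by combining the named card groups; below is a self-contained toy sketch of that composition pattern using Python set union, where the group contents are illustrative stand-ins rather than the real card lists:

# Toy stand-ins for the card-name groups defined above (not the real contents).
basic = {'GRID', 'PARAM', 'CORD2R'}
structure = {'CQUAD4', 'PSHELL', 'CBAR'}
materials = {'MAT1', 'MAT8'}
static_loads = {'FORCE', 'PLOAD4'}

# Python sets combine with | (union), yielding one lookup table per solution sequence.
sol_101_cards = basic | structure | materials | static_loads
print('CQUAD4' in sol_101_cards, 'MAT1' in sol_101_cards)  # True True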
23.507979
125
0.528567
795746dadd7adf4ffe4641d7ab5ada50ed2aa1ab
1,433
py
Python
src/api/endpoint/blog.py
DJWOMS/blog_fastapi
f584634a2cd410904df6a7d9478044d269737a91
[ "BSD-3-Clause" ]
39
2020-03-12T13:00:22.000Z
2022-03-30T13:46:21.000Z
src/api/endpoint/blog.py
DJWOMS/blog_fastapi
f584634a2cd410904df6a7d9478044d269737a91
[ "BSD-3-Clause" ]
null
null
null
src/api/endpoint/blog.py
DJWOMS/blog_fastapi
f584634a2cd410904df6a7d9478044d269737a91
[ "BSD-3-Clause" ]
18
2020-03-20T06:01:34.000Z
2022-03-29T15:06:57.000Z
from typing import List

from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session

from src.api.utils.db import get_db
from src.blog import service, schemas

router = APIRouter()


@router.post("/category/", response_model=schemas.CategoryCreate)
async def create_category(item: schemas.CategoryCreate, db: Session = Depends(get_db)):
    return service.create_category(db=db, item=item)


@router.get("/category/", response_model=List[schemas.CategoryGet])
async def get_category_list(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    return service.get_categories(db=db, skip=skip, limit=limit)


@router.get("/category/{category_id}", response_model=schemas.CategoryAndPosts)
def get_category(category_id: int, db: Session = Depends(get_db)):
    return service.get_category_posts(db=db, category_id=category_id)


@router.post("/post/", response_model=schemas.PostCreate)
def create_post(item: schemas.PostCreate, db: Session = Depends(get_db)):
    return service.create_post(db=db, item=item)


@router.get("/post/", response_model=List[schemas.PostList])
def get_post_list(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    return service.get_posts(db=db, skip=skip, limit=limit)


@router.get("/post/{post_id}", response_model=schemas.PostSingle)
def get_post_detail(post_id: int, db: Session = Depends(get_db)):
    return service.get_post_single(db=db, post_id=post_id)
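A minimal sketch, not part of the repository, of how this router might be mounted on a FastAPI application; the application module and the /blog URL prefix are assumptions:

from fastapi import FastAPI
from src.api.endpoint.blog import router as blog_router  # module path taken from the repo layout

app = FastAPI()
# Exposes e.g. POST /blog/category/ and GET /blog/post/{post_id}.
app.include_router(blog_router, prefix="/blog")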
36.74359
92
0.763433
795746db9717ac7bb91430a8e7b6b35e1308141f
88,793
py
Python
pysnmp-with-texts/IRM3-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
8
2019-05-09T17:04:00.000Z
2021-06-09T06:50:51.000Z
pysnmp-with-texts/IRM3-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
4
2019-05-31T16:42:59.000Z
2020-01-31T21:57:17.000Z
pysnmp-with-texts/IRM3-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
# # PySNMP MIB module IRM3-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IRM3-MIB # Produced by pysmi-0.3.4 at Wed May 1 11:44:27 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection") repeaterRev2, subSysMMAC, repeaterRev1 = mibBuilder.importSymbols("IRM-OIDS", "repeaterRev2", "subSysMMAC", "repeaterRev1") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, Counter64, iso, ObjectIdentity, TimeTicks, MibIdentifier, Bits, Gauge32, IpAddress, NotificationType, Integer32, Counter32, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "Counter64", "iso", "ObjectIdentity", "TimeTicks", "MibIdentifier", "Bits", "Gauge32", "IpAddress", "NotificationType", "Integer32", "Counter32", "ModuleIdentity") TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString") device = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1)) board = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 2)) port = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 3)) sourceAddr = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 7)) redundancy = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 8)) alarm = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9)) deviceMMACType = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))).clone(namedValues=NamedValues(("unknown", 1), ("mMAC8", 2), ("mMAC5", 3), ("mMAC3", 4), ("mINIMMAC", 5), ("mRXI", 6), ("m3Shunt", 7), ("m5Shunt", 8), ("m8FNB", 9), ("nonFNB", 10), ("mMAC3Shunting", 11), ("mMAC5Shunting", 12), ("mMAC8Shunting", 13), ("m8Shunting", 14)))).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceMMACType.setStatus('mandatory') if mibBuilder.loadTexts: deviceMMACType.setDescription('Indicates the type of MMAC.') deviceSlots = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 3), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceSlots.setStatus('mandatory') if mibBuilder.loadTexts: deviceSlots.setDescription('Number of available device slots in this device, including the IRM slot.') deviceOccupiedSlots = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 4), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceOccupiedSlots.setStatus('mandatory') if mibBuilder.loadTexts: deviceOccupiedSlots.setDescription('The slots that contain MIM boards. This field is bit encoded (eg. if a bit is set then that board is present). 
For example, a value of 3 indicates that there are boards in the IRM slot and the first MIM slot.') devicePortsOn = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: devicePortsOn.setStatus('mandatory') if mibBuilder.loadTexts: devicePortsOn.setDescription('The number of operating ports on the device.') deviceTotalPorts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceTotalPorts.setStatus('mandatory') if mibBuilder.loadTexts: deviceTotalPorts.setDescription('The total number of repeater ports on the MMAC.') deviceTotalPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 7), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceTotalPkts.setStatus('mandatory') if mibBuilder.loadTexts: deviceTotalPkts.setDescription('The number of packets detected on all ports of this device.') deviceTotalErrors = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 8), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceTotalErrors.setStatus('mandatory') if mibBuilder.loadTexts: deviceTotalErrors.setDescription('The sum of all of the errors listed including alignment, CRC, runts, giants, and out of window collisions.') deviceTransmitColls = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 9), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceTransmitColls.setStatus('mandatory') if mibBuilder.loadTexts: deviceTransmitColls.setDescription('The number of transmit collisions detected by this MMAC.') deviceRecColls = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 10), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceRecColls.setStatus('mandatory') if mibBuilder.loadTexts: deviceRecColls.setDescription('The number of receive collisions detected by this MMAC.') deviceAlignErrs = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 11), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceAlignErrs.setStatus('mandatory') if mibBuilder.loadTexts: deviceAlignErrs.setDescription('The number of packets with alignment errors detected.') deviceCRCErrs = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 12), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceCRCErrs.setStatus('mandatory') if mibBuilder.loadTexts: deviceCRCErrs.setDescription('The number of packets with bad CRC detected.') deviceRunts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 13), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceRunts.setStatus('mandatory') if mibBuilder.loadTexts: deviceRunts.setDescription('The number of runt frames detected by this device.') deviceOOWColls = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 14), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceOOWColls.setStatus('mandatory') if mibBuilder.loadTexts: deviceOOWColls.setDescription('The number of out-of-window collisions detected by this device.') deviceNoResources = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 15), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceNoResources.setStatus('mandatory') if mibBuilder.loadTexts: deviceNoResources.setDescription('The number of No resource condtions detected by this device.') deviceRecBytes = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 16), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceRecBytes.setStatus('mandatory') if mibBuilder.loadTexts: deviceRecBytes.setDescription('The number of bytes detected on the network.') deviceGiantFrames = 
MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 17), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceGiantFrames.setStatus('mandatory') if mibBuilder.loadTexts: deviceGiantFrames.setDescription('The number of packets longer than 1518 bytes.') deviceRestart = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 18), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: deviceRestart.setStatus('mandatory') if mibBuilder.loadTexts: deviceRestart.setDescription('If this object is set to 1 the device is restarted.') deviceResetCounters = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 19), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: deviceResetCounters.setStatus('mandatory') if mibBuilder.loadTexts: deviceResetCounters.setDescription('If this object is set to 1, all of the devices counters are reset to 0.') deviceRedundantCts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 20), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceRedundantCts.setStatus('mandatory') if mibBuilder.loadTexts: deviceRedundantCts.setDescription('The number of redundant circuits available for use.') deviceDiscover = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 21), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: deviceDiscover.setStatus('deprecated') if mibBuilder.loadTexts: deviceDiscover.setDescription('When this variable is set to 1 The following events occur: (1) The device sends a LINK-UP trap (2) The device sends a proprietary packet that will cause all other Cabeltron devices on that local network to send a LINK-UP trap.') deviceTimeBase = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 24), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: deviceTimeBase.setStatus('mandatory') if mibBuilder.loadTexts: deviceTimeBase.setDescription('The number of seconds used as the interval for performing all of the rate alarm checks. 
The minimum is 10 secs.') deviceResetRedundancy = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 25), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: deviceResetRedundancy.setStatus('mandatory') if mibBuilder.loadTexts: deviceResetRedundancy.setDescription('Setting this object to a 1 will reset redundancy for the entire box.') deviceSrcAddrAgingTime = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 26), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: deviceSrcAddrAgingTime.setStatus('mandatory') if mibBuilder.loadTexts: deviceSrcAddrAgingTime.setDescription('The number of minutes that a source address is not detected before it is removed from the source address table.') deviceSrcAddrTraps = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 27), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("trapsOff", 1), ("trapsOn", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: deviceSrcAddrTraps.setStatus('mandatory') if mibBuilder.loadTexts: deviceSrcAddrTraps.setDescription('Controls sending of source address related traps.') deviceSrcAddrLocked = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("lockOff", 1), ("lockOn", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: deviceSrcAddrLocked.setStatus('mandatory') if mibBuilder.loadTexts: deviceSrcAddrLocked.setDescription('If this is set to 2, any source addresses that are detected on station ports that are not in the table will cause the port to be turned off.') deviceEtherOccupiedSlots = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 29), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceEtherOccupiedSlots.setStatus('mandatory') if mibBuilder.loadTexts: deviceEtherOccupiedSlots.setDescription('The slots that contain Ethernet MIM boards. This field is bit encoded (eg. if a bit is set then that board is present). For example, a value of 2 indicates that there is an Ethernet board in the first MIM slot.') deviceTROccupiedSlots = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 30), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceTROccupiedSlots.setStatus('mandatory') if mibBuilder.loadTexts: deviceTROccupiedSlots.setDescription('The slots that contain managable Token Ring MIM boards. This field is bit encoded (eg. if a bit is set then that board is present). For example, a value of 2 indicates that there is a managable Token Ring board in the first MIM slot.') deviceFDDIOccupiedSlots = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 31), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceFDDIOccupiedSlots.setStatus('mandatory') if mibBuilder.loadTexts: deviceFDDIOccupiedSlots.setDescription('The slots that contain FDDI MIM boards. This field is bit encoded (eg. if a bit is set then that board is present). For example, a value of 2 indicates that there is an FDDI board in the first MIM slot.') deviceRestoreDefaults = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 32), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: deviceRestoreDefaults.setStatus('mandatory') if mibBuilder.loadTexts: deviceRestoreDefaults.setDescription("If this object is set to 1 the factory defaults are restored. 
This, however, does not affect the device's IP address.") deviceActiveUsers = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 33), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceActiveUsers.setStatus('mandatory') if mibBuilder.loadTexts: deviceActiveUsers.setDescription('The number of active users seen by this device.') deviceOSIFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 34), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceOSIFrames.setStatus('mandatory') if mibBuilder.loadTexts: deviceOSIFrames.setDescription('The number of frames with OSI protocol seen by this device.') deviceNovellFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 35), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceNovellFrames.setStatus('mandatory') if mibBuilder.loadTexts: deviceNovellFrames.setDescription('The number of frames with NOVELL protocol seen by this device.') deviceBanyanFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 36), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceBanyanFrames.setStatus('mandatory') if mibBuilder.loadTexts: deviceBanyanFrames.setDescription('The number of frames with BANYAN protocol seen by this device.') deviceDECNetFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 37), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceDECNetFrames.setStatus('mandatory') if mibBuilder.loadTexts: deviceDECNetFrames.setDescription('The number of frames with DECNET protocol seen by this device.') deviceXNSFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 38), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceXNSFrames.setStatus('mandatory') if mibBuilder.loadTexts: deviceXNSFrames.setDescription('The number of frames with XNS protocol seen by this device.') deviceIPFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 39), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceIPFrames.setStatus('mandatory') if mibBuilder.loadTexts: deviceIPFrames.setDescription('The number of frames with TCPIP protocol seen by this device.') deviceCtronFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 40), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceCtronFrames.setStatus('mandatory') if mibBuilder.loadTexts: deviceCtronFrames.setDescription('The number of frames with Cabletron protocol seen by this device.') deviceAppletalkFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 41), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceAppletalkFrames.setStatus('mandatory') if mibBuilder.loadTexts: deviceAppletalkFrames.setDescription('The number of frames with Appletalk protocol seen by this device.') deviceOtherFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 42), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceOtherFrames.setStatus('mandatory') if mibBuilder.loadTexts: deviceOtherFrames.setDescription('The number of frames seen by this device, that do not fall into any of the previously listed protocol catagories.') device64To127Frames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 43), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: device64To127Frames.setStatus('mandatory') if mibBuilder.loadTexts: device64To127Frames.setDescription('The number of frames seen by this device, with frame sizes between 64 and 127 bytes.') device128To255Frames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 44), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: 
device128To255Frames.setStatus('mandatory') if mibBuilder.loadTexts: device128To255Frames.setDescription('The number of frames seen by this device, with frame sizes between 128 and 255 bytes.') device256To511Frames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 45), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: device256To511Frames.setStatus('mandatory') if mibBuilder.loadTexts: device256To511Frames.setDescription('The number of frames seen by this device, with frame sizes between 256 and 511 bytes.') device512To1023Frames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 46), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: device512To1023Frames.setStatus('mandatory') if mibBuilder.loadTexts: device512To1023Frames.setDescription('The number of frames seen by this device, with frame sizes between 512 and 1023 bytes.') device1024To1518Frames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 47), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: device1024To1518Frames.setStatus('mandatory') if mibBuilder.loadTexts: device1024To1518Frames.setDescription('The number of frames seen by this device, with frame sizes between 1024 and 1518 bytes.') deviceBroadPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 48), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceBroadPkts.setStatus('mandatory') if mibBuilder.loadTexts: deviceBroadPkts.setDescription('The sum of broadcast packets detected on all ports of this device.') deviceMultPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 49), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceMultPkts.setStatus('mandatory') if mibBuilder.loadTexts: deviceMultPkts.setDescription('The sum of multicast packets detected on all ports of this device.') deviceThdPartyOccupiedSlots = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 51), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceThdPartyOccupiedSlots.setStatus('mandatory') if mibBuilder.loadTexts: deviceThdPartyOccupiedSlots.setDescription('The slots that contain Third Party MIM boards. This field is bit encoded (eg. if a bit is set then that board is present). For example, a value of 2 indicates that there is a Third Party board in the first MIM slot.') deviceImimOccupiedSlots = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 52), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceImimOccupiedSlots.setStatus('mandatory') if mibBuilder.loadTexts: deviceImimOccupiedSlots.setDescription('The slots that contain Cabletron Intelligent MIM boards. This field is bit encoded (eg. if a bit is set then that board is present). 
For example, a value of 2 indicates that there is an Intelligent board in the first MIM slot.') deviceLinkTraps = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 54), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("trapsOff", 1), ("trapsOn", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: deviceLinkTraps.setStatus('mandatory') if mibBuilder.loadTexts: deviceLinkTraps.setDescription('Controls sending of link related traps.') ctIPDefaultFrameType = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 56), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ethernet", 1), ("snap8022", 2))).clone('ethernet')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ctIPDefaultFrameType.setStatus('mandatory') if mibBuilder.loadTexts: ctIPDefaultFrameType.setDescription('Defines the default frame type that will be used by the device. A device will understand both frame types and will respond based upon the framing type that is learned for the given address. This object defines the type of framing that will be used if no framing is known, for example ARP requests. Setting a value of 8022snap(2) will result in framing 802.3 - 802.2 - IP packets being generated by default. Setting a value of ethernet(1) will result in framing ethernet IP packets by default.') deviceSrcAddrType = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 1, 57), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ipHash", 1), ("decHash", 2))).clone('ipHash')).setMaxAccess("readwrite") if mibBuilder.loadTexts: deviceSrcAddrType.setStatus('mandatory') if mibBuilder.loadTexts: deviceSrcAddrType.setDescription(' Selects the hashing algorithm of source addresses, DEC or IP') sourceAddrBoard = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 7, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: sourceAddrBoard.setStatus('mandatory') if mibBuilder.loadTexts: sourceAddrBoard.setDescription('The board number of the port that the supplied source address has been found on.') sourceAddrPort = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 7, 2), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: sourceAddrPort.setStatus('mandatory') if mibBuilder.loadTexts: sourceAddrPort.setDescription('The port number of the port that the supplied source address has been found on.') redundancyPollInterval = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 8, 1), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: redundancyPollInterval.setStatus('mandatory') if mibBuilder.loadTexts: redundancyPollInterval.setDescription('The number of seconds between polls for redundancy.') redundancyTestTod = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 8, 2), OctetString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: redundancyTestTod.setStatus('mandatory') if mibBuilder.loadTexts: redundancyTestTod.setDescription('The time of day at which the redundant circuits should be tested.') redundancyPerformTest = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 8, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("performTest", 1)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: redundancyPerformTest.setStatus('mandatory') if mibBuilder.loadTexts: redundancyPerformTest.setDescription('Test the redundant circuit.') redundancyCircuitName = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 8, 4), OctetString()).setMaxAccess("readwrite") if 
mibBuilder.loadTexts: redundancyCircuitName.setStatus('mandatory') if mibBuilder.loadTexts: redundancyCircuitName.setDescription('The name of the indicated circuit.') redundancyRetryCount = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 8, 5), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: redundancyRetryCount.setStatus('mandatory') if mibBuilder.loadTexts: redundancyRetryCount.setDescription('The number of unanswered polls allowed for the indicated circuit before the current connection is declared bad.') redundancyNumBPs = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 8, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: redundancyNumBPs.setStatus('mandatory') if mibBuilder.loadTexts: redundancyNumBPs.setDescription('The number of board/port combinations associated with the indicated circuit.') redundancyCircuitBoard = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 8, 7), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: redundancyCircuitBoard.setStatus('mandatory') if mibBuilder.loadTexts: redundancyCircuitBoard.setDescription('The boards associated with the indicated circuit.') redundancyCircuitPort = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 8, 8), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: redundancyCircuitPort.setStatus('mandatory') if mibBuilder.loadTexts: redundancyCircuitPort.setDescription('The ports associated with the indicated circuit.') redundancyCircuitTypes = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 8, 9), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: redundancyCircuitTypes.setStatus('mandatory') if mibBuilder.loadTexts: redundancyCircuitTypes.setDescription('The type of each port associated with the indicated circuit.') redundancyCircuitNumAddr = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 8, 10), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: redundancyCircuitNumAddr.setStatus('mandatory') if mibBuilder.loadTexts: redundancyCircuitNumAddr.setDescription('The number of addresses associated with the indicated circuit.') redundancyCircuitMACAddrAdd = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 8, 11), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: redundancyCircuitMACAddrAdd.setStatus('mandatory') if mibBuilder.loadTexts: redundancyCircuitMACAddrAdd.setDescription('Adds this ethernet address to the polling list for the indicated circuit.') redundancyCircuitMACAddrDel = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 8, 12), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: redundancyCircuitMACAddrDel.setStatus('mandatory') if mibBuilder.loadTexts: redundancyCircuitMACAddrDel.setDescription('Removes this ethernet address from the polling list for the indicated circuit.') redundancyCircuitMACAddrDisp = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 8, 13), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: redundancyCircuitMACAddrDisp.setStatus('mandatory') if mibBuilder.loadTexts: redundancyCircuitMACAddrDisp.setDescription('The ethernet addresses associated with the indicated circuit, accessed by index.') redundancyCircuitEnable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 8, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: redundancyCircuitEnable.setStatus('mandatory') if mibBuilder.loadTexts: redundancyCircuitEnable.setDescription('Enables and disables the indicated circuit.') redundancyCircuitReset = MibScalar((1, 
3, 6, 1, 4, 1, 52, 1, 2, 1, 8, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("reset", 1)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: redundancyCircuitReset.setStatus('mandatory') if mibBuilder.loadTexts: redundancyCircuitReset.setDescription('Reset the indicated circuit. Return all of the associated board and ports to NOT-USED, remove associated addresses from the polling list, reset the circuit name and retry count to default values.') devAlrm = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 1)) bdAlrm = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2)) portAlrm = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3)) devTraffic = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 1, 1)) devColls = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 1, 2)) devError = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 1, 3)) devBroad = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 1, 4)) devTrafficEnable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: devTrafficEnable.setStatus('mandatory') if mibBuilder.loadTexts: devTrafficEnable.setDescription('Enable the sending of device level traffic traps.') devTrafficThreshold = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 1, 1, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: devTrafficThreshold.setStatus('mandatory') if mibBuilder.loadTexts: devTrafficThreshold.setDescription('The threshold of packets within the Devicetimebase that will cause a traffic alarm.') devCollsEnable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 1, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: devCollsEnable.setStatus('mandatory') if mibBuilder.loadTexts: devCollsEnable.setDescription('Enable the sending of device level collision alarms.') devCollsThreshold = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 1, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 15))).setMaxAccess("readwrite") if mibBuilder.loadTexts: devCollsThreshold.setStatus('mandatory') if mibBuilder.loadTexts: devCollsThreshold.setDescription('This object represents the number of collisions per good packet measured by the device that will generate an alarm.') devErrorEnable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 1, 3, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: devErrorEnable.setStatus('mandatory') if mibBuilder.loadTexts: devErrorEnable.setDescription('Enable the sending of device level error alarms.') devErrorThreshold = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 1, 3, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: devErrorThreshold.setStatus('mandatory') if mibBuilder.loadTexts: devErrorThreshold.setDescription('This object represents the percentage of errors per good packet that will generate an alarm.') devErrorSource = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 1, 3, 3), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: devErrorSource.setStatus('mandatory') if mibBuilder.loadTexts: devErrorSource.setDescription('The type of error used for the error 
alarm. The integer is a bit encoded version of which errors to include in the error sum. The bits are encoded as follows: CRCErrors - Bit 0 - Least Significant Bit runts - Bit 1 OOWColls - Bit 2 alignErrs - Bit 3 NoResource - Bit 4 Giants - Bit 5 ') devBroadEnable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 1, 4, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: devBroadEnable.setStatus('mandatory') if mibBuilder.loadTexts: devBroadEnable.setDescription('Enable the sending of device level broadcast traps.') devBroadThreshold = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 1, 4, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: devBroadThreshold.setStatus('mandatory') if mibBuilder.loadTexts: devBroadThreshold.setDescription('The threshold of broadcast packets within the Devicetimebase that will cause a broadcast alarm.') bdTraffic = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 1)) bdColls = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 2)) bdError = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 3)) bdBroad = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 4)) bdTrafficEnable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: bdTrafficEnable.setStatus('mandatory') if mibBuilder.loadTexts: bdTrafficEnable.setDescription('Enable the sending of board level traffic traps for this board.') bdTrafficThreshold = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 1, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: bdTrafficThreshold.setStatus('mandatory') if mibBuilder.loadTexts: bdTrafficThreshold.setDescription('The threshold of packets within the Devicetimebase that will cause a traffic alarm.') bdTrafficBdDisable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: bdTrafficBdDisable.setStatus('mandatory') if mibBuilder.loadTexts: bdTrafficBdDisable.setDescription('Permit a board to be disabled on a traffic threshold alarm.') bdCollsEnable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: bdCollsEnable.setStatus('mandatory') if mibBuilder.loadTexts: bdCollsEnable.setDescription('Enable the sending of board level collision alarms.') bdCollsThreshold = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 15))).setMaxAccess("readwrite") if mibBuilder.loadTexts: bdCollsThreshold.setStatus('mandatory') if mibBuilder.loadTexts: bdCollsThreshold.setDescription('This object represents the number of collisions per good packet measured by the board that will generate an alarm.') bdCollsBdDisable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: 
bdCollsBdDisable.setStatus('mandatory') if mibBuilder.loadTexts: bdCollsBdDisable.setDescription('Permit a board to be disabled on a collision threshold alarm.') bdErrorEnable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 3, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: bdErrorEnable.setStatus('mandatory') if mibBuilder.loadTexts: bdErrorEnable.setDescription('Enable the sending of board level error alarms.') bdErrorThreshold = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 3, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: bdErrorThreshold.setStatus('mandatory') if mibBuilder.loadTexts: bdErrorThreshold.setDescription('This object represents the percentage of errors per good packet that will generate an alarm.') bdErrorSource = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 3, 3), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: bdErrorSource.setStatus('mandatory') if mibBuilder.loadTexts: bdErrorSource.setDescription('The type of error used for the error alarm. The integer is a bit encoded version of which errors to include in the error sum. The bits are encoded as follows: CRCErrors - Bit 0 Least Significant Bit runts - Bit 1 OOWColls - Bit 2 alignErrs - Bit 3 NoResource - Bit 4 Giants - Bit 5 ') bdErrorBdDisable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 3, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: bdErrorBdDisable.setStatus('mandatory') if mibBuilder.loadTexts: bdErrorBdDisable.setDescription('Permit a board to be disabled on an error threshold alarm.') bdBroadEnable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 4, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: bdBroadEnable.setStatus('mandatory') if mibBuilder.loadTexts: bdBroadEnable.setDescription('Enable the sending of board level broadcast traps for this board.') bdBroadThreshold = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 4, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: bdBroadThreshold.setStatus('mandatory') if mibBuilder.loadTexts: bdBroadThreshold.setDescription('The threshold of broadcast packets within the Devicetimebase that will cause a broadcast alarm.') bdBroadDisable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 2, 4, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: bdBroadDisable.setStatus('mandatory') if mibBuilder.loadTexts: bdBroadDisable.setDescription('Permit a board to be disabled on a broadcast threshold alarm.') portTraffic = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 1)) portColls = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 2)) portError = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 3)) portBroad = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 4)) portTrafficEnable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if 
mibBuilder.loadTexts: portTrafficEnable.setStatus('mandatory') if mibBuilder.loadTexts: portTrafficEnable.setDescription('Enable the sending of port level traffic traps for this port.') portTrafficThreshold = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 1, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: portTrafficThreshold.setStatus('mandatory') if mibBuilder.loadTexts: portTrafficThreshold.setDescription('The threshold of packets within the Devicetimebase that will cause a traffic alarm.') portTrafficPortDisable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: portTrafficPortDisable.setStatus('mandatory') if mibBuilder.loadTexts: portTrafficPortDisable.setDescription('Permit a port to be disabled on a traffic threshold alarm.') portCollsEnable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: portCollsEnable.setStatus('mandatory') if mibBuilder.loadTexts: portCollsEnable.setDescription('Enable the sending of port level collsion alarms.') portCollsThreshold = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 15))).setMaxAccess("readwrite") if mibBuilder.loadTexts: portCollsThreshold.setStatus('mandatory') if mibBuilder.loadTexts: portCollsThreshold.setDescription('This object represents the number of collisions per good packet measured by the port that will generate an alarm.') portCollsPortDisable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: portCollsPortDisable.setStatus('mandatory') if mibBuilder.loadTexts: portCollsPortDisable.setDescription('Permit a port to be disabled on a collision threshold alarm.') portErrorEnable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 3, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: portErrorEnable.setStatus('mandatory') if mibBuilder.loadTexts: portErrorEnable.setDescription('Enable the sending of port level error alarms.') portErrorThreshold = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 3, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: portErrorThreshold.setStatus('mandatory') if mibBuilder.loadTexts: portErrorThreshold.setDescription('This object represents the percentage of errors per good packet that will generate an alarm.') portErrorSource = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 3, 3), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: portErrorSource.setStatus('mandatory') if mibBuilder.loadTexts: portErrorSource.setDescription('The type of error used for the error alarm. The integer is a bit encoded version of which errors to include in the error sum. 
The bits are encoded as follows: CRCErrors - Bit 0 - Least Significant Bit runts - Bit 1 OOWColls - Bit 2 alignErrs - Bit 3 NoResource - Bit 4 Giants - Bit 5 ') portErrorPortDisable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 3, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: portErrorPortDisable.setStatus('mandatory') if mibBuilder.loadTexts: portErrorPortDisable.setDescription('Permit a port to be disabled on an error threshold alarm.') portBroadEnable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 4, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: portBroadEnable.setStatus('mandatory') if mibBuilder.loadTexts: portBroadEnable.setDescription('Enable the sending of port level broadcast alarms.') portBroadThreshold = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 4, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: portBroadThreshold.setStatus('mandatory') if mibBuilder.loadTexts: portBroadThreshold.setDescription('The threshold of broadcast packets within the Devicetimebase that will cause a broadcast alarm.') portBroadDisable = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 1, 9, 3, 4, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: portBroadDisable.setStatus('mandatory') if mibBuilder.loadTexts: portBroadDisable.setDescription('Permit a port to be disabled on a broadcast threshold alarm.') deviceR2 = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 1)) networkR2 = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 2)) boardR2 = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3)) portR2 = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4)) tokenRingD = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 1, 3)) deviceTRPortsOn = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 1, 3, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceTRPortsOn.setStatus('mandatory') if mibBuilder.loadTexts: deviceTRPortsOn.setDescription('This gives the number of Token Ring ports enabled on this device.') deviceTRPorts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 1, 3, 2), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceTRPorts.setStatus('mandatory') if mibBuilder.loadTexts: deviceTRPorts.setDescription('This gives the total number of Token Ring ports on this device.') deviceTRRingPortsOn = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 1, 3, 3), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceTRRingPortsOn.setStatus('mandatory') if mibBuilder.loadTexts: deviceTRRingPortsOn.setDescription('This gives the number of Token Ring ring ports that are enabled.') deviceTRRingPorts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 1, 3, 4), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceTRRingPorts.setStatus('mandatory') if mibBuilder.loadTexts: deviceTRRingPorts.setDescription('This gives the total number of Token Ring ring ports on this device.') deviceTRLans = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 1, 3, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceTRLans.setStatus('mandatory') if mibBuilder.loadTexts: deviceTRLans.setDescription('This gives the number of Token Rings formed 
on this device.') deviceTRBoards = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 1, 3, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceTRBoards.setStatus('mandatory') if mibBuilder.loadTexts: deviceTRBoards.setDescription('This returns the number of Token Ring boards on the device.') deviceTRBoardMap = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 1, 3, 7), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: deviceTRBoardMap.setStatus('mandatory') if mibBuilder.loadTexts: deviceTRBoardMap.setDescription('The slots that contain Token Ring MIM boards. This field is bit encoded (eg. if a bit is set then that board is present). For example, a value of 2 indicates that there is a Token Ring board in the first MIM slot.') commonB = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 1)) ethernetB = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2)) tokenRingB = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 3)) fDDIB = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 4)) boardIndex = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardIndex.setStatus('mandatory') if mibBuilder.loadTexts: boardIndex.setDescription('The slot number of this board.') boardName = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 1, 2), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: boardName.setStatus('mandatory') if mibBuilder.loadTexts: boardName.setDescription('The name of the board. This field will only contain printable ASCII characters.') boardType = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 1, 3), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardType.setStatus('mandatory') if mibBuilder.loadTexts: boardType.setDescription('The type of the MIM. (eg. FOT, TPT, etc). 
See appendix A for encoding.') boardTotalPorts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 1, 4), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardTotalPorts.setStatus('mandatory') if mibBuilder.loadTexts: boardTotalPorts.setDescription('Total number of ports supported on this board.') boardStatus = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: boardStatus.setStatus('deprecated') if mibBuilder.loadTexts: boardStatus.setDescription('The administrative status of this MIM.') boardPortsOn = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardPortsOn.setStatus('mandatory') if mibBuilder.loadTexts: boardPortsOn.setDescription('The number of operating ports on the board.') boardOper = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("operational", 1), ("notOperational", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: boardOper.setStatus('deprecated') if mibBuilder.loadTexts: boardOper.setDescription('Returns a value indicating whether any of the enabled ports on the board are not operational, thereby placing the board in a non-operational state.') boardActiveUsers = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 1, 8), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardActiveUsers.setStatus('mandatory') if mibBuilder.loadTexts: boardActiveUsers.setDescription('The number of active users seen on this board.') boardTotalPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 1), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardTotalPkts.setStatus('mandatory') if mibBuilder.loadTexts: boardTotalPkts.setDescription('The total number of packets this board has seen since the last reset.') boardErrorPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 2), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardErrorPkts.setStatus('mandatory') if mibBuilder.loadTexts: boardErrorPkts.setDescription('The number of packets that have experienced an error.') boardTransColls = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 3), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardTransColls.setStatus('mandatory') if mibBuilder.loadTexts: boardTransColls.setDescription('The number of transmit collisions this board has detected.') boardRecColls = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 4), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardRecColls.setStatus('mandatory') if mibBuilder.loadTexts: boardRecColls.setDescription('The number of Receive mode collisions this board has detected.') boardAlignErrs = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 5), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardAlignErrs.setStatus('mandatory') if mibBuilder.loadTexts: boardAlignErrs.setDescription('The number of misaligned frames this board has detected.') boardCRCErrs = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 6), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardCRCErrs.setStatus('mandatory') if mibBuilder.loadTexts: boardCRCErrs.setDescription('The number of packets with bad CRC detected.') boardRunts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 7), 
Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardRunts.setStatus('mandatory') if mibBuilder.loadTexts: boardRunts.setDescription('The number of runt frames detected by this board.') boardOOWColls = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 8), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardOOWColls.setStatus('mandatory') if mibBuilder.loadTexts: boardOOWColls.setDescription('The number of out-of-window collisions detected by this board.') boardNoResources = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 9), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardNoResources.setStatus('mandatory') if mibBuilder.loadTexts: boardNoResources.setDescription('The number of times the no resources condition was detected for this board.') boardRecBytes = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 10), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardRecBytes.setStatus('mandatory') if mibBuilder.loadTexts: boardRecBytes.setDescription('The number of bytes detected on this board.') boardGiants = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 11), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardGiants.setStatus('mandatory') if mibBuilder.loadTexts: boardGiants.setDescription('The number of packets longer than 1518 bytes detected on this board.') boardOSIFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 12), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardOSIFrames.setStatus('mandatory') if mibBuilder.loadTexts: boardOSIFrames.setDescription('The number of frames with OSI protocol seen by this board.') boardNovellFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 13), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardNovellFrames.setStatus('mandatory') if mibBuilder.loadTexts: boardNovellFrames.setDescription('The number of frames with NOVELL protocol seen by this board.') boardBanyanFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 14), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardBanyanFrames.setStatus('mandatory') if mibBuilder.loadTexts: boardBanyanFrames.setDescription('The number of frames with BANYAN protocol seen by this board.') boardDECNetFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 15), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardDECNetFrames.setStatus('mandatory') if mibBuilder.loadTexts: boardDECNetFrames.setDescription('The number of frames with DECNET protocol seen by this board.') boardXNSFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 16), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardXNSFrames.setStatus('mandatory') if mibBuilder.loadTexts: boardXNSFrames.setDescription('The number of frames with XNS protocol seen by this board.') boardIPFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 17), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardIPFrames.setStatus('mandatory') if mibBuilder.loadTexts: boardIPFrames.setDescription('The number of frames with TCPIP protocol seen by this board.') boardCtronFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 18), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardCtronFrames.setStatus('mandatory') if mibBuilder.loadTexts: boardCtronFrames.setDescription('The number of frames with Cabletron protocol seen by this board.') boardAppletalkFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 19), Counter32()).setMaxAccess("readonly") if 
mibBuilder.loadTexts: boardAppletalkFrames.setStatus('mandatory') if mibBuilder.loadTexts: boardAppletalkFrames.setDescription('The number of frames with Appletalk protocol seen by this board.') boardOtherFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 20), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardOtherFrames.setStatus('mandatory') if mibBuilder.loadTexts: boardOtherFrames.setDescription('The number of frames seen by this board, that do not fall into any of the previously listed protocol catagories.') board64To127Frames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 21), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: board64To127Frames.setStatus('mandatory') if mibBuilder.loadTexts: board64To127Frames.setDescription('The number of frames seen by this board, with frame sizes between 64 and 127 bytes.') board128To255Frames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 22), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: board128To255Frames.setStatus('mandatory') if mibBuilder.loadTexts: board128To255Frames.setDescription('The number of frames seen by this board, with frame sizes between 128 and 255 bytes.') board256To511Frames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 23), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: board256To511Frames.setStatus('mandatory') if mibBuilder.loadTexts: board256To511Frames.setDescription('The number of frames seen by this board, with frame sizes between 256 and 511 bytes.') board512To1023Frames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 24), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: board512To1023Frames.setStatus('mandatory') if mibBuilder.loadTexts: board512To1023Frames.setDescription('The number of frames seen by this board, with frame sizes between 512 and 1023 bytes.') board1024To1518Frames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 25), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: board1024To1518Frames.setStatus('mandatory') if mibBuilder.loadTexts: board1024To1518Frames.setDescription('The number of frames seen by this board, with frame sizes between 1024 and 1518 bytes.') boardBroadPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 26), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardBroadPkts.setStatus('mandatory') if mibBuilder.loadTexts: boardBroadPkts.setDescription('The sum of broadcast packets detected on all ports of this board.') boardMultPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 27), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardMultPkts.setStatus('mandatory') if mibBuilder.loadTexts: boardMultPkts.setDescription('The sum of multicast packets detected on all ports of this board.') boardSrcAddrLocked = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 2, 31), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("lockOff", 1), ("lockOn", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: boardSrcAddrLocked.setStatus('mandatory') if mibBuilder.loadTexts: boardSrcAddrLocked.setDescription('If this is set to 2, any source addresses that are detected on station port that are not in the table will cause the port to be turned off.') boardTotalRingPorts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 3, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardTotalRingPorts.setStatus('mandatory') if mibBuilder.loadTexts: boardTotalRingPorts.setDescription('The total number of 
ring ports on this board.') boardTotalStationPorts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 3, 2), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardTotalStationPorts.setStatus('mandatory') if mibBuilder.loadTexts: boardTotalStationPorts.setDescription('The total number of station ports on this board.') boardModeStatus = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 3, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("mgmtMode", 1), ("autoMOde", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: boardModeStatus.setStatus('mandatory') if mibBuilder.loadTexts: boardModeStatus.setDescription('This describes the mode of this board. autoMode means the board is using the hardware defaults. mgmtMode means the board is using setting by the user.') boardTotalRingPortsOn = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 3, 4), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardTotalRingPortsOn.setStatus('mandatory') if mibBuilder.loadTexts: boardTotalRingPortsOn.setDescription('The number of enabled ring ports on this board.') boardTotalStationPortsOn = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 3, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardTotalStationPortsOn.setStatus('mandatory') if mibBuilder.loadTexts: boardTotalStationPortsOn.setDescription('The number of enabled station ports on this board.') boardSpeed = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 3, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 16))).clone(namedValues=NamedValues(("fourMHz", 4), ("sixteenMhz", 16)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: boardSpeed.setStatus('mandatory') if mibBuilder.loadTexts: boardSpeed.setDescription('The speed of the board. The board will expect to receive data at this speed.') ringSpeedFault = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 3, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("noFaultDetected", 1), ("faultDetected", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: ringSpeedFault.setStatus('mandatory') if mibBuilder.loadTexts: ringSpeedFault.setDescription('Data speed of station mismatches configuration speed of board. Board is automatically bypassed.') boardSpeedFaultPort = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 3, 8), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardSpeedFaultPort.setStatus('mandatory') if mibBuilder.loadTexts: boardSpeedFaultPort.setDescription('This object indicates which ring speed detect circuit(s) caused the fault, if there was one. The return code indicates the location on the board feeding the speed detect circuit(s). Ring in means the circuit on the back-up path between the ring out port and the ring in port. FNB means the circuit between the ring in port and the FNB. NotApplicable means that a ring Speed Fault has not occurred. Since more than one circuit can cause the fault, the return code is the sum of the following values: 1 - FNB 2 - RingIn 4 - RingOut 8 - NotApplicable ') boardFirstRingPort = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 3, 9), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: boardFirstRingPort.setStatus('mandatory') if mibBuilder.loadTexts: boardFirstRingPort.setDescription('This object defines which port on the board is the first ring port. All ring port numbers will be consecutive. 
Using this in conjunction with BoardRingPorts, all ring port numbers can be determined.') boardBypassRingPortState = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 3, 3, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("on", 2), ("illegal", 3)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: boardBypassRingPortState.setStatus('mandatory') if mibBuilder.loadTexts: boardBypassRingPortState.setDescription('This object sets and reports the state of the ring bypass relay on boards which contain a ring bypass relay. The ring bypass relay will separate only the ring ports from the FNB. The FNB only bypasses the station ports on these boards. The illegal value (3) will return an error status if set.') commonP = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 1)) ethernetP = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2)) tokenRingP = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 3)) fDDIP = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 4)) portIndex = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portIndex.setStatus('mandatory') if mibBuilder.loadTexts: portIndex.setDescription('The slot number of this port.') portMediaType = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("bnc", 1), ("aui", 2), ("tp", 3), ("tenbt", 4), ("fot", 5), ("laser", 6), ("stp", 7), ("utp", 8), ("fo", 9), ("otherMedia", 10)))).setMaxAccess("readonly") if mibBuilder.loadTexts: portMediaType.setStatus('mandatory') if mibBuilder.loadTexts: portMediaType.setDescription('The type of the port. (eg. FOT, TPT, etc).') portAdminState = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("on", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: portAdminState.setStatus('mandatory') if mibBuilder.loadTexts: portAdminState.setDescription('The administrative status of this port.') portSourceAddr = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 1, 4), OctetString()).setMaxAccess("readonly") if mibBuilder.loadTexts: portSourceAddr.setStatus('mandatory') if mibBuilder.loadTexts: portSourceAddr.setDescription('A source address that has been seen on this port.') portOper = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("notOperational", 1), ("operational", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: portOper.setStatus('mandatory') if mibBuilder.loadTexts: portOper.setDescription('Returns a value indicating whether this port is in an operational state.') portActiveUsers = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portActiveUsers.setStatus('mandatory') if mibBuilder.loadTexts: portActiveUsers.setDescription('The number of active users seen on this port.') portName = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 1, 7), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: portName.setStatus('mandatory') if mibBuilder.loadTexts: portName.setDescription('The name of the port. 
this field will only contain printable ASCII characters.') portTopologyType = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("stations", 1), ("trunk", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: portTopologyType.setStatus('mandatory') if mibBuilder.loadTexts: portTopologyType.setDescription('The topological type of the port.') portLinkStatus = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("linkSignalInactive", 1), ("linkSignalActive", 2), ("linkSignalNotSupported", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: portLinkStatus.setStatus('mandatory') if mibBuilder.loadTexts: portLinkStatus.setDescription('The state of the link signal for this port.') portStatus = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("segmented", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: portStatus.setStatus('mandatory') if mibBuilder.loadTexts: portStatus.setDescription('The network state of this port.') portTotalPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 4), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portTotalPkts.setStatus('mandatory') if mibBuilder.loadTexts: portTotalPkts.setDescription('The total number of packets this port has seen since the last reset.') portErrorPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 5), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portErrorPkts.setStatus('mandatory') if mibBuilder.loadTexts: portErrorPkts.setDescription('The number of packets that have experienced an error.') portXmitColls = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 6), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portXmitColls.setStatus('mandatory') if mibBuilder.loadTexts: portXmitColls.setDescription('The number of Transmit mode collisions this port has detected.') portRecColls = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 7), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portRecColls.setStatus('mandatory') if mibBuilder.loadTexts: portRecColls.setDescription('The number of Receive mode collisions this port has detected.') portAlignErrs = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 8), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portAlignErrs.setStatus('mandatory') if mibBuilder.loadTexts: portAlignErrs.setDescription('The number of misaligned frames this port has detected.') portCRCErrs = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 9), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portCRCErrs.setStatus('mandatory') if mibBuilder.loadTexts: portCRCErrs.setDescription('The number of packets with bad CRC detected.') portRunts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 10), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portRunts.setStatus('mandatory') if mibBuilder.loadTexts: portRunts.setDescription('The number of runt frames detected by this port.') portOOWColls = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 11), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portOOWColls.setStatus('mandatory') if mibBuilder.loadTexts: portOOWColls.setDescription('The number of out-of-window collisions detected by this port.') portNoResorces = 
MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 12), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portNoResorces.setStatus('mandatory') if mibBuilder.loadTexts: portNoResorces.setDescription('The number of times the no resource condition is detected on this port.') portRecBytes = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 13), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portRecBytes.setStatus('mandatory') if mibBuilder.loadTexts: portRecBytes.setDescription('The number of bytes detected on the network.') portGiants = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 14), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portGiants.setStatus('mandatory') if mibBuilder.loadTexts: portGiants.setDescription('The number of frames longer than 1518 bytes detected on this port.') portRedundCrt = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 15), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: portRedundCrt.setStatus('mandatory') if mibBuilder.loadTexts: portRedundCrt.setDescription('The redundant circuit with which this port is associated.') portRedundType = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notUsed", 1), ("primary", 2), ("backup", 3)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: portRedundType.setStatus('mandatory') if mibBuilder.loadTexts: portRedundType.setDescription('The type of port this port is in the redundant circuit. The port must be associated with a redundant circuit first, using the object portRedundCrt.') portRedundStatus = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notUsed", 1), ("active", 2), ("inactive", 3)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: portRedundStatus.setStatus('mandatory') if mibBuilder.loadTexts: portRedundStatus.setDescription('The current state of this port is in the redundant circuit. 
The port must be associated with a redundant circuit first, using the object portRedundCrt.') portForceTrunkType = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("notForced", 1), ("forced", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: portForceTrunkType.setStatus('mandatory') if mibBuilder.loadTexts: portForceTrunkType.setDescription('This variable forces a port to be a trunk port.') portOSIFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 19), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portOSIFrames.setStatus('mandatory') if mibBuilder.loadTexts: portOSIFrames.setDescription('The number of frames with OSI protocol seen by this port.') portNovellFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 20), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portNovellFrames.setStatus('mandatory') if mibBuilder.loadTexts: portNovellFrames.setDescription('The number of frames with NOVELL protocol seen by this port.') portBanyanFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 21), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portBanyanFrames.setStatus('mandatory') if mibBuilder.loadTexts: portBanyanFrames.setDescription('The number of frames with BANYAN protocol seen by this port.') portDECNetFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 22), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portDECNetFrames.setStatus('mandatory') if mibBuilder.loadTexts: portDECNetFrames.setDescription('The number of frames with DECNET protocol seen by this port.') portXNSFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 23), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portXNSFrames.setStatus('mandatory') if mibBuilder.loadTexts: portXNSFrames.setDescription('The number of frames with XNS protocol seen by this port.') portIPFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 24), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portIPFrames.setStatus('mandatory') if mibBuilder.loadTexts: portIPFrames.setDescription('The number of frames with TCPIP protocol seen by this port.') portCtronFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 25), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portCtronFrames.setStatus('mandatory') if mibBuilder.loadTexts: portCtronFrames.setDescription('The number of frames with Cabletron protocol seen by this port.') portAppletalkFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 26), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portAppletalkFrames.setStatus('mandatory') if mibBuilder.loadTexts: portAppletalkFrames.setDescription('The number of frames with Appletalk protocol seen by this port.') portOtherFrames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 27), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portOtherFrames.setStatus('mandatory') if mibBuilder.loadTexts: portOtherFrames.setDescription('The number of frames seen by this port, that do not fall into any of the previously listed protocol catagories.') port64To127Frames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 28), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: port64To127Frames.setStatus('mandatory') if mibBuilder.loadTexts: port64To127Frames.setDescription('The number of frames seen by this port, with frame sizes between 64 and 127 bytes.') port128To255Frames = 
MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 29), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: port128To255Frames.setStatus('mandatory') if mibBuilder.loadTexts: port128To255Frames.setDescription('The number of frames seen by this port, with frame sizes between 128 and 255 bytes.') port256To511Frames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 30), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: port256To511Frames.setStatus('mandatory') if mibBuilder.loadTexts: port256To511Frames.setDescription('The number of frames seen by this port, with frame sizes between 256 and 511 bytes.') port512To1023Frames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 31), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: port512To1023Frames.setStatus('mandatory') if mibBuilder.loadTexts: port512To1023Frames.setDescription('The number of frames seen by this port, with frame sizes between 512 and 1023 bytes.') port1024To1518Frames = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 32), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: port1024To1518Frames.setStatus('mandatory') if mibBuilder.loadTexts: port1024To1518Frames.setDescription('The number of frames seen by this port, with frame sizes between 1024 and 1518 bytes.') portBroadPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 33), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portBroadPkts.setStatus('mandatory') if mibBuilder.loadTexts: portBroadPkts.setDescription('The sum of broadcast packets detected on this port.') portMultPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 34), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: portMultPkts.setStatus('mandatory') if mibBuilder.loadTexts: portMultPkts.setDescription('The sum of multicast packets detected on this port.') portSrcAddrLocked = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 38), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("lockOff", 1), ("lockOn", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: portSrcAddrLocked.setStatus('mandatory') if mibBuilder.loadTexts: portSrcAddrLocked.setDescription('If this is set to 2, any source addresses that are detected on station ports that are not in the table will cause the port to be turned off.') stationP = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 3, 1)) ringP = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 3, 2)) ringPort2 = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 3, 3)) commonRP = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 3, 3, 1)) autowrapRP = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 3, 3, 2)) stationPortLinkStatus = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("linkSignalInactive", 1), ("linkSignalActive", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: stationPortLinkStatus.setStatus('mandatory') if mibBuilder.loadTexts: stationPortLinkStatus.setDescription('When station is supplying phantom current the Link Signal is active. 
When there is no phantom current the Link Signal is inactive.') stationPortLinkStateTime = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 3, 1, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: stationPortLinkStateTime.setStatus('mandatory') if mibBuilder.loadTexts: stationPortLinkStateTime.setDescription('The amount of in seconds which have elapsed since the station port last changed state. Note: setting this to zero will clear the time.') ringPortLinkStatus = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 3, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("on", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: ringPortLinkStatus.setStatus('obsolete') if mibBuilder.loadTexts: ringPortLinkStatus.setDescription('Reports the link state of a ring port.') ringPortLinkStateTime = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 3, 2, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ringPortLinkStateTime.setStatus('obsolete') if mibBuilder.loadTexts: ringPortLinkStateTime.setDescription("Reports the time (in seconds) since the ring port's link state changed.") commonRPcapabilities = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 3, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("nonAutowrap", 1), ("autowrap", 2), ("selectable", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: commonRPcapabilities.setStatus('mandatory') if mibBuilder.loadTexts: commonRPcapabilities.setDescription('This object returns a code indicating that the queried ring port is nonautowrap, autowrap or selectable.') autowrapRPFaultStatus = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 3, 3, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("faultUndetectable", 1), ("noFaultDetected", 2), ("faultDetected", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: autowrapRPFaultStatus.setStatus('mandatory') if mibBuilder.loadTexts: autowrapRPFaultStatus.setDescription('This object reports the wire fault of an autowrappable ring port.') autowrapRPFaultStateTime = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 3, 3, 2, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: autowrapRPFaultStateTime.setStatus('mandatory') if mibBuilder.loadTexts: autowrapRPFaultStateTime.setDescription('This object reports the amount of time in time ticks since the ring port fault state last changed. Writing zero to this object resets the time. If a zero is read, it means either zero time ticks have passed or the ring port is nonautowrap.') autowrapRPSelectedType = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 3, 3, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unselectable", 1), ("stp", 2), ("fo", 3)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: autowrapRPSelectedType.setStatus('mandatory') if mibBuilder.loadTexts: autowrapRPSelectedType.setDescription('This object indicates the media type of the indicated ring port on some boards. Some boards provide both STP and fiber optic ring ports. The unselectable value will return an error if set. A set of STP or FO for a non-selectable ring port will not do anything, i.e. 
the status of normal is returned.') autowrapRPPhantomCurrent = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 3, 3, 2, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noPhantomAvailable", 1), ("activatePhantom", 2), ("deactivatePhantom", 3)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: autowrapRPPhantomCurrent.setStatus('mandatory') if mibBuilder.loadTexts: autowrapRPPhantomCurrent.setDescription('This object tells the board to turn on phantom current for the ring port and look for wire faults. NoPhantomAvailable will return an error if set. A set of activatePhantom or deactivatePhantom for a non-autowrap ring port will not do anything, i.e. the status of normal is returned.') fNB = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 1, 6, 1, 1)) connectedLeft = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 6, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("detached", 1), ("attached", 2), ("faulted", 3)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: connectedLeft.setStatus('mandatory') if mibBuilder.loadTexts: connectedLeft.setDescription('The Connected Left board (board n + 1) has the following states: Detached == 1 (Management (only management) detached, read/write). Attached == 2 (Management/AutoMode attached, read/write). Faulted == 3 (Management/AutoMode tried to attach but failed READ ONLY). ') connectedRight = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 6, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("detached", 1), ("attached", 2), ("faulted", 3)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: connectedRight.setStatus('mandatory') if mibBuilder.loadTexts: connectedRight.setDescription('The Connected Right board (board n - 1) has the following states: Detached == 1 (Management (only management) detached, read/write). Attached == 2 (Management/AutoMode attached, read/write ). Faulted == 3 (Management/AutoMode tried to attach but failed READ ONLY). ') boardBypassState = MibScalar((1, 3, 6, 1, 4, 1, 52, 1, 6, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("on", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: boardBypassState.setStatus('mandatory') if mibBuilder.loadTexts: boardBypassState.setDescription('This object indicates that the board is in the bypassed state when it is ON. 
Bypassed means the board is NOT attached to the FNB.') mibBuilder.exportSymbols("IRM3-MIB", portTotalPkts=portTotalPkts, deviceAppletalkFrames=deviceAppletalkFrames, deviceCtronFrames=deviceCtronFrames, deviceRecColls=deviceRecColls, portTrafficEnable=portTrafficEnable, portBroadEnable=portBroadEnable, boardTotalPkts=boardTotalPkts, boardOOWColls=boardOOWColls, fDDIB=fDDIB, bdBroadEnable=bdBroadEnable, boardXNSFrames=boardXNSFrames, ringPort2=ringPort2, sourceAddrPort=sourceAddrPort, portIndex=portIndex, deviceTotalErrors=deviceTotalErrors, portBroad=portBroad, deviceTransmitColls=deviceTransmitColls, boardOtherFrames=boardOtherFrames, portStatus=portStatus, boardGiants=boardGiants, stationP=stationP, devColls=devColls, portR2=portR2, deviceRecBytes=deviceRecBytes, boardSrcAddrLocked=boardSrcAddrLocked, portAlignErrs=portAlignErrs, boardFirstRingPort=boardFirstRingPort, commonB=commonB, portCollsPortDisable=portCollsPortDisable, redundancyCircuitMACAddrDisp=redundancyCircuitMACAddrDisp, deviceAlignErrs=deviceAlignErrs, redundancyCircuitMACAddrAdd=redundancyCircuitMACAddrAdd, portActiveUsers=portActiveUsers, portOOWColls=portOOWColls, deviceBroadPkts=deviceBroadPkts, boardTotalStationPorts=boardTotalStationPorts, portErrorThreshold=portErrorThreshold, portRecColls=portRecColls, devBroadThreshold=devBroadThreshold, port=port, devErrorSource=devErrorSource, deviceDiscover=deviceDiscover, portRedundType=portRedundType, deviceGiantFrames=deviceGiantFrames, redundancyNumBPs=redundancyNumBPs, device64To127Frames=device64To127Frames, deviceSrcAddrType=deviceSrcAddrType, boardIPFrames=boardIPFrames, portErrorPortDisable=portErrorPortDisable, autowrapRPPhantomCurrent=autowrapRPPhantomCurrent, ctIPDefaultFrameType=ctIPDefaultFrameType, alarm=alarm, portBroadPkts=portBroadPkts, fNB=fNB, portTraffic=portTraffic, autowrapRPFaultStateTime=autowrapRPFaultStateTime, bdErrorSource=bdErrorSource, deviceTotalPkts=deviceTotalPkts, portError=portError, fDDIP=fDDIP, deviceLinkTraps=deviceLinkTraps, boardType=boardType, boardOper=boardOper, deviceEtherOccupiedSlots=deviceEtherOccupiedSlots, redundancyRetryCount=redundancyRetryCount, bdCollsBdDisable=bdCollsBdDisable, deviceImimOccupiedSlots=deviceImimOccupiedSlots, device=device, deviceXNSFrames=deviceXNSFrames, deviceResetRedundancy=deviceResetRedundancy, devBroadEnable=devBroadEnable, board256To511Frames=board256To511Frames, devBroad=devBroad, deviceTRPorts=deviceTRPorts, portRunts=portRunts, deviceR2=deviceR2, boardAlignErrs=boardAlignErrs, deviceSrcAddrAgingTime=deviceSrcAddrAgingTime, boardMultPkts=boardMultPkts, deviceBanyanFrames=deviceBanyanFrames, deviceTimeBase=deviceTimeBase, portOtherFrames=portOtherFrames, boardBypassState=boardBypassState, portNoResorces=portNoResorces, bdErrorBdDisable=bdErrorBdDisable, bdBroadDisable=bdBroadDisable, boardName=boardName, commonRP=commonRP, boardModeStatus=boardModeStatus, portBanyanFrames=portBanyanFrames, boardDECNetFrames=boardDECNetFrames, portRedundCrt=portRedundCrt, redundancyCircuitTypes=redundancyCircuitTypes, deviceRestart=deviceRestart, deviceTRBoards=deviceTRBoards, portCollsEnable=portCollsEnable, deviceSrcAddrTraps=deviceSrcAddrTraps, deviceSlots=deviceSlots, deviceDECNetFrames=deviceDECNetFrames, devErrorThreshold=devErrorThreshold, boardSpeedFaultPort=boardSpeedFaultPort, redundancyCircuitReset=redundancyCircuitReset, boardRecBytes=boardRecBytes, portAdminState=portAdminState, devAlrm=devAlrm, deviceOOWColls=deviceOOWColls, deviceIPFrames=deviceIPFrames, bdCollsEnable=bdCollsEnable, 
portBroadThreshold=portBroadThreshold, devTrafficEnable=devTrafficEnable, portTopologyType=portTopologyType, portMultPkts=portMultPkts, deviceMultPkts=deviceMultPkts, deviceTRBoardMap=deviceTRBoardMap, port512To1023Frames=port512To1023Frames, deviceMMACType=deviceMMACType, redundancyTestTod=redundancyTestTod, portRecBytes=portRecBytes, deviceFDDIOccupiedSlots=deviceFDDIOccupiedSlots, bdBroadThreshold=bdBroadThreshold, portSrcAddrLocked=portSrcAddrLocked, bdAlrm=bdAlrm, devicePortsOn=devicePortsOn, redundancyPollInterval=redundancyPollInterval, portErrorEnable=portErrorEnable, deviceRunts=deviceRunts, deviceNoResources=deviceNoResources, portColls=portColls, networkR2=networkR2, boardRunts=boardRunts, device128To255Frames=device128To255Frames, portOSIFrames=portOSIFrames, board=board, sourceAddr=sourceAddr, portSourceAddr=portSourceAddr, deviceOtherFrames=deviceOtherFrames, deviceActiveUsers=deviceActiveUsers, board64To127Frames=board64To127Frames, boardNoResources=boardNoResources, boardActiveUsers=boardActiveUsers, deviceRedundantCts=deviceRedundantCts, bdCollsThreshold=bdCollsThreshold, deviceTRRingPortsOn=deviceTRRingPortsOn, boardTotalPorts=boardTotalPorts, redundancyCircuitEnable=redundancyCircuitEnable, deviceRestoreDefaults=deviceRestoreDefaults, redundancyCircuitPort=redundancyCircuitPort, autowrapRP=autowrapRP, ethernetP=ethernetP, devCollsEnable=devCollsEnable, ringP=ringP, portAppletalkFrames=portAppletalkFrames, deviceOSIFrames=deviceOSIFrames, portCollsThreshold=portCollsThreshold, boardStatus=boardStatus, connectedRight=connectedRight, deviceTRLans=deviceTRLans, boardTotalStationPortsOn=boardTotalStationPortsOn, ringPortLinkStateTime=ringPortLinkStateTime, portOper=portOper, tokenRingD=tokenRingD, deviceTotalPorts=deviceTotalPorts, deviceResetCounters=deviceResetCounters, bdTrafficEnable=bdTrafficEnable, redundancyCircuitName=redundancyCircuitName, bdBroad=bdBroad, boardCRCErrs=boardCRCErrs, tokenRingB=tokenRingB, portMediaType=portMediaType, commonP=commonP, board512To1023Frames=board512To1023Frames, autowrapRPSelectedType=autowrapRPSelectedType, bdTrafficThreshold=bdTrafficThreshold, portGiants=portGiants, bdTraffic=bdTraffic, boardRecColls=boardRecColls, portErrorSource=portErrorSource, stationPortLinkStateTime=stationPortLinkStateTime, portTrafficPortDisable=portTrafficPortDisable, boardBroadPkts=boardBroadPkts, portXmitColls=portXmitColls, commonRPcapabilities=commonRPcapabilities, deviceOccupiedSlots=deviceOccupiedSlots, boardPortsOn=boardPortsOn, redundancyCircuitMACAddrDel=redundancyCircuitMACAddrDel, connectedLeft=connectedLeft, sourceAddrBoard=sourceAddrBoard, deviceTROccupiedSlots=deviceTROccupiedSlots, portCtronFrames=portCtronFrames, boardNovellFrames=boardNovellFrames, ethernetB=ethernetB, ringPortLinkStatus=ringPortLinkStatus, devTraffic=devTraffic, boardBanyanFrames=boardBanyanFrames, boardSpeed=boardSpeed, portRedundStatus=portRedundStatus, board1024To1518Frames=board1024To1518Frames, portIPFrames=portIPFrames, bdErrorThreshold=bdErrorThreshold, devCollsThreshold=devCollsThreshold, devError=devError, boardOSIFrames=boardOSIFrames, redundancy=redundancy, boardTotalRingPorts=boardTotalRingPorts, portTrafficThreshold=portTrafficThreshold, portBroadDisable=portBroadDisable, boardIndex=boardIndex, boardBypassRingPortState=boardBypassRingPortState, redundancyPerformTest=redundancyPerformTest, tokenRingP=tokenRingP, deviceTRRingPorts=deviceTRRingPorts, portDECNetFrames=portDECNetFrames, port1024To1518Frames=port1024To1518Frames, deviceTRPortsOn=deviceTRPortsOn, 
port256To511Frames=port256To511Frames, bdColls=bdColls, port128To255Frames=port128To255Frames, portCRCErrs=portCRCErrs, boardTransColls=boardTransColls, portForceTrunkType=portForceTrunkType, boardCtronFrames=boardCtronFrames, redundancyCircuitBoard=redundancyCircuitBoard, portLinkStatus=portLinkStatus, boardAppletalkFrames=boardAppletalkFrames, deviceSrcAddrLocked=deviceSrcAddrLocked, boardErrorPkts=boardErrorPkts, portAlrm=portAlrm, portXNSFrames=portXNSFrames, portName=portName, stationPortLinkStatus=stationPortLinkStatus, port64To127Frames=port64To127Frames, deviceCRCErrs=deviceCRCErrs, devTrafficThreshold=devTrafficThreshold, redundancyCircuitNumAddr=redundancyCircuitNumAddr, device1024To1518Frames=device1024To1518Frames, portNovellFrames=portNovellFrames, portErrorPkts=portErrorPkts, autowrapRPFaultStatus=autowrapRPFaultStatus, bdErrorEnable=bdErrorEnable, ringSpeedFault=ringSpeedFault, devErrorEnable=devErrorEnable, deviceThdPartyOccupiedSlots=deviceThdPartyOccupiedSlots, device256To511Frames=device256To511Frames, boardTotalRingPortsOn=boardTotalRingPortsOn, board128To255Frames=board128To255Frames, boardR2=boardR2, bdError=bdError, bdTrafficBdDisable=bdTrafficBdDisable, deviceNovellFrames=deviceNovellFrames, device512To1023Frames=device512To1023Frames)
129.43586
8,310
0.767639
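The record above is a pysmi-generated pysnmp module (IRM3-MIB). As a hedged sketch of how such a compiled MIB module might be loaded and queried with pysnmp's MIB builder — the './compiled-mibs' directory is a placeholder assumption for wherever the generated .py file is placed:

# Sketch: loading a pysmi-compiled MIB such as IRM3-MIB with pysnmp.
from pysnmp.smi import builder

mib_builder = builder.MibBuilder()
mib_builder.addMibSources(builder.DirMibSource('./compiled-mibs'))  # placeholder path
mib_builder.loadModules('IRM3-MIB')

# Resolve one of the symbols exported above to its OID.
port_rec_bytes, = mib_builder.importSymbols('IRM3-MIB', 'portRecBytes')
print(port_rec_bytes.getName())  # expected: (1, 3, 6, 1, 4, 1, 52, 1, 2, 2, 4, 2, 13)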
795746e2838221b7c4057fbd90ee465a1fc136b3
1,752
py
Python
biobb_adapters/pycompss/biobb_ml/classification/k_neighbors.py
bioexcel/biobb_adapters
45f32feac328cb05f28038b2b00a7416fcae3178
[ "Apache-2.0" ]
null
null
null
biobb_adapters/pycompss/biobb_ml/classification/k_neighbors.py
bioexcel/biobb_adapters
45f32feac328cb05f28038b2b00a7416fcae3178
[ "Apache-2.0" ]
4
2019-03-04T15:22:06.000Z
2021-09-24T14:43:48.000Z
biobb_adapters/pycompss/biobb_ml/classification/k_neighbors.py
bioexcel/biobb_adapters
45f32feac328cb05f28038b2b00a7416fcae3178
[ "Apache-2.0" ]
2
2020-09-08T05:26:23.000Z
2022-03-28T07:09:20.000Z
# Python
import os
import sys
import traceback

# Pycompss
from pycompss.api.task import task
from pycompss.api.parameter import FILE_IN, FILE_OUT

# Adapters commons pycompss
from biobb_adapters.pycompss.biobb_commons import task_config

# Wrapped Biobb
from biobb_ml.classification.k_neighbors import KNeighborsTrain  # Importing class instead of module to avoid name collision

task_time_out = int(os.environ.get('TASK_TIME_OUT', 0))


@task(input_dataset_path=FILE_IN, output_model_path=FILE_OUT, output_test_table_path=FILE_OUT, output_plot_path=FILE_OUT, on_failure="IGNORE", time_out=task_time_out)
def _kneighborstrain(input_dataset_path, output_model_path, output_test_table_path, output_plot_path, properties, **kwargs):
    task_config.pop_pmi(os.environ)
    try:
        KNeighborsTrain(input_dataset_path=input_dataset_path, output_model_path=output_model_path, output_test_table_path=output_test_table_path, output_plot_path=output_plot_path, properties=properties, **kwargs).launch()
    except Exception as e:
        traceback.print_exc()
        raise e
    finally:
        sys.stdout.flush()
        sys.stderr.flush()


def k_neighbors(input_dataset_path, output_model_path, output_test_table_path=None, output_plot_path=None, properties=None, **kwargs):
    if (output_model_path is None or os.path.exists(output_model_path)) and \
       (output_test_table_path is None or os.path.exists(output_test_table_path)) and \
       (output_plot_path is None or os.path.exists(output_plot_path)) and \
       True:
        print("WARN: Task KNeighborsTrain already executed.")
    else:
        _kneighborstrain(
            input_dataset_path,
            output_model_path,
            output_test_table_path,
            output_plot_path,
            properties,
            **kwargs)
43.8
223
0.77911
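A hypothetical invocation of the PyCOMPSs adapter defined in the record above. The module path mirrors the file path shown in the metadata; the data paths and the properties key are made-up placeholders, and in practice the call is launched through a PyCOMPSs runtime:

# Sketch only: calling the k_neighbors adapter with placeholder paths.
from biobb_adapters.pycompss.biobb_ml.classification.k_neighbors import k_neighbors

k_neighbors(
    input_dataset_path='dataset.csv',         # placeholder input
    output_model_path='knn_model.pkl',        # placeholder outputs
    output_test_table_path='test_table.csv',
    output_plot_path='knn_plot.png',
    properties={'n_neighbors': 5},            # assumed property key; check the biobb_ml docs
)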
7957476121e66d46f0a061037c729c83c99a9e19
6,842
py
Python
tensorflow/lite/micro/tools/make/transform_source.py
leike666666/tensorflow
a3fd0ddfcb716be124e95b51e96e6c1e4507ef64
[ "Apache-2.0" ]
12
2020-12-28T18:42:10.000Z
2022-03-24T17:34:21.000Z
tensorflow/lite/micro/tools/make/transform_source.py
leike666666/tensorflow
a3fd0ddfcb716be124e95b51e96e6c1e4507ef64
[ "Apache-2.0" ]
2
2021-08-25T15:58:11.000Z
2022-02-10T01:47:24.000Z
tensorflow/lite/micro/tools/make/transform_source.py
leike666666/tensorflow
a3fd0ddfcb716be124e95b51e96e6c1e4507ef64
[ "Apache-2.0" ]
3
2020-03-09T19:17:02.000Z
2020-06-26T23:14:31.000Z
# Lint as: python2, python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Resolves non-system C/C++ includes to their full paths. Used to generate Arduino and ESP-IDF examples. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import os import re import sys import six EXAMPLE_DIR_PATH = 'tensorflow/lite/micro/examples/' def replace_arduino_includes(line, supplied_headers_list): """Updates any includes to reference the new Arduino library paths.""" include_match = re.match(r'(.*#include.*")(.*)(")', line) if include_match: path = include_match.group(2) for supplied_header in supplied_headers_list: if six.ensure_str(supplied_header).endswith(path): path = supplied_header break line = include_match.group(1) + six.ensure_str(path) + include_match.group( 3) return line def replace_arduino_main(line): """Updates any occurences of a bare main definition to the Arduino equivalent.""" main_match = re.match(r'(.*int )(main)(\(.*)', line) if main_match: line = main_match.group(1) + 'tflite_micro_main' + main_match.group(3) return line def check_ino_functions(input_text): """Ensures the required functions exist.""" # We're moving to an Arduino-friendly structure for all our examples, so they # have to have a setup() and loop() function, just like their IDE expects. if not re.search(r'void setup\(\) \{', input_text): raise Exception( 'All examples must have a setup() function for Arduino compatiblity\n' + input_text) if not re.search(r'void loop\(\) \{', input_text): raise Exception( 'All examples must have a loop() function for Arduino compatiblity') return input_text def add_example_ino_library_include(input_text): """Makes sure the example includes the header that loads the library.""" return re.sub(r'#include ', '#include <TensorFlowLite.h>\n\n#include ', input_text, 1) def replace_ardunio_example_includes(line, _): """Updates any includes for local example files.""" # Because the export process moves the example source and header files out of # their default locations into the top-level 'examples' folder in the Arduino # library, we have to update any include references to match. dir_path = 'tensorflow/lite/micro/examples/' include_match = re.match( r'(.*#include.*")' + six.ensure_str(dir_path) + r'([^/]+)/(.*")', line) if include_match: flattened_name = re.sub(r'/', '_', include_match.group(3)) line = include_match.group(1) + flattened_name return line def replace_esp_example_includes(line, source_path): """Updates any includes for local example files.""" # Because the export process moves the example source and header files out of # their default locations into the top-level 'main' folder in the ESP-IDF # project, we have to update any include references to match. 
include_match = re.match(r'.*#include.*"(' + EXAMPLE_DIR_PATH + r'.*)"', line) if include_match: # Compute the target path relative from the source's directory target_path = include_match.group(1) source_dirname = os.path.dirname(source_path) rel_to_target = os.path.relpath(target_path, start=source_dirname) line = '#include "%s"' % rel_to_target return line def transform_arduino_sources(input_lines, flags): """Transform sources for the Arduino platform. Args: input_lines: A sequence of lines from the input file to process. flags: Flags indicating which transformation(s) to apply. Returns: The transformed output as a string. """ supplied_headers_list = six.ensure_str(flags.third_party_headers).split(' ') output_lines = [] for line in input_lines: line = replace_arduino_includes(line, supplied_headers_list) if flags.is_example_ino or flags.is_example_source: line = replace_ardunio_example_includes(line, flags.source_path) else: line = replace_arduino_main(line) output_lines.append(line) output_text = '\n'.join(output_lines) if flags.is_example_ino: output_text = check_ino_functions(output_text) output_text = add_example_ino_library_include(output_text) return output_text def transform_esp_sources(input_lines, flags): """Transform sources for the ESP-IDF platform. Args: input_lines: A sequence of lines from the input file to process. flags: Flags indicating which transformation(s) to apply. Returns: The transformed output as a string. """ output_lines = [] for line in input_lines: if flags.is_example_source: line = replace_esp_example_includes(line, flags.source_path) output_lines.append(line) output_text = '\n'.join(output_lines) return output_text def main(unused_args, flags): """Transforms the input source file to work when exported as example.""" input_file_lines = sys.stdin.read().split('\n') output_text = '' if flags.platform == 'arduino': output_text = transform_arduino_sources(input_file_lines, flags) elif flags.platform == 'esp': output_text = transform_esp_sources(input_file_lines, flags) sys.stdout.write(output_text) def parse_args(): """Converts the raw arguments into accessible flags.""" parser = argparse.ArgumentParser() parser.add_argument( '--platform', choices=['arduino', 'esp'], required=True, help='Target platform.') parser.add_argument( '--third_party_headers', type=str, default='', help='Space-separated list of headers to resolve.') parser.add_argument( '--is_example_ino', dest='is_example_ino', action='store_true', help='Whether the destination is an example main ino.') parser.add_argument( '--is_example_source', dest='is_example_source', action='store_true', help='Whether the destination is an example cpp or header file.') parser.add_argument( '--source_path', type=str, default='', help='The relative path of the source code file.') flags, unparsed = parser.parse_known_args() main(unparsed, flags) if __name__ == '__main__': parse_args()
33.213592
83
0.708857
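To make the include-rewriting step of the script above concrete, here is a small standalone check. It copies replace_arduino_includes from the file so the snippet runs on its own; the header path in the example list is an illustrative assumption:

# Standalone illustration of the Arduino include rewrite performed above.
import re
import six

def replace_arduino_includes(line, supplied_headers_list):
  include_match = re.match(r'(.*#include.*")(.*)(")', line)
  if include_match:
    path = include_match.group(2)
    for supplied_header in supplied_headers_list:
      if six.ensure_str(supplied_header).endswith(path):
        path = supplied_header
        break
    line = include_match.group(1) + six.ensure_str(path) + include_match.group(3)
  return line

headers = ['third_party/flatbuffers/include/flatbuffers/flatbuffers.h']  # illustrative
print(replace_arduino_includes('#include "flatbuffers/flatbuffers.h"', headers))
# -> #include "third_party/flatbuffers/include/flatbuffers/flatbuffers.h"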
7957476ae52a292df3c0fecbbdc42b75d5b87837
5,173
py
Python
tests/test_kldiv.py
VirgiAgl/GPflow
95e77a5f2fe1514a30f87b5ed03ad72bbce8dead
[ "Apache-2.0" ]
null
null
null
tests/test_kldiv.py
VirgiAgl/GPflow
95e77a5f2fe1514a30f87b5ed03ad72bbce8dead
[ "Apache-2.0" ]
null
null
null
tests/test_kldiv.py
VirgiAgl/GPflow
95e77a5f2fe1514a30f87b5ed03ad72bbce8dead
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 the GPflow authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.from __future__ import print_function # -*- coding: utf-8 -*- import numpy as np import tensorflow as tf import gpflow from gpflow.kullback_leiblers import gauss_kl from numpy.testing import assert_almost_equal import pytest from gpflow import settings from gpflow.test_util import session_tf def squareT(A): """ Returns (A Aᵀ) """ return A.dot(A.T) def make_sqrt_data(rng, N, M): return np.array([np.tril(rng.randn(M, M)) for _ in range(N)]) # N x M x M def make_K_batch_data(rng, N, M): K_np = rng.randn(N, M, M) beye = np.array([np.eye(M) for _ in range(N)]) return .1 * (K_np + np.transpose(K_np, (0, 2, 1))) + beye class Datum: M, N = 5, 4 rng = np.random.RandomState(0) mu_data = rng.randn(M, N) # M x N K_data = squareT(rng.randn(M, M)) + 1e-6 * np.eye(M) # M x M I = np.eye(M) # M x M sqrt_data = make_sqrt_data(rng, N, M) # N x M x M sqrt_diag_data = rng.randn(M, N) # M x N K_batch_data = make_K_batch_data(rng, N, M) @pytest.fixture def mu(session_tf): return tf.convert_to_tensor(Datum.mu_data) @pytest.fixture def sqrt_diag(session_tf): return tf.convert_to_tensor(Datum.sqrt_diag_data) @pytest.fixture def K(session_tf): return tf.convert_to_tensor(Datum.K_data) @pytest.fixture def K_batch(session_tf): return tf.convert_to_tensor(Datum.K_batch_data) @pytest.fixture def sqrt(session_tf): return tf.convert_to_tensor(Datum.sqrt_data) @pytest.fixture() def I(session_tf): return tf.convert_to_tensor(Datum.I) @pytest.mark.parametrize('white', [True, False]) def test_diags(session_tf, white, mu, sqrt_diag, K): """ The covariance of q(x) can be Cholesky matrices or diagonal matrices. Here we make sure the behaviours overlap. """ # the chols are diagonal matrices, with the same entries as the diag representation. 
chol_from_diag = tf.stack([tf.diag(sqrt_diag[:, i]) for i in range(Datum.N)]) # N x M x M # run kl_diag = gauss_kl(mu, sqrt_diag, K if white else None) kl_dense = gauss_kl(mu, chol_from_diag, K if white else None) np.testing.assert_allclose(kl_diag.eval(), kl_dense.eval()) @pytest.mark.parametrize('diag', [True, False]) def test_whitened(session_tf, diag, mu, sqrt_diag, I): """ Check that K=Identity and K=None give same answer """ chol_from_diag = tf.stack([tf.diag(sqrt_diag[:, i]) for i in range(Datum.N)]) # N x M x M s = sqrt_diag if diag else chol_from_diag kl_white = gauss_kl(mu, s) kl_nonwhite = gauss_kl(mu, s, I) np.testing.assert_allclose(kl_white.eval(), kl_nonwhite.eval()) @pytest.mark.parametrize('shared_k', [True, False]) @pytest.mark.parametrize('diag', [True, False]) def test_sumkl_equals_batchkl(session_tf, shared_k, diag, mu, sqrt, sqrt_diag, K_batch, K): """ gauss_kl implicitely performs a sum of KL divergences This test checks that doing the sum outside of the function is equivalent For q(X)=prod q(x_l) and p(X)=prod p(x_l), check that sum KL(q(x_l)||p(x_l)) = KL(q(X)||p(X)) Here, q(X) has covariance L x M x M p(X) has covariance L x M x M ( or M x M ) Here, q(x_i) has covariance 1 x M x M p(x_i) has covariance M x M """ s = sqrt_diag if diag else sqrt kl_batch = gauss_kl(mu,s,K if shared_k else K_batch) kl_sum = [] for n in range(Datum.N): kl_sum.append(gauss_kl(mu[:, n][:,None], # M x 1 sqrt_diag[:, n][:, None] if diag else sqrt[n, :, :][None, :, :], # 1 x M x M or M x 1 K if shared_k else K_batch[n, :, :][None,:,:])) # 1 x M x M or M x M kl_sum =tf.reduce_sum(kl_sum) assert_almost_equal(kl_sum.eval(), kl_batch.eval()) def tf_kl_1d(q_mu, q_sigma, p_var=1.0): p_var = tf.ones_like(q_sigma) if p_var is None else p_var q_var = tf.square(q_sigma) kl = 0.5 * (q_var / p_var + tf.square(q_mu) / p_var - 1 + tf.log(p_var / q_var)) return tf.reduce_sum(kl) @pytest.mark.parametrize('white', [True, False]) def test_oned(session_tf, white, mu, sqrt, K_batch): """ Check that the KL divergence matches a 1D by-hand calculation. """ m = 0 mu1d = mu[m,:][None,:] # 1 x N s1d = sqrt[:,m,m][:,None,None] # N x 1 x 1 K1d = K_batch[:,m,m][:,None,None] # N x 1 x 1 kl = gauss_kl(mu1d,s1d,K1d if not white else None) kl_tf = tf_kl_1d(tf.reshape(mu1d,(-1,)), # N tf.reshape(s1d,(-1,)), # N None if white else tf.reshape(K1d,(-1,))) # N np.testing.assert_allclose(kl.eval(), kl_tf.eval()) if __name__ == "__main__": tf.test.main()
34.486667
97
0.661318
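For reference, tf_kl_1d in the test file above encodes the standard closed form for the KL divergence between one-dimensional Gaussians:

    KL( N(mu, s^2) || N(0, p_var) ) = 0.5 * ( s^2 / p_var + mu^2 / p_var - 1 + ln(p_var / s^2) )

This matches the implementation term by term (q_var / p_var, q_mu^2 / p_var, -1, log(p_var / q_var)), and test_oned checks gauss_kl against the sum of these one-dimensional terms.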
7957488a3bde5ca46aeebfde0d6250394e842e78
2,788
py
Python
python/ray/tune/examples/pbt_ppo_example.py
zhu-eric/ray
8903bcd0c325f76f2642eb542140bdde5a94f7ac
[ "Apache-2.0" ]
1
2019-10-07T17:20:01.000Z
2019-10-07T17:20:01.000Z
python/ray/tune/examples/pbt_ppo_example.py
zhu-eric/ray
8903bcd0c325f76f2642eb542140bdde5a94f7ac
[ "Apache-2.0" ]
null
null
null
python/ray/tune/examples/pbt_ppo_example.py
zhu-eric/ray
8903bcd0c325f76f2642eb542140bdde5a94f7ac
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
"""Example of using PBT with RLlib.

Note that this requires a cluster with at least 8 GPUs in order for all trials
to run concurrently, otherwise PBT will round-robin train the trials which
is less efficient (or you can set {"gpu": 0} to use CPUs for SGD instead).

Note that Tune in general does not need 8 GPUs, and this is just a more
computationally demanding example.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import random

import ray
from ray.tune import run, sample_from
from ray.tune.schedulers import PopulationBasedTraining

if __name__ == "__main__":

    # Postprocess the perturbed config to ensure it's still valid
    def explore(config):
        # ensure we collect enough timesteps to do sgd
        if config["train_batch_size"] < config["sgd_minibatch_size"] * 2:
            config["train_batch_size"] = config["sgd_minibatch_size"] * 2
        # ensure we run at least one sgd iter
        if config["num_sgd_iter"] < 1:
            config["num_sgd_iter"] = 1
        return config

    pbt = PopulationBasedTraining(
        time_attr="time_total_s",
        metric="episode_reward_mean",
        mode="max",
        perturbation_interval=120,
        resample_probability=0.25,
        # Specifies the mutations of these hyperparams
        hyperparam_mutations={
            "lambda": lambda: random.uniform(0.9, 1.0),
            "clip_param": lambda: random.uniform(0.01, 0.5),
            "lr": [1e-3, 5e-4, 1e-4, 5e-5, 1e-5],
            "num_sgd_iter": lambda: random.randint(1, 30),
            "sgd_minibatch_size": lambda: random.randint(128, 16384),
            "train_batch_size": lambda: random.randint(2000, 160000),
        },
        custom_explore_fn=explore)

    ray.init()

    run(
        "PPO",
        name="pbt_humanoid_test",
        scheduler=pbt,
        **{
            "num_samples": 8,
            "config": {
                "env": "Humanoid-v1",
                "kl_coeff": 1.0,
                "num_workers": 8,
                "num_gpus": 1,
                "model": {
                    "free_log_std": True
                },
                # These params are tuned from a fixed starting value.
                "lambda": 0.95,
                "clip_param": 0.2,
                "lr": 1e-4,
                # These params start off randomly drawn from a set.
                "num_sgd_iter": sample_from(
                    lambda spec: random.choice([10, 20, 30])),
                "sgd_minibatch_size": sample_from(
                    lambda spec: random.choice([128, 512, 2048])),
                "train_batch_size": sample_from(
                    lambda spec: random.choice([10000, 20000, 40000]))
            },
        })
35.291139
78
0.583572
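As a quick illustration of the config postprocessing in the script above (with explore from that script in scope; the numbers are arbitrary), the hook clamps an out-of-range perturbed config back into the valid region:

# Illustration only: what explore() does to an invalid perturbed config.
config = {"train_batch_size": 1000, "sgd_minibatch_size": 2048, "num_sgd_iter": 0}
config = explore(config)
assert config["train_batch_size"] == 4096  # raised to 2 * sgd_minibatch_size
assert config["num_sgd_iter"] == 1         # at least one SGD iteration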
79574a0b5ab2cff9e214c633b4eba5d63d27debd
7,654
py
Python
nanomesh/image/_plane.py
hpgem/nanomesher
06e7648ff8b9ecf4cc1faa967469db6270c0ba5d
[ "Apache-2.0" ]
null
null
null
nanomesh/image/_plane.py
hpgem/nanomesher
06e7648ff8b9ecf4cc1faa967469db6270c0ba5d
[ "Apache-2.0" ]
null
null
null
nanomesh/image/_plane.py
hpgem/nanomesher
06e7648ff8b9ecf4cc1faa967469db6270c0ba5d
[ "Apache-2.0" ]
null
null
null
from __future__ import annotations import logging from typing import TYPE_CHECKING, Union import matplotlib.pyplot as plt import numpy as np from .._doc import doc from ._image import Image from ._utils import show_image logger = logging.getLogger(__name__) if TYPE_CHECKING: from .mesh import TriangleMesh @doc(Image, prefix='Data class for working with 2D image data', shape='(i,j) ') class Plane(Image, ndim=2): def show(self, *, ax: plt.Axes = None, title: str = None, **kwargs) -> 'plt.Axes': """Plot the image using :mod:`matplotlib`. Parameters ---------- ax : matplotlib.axes.Axes, optional Axes to use for plotting. title : str, optional Title for the plot. **kwargs These parameters are passed to :func:`matplotlib.pyplot.imshow`. Returns ------- ax : matplotlib.axes.Axes Instance of :class:`matplotlib.axes.Axes` """ return show_image(self.image, ax=ax, title=title, **kwargs) @doc(show) def plot(self, *args, **kwargs): return self.show(*args, **kwargs) def generate_mesh(self, **kwargs) -> TriangleMesh: """Generate mesh from binary (segmented) image. Parameters ---------- **kwargs: Keyword arguments are passed to :func:`nanomesh.plane2mesh` Returns ------- mesh : TriangleMesh Description of the mesh. """ from nanomesh.image2mesh import plane2mesh return plane2mesh(image=self.image, **kwargs) def select_roi(self, from_points: np.ndarray = None): """Select region of interest in interactive matplotlib figure. Parameters ---------- from_points : (n, 2) numpy.ndarray, optional List of points that are used as anchors for the roi selection. Returns ------- roi : `nanomesh.image._roi2d.ROISelector` Region of interest object. Bounding box is stored in :attr:`roi.bbox`. """ from ._roi2d import ROISelector ax = self.show(title='Select region of interest') if from_points is not None: # reverse columns to match image from_points = from_points[:, ::-1] ax.scatter(*from_points.T) roi = ROISelector(ax, snap_to=from_points) return roi def crop(self, left: int, top: int, right: int, bottom: int) -> Plane: """Crop image to pixel indices. Parameters ---------- left, top, right, bottom : int Index of pixel delimiting cropping box. Returns ------- Plane New instance of :class:`Plane`. """ return Plane(self.image[top:bottom, left:right]) def crop_to_roi(self, bbox: np.ndarray) -> Plane: """Crop plane to rectangle defined by bounding box. Parameters ---------- bbox : (4,2) numpy.ndarray List of points describing region of interest. The bounding box may be rotated. Returns ------- Plane Cropped region as :class:`Plane` object. """ from ._roi2d import extract_rectangle cropped = extract_rectangle(self.image, bbox=bbox) return Plane(cropped) def compare_with_mesh(self, mesh: TriangleMesh) -> 'plt.Axes': """Make a plot comparing the image with the given mesh. Parameters ---------- mesh : TriangleMesh Mesh to compare the image with. Returns ------- plt.Axes """ from ..utils import compare_mesh_with_image return compare_mesh_with_image(image=self.image, mesh=mesh) def compare_with_digitized(self, digitized: Union[np.ndarray, 'Plane'], cmap: str = None, **kwargs) -> 'plt.Axes': """Compare image with digitized (segmented) image. Returns a plot with the overlay of the digitized image. Parameters ---------- digitized : numpy.ndarray, Plane Digitized image of the same dimensions to overlay cmap : str Matplotlib color map for :func:`matplotlib.pyplot.imshow` **kwargs These parameters are passed to :func:`skimage.color.label2rgb`. 
Returns ------- ax : matplotlib.axes.Axes """ from skimage.color import label2rgb if isinstance(digitized, Plane): digitized = digitized.image # bg_label=0 is default for scikit-image from 0.19 onwards kwargs.setdefault('bg_label', 0) image_overlay = label2rgb(digitized, image=self.image, **kwargs) fig, ax = plt.subplots() ax.imshow(image_overlay, interpolation='none', cmap=cmap) ax.axis('image') ax.set_xticks([]) ax.set_yticks([]) ax.set_title('Image comparison') return ax def compare_with_other(self, other: Union[np.ndarray, 'Plane'], cmap: str = None, **kwargs) -> 'plt.Axes': """Compare image with other image. Parameters ---------- other : numpy.ndarray, Plane Other image of the same dimensions to overlay cmap : str Matplotlib color map for :func:`matplotlib.pyplot.imshow` **kwargs These parameters are passed to :func:`skimage.util.compare_images`. Returns ------- ax : matplotlib.axes.Axes """ from skimage.util import compare_images if isinstance(other, Plane): other = other.image kwargs.setdefault('method', 'checkerboard') kwargs.setdefault('n_tiles', (4, 4)) comp = compare_images(self.image, other, **kwargs) fig, ax = plt.subplots() ax.imshow(comp, interpolation='none', cmap=cmap) ax.axis('image') ax.set_xticks([]) ax.set_yticks([]) ax.set_title(f'Image comparison ({kwargs["method"]})') return ax def clear_border(self, *, object_label: int, fill_val: int, **kwargs) -> Plane: """Remove objects at the border of the image. Parameters ---------- object_label : int Label of the objects to remove. fill_val : int Cleared objects are set to this value. **kwargs These parameters are passed to :func:`skimage.segmentation.clear_border`. Returns ------- Plane New instance of :class:`Plane`. """ from skimage import segmentation objects = (self.image == object_label).astype(int) border_cleared = segmentation.clear_border(objects, **kwargs) mask = (border_cleared != objects) out = self.image.copy() out[mask] = fill_val return self.__class__(out) def try_all_threshold(self, **kwargs): """Produce a plot trying all available thresholds using :func:`skimage.filters.try_all_threshold`. Parameters ---------- **kwargs These parameters are passed to :func:`skimage.filters.try_all_threshold`. """ from skimage import filters kwargs.setdefault('verbose', False) self.apply(filters.try_all_threshold, **kwargs)
29.666667
79
0.559838
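A hypothetical session with the Plane class from the record above. The top-level import and the constructor call are assumptions on my part (the class lives in nanomesh/image/_plane.py and appears to wrap a 2D array); the data is synthetic:

# Sketch: typical use of the Plane helpers shown above (import path assumed).
import numpy as np
from nanomesh import Plane  # assumed re-export of nanomesh.image._plane.Plane

data = np.random.random((200, 300))  # stand-in for a real 2D image
plane = Plane(data)

cropped = plane.crop(left=10, top=10, right=110, bottom=110)  # pixel-index crop
ax = cropped.show(title='Cropped region')                     # returns a matplotlib Axes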
79574aae80605727cdbb5a9b66f67e2ff2e07af0
1,859
py
Python
jesse/indicators/voss.py
slipperlobster/flipper
8482edd77604fcec2ea08913f1748c21be80dac7
[ "MIT" ]
3,999
2018-11-09T10:38:51.000Z
2022-03-31T12:29:12.000Z
jesse/indicators/voss.py
slipperlobster/flipper
8482edd77604fcec2ea08913f1748c21be80dac7
[ "MIT" ]
172
2020-04-16T16:19:08.000Z
2022-03-28T13:28:55.000Z
jesse/indicators/voss.py
pmondal08/jesse
527952a74bc76f76cf3a2d25755386f8db285885
[ "MIT" ]
495
2019-03-01T21:48:53.000Z
2022-03-30T15:35:19.000Z
from collections import namedtuple

import numpy as np

try:
    from numba import njit
except ImportError:
    njit = lambda a: a

from jesse.helpers import get_candle_source, slice_candles

VossFilter = namedtuple('VossFilter', ['voss', 'filt'])


def voss(candles: np.ndarray, period: int = 20, predict: int = 3, bandwith: float = 0.25, source_type: str = "close",
         sequential: bool = False) -> VossFilter:
    """
    Voss indicator by John F. Ehlers

    :param candles: np.ndarray
    :param period: int - default: 20
    :param predict: int - default: 3
    :param bandwith: float - default: 0.25
    :param source_type: str - default: "close"
    :param sequential: bool - default: False

    :return: float | np.ndarray
    """
    candles = slice_candles(candles, sequential)

    source = get_candle_source(candles, source_type=source_type)

    voss_val, filt = voss_fast(source, period, predict, bandwith)

    if sequential:
        return VossFilter(voss_val, filt)
    else:
        return VossFilter(voss_val[-1], filt[-1])


@njit
def voss_fast(source, period, predict, bandwith):
    voss = np.full_like(source, 0)
    filt = np.full_like(source, 0)

    pi = np.pi

    order = 3 * predict
    f1 = np.cos(2 * pi / period)
    g1 = np.cos(bandwith * 2 * pi / period)
    s1 = 1 / g1 - np.sqrt(1 / (g1 * g1) - 1)

    for i in range(source.shape[0]):
        if i > period and i > 5 and i > order:
            filt[i] = 0.5 * (1 - s1) * (source[i] - source[i - 2]) + f1 * (1 + s1) * filt[i - 1] - s1 * filt[i - 2]

    for i in range(source.shape[0]):
        if not (i <= period or i <= 5 or i <= order):
            sumc = 0
            for count in range(order):
                sumc = sumc + ((count + 1) / float(order)) * voss[i - (order - count)]
            voss[i] = ((3 + order) / 2) * filt[i] - sumc

    return voss, filt
29.507937
117
0.596557
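A small self-contained check of the filter arithmetic above, driving voss_fast directly with a synthetic series (normally voss() feeds it a candle-derived source; the sine-wave input is just an assumption for the demo, and voss_fast is taken from the module above):

# Sketch: exercising voss_fast on a synthetic oscillating series.
import numpy as np

n = 300
source = 100.0 + np.sin(np.linspace(0, 20 * np.pi, n))  # synthetic "price" series

voss_vals, filt_vals = voss_fast(source, 20, 3, 0.25)   # period, predict, bandwith
print(voss_vals[-1], filt_vals[-1])                     # most recent filter outputs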
79574b0f03627988d479aa23cb5884e1ab422412
685
py
Python
Easy/Search_Insert_Position.py
dianjiaogit/LeetCode_Python_solution
390693c839d1be8802c21ea81062443b6d5ea36f
[ "MIT" ]
null
null
null
Easy/Search_Insert_Position.py
dianjiaogit/LeetCode_Python_solution
390693c839d1be8802c21ea81062443b6d5ea36f
[ "MIT" ]
null
null
null
Easy/Search_Insert_Position.py
dianjiaogit/LeetCode_Python_solution
390693c839d1be8802c21ea81062443b6d5ea36f
[ "MIT" ]
null
null
null
# Given a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order.

# You may assume no duplicates in the array.

# Example 1:
# Input: [1,3,5,6], 5
# Output: 2

# Example 2:
# Input: [1,3,5,6], 2
# Output: 1

# Example 3:
# Input: [1,3,5,6], 7
# Output: 4

# Example 4:
# Input: [1,3,5,6], 0
# Output: 0


class Solution(object):
    def searchInsert(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        for i in range(0, len(nums)):
            if target <= nums[i]:
                return i
        return len(nums)
20.757576
156
0.569343
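The loop in searchInsert above is O(n). Because the input is sorted, the same answer can be computed in O(log n); a sketch of a binary-search variant (not part of the original file):

# Binary-search variant of searchInsert; same results in O(log n).
def search_insert(nums, target):
    lo, hi = 0, len(nums)  # hi is exclusive; len(nums) covers "insert at the end"
    while lo < hi:
        mid = (lo + hi) // 2
        if nums[mid] < target:
            lo = mid + 1
        else:
            hi = mid
    return lo

assert search_insert([1, 3, 5, 6], 5) == 2
assert search_insert([1, 3, 5, 6], 2) == 1
assert search_insert([1, 3, 5, 6], 7) == 4
assert search_insert([1, 3, 5, 6], 0) == 0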
79574b7d38a488ae732752c84570648aeb722859
1,910
py
Python
test/test_polytope.py
DavidWalz/diversipy
bbc9b6b650529f7cb739cf981dddb3eaad2f2613
[ "BSD-3-Clause" ]
3
2021-01-06T13:35:00.000Z
2021-08-12T08:22:04.000Z
test/test_polytope.py
DavidWalz/diversipy
bbc9b6b650529f7cb739cf981dddb3eaad2f2613
[ "BSD-3-Clause" ]
1
2020-02-20T10:11:38.000Z
2020-02-29T22:52:42.000Z
test/test_polytope.py
DavidWalz/diversipy
bbc9b6b650529f7cb739cf981dddb3eaad2f2613
[ "BSD-3-Clause" ]
null
null
null
from diversipy import polytope
import numpy as np
import pytest


def test_constraints_from_bounds():
    A, b = polytope.constraints_from_bounds(lower=[1, 2], upper=[5, 6])
    np.testing.assert_almost_equal(A, [[-1, 0], [0, -1], [1, 0], [0, 1]])
    np.testing.assert_almost_equal(b, [-1, -2, 5, 6])


def test_check_constraints():
    polytope.check_Ab(A=np.ones((2, 3)), b=np.ones(2))  # ok
    with pytest.raises(Exception):
        polytope.check_Ab(A=np.ones((2, 3, 1)), b=np.ones(2))  # A must be 2-dimensional
    with pytest.raises(Exception):
        polytope.check_Ab(A=np.ones((2, 3)), b=np.ones(2, 2))  # b must be 1-dimensional
    with pytest.raises(Exception):
        polytope.check_Ab(
            A=np.ones((4, 3)), b=np.ones(2)
        )  # A and b must have same number of columns


def test_chebyshev_center():
    A, b = polytope.constraints_from_bounds(lower=[0, 0, 0], upper=[1, 1, 1])
    x0 = polytope.chebyshev_center(A, b)
    assert np.allclose(x0, [0.5, 0.5, 0.5])


def test_solve_equality():
    # x1 + x2 + x3 = 2
    A = np.array([[1, 1, 1]])
    b = np.array([2])
    N, xp = polytope.solve_equality(A, b)
    Z = np.random.uniform(low=-1, high=1, size=(5, 2))
    X = Z @ N.T + xp
    assert np.allclose(X @ A.T, b)


def test_hitandrun():
    pass  # tested in test_sample()


def test_sample():
    # Sampling from unit-simplex in R^3
    X = polytope.sample(
        n_points=1000,
        lower=[0, 0, 0],
        upper=[1, 1, 1],
        A2=np.array([[1, 1, 1]]),
        b2=np.array([1]),
    )
    assert np.allclose(X.sum(axis=1), 1)
    assert X.min() >= 0
    assert X.max() <= 1

    # Sampling from [0, 1]^2 subject to
    # x1 / 2 + x2 <= 1
    # 2/3 x1 - x2 <= -0.2
    A1 = np.array([[1 / 2, 1], [2 / 3, -1]])
    b1 = np.array([1, -0.2])
    X = polytope.sample(n_points=1000, lower=[0, 0], upper=[1, 1], A1=A1, b1=b1)
    assert np.all(X @ A1.T <= b1)
28.507463
88
0.571204
79574ba7f2b8bb53fec750bba51c34114e19122e
1,579
py
Python
gdc_filtration_tools/tools/filter_contigs.py
MarcSaric/variant-filtration-tool
e4492e2225e87ce904e49401d8919e7f84f9de87
[ "Apache-2.0" ]
null
null
null
gdc_filtration_tools/tools/filter_contigs.py
MarcSaric/variant-filtration-tool
e4492e2225e87ce904e49401d8919e7f84f9de87
[ "Apache-2.0" ]
2
2020-07-08T13:51:37.000Z
2020-07-09T19:31:00.000Z
gdc_filtration_tools/tools/filter_contigs.py
MarcSaric/variant-filtration-tool
e4492e2225e87ce904e49401d8919e7f84f9de87
[ "Apache-2.0" ]
2
2020-08-14T08:51:08.000Z
2021-04-28T15:37:16.000Z
"""Removes the VCF records on chromosomes that are not present in the contig lines of the VCF header. @author: Kyle Hernandez <kmhernan@uchicago.edu> """ import pysam from gdc_filtration_tools.logger import Logger from gdc_filtration_tools.utils import get_pysam_outmode def filter_contigs(input_vcf: str, output_vcf: str): """ Filter out VCF records on chromosomes that are not present in the contig lines of the VCF header. :param input_vcf: The input VCF file to filter. :param output_vcf: The output filtered VCF file to create. BGzip and tabix-index created if ends with '.gz'. """ logger = Logger.get_logger("filter_contigs") logger.info("Filter VCF for contigs not in header.") # setup total = 0 removed = 0 written = 0 reader = pysam.VariantFile(input_vcf) mode = get_pysam_outmode(output_vcf) writer = pysam.VariantFile(output_vcf, mode=mode, header=reader.header) # Process try: contigs = set(list(reader.header.contigs)) for record in reader.fetch(): total += 1 if record.chrom in contigs: written += 1 writer.write(record) else: removed += 1 finally: reader.close() writer.close() if mode == "wz": logger.info("Creating tabix index...") tbx = pysam.tabix_index(output_vcf, preset="vcf", force=True) logger.info( "Processed {} records, wrote {} records, and removed {} records".format( total, written, removed ) )
28.709091
112
0.639645
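A hypothetical invocation of the function above; the file names are placeholders, and an output ending in '.gz' triggers the BGzip/tabix-index path described in the docstring:

# Sketch: filtering a VCF with filter_contigs (placeholder paths).
from gdc_filtration_tools.tools.filter_contigs import filter_contigs

filter_contigs("input.variants.vcf.gz", "filtered.variants.vcf.gz")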
79574c01650c47d09ce3798f5d95234ad4aa269b
1,275
py
Python
src/riotwatcher/_apis/league_of_legends/ThirdPartyCodeApiV4.py
TheBoringBakery/Riot-Watcher
6e05fffe127530a75fd63e67da37ba81489fd4fe
[ "MIT" ]
489
2015-01-04T22:49:51.000Z
2022-03-28T03:15:54.000Z
src/riotwatcher/_apis/league_of_legends/ThirdPartyCodeApiV4.py
TheBoringBakery/Riot-Watcher
6e05fffe127530a75fd63e67da37ba81489fd4fe
[ "MIT" ]
162
2015-02-09T22:10:40.000Z
2022-02-22T13:48:50.000Z
src/riotwatcher/_apis/league_of_legends/ThirdPartyCodeApiV4.py
TheBoringBakery/Riot-Watcher
6e05fffe127530a75fd63e67da37ba81489fd4fe
[ "MIT" ]
221
2015-01-07T18:01:57.000Z
2022-03-26T21:18:48.000Z
from .. import BaseApi, NamedEndpoint
from .urls import ThirdPartyCodeApiV4Urls


class ThirdPartyCodeApiV4(NamedEndpoint):
    """
    This class wraps the ThirdPartyCode-v4 Api calls provided by the Riot API.

    See https://developer.riotgames.com/api-methods/#third-party-code-v4 for more detailed information
    """

    def __init__(self, base_api: BaseApi):
        """
        Initialize a new ThirdPartyCodeApiV4 which uses the provided base_api

        :param BaseApi base_api: the root API object to use for making all requests.
        """
        super().__init__(base_api, self.__class__.__name__)

    def by_summoner(self, region: str, encrypted_summoner_id: str):
        """
        FOR KR SUMMONERS, A 404 WILL ALWAYS BE RETURNED.

        Valid codes must be no longer than 256 characters and only use valid characters:
        0-9, a-z, A-Z, and -

        :param string region: the region to execute this request on
        :param string encrypted_summoner_id: Summoner ID

        :returns: string
        """
        return self._request_endpoint(
            self.by_summoner.__name__,
            region,
            ThirdPartyCodeApiV4Urls.by_summoner,
            encrypted_summoner_id=encrypted_summoner_id,
        )
32.692308
90
0.658039
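In riotwatcher this endpoint is normally reached through the top-level client rather than by instantiating the wrapper directly. A hedged sketch; the LolWatcher entry point and the third_party_code attribute name are my assumptions about the public API, and the key, region, and summoner id are placeholders:

# Sketch: reaching the ThirdPartyCode-v4 endpoint via the public client.
from riotwatcher import LolWatcher  # assumed public entry point

watcher = LolWatcher("RGAPI-your-api-key")  # placeholder key
code = watcher.third_party_code.by_summoner("euw1", "encrypted-summoner-id")
print(code)  # the verification string, per the by_summoner docstring above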
79574dfab9cf518d207b0607095024996e3e4dc0
610
py
Python
crudproject1/enroll/migrations/0001_initial.py
meghanj01/CRUDProject
2f572ff243d0cf4dd3c7120b8429016cb5f15ad3
[ "MIT" ]
null
null
null
crudproject1/enroll/migrations/0001_initial.py
meghanj01/CRUDProject
2f572ff243d0cf4dd3c7120b8429016cb5f15ad3
[ "MIT" ]
null
null
null
crudproject1/enroll/migrations/0001_initial.py
meghanj01/CRUDProject
2f572ff243d0cf4dd3c7120b8429016cb5f15ad3
[ "MIT" ]
null
null
null
# Generated by Django 3.1.13 on 2021-07-19 06:23

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=70)),
                ('email', models.EmailField(max_length=100)),
                ('password', models.CharField(max_length=100)),
            ],
        ),
    ]
25.416667
114
0.568852
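For context, the enroll model implied by the migration above presumably looks like the following; this is a reconstruction from the field list, not code taken from the repository:

# Reconstructed enroll/models.py implied by the migration fields above.
from django.db import models


class User(models.Model):
    name = models.CharField(max_length=70)
    email = models.EmailField(max_length=100)
    password = models.CharField(max_length=100)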
79574e517284978eff739a9646d5b134968a595c
2,580
py
Python
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/19_features/numtrees_20/rule_19.py
apcarrik/kaggle
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
[ "MIT" ]
null
null
null
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/19_features/numtrees_20/rule_19.py
apcarrik/kaggle
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
[ "MIT" ]
null
null
null
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/19_features/numtrees_20/rule_19.py
apcarrik/kaggle
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
[ "MIT" ]
null
null
null
def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Temperature, obj[3]: Time, obj[4]: Coupon, obj[5]: Coupon_validity, obj[6]: Gender, obj[7]: Age, obj[8]: Maritalstatus, obj[9]: Children, obj[10]: Education, obj[11]: Occupation, obj[12]: Income, obj[13]: Bar, obj[14]: Coffeehouse, obj[15]: Restaurantlessthan20, obj[16]: Restaurant20to50, obj[17]: Direction_same, obj[18]: Distance # {"feature": "Coupon", "instances": 51, "metric_value": 0.9864, "depth": 1} if obj[4]>1: # {"feature": "Passanger", "instances": 34, "metric_value": 0.99, "depth": 2} if obj[0]>0: # {"feature": "Education", "instances": 29, "metric_value": 0.9991, "depth": 3} if obj[10]<=0: # {"feature": "Occupation", "instances": 15, "metric_value": 0.9183, "depth": 4} if obj[11]<=12: # {"feature": "Income", "instances": 13, "metric_value": 0.7793, "depth": 5} if obj[12]<=2: # {"feature": "Children", "instances": 7, "metric_value": 0.9852, "depth": 6} if obj[9]<=0: # {"feature": "Temperature", "instances": 5, "metric_value": 0.7219, "depth": 7} if obj[2]<=55: return 'True' elif obj[2]>55: # {"feature": "Time", "instances": 2, "metric_value": 1.0, "depth": 8} if obj[3]>1: return 'False' elif obj[3]<=1: return 'True' else: return 'True' else: return 'False' elif obj[9]>0: return 'False' else: return 'False' elif obj[12]>2: return 'True' else: return 'True' elif obj[11]>12: return 'False' else: return 'False' elif obj[10]>0: # {"feature": "Occupation", "instances": 14, "metric_value": 0.8631, "depth": 4} if obj[11]<=11: # {"feature": "Time", "instances": 12, "metric_value": 0.65, "depth": 5} if obj[3]<=1: return 'False' elif obj[3]>1: # {"feature": "Age", "instances": 3, "metric_value": 0.9183, "depth": 6} if obj[7]>0: return 'True' elif obj[7]<=0: return 'False' else: return 'False' else: return 'True' elif obj[11]>11: return 'True' else: return 'True' else: return 'False' elif obj[0]<=0: return 'True' else: return 'True' elif obj[4]<=1: # {"feature": "Bar", "instances": 17, "metric_value": 0.6723, "depth": 2} if obj[13]<=1.0: return 'False' elif obj[13]>1.0: # {"feature": "Occupation", "instances": 5, "metric_value": 0.971, "depth": 3} if obj[11]<=7: return 'True' elif obj[11]>7: return 'False' else: return 'False' else: return 'True' else: return 'False'
37.941176
400
0.565504
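A hypothetical call of findDecision from the record above (with the function imported from that module). The 19-element list follows the index-to-feature mapping in the leading comment, but the numeric encodings of each feature are dataset-specific assumptions:

# Sketch: evaluating the decision rule on one encoded row (values are made up).
row = [1, 0, 80, 2, 2, 1, 1, 26, 0, 0, 1, 5, 3, 1.0, 2.0, 2.0, 1.0, 0, 1]
print(findDecision(row))  # -> 'True' or 'False'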
79574e6db307cdc762e965ff99ab97b28897d6d8
10,520
py
Python
v6.0.5/firewall/fortios_firewall_vipgrp46.py
fortinet-solutions-cse/ansible_fgt_modules
c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719
[ "Apache-2.0" ]
14
2018-09-25T20:35:25.000Z
2021-07-14T04:30:54.000Z
v6.0.5/firewall/fortios_firewall_vipgrp46.py
fortinet-solutions-cse/ansible_fgt_modules
c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719
[ "Apache-2.0" ]
32
2018-10-09T04:13:42.000Z
2020-05-11T07:20:28.000Z
v6.0.5/firewall/fortios_firewall_vipgrp46.py
fortinet-solutions-cse/ansible_fgt_modules
c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719
[ "Apache-2.0" ]
11
2018-10-09T00:14:53.000Z
2021-11-03T10:54:09.000Z
#!/usr/bin/python from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_firewall_vipgrp46 short_description: Configure IPv4 to IPv6 virtual IP groups in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the user to set and modify firewall feature and vipgrp46 category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.5 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate IP address. type: str required: false username: description: - FortiOS or FortiGate username. type: str required: false password: description: - FortiOS or FortiGate password. type: str default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. type: str default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol. type: bool default: true ssl_verify: description: - Ensures FortiGate certificate must be verified by a proper CA. type: bool default: true version_added: 2.9 state: description: - Indicates whether to create or remove the object. type: str required: true choices: - present - absent version_added: 2.9 firewall_vipgrp46: description: - Configure IPv4 to IPv6 virtual IP groups. default: null type: dict suboptions: color: description: - Integer value to determine the color of the icon in the GUI (range 1 to 32). type: int comments: description: - Comment. type: str member: description: - Member VIP objects of the group (Separate multiple objects with a space). type: list suboptions: name: description: - VIP46 name. Source firewall.vip46.name. required: true type: str name: description: - VIP46 group name. required: true type: str uuid: description: - Universally Unique Identifier (UUID; automatically assigned but can be manually reset). type: str ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" ssl_verify: "False" tasks: - name: Configure IPv4 to IPv6 virtual IP groups. 
fortios_firewall_vipgrp46: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" state: "present" firewall_vipgrp46: color: "3" comments: "<your_own_value>" member: - name: "default_name_6 (source firewall.vip46.name)" name: "default_name_7" uuid: "<your_own_value>" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def login(data, fos): host = data['host'] username = data['username'] password = data['password'] ssl_verify = data['ssl_verify'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password, verify=ssl_verify) def filter_firewall_vipgrp46_data(json): option_list = ['color', 'comments', 'member', 'name', 'uuid'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def underscore_to_hyphen(data): if isinstance(data, list): for elem in data: elem = underscore_to_hyphen(elem) elif isinstance(data, dict): new_data = {} for k, v in data.items(): new_data[k.replace('_', '-')] = underscore_to_hyphen(v) data = new_data return data def firewall_vipgrp46(data, fos): vdom = data['vdom'] state = data['state'] firewall_vipgrp46_data = data['firewall_vipgrp46'] filtered_data = underscore_to_hyphen(filter_firewall_vipgrp46_data(firewall_vipgrp46_data)) if state == "present": return fos.set('firewall', 'vipgrp46', data=filtered_data, vdom=vdom) elif state == "absent": return fos.delete('firewall', 'vipgrp46', mkey=filtered_data['name'], vdom=vdom) def is_successful_status(status): return status['status'] == "success" or \ status['http_method'] == "DELETE" and status['http_status'] == 404 def fortios_firewall(data, fos): if data['firewall_vipgrp46']: resp = firewall_vipgrp46(data, fos) return not is_successful_status(resp), \ resp['status'] == "success", \ resp def main(): fields = { "host": {"required": False, "type": "str"}, "username": {"required": False, "type": "str"}, "password": {"required": False, "type": "str", "default": "", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, 
"https": {"required": False, "type": "bool", "default": True}, "ssl_verify": {"required": False, "type": "bool", "default": True}, "state": {"required": True, "type": "str", "choices": ["present", "absent"]}, "firewall_vipgrp46": { "required": False, "type": "dict", "default": None, "options": { "color": {"required": False, "type": "int"}, "comments": {"required": False, "type": "str"}, "member": {"required": False, "type": "list", "options": { "name": {"required": True, "type": "str"} }}, "name": {"required": True, "type": "str"}, "uuid": {"required": False, "type": "str"} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) # legacy_mode refers to using fortiosapi instead of HTTPAPI legacy_mode = 'host' in module.params and module.params['host'] is not None and \ 'username' in module.params and module.params['username'] is not None and \ 'password' in module.params and module.params['password'] is not None if not legacy_mode: if module._socket_path: connection = Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed, result = fortios_firewall(module.params, fos) else: module.fail_json(**FAIL_SOCKET_MSG) else: try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") fos = FortiOSAPI() login(module.params, fos) is_error, has_changed, result = fortios_firewall(module.params, fos) fos.logout() if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
30.229885
109
0.596293
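The request body that this module sends is shaped by two small helpers: filter_firewall_vipgrp46_data keeps only the documented vipgrp46 options, and underscore_to_hyphen rewrites the keys into the hyphenated form the FortiOS API expects. The sketch below re-implements that step in isolation so it can be run outside Ansible; the sample option values are hypothetical.

def filter_firewall_vipgrp46_data(json):
    # Keep only the documented vipgrp46 options, dropping unset values.
    option_list = ['color', 'comments', 'member', 'name', 'uuid']
    return {k: json[k] for k in option_list if json.get(k) is not None}

def underscore_to_hyphen(data):
    # Ansible option names use underscores; the FortiOS API expects hyphens.
    if isinstance(data, list):
        return [underscore_to_hyphen(elem) for elem in data]
    if isinstance(data, dict):
        return {k.replace('_', '-'): underscore_to_hyphen(v) for k, v in data.items()}
    return data

params = {
    "name": "vipgrp46-demo",          # hypothetical group name
    "color": 3,
    "comments": None,                 # unset, dropped by the filter
    "member": [{"name": "vip46-a"}],  # hypothetical member VIP
    "uuid": None,
}
print(underscore_to_hyphen(filter_firewall_vipgrp46_data(params)))
# {'color': 3, 'member': [{'name': 'vip46-a'}], 'name': 'vipgrp46-demo'}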
79574fd1e62cb71f8dfaea6f50eb35e3781dbc2b
2,368
py
Python
tests/pymath/test_calculate_tax.py
BrianLusina/PyCharm
144dd4f6b2d254507237f46c8ee175c407fe053d
[ "Apache-2.0", "MIT" ]
null
null
null
tests/pymath/test_calculate_tax.py
BrianLusina/PyCharm
144dd4f6b2d254507237f46c8ee175c407fe053d
[ "Apache-2.0", "MIT" ]
null
null
null
tests/pymath/test_calculate_tax.py
BrianLusina/PyCharm
144dd4f6b2d254507237f46c8ee175c407fe053d
[ "Apache-2.0", "MIT" ]
null
null
null
from unittest import TestCase, skip

from pymath.calculate_tax import calculate_tax


# todo: failing tests
@skip("Failing tests, not implemented methods correctly")
class CalculateTaxTests(TestCase):
    def test_it_calculates_tax_for_one_person(self):
        result = calculate_tax({"James": 20500})
        self.assertEqual(
            result,
            {"James": 2490.0},
            msg="Should return {'James': 2490.0} for the input {'James': 20500}",
        )

    def test_it_calculates_tax_for_several_people(self):
        income_input = {"James": 20500, "Mary": 500, "Evan": 70000}
        result = calculate_tax(income_input)
        self.assertEqual(
            {"James": 2490.0, "Mary": 0, "Evan": 15352.5},
            result,
            msg="Should return {} for the input {}".format(
                {"James": 2490.0, "Mary": 0, "Evan": 15352.5},
                {"James": 20500, "Mary": 500, "Evan": 70000},
            ),
        )

    def test_it_does_not_accept_integers(self):
        with self.assertRaises(ValueError) as context:
            calculate_tax(1)
        self.assertEqual(
            "The provided input is not a dictionary.",
            str(context.exception),
            "Invalid input of type int not allowed",
        )

    def test_calculated_tax_is_a_float(self):
        result = calculate_tax({"Jane": 20500})
        self.assertIsInstance(
            result,
            dict,
            msg="Should return a result of data type dict",
        )
        self.assertIsInstance(
            result["Jane"], float, msg="Tax returned should be a float."
        )

    def test_it_returns_zero_tax_for_income_less_than_1000(self):
        result = calculate_tax({"Jake": 100})
        self.assertEqual(
            result, {"Jake": 0}, msg="Should return zero tax for incomes less than 1000"
        )

    def test_it_throws_an_error_if_any_of_the_inputs_is_non_numeric(self):
        with self.assertRaises(ValueError, msg="Allow only numeric input"):
            calculate_tax({"James": 2490.0, "Kiura": "200", "Kinuthia": 15352.5})

    def test_it_return_an_empty_dict_for_an_empty_dict_input(self):
        result = calculate_tax({})
        self.assertEqual(
            result, {}, msg="Should return an empty dict if the input was an empty dict"
        )
37
88
0.604307
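The skipped tests above pin down the expected behaviour of calculate_tax: it accepts only a dict of numeric incomes, returns a dict, charges no tax below 1000, and yields 2490.0 for 20500 and 15352.5 for 70000. Below is a minimal sketch consistent with those figures; the bracket boundaries and rates are an assumption reconstructed from the expected values, not taken from the pymath module itself.

BRACKETS = [  # (upper bound, marginal rate) -- assumed, reverse-engineered from the tests
    (1000, 0.00),
    (10000, 0.10),
    (20200, 0.15),
    (30750, 0.20),
    (50000, 0.25),
    (float("inf"), 0.30),
]

def calculate_tax(incomes):
    if not isinstance(incomes, dict):
        raise ValueError("The provided input is not a dictionary.")
    taxes = {}
    for name, income in incomes.items():
        if isinstance(income, bool) or not isinstance(income, (int, float)):
            raise ValueError("Only numeric incomes are allowed.")
        tax, lower = 0.0, 0
        for upper, rate in BRACKETS:
            if income > lower:
                tax += (min(income, upper) - lower) * rate
            lower = upper
        taxes[name] = 0 if income <= 1000 else tax
    return taxes

print(calculate_tax({"James": 20500, "Mary": 500, "Evan": 70000}))
# {'James': 2490.0, 'Mary': 0, 'Evan': 15352.5}  -- matches the expectations asserted above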
79575044c7b646762c580363239430cf49f5e846
2,112
py
Python
basis/producer_consumer_cost.py
dictxwang/python-fragments
029820bfd290c60aeb172e876ddf3937a8704e91
[ "Apache-2.0" ]
null
null
null
basis/producer_consumer_cost.py
dictxwang/python-fragments
029820bfd290c60aeb172e876ddf3937a8704e91
[ "Apache-2.0" ]
null
null
null
basis/producer_consumer_cost.py
dictxwang/python-fragments
029820bfd290c60aeb172e876ddf3937a8704e91
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf8 -*-
__author__ = 'wangqiang'

'''
Producer-consumer model for measuring message hand-off latency.
Thread-based implementation with a shared queue.
'''

import threading
from queue import Queue
import time
import uuid
import random


def producer(name, mq):
    '''
    Producer: pushes messages onto the queue.
    :param name: producer name
    :param mq: shared queue
    :return:
    '''
    while True:
        # The first tuple item marks the message type; a uuid stands in for the payload.
        mt = ['a', 'b', 'c']
        random.shuffle(mt)
        try:
            mq.put((mt[0], int(time.clock_gettime(time.CLOCK_PROCESS_CPUTIME_ID) * 1000000), uuid.uuid1()),
                   block=True, timeout=0.01)
        except Exception as exp:
            pass

        # Simulate the delay of producing/transmitting a message.
        rnd = random.randint(0, 10)
        if rnd >= 0:
            time.sleep(rnd * 0.001)


def consumer(name, mq):
    '''
    Consumer: takes messages off the queue and applies the business logic.
    :param name: consumer name
    :param mq: shared queue
    :return:
    '''
    tcount = 0
    ocount = 0
    ccount = 0
    while True:
        try:
            task = mq.get()
            now = int(time.clock_gettime(time.CLOCK_PROCESS_CPUTIME_ID) * 1000000)
            tcount += 1
            if task:
                time_diff = now - task[1]
                ccount += time_diff
                # print("[{}] get message time_diff:{}μs".format(name, time_diff))
                time_diff_limit = 800
                if time_diff > time_diff_limit:
                    ocount += 1
                    print("[{}] get message time_diff {}μs > {}μs".format(name, time_diff, time_diff_limit))
                    print("timeout-count={},total-count={}, avg-time-diff={}μs".format(ocount, tcount, ccount // tcount))
        except Exception as exp:
            break
        finally:
            time.sleep(0.001)


if __name__ == "__main__":
    message_queue = Queue(maxsize=1000)
    # Start the consumers.
    for i in range(4):
        c = threading.Thread(target=consumer, args=("c{}".format(i), message_queue))
        c.start()
        print("consumer-{} started".format(i))

    # Start the producer.
    p1 = threading.Thread(target=producer, args=("01", message_queue))
    p1.start()
    print("producer started")
24.275862
133
0.548295
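The script above leaves its threads running forever and times hand-offs with the per-process CPU clock. A bounded variant of the same measurement idea is sketched below: it sends a fixed number of messages, carries a time.monotonic() timestamp inside each one, and reports the average queue hand-off latency. The message count and sleep interval are arbitrary choices for illustration, not values from the original script.

import threading
import time
import queue

q = queue.Queue(maxsize=1000)
STOP = object()  # sentinel that tells the consumer to shut down

def producer(n):
    for i in range(n):
        q.put((time.monotonic(), i))  # timestamp rides along with the message
        time.sleep(0.001)             # simulate spacing between messages
    q.put(STOP)

def consumer():
    total, count = 0.0, 0
    while True:
        item = q.get()
        if item is STOP:
            break
        total += time.monotonic() - item[0]
        count += 1
    print("messages=%d avg-latency=%.1fus" % (count, total / count * 1e6))

c = threading.Thread(target=consumer); c.start()
p = threading.Thread(target=producer, args=(100,)); p.start()
p.join(); c.join()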
7957504ec2b0029638149ae432540fd032b242ba
55,252
py
Python
google/cloud/compute_v1/services/security_policies/client.py
auphofBSF/python-compute
c81bfa752c9db93edd0cd56fec3a79599704d792
[ "Apache-2.0" ]
null
null
null
google/cloud/compute_v1/services/security_policies/client.py
auphofBSF/python-compute
c81bfa752c9db93edd0cd56fec3a79599704d792
[ "Apache-2.0" ]
null
null
null
google/cloud/compute_v1/services/security_policies/client.py
auphofBSF/python-compute
c81bfa752c9db93edd0cd56fec3a79599704d792
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict from distutils import util import os import re from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.cloud.compute_v1.services.security_policies import pagers from google.cloud.compute_v1.types import compute from .transports.base import SecurityPoliciesTransport, DEFAULT_CLIENT_INFO from .transports.rest import SecurityPoliciesRestTransport class SecurityPoliciesClientMeta(type): """Metaclass for the SecurityPolicies client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = ( OrderedDict() ) # type: Dict[str, Type[SecurityPoliciesTransport]] _transport_registry["rest"] = SecurityPoliciesRestTransport def get_transport_class(cls, label: str = None,) -> Type[SecurityPoliciesTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class SecurityPoliciesClient(metaclass=SecurityPoliciesClientMeta): """The SecurityPolicies API.""" @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "compute.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: SecurityPoliciesClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: SecurityPoliciesClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> SecurityPoliciesTransport: """Return the transport used by the client instance. Returns: SecurityPoliciesTransport: The transport used by the client instance. """ return self._transport @staticmethod def common_billing_account_path(billing_account: str,) -> str: """Return a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str, str]: """Parse a billing_account path into its component segments.""" m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str,) -> str: """Return a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: """Parse a folder path into its component segments.""" m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str,) -> str: """Return a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: """Parse a organization path into its component segments.""" m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str,) -> str: """Return a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: """Parse a project path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str,) -> str: """Return a fully-qualified location string.""" 
return "projects/{project}/locations/{location}".format( project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str, str]: """Parse a location path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) return m.groupdict() if m else {} def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, SecurityPoliciesTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the security policies client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, SecurityPoliciesTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() # Create SSL credentials for mutual TLS if needed. use_client_cert = bool( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: is_mtls = True client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() client_cert_source_func = ( mtls.default_client_cert_source() if is_mtls else None ) # Figure out which api endpoint to use. 
if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, SecurityPoliciesTransport): # transport is a SecurityPoliciesTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) def add_rule( self, request: compute.AddRuleSecurityPolicyRequest = None, *, project: str = None, security_policy: str = None, security_policy_rule_resource: compute.SecurityPolicyRule = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: r"""Inserts a rule into a security policy. Args: request (google.cloud.compute_v1.types.AddRuleSecurityPolicyRequest): The request object. A request message for SecurityPolicies.AddRule. See the method description for details. project (str): Project ID for this request. This corresponds to the ``project`` field on the ``request`` instance; if ``request`` is provided, this should not be set. security_policy (str): Name of the security policy to update. This corresponds to the ``security_policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. security_policy_rule_resource (google.cloud.compute_v1.types.SecurityPolicyRule): The body resource for this request This corresponds to the ``security_policy_rule_resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.compute_v1.types.Operation: Represents an Operation resource. Google Compute Engine has three Operation resources: - [Global](/compute/docs/reference/rest/{$api_version}/globalOperations) \* [Regional](/compute/docs/reference/rest/{$api_version}/regionOperations) \* [Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the globalOperations resource. - For regional operations, use the regionOperations resource. 
- For zonal operations, use the zonalOperations resource. For more information, read Global, Regional, and Zonal Resources. (== resource_for {$api_version}.globalOperations ==) (== resource_for {$api_version}.regionOperations ==) (== resource_for {$api_version}.zoneOperations ==) """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any( [project, security_policy, security_policy_rule_resource] ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a compute.AddRuleSecurityPolicyRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, compute.AddRuleSecurityPolicyRequest): request = compute.AddRuleSecurityPolicyRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if project is not None: request.project = project if security_policy is not None: request.security_policy = security_policy if security_policy_rule_resource is not None: request.security_policy_rule_resource = security_policy_rule_resource # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.add_rule] # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def delete( self, request: compute.DeleteSecurityPolicyRequest = None, *, project: str = None, security_policy: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: r"""Deletes the specified policy. Args: request (google.cloud.compute_v1.types.DeleteSecurityPolicyRequest): The request object. A request message for SecurityPolicies.Delete. See the method description for details. project (str): Project ID for this request. This corresponds to the ``project`` field on the ``request`` instance; if ``request`` is provided, this should not be set. security_policy (str): Name of the security policy to delete. This corresponds to the ``security_policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.compute_v1.types.Operation: Represents an Operation resource. Google Compute Engine has three Operation resources: - [Global](/compute/docs/reference/rest/{$api_version}/globalOperations) \* [Regional](/compute/docs/reference/rest/{$api_version}/regionOperations) \* [Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the globalOperations resource. - For regional operations, use the regionOperations resource. - For zonal operations, use the zonalOperations resource. For more information, read Global, Regional, and Zonal Resources. 
(== resource_for {$api_version}.globalOperations ==) (== resource_for {$api_version}.regionOperations ==) (== resource_for {$api_version}.zoneOperations ==) """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([project, security_policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a compute.DeleteSecurityPolicyRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, compute.DeleteSecurityPolicyRequest): request = compute.DeleteSecurityPolicyRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if project is not None: request.project = project if security_policy is not None: request.security_policy = security_policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete] # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def get( self, request: compute.GetSecurityPolicyRequest = None, *, project: str = None, security_policy: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.SecurityPolicy: r"""List all of the ordered rules present in a single specified policy. Args: request (google.cloud.compute_v1.types.GetSecurityPolicyRequest): The request object. A request message for SecurityPolicies.Get. See the method description for details. project (str): Project ID for this request. This corresponds to the ``project`` field on the ``request`` instance; if ``request`` is provided, this should not be set. security_policy (str): Name of the security policy to get. This corresponds to the ``security_policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.compute_v1.types.SecurityPolicy: Represents a Google Cloud Armor security policy resource. Only external backend services that use load balancers can reference a security policy. For more information, see Google Cloud Armor security policy overview. (== resource_for {$api_version}.securityPolicies ==) """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([project, security_policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a compute.GetSecurityPolicyRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
if not isinstance(request, compute.GetSecurityPolicyRequest): request = compute.GetSecurityPolicyRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if project is not None: request.project = project if security_policy is not None: request.security_policy = security_policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get] # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def get_rule( self, request: compute.GetRuleSecurityPolicyRequest = None, *, project: str = None, security_policy: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.SecurityPolicyRule: r"""Gets a rule at the specified priority. Args: request (google.cloud.compute_v1.types.GetRuleSecurityPolicyRequest): The request object. A request message for SecurityPolicies.GetRule. See the method description for details. project (str): Project ID for this request. This corresponds to the ``project`` field on the ``request`` instance; if ``request`` is provided, this should not be set. security_policy (str): Name of the security policy to which the queried rule belongs. This corresponds to the ``security_policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.compute_v1.types.SecurityPolicyRule: Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny). """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([project, security_policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a compute.GetRuleSecurityPolicyRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, compute.GetRuleSecurityPolicyRequest): request = compute.GetRuleSecurityPolicyRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if project is not None: request.project = project if security_policy is not None: request.security_policy = security_policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_rule] # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def insert( self, request: compute.InsertSecurityPolicyRequest = None, *, project: str = None, security_policy_resource: compute.SecurityPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: r"""Creates a new policy in the specified project using the data included in the request. Args: request (google.cloud.compute_v1.types.InsertSecurityPolicyRequest): The request object. A request message for SecurityPolicies.Insert. See the method description for details. project (str): Project ID for this request. This corresponds to the ``project`` field on the ``request`` instance; if ``request`` is provided, this should not be set. security_policy_resource (google.cloud.compute_v1.types.SecurityPolicy): The body resource for this request This corresponds to the ``security_policy_resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.compute_v1.types.Operation: Represents an Operation resource. Google Compute Engine has three Operation resources: - [Global](/compute/docs/reference/rest/{$api_version}/globalOperations) \* [Regional](/compute/docs/reference/rest/{$api_version}/regionOperations) \* [Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the globalOperations resource. - For regional operations, use the regionOperations resource. - For zonal operations, use the zonalOperations resource. For more information, read Global, Regional, and Zonal Resources. (== resource_for {$api_version}.globalOperations ==) (== resource_for {$api_version}.regionOperations ==) (== resource_for {$api_version}.zoneOperations ==) """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([project, security_policy_resource]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a compute.InsertSecurityPolicyRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, compute.InsertSecurityPolicyRequest): request = compute.InsertSecurityPolicyRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if project is not None: request.project = project if security_policy_resource is not None: request.security_policy_resource = security_policy_resource # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.insert] # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def list( self, request: compute.ListSecurityPoliciesRequest = None, *, project: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListPager: r"""List all the policies that have been configured for the specified project. Args: request (google.cloud.compute_v1.types.ListSecurityPoliciesRequest): The request object. A request message for SecurityPolicies.List. See the method description for details. project (str): Project ID for this request. This corresponds to the ``project`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.compute_v1.services.security_policies.pagers.ListPager: Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([project]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a compute.ListSecurityPoliciesRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, compute.ListSecurityPoliciesRequest): request = compute.ListSecurityPoliciesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if project is not None: request.project = project # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list] # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def list_preconfigured_expression_sets( self, request: compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest = None, *, project: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.SecurityPoliciesListPreconfiguredExpressionSetsResponse: r"""Gets the current list of preconfigured Web Application Firewall (WAF) expressions. Args: request (google.cloud.compute_v1.types.ListPreconfiguredExpressionSetsSecurityPoliciesRequest): The request object. A request message for SecurityPolicies.ListPreconfiguredExpressionSets. See the method description for details. project (str): Project ID for this request. This corresponds to the ``project`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
Returns: google.cloud.compute_v1.types.SecurityPoliciesListPreconfiguredExpressionSetsResponse: """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([project]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance( request, compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest ): request = compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest( request ) # If we have keyword arguments corresponding to fields on the # request, apply these. if project is not None: request.project = project # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[ self._transport.list_preconfigured_expression_sets ] # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def patch( self, request: compute.PatchSecurityPolicyRequest = None, *, project: str = None, security_policy: str = None, security_policy_resource: compute.SecurityPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: r"""Patches the specified policy with the data included in the request. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead. Args: request (google.cloud.compute_v1.types.PatchSecurityPolicyRequest): The request object. A request message for SecurityPolicies.Patch. See the method description for details. project (str): Project ID for this request. This corresponds to the ``project`` field on the ``request`` instance; if ``request`` is provided, this should not be set. security_policy (str): Name of the security policy to update. This corresponds to the ``security_policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. security_policy_resource (google.cloud.compute_v1.types.SecurityPolicy): The body resource for this request This corresponds to the ``security_policy_resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.compute_v1.types.Operation: Represents an Operation resource. Google Compute Engine has three Operation resources: - [Global](/compute/docs/reference/rest/{$api_version}/globalOperations) \* [Regional](/compute/docs/reference/rest/{$api_version}/regionOperations) \* [Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the globalOperations resource. - For regional operations, use the regionOperations resource. 
- For zonal operations, use the zonalOperations resource. For more information, read Global, Regional, and Zonal Resources. (== resource_for {$api_version}.globalOperations ==) (== resource_for {$api_version}.regionOperations ==) (== resource_for {$api_version}.zoneOperations ==) """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([project, security_policy, security_policy_resource]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a compute.PatchSecurityPolicyRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, compute.PatchSecurityPolicyRequest): request = compute.PatchSecurityPolicyRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if project is not None: request.project = project if security_policy is not None: request.security_policy = security_policy if security_policy_resource is not None: request.security_policy_resource = security_policy_resource # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.patch] # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def patch_rule( self, request: compute.PatchRuleSecurityPolicyRequest = None, *, project: str = None, security_policy: str = None, security_policy_rule_resource: compute.SecurityPolicyRule = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: r"""Patches a rule at the specified priority. Args: request (google.cloud.compute_v1.types.PatchRuleSecurityPolicyRequest): The request object. A request message for SecurityPolicies.PatchRule. See the method description for details. project (str): Project ID for this request. This corresponds to the ``project`` field on the ``request`` instance; if ``request`` is provided, this should not be set. security_policy (str): Name of the security policy to update. This corresponds to the ``security_policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. security_policy_rule_resource (google.cloud.compute_v1.types.SecurityPolicyRule): The body resource for this request This corresponds to the ``security_policy_rule_resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.compute_v1.types.Operation: Represents an Operation resource. Google Compute Engine has three Operation resources: - [Global](/compute/docs/reference/rest/{$api_version}/globalOperations) \* [Regional](/compute/docs/reference/rest/{$api_version}/regionOperations) \* [Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. 
Operations can be global, regional or zonal. - For global operations, use the globalOperations resource. - For regional operations, use the regionOperations resource. - For zonal operations, use the zonalOperations resource. For more information, read Global, Regional, and Zonal Resources. (== resource_for {$api_version}.globalOperations ==) (== resource_for {$api_version}.regionOperations ==) (== resource_for {$api_version}.zoneOperations ==) """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any( [project, security_policy, security_policy_rule_resource] ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a compute.PatchRuleSecurityPolicyRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, compute.PatchRuleSecurityPolicyRequest): request = compute.PatchRuleSecurityPolicyRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if project is not None: request.project = project if security_policy is not None: request.security_policy = security_policy if security_policy_rule_resource is not None: request.security_policy_rule_resource = security_policy_rule_resource # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.patch_rule] # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def remove_rule( self, request: compute.RemoveRuleSecurityPolicyRequest = None, *, project: str = None, security_policy: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: r"""Deletes a rule at the specified priority. Args: request (google.cloud.compute_v1.types.RemoveRuleSecurityPolicyRequest): The request object. A request message for SecurityPolicies.RemoveRule. See the method description for details. project (str): Project ID for this request. This corresponds to the ``project`` field on the ``request`` instance; if ``request`` is provided, this should not be set. security_policy (str): Name of the security policy to update. This corresponds to the ``security_policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.compute_v1.types.Operation: Represents an Operation resource. Google Compute Engine has three Operation resources: - [Global](/compute/docs/reference/rest/{$api_version}/globalOperations) \* [Regional](/compute/docs/reference/rest/{$api_version}/regionOperations) \* [Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the globalOperations resource. 
- For regional operations, use the regionOperations resource. - For zonal operations, use the zonalOperations resource. For more information, read Global, Regional, and Zonal Resources. (== resource_for {$api_version}.globalOperations ==) (== resource_for {$api_version}.regionOperations ==) (== resource_for {$api_version}.zoneOperations ==) """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([project, security_policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a compute.RemoveRuleSecurityPolicyRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, compute.RemoveRuleSecurityPolicyRequest): request = compute.RemoveRuleSecurityPolicyRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if project is not None: request.project = project if security_policy is not None: request.security_policy = security_policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.remove_rule] # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ("SecurityPoliciesClient",)
44.450523
107
0.61377
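In practice this client is usually reached through the google.cloud.compute_v1 package rather than imported from this module directly. The sketch below shows the flattened-argument calling style that add_rule supports; the project ID, policy name, and rule fields are placeholder values, and a real call needs valid Google Cloud credentials plus an existing security policy.

from google.cloud import compute_v1

client = compute_v1.SecurityPoliciesClient()  # endpoint and mTLS settings resolved as described above

rule = compute_v1.SecurityPolicyRule(
    priority=1000,                 # placeholder priority
    action="deny(403)",
    description="example rule",
)
operation = client.add_rule(
    project="example-project",         # placeholder project ID
    security_policy="example-policy",  # placeholder policy name
    security_policy_rule_resource=rule,
)
print(operation.status)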
79575050f23fb6768f83b7b31a4028460cfce883
194
py
Python
tf_lite_converter.py
angelorodem/tensorflow2-char-rnn
f28503c61de62eade9b477bf13573988fb3807de
[ "MIT" ]
null
null
null
tf_lite_converter.py
angelorodem/tensorflow2-char-rnn
f28503c61de62eade9b477bf13573988fb3807de
[ "MIT" ]
null
null
null
tf_lite_converter.py
angelorodem/tensorflow2-char-rnn
f28503c61de62eade9b477bf13573988fb3807de
[ "MIT" ]
null
null
null
import tensorflow as tf

# Convert the trained SavedModel (exported under name_save/saved_model)
# into a TensorFlow Lite flatbuffer.
converter = tf.lite.TFLiteConverter.from_saved_model('name_save/saved_model')
tflite_model = converter.convert()

# Write the serialized model to disk.
open("converted_model.tflite", "wb").write(tflite_model)
38.8
77
0.814433
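Once the flatbuffer has been written, tf.lite.Interpreter can be used to confirm that the converted model loads and runs. The file name follows the script above; the zero-filled input is only a placeholder, since the real input shape and dtype depend on the exported char-rnn SavedModel.

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="converted_model.tflite")
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Feed a placeholder tensor with the shape/dtype the model expects.
dummy = np.zeros(input_details[0]["shape"], dtype=input_details[0]["dtype"])
interpreter.set_tensor(input_details[0]["index"], dummy)
interpreter.invoke()
print(interpreter.get_tensor(output_details[0]["index"]).shape)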
795752341c8adba8b89e4088635cbfeb62146765
13,396
py
Python
smtbx/absolute_structure/__init__.py
rimmartin/cctbx_project
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
[ "BSD-3-Clause-LBNL" ]
null
null
null
smtbx/absolute_structure/__init__.py
rimmartin/cctbx_project
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
[ "BSD-3-Clause-LBNL" ]
null
null
null
smtbx/absolute_structure/__init__.py
rimmartin/cctbx_project
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
[ "BSD-3-Clause-LBNL" ]
null
null
null
from __future__ import division from __future__ import print_function from __future__ import absolute_import import math import sys from cctbx.array_family import flex from cctbx import sgtbx, xray from libtbx import adopt_init_args from libtbx.utils import xfrange from libtbx.utils\ import format_float_with_standard_uncertainty as format_float_with_su from libtbx.utils import Sorry from scitbx.math import distributions from scitbx.lstbx import normal_eqns_solving import smtbx.utils from smtbx.refinement import constraints from smtbx.refinement import least_squares from cctbx.xray import observations class hooft_analysis(object): """ Determination of absolute structure using Bayesian statistics on Bijvoet differences. See: Hooft, R.W.W., Straver, L.H., Spek, A.L. (2008). J. Appl. Cryst., 41, 96-103. Hooft, R.W.W., Straver, L.H., Spek, A.L. (2009). Acta Crystallogr. A65, 319-321. Hooft, R.W.W., Straver, L.H., Spek, A.L. (2010). J. Appl. Cryst., 43, 665-668. and for more information: http://www.absolutestructure.com/bibliography.html """ distribution = "Gaussian" def __init__(self, fo2, fc, scale_factor=None, outlier_cutoff_factor=None, probability_plot_slope=None): self.probability_plot_slope = probability_plot_slope assert fo2.is_xray_intensity_array() assert fc.is_complex_array() assert not fo2.space_group().is_centric() if scale_factor is None: scale_factor = fo2.scale_factor(fc) fc2 = fc.as_intensity_array() self.delta_fc2 = fc2.anomalous_differences() self.delta_fo2 = fo2.anomalous_differences() self.n_bijvoet_pairs = self.delta_fo2.size() if outlier_cutoff_factor is not None: cutoff_sel = flex.abs(self.delta_fo2.data()) > ( outlier_cutoff_factor * scale_factor) * flex.max( flex.abs(self.delta_fc2.data())) self.delta_fo2 = self.delta_fo2.select(~cutoff_sel) self.delta_fc2 = self.delta_fc2.select(~cutoff_sel) self.delta_fc2 = self.delta_fc2.customized_copy( data=self.delta_fc2.data() * scale_factor) if not self.delta_fo2.size(): raise Sorry("Absolute structure could not be determined") min_gamma = -10 max_gamma = 10 # quick and dirty to find better min, max gammas max_log_p_obs = -1e100 while True: # search for the maximum width = max_gamma - min_gamma if width < 0.0001: break middle = (min_gamma + max_gamma)/2 a = middle - width/4 b = middle + width/4 value_a = self.log_p_obs_given_gamma(a) value_b = self.log_p_obs_given_gamma(b) if value_a > value_b: max_gamma = middle elif value_a == value_b: min_gamma = a max_gamma = b else: min_gamma = middle max_log_p_obs = max([max_log_p_obs, value_a, value_b]) while True: # search for where the curve becomes close to zero on the left min_gamma = middle - width/2 if (width > 100 or self.log_p_obs_given_gamma(min_gamma) - max_log_p_obs < -10): break width *= 2 width = max_gamma - min_gamma while True: # search for where the curve becomes close to zero on the right max_gamma = middle + width/2 if (width > 100 or self.log_p_obs_given_gamma(max_gamma) - max_log_p_obs < -10): break width *= 2 n_steps = 500 d_gamma = (max_gamma - min_gamma)/n_steps # now do it properly log_p_obs_given_gammas = flex.double() for gamma in xfrange(min_gamma, max_gamma, d_gamma): log_p_obs_given_gammas.append(self.log_p_obs_given_gamma(gamma)) max_log_p_obs = flex.max(log_p_obs_given_gammas) G_numerator = 0 G_denominator = 0 p_u_gammas = flex.double() # Numerical integration using trapezoidal rule for i, gamma in enumerate(xfrange(min_gamma, max_gamma, d_gamma)): p_u_gamma = math.exp(log_p_obs_given_gammas[i] - max_log_p_obs) p_u_gammas.append(p_u_gamma) if i == 0: 
continue G_numerator += 0.5 * d_gamma * ( (gamma-d_gamma) * p_u_gammas[-2] + gamma * p_u_gammas[-1]) G_denominator += 0.5 * (p_u_gammas[-2] + p_u_gammas[-1]) * d_gamma self.G = G_numerator/G_denominator sigma_squared_G_numerator = 0 # Numerical integration using trapezoidal rule next_ = None for i, gamma in enumerate(xfrange(min_gamma, max_gamma, d_gamma)): previous = next_ next_ = math.pow((gamma - self.G), 2) * p_u_gammas[i] * d_gamma if i == 0: continue sigma_squared_G_numerator += 0.5 * (previous + next_) self.hooft_y = (1-self.G)/2 self.sigma_G = math.sqrt(sigma_squared_G_numerator/G_denominator) self.sigma_y = self.sigma_G/2 # Now calculate P2, P3 values log_p_obs_given_gamma_is_minus_1 = self.log_p_obs_given_gamma(-1) log_p_obs_given_gamma_is_0 = self.log_p_obs_given_gamma(0) log_p_obs_given_gamma_is_1 = self.log_p_obs_given_gamma(1) max_log_p_obs = max([log_p_obs_given_gamma_is_minus_1, log_p_obs_given_gamma_is_0, log_p_obs_given_gamma_is_1]) # all values normalised by max_log_p_obs for numerical stability log_p_obs_given_gamma_is_minus_1 -= max_log_p_obs log_p_obs_given_gamma_is_0 -= max_log_p_obs log_p_obs_given_gamma_is_1 -= max_log_p_obs p2_denominator = math.exp(log_p_obs_given_gamma_is_1) \ + math.exp(log_p_obs_given_gamma_is_minus_1) p3_denominator = math.exp(log_p_obs_given_gamma_is_1) \ + math.exp(log_p_obs_given_gamma_is_minus_1) \ + math.exp(log_p_obs_given_gamma_is_0) # if p2_denominator == 0: self.p2_true = self.p2_false = None else: self.p2_true = ( math.exp(log_p_obs_given_gamma_is_1)) / p2_denominator self.p2_false = ( math.exp(log_p_obs_given_gamma_is_minus_1)) / p2_denominator self.p3_true = ( math.exp(log_p_obs_given_gamma_is_1)) / p3_denominator self.p3_false = ( math.exp(log_p_obs_given_gamma_is_minus_1)) / p3_denominator self.p3_racemic_twin = ( math.exp(log_p_obs_given_gamma_is_0)) / p3_denominator def log_p_obs_given_gamma(self, gamma): x_gamma = (gamma * self.delta_fc2.data() - self.delta_fo2.data()) \ / self.delta_fo2.sigmas() if self.probability_plot_slope is not None: x_gamma /= self.probability_plot_slope return -0.5 * flex.sum_sq(x_gamma) def show(self, out=None): def format_p(p_value): if p_value is None: return "n/a" elif p_value >= 1e-2: return "%.3f" %p_value else: return "%.3e" %p_value if out is None: out=sys.stdout print("Bijvoet pair analysis using %s distribution" %self.distribution, file=out) print("Bijvoet pairs (all): %i" %self.n_bijvoet_pairs, file=out) print("Bijvoet pairs (used): %i" %self.delta_fo2.size(), file=out) print("Bijvoet pairs coverage: %.2f" %( self.n_bijvoet_pairs/self.delta_fo2.customized_copy( anomalous_flag=True).complete_set().n_bijvoet_pairs()), file=out) print("G: %s" %format_float_with_su(self.G, self.sigma_G), file=out) print("P2(true): %s" %format_p(self.p2_true), file=out) print("P2(false): %s" %format_p(self.p2_false), file=out) print("P3(true): %s" %format_p(self.p3_true), file=out) print("P3(false): %s" %format_p(self.p3_false), file=out) print("P3(racemic twin): %s" %format_p(self.p3_racemic_twin), file=out) print("Hooft y: %s" %format_float_with_su( self.hooft_y, self.sigma_y), file=out) class bijvoet_differences_probability_plot(object): """ Hooft, R.W.W., Straver, L.H., Spek, A.L. (2010). J. Appl. Cryst., 43, 665-668. 
""" def __init__(self, hooft_analysis, use_students_t_distribution=False, students_t_nu=None, probability_plot_slope=None): self.delta_fo2, minus_fo2 =\ hooft_analysis.delta_fo2.generate_bijvoet_mates().hemispheres_acentrics() self.delta_fc2, minus_fc2 =\ hooft_analysis.delta_fc2.generate_bijvoet_mates().hemispheres_acentrics() # we want to plot both hemispheres self.delta_fo2.indices().extend(minus_fo2.indices()) self.delta_fo2.data().extend(minus_fo2.data() * -1) self.delta_fo2.sigmas().extend(minus_fo2.sigmas()) self.delta_fc2.indices().extend(minus_fc2.indices()) self.delta_fc2.data().extend(minus_fc2.data() * -1) self.indices = self.delta_fo2.indices() observed_deviations = (hooft_analysis.G * self.delta_fc2.data() - self.delta_fo2.data())/self.delta_fo2.sigmas() if probability_plot_slope is not None: observed_deviations /= probability_plot_slope selection = flex.sort_permutation(observed_deviations) observed_deviations = observed_deviations.select(selection) if use_students_t_distribution: if students_t_nu is None: students_t_nu = maximise_students_t_correlation_coefficient( observed_deviations, 1, 200) self.distribution = distributions.students_t_distribution(students_t_nu) else: self.distribution = distributions.normal_distribution() self.x = self.distribution.quantiles(observed_deviations.size()) self.y = observed_deviations self.fit = flex.linear_regression(self.x[5:-5], self.y[5:-5]) self.correlation = flex.linear_correlation(self.x[5:-5], self.y[5:-5]) assert self.fit.is_well_defined() def show(self, out=None): if out is None: out=sys.stdout print("y_intercept: %.3f" %self.fit.y_intercept(), file=out) print("slope: %.3f" %self.fit.slope(), file=out) print("correlation coefficient: %.4f" %self.correlation.coefficient(), file=out) def maximise_students_t_correlation_coefficient(observed_deviations, min_nu, max_nu): def compute_corr_coeff(i): distribution = distributions.students_t_distribution(i) expected_deviations = distribution.quantiles(observed_deviations.size()) return flex.linear_correlation( observed_deviations[5:-5], expected_deviations[5:-5]) assert max_nu > min_nu assert min_nu > 0 while True: width = max_nu - min_nu if width < 0.01: break middle = (min_nu + max_nu)/2 a = middle - width/4 b = middle + width/4 value_a = compute_corr_coeff(a).coefficient() value_b = compute_corr_coeff(b).coefficient() if value_a > value_b: max_nu = middle elif value_a == value_b: min_nu = a max_nu = b else: min_nu = middle return middle class students_t_hooft_analysis(hooft_analysis): """ Hooft, R.W.W., Straver, L.H., Spek, A.L. (2010). J. Appl. Cryst., 43, 665-668. 
""" distribution = "Student's t" def __init__(self, fo2, fc, degrees_of_freedom, scale_factor=None, outlier_cutoff_factor=None, probability_plot_slope=None): self.degrees_of_freedom = degrees_of_freedom hooft_analysis.__init__(self, fo2, fc, scale_factor=scale_factor, outlier_cutoff_factor=outlier_cutoff_factor, probability_plot_slope=probability_plot_slope) def log_p_obs_given_gamma(self, gamma): dof = self.degrees_of_freedom x_gamma = (gamma * self.delta_fc2.data() - self.delta_fo2.data()) \ / self.delta_fo2.sigmas() if self.probability_plot_slope is not None: x_gamma /= self.probability_plot_slope return -(1+dof)/2 * flex.sum(flex.log(flex.pow2(x_gamma) + dof)) class flack_analysis(object): def __init__(self, xray_structure, obs_, exti=None, connectivity_table=None): if exti is None: exti = xray.dummy_extinction_correction() adopt_init_args(self, locals()) assert obs_.fo_sq.anomalous_flag() assert not(obs_.twin_fractions and obs_.merohedral_components) xray_structure = xray_structure.deep_copy_scatterers() for sc in xray_structure.scatterers(): f = xray.scatterer_flags() f.set_use_u_aniso(sc.flags.use_u_aniso()) f.set_use_u_iso(sc.flags.use_u_iso()) f.set_use_fp_fdp(True) sc.flags = f twin_fractions = () it = xray.twin_component(sgtbx.rot_mx((-1,0,0,0,-1,0,0,0,-1)), 0.2, True) twin_components = (it,) obs = observations.customized_copy(obs_, twin_fractions, twin_components) # reparameterisation needs all fractions twin_fractions += twin_components if connectivity_table is None: connectivity_table = smtbx.utils.connectivity_table(xray_structure) reparametrisation = constraints.reparametrisation( xray_structure, [], connectivity_table, twin_fractions=twin_fractions, extinction=exti ) normal_eqns = least_squares.crystallographic_ls(obs, reparametrisation) cycles = normal_eqns_solving.naive_iterations( normal_eqns, n_max_iterations=10, gradient_threshold=1e-7, step_threshold=1e-4) self.flack_x = it.value self.sigma_x = math.sqrt(normal_eqns.covariance_matrix( jacobian_transpose=reparametrisation.jacobian_transpose_matching( reparametrisation.mapping_to_grad_fc_independent_scalars))[0]) def show(self, out=None): if out is None: out = sys.stdout print("Flack x: %s" %format_float_with_su(self.flack_x, self.sigma_x), file=out)
38.828986
85
0.688041
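The hooft_analysis class in the record above estimates the absolute-structure parameter by evaluating log p(obs | gamma) on a grid of gamma values and integrating with the trapezoidal rule to obtain G, its standard uncertainty, and the Hooft y = (1 - G)/2. The sketch below re-expresses only that numerical core on plain Python lists with made-up toy data; it is not part of the module and omits the outlier cutoff, the adaptive search for the integration window, the probability-plot slope, and the P2/P3 probabilities.

import math

def log_p_obs_given_gamma(gamma, delta_fo2, delta_fc2, sigmas):
    # Gaussian model: -0.5 * sum of squared normalised deviations,
    # mirroring hooft_analysis.log_p_obs_given_gamma
    return -0.5 * sum(
        ((gamma * fc2 - fo2) / s) ** 2
        for fo2, fc2, s in zip(delta_fo2, delta_fc2, sigmas)
    )

def estimate_G(delta_fo2, delta_fc2, sigmas,
               min_gamma=-10.0, max_gamma=10.0, n_steps=500):
    d_gamma = (max_gamma - min_gamma) / n_steps
    gammas = [min_gamma + i * d_gamma for i in range(n_steps + 1)]
    log_p = [log_p_obs_given_gamma(g, delta_fo2, delta_fc2, sigmas)
             for g in gammas]
    max_log_p = max(log_p)  # subtract the maximum for numerical stability
    p = [math.exp(lp - max_log_p) for lp in log_p]
    # trapezoidal rule for the normalisation constant, the mean and the variance
    den = sum(0.5 * d_gamma * (p[i - 1] + p[i]) for i in range(1, len(p)))
    num = sum(0.5 * d_gamma * (gammas[i - 1] * p[i - 1] + gammas[i] * p[i])
              for i in range(1, len(p)))
    G = num / den
    var = sum(0.5 * d_gamma * ((gammas[i - 1] - G) ** 2 * p[i - 1]
                               + (gammas[i] - G) ** 2 * p[i])
              for i in range(1, len(p))) / den
    return G, math.sqrt(var), (1 - G) / 2  # G, sigma(G), Hooft y

# toy Bijvoet differences in which Fo differences roughly follow Fc differences
G, sigma_G, hooft_y = estimate_G([0.9, -1.1, 0.5], [1.0, -1.0, 0.6],
                                 [0.3, 0.3, 0.3])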
795753f5aa0315b7d93ee121742c2df07df9a666
1,699
py
Python
python/network-security/vernam.py
shivekkhurana/learning
d871343a30cf4db85f3f938a2ca0e419997ca84e
[ "MIT" ]
null
null
null
python/network-security/vernam.py
shivekkhurana/learning
d871343a30cf4db85f3f938a2ca0e419997ca84e
[ "MIT" ]
null
null
null
python/network-security/vernam.py
shivekkhurana/learning
d871343a30cf4db85f3f938a2ca0e419997ca84e
[ "MIT" ]
null
null
null
from random import randint
import string


class Vernam(object):
    def __init__(self):
        super(Vernam, self).__init__()
        self.chars = string.lowercase + ' '

    def o(self, c):
        return self.chars.find(c)

    def c(self, o):
        if o < len(self.chars) and o >= 0:
            return self.chars[o]
        return None

    def decrypt(self, payload, key):
        key = key.split(',')
        key = [int(k) for k in key]
        number_equivalents = (self.o(c) for c in payload)
        number_equivalents = [n for n in number_equivalents if n != -1]
        operated = (
            n - key[index] for index, n in enumerate(number_equivalents)
        )
        modded = (o % len(self.chars) for o in operated)
        decypher = [self.c(m) for m in modded]
        return ''.join(decypher)

    def encrypt(self, payload):
        payload = payload.lower()
        number_equivalents = (self.o(c) for c in payload)
        number_equivalents = [n for n in number_equivalents if n != -1]
        randints = [randint(0, 99) for n in number_equivalents]
        operated = (
            randints[index] + n for index, n in enumerate(number_equivalents)
        )
        modded = (o % len(self.chars) for o in operated)
        cypher = [self.c(m) for m in modded]
        return (''.join(cypher), ','.join([str(i) for i in randints]))


def main():
    payload = raw_input("Enter Payload : ")
    # payload = 'all the students of class are good'
    encrypt = Vernam().encrypt(payload)
    cipher = encrypt[0]
    key = encrypt[1]
    print(cipher, key)
    print('Decipher', Vernam().decrypt(cipher, key))


if __name__ == '__main__':
    main()
29.293103
71
0.576221
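The Vernam class in this record implements a one-time-pad-style cipher over a 27-symbol alphabet (lowercase letters plus space): each plaintext index is shifted by an independent random key value and reduced modulo the alphabet size, and decryption subtracts the same key. Because the file is Python 2 (string.lowercase, raw_input), the following is a hypothetical Python 3 re-expression of the same round trip, not the original code.

from random import randint
import string

CHARS = string.ascii_lowercase + ' '  # 27-symbol alphabet, as in the class above

def encrypt(payload):
    nums = [CHARS.find(c) for c in payload.lower()]
    nums = [n for n in nums if n != -1]      # drop characters outside the alphabet
    key = [randint(0, 99) for _ in nums]     # one random shift per character
    cipher = ''.join(CHARS[(n + k) % len(CHARS)] for n, k in zip(nums, key))
    return cipher, key

def decrypt(cipher, key):
    nums = [CHARS.find(c) for c in cipher]
    return ''.join(CHARS[(n - k) % len(CHARS)] for n, k in zip(nums, key))

cipher, key = encrypt("attack at dawn")
assert decrypt(cipher, key) == "attack at dawn"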
795754b5ed435b2b41eaea4dcb25c24c63537000
3,030
py
Python
modules/google.py
dngfx/MagicBot
56abfce2aac28f36e24ebe00229625196b269907
[ "WTFPL" ]
1
2020-12-31T03:10:42.000Z
2020-12-31T03:10:42.000Z
modules/google.py
dngfx/MagicBot
56abfce2aac28f36e24ebe00229625196b269907
[ "WTFPL" ]
3
2020-10-12T21:27:28.000Z
2021-08-12T09:46:55.000Z
modules/google.py
dngfx/MagicBot
56abfce2aac28f36e24ebe00229625196b269907
[ "WTFPL" ]
1
2020-10-12T21:17:58.000Z
2020-10-12T21:17:58.000Z
# --depends-on commands # --depends-on config # --require-config google-api-key # --require-config google-search-id import json from src import ModuleManager, utils URL_GOOGLESEARCH = "https://www.googleapis.com/customsearch/v1" URL_GOOGLESUGGEST = "http://google.com/complete/search" @utils.export( "channelset", utils.BoolSetting("google-safesearch", "Turn safe search off/on") ) class Module(ModuleManager.BaseModule): @utils.hook("received.command.g", alias_of="google") @utils.hook("received.command.google") def google(self, event): """ :help: Get first Google result for a given search term :usage: [search term] """ phrase = event["args"] or event["target"].buffer.get() if phrase: safe_setting = event["target"].get_setting("google-safesearch", True) safe = "active" if safe_setting else "off" page = utils.http.request( URL_GOOGLESEARCH, get_params={ "q": phrase, "prettyPrint": "true", "num": 1, "gl": "gb", "key": self.bot.config["google-api-key"], "cx": self.bot.config["google-search-id"], "safe": safe, }, ).json() if page: if "items" in page and len(page["items"]): item = page["items"][0] link = item["link"] text = utils.parse.line_normalise(item["snippet"] or item["title"]) event["stdout"].write( "%s: %s — %s" % (event["user"].nickname, text, link) ) else: event["stderr"].write("No results found") else: raise utils.EventResultsError() else: event["stderr"].write("No phrase provided") @utils.hook("received.command.suggest") def suggest(self, event): """ :help: Get suggested phrases from Google :usage: [phrase] """ phrase = event["args"] or event["target"].buffer.get() if phrase: page = utils.http.request( URL_GOOGLESUGGEST, get_params={"output": "json", "client": "hp", "gl": "gb", "q": phrase}, ).json() if page: # google gives us jsonp, so we need to unwrap it. page = page.split("(", 1)[1][:-1] page = json.loads(page) suggestions = page[1] suggestions = [utils.http.strip_html(s[0]) for s in suggestions] if suggestions: event["stdout"].write("%s: %s" % (phrase, ", ".join(suggestions))) else: event["stderr"].write("No suggestions found") else: raise utils.EventResultsError() else: event["stderr"].write("No phrase provided")
35.232558
87
0.50066
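The google command in this record calls the Google Custom Search JSON API at customsearch/v1 with q, num, gl, key, cx and safe parameters and reports the first entry of items from the JSON response. Below is a standalone sketch of the same request using the requests library; YOUR_API_KEY and YOUR_SEARCH_ID stand in for the google-api-key and google-search-id values the module reads from its config, and the function name is made up for the example.

import requests

URL_GOOGLESEARCH = "https://www.googleapis.com/customsearch/v1"

def first_google_result(phrase, api_key="YOUR_API_KEY",
                        search_id="YOUR_SEARCH_ID", safe=True):
    # same query parameters the module builds, minus prettyPrint
    page = requests.get(
        URL_GOOGLESEARCH,
        params={
            "q": phrase,
            "num": 1,
            "gl": "gb",
            "key": api_key,
            "cx": search_id,
            "safe": "active" if safe else "off",
        },
        timeout=10,
    ).json()
    items = page.get("items") or []
    if not items:
        return None  # mirrors the "No results found" branch
    item = items[0]
    return item["snippet"] or item["title"], item["link"]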
7957572d582e4fc6aa8cff914e57abb8c8bab553
103,310
py
Python
superset/views/core.py
jackyq2015/incubator-superset
ffa80c69771a784318e9b713ecf02399f0083556
[ "Apache-2.0" ]
null
null
null
superset/views/core.py
jackyq2015/incubator-superset
ffa80c69771a784318e9b713ecf02399f0083556
[ "Apache-2.0" ]
7
2020-03-24T18:12:00.000Z
2022-03-29T22:27:49.000Z
superset/views/core.py
jackyq2015/incubator-superset
ffa80c69771a784318e9b713ecf02399f0083556
[ "Apache-2.0" ]
null
null
null
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=C,R,W import logging import re from contextlib import closing from datetime import datetime, timedelta from typing import Any, cast, Dict, List, Optional, Union from urllib import parse import backoff import msgpack import pandas as pd import pyarrow as pa import simplejson as json from flask import ( abort, flash, g, Markup, redirect, render_template, request, Response, url_for, ) from flask_appbuilder import expose from flask_appbuilder.models.sqla.interface import SQLAInterface from flask_appbuilder.security.decorators import has_access, has_access_api from flask_appbuilder.security.sqla import models as ab_models from flask_babel import gettext as __, lazy_gettext as _ from sqlalchemy import and_, Integer, or_, select from sqlalchemy.exc import SQLAlchemyError from sqlalchemy.orm.session import Session from werkzeug.routing import BaseConverter from werkzeug.urls import Href import superset.models.core as models from superset import ( app, appbuilder, cache, conf, dataframe, db, event_logger, get_feature_flags, is_feature_enabled, result_set, results_backend, results_backend_use_msgpack, security_manager, sql_lab, talisman, viz, ) from superset.connectors.connector_registry import ConnectorRegistry from superset.connectors.sqla.models import AnnotationDatasource from superset.constants import RouteMethod from superset.exceptions import ( DatabaseNotFound, SupersetException, SupersetSecurityException, SupersetTimeoutException, ) from superset.jinja_context import get_template_processor from superset.models.dashboard import Dashboard from superset.models.datasource_access_request import DatasourceAccessRequest from superset.models.slice import Slice from superset.models.sql_lab import Query, TabState from superset.models.user_attributes import UserAttribute from superset.sql_parse import ParsedQuery from superset.sql_validators import get_validator_by_name from superset.utils import core as utils, dashboard_import_export from superset.utils.dates import now_as_float from superset.utils.decorators import etag_cache, stats_timing from superset.views.chart import views as chart_views from .base import ( api, BaseFilter, BaseSupersetView, check_ownership, common_bootstrap_payload, CsvResponse, data_payload_response, DeleteMixin, generate_download_headers, get_error_msg, get_user_roles, handle_api_exception, json_error_response, json_success, SupersetModelView, ) from .dashboard import views as dash_views from .database import views as in_views from .utils import ( apply_display_max_row_limit, bootstrap_user_data, get_datasource_info, get_form_data, get_viz, ) config = app.config CACHE_DEFAULT_TIMEOUT = config["CACHE_DEFAULT_TIMEOUT"] SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT = 
config["SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT"] stats_logger = config["STATS_LOGGER"] DAR = DatasourceAccessRequest QueryStatus = utils.QueryStatus DATABASE_KEYS = [ "allow_csv_upload", "allow_ctas", "allow_dml", "allow_multi_schema_metadata_fetch", "allow_run_async", "allows_subquery", "backend", "database_name", "expose_in_sqllab", "force_ctas_schema", "id", ] ALL_DATASOURCE_ACCESS_ERR = __( "This endpoint requires the `all_datasource_access` permission" ) DATASOURCE_MISSING_ERR = __("The data source seems to have been deleted") ACCESS_REQUEST_MISSING_ERR = __("The access requests seem to have been deleted") USER_MISSING_ERR = __("The user seems to have been deleted") FORM_DATA_KEY_BLACKLIST: List[str] = [] if not config["ENABLE_JAVASCRIPT_CONTROLS"]: FORM_DATA_KEY_BLACKLIST = ["js_tooltip", "js_onclick_href", "js_data_mutator"] def get_database_access_error_msg(database_name): return __( "This view requires the database %(name)s or " "`all_datasource_access` permission", name=database_name, ) def is_owner(obj, user): """ Check if user is owner of the slice """ return obj and user in obj.owners def check_datasource_perms( self, datasource_type: str = None, datasource_id: int = None ) -> None: """ Check if user can access a cached response from explore_json. This function takes `self` since it must have the same signature as the the decorated method. :param datasource_type: The datasource type, i.e., 'druid' or 'table' :param datasource_id: The datasource ID :raises SupersetSecurityException: If the user cannot access the resource """ form_data = get_form_data()[0] try: datasource_id, datasource_type = get_datasource_info( datasource_id, datasource_type, form_data ) except SupersetException as e: raise SupersetSecurityException(str(e)) viz_obj = get_viz( datasource_type=datasource_type, datasource_id=datasource_id, form_data=form_data, force=False, ) security_manager.assert_viz_permission(viz_obj) def check_slice_perms(self, slice_id): """ Check if user can access a cached response from slice_json. This function takes `self` since it must have the same signature as the the decorated method. 
""" form_data, slc = get_form_data(slice_id, use_slice_data=True) viz_obj = get_viz( datasource_type=slc.datasource.type, datasource_id=slc.datasource.id, form_data=form_data, force=False, ) security_manager.assert_viz_permission(viz_obj) def _deserialize_results_payload( payload: Union[bytes, str], query, use_msgpack: Optional[bool] = False ) -> dict: logging.debug(f"Deserializing from msgpack: {use_msgpack}") if use_msgpack: with stats_timing( "sqllab.query.results_backend_msgpack_deserialize", stats_logger ): ds_payload = msgpack.loads(payload, raw=False) with stats_timing("sqllab.query.results_backend_pa_deserialize", stats_logger): pa_table = pa.deserialize(ds_payload["data"]) df = result_set.SupersetResultSet.convert_table_to_df(pa_table) ds_payload["data"] = dataframe.df_to_records(df) or [] db_engine_spec = query.database.db_engine_spec all_columns, data, expanded_columns = db_engine_spec.expand_data( ds_payload["selected_columns"], ds_payload["data"] ) ds_payload.update( {"data": data, "columns": all_columns, "expanded_columns": expanded_columns} ) return ds_payload else: with stats_timing( "sqllab.query.results_backend_json_deserialize", stats_logger ): return json.loads(payload) # type: ignore class AccessRequestsModelView(SupersetModelView, DeleteMixin): datamodel = SQLAInterface(DAR) include_route_methods = RouteMethod.CRUD_SET list_columns = [ "username", "user_roles", "datasource_link", "roles_with_datasource", "created_on", ] order_columns = ["created_on"] base_order = ("changed_on", "desc") label_columns = { "username": _("User"), "user_roles": _("User Roles"), "database": _("Database URL"), "datasource_link": _("Datasource"), "roles_with_datasource": _("Roles to grant"), "created_on": _("Created On"), } @talisman(force_https=False) @app.route("/health") def health(): return "OK" @talisman(force_https=False) @app.route("/healthcheck") def healthcheck(): return "OK" @talisman(force_https=False) @app.route("/ping") def ping(): return "OK" class KV(BaseSupersetView): """Used for storing and retrieving key value pairs""" @event_logger.log_this @has_access_api @expose("/store/", methods=["POST"]) def store(self): try: value = request.form.get("data") obj = models.KeyValue(value=value) db.session.add(obj) db.session.commit() except Exception as e: return json_error_response(e) return Response(json.dumps({"id": obj.id}), status=200) @event_logger.log_this @has_access_api @expose("/<key_id>/", methods=["GET"]) def get_value(self, key_id): try: kv = db.session.query(models.KeyValue).filter_by(id=key_id).scalar() if not kv: return Response(status=404, content_type="text/plain") except Exception as e: return json_error_response(e) return Response(kv.value, status=200, content_type="text/plain") class R(BaseSupersetView): """used for short urls""" @event_logger.log_this @expose("/<url_id>") def index(self, url_id): url = db.session.query(models.Url).get(url_id) if url and url.url: explore_url = "//superset/explore/?" 
if url.url.startswith(explore_url): explore_url += f"r={url_id}" return redirect(explore_url[1:]) else: return redirect(url.url[1:]) else: flash("URL to nowhere...", "danger") return redirect("/") @event_logger.log_this @has_access_api @expose("/shortner/", methods=["POST"]) def shortner(self): url = request.form.get("data") obj = models.Url(url=url) db.session.add(obj) db.session.commit() return Response( "{scheme}://{request.headers[Host]}/r/{obj.id}".format( scheme=request.scheme, request=request, obj=obj ), mimetype="text/plain", ) class Superset(BaseSupersetView): """The base views for Superset!""" logger = logging.getLogger(__name__) @has_access_api @expose("/datasources/") def datasources(self): datasources = ConnectorRegistry.get_all_datasources(db.session) datasources = [o.short_data for o in datasources if o.short_data.get("name")] datasources = sorted(datasources, key=lambda o: o["name"]) return self.json_response(datasources) @has_access_api @expose("/override_role_permissions/", methods=["POST"]) def override_role_permissions(self): """Updates the role with the give datasource permissions. Permissions not in the request will be revoked. This endpoint should be available to admins only. Expects JSON in the format: { 'role_name': '{role_name}', 'database': [{ 'datasource_type': '{table|druid}', 'name': '{database_name}', 'schema': [{ 'name': '{schema_name}', 'datasources': ['{datasource name}, {datasource name}'] }] }] } """ data = request.get_json(force=True) role_name = data["role_name"] databases = data["database"] db_ds_names = set() for dbs in databases: for schema in dbs["schema"]: for ds_name in schema["datasources"]: fullname = utils.get_datasource_full_name( dbs["name"], ds_name, schema=schema["name"] ) db_ds_names.add(fullname) existing_datasources = ConnectorRegistry.get_all_datasources(db.session) datasources = [d for d in existing_datasources if d.full_name in db_ds_names] role = security_manager.find_role(role_name) # remove all permissions role.permissions = [] # grant permissions to the list of datasources granted_perms = [] for datasource in datasources: view_menu_perm = security_manager.find_permission_view_menu( view_menu_name=datasource.perm, permission_name="datasource_access" ) # prevent creating empty permissions if view_menu_perm and view_menu_perm.view_menu: role.permissions.append(view_menu_perm) granted_perms.append(view_menu_perm.view_menu.name) db.session.commit() return self.json_response( {"granted": granted_perms, "requested": list(db_ds_names)}, status=201 ) @event_logger.log_this @has_access @expose("/request_access/") def request_access(self): datasources = set() dashboard_id = request.args.get("dashboard_id") if dashboard_id: dash = db.session.query(Dashboard).filter_by(id=int(dashboard_id)).one() datasources |= dash.datasources datasource_id = request.args.get("datasource_id") datasource_type = request.args.get("datasource_type") if datasource_id: ds_class = ConnectorRegistry.sources.get(datasource_type) datasource = ( db.session.query(ds_class).filter_by(id=int(datasource_id)).one() ) datasources.add(datasource) has_access = all( ( datasource and security_manager.datasource_access(datasource) for datasource in datasources ) ) if has_access: return redirect("/superset/dashboard/{}".format(dashboard_id)) if request.args.get("action") == "go": for datasource in datasources: access_request = DAR( datasource_id=datasource.id, datasource_type=datasource.type ) db.session.add(access_request) db.session.commit() flash(__("Access was requested"), 
"info") return redirect("/") return self.render_template( "superset/request_access.html", datasources=datasources, datasource_names=", ".join([o.name for o in datasources]), ) @event_logger.log_this @has_access @expose("/approve") def approve(self): def clean_fulfilled_requests(session): for r in session.query(DAR).all(): datasource = ConnectorRegistry.get_datasource( r.datasource_type, r.datasource_id, session ) if not datasource or security_manager.datasource_access(datasource): # datasource does not exist anymore session.delete(r) session.commit() datasource_type = request.args.get("datasource_type") datasource_id = request.args.get("datasource_id") created_by_username = request.args.get("created_by") role_to_grant = request.args.get("role_to_grant") role_to_extend = request.args.get("role_to_extend") session = db.session datasource = ConnectorRegistry.get_datasource( datasource_type, datasource_id, session ) if not datasource: flash(DATASOURCE_MISSING_ERR, "alert") return json_error_response(DATASOURCE_MISSING_ERR) requested_by = security_manager.find_user(username=created_by_username) if not requested_by: flash(USER_MISSING_ERR, "alert") return json_error_response(USER_MISSING_ERR) requests = ( session.query(DAR) .filter( DAR.datasource_id == datasource_id, DAR.datasource_type == datasource_type, DAR.created_by_fk == requested_by.id, ) .all() ) if not requests: flash(ACCESS_REQUEST_MISSING_ERR, "alert") return json_error_response(ACCESS_REQUEST_MISSING_ERR) # check if you can approve if security_manager.all_datasource_access() or check_ownership( datasource, raise_if_false=False ): # can by done by admin only if role_to_grant: role = security_manager.find_role(role_to_grant) requested_by.roles.append(role) msg = __( "%(user)s was granted the role %(role)s that gives access " "to the %(datasource)s", user=requested_by.username, role=role_to_grant, datasource=datasource.full_name, ) utils.notify_user_about_perm_udate( g.user, requested_by, role, datasource, "email/role_granted.txt", app.config, ) flash(msg, "info") if role_to_extend: perm_view = security_manager.find_permission_view_menu( "email/datasource_access", datasource.perm ) role = security_manager.find_role(role_to_extend) security_manager.add_permission_role(role, perm_view) msg = __( "Role %(r)s was extended to provide the access to " "the datasource %(ds)s", r=role_to_extend, ds=datasource.full_name, ) utils.notify_user_about_perm_udate( g.user, requested_by, role, datasource, "email/role_extended.txt", app.config, ) flash(msg, "info") clean_fulfilled_requests(session) else: flash(__("You have no permission to approve this request"), "danger") return redirect("/accessrequestsmodelview/list/") for r in requests: session.delete(r) session.commit() return redirect("/accessrequestsmodelview/list/") def get_viz( self, slice_id=None, form_data=None, datasource_type=None, datasource_id=None, force=False, ): if slice_id: slc = db.session.query(Slice).filter_by(id=slice_id).one() return slc.get_viz() else: viz_type = form_data.get("viz_type", "table") datasource = ConnectorRegistry.get_datasource( datasource_type, datasource_id, db.session ) viz_obj = viz.viz_types[viz_type]( datasource, form_data=form_data, force=force ) return viz_obj @has_access @expose("/slice/<slice_id>/") def slice(self, slice_id): form_data, slc = get_form_data(slice_id, use_slice_data=True) if not slc: abort(404) endpoint = "/superset/explore/?form_data={}".format( parse.quote(json.dumps({"slice_id": slice_id})) ) param = 
utils.ReservedUrlParameters.STANDALONE.value if request.args.get(param) == "true": endpoint += f"&{param}=true" return redirect(endpoint) def get_query_string_response(self, viz_obj): query = None try: query_obj = viz_obj.query_obj() if query_obj: query = viz_obj.datasource.get_query_str(query_obj) except Exception as e: logging.exception(e) return json_error_response(e) if not query: query = "No query." return self.json_response( {"query": query, "language": viz_obj.datasource.query_language} ) def get_raw_results(self, viz_obj): return self.json_response( {"data": viz_obj.get_df_payload()["df"].to_dict("records")} ) def get_samples(self, viz_obj): return self.json_response({"data": viz_obj.get_samples()}) def generate_json( self, viz_obj, csv=False, query=False, results=False, samples=False ): if csv: return CsvResponse( viz_obj.get_csv(), status=200, headers=generate_download_headers("csv"), mimetype="application/csv", ) if query: return self.get_query_string_response(viz_obj) if results: return self.get_raw_results(viz_obj) if samples: return self.get_samples(viz_obj) payload = viz_obj.get_payload() return data_payload_response(*viz_obj.payload_json_and_has_error(payload)) @event_logger.log_this @api @has_access_api @expose("/slice_json/<slice_id>") @etag_cache(CACHE_DEFAULT_TIMEOUT, check_perms=check_slice_perms) def slice_json(self, slice_id): form_data, slc = get_form_data(slice_id, use_slice_data=True) datasource_type = slc.datasource.type datasource_id = slc.datasource.id viz_obj = get_viz( datasource_type=datasource_type, datasource_id=datasource_id, form_data=form_data, force=False, ) return self.generate_json(viz_obj) @event_logger.log_this @api @has_access_api @expose("/annotation_json/<layer_id>") def annotation_json(self, layer_id): form_data = get_form_data()[0] form_data["layer_id"] = layer_id form_data["filters"] = [{"col": "layer_id", "op": "==", "val": layer_id}] datasource = AnnotationDatasource() viz_obj = viz.viz_types["table"](datasource, form_data=form_data, force=False) payload = viz_obj.get_payload() return data_payload_response(*viz_obj.payload_json_and_has_error(payload)) EXPLORE_JSON_METHODS = ["POST"] if not is_feature_enabled("ENABLE_EXPLORE_JSON_CSRF_PROTECTION"): EXPLORE_JSON_METHODS.append("GET") @event_logger.log_this @api @has_access_api @handle_api_exception @expose( "/explore_json/<datasource_type>/<datasource_id>/", methods=EXPLORE_JSON_METHODS ) @expose("/explore_json/", methods=EXPLORE_JSON_METHODS) @etag_cache(CACHE_DEFAULT_TIMEOUT, check_perms=check_datasource_perms) def explore_json(self, datasource_type=None, datasource_id=None): """Serves all request that GET or POST form_data This endpoint evolved to be the entry point of many different requests that GETs or POSTs a form_data. 
`self.generate_json` receives this input and returns different payloads based on the request args in the first block TODO: break into one endpoint for each return shape""" csv = request.args.get("csv") == "true" query = request.args.get("query") == "true" results = request.args.get("results") == "true" samples = request.args.get("samples") == "true" force = request.args.get("force") == "true" form_data = get_form_data()[0] try: datasource_id, datasource_type = get_datasource_info( datasource_id, datasource_type, form_data ) except SupersetException as e: return json_error_response(utils.error_msg_from_exception(e)) viz_obj = get_viz( datasource_type=datasource_type, datasource_id=datasource_id, form_data=form_data, force=force, ) return self.generate_json( viz_obj, csv=csv, query=query, results=results, samples=samples ) @event_logger.log_this @has_access @expose("/import_dashboards", methods=["GET", "POST"]) def import_dashboards(self): """Overrides the dashboards using json instances from the file.""" f = request.files.get("file") if request.method == "POST" and f: try: dashboard_import_export.import_dashboards(db.session, f.stream) except DatabaseNotFound as e: flash( _( "Cannot import dashboard: %(db_error)s.\n" "Make sure to create the database before " "importing the dashboard.", db_error=e, ), "danger", ) except Exception as e: logging.exception(e) flash( _( "An unknown error occurred. " "Please contact your Superset administrator" ), "danger", ) return redirect("/dashboard/list/") return self.render_template("superset/import_dashboards.html") @event_logger.log_this @has_access @expose("/explorev2/<datasource_type>/<datasource_id>/") def explorev2(self, datasource_type, datasource_id): """Deprecated endpoint, here for backward compatibility of urls""" return redirect( url_for( "Superset.explore", datasource_type=datasource_type, datasource_id=datasource_id, **request.args, ) ) @event_logger.log_this @has_access @expose("/explore/<datasource_type>/<datasource_id>/", methods=["GET", "POST"]) @expose("/explore/", methods=["GET", "POST"]) def explore(self, datasource_type=None, datasource_id=None): user_id = g.user.get_id() if g.user else None form_data, slc = get_form_data(use_slice_data=True) # Flash the SIP-15 message if the slice is owned by the current user and has not # been updated, i.e., is not using the [start, end) interval. if ( config["SIP_15_ENABLED"] and slc and g.user in slc.owners and ( not form_data.get("time_range_endpoints") or form_data["time_range_endpoints"] != ( utils.TimeRangeEndpoint.INCLUSIVE, utils.TimeRangeEndpoint.EXCLUSIVE, ) ) ): url = Href("/superset/explore/")( { "form_data": json.dumps( { "slice_id": slc.id, "time_range_endpoints": ( utils.TimeRangeEndpoint.INCLUSIVE.value, utils.TimeRangeEndpoint.EXCLUSIVE.value, ), } ) } ) flash(Markup(config["SIP_15_TOAST_MESSAGE"].format(url=url))) error_redirect = "/chart/list/" try: datasource_id, datasource_type = get_datasource_info( datasource_id, datasource_type, form_data ) except SupersetException: return redirect(error_redirect) datasource = ConnectorRegistry.get_datasource( datasource_type, datasource_id, db.session ) if not datasource: flash(DATASOURCE_MISSING_ERR, "danger") return redirect(error_redirect) if config["ENABLE_ACCESS_REQUEST"] and ( not security_manager.datasource_access(datasource) ): flash( __(security_manager.get_datasource_access_error_msg(datasource)), "danger", ) return redirect( "superset/request_access/?" 
f"datasource_type={datasource_type}&" f"datasource_id={datasource_id}&" ) viz_type = form_data.get("viz_type") if not viz_type and datasource.default_endpoint: return redirect(datasource.default_endpoint) # slc perms slice_add_perm = security_manager.can_access("can_add", "SliceModelView") slice_overwrite_perm = is_owner(slc, g.user) slice_download_perm = security_manager.can_access( "can_download", "SliceModelView" ) form_data["datasource"] = str(datasource_id) + "__" + datasource_type # On explore, merge legacy and extra filters into the form data utils.convert_legacy_filters_into_adhoc(form_data) utils.merge_extra_filters(form_data) # merge request url params if request.method == "GET": utils.merge_request_params(form_data, request.args) # handle save or overwrite action = request.args.get("action") if action == "overwrite" and not slice_overwrite_perm: return json_error_response( _("You don't have the rights to ") + _("alter this ") + _("chart"), status=400, ) if action == "saveas" and not slice_add_perm: return json_error_response( _("You don't have the rights to ") + _("create a ") + _("chart"), status=400, ) if action in ("saveas", "overwrite"): return self.save_or_overwrite_slice( request.args, slc, slice_add_perm, slice_overwrite_perm, slice_download_perm, datasource_id, datasource_type, datasource.name, ) standalone = ( request.args.get(utils.ReservedUrlParameters.STANDALONE.value) == "true" ) bootstrap_data = { "can_add": slice_add_perm, "can_download": slice_download_perm, "can_overwrite": slice_overwrite_perm, "datasource": datasource.data, "form_data": form_data, "datasource_id": datasource_id, "datasource_type": datasource_type, "slice": slc.data if slc else None, "standalone": standalone, "user_id": user_id, "forced_height": request.args.get("height"), "common": common_bootstrap_payload(), } table_name = ( datasource.table_name if datasource_type == "table" else datasource.datasource_name ) if slc: title = slc.slice_name else: title = _("Explore - %(table)s", table=table_name) return self.render_template( "superset/basic.html", bootstrap_data=json.dumps( bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser ), entry="explore", title=title, standalone_mode=standalone, ) @api @handle_api_exception @has_access_api @expose("/filter/<datasource_type>/<datasource_id>/<column>/") def filter(self, datasource_type, datasource_id, column): """ Endpoint to retrieve values for specified column. :param datasource_type: Type of datasource e.g. 
table :param datasource_id: Datasource id :param column: Column name to retrieve values for :return: """ # TODO: Cache endpoint by user, datasource and column datasource = ConnectorRegistry.get_datasource( datasource_type, datasource_id, db.session ) if not datasource: return json_error_response(DATASOURCE_MISSING_ERR) security_manager.assert_datasource_permission(datasource) payload = json.dumps( datasource.values_for_column(column, config["FILTER_SELECT_ROW_LIMIT"]), default=utils.json_int_dttm_ser, ) return json_success(payload) def save_or_overwrite_slice( self, args, slc, slice_add_perm, slice_overwrite_perm, slice_download_perm, datasource_id, datasource_type, datasource_name, ): """Save or overwrite a slice""" slice_name = args.get("slice_name") action = args.get("action") form_data = get_form_data()[0] if action in ("saveas"): if "slice_id" in form_data: form_data.pop("slice_id") # don't save old slice_id slc = Slice(owners=[g.user] if g.user else []) slc.params = json.dumps(form_data, indent=2, sort_keys=True) slc.datasource_name = datasource_name slc.viz_type = form_data["viz_type"] slc.datasource_type = datasource_type slc.datasource_id = datasource_id slc.slice_name = slice_name if action in ("saveas") and slice_add_perm: self.save_slice(slc) elif action == "overwrite" and slice_overwrite_perm: self.overwrite_slice(slc) # Adding slice to a dashboard if requested dash = None if request.args.get("add_to_dash") == "existing": dash = ( db.session.query(Dashboard) .filter_by(id=int(request.args.get("save_to_dashboard_id"))) .one() ) # check edit dashboard permissions dash_overwrite_perm = check_ownership(dash, raise_if_false=False) if not dash_overwrite_perm: return json_error_response( _("You don't have the rights to ") + _("alter this ") + _("dashboard"), status=400, ) flash( _("Chart [{}] was added to dashboard [{}]").format( slc.slice_name, dash.dashboard_title ), "info", ) elif request.args.get("add_to_dash") == "new": # check create dashboard permissions dash_add_perm = security_manager.can_access("can_add", "DashboardModelView") if not dash_add_perm: return json_error_response( _("You don't have the rights to ") + _("create a ") + _("dashboard"), status=400, ) dash = Dashboard( dashboard_title=request.args.get("new_dashboard_name"), owners=[g.user] if g.user else [], ) flash( _( "Dashboard [{}] just got created and chart [{}] was added " "to it" ).format(dash.dashboard_title, slc.slice_name), "info", ) if dash and slc not in dash.slices: dash.slices.append(slc) db.session.commit() response = { "can_add": slice_add_perm, "can_download": slice_download_perm, "can_overwrite": is_owner(slc, g.user), "form_data": slc.form_data, "slice": slc.data, "dashboard_id": dash.id if dash else None, } if request.args.get("goto_dash") == "true": response.update({"dashboard": dash.url}) return json_success(json.dumps(response)) def save_slice(self, slc): session = db.session() msg = _("Chart [{}] has been saved").format(slc.slice_name) session.add(slc) session.commit() flash(msg, "info") def overwrite_slice(self, slc): session = db.session() session.merge(slc) session.commit() msg = _("Chart [{}] has been overwritten").format(slc.slice_name) flash(msg, "info") @api @has_access_api @expose("/checkbox/<model_view>/<id_>/<attr>/<value>", methods=["GET"]) def checkbox(self, model_view, id_, attr, value): """endpoint for checking/unchecking any boolean in a sqla model""" modelview_to_model = { "{}ColumnInlineView".format(name.capitalize()): source.column_class for name, source in 
ConnectorRegistry.sources.items() } model = modelview_to_model[model_view] col = db.session.query(model).get(id_) checked = value == "true" if col: setattr(col, attr, checked) if checked: metrics = col.get_metrics().values() col.datasource.add_missing_metrics(metrics) db.session.commit() return json_success('"OK"') @api @has_access_api @expose("/schemas/<db_id>/") @expose("/schemas/<db_id>/<force_refresh>/") def schemas(self, db_id, force_refresh="false"): db_id = int(db_id) force_refresh = force_refresh.lower() == "true" database = db.session.query(models.Database).get(db_id) if database: schemas = database.get_all_schema_names( cache=database.schema_cache_enabled, cache_timeout=database.schema_cache_timeout, force=force_refresh, ) schemas = security_manager.schemas_accessible_by_user(database, schemas) else: schemas = [] return Response(json.dumps({"schemas": schemas}), mimetype="application/json") @api @has_access_api @expose("/tables/<db_id>/<schema>/<substr>/") @expose("/tables/<db_id>/<schema>/<substr>/<force_refresh>/") def tables(self, db_id, schema, substr, force_refresh="false"): """Endpoint to fetch the list of tables for given database""" db_id = int(db_id) force_refresh = force_refresh.lower() == "true" schema = utils.parse_js_uri_path_item(schema, eval_undefined=True) substr = utils.parse_js_uri_path_item(substr, eval_undefined=True) database = db.session.query(models.Database).filter_by(id=db_id).one() if schema: tables = ( database.get_all_table_names_in_schema( schema=schema, force=force_refresh, cache=database.table_cache_enabled, cache_timeout=database.table_cache_timeout, ) or [] ) views = ( database.get_all_view_names_in_schema( schema=schema, force=force_refresh, cache=database.table_cache_enabled, cache_timeout=database.table_cache_timeout, ) or [] ) else: tables = database.get_all_table_names_in_database( cache=True, force=False, cache_timeout=24 * 60 * 60 ) views = database.get_all_view_names_in_database( cache=True, force=False, cache_timeout=24 * 60 * 60 ) tables = security_manager.get_datasources_accessible_by_user( database, tables, schema ) views = security_manager.get_datasources_accessible_by_user( database, views, schema ) def get_datasource_label(ds_name: utils.DatasourceName) -> str: return ds_name.table if schema else f"{ds_name.schema}.{ds_name.table}" if substr: tables = [tn for tn in tables if substr in get_datasource_label(tn)] views = [vn for vn in views if substr in get_datasource_label(vn)] if not schema and database.default_schemas: user_schema = g.user.email.split("@")[0] valid_schemas = set(database.default_schemas + [user_schema]) tables = [tn for tn in tables if tn.schema in valid_schemas] views = [vn for vn in views if vn.schema in valid_schemas] max_items = config["MAX_TABLE_NAMES"] or len(tables) total_items = len(tables) + len(views) max_tables = len(tables) max_views = len(views) if total_items and substr: max_tables = max_items * len(tables) // total_items max_views = max_items * len(views) // total_items table_options = [ { "value": tn.table, "schema": tn.schema, "label": get_datasource_label(tn), "title": get_datasource_label(tn), "type": "table", } for tn in tables[:max_tables] ] table_options.extend( [ { "value": vn.table, "schema": vn.schema, "label": get_datasource_label(vn), "title": get_datasource_label(vn), "type": "view", } for vn in views[:max_views] ] ) table_options.sort(key=lambda value: value["label"]) payload = {"tableLength": len(tables) + len(views), "options": table_options} return json_success(json.dumps(payload)) 
@api @has_access_api @expose("/copy_dash/<dashboard_id>/", methods=["GET", "POST"]) def copy_dash(self, dashboard_id): """Copy dashboard""" session = db.session() data = json.loads(request.form.get("data")) dash = models.Dashboard() original_dash = session.query(Dashboard).get(dashboard_id) dash.owners = [g.user] if g.user else [] dash.dashboard_title = data["dashboard_title"] if data["duplicate_slices"]: # Duplicating slices as well, mapping old ids to new ones old_to_new_sliceids = {} for slc in original_dash.slices: new_slice = slc.clone() new_slice.owners = [g.user] if g.user else [] session.add(new_slice) session.flush() new_slice.dashboards.append(dash) old_to_new_sliceids["{}".format(slc.id)] = "{}".format(new_slice.id) # update chartId of layout entities # in v2_dash positions json data, chartId should be integer, # while in older version slice_id is string type for value in data["positions"].values(): if ( isinstance(value, dict) and value.get("meta") and value.get("meta").get("chartId") ): old_id = "{}".format(value.get("meta").get("chartId")) new_id = int(old_to_new_sliceids[old_id]) value["meta"]["chartId"] = new_id else: dash.slices = original_dash.slices dash.params = original_dash.params self._set_dash_metadata(dash, data) session.add(dash) session.commit() dash_json = json.dumps(dash.data) session.close() return json_success(dash_json) @api @has_access_api @expose("/save_dash/<dashboard_id>/", methods=["GET", "POST"]) def save_dash(self, dashboard_id): """Save a dashboard's metadata""" session = db.session() dash = session.query(Dashboard).get(dashboard_id) check_ownership(dash, raise_if_false=True) data = json.loads(request.form.get("data")) self._set_dash_metadata(dash, data) session.merge(dash) session.commit() session.close() return json_success(json.dumps({"status": "SUCCESS"})) @staticmethod def _set_dash_metadata(dashboard, data): positions = data["positions"] # find slices in the position data slice_ids = [] slice_id_to_name = {} for value in positions.values(): if isinstance(value, dict): try: slice_id = value["meta"]["chartId"] slice_ids.append(slice_id) slice_id_to_name[slice_id] = value["meta"]["sliceName"] except KeyError: pass session = db.session() current_slices = session.query(Slice).filter(Slice.id.in_(slice_ids)).all() dashboard.slices = current_slices # update slice names. 
this assumes user has permissions to update the slice # we allow user set slice name be empty string for slc in dashboard.slices: try: new_name = slice_id_to_name[slc.id] if slc.slice_name != new_name: slc.slice_name = new_name session.merge(slc) session.flush() except KeyError: pass # remove leading and trailing white spaces in the dumped json dashboard.position_json = json.dumps( positions, indent=None, separators=(",", ":"), sort_keys=True ) md = dashboard.params_dict dashboard.css = data.get("css") dashboard.dashboard_title = data["dashboard_title"] if "timed_refresh_immune_slices" not in md: md["timed_refresh_immune_slices"] = [] if "filter_scopes" in data: md.pop("filter_immune_slices", None) md.pop("filter_immune_slice_fields", None) md["filter_scopes"] = json.loads(data.get("filter_scopes", "{}")) else: if "filter_immune_slices" not in md: md["filter_immune_slices"] = [] if "filter_immune_slice_fields" not in md: md["filter_immune_slice_fields"] = {} md["expanded_slices"] = data["expanded_slices"] md["refresh_frequency"] = data.get("refresh_frequency", 0) default_filters_data = json.loads(data.get("default_filters", "{}")) applicable_filters = { key: v for key, v in default_filters_data.items() if int(key) in slice_ids } md["default_filters"] = json.dumps(applicable_filters) if data.get("color_namespace"): md["color_namespace"] = data.get("color_namespace") if data.get("color_scheme"): md["color_scheme"] = data.get("color_scheme") if data.get("label_colors"): md["label_colors"] = data.get("label_colors") dashboard.json_metadata = json.dumps(md) @api @has_access_api @expose("/add_slices/<dashboard_id>/", methods=["POST"]) def add_slices(self, dashboard_id): """Add and save slices to a dashboard""" data = json.loads(request.form.get("data")) session = db.session() dash = session.query(Dashboard).get(dashboard_id) check_ownership(dash, raise_if_false=True) new_slices = session.query(Slice).filter(Slice.id.in_(data["slice_ids"])) dash.slices += new_slices session.merge(dash) session.commit() session.close() return "SLICES ADDED" @api @has_access_api @expose("/testconn", methods=["POST", "GET"]) def testconn(self): """Tests a sqla connection""" try: db_name = request.json.get("name") uri = request.json.get("uri") # if the database already exists in the database, only its safe (password-masked) URI # would be shown in the UI and would be passed in the form data. # so if the database already exists and the form was submitted with the safe URI, # we assume we should retrieve the decrypted URI to test the connection. 
if db_name: existing_database = ( db.session.query(models.Database) .filter_by(database_name=db_name) .one_or_none() ) if existing_database and uri == existing_database.safe_sqlalchemy_uri(): uri = existing_database.sqlalchemy_uri_decrypted # this is the database instance that will be tested database = models.Database( # extras is sent as json, but required to be a string in the Database model extra=json.dumps(request.json.get("extras", {})), impersonate_user=request.json.get("impersonate_user"), encrypted_extra=json.dumps(request.json.get("encrypted_extra", {})), ) database.set_sqlalchemy_uri(uri) username = g.user.username if g.user is not None else None engine = database.get_sqla_engine(user_name=username) with closing(engine.connect()) as conn: conn.scalar(select([1])) return json_success('"OK"') except Exception as e: logging.exception(e) return json_error_response( "Connection failed!\n\n" "The error message returned was:\n{}".format(e) ) @api @has_access_api @expose("/recent_activity/<user_id>/", methods=["GET"]) def recent_activity(self, user_id): """Recent activity (actions) for a given user""" M = models if request.args.get("limit"): limit = int(request.args.get("limit")) else: limit = 1000 qry = ( db.session.query(M.Log, M.Dashboard, Slice) .outerjoin(M.Dashboard, M.Dashboard.id == M.Log.dashboard_id) .outerjoin(Slice, Slice.id == M.Log.slice_id) .filter( and_( ~M.Log.action.in_(("queries", "shortner", "sql_json")), M.Log.user_id == user_id, ) ) .order_by(M.Log.dttm.desc()) .limit(limit) ) payload = [] for log in qry.all(): item_url = None item_title = None if log.Dashboard: item_url = log.Dashboard.url item_title = log.Dashboard.dashboard_title elif log.Slice: item_url = log.Slice.slice_url item_title = log.Slice.slice_name payload.append( { "action": log.Log.action, "item_url": item_url, "item_title": item_title, "time": log.Log.dttm, } ) return json_success(json.dumps(payload, default=utils.json_int_dttm_ser)) @api @has_access_api @expose("/csrf_token/", methods=["GET"]) def csrf_token(self): return Response( self.render_template("superset/csrf_token.json"), mimetype="text/json" ) @api @has_access_api @expose("/available_domains/", methods=["GET"]) def available_domains(self): """ Returns the list of available Superset Webserver domains (if any) defined in config. This enables charts embedded in other apps to leverage domain sharding if appropriately configured. 
""" return Response( json.dumps(conf.get("SUPERSET_WEBSERVER_DOMAINS")), mimetype="text/json" ) @api @has_access_api @expose("/fave_dashboards_by_username/<username>/", methods=["GET"]) def fave_dashboards_by_username(self, username): """This lets us use a user's username to pull favourite dashboards""" user = security_manager.find_user(username=username) return self.fave_dashboards(user.get_id()) @api @has_access_api @expose("/fave_dashboards/<user_id>/", methods=["GET"]) def fave_dashboards(self, user_id): qry = ( db.session.query(Dashboard, models.FavStar.dttm) .join( models.FavStar, and_( models.FavStar.user_id == int(user_id), models.FavStar.class_name == "Dashboard", Dashboard.id == models.FavStar.obj_id, ), ) .order_by(models.FavStar.dttm.desc()) ) payload = [] for o in qry.all(): d = { "id": o.Dashboard.id, "dashboard": o.Dashboard.dashboard_link(), "title": o.Dashboard.dashboard_title, "url": o.Dashboard.url, "dttm": o.dttm, } if o.Dashboard.created_by: user = o.Dashboard.created_by d["creator"] = str(user) d["creator_url"] = "/superset/profile/{}/".format(user.username) payload.append(d) return json_success(json.dumps(payload, default=utils.json_int_dttm_ser)) @api @has_access_api @expose("/created_dashboards/<user_id>/", methods=["GET"]) def created_dashboards(self, user_id): Dash = Dashboard qry = ( db.session.query(Dash) .filter(or_(Dash.created_by_fk == user_id, Dash.changed_by_fk == user_id)) .order_by(Dash.changed_on.desc()) ) payload = [ { "id": o.id, "dashboard": o.dashboard_link(), "title": o.dashboard_title, "url": o.url, "dttm": o.changed_on, } for o in qry.all() ] return json_success(json.dumps(payload, default=utils.json_int_dttm_ser)) @api @has_access_api @expose("/user_slices", methods=["GET"]) @expose("/user_slices/<user_id>/", methods=["GET"]) def user_slices(self, user_id=None): """List of slices a user created, or faved""" if not user_id: user_id = g.user.id FavStar = models.FavStar qry = ( db.session.query(Slice, FavStar.dttm) .join( models.FavStar, and_( models.FavStar.user_id == int(user_id), models.FavStar.class_name == "slice", Slice.id == models.FavStar.obj_id, ), isouter=True, ) .filter( or_( Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id, FavStar.user_id == user_id, ) ) .order_by(Slice.slice_name.asc()) ) payload = [ { "id": o.Slice.id, "title": o.Slice.slice_name, "url": o.Slice.slice_url, "data": o.Slice.form_data, "dttm": o.dttm if o.dttm else o.Slice.changed_on, "viz_type": o.Slice.viz_type, } for o in qry.all() ] return json_success(json.dumps(payload, default=utils.json_int_dttm_ser)) @api @has_access_api @expose("/created_slices", methods=["GET"]) @expose("/created_slices/<user_id>/", methods=["GET"]) def created_slices(self, user_id=None): """List of slices created by this user""" if not user_id: user_id = g.user.id qry = ( db.session.query(Slice) .filter(or_(Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id)) .order_by(Slice.changed_on.desc()) ) payload = [ { "id": o.id, "title": o.slice_name, "url": o.slice_url, "dttm": o.changed_on, "viz_type": o.viz_type, } for o in qry.all() ] return json_success(json.dumps(payload, default=utils.json_int_dttm_ser)) @api @has_access_api @expose("/fave_slices", methods=["GET"]) @expose("/fave_slices/<user_id>/", methods=["GET"]) def fave_slices(self, user_id=None): """Favorite slices for a user""" if not user_id: user_id = g.user.id qry = ( db.session.query(Slice, models.FavStar.dttm) .join( models.FavStar, and_( models.FavStar.user_id == int(user_id), models.FavStar.class_name 
== "slice", Slice.id == models.FavStar.obj_id, ), ) .order_by(models.FavStar.dttm.desc()) ) payload = [] for o in qry.all(): d = { "id": o.Slice.id, "title": o.Slice.slice_name, "url": o.Slice.slice_url, "dttm": o.dttm, "viz_type": o.Slice.viz_type, } if o.Slice.created_by: user = o.Slice.created_by d["creator"] = str(user) d["creator_url"] = "/superset/profile/{}/".format(user.username) payload.append(d) return json_success(json.dumps(payload, default=utils.json_int_dttm_ser)) @api @has_access_api @expose("/warm_up_cache/", methods=["GET"]) def warm_up_cache(self): """Warms up the cache for the slice or table. Note for slices a force refresh occurs. """ slices = None session = db.session() slice_id = request.args.get("slice_id") table_name = request.args.get("table_name") db_name = request.args.get("db_name") if not slice_id and not (table_name and db_name): return json_error_response( __( "Malformed request. slice_id or table_name and db_name " "arguments are expected" ), status=400, ) if slice_id: slices = session.query(Slice).filter_by(id=slice_id).all() if not slices: return json_error_response( __("Chart %(id)s not found", id=slice_id), status=404 ) elif table_name and db_name: SqlaTable = ConnectorRegistry.sources["table"] table = ( session.query(SqlaTable) .join(models.Database) .filter( models.Database.database_name == db_name or SqlaTable.table_name == table_name ) ).one_or_none() if not table: return json_error_response( __( "Table %(t)s wasn't found in the database %(d)s", t=table_name, s=db_name, ), status=404, ) slices = ( session.query(Slice) .filter_by(datasource_id=table.id, datasource_type=table.type) .all() ) for slc in slices: try: form_data = get_form_data(slc.id, use_slice_data=True)[0] obj = get_viz( datasource_type=slc.datasource.type, datasource_id=slc.datasource.id, form_data=form_data, force=True, ) obj.get_json() except Exception as e: self.logger.exception("Failed to warm up cache") return json_error_response(utils.error_msg_from_exception(e)) return json_success( json.dumps( [{"slice_id": slc.id, "slice_name": slc.slice_name} for slc in slices] ) ) @has_access_api @expose("/favstar/<class_name>/<obj_id>/<action>/") def favstar(self, class_name, obj_id, action): """Toggle favorite stars on Slices and Dashboard""" session = db.session() FavStar = models.FavStar count = 0 favs = ( session.query(FavStar) .filter_by(class_name=class_name, obj_id=obj_id, user_id=g.user.get_id()) .all() ) if action == "select": if not favs: session.add( FavStar( class_name=class_name, obj_id=obj_id, user_id=g.user.get_id(), dttm=datetime.now(), ) ) count = 1 elif action == "unselect": for fav in favs: session.delete(fav) else: count = len(favs) session.commit() return json_success(json.dumps({"count": count})) @api @has_access_api @expose("/dashboard/<dashboard_id>/published/", methods=("GET", "POST")) def publish(self, dashboard_id): """Gets and toggles published status on dashboards""" logging.warning( "This API endpoint is deprecated and will be removed in version 1.0.0" ) session = db.session() Role = ab_models.Role dash = ( session.query(Dashboard).filter(Dashboard.id == dashboard_id).one_or_none() ) admin_role = session.query(Role).filter(Role.name == "Admin").one_or_none() if request.method == "GET": if dash: return json_success(json.dumps({"published": dash.published})) else: return json_error_response( f"ERROR: cannot find dashboard {dashboard_id}", status=404 ) else: edit_perm = is_owner(dash, g.user) or admin_role in get_user_roles() if not edit_perm: return 
json_error_response( f'ERROR: "{g.user.username}" cannot alter dashboard "{dash.dashboard_title}"', status=403, ) dash.published = str(request.form["published"]).lower() == "true" session.commit() return json_success(json.dumps({"published": dash.published})) @has_access @expose("/dashboard/<dashboard_id>/") def dashboard(self, dashboard_id): """Server side rendering for a dashboard""" session = db.session() qry = session.query(Dashboard) if dashboard_id.isdigit(): qry = qry.filter_by(id=int(dashboard_id)) else: qry = qry.filter_by(slug=dashboard_id) dash = qry.one_or_none() if not dash: abort(404) datasources = set() for slc in dash.slices: datasource = slc.datasource if datasource: datasources.add(datasource) if config["ENABLE_ACCESS_REQUEST"]: for datasource in datasources: if datasource and not security_manager.datasource_access(datasource): flash( __( security_manager.get_datasource_access_error_msg(datasource) ), "danger", ) return redirect( "superset/request_access/?" f"dashboard_id={dash.id}&" ) dash_edit_perm = check_ownership( dash, raise_if_false=False ) and security_manager.can_access("can_save_dash", "Superset") dash_save_perm = security_manager.can_access("can_save_dash", "Superset") superset_can_explore = security_manager.can_access("can_explore", "Superset") superset_can_csv = security_manager.can_access("can_csv", "Superset") slice_can_edit = security_manager.can_access("can_edit", "SliceModelView") standalone_mode = ( request.args.get(utils.ReservedUrlParameters.STANDALONE.value) == "true" ) edit_mode = ( request.args.get(utils.ReservedUrlParameters.EDIT_MODE.value) == "true" ) # Hack to log the dashboard_id properly, even when getting a slug @event_logger.log_this def dashboard(**kwargs): pass dashboard( dashboard_id=dash.id, dashboard_version="v2", dash_edit_perm=dash_edit_perm, edit_mode=edit_mode, ) dashboard_data = dash.data dashboard_data.update( { "standalone_mode": standalone_mode, "dash_save_perm": dash_save_perm, "dash_edit_perm": dash_edit_perm, "superset_can_explore": superset_can_explore, "superset_can_csv": superset_can_csv, "slice_can_edit": slice_can_edit, } ) url_params = { key: value for key, value in request.args.items() if key not in [param.value for param in utils.ReservedUrlParameters] } bootstrap_data = { "user_id": g.user.get_id(), "dashboard_data": dashboard_data, "datasources": {ds.uid: ds.data for ds in datasources}, "common": common_bootstrap_payload(), "editMode": edit_mode, "urlParams": url_params, } if request.args.get("json") == "true": return json_success( json.dumps(bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser) ) return self.render_template( "superset/dashboard.html", entry="dashboard", standalone_mode=standalone_mode, title=dash.dashboard_title, bootstrap_data=json.dumps( bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser ), ) @api @event_logger.log_this @expose("/log/", methods=["POST"]) def log(self): return Response(status=200) @has_access @expose("/sync_druid/", methods=["POST"]) @event_logger.log_this def sync_druid_source(self): """Syncs the druid datasource in main db with the provided config. The endpoint takes 3 arguments: user - user name to perform the operation as cluster - name of the druid cluster config - configuration stored in json that contains: name: druid datasource name dimensions: list of the dimensions, they become druid columns with the type STRING metrics_spec: list of metrics (dictionary). Metric consists of 2 attributes: type and name. Type can be count, etc. 
`count` type is stored internally as longSum other fields will be ignored. Example: { 'name': 'test_click', 'metrics_spec': [{'type': 'count', 'name': 'count'}], 'dimensions': ['affiliate_id', 'campaign', 'first_seen'] } """ payload = request.get_json(force=True) druid_config = payload["config"] user_name = payload["user"] cluster_name = payload["cluster"] user = security_manager.find_user(username=user_name) DruidDatasource = ConnectorRegistry.sources["druid"] DruidCluster = DruidDatasource.cluster_class if not user: err_msg = __( "Can't find User '%(name)s', please ask your admin " "to create one.", name=user_name, ) logging.error(err_msg) return json_error_response(err_msg) cluster = ( db.session.query(DruidCluster) .filter_by(cluster_name=cluster_name) .one_or_none() ) if not cluster: err_msg = __( "Can't find DruidCluster with cluster_name = " "'%(name)s'", name=cluster_name, ) logging.error(err_msg) return json_error_response(err_msg) try: DruidDatasource.sync_to_db_from_config(druid_config, user, cluster) except Exception as e: logging.exception(utils.error_msg_from_exception(e)) return json_error_response(utils.error_msg_from_exception(e)) return Response(status=201) @has_access @expose("/sqllab_viz/", methods=["POST"]) @event_logger.log_this def sqllab_viz(self): SqlaTable = ConnectorRegistry.sources["table"] data = json.loads(request.form.get("data")) table_name = data.get("datasourceName") database_id = data.get("dbId") table = ( db.session.query(SqlaTable) .filter_by(database_id=database_id, table_name=table_name) .one_or_none() ) if not table: table = SqlaTable(table_name=table_name, owners=[g.user]) table.database_id = database_id table.schema = data.get("schema") table.template_params = data.get("templateParams") table.is_sqllab_view = True q = ParsedQuery(data.get("sql")) table.sql = q.stripped() db.session.add(table) cols = [] for config in data.get("columns"): column_name = config.get("name") SqlaTable = ConnectorRegistry.sources["table"] TableColumn = SqlaTable.column_class SqlMetric = SqlaTable.metric_class col = TableColumn( column_name=column_name, filterable=True, groupby=True, is_dttm=config.get("is_date", False), type=config.get("type", False), ) cols.append(col) table.columns = cols table.metrics = [SqlMetric(metric_name="count", expression="count(*)")] db.session.commit() return json_success(json.dumps({"table_id": table.id})) @has_access @expose("/table/<database_id>/<table_name>/<schema>/") @event_logger.log_this def table(self, database_id, table_name, schema): schema = utils.parse_js_uri_path_item(schema, eval_undefined=True) table_name = utils.parse_js_uri_path_item(table_name) mydb = db.session.query(models.Database).filter_by(id=database_id).one() payload_columns = [] indexes = [] primary_key = [] foreign_keys = [] try: columns = mydb.get_columns(table_name, schema) indexes = mydb.get_indexes(table_name, schema) primary_key = mydb.get_pk_constraint(table_name, schema) foreign_keys = mydb.get_foreign_keys(table_name, schema) except Exception as e: return json_error_response(utils.error_msg_from_exception(e)) keys = [] if primary_key and primary_key.get("constrained_columns"): primary_key["column_names"] = primary_key.pop("constrained_columns") primary_key["type"] = "pk" keys += [primary_key] for fk in foreign_keys: fk["column_names"] = fk.pop("constrained_columns") fk["type"] = "fk" keys += foreign_keys for idx in indexes: idx["type"] = "index" keys += indexes for col in columns: dtype = "" try: dtype = "{}".format(col["type"]) except Exception: # 
sqla.types.JSON __str__ has a bug, so using __class__. dtype = col["type"].__class__.__name__ pass payload_columns.append( { "name": col["name"], "type": dtype.split("(")[0] if "(" in dtype else dtype, "longType": dtype, "keys": [k for k in keys if col["name"] in k.get("column_names")], } ) tbl = { "name": table_name, "columns": payload_columns, "selectStar": mydb.select_star( table_name, schema=schema, show_cols=True, indent=True, cols=columns, latest_partition=True, ), "primaryKey": primary_key, "foreignKeys": foreign_keys, "indexes": keys, } return json_success(json.dumps(tbl)) @has_access @expose("/extra_table_metadata/<database_id>/<table_name>/<schema>/") @event_logger.log_this def extra_table_metadata(self, database_id, table_name, schema): schema = utils.parse_js_uri_path_item(schema, eval_undefined=True) table_name = utils.parse_js_uri_path_item(table_name) mydb = db.session.query(models.Database).filter_by(id=database_id).one() payload = mydb.db_engine_spec.extra_table_metadata(mydb, table_name, schema) return json_success(json.dumps(payload)) @has_access @expose("/select_star/<database_id>/<table_name>") @expose("/select_star/<database_id>/<table_name>/<schema>") @event_logger.log_this def select_star(self, database_id, table_name, schema=None): mydb = db.session.query(models.Database).get(database_id) schema = utils.parse_js_uri_path_item(schema, eval_undefined=True) table_name = utils.parse_js_uri_path_item(table_name) return json_success( mydb.select_star(table_name, schema, latest_partition=True, show_cols=True) ) @has_access_api @expose("/estimate_query_cost/<database_id>/", methods=["POST"]) @expose("/estimate_query_cost/<database_id>/<schema>/", methods=["POST"]) @event_logger.log_this def estimate_query_cost(self, database_id: int, schema: str = None) -> Response: mydb = db.session.query(models.Database).get(database_id) sql = json.loads(request.form.get("sql", '""')) template_params = json.loads(request.form.get("templateParams") or "{}") if template_params: template_processor = get_template_processor(mydb) sql = template_processor.process_template(sql, **template_params) timeout = SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT timeout_msg = f"The estimation exceeded the {timeout} seconds timeout." 
try: with utils.timeout(seconds=timeout, error_message=timeout_msg): cost = mydb.db_engine_spec.estimate_query_cost( mydb, schema, sql, utils.sources.get("sql_lab") ) except SupersetTimeoutException as e: logging.exception(e) return json_error_response(timeout_msg) except Exception as e: return json_error_response(str(e)) spec = mydb.db_engine_spec query_cost_formatters = get_feature_flags().get( "QUERY_COST_FORMATTERS_BY_ENGINE", {} ) query_cost_formatter = query_cost_formatters.get( spec.engine, spec.query_cost_formatter ) cost = query_cost_formatter(cost) return json_success(json.dumps(cost)) @expose("/theme/") def theme(self): return self.render_template("superset/theme.html") @has_access_api @expose("/cached_key/<key>/") @event_logger.log_this def cached_key(self, key): """Returns a key from the cache""" resp = cache.get(key) if resp: return resp return "nope" @has_access_api @expose("/cache_key_exist/<key>/") @event_logger.log_this def cache_key_exist(self, key): """Returns if a key from cache exist""" key_exist = True if cache.get(key) else False status = 200 if key_exist else 404 return json_success(json.dumps({"key_exist": key_exist}), status=status) @has_access_api @expose("/results/<key>/") @event_logger.log_this def results(self, key): return self.results_exec(key) def results_exec(self, key: str): """Serves a key off of the results backend It is possible to pass the `rows` query argument to limit the number of rows returned. """ if not results_backend: return json_error_response("Results backend isn't configured") read_from_results_backend_start = now_as_float() blob = results_backend.get(key) stats_logger.timing( "sqllab.query.results_backend_read", now_as_float() - read_from_results_backend_start, ) if not blob: return json_error_response( "Data could not be retrieved. " "You may want to re-run the query.", status=410, ) query = db.session.query(Query).filter_by(results_key=key).one_or_none() if query is None: return json_error_response( "Data could not be retrieved. 
You may want to re-run the query.", status=404, ) rejected_tables = security_manager.rejected_tables( query.sql, query.database, query.schema ) if rejected_tables: return json_error_response( security_manager.get_table_access_error_msg(rejected_tables), status=403 ) payload = utils.zlib_decompress(blob, decode=not results_backend_use_msgpack) obj: dict = _deserialize_results_payload( payload, query, cast(bool, results_backend_use_msgpack) ) if "rows" in request.args: try: rows = int(request.args["rows"]) except ValueError: return json_error_response("Invalid `rows` argument", status=400) obj = apply_display_max_row_limit(obj, rows) return json_success( json.dumps(obj, default=utils.json_iso_dttm_ser, ignore_nan=True) ) @has_access_api @expose("/stop_query/", methods=["POST"]) @event_logger.log_this @backoff.on_exception( backoff.constant, Exception, interval=1, on_backoff=lambda details: db.session.rollback(), on_giveup=lambda details: db.session.rollback(), max_tries=5, ) def stop_query(self): client_id = request.form.get("client_id") query = db.session.query(Query).filter_by(client_id=client_id).one() if query.status in [ QueryStatus.FAILED, QueryStatus.SUCCESS, QueryStatus.TIMED_OUT, ]: logging.error( f"Query with client_id {client_id} could not be stopped: query already complete" ) return self.json_response("OK") query.status = QueryStatus.STOPPED db.session.commit() return self.json_response("OK") @has_access_api @expose("/validate_sql_json/", methods=["POST", "GET"]) @event_logger.log_this def validate_sql_json(self): """Validates that arbitrary sql is acceptable for the given database. Returns a list of error/warning annotations as json. """ sql = request.form.get("sql") database_id = request.form.get("database_id") schema = request.form.get("schema") or None template_params = json.loads(request.form.get("templateParams") or "{}") if len(template_params) > 0: # TODO: factor the Database object out of template rendering # or provide it as mydb so we can render template params # without having to also persist a Query ORM object. return json_error_response( "SQL validation does not support template parameters", status=400 ) session = db.session() mydb = session.query(models.Database).filter_by(id=database_id).one_or_none() if not mydb: return json_error_response( "Database with id {} is missing.".format(database_id), status=400 ) spec = mydb.db_engine_spec validators_by_engine = get_feature_flags().get("SQL_VALIDATORS_BY_ENGINE") if not validators_by_engine or spec.engine not in validators_by_engine: return json_error_response( "no SQL validator is configured for {}".format(spec.engine), status=400 ) validator_name = validators_by_engine[spec.engine] validator = get_validator_by_name(validator_name) if not validator: return json_error_response( "No validator named {} found (configured for the {} engine)".format( validator_name, spec.engine ) ) try: timeout = config["SQLLAB_VALIDATION_TIMEOUT"] timeout_msg = f"The query exceeded the {timeout} seconds timeout." 
with utils.timeout(seconds=timeout, error_message=timeout_msg): errors = validator.validate(sql, schema, mydb) payload = json.dumps( [err.to_dict() for err in errors], default=utils.pessimistic_json_iso_dttm_ser, ignore_nan=True, encoding=None, ) return json_success(payload) except Exception as e: logging.exception(e) msg = _( f"{validator.name} was unable to check your query.\n" "Please recheck your query.\n" f"Exception: {e}" ) # Return as a 400 if the database error message says we got a 4xx error if re.search(r"([\W]|^)4\d{2}([\W]|$)", str(e)): return json_error_response(f"{msg}", status=400) else: return json_error_response(f"{msg}") def _sql_json_async( self, session: Session, rendered_query: str, query: Query, expand_data: bool, log_params: Optional[Dict[str, Any]] = None, ) -> str: """ Send SQL JSON query to celery workers :param session: SQLAlchemy session object :param rendered_query: the rendered query to perform by workers :param query: The query (SQLAlchemy) object :return: String JSON response """ logging.info(f"Query {query.id}: Running query on a Celery worker") # Ignore the celery future object and the request may time out. try: sql_lab.get_sql_results.delay( query.id, rendered_query, return_results=False, store_results=not query.select_as_cta, user_name=g.user.username if g.user else None, start_time=now_as_float(), expand_data=expand_data, log_params=log_params, ) except Exception as e: logging.exception(f"Query {query.id}: {e}") msg = _( "Failed to start remote query on a worker. " "Tell your administrator to verify the availability of " "the message queue." ) query.status = QueryStatus.FAILED query.error_message = msg session.commit() return json_error_response("{}".format(msg)) resp = json_success( json.dumps( {"query": query.to_dict()}, default=utils.json_int_dttm_ser, ignore_nan=True, ), status=202, ) session.commit() return resp def _sql_json_sync( self, session: Session, rendered_query: str, query: Query, expand_data: bool, log_params: Optional[Dict[str, Any]] = None, ) -> str: """ Execute SQL query (sql json) :param rendered_query: The rendered query (included templates) :param query: The query SQL (SQLAlchemy) object :return: String JSON response """ try: timeout = config["SQLLAB_TIMEOUT"] timeout_msg = f"The query exceeded the {timeout} seconds timeout." 
store_results = ( is_feature_enabled("SQLLAB_BACKEND_PERSISTENCE") and not query.select_as_cta ) with utils.timeout(seconds=timeout, error_message=timeout_msg): # pylint: disable=no-value-for-parameter data = sql_lab.get_sql_results( query.id, rendered_query, return_results=True, store_results=store_results, user_name=g.user.username if g.user else None, expand_data=expand_data, log_params=log_params, ) payload = json.dumps( apply_display_max_row_limit(data), default=utils.pessimistic_json_iso_dttm_ser, ignore_nan=True, encoding=None, ) except Exception as e: logging.exception(f"Query {query.id}: {e}") return json_error_response(f"{{e}}") if data.get("status") == QueryStatus.FAILED: return json_error_response(payload=data) return json_success(payload) @has_access_api @expose("/sql_json/", methods=["POST"]) @event_logger.log_this def sql_json(self): log_params = { "user_agent": cast(Optional[str], request.headers.get("USER_AGENT")) } return self.sql_json_exec(request.json, log_params) def sql_json_exec( self, query_params: dict, log_params: Optional[Dict[str, Any]] = None ): """Runs arbitrary sql and returns data as json""" # Collect Values database_id: int = cast(int, query_params.get("database_id")) schema: str = cast(str, query_params.get("schema")) sql: str = cast(str, query_params.get("sql")) try: template_params: dict = json.loads( query_params.get("templateParams") or "{}" ) except json.JSONDecodeError: logging.warning( f"Invalid template parameter {query_params.get('templateParams')}" " specified. Defaulting to empty dict" ) template_params = {} limit: int = query_params.get("queryLimit") or app.config["SQL_MAX_ROW"] async_flag: bool = cast(bool, query_params.get("runAsync")) if limit < 0: logging.warning( f"Invalid limit of {limit} specified. Defaulting to max limit." 
) limit = 0 select_as_cta: bool = cast(bool, query_params.get("select_as_cta")) tmp_table_name: str = cast(str, query_params.get("tmp_table_name")) client_id: str = cast( str, query_params.get("client_id") or utils.shortid()[:10] ) sql_editor_id: str = cast(str, query_params.get("sql_editor_id")) tab_name: str = cast(str, query_params.get("tab")) status: str = QueryStatus.PENDING if async_flag else QueryStatus.RUNNING session = db.session() mydb = session.query(models.Database).get(database_id) if not mydb: return json_error_response(f"Database with id {database_id} is missing.") # Set tmp_table_name for CTA if select_as_cta and mydb.force_ctas_schema: tmp_table_name = f"{mydb.force_ctas_schema}.{tmp_table_name}" # Save current query query = Query( database_id=database_id, sql=sql, schema=schema, select_as_cta=select_as_cta, start_time=now_as_float(), tab_name=tab_name, status=status, sql_editor_id=sql_editor_id, tmp_table_name=tmp_table_name, user_id=g.user.get_id() if g.user else None, client_id=client_id, ) try: session.add(query) session.flush() query_id = query.id session.commit() # shouldn't be necessary except SQLAlchemyError as e: logging.error(f"Errors saving query details {e}") session.rollback() raise Exception(_("Query record was not created as expected.")) if not query_id: raise Exception(_("Query record was not created as expected.")) logging.info(f"Triggering query_id: {query_id}") rejected_tables = security_manager.rejected_tables(sql, mydb, schema) if rejected_tables: query.status = QueryStatus.FAILED session.commit() return json_error_response( security_manager.get_table_access_error_msg(rejected_tables), link=security_manager.get_table_access_link(rejected_tables), status=403, ) try: template_processor = get_template_processor( database=query.database, query=query ) rendered_query = template_processor.process_template( query.sql, **template_params ) except Exception as e: error_msg = utils.error_msg_from_exception(e) return json_error_response( f"Query {query_id}: Template rendering failed: {error_msg}" ) # set LIMIT after template processing limits = [mydb.db_engine_spec.get_limit_from_sql(rendered_query), limit] query.limit = min(lim for lim in limits if lim is not None) # Flag for whether or not to expand data # (feature that will expand Presto row objects and arrays) expand_data: bool = cast( bool, is_feature_enabled("PRESTO_EXPAND_DATA") and query_params.get("expand_data"), ) # Async request. if async_flag: return self._sql_json_async( session, rendered_query, query, expand_data, log_params ) # Sync request. 
return self._sql_json_sync( session, rendered_query, query, expand_data, log_params ) @has_access @expose("/csv/<client_id>") @event_logger.log_this def csv(self, client_id): """Download the query results as csv.""" logging.info("Exporting CSV file [{}]".format(client_id)) query = db.session.query(Query).filter_by(client_id=client_id).one() rejected_tables = security_manager.rejected_tables( query.sql, query.database, query.schema ) if rejected_tables: flash(security_manager.get_table_access_error_msg(rejected_tables)) return redirect("/") blob = None if results_backend and query.results_key: logging.info( "Fetching CSV from results backend " "[{}]".format(query.results_key) ) blob = results_backend.get(query.results_key) if blob: logging.info("Decompressing") payload = utils.zlib_decompress( blob, decode=not results_backend_use_msgpack ) obj = _deserialize_results_payload( payload, query, results_backend_use_msgpack ) columns = [c["name"] for c in obj["columns"]] df = pd.DataFrame.from_records(obj["data"], columns=columns) logging.info("Using pandas to convert to CSV") csv = df.to_csv(index=False, **config["CSV_EXPORT"]) else: logging.info("Running a query to turn into CSV") sql = query.select_sql or query.executed_sql df = query.database.get_df(sql, query.schema) # TODO(bkyryliuk): add compression=gzip for big files. csv = df.to_csv(index=False, **config["CSV_EXPORT"]) response = Response(csv, mimetype="text/csv") response.headers[ "Content-Disposition" ] = f"attachment; filename={query.name}.csv" event_info = { "event_type": "data_export", "client_id": client_id, "row_count": len(df.index), "database": query.database.name, "schema": query.schema, "sql": query.sql, "exported_format": "csv", } logging.info( f"CSV exported: {repr(event_info)}", extra={"superset_event": event_info} ) return response @api @handle_api_exception @has_access @expose("/fetch_datasource_metadata") @event_logger.log_this def fetch_datasource_metadata(self): datasource_id, datasource_type = request.args.get("datasourceKey").split("__") datasource = ConnectorRegistry.get_datasource( datasource_type, datasource_id, db.session ) # Check if datasource exists if not datasource: return json_error_response(DATASOURCE_MISSING_ERR) # Check permission for datasource security_manager.assert_datasource_permission(datasource) return json_success(json.dumps(datasource.data)) @has_access_api @expose("/queries/<last_updated_ms>") def queries(self, last_updated_ms): """ Get the updated queries. :param last_updated_ms: unix time, milliseconds """ last_updated_ms_int = int(float(last_updated_ms)) if last_updated_ms else 0 return self.queries_exec(last_updated_ms_int) def queries_exec(self, last_updated_ms_int: int): stats_logger.incr("queries") if not g.user.get_id(): return json_error_response( "Please login to access the queries.", status=403 ) # UTC date time, same that is stored in the DB. last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000) sql_queries = ( db.session.query(Query) .filter( Query.user_id == g.user.get_id(), Query.changed_on >= last_updated_dt ) .all() ) dict_queries = {q.client_id: q.to_dict() for q in sql_queries} return json_success(json.dumps(dict_queries, default=utils.json_int_dttm_ser)) @has_access @expose("/search_queries") @event_logger.log_this def search_queries(self) -> Response: """ Search for previously run sqllab queries. Used for Sqllab Query Search page /superset/sqllab#search. 
Custom permission can_only_search_queries_owned restricts queries to only queries run by current user. :returns: Response with list of sql query dicts """ query = db.session.query(Query) if security_manager.can_only_access_owned_queries(): search_user_id = g.user.get_user_id() else: search_user_id = request.args.get("user_id") database_id = request.args.get("database_id") search_text = request.args.get("search_text") status = request.args.get("status") # From and To time stamp should be Epoch timestamp in seconds from_time = request.args.get("from") to_time = request.args.get("to") if search_user_id: # Filter on user_id query = query.filter(Query.user_id == search_user_id) if database_id: # Filter on db Id query = query.filter(Query.database_id == database_id) if status: # Filter on status query = query.filter(Query.status == status) if search_text: # Filter on search text query = query.filter(Query.sql.like("%{}%".format(search_text))) if from_time: query = query.filter(Query.start_time > int(from_time)) if to_time: query = query.filter(Query.start_time < int(to_time)) query_limit = config["QUERY_SEARCH_LIMIT"] sql_queries = query.order_by(Query.start_time.asc()).limit(query_limit).all() dict_queries = [q.to_dict() for q in sql_queries] return Response( json.dumps(dict_queries, default=utils.json_int_dttm_ser), status=200, mimetype="application/json", ) @app.errorhandler(500) def show_traceback(self): return ( render_template("superset/traceback.html", error_msg=get_error_msg()), 500, ) @expose("/welcome") def welcome(self): """Personalized welcome page""" if not g.user or not g.user.get_id(): return redirect(appbuilder.get_url_for_login) welcome_dashboard_id = ( db.session.query(UserAttribute.welcome_dashboard_id) .filter_by(user_id=g.user.get_id()) .scalar() ) if welcome_dashboard_id: return self.dashboard(str(welcome_dashboard_id)) payload = { "user": bootstrap_user_data(g.user), "common": common_bootstrap_payload(), } return self.render_template( "superset/welcome.html", entry="welcome", bootstrap_data=json.dumps( payload, default=utils.pessimistic_json_iso_dttm_ser ), ) @has_access @expose("/profile/<username>/") def profile(self, username): """User profile page""" if not username and g.user: username = g.user.username user = ( db.session.query(ab_models.User).filter_by(username=username).one_or_none() ) if not user: abort(404, description=f"User: {username} does not exist.") payload = { "user": bootstrap_user_data(user, include_perms=True), "common": common_bootstrap_payload(), } return self.render_template( "superset/basic.html", title=_("%(user)s's profile", user=username), entry="profile", bootstrap_data=json.dumps( payload, default=utils.pessimistic_json_iso_dttm_ser ), ) @staticmethod def _get_sqllab_payload(user_id: int) -> Dict[str, Any]: # send list of tab state ids tabs_state = ( db.session.query(TabState.id, TabState.label) .filter_by(user_id=user_id) .all() ) tab_state_ids = [tab_state[0] for tab_state in tabs_state] # return first active tab, or fallback to another one if no tab is active active_tab = ( db.session.query(TabState) .filter_by(user_id=user_id) .order_by(TabState.active.desc()) .first() ) databases: Dict[int, Any] = {} queries: Dict[str, Any] = {} # These are unnecessary if sqllab backend persistence is disabled if is_feature_enabled("SQLLAB_BACKEND_PERSISTENCE"): databases = { database.id: { k: v for k, v in database.to_json().items() if k in DATABASE_KEYS } for database in db.session.query(models.Database).all() } # return all user queries associated with 
existing SQL editors user_queries = ( db.session.query(Query) .filter_by(user_id=user_id) .filter(Query.sql_editor_id.cast(Integer).in_(tab_state_ids)) .all() ) queries = { query.client_id: {k: v for k, v in query.to_dict().items()} for query in user_queries } return { "defaultDbId": config["SQLLAB_DEFAULT_DBID"], "common": common_bootstrap_payload(), "tab_state_ids": tabs_state, "active_tab": active_tab.to_dict() if active_tab else None, "databases": databases, "queries": queries, } @has_access @expose("/sqllab") def sqllab(self): """SQL Editor""" payload = self._get_sqllab_payload(g.user.get_id()) bootstrap_data = json.dumps( payload, default=utils.pessimistic_json_iso_dttm_ser ) return self.render_template( "superset/basic.html", entry="sqllab", bootstrap_data=bootstrap_data ) @api @handle_api_exception @has_access_api @expose("/slice_query/<slice_id>/") def slice_query(self, slice_id): """ This method exposes an API endpoint to get the database query string for this slice """ viz_obj = get_viz(slice_id) security_manager.assert_viz_permission(viz_obj) return self.get_query_string_response(viz_obj) @api @has_access_api @expose("/schemas_access_for_csv_upload") def schemas_access_for_csv_upload(self): """ This method exposes an API endpoint to get the schema access control settings for csv upload in this database """ if not request.args.get("db_id"): return json_error_response("No database is allowed for your csv upload") db_id = int(request.args.get("db_id")) database = db.session.query(models.Database).filter_by(id=db_id).one() try: schemas_allowed = database.get_schema_access_for_csv_upload() if ( security_manager.database_access(database) or security_manager.all_datasource_access() ): return self.json_response(schemas_allowed) # the list schemas_allowed should not be empty here # and the list schemas_allowed_processed returned from security_manager # should not be empty either, # otherwise the database should have been filtered out # in CsvToDatabaseForm schemas_allowed_processed = security_manager.schemas_accessible_by_user( database, schemas_allowed, False ) return self.json_response(schemas_allowed_processed) except Exception: return json_error_response( "Failed to fetch schemas allowed for csv upload in this database! " "Please contact your Superset Admin!", stacktrace=utils.get_stacktrace(), ) class CssTemplateModelView(SupersetModelView, DeleteMixin): datamodel = SQLAInterface(models.CssTemplate) include_route_methods = RouteMethod.CRUD_SET list_title = _("CSS Templates") show_title = _("Show CSS Template") add_title = _("Add CSS Template") edit_title = _("Edit CSS Template") list_columns = ["template_name"] edit_columns = ["template_name", "css"] add_columns = edit_columns label_columns = {"template_name": _("Template Name")} class CssTemplateAsyncModelView(CssTemplateModelView): include_route_methods = {RouteMethod.API_READ} list_columns = ["template_name", "css"] @app.after_request def apply_http_headers(response: Response): """Applies the configuration's http headers to all responses""" # HTTP_HEADERS is deprecated, this provides backwards compatibility response.headers.extend( {**config["OVERRIDE_HTTP_HEADERS"], **config["HTTP_HEADERS"]} ) for k, v in config["DEFAULT_HTTP_HEADERS"].items(): if k not in response.headers: response.headers[k] = v return response
36.223703
98
0.58158
795757d49393498167925d2604a062311596d09c
3,631
py
Python
nipyapi/nifi/models/listing_request_entity.py
iMajna/nipyapi
5480af8fe8c6b470249837835cb1a067abb6678e
[ "Apache-2.0" ]
null
null
null
nipyapi/nifi/models/listing_request_entity.py
iMajna/nipyapi
5480af8fe8c6b470249837835cb1a067abb6678e
[ "Apache-2.0" ]
1
2020-03-16T10:02:46.000Z
2020-03-16T13:37:42.000Z
nipyapi/nifi/models/listing_request_entity.py
iMajna/nipyapi
5480af8fe8c6b470249837835cb1a067abb6678e
[ "Apache-2.0" ]
null
null
null
# coding: utf-8

"""
    NiFi Rest Api

    The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.

    OpenAPI spec version: 1.12.1
    Contact: dev@nifi.apache.org
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from pprint import pformat
from six import iteritems
import re


class ListingRequestEntity(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'listing_request': 'ListingRequestDTO'
    }

    attribute_map = {
        'listing_request': 'listingRequest'
    }

    def __init__(self, listing_request=None):
        """
        ListingRequestEntity - a model defined in Swagger
        """

        self._listing_request = None

        if listing_request is not None:
            self.listing_request = listing_request

    @property
    def listing_request(self):
        """
        Gets the listing_request of this ListingRequestEntity.

        :return: The listing_request of this ListingRequestEntity.
        :rtype: ListingRequestDTO
        """
        return self._listing_request

    @listing_request.setter
    def listing_request(self, listing_request):
        """
        Sets the listing_request of this ListingRequestEntity.

        :param listing_request: The listing_request of this ListingRequestEntity.
        :type: ListingRequestDTO
        """
        self._listing_request = listing_request

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, ListingRequestEntity):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
29.282258
479
0.567612
795758bc417ff76e43d0933f469c92306cb6bf4c
1,510
py
Python
sdk/python/feast/entity.py
lgvital/feast2
833d49559a4d353ac682f27268725fcc14f93f6e
[ "Apache-2.0" ]
1
2019-12-12T13:21:56.000Z
2019-12-12T13:21:56.000Z
sdk/python/feast/entity.py
lgvital/feast2
833d49559a4d353ac682f27268725fcc14f93f6e
[ "Apache-2.0" ]
null
null
null
sdk/python/feast/entity.py
lgvital/feast2
833d49559a4d353ac682f27268725fcc14f93f6e
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from feast.value_type import ValueType
from feast.core.FeatureSet_pb2 import EntitySpec as EntityProto
from feast.types import Value_pb2 as ValueTypeProto
from feast.field import Field


class Entity(Field):
    """Entity field type"""

    def to_proto(self) -> EntityProto:
        """
        Converts Entity to its Protocol Buffer representation

        Returns:
            Returns EntitySpec object
        """
        value_type = ValueTypeProto.ValueType.Enum.Value(self.dtype.name)
        return EntityProto(name=self.name, value_type=value_type)

    @classmethod
    def from_proto(cls, entity_proto: EntityProto):
        """
        Creates a Feast Entity object from its Protocol Buffer representation

        Args:
            entity_proto: EntitySpec protobuf object

        Returns:
            Entity object
        """
        return cls(name=entity_proto.name, dtype=ValueType(entity_proto.value_type))
32.826087
84
0.713245
795758c4c1834967041b2e26095c0fc620d0488b
4,045
py
Python
tensorboard/uploader/util.py
catherinaxu/tensorboard
b2527c579f6f42865c5dcc5c2bc438977bb05616
[ "Apache-2.0" ]
2
2019-11-23T18:36:20.000Z
2019-12-07T20:58:02.000Z
tensorboard/uploader/util.py
catherinaxu/tensorboard
b2527c579f6f42865c5dcc5c2bc438977bb05616
[ "Apache-2.0" ]
null
null
null
tensorboard/uploader/util.py
catherinaxu/tensorboard
b2527c579f6f42865c5dcc5c2bc438977bb05616
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for use by the uploader command line tool."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import errno
import os
import os.path
import time


class RateLimiter(object):
    """Helper class for rate-limiting using a fixed minimum interval."""

    def __init__(self, interval_secs):
        """Constructs a RateLimiter that permits a tick() every `interval_secs`."""
        self._time = time  # Use property for ease of testing.
        self._interval_secs = interval_secs
        self._last_called_secs = 0

    def tick(self):
        """Blocks until it has been at least `interval_secs` since last tick()."""
        wait_secs = self._last_called_secs + self._interval_secs - self._time.time()
        if wait_secs > 0:
            self._time.sleep(wait_secs)
        self._last_called_secs = self._time.time()


def get_user_config_directory():
    """Returns a platform-specific root directory for user config settings."""
    # On Windows, prefer %LOCALAPPDATA%, then %APPDATA%, since we can expect the
    # AppData directories to be ACLed to be visible only to the user and admin
    # users (https://stackoverflow.com/a/7617601/1179226). If neither is set,
    # return None instead of falling back to something that may be world-readable.
    if os.name == "nt":
        appdata = os.getenv("LOCALAPPDATA")
        if appdata:
            return appdata
        appdata = os.getenv("APPDATA")
        if appdata:
            return appdata
        return None
    # On non-windows, use XDG_CONFIG_HOME if set, else default to ~/.config.
    xdg_config_home = os.getenv("XDG_CONFIG_HOME")
    if xdg_config_home:
        return xdg_config_home
    return os.path.join(os.path.expanduser("~"), ".config")


def make_file_with_directories(path, private=False):
    """Creates a file and its containing directories, if they don't already exist.

    If `private` is True, the file will be made private (readable only by the
    current user) and so will the leaf directory. Pre-existing contents of the
    file are not modified.

    Passing `private=True` is not supported on Windows because it doesn't
    support the relevant parts of `os.chmod()`.

    Args:
      path: str, The path of the file to create.
      private: boolean, Whether to make the file and leaf directory readable
        only by the current user.

    Raises:
      RuntimeError: If called on Windows with `private` set to True.
    """
    if private and os.name == "nt":
        raise RuntimeError("Creating private file not supported on Windows")
    try:
        path = os.path.realpath(path)
        leaf_dir = os.path.dirname(path)
        try:
            os.makedirs(leaf_dir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        if private:
            os.chmod(leaf_dir, 0o700)
        open(path, "a").close()
        if private:
            os.chmod(path, 0o600)
    except EnvironmentError as e:
        raise RuntimeError("Failed to create file %s: %s" % (path, e))


def set_timestamp(pb, seconds_since_epoch):
    """Sets a `Timestamp` proto message to a floating point UNIX time.

    This is like `pb.FromNanoseconds(int(seconds_since_epoch * 1e9))` but
    without introducing floating-point error.

    Args:
      pb: A `google.protobuf.Timestamp` message to mutate.
      seconds_since_epoch: A `float`, as returned by `time.time`.
    """
    pb.seconds = int(seconds_since_epoch)
    pb.nanos = int(round((seconds_since_epoch % 1) * 10**9))
35.173913
80
0.704326
79575b35dd682500e0e4aba7a391e30bbc192bcc
7,931
py
Python
configs/topologies/Mesh_XY.py
taomiao/gem5
4effe34f94b599add133357473e1b120b54719ab
[ "BSD-3-Clause" ]
1
2020-07-05T12:35:27.000Z
2020-07-05T12:35:27.000Z
configs/topologies/Mesh_XY.py
taomiao/gem5
4effe34f94b599add133357473e1b120b54719ab
[ "BSD-3-Clause" ]
null
null
null
configs/topologies/Mesh_XY.py
taomiao/gem5
4effe34f94b599add133357473e1b120b54719ab
[ "BSD-3-Clause" ]
1
2019-04-13T20:24:43.000Z
2019-04-13T20:24:43.000Z
# Copyright (c) 2010 Advanced Micro Devices, Inc. # 2016 Georgia Institute of Technology # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Brad Beckmann # Tushar Krishna from __future__ import print_function from __future__ import absolute_import from m5.params import * from m5.objects import * from .BaseTopology import SimpleTopology # Creates a generic Mesh assuming an equal number of cache # and directory controllers. # XY routing is enforced (using link weights) # to guarantee deadlock freedom. class Mesh_XY(SimpleTopology): description='Mesh_XY' def __init__(self, controllers): self.nodes = controllers # Makes a generic mesh # assuming an equal number of cache and directory cntrls def makeTopology(self, options, network, IntLink, ExtLink, Router): nodes = self.nodes num_routers = options.num_cpus num_rows = options.mesh_rows # default values for link latency and router latency. # Can be over-ridden on a per link/router basis link_latency = options.link_latency # used by simple and garnet router_latency = options.router_latency # only used by garnet # There must be an evenly divisible number of cntrls to routers # Also, obviously the number or rows must be <= the number of routers cntrls_per_router, remainder = divmod(len(nodes), num_routers) assert(num_rows > 0 and num_rows <= num_routers) num_columns = int(num_routers / num_rows) assert(num_columns * num_rows == num_routers) # Create the routers in the mesh routers = [Router(router_id=i, latency = router_latency) \ for i in range(num_routers)] network.routers = routers # link counter to set unique link ids link_count = 0 # Add all but the remainder nodes to the list of nodes to be uniformly # distributed across the network. 
network_nodes = [] remainder_nodes = [] for node_index in range(len(nodes)): if node_index < (len(nodes) - remainder): network_nodes.append(nodes[node_index]) else: remainder_nodes.append(nodes[node_index]) # Connect each node to the appropriate router ext_links = [] for (i, n) in enumerate(network_nodes): cntrl_level, router_id = divmod(i, num_routers) assert(cntrl_level < cntrls_per_router) ext_links.append(ExtLink(link_id=link_count, ext_node=n, int_node=routers[router_id], latency = link_latency)) link_count += 1 # Connect the remainding nodes to router 0. These should only be # DMA nodes. for (i, node) in enumerate(remainder_nodes): assert(node.type == 'DMA_Controller') assert(i < remainder) ext_links.append(ExtLink(link_id=link_count, ext_node=node, int_node=routers[0], latency = link_latency)) link_count += 1 network.ext_links = ext_links # Create the mesh links. int_links = [] # East output to West input links (weight = 1) for row in range(num_rows): for col in range(num_columns): if (col + 1 < num_columns): east_out = col + (row * num_columns) west_in = (col + 1) + (row * num_columns) int_links.append(IntLink(link_id=link_count, src_node=routers[east_out], dst_node=routers[west_in], src_outport="East", dst_inport="West", latency = link_latency, weight=1)) link_count += 1 # West output to East input links (weight = 1) for row in range(num_rows): for col in range(num_columns): if (col + 1 < num_columns): east_in = col + (row * num_columns) west_out = (col + 1) + (row * num_columns) int_links.append(IntLink(link_id=link_count, src_node=routers[west_out], dst_node=routers[east_in], src_outport="West", dst_inport="East", latency = link_latency, weight=1)) link_count += 1 # North output to South input links (weight = 2) for col in range(num_columns): for row in range(num_rows): if (row + 1 < num_rows): north_out = col + (row * num_columns) south_in = col + ((row + 1) * num_columns) int_links.append(IntLink(link_id=link_count, src_node=routers[north_out], dst_node=routers[south_in], src_outport="North", dst_inport="South", latency = link_latency, weight=2)) link_count += 1 # South output to North input links (weight = 2) for col in range(num_columns): for row in range(num_rows): if (row + 1 < num_rows): north_in = col + (row * num_columns) south_out = col + ((row + 1) * num_columns) int_links.append(IntLink(link_id=link_count, src_node=routers[south_out], dst_node=routers[north_in], src_outport="South", dst_inport="North", latency = link_latency, weight=2)) link_count += 1 network.int_links = int_links
44.80791
78
0.556424
79575b78986c1b69b2db1221ffe5296898f10e85
101,015
py
Python
scipy/signal/tests/test_signaltools.py
magnusja/scipy
c4a5a1f984e28840010f20a7e41caa21b8f41979
[ "FSFAP" ]
1
2019-06-24T17:59:50.000Z
2019-06-24T17:59:50.000Z
scipy/signal/tests/test_signaltools.py
magnusja/scipy
c4a5a1f984e28840010f20a7e41caa21b8f41979
[ "FSFAP" ]
null
null
null
scipy/signal/tests/test_signaltools.py
magnusja/scipy
c4a5a1f984e28840010f20a7e41caa21b8f41979
[ "FSFAP" ]
1
2019-02-27T06:19:13.000Z
2019-02-27T06:19:13.000Z
# -*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import import sys from decimal import Decimal from itertools import product import warnings import pytest from pytest import raises as assert_raises from numpy.testing import ( assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_allclose, assert_, assert_warns, assert_array_less) from scipy._lib._numpy_compat import suppress_warnings from numpy import array, arange import numpy as np from scipy.ndimage.filters import correlate1d from scipy.optimize import fmin from scipy import signal from scipy.signal import ( correlate, convolve, convolve2d, fftconvolve, choose_conv_method, hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos, invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt, sosfilt_zi, tf2zpk, BadCoefficients) from scipy.signal.windows import hann from scipy.signal.signaltools import _filtfilt_gust if sys.version_info.major >= 3 and sys.version_info.minor >= 5: from math import gcd else: from fractions import gcd class _TestConvolve(object): def test_basic(self): a = [3, 4, 5, 6, 5, 4] b = [1, 2, 3] c = convolve(a, b) assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12])) def test_same(self): a = [3, 4, 5] b = [1, 2, 3, 4] c = convolve(a, b, mode="same") assert_array_equal(c, array([10, 22, 34])) def test_same_eq(self): a = [3, 4, 5] b = [1, 2, 3] c = convolve(a, b, mode="same") assert_array_equal(c, array([10, 22, 22])) def test_complex(self): x = array([1 + 1j, 2 + 1j, 3 + 1j]) y = array([1 + 1j, 2 + 1j]) z = convolve(x, y) assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j])) def test_zero_rank(self): a = 1289 b = 4567 c = convolve(a, b) assert_equal(c, a * b) def test_single_element(self): a = array([4967]) b = array([3920]) c = convolve(a, b) assert_equal(c, a * b) def test_2d_arrays(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve(a, b) d = array([[2, 7, 16, 17, 12], [10, 30, 62, 58, 38], [12, 31, 58, 49, 30]]) assert_array_equal(c, d) def test_input_swapping(self): small = arange(8).reshape(2, 2, 2) big = 1j * arange(27).reshape(3, 3, 3) big += arange(27)[::-1].reshape(3, 3, 3) out_array = array( [[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j], [52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j], [46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j], [40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]], [[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j], [282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j], [246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j], [142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]], [[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j], [174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j], [138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j], [70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]], [[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j], [68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j], [38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j], [12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]]) assert_array_equal(convolve(small, big, 'full'), out_array) assert_array_equal(convolve(big, small, 'full'), out_array) assert_array_equal(convolve(small, big, 'same'), out_array[1:3, 1:3, 1:3]) assert_array_equal(convolve(big, small, 'same'), out_array[0:3, 0:3, 0:3]) assert_array_equal(convolve(small, big, 'valid'), out_array[1:3, 1:3, 1:3]) assert_array_equal(convolve(big, small, 'valid'), out_array[1:3, 1:3, 1:3]) def test_invalid_params(self): a = [3, 4, 5] b = [1, 2, 3] assert_raises(ValueError, convolve, a, b, mode='spam') assert_raises(ValueError, convolve, 
a, b, mode='eggs', method='fft') assert_raises(ValueError, convolve, a, b, mode='ham', method='direct') assert_raises(ValueError, convolve, a, b, mode='full', method='bacon') assert_raises(ValueError, convolve, a, b, mode='same', method='bacon') class TestConvolve(_TestConvolve): def test_valid_mode2(self): # See gh-5897 a = [1, 2, 3, 6, 5, 3] b = [2, 3, 4, 5, 3, 4, 2, 2, 1] expected = [70, 78, 73, 65] out = convolve(a, b, 'valid') assert_array_equal(out, expected) out = convolve(b, a, 'valid') assert_array_equal(out, expected) a = [1 + 5j, 2 - 1j, 3 + 0j] b = [2 - 3j, 1 + 0j] expected = [2 - 3j, 8 - 10j] out = convolve(a, b, 'valid') assert_array_equal(out, expected) out = convolve(b, a, 'valid') assert_array_equal(out, expected) def test_same_mode(self): a = [1, 2, 3, 3, 1, 2] b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3] c = convolve(a, b, 'same') d = array([57, 61, 63, 57, 45, 36]) assert_array_equal(c, d) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, convolve, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, convolve, *(b, a), **{'mode': 'valid'}) def test_convolve_method(self, n=100): types = sum([t for _, t in np.sctypes.items()], []) types = {np.dtype(t).name for t in types} # These types include 'bool' and all precisions (int8, float32, etc) # The removed types throw errors in correlate or fftconvolve for dtype in ['complex256', 'complex192', 'float128', 'float96', 'str', 'void', 'bytes', 'object', 'unicode', 'string']: if dtype in types: types.remove(dtype) args = [(t1, t2, mode) for t1 in types for t2 in types for mode in ['valid', 'full', 'same']] # These are random arrays, which means test is much stronger than # convolving testing by convolving two np.ones arrays np.random.seed(42) array_types = {'i': np.random.choice([0, 1], size=n), 'f': np.random.randn(n)} array_types['b'] = array_types['u'] = array_types['i'] array_types['c'] = array_types['f'] + 0.5j*array_types['f'] for t1, t2, mode in args: x1 = array_types[np.dtype(t1).kind].astype(t1) x2 = array_types[np.dtype(t2).kind].astype(t2) results = {key: convolve(x1, x2, method=key, mode=mode) for key in ['fft', 'direct']} assert_equal(results['fft'].dtype, results['direct'].dtype) if 'bool' in t1 and 'bool' in t2: assert_equal(choose_conv_method(x1, x2), 'direct') continue # Found by experiment. Found approx smallest value for (rtol, atol) # threshold to have tests pass. if any([t in {'complex64', 'float32'} for t in [t1, t2]]): kwargs = {'rtol': 1.0e-4, 'atol': 1e-6} elif 'float16' in [t1, t2]: # atol is default for np.allclose kwargs = {'rtol': 1e-3, 'atol': 1e-8} else: # defaults for np.allclose (different from assert_allclose) kwargs = {'rtol': 1e-5, 'atol': 1e-8} assert_allclose(results['fft'], results['direct'], **kwargs) def test_convolve_method_large_input(self): # This is really a test that convolving two large integers goes to the # direct method even if they're in the fft method. 
for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]: z = np.array([2**n], dtype=np.int64) fft = convolve(z, z, method='fft') direct = convolve(z, z, method='direct') # this is the case when integer precision gets to us # issue #6076 has more detail, hopefully more tests after resolved if n < 50: assert_equal(fft, direct) assert_equal(fft, 2**(2*n)) assert_equal(direct, 2**(2*n)) def test_mismatched_dims(self): # Input arrays should have the same number of dimensions assert_raises(ValueError, convolve, [1], 2, method='direct') assert_raises(ValueError, convolve, 1, [2], method='direct') assert_raises(ValueError, convolve, [1], 2, method='fft') assert_raises(ValueError, convolve, 1, [2], method='fft') assert_raises(ValueError, convolve, [1], [[2]]) assert_raises(ValueError, convolve, [3], 2) class _TestConvolve2d(object): def test_2d_arrays(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] d = array([[2, 7, 16, 17, 12], [10, 30, 62, 58, 38], [12, 31, 58, 49, 30]]) e = convolve2d(a, b) assert_array_equal(e, d) def test_valid_mode(self): e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] f = [[1, 2, 3], [3, 4, 5]] h = array([[62, 80, 98, 116, 134]]) g = convolve2d(e, f, 'valid') assert_array_equal(g, h) # See gh-5897 g = convolve2d(f, e, 'valid') assert_array_equal(g, h) def test_valid_mode_complx(self): e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]]) g = convolve2d(e, f, 'valid') assert_array_almost_equal(g, h) # See gh-5897 g = convolve2d(f, e, 'valid') assert_array_equal(g, h) def test_fillvalue(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] fillval = 1 c = convolve2d(a, b, 'full', 'fill', fillval) d = array([[24, 26, 31, 34, 32], [28, 40, 62, 64, 52], [32, 46, 67, 62, 48]]) assert_array_equal(c, d) def test_fillvalue_deprecations(self): # Deprecated 2017-07, scipy version 1.0.0 with suppress_warnings() as sup: sup.filter(np.ComplexWarning, "Casting complex values to real") r = sup.record(DeprecationWarning, "could not cast `fillvalue`") convolve2d([[1]], [[1, 2]], fillvalue=1j) assert_(len(r) == 1) warnings.filterwarnings( "error", message="could not cast `fillvalue`", category=DeprecationWarning) assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]], fillvalue=1j) with suppress_warnings(): warnings.filterwarnings( "always", message="`fillvalue` must be scalar or an array ", category=DeprecationWarning) assert_warns(DeprecationWarning, convolve2d, [[1]], [[1, 2]], fillvalue=[1, 2]) warnings.filterwarnings( "error", message="`fillvalue` must be scalar or an array ", category=DeprecationWarning) assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]], fillvalue=[1, 2]) def test_fillvalue_empty(self): # Check that fillvalue being empty raises an error: assert_raises(ValueError, convolve2d, [[1]], [[1, 2]], fillvalue=[]) def test_wrap_boundary(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve2d(a, b, 'full', 'wrap') d = array([[80, 80, 74, 80, 80], [68, 68, 62, 68, 68], [80, 80, 74, 80, 80]]) assert_array_equal(c, d) def test_sym_boundary(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve2d(a, b, 'full', 'symm') d = array([[34, 30, 44, 62, 66], [52, 48, 62, 80, 84], [82, 78, 92, 110, 114]]) assert_array_equal(c, d) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. 
This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, convolve2d, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, convolve2d, *(b, a), **{'mode': 'valid'}) class TestConvolve2d(_TestConvolve2d): def test_same_mode(self): e = [[1, 2, 3], [3, 4, 5]] f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] g = convolve2d(e, f, 'same') h = array([[22, 28, 34], [80, 98, 116]]) assert_array_equal(g, h) def test_valid_mode2(self): # See gh-5897 e = [[1, 2, 3], [3, 4, 5]] f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] expected = [[62, 80, 98, 116, 134]] out = convolve2d(e, f, 'valid') assert_array_equal(out, expected) out = convolve2d(f, e, 'valid') assert_array_equal(out, expected) e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]] f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]] expected = [[27 - 1j, 46. + 2j]] out = convolve2d(e, f, 'valid') assert_array_equal(out, expected) # See gh-5897 out = convolve2d(f, e, 'valid') assert_array_equal(out, expected) def test_consistency_convolve_funcs(self): # Compare np.convolve, signal.convolve, signal.convolve2d a = np.arange(5) b = np.array([3.2, 1.4, 3]) for mode in ['full', 'valid', 'same']: assert_almost_equal(np.convolve(a, b, mode=mode), signal.convolve(a, b, mode=mode)) assert_almost_equal(np.squeeze( signal.convolve2d([a], [b], mode=mode)), signal.convolve(a, b, mode=mode)) def test_invalid_dims(self): assert_raises(ValueError, convolve2d, 3, 4) assert_raises(ValueError, convolve2d, [3], [4]) assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]]) class TestFFTConvolve(object): @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_real(self, axes): a = array([1, 2, 3]) expected = array([1, 4, 10, 12, 9.]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_real_axes(self, axes): a = array([1, 2, 3]) expected = array([1, 4, 10, 12, 9.]) a = np.tile(a, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_complex(self, axes): a = array([1 + 1j, 2 + 2j, 3 + 3j]) expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_complex_axes(self, axes): a = array([1 + 1j, 2 + 2j, 3 + 3j]) expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) a = np.tile(a, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, [0, 1], [1, 0], [0, -1], [-1, 0], [-2, 1], [1, -2], [-2, -1], [-1, -2]]) def test_2d_real_same(self, axes): a = array([[1, 2, 3], [4, 5, 6]]) expected = array([[1, 4, 10, 12, 9], [8, 26, 56, 54, 36], [16, 40, 73, 60, 36]]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [[1, 2], [2, 1], [1, -1], [-1, 1], [-2, 2], [2, -2], [-2, -1], [-1, -2]]) def test_2d_real_same_axes(self, axes): a = array([[1, 2, 3], [4, 5, 6]]) expected = array([[1, 4, 10, 12, 9], [8, 26, 56, 54, 36], [16, 40, 73, 60, 36]]) a = np.tile(a, [2, 1, 1]) expected = np.tile(expected, [2, 1, 1]) out = 
fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, [0, 1], [1, 0], [0, -1], [-1, 0], [-2, 1], [1, -2], [-2, -1], [-1, -2]]) def test_2d_complex_same(self, axes): a = array([[1 + 2j, 3 + 4j, 5 + 6j], [2 + 1j, 4 + 3j, 6 + 5j]]) expected = array([ [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], [10j, 44j, 118j, 156j, 122j], [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] ]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [[1, 2], [2, 1], [1, -1], [-1, 1], [-2, 2], [2, -2], [-2, -1], [-1, -2]]) def test_2d_complex_same_axes(self, axes): a = array([[1 + 2j, 3 + 4j, 5 + 6j], [2 + 1j, 4 + 3j, 6 + 5j]]) expected = array([ [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], [10j, 44j, 118j, 156j, 122j], [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] ]) a = np.tile(a, [2, 1, 1]) expected = np.tile(expected, [2, 1, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_real_same_mode(self, axes): a = array([1, 2, 3]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected_1 = array([35., 41., 47.]) expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) if axes == '': out = fftconvolve(a, b, 'same') else: out = fftconvolve(a, b, 'same', axes=axes) assert_array_almost_equal(out, expected_1) if axes == '': out = fftconvolve(b, a, 'same') else: out = fftconvolve(b, a, 'same', axes=axes) assert_array_almost_equal(out, expected_2) @pytest.mark.parametrize('axes', [1, -1, [1], [-1]]) def test_real_same_mode_axes(self, axes): a = array([1, 2, 3]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected_1 = array([35., 41., 47.]) expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected_1 = np.tile(expected_1, [2, 1]) expected_2 = np.tile(expected_2, [2, 1]) out = fftconvolve(a, b, 'same', axes=axes) assert_array_almost_equal(out, expected_1) out = fftconvolve(b, a, 'same', axes=axes) assert_array_almost_equal(out, expected_2) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_valid_mode_real(self, axes): # See gh-5897 a = array([3, 2, 1]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected = array([24., 31., 41., 43., 49., 25., 12.]) if axes == '': out = fftconvolve(a, b, 'valid') else: out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) if axes == '': out = fftconvolve(b, a, 'valid') else: out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1]]) def test_valid_mode_real_axes(self, axes): # See gh-5897 a = array([3, 2, 1]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected = array([24., 31., 41., 43., 49., 25., 12.]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_valid_mode_complex(self, axes): a = array([3 - 1j, 2 + 7j, 1 + 0j]) b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) expected = array([45. + 12.j, 30. 
+ 23.j, 48 + 32.j]) if axes == '': out = fftconvolve(a, b, 'valid') else: out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) if axes == '': out = fftconvolve(b, a, 'valid') else: out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_valid_mode_complex_axes(self, axes): a = array([3 - 1j, 2 + 7j, 1 + 0j]) b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) def test_empty(self): # Regression test for #1745: crashes with 0-length input. assert_(fftconvolve([], []).size == 0) assert_(fftconvolve([5, 6], []).size == 0) assert_(fftconvolve([], [7]).size == 0) def test_zero_rank(self): a = array(4967) b = array(3920) out = fftconvolve(a, b) assert_equal(out, a * b) def test_single_element(self): a = array([4967]) b = array([3920]) out = fftconvolve(a, b) assert_equal(out, a * b) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_random_data(self, axes): np.random.seed(1234) a = np.random.rand(1233) + 1j * np.random.rand(1233) b = np.random.rand(1321) + 1j * np.random.rand(1321) expected = np.convolve(a, b, 'full') if axes == '': out = fftconvolve(a, b, 'full') else: out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_random_data_axes(self, axes): np.random.seed(1234) a = np.random.rand(1233) + 1j * np.random.rand(1233) b = np.random.rand(1321) + 1j * np.random.rand(1321) expected = np.convolve(a, b, 'full') a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.parametrize('axes', [[1, 4], [4, 1], [1, -1], [-1, 1], [-4, 4], [4, -4], [-4, -1], [-1, -4]]) def test_random_data_multidim_axes(self, axes): np.random.seed(1234) a = np.random.rand(123, 222) + 1j * np.random.rand(123, 222) b = np.random.rand(132, 111) + 1j * np.random.rand(132, 111) expected = convolve2d(a, b, 'full') a = a[:, :, None, None, None] b = b[:, :, None, None, None] expected = expected[:, :, None, None, None] a = np.rollaxis(a.swapaxes(0, 2), 1, 5) b = np.rollaxis(b.swapaxes(0, 2), 1, 5) expected = np.rollaxis(expected.swapaxes(0, 2), 1, 5) # use 1 for dimension 2 in a and 3 in b to test broadcasting a = np.tile(a, [2, 1, 3, 1, 1]) b = np.tile(b, [2, 1, 1, 4, 1]) expected = np.tile(expected, [2, 1, 3, 4, 1]) out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.slow @pytest.mark.parametrize( 'n', list(range(1, 100)) + list(range(1000, 1500)) + np.random.RandomState(1234).randint(1001, 10000, 5).tolist()) def test_many_sizes(self, n): a = np.random.rand(n) + 1j * np.random.rand(n) b = np.random.rand(n) + 1j * np.random.rand(n) expected = np.convolve(a, b, 'full') out = fftconvolve(a, b, 'full') assert_allclose(out, expected, atol=1e-10) out = fftconvolve(a, b, 'full', axes=[0]) assert_allclose(out, expected, atol=1e-10) def test_invalid_shapes(self): a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) with assert_raises(ValueError, match="For 'valid' mode, one must be at least " "as 
large as the other in every dimension"): fftconvolve(a, b, mode='valid') def test_invalid_shapes_axes(self): a = np.zeros([5, 6, 2, 1]) b = np.zeros([5, 6, 3, 1]) with assert_raises(ValueError, match=r"incompatible shapes for in1 and in2:" r" \(5L?, 6L?, 2L?, 1L?\) and" r" \(5L?, 6L?, 3L?, 1L?\)"): fftconvolve(a, b, axes=[0, 1]) @pytest.mark.parametrize('a,b', [([1], 2), (1, [2]), ([3], [[2]])]) def test_mismatched_dims(self, a, b): with assert_raises(ValueError, match="in1 and in2 should have the same" " dimensionality"): fftconvolve(a, b) def test_invalid_flags(self): with assert_raises(ValueError, match="acceptable mode flags are 'valid'," " 'same', or 'full'"): fftconvolve([1], [2], mode='chips') with assert_raises(ValueError, match="when provided, axes cannot be empty"): fftconvolve([1], [2], axes=[]) with assert_raises(ValueError, match="when given, axes values must be a scalar" " or vector"): fftconvolve([1], [2], axes=[[1, 2], [3, 4]]) with assert_raises(ValueError, match="when given, axes values must be integers"): fftconvolve([1], [2], axes=[1., 2., 3., 4.]) with assert_raises(ValueError, match="axes exceeds dimensionality of input"): fftconvolve([1], [2], axes=[1]) with assert_raises(ValueError, match="axes exceeds dimensionality of input"): fftconvolve([1], [2], axes=[-2]) with assert_raises(ValueError, match="all axes must be unique"): fftconvolve([1], [2], axes=[0, 0]) class TestMedFilt(object): def test_basic(self): f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46], [50, 50, 50, 50, 50, 0, 72, 77, 68, 66], [50, 50, 50, 50, 50, 46, 47, 19, 64, 77], [50, 50, 50, 50, 50, 42, 15, 29, 95, 35], [50, 50, 50, 50, 50, 46, 34, 9, 21, 66], [70, 97, 28, 68, 78, 77, 61, 58, 71, 42], [64, 53, 44, 29, 68, 32, 19, 68, 24, 84], [3, 33, 53, 67, 1, 78, 74, 55, 12, 83], [7, 11, 46, 70, 60, 47, 24, 43, 61, 26], [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]] d = signal.medfilt(f, [7, 3]) e = signal.medfilt2d(np.array(f, float), [7, 3]) assert_array_equal(d, [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0], [0, 50, 50, 50, 50, 42, 19, 21, 29, 0], [50, 50, 50, 50, 50, 47, 34, 34, 46, 35], [50, 50, 50, 50, 50, 50, 42, 47, 64, 42], [50, 50, 50, 50, 50, 50, 46, 55, 64, 35], [33, 50, 50, 50, 50, 47, 46, 43, 55, 26], [32, 50, 50, 50, 50, 47, 46, 45, 55, 26], [7, 46, 50, 50, 47, 46, 46, 43, 45, 21], [0, 32, 33, 39, 32, 32, 43, 43, 43, 0], [0, 7, 11, 7, 4, 4, 19, 19, 24, 0]]) assert_array_equal(d, e) def test_none(self): # Ticket #1124. Ensure this does not segfault. signal.medfilt(None) # Expand on this test to avoid a regression with possible contiguous # numpy arrays that have odd strides. The stride value below gets # us into wrong memory if used (but it does not need to be used) dummy = np.arange(10, dtype=np.float64) a = dummy[5:6] a.strides = 16 assert_(signal.medfilt(a, 1) == 5.) 
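        # Note on the stride trick above: for float64 the natural element
        # stride is 8 bytes, so strides = 16 points at every other element.
        # Because the sliced view holds only a single value, that stride
        # never needs to be dereferenced; the check is simply that medfilt
        # does not walk memory using it.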
def test_refcounting(self): # Check a refcounting-related crash a = Decimal(123) x = np.array([a, a], dtype=object) if hasattr(sys, 'getrefcount'): n = 2 * sys.getrefcount(a) else: n = 10 # Shouldn't segfault: for j in range(n): signal.medfilt(x) if hasattr(sys, 'getrefcount'): assert_(sys.getrefcount(a) < n) assert_equal(x, [a, a]) class TestWiener(object): def test_basic(self): g = array([[5, 6, 4, 3], [3, 5, 6, 2], [2, 3, 5, 6], [1, 6, 9, 7]], 'd') h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667], [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888], [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837], [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]]) assert_array_almost_equal(signal.wiener(g), h, decimal=6) assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6) class TestResample(object): def test_basic(self): # Some basic tests # Regression test for issue #3603. # window.shape must equal to sig.shape[0] sig = np.arange(128) num = 256 win = signal.get_window(('kaiser', 8.0), 160) assert_raises(ValueError, signal.resample, sig, num, window=win) # Other degenerate conditions assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1) assert_raises(ValueError, signal.resample_poly, sig, 1, 0) # test for issue #6505 - should not modify window.shape when axis ≠ 0 sig2 = np.tile(np.arange(160), (2,1)) signal.resample(sig2, num, axis=-1, window=win) assert_(win.shape == (160,)) def test_fft(self): # Test FFT-based resampling self._test_data(method='fft') def test_polyphase(self): # Test polyphase resampling self._test_data(method='polyphase') def test_polyphase_extfilter(self): # Test external specification of downsampling filter self._test_data(method='polyphase', ext=True) def test_mutable_window(self): # Test that a mutable window is not modified impulse = np.zeros(3) window = np.random.RandomState(0).randn(2) window_orig = window.copy() signal.resample_poly(impulse, 5, 1, window=window) assert_array_equal(window, window_orig) def test_output_float32(self): # Test that float32 inputs yield a float32 output x = np.arange(10, dtype=np.float32) h = np.array([1,1,1], dtype=np.float32) y = signal.resample_poly(x, 1, 2, window=h) assert_(y.dtype == np.float32) def _test_data(self, method, ext=False): # Test resampling of sinusoids and random noise (1-sec) rate = 100 rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201] # Sinusoids, windowed to avoid edge artifacts t = np.arange(rate) / float(rate) freqs = np.array((1., 10., 40.))[:, np.newaxis] x = np.sin(2 * np.pi * freqs * t) * hann(rate) for rate_to in rates_to: t_to = np.arange(rate_to) / float(rate_to) y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to) if method == 'fft': y_resamps = signal.resample(x, rate_to, axis=-1) else: if ext and rate_to != rate: # Match default window design g = gcd(rate_to, rate) up = rate_to // g down = rate // g max_rate = max(up, down) f_c = 1. / max_rate half_len = 10 * max_rate window = signal.firwin(2 * half_len + 1, f_c, window=('kaiser', 5.0)) polyargs = {'window': window} else: polyargs = {} y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1, **polyargs) for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs): if freq >= 0.5 * rate_to: y_to.fill(0.) 
                    # mostly low-passed away
                    assert_allclose(y_resamp, y_to, atol=1e-3)
                else:
                    assert_array_equal(y_to.shape, y_resamp.shape)
                    corr = np.corrcoef(y_to, y_resamp)[0, 1]
                    assert_(corr > 0.99, msg=(corr, rate, rate_to))

        # Random data
        rng = np.random.RandomState(0)
        x = hann(rate) * np.cumsum(rng.randn(rate))  # low-pass, windowed
        for rate_to in rates_to:
            # random data
            t_to = np.arange(rate_to) / float(rate_to)
            y_to = np.interp(t_to, t, x)
            if method == 'fft':
                y_resamp = signal.resample(x, rate_to)
            else:
                y_resamp = signal.resample_poly(x, rate_to, rate)
            assert_array_equal(y_to.shape, y_resamp.shape)
            corr = np.corrcoef(y_to, y_resamp)[0, 1]
            assert_(corr > 0.99, msg=corr)

        # More tests of fft method (Master 0.18.1 fails these)
        if method == 'fft':
            x1 = np.array([1.+0.j,0.+0.j])
            y1_test = signal.resample(x1,4)
            y1_true = np.array([1.+0.j,0.5+0.j,0.+0.j,0.5+0.j])  # upsampling a complex array
            assert_allclose(y1_test, y1_true, atol=1e-12)
            x2 = np.array([1.,0.5,0.,0.5])
            y2_test = signal.resample(x2,2)  # downsampling a real array
            y2_true = np.array([1.,0.])
            assert_allclose(y2_test, y2_true, atol=1e-12)

    def test_poly_vs_filtfilt(self):
        # Check that up=1.0 gives same answer as filtfilt + slicing
        random_state = np.random.RandomState(17)
        try_types = (int, np.float32, np.complex64, float, complex)
        size = 10000
        down_factors = [2, 11, 79]

        for dtype in try_types:
            x = random_state.randn(size).astype(dtype)
            if dtype in (np.complex64, np.complex128):
                x += 1j * random_state.randn(size)

            # resample_poly assumes zeros outside of signal, whereas filtfilt
            # can only constant-pad. Make them equivalent:
            x[0] = 0
            x[-1] = 0

            for down in down_factors:
                h = signal.firwin(31, 1. / down, window='hamming')
                yf = filtfilt(h, 1.0, x, padtype='constant')[::down]

                # Need to pass convolved version of filter to resample_poly,
                # since filtfilt does forward and backward, but resample_poly
                # only goes forward
                hc = convolve(h, h[::-1])
                y = signal.resample_poly(x, 1, down, window=hc)
                assert_allclose(yf, y, atol=1e-7, rtol=1e-7)

    def test_correlate1d(self):
        for down in [2, 4]:
            for nx in range(1, 40, down):
                for nweights in (32, 33):
                    x = np.random.random((nx,))
                    weights = np.random.random((nweights,))
                    y_g = correlate1d(x, weights[::-1], mode='constant')
                    y_s = signal.resample_poly(x, up=1, down=down,
                                               window=weights)
                    assert_allclose(y_g[::down], y_s)


class TestCSpline1DEval(object):

    def test_basic(self):
        y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0])
        x = arange(len(y))
        dx = x[1] - x[0]
        cj = signal.cspline1d(y)

        x2 = arange(len(y) * 10.0) / 10.0
        y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0])

        # make sure interpolated values are on knot points
        assert_array_almost_equal(y2[::10], y, decimal=5)

    def test_complex(self):
        # create some smoothly varying complex signal to interpolate
        x = np.arange(2)
        y = np.zeros(x.shape, dtype=np.complex64)
        T = 10.0
        f = 1.0 / T
        y = np.exp(2.0J * np.pi * f * x)

        # get the cspline transform
        cy = signal.cspline1d(y)

        # determine new test x value and interpolate
        xnew = np.array([0.5])
        ynew = signal.cspline1d_eval(cy, xnew)
        assert_equal(ynew.dtype, y.dtype)


class TestOrderFilt(object):

    def test_basic(self):
        assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1),
                           [2, 3, 2])


class _TestLinearFilter(object):

    def generate(self, shape):
        x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
        return self.convert_dtype(x)

    def convert_dtype(self, arr):
        if self.dtype == np.dtype('O'):
            arr = np.asarray(arr)
            out = np.empty(arr.shape, self.dtype)
            iter = np.nditer([arr, out], ['refs_ok','zerosize_ok'],
                             [['readonly'],['writeonly']])
            for x, y
in iter: y[...] = self.type(x[()]) return out else: return np.array(arr, self.dtype, copy=False) def test_rank_1_IIR(self): x = self.generate((6,)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, -0.5]) y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) assert_array_almost_equal(lfilter(b, a, x), y_r) def test_rank_1_FIR(self): x = self.generate((6,)) b = self.convert_dtype([1, 1]) a = self.convert_dtype([1]) y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.]) assert_array_almost_equal(lfilter(b, a, x), y_r) def test_rank_1_IIR_init_cond(self): x = self.generate((6,)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([0.5, -0.5]) zi = self.convert_dtype([1, 2]) y_r = self.convert_dtype([1, 5, 9, 13, 17, 21]) zf_r = self.convert_dtype([13, -10]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_1_FIR_init_cond(self): x = self.generate((6,)) b = self.convert_dtype([1, 1, 1]) a = self.convert_dtype([1]) zi = self.convert_dtype([1, 1]) y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.]) zf_r = self.convert_dtype([9, 5]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_2_IIR_axis_0(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]]) y = lfilter(b, a, x, axis=0) assert_array_almost_equal(y_r2_a0, y) def test_rank_2_IIR_axis_1(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12], [18, -16, 18]]) y = lfilter(b, a, x, axis=1) assert_array_almost_equal(y_r2_a1, y) def test_rank_2_IIR_axis_0_init_cond(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) zi = self.convert_dtype(np.ones((4,1))) y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13], [19, -17, 19]]) zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis] y, zf = lfilter(b, a, x, axis=1, zi=zi) assert_array_almost_equal(y_r2_a0_1, y) assert_array_almost_equal(zf, zf_r) def test_rank_2_IIR_axis_1_init_cond(self): x = self.generate((4,3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) zi = self.convert_dtype(np.ones((1,3))) y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1], [1, 3, 5], [5, 3, 1]]) zf_r = self.convert_dtype([[-23, -23, -23]]) y, zf = lfilter(b, a, x, axis=0, zi=zi) assert_array_almost_equal(y_r2_a0_0, y) assert_array_almost_equal(zf, zf_r) def test_rank_3_IIR(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) for axis in range(x.ndim): y = lfilter(b, a, x, axis) y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) assert_array_almost_equal(y, y_r) def test_rank_3_IIR_init_cond(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) for axis in range(x.ndim): zi_shape = list(x.shape) zi_shape[axis] = 1 zi = self.convert_dtype(np.ones(zi_shape)) zi1 = self.convert_dtype([1]) y, zf = lfilter(b, a, x, axis, zi) lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0] lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1] y_r = np.apply_along_axis(lf0, axis, x) zf_r = np.apply_along_axis(lf1, axis, x) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_3_FIR(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) 
for axis in range(x.ndim): y = lfilter(b, a, x, axis) y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) assert_array_almost_equal(y, y_r) def test_rank_3_FIR_init_cond(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) for axis in range(x.ndim): zi_shape = list(x.shape) zi_shape[axis] = 2 zi = self.convert_dtype(np.ones(zi_shape)) zi1 = self.convert_dtype([1, 1]) y, zf = lfilter(b, a, x, axis, zi) lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0] lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1] y_r = np.apply_along_axis(lf0, axis, x) zf_r = np.apply_along_axis(lf1, axis, x) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_zi_pseudobroadcast(self): x = self.generate((4, 5, 20)) b,a = signal.butter(8, 0.2, output='ba') b = self.convert_dtype(b) a = self.convert_dtype(a) zi_size = b.shape[0] - 1 # lfilter requires x.ndim == zi.ndim exactly. However, zi can have # length 1 dimensions. zi_full = self.convert_dtype(np.ones((4, 5, zi_size))) zi_sing = self.convert_dtype(np.ones((1, 1, zi_size))) y_full, zf_full = lfilter(b, a, x, zi=zi_full) y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing) assert_array_almost_equal(y_sing, y_full) assert_array_almost_equal(zf_full, zf_sing) # lfilter does not prepend ones assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size)) def test_scalar_a(self): # a can be a scalar. x = self.generate(6) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) y_r = self.convert_dtype([0, 1, 2, 2, 2, 2]) y = lfilter(b, a[0], x) assert_array_almost_equal(y, y_r) def test_zi_some_singleton_dims(self): # lfilter doesn't really broadcast (no prepending of 1's). But does # do singleton expansion if x and zi have the same ndim. This was # broken only if a subset of the axes were singletons (gh-4681). 
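        # Concretely (illustrative note): x below has shape (3, 2, 5) and zi
        # has shape (3, 1, 4). The length-1 axis of zi is expanded to match
        # x's length-2 axis, while the last axis keeps the
        # max(len(a), len(b)) - 1 == 4 initial-condition values per filter.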
x = self.convert_dtype(np.zeros((3,2,5), 'l')) b = self.convert_dtype(np.ones(5, 'l')) a = self.convert_dtype(np.array([1,0,0])) zi = np.ones((3,1,4), 'l') zi[1,:,:] *= 2 zi[2,:,:] *= 3 zi = self.convert_dtype(zi) zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l')) y_expected = np.zeros((3,2,5), 'l') y_expected[:,:,:4] = [[[1]], [[2]], [[3]]] y_expected = self.convert_dtype(y_expected) # IIR y_iir, zf_iir = lfilter(b, a, x, -1, zi) assert_array_almost_equal(y_iir, y_expected) assert_array_almost_equal(zf_iir, zf_expected) # FIR y_fir, zf_fir = lfilter(b, a[0], x, -1, zi) assert_array_almost_equal(y_fir, y_expected) assert_array_almost_equal(zf_fir, zf_expected) def base_bad_size_zi(self, b, a, x, axis, zi): b = self.convert_dtype(b) a = self.convert_dtype(a) x = self.convert_dtype(x) zi = self.convert_dtype(zi) assert_raises(ValueError, lfilter, b, a, x, axis, zi) def test_bad_size_zi(self): # rank 1 x1 = np.arange(6) self.base_bad_size_zi([1], [1], x1, -1, [1]) self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1]) self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]]) self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]]) self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1]) self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]]) self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3]) # rank 2 x2 = np.arange(12).reshape((4,3)) # for axis=0 zi.shape should == (max(len(a),len(b))-1, 3) self.base_bad_size_zi([1], [1], x2, 0, [0]) # for each of these there are 5 cases tested (in this order): # 1. not deep enough, right # elements # 2. too deep, right # elements # 3. right depth, right # elements, transposed # 4. right depth, too few elements # 5. 
right depth, too many elements self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) # for axis=1 zi.shape should == (4, max(len(a),len(b))-1) self.base_bad_size_zi([1], [1], x2, 1, [0]) self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) def test_empty_zi(self): # Regression test for #880: empty array for zi crashes. 
x = self.generate((5,)) a = self.convert_dtype([1]) b = self.convert_dtype([1]) zi = self.convert_dtype([]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, x) assert_equal(zf.dtype, self.dtype) assert_equal(zf.size, 0) def test_lfiltic_bad_zi(self): # Regression test for #3699: bad initial conditions a = self.convert_dtype([1]) b = self.convert_dtype([1]) # "y" sets the datatype of zi, so it truncates if int zi = lfiltic(b, a, [1., 0]) zi_1 = lfiltic(b, a, [1, 0]) zi_2 = lfiltic(b, a, [True, False]) assert_array_equal(zi, zi_1) assert_array_equal(zi, zi_2) def test_short_x_FIR(self): # regression test for #5116 # x shorter than b, with non None zi fails a = self.convert_dtype([1]) b = self.convert_dtype([1, 0, -1]) zi = self.convert_dtype([2, 7]) x = self.convert_dtype([72]) ye = self.convert_dtype([74]) zfe = self.convert_dtype([7, -72]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, ye) assert_array_almost_equal(zf, zfe) def test_short_x_IIR(self): # regression test for #5116 # x shorter than b, with non None zi fails a = self.convert_dtype([1, 1]) b = self.convert_dtype([1, 0, -1]) zi = self.convert_dtype([2, 7]) x = self.convert_dtype([72]) ye = self.convert_dtype([74]) zfe = self.convert_dtype([-67, -72]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, ye) assert_array_almost_equal(zf, zfe) def test_do_not_modify_a_b_IIR(self): x = self.generate((6,)) b = self.convert_dtype([1, -1]) b0 = b.copy() a = self.convert_dtype([0.5, -0.5]) a0 = a.copy() y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) y_f = lfilter(b, a, x) assert_array_almost_equal(y_f, y_r) assert_equal(b, b0) assert_equal(a, a0) def test_do_not_modify_a_b_FIR(self): x = self.generate((6,)) b = self.convert_dtype([1, 0, 1]) b0 = b.copy() a = self.convert_dtype([2]) a0 = a.copy() y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.]) y_f = lfilter(b, a, x) assert_array_almost_equal(y_f, y_r) assert_equal(b, b0) assert_equal(a, a0) class TestLinearFilterFloat32(_TestLinearFilter): dtype = np.dtype('f') class TestLinearFilterFloat64(_TestLinearFilter): dtype = np.dtype('d') class TestLinearFilterFloatExtended(_TestLinearFilter): dtype = np.dtype('g') class TestLinearFilterComplex64(_TestLinearFilter): dtype = np.dtype('F') class TestLinearFilterComplex128(_TestLinearFilter): dtype = np.dtype('D') class TestLinearFilterComplexExtended(_TestLinearFilter): dtype = np.dtype('G') class TestLinearFilterDecimal(_TestLinearFilter): dtype = np.dtype('O') def type(self, x): return Decimal(str(x)) class TestLinearFilterObject(_TestLinearFilter): dtype = np.dtype('O') type = float def test_lfilter_bad_object(): # lfilter: object arrays with non-numeric objects raise TypeError. # Regression test for ticket #1452. 
assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0]) assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0]) assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0]) with assert_raises(ValueError, match='common type'): lfilter([1.], [1., 1.], ['a', 'b', 'c']) def test_lfilter_notimplemented_input(): # Should not crash, gh-7991 assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5]) @pytest.mark.parametrize('dt', [np.ubyte, np.byte, np.ushort, np.short, np.uint, int, np.ulonglong, np.ulonglong, np.float32, np.float64, np.longdouble, Decimal]) class TestCorrelateReal(object): def _setup_rank1(self, dt): a = np.linspace(0, 3, 4).astype(dt) b = np.linspace(1, 2, 2).astype(dt) y_r = np.array([0, 2, 5, 8, 3]).astype(dt) return a, b, y_r def equal_tolerance(self, res_dt): # default value of keyword decimal = 6 try: dt_info = np.finfo(res_dt) if hasattr(dt_info, 'resolution'): decimal = int(-0.5*np.log10(dt_info.resolution)) except Exception: pass return decimal def equal_tolerance_fft(self, res_dt): # FFT implementations convert longdouble arguments down to # double so don't expect better precision, see gh-9520 if res_dt == np.longdouble: return self.equal_tolerance(np.double) else: return self.equal_tolerance(res_dt) def test_method(self, dt): if dt == Decimal: method = choose_conv_method([Decimal(4)], [Decimal(3)]) assert_equal(method, 'direct') else: a, b, y_r = self._setup_rank3(dt) y_fft = correlate(a, b, method='fft') y_direct = correlate(a, b, method='direct') assert_array_almost_equal(y_r, y_fft, decimal=self.equal_tolerance_fft(y_fft.dtype)) assert_array_almost_equal(y_r, y_direct, decimal=self.equal_tolerance(y_direct.dtype)) assert_equal(y_fft.dtype, dt) assert_equal(y_direct.dtype, dt) def test_rank1_valid(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'valid') assert_array_almost_equal(y, y_r[1:4]) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, 'valid') assert_array_almost_equal(y, y_r[1:4][::-1]) assert_equal(y.dtype, dt) def test_rank1_same(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'same') assert_array_almost_equal(y, y_r[:-1]) assert_equal(y.dtype, dt) def test_rank1_full(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r) assert_equal(y.dtype, dt) def _setup_rank3(self, dt): a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype( dt) b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype( dt) y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.], [46., 432., 1062., 1840., 2672., 1698., 864., 266.], [134., 736., 1662., 2768., 3920., 2418., 1168., 314.], [260., 952., 1932., 3056., 4208., 2580., 1240., 332.], [202., 664., 1290., 1984., 2688., 1590., 712., 150.], [114., 344., 642., 960., 1280., 726., 296., 38.]], [[23., 400., 1035., 1832., 2696., 1737., 904., 293.], [134., 920., 2166., 3680., 5280., 3306., 1640., 474.], [325., 1544., 3369., 5512., 7720., 4683., 2192., 535.], [571., 1964., 3891., 6064., 8272., 4989., 2324., 565.], [434., 1360., 2586., 3920., 5264., 3054., 1312., 230.], [241., 700., 1281., 1888., 2496., 1383., 532., 39.]], [[22., 214., 528., 916., 1332., 846., 430., 132.], [86., 484., 1098., 1832., 2600., 1602., 772., 206.], [188., 802., 1698., 2732., 3788., 2256., 1018., 218.], [308., 1006., 1950., 2996., 4052., 2400., 1078., 230.], [230., 692., 1290., 1928., 2568., 1458., 596., 78.], [126., 354., 636., 924., 1212., 654., 234., 0.]]], dtype=dt) return a, b, y_r def test_rank3_valid(self, 
                         dt):
        a, b, y_r = self._setup_rank3(dt)
        y = correlate(a, b, "valid")
        assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5])
        assert_equal(y.dtype, dt)
        # See gh-5897
        y = correlate(b, a, "valid")
        assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1])
        assert_equal(y.dtype, dt)

    def test_rank3_same(self, dt):
        a, b, y_r = self._setup_rank3(dt)
        y = correlate(a, b, "same")
        assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2])
        assert_equal(y.dtype, dt)

    def test_rank3_all(self, dt):
        a, b, y_r = self._setup_rank3(dt)
        y = correlate(a, b)
        assert_array_almost_equal(y, y_r)
        assert_equal(y.dtype, dt)


class TestCorrelate(object):
    # Tests that don't depend on dtype

    def test_invalid_shapes(self):
        # By "invalid," we mean that no one
        # array has dimensions that are all at
        # least as large as the corresponding
        # dimensions of the other array. This
        # setup should throw a ValueError.
        a = np.arange(1, 7).reshape((2, 3))
        b = np.arange(-6, 0).reshape((3, 2))

        assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'})
        assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'})

    def test_invalid_params(self):
        a = [3, 4, 5]
        b = [1, 2, 3]
        assert_raises(ValueError, correlate, a, b, mode='spam')
        assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft')
        assert_raises(ValueError, correlate, a, b, mode='ham', method='direct')
        assert_raises(ValueError, correlate, a, b, mode='full', method='bacon')
        assert_raises(ValueError, correlate, a, b, mode='same', method='bacon')

    def test_mismatched_dims(self):
        # Input arrays should have the same number of dimensions
        assert_raises(ValueError, correlate, [1], 2, method='direct')
        assert_raises(ValueError, correlate, 1, [2], method='direct')
        assert_raises(ValueError, correlate, [1], 2, method='fft')
        assert_raises(ValueError, correlate, 1, [2], method='fft')
        assert_raises(ValueError, correlate, [1], [[2]])
        assert_raises(ValueError, correlate, [3], 2)

    def test_numpy_fastpath(self):
        a = [1, 2, 3]
        b = [4, 5]
        assert_allclose(correlate(a, b, mode='same'), [5, 14, 23])

        a = [1, 2, 3]
        b = [4, 5, 6]
        assert_allclose(correlate(a, b, mode='same'), [17, 32, 23])
        assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12])
        assert_allclose(correlate(a, b, mode='valid'), [32])


@pytest.mark.parametrize('dt', [np.csingle, np.cdouble, np.clongdouble])
class TestCorrelateComplex(object):
    # The decimal precision to be used for comparing results.
    # This value will be passed as the 'decimal' keyword argument of
    # assert_array_almost_equal().
    # Since correlate may choose to use the FFT method, which converts
    # longdoubles to doubles internally, don't expect better precision
    # for longdouble than for double (see gh-9520).
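    # Worked example of the rule implemented below (numbers assume NumPy's
    # standard finfo values): np.finfo(np.cdouble).precision is 15 decimal
    # digits, so decimal() returns int(2 * 15 / 3) == 10, i.e. comparisons
    # must agree to roughly two thirds of the dtype's precision.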
def decimal(self, dt): if dt == np.clongdouble: dt = np.cdouble return int(2 * np.finfo(dt).precision / 3) def _setup_rank1(self, dt, mode): np.random.seed(9) a = np.random.randn(10).astype(dt) a += 1j * np.random.randn(10).astype(dt) b = np.random.randn(8).astype(dt) b += 1j * np.random.randn(8).astype(dt) y_r = (correlate(a.real, b.real, mode=mode) + correlate(a.imag, b.imag, mode=mode)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag, mode=mode) + correlate(a.imag, b.real, mode=mode)) return a, b, y_r def test_rank1_valid(self, dt): a, b, y_r = self._setup_rank1(dt, 'valid') y = correlate(a, b, 'valid') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, 'valid') assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_rank1_same(self, dt): a, b, y_r = self._setup_rank1(dt, 'same') y = correlate(a, b, 'same') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_rank1_full(self, dt): a, b, y_r = self._setup_rank1(dt, 'full') y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_swap_full(self, dt): d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt) k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt) y = correlate(d, k) assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j]) def test_swap_same(self, dt): d = [0.+0.j, 1.+1.j, 2.+2.j] k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j] y = correlate(d, k, mode="same") assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j]) def test_rank3(self, dt): a = np.random.randn(10, 8, 6).astype(dt) a += 1j * np.random.randn(10, 8, 6).astype(dt) b = np.random.randn(8, 6, 4).astype(dt) b += 1j * np.random.randn(8, 6, 4).astype(dt) y_r = (correlate(a.real, b.real) + correlate(a.imag, b.imag)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) assert_equal(y.dtype, dt) def test_rank0(self, dt): a = np.array(np.random.randn()).astype(dt) a += 1j * np.array(np.random.randn()).astype(dt) b = np.array(np.random.randn()).astype(dt) b += 1j * np.array(np.random.randn()).astype(dt) y_r = (correlate(a.real, b.real) + correlate(a.imag, b.imag)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) assert_equal(y.dtype, dt) assert_equal(correlate([1], [2j]), correlate(1, 2j)) assert_equal(correlate([2j], [3j]), correlate(2j, 3j)) assert_equal(correlate([3j], [4]), correlate(3j, 4)) class TestCorrelate2d(object): def test_consistency_correlate_funcs(self): # Compare np.correlate, signal.correlate, signal.correlate2d a = np.arange(5) b = np.array([3.2, 1.4, 3]) for mode in ['full', 'valid', 'same']: assert_almost_equal(np.correlate(a, b, mode=mode), signal.correlate(a, b, mode=mode)) assert_almost_equal(np.squeeze(signal.correlate2d([a], [b], mode=mode)), signal.correlate(a, b, mode=mode)) # See gh-5897 if mode == 'valid': assert_almost_equal(np.correlate(b, a, mode=mode), signal.correlate(b, a, mode=mode)) assert_almost_equal(np.squeeze(signal.correlate2d([b], [a], mode=mode)), signal.correlate(b, a, mode=mode)) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. 
This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, signal.correlate2d, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, signal.correlate2d, *(b, a), **{'mode': 'valid'}) def test_complex_input(self): assert_equal(signal.correlate2d([[1]], [[2j]]), -2j) assert_equal(signal.correlate2d([[2j]], [[3j]]), 6) assert_equal(signal.correlate2d([[3j]], [[4]]), 12j) class TestLFilterZI(object): def test_basic(self): a = np.array([1.0, -1.0, 0.5]) b = np.array([1.0, 0.0, 2.0]) zi_expected = np.array([5.0, -1.0]) zi = lfilter_zi(b, a) assert_array_almost_equal(zi, zi_expected) def test_scale_invariance(self): # Regression test. There was a bug in which b was not correctly # rescaled when a[0] was nonzero. b = np.array([2, 8, 5]) a = np.array([1, 1, 8]) zi1 = lfilter_zi(b, a) zi2 = lfilter_zi(2*b, 2*a) assert_allclose(zi2, zi1, rtol=1e-12) class TestFiltFilt(object): filtfilt_kind = 'tf' def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None, method='pad', irlen=None): if self.filtfilt_kind == 'tf': b, a = zpk2tf(*zpk) return filtfilt(b, a, x, axis, padtype, padlen, method, irlen) elif self.filtfilt_kind == 'sos': sos = zpk2sos(*zpk) return sosfiltfilt(sos, x, axis, padtype, padlen) def test_basic(self): zpk = tf2zpk([1, 2, 3], [1, 2, 3]) out = self.filtfilt(zpk, np.arange(12)) assert_allclose(out, arange(12), atol=1e-11) def test_sine(self): rate = 2000 t = np.linspace(0, 1.0, rate + 1) # A signal with low frequency and a high frequency. xlow = np.sin(5 * 2 * np.pi * t) xhigh = np.sin(250 * 2 * np.pi * t) x = xlow + xhigh zpk = butter(8, 0.125, output='zpk') # r is the magnitude of the largest pole. r = np.abs(zpk[1]).max() eps = 1e-5 # n estimates the number of steps for the # transient to decay by a factor of eps. n = int(np.ceil(np.log(eps) / np.log(r))) # High order lowpass filter... y = self.filtfilt(zpk, x, padlen=n) # Result should be just xlow. err = np.abs(y - xlow).max() assert_(err < 1e-4) # A 2D case. x2d = np.vstack([xlow, xlow + xhigh]) y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1) assert_equal(y2d.shape, x2d.shape) err = np.abs(y2d - xlow).max() assert_(err < 1e-4) # Use the previous result to check the use of the axis keyword. # (Regression test for ticket #1620) y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0) assert_equal(y2d, y2dt.T) def test_axis(self): # Test the 'axis' keyword on a 3D array. x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12) zpk = butter(3, 0.125, output='zpk') y0 = self.filtfilt(zpk, x, padlen=0, axis=0) y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1) assert_array_equal(y0, np.swapaxes(y1, 0, 1)) y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2) assert_array_equal(y0, np.swapaxes(y2, 0, 2)) def test_acoeff(self): if self.filtfilt_kind != 'tf': return # only necessary for TF # test for 'a' coefficient as single number out = signal.filtfilt([.5, .5], 1, np.arange(10)) assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14) def test_gust_simple(self): if self.filtfilt_kind != 'tf': pytest.skip('gust only implemented for TF systems') # The input array has length 2. The exact solution for this case # was computed "by hand". 
        x = np.array([1.0, 2.0])
        b = np.array([0.5])
        a = np.array([1.0, -0.5])
        y, z1, z2 = _filtfilt_gust(b, a, x)
        assert_allclose([z1[0], z2[0]],
                        [0.3*x[0] + 0.2*x[1],
                         0.2*x[0] + 0.3*x[1]])
        assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1],
                            0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]])

    def test_gust_scalars(self):
        if self.filtfilt_kind != 'tf':
            pytest.skip('gust only implemented for TF systems')

        # The filter coefficients are both scalars, so the filter simply
        # multiplies its input by b/a. When it is used in filtfilt, the
        # factor is (b/a)**2.
        x = np.arange(12)
        b = 3.0
        a = 2.0
        y = filtfilt(b, a, x, method="gust")
        expected = (b/a)**2 * x
        assert_allclose(y, expected)


class TestSOSFiltFilt(TestFiltFilt):
    filtfilt_kind = 'sos'

    def test_equivalence(self):
        """Test equivalence between sosfiltfilt and filtfilt"""
        x = np.random.RandomState(0).randn(1000)
        for order in range(1, 6):
            zpk = signal.butter(order, 0.35, output='zpk')
            b, a = zpk2tf(*zpk)
            sos = zpk2sos(*zpk)
            y = filtfilt(b, a, x)
            y_sos = sosfiltfilt(sos, x)
            assert_allclose(y, y_sos, atol=1e-12, err_msg='order=%s' % order)


def filtfilt_gust_opt(b, a, x):
    """
    An alternative implementation of filtfilt with Gustafsson edges.

    This function computes the same result as
    `scipy.signal.signaltools._filtfilt_gust`, but only 1-d arrays
    are accepted. The problem is solved using `fmin` from `scipy.optimize`.
    `_filtfilt_gust` is significantly faster than this implementation.
    """
    def filtfilt_gust_opt_func(ics, b, a, x):
        """Objective function used in filtfilt_gust_opt."""
        m = max(len(a), len(b)) - 1
        z0f = ics[:m]
        z0b = ics[m:]
        y_f = lfilter(b, a, x, zi=z0f)[0]
        y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1]

        y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]
        y_bf = lfilter(b, a, y_b, zi=z0f)[0]
        value = np.sum((y_fb - y_bf)**2)
        return value

    m = max(len(a), len(b)) - 1
    zi = lfilter_zi(b, a)
    ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi))
    result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x),
                  xtol=1e-10, ftol=1e-12,
                  maxfun=10000, maxiter=10000,
                  full_output=True, disp=False)
    opt, fopt, niter, funcalls, warnflag = result
    if warnflag > 0:
        raise RuntimeError("minimization failed in filtfilt_gust_opt: "
                           "warnflag=%d" % warnflag)
    z0f = opt[:m]
    z0b = opt[m:]

    # Apply the forward-backward filter using the computed initial
    # conditions.
    y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]
    y = lfilter(b, a, y_b, zi=z0f)[0]

    return y, z0f, z0b


def check_filtfilt_gust(b, a, shape, axis, irlen=None):
    # Generate x, the data to be filtered.
    np.random.seed(123)
    x = np.random.randn(*shape)

    # Apply filtfilt to x. This is the main calculation to be checked.
    y = filtfilt(b, a, x, axis=axis, method="gust", irlen=irlen)

    # Also call the private function so we can test the ICs.
    yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)

    # filtfilt_gust_opt is an independent implementation that gives the
    # expected result, but it only handles 1-d arrays, so use some looping
    # and reshaping shenanigans to create the expected output arrays.
xx = np.swapaxes(x, axis, -1) out_shape = xx.shape[:-1] yo = np.empty_like(xx) m = max(len(a), len(b)) - 1 zo1 = np.empty(out_shape + (m,)) zo2 = np.empty(out_shape + (m,)) for indx in product(*[range(d) for d in out_shape]): yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx]) yo = np.swapaxes(yo, -1, axis) zo1 = np.swapaxes(zo1, -1, axis) zo2 = np.swapaxes(zo2, -1, axis) assert_allclose(y, yo, rtol=1e-9, atol=1e-10) assert_allclose(yg, yo, rtol=1e-9, atol=1e-10) assert_allclose(zg1, zo1, rtol=1e-9, atol=1e-10) assert_allclose(zg2, zo2, rtol=1e-9, atol=1e-10) def test_choose_conv_method(): for mode in ['valid', 'same', 'full']: for ndims in [1, 2]: n, k, true_method = 8, 6, 'direct' x = np.random.randn(*((n,) * ndims)) h = np.random.randn(*((k,) * ndims)) method = choose_conv_method(x, h, mode=mode) assert_equal(method, true_method) method_try, times = choose_conv_method(x, h, mode=mode, measure=True) assert_(method_try in {'fft', 'direct'}) assert_(type(times) is dict) assert_('fft' in times.keys() and 'direct' in times.keys()) n = 10 for not_fft_conv_supp in ["complex256", "complex192"]: if hasattr(np, not_fft_conv_supp): x = np.ones(n, dtype=not_fft_conv_supp) h = x.copy() assert_equal(choose_conv_method(x, h, mode=mode), 'direct') x = np.array([2**51], dtype=np.int64) h = x.copy() assert_equal(choose_conv_method(x, h, mode=mode), 'direct') x = [Decimal(3), Decimal(2)] h = [Decimal(1), Decimal(4)] assert_equal(choose_conv_method(x, h, mode=mode), 'direct') def test_filtfilt_gust(): # Design a filter. z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk') # Find the approximate impulse response length of the filter. eps = 1e-10 r = np.max(np.abs(p)) approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) np.random.seed(123) b, a = zpk2tf(z, p, k) for irlen in [None, approx_impulse_len]: signal_len = 5 * approx_impulse_len # 1-d test case check_filtfilt_gust(b, a, (signal_len,), 0, irlen) # 3-d test case; test each axis. for axis in range(3): shape = [2, 2, 2] shape[axis] = signal_len check_filtfilt_gust(b, a, shape, axis, irlen) # Test case with length less than 2*approx_impulse_len. # In this case, `filtfilt_gust` should behave the same as if # `irlen=None` was given. length = 2*approx_impulse_len - 50 check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len) class TestDecimate(object): def test_bad_args(self): x = np.arange(12) assert_raises(TypeError, signal.decimate, x, q=0.5, n=1) assert_raises(TypeError, signal.decimate, x, q=2, n=0.5) def test_basic_IIR(self): x = np.arange(12) y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round() assert_array_equal(y, x[::2]) def test_basic_FIR(self): x = np.arange(12) y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round() assert_array_equal(y, x[::2]) def test_shape(self): # Regression test for ticket #1480. 
z = np.zeros((30, 30)) d0 = signal.decimate(z, 2, axis=0, zero_phase=False) assert_equal(d0.shape, (15, 30)) d1 = signal.decimate(z, 2, axis=1, zero_phase=False) assert_equal(d1.shape, (30, 15)) def test_phaseshift_FIR(self): with suppress_warnings() as sup: sup.filter(BadCoefficients, "Badly conditioned filter") self._test_phaseshift(method='fir', zero_phase=False) def test_zero_phase_FIR(self): with suppress_warnings() as sup: sup.filter(BadCoefficients, "Badly conditioned filter") self._test_phaseshift(method='fir', zero_phase=True) def test_phaseshift_IIR(self): self._test_phaseshift(method='iir', zero_phase=False) def test_zero_phase_IIR(self): self._test_phaseshift(method='iir', zero_phase=True) def _test_phaseshift(self, method, zero_phase): rate = 120 rates_to = [15, 20, 30, 40] # q = 8, 6, 4, 3 t_tot = int(100) # Need to let antialiasing filters settle t = np.arange(rate*t_tot+1) / float(rate) # Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts freqs = np.array(rates_to) * 0.8 / 2 d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t) * signal.windows.tukey(t.size, 0.1)) for rate_to in rates_to: q = rate // rate_to t_to = np.arange(rate_to*t_tot+1) / float(rate_to) d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to) * signal.windows.tukey(t_to.size, 0.1)) # Set up downsampling filters, match v0.17 defaults if method == 'fir': n = 30 system = signal.dlti(signal.firwin(n + 1, 1. / q, window='hamming'), 1.) elif method == 'iir': n = 8 wc = 0.8*np.pi/q system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi)) # Calculate expected phase response, as unit complex vector if zero_phase is False: _, h_resps = signal.freqz(system.num, system.den, freqs/rate*2*np.pi) h_resps /= np.abs(h_resps) else: h_resps = np.ones_like(freqs) y_resamps = signal.decimate(d.real, q, n, ftype=system, zero_phase=zero_phase) # Get phase from complex inner product, like CSD h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1) h_resamps /= np.abs(h_resamps) subnyq = freqs < 0.5*rate_to # Complex vectors should be aligned, only compare below nyquist assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0, atol=1e-3, rtol=1e-3) def test_auto_n(self): # Test that our value of n is a reasonable choice (depends on # the downsampling factor) sfreq = 100. n = 1000 t = np.arange(n) / sfreq # will alias for decimations (>= 15) x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) 
* t) assert_allclose(np.linalg.norm(x), 1., rtol=1e-3) x_out = signal.decimate(x, 30, ftype='fir') assert_array_less(np.linalg.norm(x_out), 0.01) class TestHilbert(object): def test_bad_args(self): x = np.array([1.0 + 0.0j]) assert_raises(ValueError, hilbert, x) x = np.arange(8.0) assert_raises(ValueError, hilbert, x, N=0) def test_hilbert_theoretical(self): # test cases by Ariel Rokem decimal = 14 pi = np.pi t = np.arange(0, 2 * pi, pi / 256) a0 = np.sin(t) a1 = np.cos(t) a2 = np.sin(2 * t) a3 = np.cos(2 * t) a = np.vstack([a0, a1, a2, a3]) h = hilbert(a) h_abs = np.abs(h) h_angle = np.angle(h) h_real = np.real(h) # The real part should be equal to the original signals: assert_almost_equal(h_real, a, decimal) # The absolute value should be one everywhere, for this input: assert_almost_equal(h_abs, np.ones(a.shape), decimal) # For the 'slow' sine - the phase should go from -pi/2 to pi/2 in # the first 256 bins: assert_almost_equal(h_angle[0, :256], np.arange(-pi / 2, pi / 2, pi / 256), decimal) # For the 'slow' cosine - the phase should go from 0 to pi in the # same interval: assert_almost_equal( h_angle[1, :256], np.arange(0, pi, pi / 256), decimal) # The 'fast' sine should make this phase transition in half the time: assert_almost_equal(h_angle[2, :128], np.arange(-pi / 2, pi / 2, pi / 128), decimal) # Ditto for the 'fast' cosine: assert_almost_equal( h_angle[3, :128], np.arange(0, pi, pi / 128), decimal) # The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia assert_almost_equal(h[1].imag, a0, decimal) def test_hilbert_axisN(self): # tests for axis and N arguments a = np.arange(18).reshape(3, 6) # test axis aa = hilbert(a, axis=-1) assert_equal(hilbert(a.T, axis=0), aa.T) # test 1d assert_almost_equal(hilbert(a[0]), aa[0], 14) # test N aan = hilbert(a, N=20, axis=-1) assert_equal(aan.shape, [3, 20]) assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 3]) # the next test is just a regression test, # no idea whether numbers make sense a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j, 1.000000000000000e+00 - 2.047794505137069j, 1.999999999999999e+00 - 2.244055555687583j, 3.000000000000000e+00 - 1.262750302935009j, 4.000000000000000e+00 - 1.066489252384493j, 5.000000000000000e+00 + 2.918022706971047j, 8.881784197001253e-17 + 3.845658908989067j, -9.444121133484362e-17 + 0.985044202202061j, -1.776356839400251e-16 + 1.332257797702019j, -3.996802888650564e-16 + 0.501905089898885j, 1.332267629550188e-16 + 0.668696078880782j, -1.192678053963799e-16 + 0.235487067862679j, -1.776356839400251e-16 + 0.286439612812121j, 3.108624468950438e-16 + 0.031676888064907j, 1.332267629550188e-16 - 0.019275656884536j, -2.360035624836702e-16 - 0.1652588660287j, 0.000000000000000e+00 - 0.332049855010597j, 3.552713678800501e-16 - 0.403810179797771j, 8.881784197001253e-17 - 0.751023775297729j, 9.444121133484362e-17 - 0.79252210110103j]) assert_almost_equal(aan[0], a0hilb, 14, 'N regression') class TestHilbert2(object): def test_bad_args(self): # x must be real. x = np.array([[1.0 + 0.0j]]) assert_raises(ValueError, hilbert2, x) # x must be rank 2. x = np.arange(24).reshape(2, 3, 4) assert_raises(ValueError, hilbert2, x) # Bad value for N. x = np.arange(16).reshape(4, 4) assert_raises(ValueError, hilbert2, x, N=0) assert_raises(ValueError, hilbert2, x, N=(2, 0)) assert_raises(ValueError, hilbert2, x, N=(2,)) class TestPartialFractionExpansion(object): def test_invresz_one_coefficient_bug(self): # Regression test for issue in gh-4646. 
r = [1] p = [2] k = [0] a_expected = [1.0, 0.0] b_expected = [1.0, -2.0] a_observed, b_observed = invresz(r, p, k) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) def test_invres_distinct_roots(self): # This test was inspired by github issue 2496. r = [3 / 10, -1 / 6, -2 / 15] p = [0, -2, -5] k = [] a_expected = [1, 3] b_expected = [1, 7, 10, 0] a_observed, b_observed = invres(r, p, k) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum') # With the default tolerance, the rtype does not matter # for this example. for rtype in rtypes: a_observed, b_observed = invres(r, p, k, rtype=rtype) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) # With unrealistically large tolerances, repeated roots may be inferred # and the rtype comes into play. ridiculous_tolerance = 1e10 for rtype in rtypes: a, b = invres(r, p, k, tol=ridiculous_tolerance, rtype=rtype) def test_invres_repeated_roots(self): r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] p = [0, -2, -2, -5] k = [] a_expected = [1, 3] b_expected = [1, 9, 24, 20, 0] rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum') for rtype in rtypes: a_observed, b_observed = invres(r, p, k, rtype=rtype) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) def test_invres_bad_rtype(self): r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] p = [0, -2, -2, -5] k = [] assert_raises(ValueError, invres, r, p, k, rtype='median') class TestVectorstrength(object): def test_single_1dperiod(self): events = np.array([.5]) period = 5. targ_strength = 1. targ_phase = .1 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_single_2dperiod(self): events = np.array([.5]) period = [1, 2, 5.] targ_strength = [1.] * 3 targ_phase = np.array([.5, .25, .1]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_array_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_equal_1dperiod(self): events = np.array([.25, .25, .25, .25, .25, .25]) period = 2 targ_strength = 1. targ_phase = .125 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_equal_2dperiod(self): events = np.array([.25, .25, .25, .25, .25, .25]) period = [1, 2, ] targ_strength = [1.] * 2 targ_phase = np.array([.25, .125]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_spaced_1dperiod(self): events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) period = 1 targ_strength = 1. targ_phase = .1 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_spaced_2dperiod(self): events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) period = [1, .5] targ_strength = [1.] 
* 2 targ_phase = np.array([.1, .2]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_partial_1dperiod(self): events = np.array([.25, .5, .75]) period = 1 targ_strength = 1. / 3. targ_phase = .5 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_partial_2dperiod(self): events = np.array([.25, .5, .75]) period = [1., 1., 1., 1.] targ_strength = [1. / 3.] * 4 targ_phase = np.array([.5, .5, .5, .5]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_opposite_1dperiod(self): events = np.array([0, .25, .5, .75]) period = 1. targ_strength = 0 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) def test_opposite_2dperiod(self): events = np.array([0, .25, .5, .75]) period = [1.] * 10 targ_strength = [0.] * 10 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) def test_2d_events_ValueError(self): events = np.array([[1, 2]]) period = 1. assert_raises(ValueError, vectorstrength, events, period) def test_2d_period_ValueError(self): events = 1. period = np.array([[1]]) assert_raises(ValueError, vectorstrength, events, period) def test_zero_period_ValueError(self): events = 1. period = 0 assert_raises(ValueError, vectorstrength, events, period) def test_negative_period_ValueError(self): events = 1. period = -1 assert_raises(ValueError, vectorstrength, events, period) class TestSOSFilt(object): # For sosfilt we only test a single datatype. Since sosfilt wraps # to lfilter under the hood, it's hopefully good enough to ensure # lfilter is extensively tested. dt = np.float64 # The test_rank* tests are pulled from _TestLinearFilter def test_rank1(self): x = np.linspace(0, 5, 6).astype(self.dt) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, -0.5]).astype(self.dt) # Test simple IIR y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(self.dt) assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) # Test simple FIR b = np.array([1, 1]).astype(self.dt) # NOTE: This was changed (rel. to TestLinear...) 
to add a pole @zero: a = np.array([1, 0]).astype(self.dt) y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt) assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) b = [1, 1, 0] a = [1, 0, 0] x = np.ones(8) sos = np.concatenate((b, a)) sos.shape = (1, 6) y = sosfilt(sos, x) assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2]) def test_rank2(self): shape = (4, 3) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) x = x.astype(self.dt) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, 0.5]).astype(self.dt) y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]], dtype=self.dt) y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12], [18, -16, 18]], dtype=self.dt) y = sosfilt(tf2sos(b, a), x, axis=0) assert_array_almost_equal(y_r2_a0, y) y = sosfilt(tf2sos(b, a), x, axis=1) assert_array_almost_equal(y_r2_a1, y) def test_rank3(self): shape = (4, 3, 2) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, 0.5]).astype(self.dt) # Test last axis y = sosfilt(tf2sos(b, a), x) for i in range(x.shape[0]): for j in range(x.shape[1]): assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j])) def test_initial_conditions(self): b1, a1 = signal.butter(2, 0.25, 'low') b2, a2 = signal.butter(2, 0.75, 'low') b3, a3 = signal.butter(2, 0.75, 'low') b = np.convolve(np.convolve(b1, b2), b3) a = np.convolve(np.convolve(a1, a2), a3) sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3])) x = np.random.rand(50) # Stopping filtering and continuing y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6)) y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]] assert_allclose(y_true, lfilter(b, a, x)) y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 2))) y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]] assert_allclose(y_true, y_sos) # Use a step function zi = sosfilt_zi(sos) x = np.ones(8) y, zf = sosfilt(sos, x, zi=zi) assert_allclose(y, np.ones(8)) assert_allclose(zf, zi) # Initial condition shape matching x.shape = (1, 1) + x.shape # 3D assert_raises(ValueError, sosfilt, sos, x, zi=zi) zi_nd = zi.copy() zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1]) assert_raises(ValueError, sosfilt, sos, x, zi=zi_nd[:, :, :, [0, 1, 1]]) y, zf = sosfilt(sos, x, zi=zi_nd) assert_allclose(y[0, 0], np.ones(8)) assert_allclose(zf[:, 0, 0, :], zi) def test_initial_conditions_3d_axis1(self): # Test the use of zi when sosfilt is applied to axis 1 of a 3-d input. # Input array is x. x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3)) # Design a filter in ZPK format and convert to SOS zpk = signal.butter(6, 0.35, output='zpk') sos = zpk2sos(*zpk) nsections = sos.shape[0] # Filter along this axis. axis = 1 # Initial conditions, all zeros. shp = list(x.shape) shp[axis] = 2 shp = [nsections] + shp z0 = np.zeros(shp) # Apply the filter to x. yf, zf = sosfilt(sos, x, axis=axis, zi=z0) # Apply the filter to x in two stages. y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0) y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1) # y should equal yf, and z2 should equal zf. 
y = np.concatenate((y1, y2), axis=axis) assert_allclose(y, yf, rtol=1e-10, atol=1e-13) assert_allclose(z2, zf, rtol=1e-10, atol=1e-13) # let's try the "step" initial condition zi = sosfilt_zi(sos) zi.shape = [nsections, 1, 2, 1] zi = zi * x[:, 0:1, :] y = sosfilt(sos, x, axis=axis, zi=zi)[0] # check it against the TF form b, a = zpk2tf(*zpk) zi = lfilter_zi(b, a) zi.shape = [1, zi.size, 1] zi = zi * x[:, 0:1, :] y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0] assert_allclose(y, y_tf, rtol=1e-10, atol=1e-13) def test_bad_zi_shape(self): # The shape of zi is checked before using any values in the # arguments, so np.empty is fine for creating the arguments. x = np.empty((3, 15, 3)) sos = np.empty((4, 6)) zi = np.empty((4, 3, 3, 2)) # Correct shape is (4, 3, 2, 3) assert_raises(ValueError, sosfilt, sos, x, zi=zi, axis=1) def test_sosfilt_zi(self): sos = signal.butter(6, 0.2, output='sos') zi = sosfilt_zi(sos) y, zf = sosfilt(sos, np.ones(40), zi=zi) assert_allclose(zf, zi, rtol=1e-13) # Expected steady state value of the step response of this filter: ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1)) assert_allclose(y, ss, rtol=1e-13) class TestDeconvolve(object): def test_basic(self): # From docstring example original = [0, 1, 0, 0, 1, 1, 0, 0] impulse_response = [2, 1] recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0] recovered, remainder = signal.deconvolve(recorded, impulse_response) assert_allclose(recovered, original)
38.018442
98
0.526704
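The record above is a SciPy signal-processing test module covering decimate, hilbert and sosfilt. A minimal usage sketch of that same public API follows; it is illustrative only (not part of the record) and assumes NumPy and a recent SciPy are installed.

import numpy as np
from scipy import signal

# A 1 kHz tone sampled at 48 kHz, decimated by 4 with an IIR anti-alias filter.
fs = 48000
t = np.arange(fs) / fs
x = np.sin(2 * np.pi * 1000 * t)
y = signal.decimate(x, 4, ftype='iir', zero_phase=True)

# Analytic signal via the Hilbert transform; the envelope of a pure tone is ~1.
envelope = np.abs(signal.hilbert(x))

# Second-order-sections filtering seeded with steady-state initial conditions.
sos = signal.butter(6, 0.2, output='sos')
zi = signal.sosfilt_zi(sos) * x[0]
filtered, zf = signal.sosfilt(sos, x, zi=zi)
print(y.shape, envelope.mean(), filtered.shape)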
79575da42b09eda2b7d67123e8b4e8fbb79ee8b1
2,556
py
Python
qark/utils.py
lugang21506/qark
ac12f26373911316937fa372f45b9fadb763c5c7
[ "Apache-2.0", "OpenSSL" ]
null
null
null
qark/utils.py
lugang21506/qark
ac12f26373911316937fa372f45b9fadb763c5c7
[ "Apache-2.0", "OpenSSL" ]
null
null
null
qark/utils.py
lugang21506/qark
ac12f26373911316937fa372f45b9fadb763c5c7
[ "Apache-2.0", "OpenSSL" ]
null
null
null
import os
from functools import partial
import json
import urllib.request
import time
import random
from urllib.error import URLError

global TASK_ID
TASK_ID = None


def create_directories_to_path(path):
    """Create directories to a path if they don't exist."""
    try:
        os.makedirs(os.path.dirname(path))
    except Exception:  # directory already exists
        pass


def file_has_extension(extension, file_path):
    return os.path.splitext(file_path.lower())[1] == extension.lower()


is_java_file = partial(file_has_extension, ".java")

httpHeaders = {
    "Accept": "application/json, text/javascript, */*; q=0.01",
    "X-Requested-With": "XMLHttpRequest",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0",
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
}

rdata = {
    "Function": "appTestMain.py",
    "ErrorCode": 1000,
    "Result": "",
    "ExecDesc": "",
    "FailReason": "",
    "Advice": "",
    "StartTime": int(time.time()),
    "EndTime": int(time.time()),
    "State": 4,
}


def RsltPush(taskId, rdata, charset='utf-8', reqnum=2):
    # Push function modules and script execution results to the cloud via the HTTP API
    if taskId is None:
        print("taskid is None")
        return
    DATA = json.JSONEncoder().encode(rdata).encode('utf-8')
    # url = 'http://10.120.99.202:8090/api/v1/LockTest/LockRsltPush=' + taskId
    url = 'http://10.124.103.14/api/v1/LockTest/LockRsltPush?taskid=' + taskId
    req = urllib.request.Request(url, data=DATA, headers=httpHeaders, method='POST')
    info = None
    try:
        response = urllib.request.urlopen(req)
        info = response.read().decode(charset, errors="ignore")
    except URLError as e:
        if hasattr(e, 'reason'):
            print("[RsltPush]Failed to reach a server, Reason:", e.reason, flush=True)
        elif hasattr(e, 'code'):
            print("[RsltPush]The server couldn't fulfill the request, errcode:", e.code, flush=True)
            if reqnum > 0 and 500 <= e.code <= 600:
                time.sleep(random.randint(5, 11))
                RsltPush(taskId, rdata, charset=charset, reqnum=reqnum - 1)


def environ_path_variable_exists(variable_name):
    """Determines if the os.environ variable exists and is a valid path.

    :rtype: bool
    """
    try:
        return os.path.exists(os.environ[variable_name])
    except KeyError:
        return False
30.795181
98
0.61698
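The qark/utils.py record above derives file-type predicates from a generic helper with functools.partial. A short, self-contained sketch of that pattern (is_xml_file is a hypothetical extra predicate, not from the record):

import os
from functools import partial

def file_has_extension(extension, file_path):
    # Case-insensitive check of the file suffix, mirroring the record's helper.
    return os.path.splitext(file_path.lower())[1] == extension.lower()

is_java_file = partial(file_has_extension, ".java")
is_xml_file = partial(file_has_extension, ".xml")   # hypothetical extra predicate

print(is_java_file("Example.JAVA"))          # True
print(is_xml_file("AndroidManifest.xml"))    # True
print(is_java_file("build.gradle"))          # False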
79575ec77657ca8dc8139465465c03441c10e754
2,438
py
Python
code/extract_data.py
Zimiao1025/BioSeq-BLM
8e833d9a767d2585bbf1fbee987a065fc421e8e6
[ "BSD-2-Clause" ]
2
2021-12-31T09:12:02.000Z
2022-03-21T13:45:57.000Z
code/extract_data.py
Zimiao1025/BioSeq-BLM
8e833d9a767d2585bbf1fbee987a065fc421e8e6
[ "BSD-2-Clause" ]
null
null
null
code/extract_data.py
Zimiao1025/BioSeq-BLM
8e833d9a767d2585bbf1fbee987a065fc421e8e6
[ "BSD-2-Clause" ]
null
null
null
import os.path import sys t = sys.argv[1] c = '../results/batch/' + str(t) + '/' for a in os.walk(c): for dirname in a[1]: if os.path.exists(c + dirname + '/param.txt'): if os.path.exists(c + dirname + '/cv_model.model'): with open(c + dirname + '/final_results.txt') as f, open(c + dirname + '/cv_model.model') as m,\ open(c + dirname + '/param.txt') as p, open(c + 'results.txt', 'a') as w: lines = f.readlines() m_lines = m.readlines() p_lines = p.readlines() for line in lines[1:6]: dirname += ' ' + line.strip().split(' ')[-1] if t.split('/')[1] == 'svm': param = m_lines[0].strip().split(',') dirname += ' ' + param[1] + ' ' + param[3] else: param = m_lines[-1].strip().split(',') dirname += ' ' + param[0] dirname += ' ' + p_lines[0] w.writelines(dirname + '\n') else: with open(c + dirname + '/cv_eval_results.txt') as f, open(c + dirname + '/param.txt') as p, \ open(c + 'results.txt', 'a') as w: lines = f.readlines() p_lines = p.readlines() for line in lines[1:6]: dirname += ' ' + line.strip().split(' ')[-1] if t.split('/')[1] == 'rf': dirname += ' ' + 'None' dirname += ' ' + p_lines[0] w.writelines(dirname + '\n') else: with open(c + dirname + '/cv_eval_results.txt') as f, open(c + dirname + '/cv_model.model') as m,\ open(c + 'results.txt', 'a') as w: lines = f.readlines() m_lines = m.readlines() for line in lines[1:6]: dirname += ' ' + line.strip().split(' ')[-1] if t.split('/')[1] == 'svm': param = m_lines[0].strip().split(',') dirname += ' ' + param[1] + ' ' + param[3] else: param = m_lines[-1].strip().split(',') dirname += ' ' + param[0] dirname += ' ' + 'None' w.writelines(dirname + '\n')
46
112
0.389664
79575ef07fefd6db2b6e9b2cb3d550bb0bc9ac3a
2,275
py
Python
tests/test_PropertyReader.py
tku137/JPKay
2be812263c665207bca7664c673d5aef635ea3c5
[ "MIT" ]
1
2016-08-11T05:17:41.000Z
2016-08-11T05:17:41.000Z
tests/test_PropertyReader.py
tku137/JPKay
2be812263c665207bca7664c673d5aef635ea3c5
[ "MIT" ]
null
null
null
tests/test_PropertyReader.py
tku137/JPKay
2be812263c665207bca7664c673d5aef635ea3c5
[ "MIT" ]
null
null
null
# coding=utf-8 import pytest import json from numpy import array from JPKay.core.data_structures import Properties # noinspection PyShadowingNames @pytest.mark.usefixtures("sample_force_file", "general_prop_dict", "segments_prop_dict") class TestPropertyReader: def test_load_java_props(self, sample_force_file, general_prop_dict): props = Properties(file_path=sample_force_file) loaded_props = props.general with open(general_prop_dict) as infile: original_props = json.load(infile) assert original_props == loaded_props # noinspection PyPep8Naming def test_get_vDeflection_channel_number(self, sample_force_file): props = Properties(file_path=sample_force_file) channel = props.get_channel_numbers() assert channel["vDeflection"] == "1" assert channel["hDeflection"] == "2" assert channel["height"] == "0" assert channel["capacitiveSensorHeight"] == "3" def test_extract_factors(self, sample_force_file): props = Properties(file_path=sample_force_file) props.extract_conversion_factors() assert props.conversion_factors["vDeflection"]["raw multiplier"] == array(float("5.525411033343059E-9")) assert props.conversion_factors["vDeflection"]["raw offset"] == array(float("-6.075877326676198E-4")) assert props.conversion_factors["vDeflection"]["distance multiplier"] == array(float("7.730641603896163E-8")) assert props.conversion_factors["vDeflection"]["distance offset"] == array(float("0.0")) assert props.conversion_factors["vDeflection"]["force multiplier"] == array(float("0.01529211140472191")) assert props.conversion_factors["vDeflection"]["force offset"] == array(float("0.0")) def test_extract_specs(self, sample_force_file): props = Properties(file_path=sample_force_file) props.extract_specs() assert props.units["vDeflection"] == "N" def test_extract_segment_props(self, sample_force_file, segments_prop_dict): props = Properties(file_path=sample_force_file) loaded_props = props.segments with open(segments_prop_dict) as infile: original_props = json.load(infile) assert original_props == loaded_props
43.75
117
0.716044
79575f67cbe003dd19b415f1c4dbb71a4b4748ba
7,320
py
Python
third_party/stdlib/getopt.py
ralic/grumpy3
a471f7ba64167d5812c0f6701380f9f71fa937c3
[ "Apache-2.0" ]
null
null
null
third_party/stdlib/getopt.py
ralic/grumpy3
a471f7ba64167d5812c0f6701380f9f71fa937c3
[ "Apache-2.0" ]
null
null
null
third_party/stdlib/getopt.py
ralic/grumpy3
a471f7ba64167d5812c0f6701380f9f71fa937c3
[ "Apache-2.0" ]
null
null
null
"""Parser for command line options. This module helps scripts to parse the command line arguments in sys.argv. It supports the same conventions as the Unix getopt() function (including the special meanings of arguments of the form `-' and `--'). Long options similar to those supported by GNU software may be used as well via an optional third argument. This module provides two functions and an exception: getopt() -- Parse command line options gnu_getopt() -- Like getopt(), but allow option and non-option arguments to be intermixed. GetoptError -- exception (class) raised with 'opt' attribute, which is the option involved with the exception. """ # Long option support added by Lars Wirzenius <liw@iki.fi>. # # Gerrit Holl <gerrit@nl.linux.org> moved the string-based exceptions # to class-based exceptions. # # Peter Astrand <astrand@lysator.liu.se> added gnu_getopt(). # # TODO for gnu_getopt(): # # - GNU getopt_long_only mechanism # - allow the caller to specify ordering # - RETURN_IN_ORDER option # - GNU extension with '-' as first character of option string # - optional arguments, specified by double colons # - an option string with a W followed by semicolon should # treat "-W foo" as "--foo" __all__ = ["GetoptError","error","getopt","gnu_getopt"] import os class GetoptError(Exception): opt = '' msg = '' def __init__(self, msg, opt=''): self.msg = msg self.opt = opt Exception.__init__(self, msg, opt) def __str__(self): return self.msg error = GetoptError # backward compatibility def getopt(args, shortopts, longopts = []): """getopt(args, options[, long_options]) -> opts, args Parses command line options and parameter list. args is the argument list to be parsed, without the leading reference to the running program. Typically, this means "sys.argv[1:]". shortopts is the string of option letters that the script wants to recognize, with options that require an argument followed by a colon (i.e., the same format that Unix getopt() uses). If specified, longopts is a list of strings with the names of the long options which should be supported. The leading '--' characters should not be included in the option name. Options which require an argument should be followed by an equal sign ('='). The return value consists of two elements: the first is a list of (option, value) pairs; the second is the list of program arguments left after the option list was stripped (this is a trailing slice of the first argument). Each option-and-value pair returned has the option as its first element, prefixed with a hyphen (e.g., '-x'), and the option argument as its second element, or an empty string if the option has no argument. The options occur in the list in the same order in which they were found, thus allowing multiple occurrences. Long and short options may be mixed. """ opts = [] if type(longopts) == type(""): longopts = [longopts] else: longopts = list(longopts) while args and args[0].startswith('-') and args[0] != '-': if args[0] == '--': args = args[1:] break if args[0].startswith('--'): opts, args = do_longs(opts, args[0][2:], longopts, args[1:]) else: opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:]) return opts, args def gnu_getopt(args, shortopts, longopts = []): """getopt(args, options[, long_options]) -> opts, args This function works like getopt(), except that GNU style scanning mode is used by default. This means that option and non-option arguments may be intermixed. The getopt() function stops processing options as soon as a non-option argument is encountered. 
If the first character of the option string is `+', or if the environment variable POSIXLY_CORRECT is set, then option processing stops as soon as a non-option argument is encountered. """ opts = [] prog_args = [] if isinstance(longopts, str): longopts = [longopts] else: longopts = list(longopts) # Allow options after non-option arguments? if shortopts.startswith('+'): shortopts = shortopts[1:] all_options_first = True elif os.environ.get("POSIXLY_CORRECT"): all_options_first = True else: all_options_first = False while args: if args[0] == '--': prog_args += args[1:] break if args[0][:2] == '--': opts, args = do_longs(opts, args[0][2:], longopts, args[1:]) elif args[0][:1] == '-' and args[0] != '-': opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:]) else: if all_options_first: prog_args += args break else: prog_args.append(args[0]) args = args[1:] return opts, prog_args def do_longs(opts, opt, longopts, args): try: i = opt.index('=') except ValueError: optarg = None else: opt, optarg = opt[:i], opt[i+1:] has_arg, opt = long_has_args(opt, longopts) if has_arg: if optarg is None: if not args: raise GetoptError('option --%s requires argument' % opt, opt) optarg, args = args[0], args[1:] elif optarg is not None: raise GetoptError('option --%s must not have an argument' % opt, opt) opts.append(('--' + opt, optarg or '')) return opts, args # Return: # has_arg? # full option name def long_has_args(opt, longopts): possibilities = [o for o in longopts if o.startswith(opt)] if not possibilities: raise GetoptError('option --%s not recognized' % opt, opt) # Is there an exact match? if opt in possibilities: return False, opt elif opt + '=' in possibilities: return True, opt # No exact match, so better be unique. if len(possibilities) > 1: # XXX since possibilities contains all valid continuations, might be # nice to work them into the error msg raise GetoptError('option --%s not a unique prefix' % opt, opt) assert len(possibilities) == 1 unique_match = possibilities[0] has_arg = unique_match.endswith('=') if has_arg: unique_match = unique_match[:-1] return has_arg, unique_match def do_shorts(opts, optstring, shortopts, args): while optstring != '': opt, optstring = optstring[0], optstring[1:] if short_has_arg(opt, shortopts): if optstring == '': if not args: raise GetoptError('option -%s requires argument' % opt, opt) optstring, args = args[0], args[1:] optarg, optstring = optstring, '' else: optarg = '' opts.append(('-' + opt, optarg)) return opts, args def short_has_arg(opt, shortopts): for i in range(len(shortopts)): if opt == shortopts[i] != ':': return shortopts.startswith(':', i+1) raise GetoptError('option -%s not recognized' % opt, opt) if __name__ == '__main__': import sys print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"]))
34.691943
77
0.629918
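Since the record above is the stdlib getopt parser itself, a brief usage sketch may help; the option strings below are illustrative.

import getopt

argv = ["-a", "alpha-value", "-b", "--beta", "trailing-arg"]
opts, args = getopt.getopt(argv, "a:b", ["alpha=", "beta"])
print(opts)   # [('-a', 'alpha-value'), ('-b', ''), ('--beta', '')]
print(args)   # ['trailing-arg']

# gnu_getopt lets options and non-option arguments be intermixed
# (unless POSIXLY_CORRECT is set or the option string starts with '+').
opts, args = getopt.gnu_getopt(["file1", "-b", "file2"], "a:b", ["beta"])
print(opts, args)   # [('-b', '')] ['file1', 'file2']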
795760b524676f13219ab3967c8d67f9d0dd32a4
5,549
py
Python
tests/system/providers/ec2/__init__.py
befair/bootstrap-vz
ac87a05d96519e4569b38914f5bb32dd3938dd5b
[ "Apache-2.0" ]
null
null
null
tests/system/providers/ec2/__init__.py
befair/bootstrap-vz
ac87a05d96519e4569b38914f5bb32dd3938dd5b
[ "Apache-2.0" ]
null
null
null
tests/system/providers/ec2/__init__.py
befair/bootstrap-vz
ac87a05d96519e4569b38914f5bb32dd3938dd5b
[ "Apache-2.0" ]
1
2019-11-08T15:20:42.000Z
2019-11-08T15:20:42.000Z
from contextlib import contextmanager from tests.system.tools import waituntil import logging log = logging.getLogger(__name__) @contextmanager def prepare_bootstrap(manifest, build_server): if manifest.volume['backing'] == 's3': credentials = {'access-key': build_server.build_settings['ec2-credentials']['access-key'], 'secret-key': build_server.build_settings['ec2-credentials']['secret-key']} from boto.s3 import connect_to_region as s3_connect s3_connection = s3_connect(manifest.image['region'], aws_access_key_id=credentials['access-key'], aws_secret_access_key=credentials['secret-key']) log.debug('Creating S3 bucket') bucket = s3_connection.create_bucket(manifest.image['bucket'], location=manifest.image['region']) try: yield finally: log.debug('Deleting S3 bucket') for item in bucket.list(): bucket.delete_key(item.key) s3_connection.delete_bucket(manifest.image['bucket']) else: yield @contextmanager def boot_image(manifest, build_server, bootstrap_info, instance_type=None): credentials = {'access-key': build_server.run_settings['ec2-credentials']['access-key'], 'secret-key': build_server.run_settings['ec2-credentials']['secret-key']} from boto.ec2 import connect_to_region as ec2_connect ec2_connection = ec2_connect(bootstrap_info._ec2['region'], aws_access_key_id=credentials['access-key'], aws_secret_access_key=credentials['secret-key']) from boto.vpc import connect_to_region as vpc_connect vpc_connection = vpc_connect(bootstrap_info._ec2['region'], aws_access_key_id=credentials['access-key'], aws_secret_access_key=credentials['secret-key']) if manifest.volume['backing'] == 'ebs': from .images import EBSImage image = EBSImage(bootstrap_info._ec2['image'], ec2_connection) if manifest.volume['backing'] == 's3': from .images import S3Image image = S3Image(bootstrap_info._ec2['image'], ec2_connection) try: with run_instance(image, manifest, instance_type, ec2_connection, vpc_connection) as instance: yield instance finally: image.destroy() @contextmanager def run_instance(image, manifest, instance_type, ec2_connection, vpc_connection): with create_env(ec2_connection, vpc_connection) as boot_env: def waituntil_instance_is(state): def instance_has_state(): instance.update() return instance.state == state return waituntil(instance_has_state, timeout=600, interval=3) instance = None try: log.debug('Booting ec2 instance') reservation = image.ami.run(instance_type=instance_type, subnet_id=boot_env['subnet_id']) [instance] = reservation.instances instance.add_tag('Name', 'bootstrap-vz test instance') if not waituntil_instance_is('running'): raise EC2InstanceStartupException('Timeout while booting instance') if not waituntil(lambda: instance.get_console_output().output is not None, timeout=600, interval=3): raise EC2InstanceStartupException('Timeout while fetching console output') from bootstrapvz.common.releases import wheezy if manifest.release <= wheezy: termination_string = 'INIT: Entering runlevel: 2' else: termination_string = 'Debian GNU/Linux' console_output = instance.get_console_output().output if termination_string not in console_output: last_lines = '\n'.join(console_output.split('\n')[-50:]) message = ('The instance did not boot properly.\n' 'Last 50 lines of console output:\n{output}'.format(output=last_lines)) raise EC2InstanceStartupException(message) yield instance finally: if instance is not None: log.debug('Terminating ec2 instance') instance.terminate() if not waituntil_instance_is('terminated'): raise EC2InstanceStartupException('Timeout while terminating instance') # wait a 
little longer, aws can be a little slow sometimes and think the instance is still running import time time.sleep(15) @contextmanager def create_env(ec2_connection, vpc_connection): vpc_cidr = '10.0.0.0/28' subnet_cidr = '10.0.0.0/28' @contextmanager def vpc(): log.debug('Creating VPC') vpc = vpc_connection.create_vpc(vpc_cidr) try: yield vpc finally: log.debug('Deleting VPC') vpc_connection.delete_vpc(vpc.id) @contextmanager def subnet(vpc): log.debug('Creating subnet') subnet = vpc_connection.create_subnet(vpc.id, subnet_cidr) try: yield subnet finally: log.debug('Deleting subnet') vpc_connection.delete_subnet(subnet.id) with vpc() as _vpc: with subnet(_vpc) as _subnet: yield {'subnet_id': _subnet.id} class EC2InstanceStartupException(Exception): pass
39.635714
114
0.623175
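The EC2 boot helpers above poll instance state through a waituntil(predicate, timeout, interval) utility imported from tests.system.tools, whose body is not shown in this record. A plausible sketch of such a helper, offered purely as an assumption about its shape, is:

import os
import time

def waituntil(predicate, timeout=60, interval=1):
    # Poll `predicate` until it returns a truthy value or `timeout` seconds elapse.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# Example: wait up to 5 seconds for a marker file to appear.
print(waituntil(lambda: os.path.exists("/tmp/ready"), timeout=5, interval=0.5))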
795763765be523ef702873541c0cbc731b39f15d
3,113
py
Python
www/src/Lib/genericpath.py
frederickavita/testing
577f2e621c188cab59495dcb5166a9e6309a9a81
[ "BSD-3-Clause" ]
4
2018-03-19T12:07:18.000Z
2019-09-20T08:53:31.000Z
www/src/Lib/genericpath.py
frederickavita/testing
577f2e621c188cab59495dcb5166a9e6309a9a81
[ "BSD-3-Clause" ]
null
null
null
www/src/Lib/genericpath.py
frederickavita/testing
577f2e621c188cab59495dcb5166a9e6309a9a81
[ "BSD-3-Clause" ]
null
null
null
""" Path operations common to more than one OS Do not use directly. The OS specific modules import the appropriate functions from this module themselves. """ import os __all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime', 'getsize', 'isdir', 'isfile'] # Does a path exist? # This is false for dangling symbolic links on systems that support them. def exists(path): """Test whether a path exists. Returns False for broken symbolic links""" try: os.stat(path) except os.error: return False return True # This follows symbolic links, so both islink() and isdir() can be true # for the same path on systems that support symlinks def isfile(path): """Test whether a path is a regular file""" try: st = os.stat(path) except os.error: return False import stat return stat.S_ISREG(st.st_mode) # Is a path a directory? # This follows symbolic links, so both islink() and isdir() # can be true for the same path on systems that support symlinks def isdir(s): """Return true if the pathname refers to an existing directory.""" try: st = os.stat(s) except os.error: return False import stat return stat.S_ISDIR(st.st_mode) def getsize(filename): """Return the size of a file, reported by os.stat().""" return os.stat(filename).st_size def getmtime(filename): """Return the last modification time of a file, reported by os.stat().""" return os.stat(filename).st_mtime def getatime(filename): """Return the last access time of a file, reported by os.stat().""" return os.stat(filename).st_atime def getctime(filename): """Return the metadata change time of a file, reported by os.stat().""" return os.stat(filename).st_ctime # Return the longest prefix of all list elements. def commonprefix(m): "Given a list of pathnames, returns the longest common leading component" if not m: return '' s1 = min(m) s2 = max(m) for i, c in enumerate(s1): if c != s2[i]: return s1[:i] return s1 # Split a path in root and extension. # The extension is everything starting at the last dot in the last # pathname component; the root is everything before that. # It is always true that root + ext == p. # Generic implementation of splitext, to be parametrized with # the separators def _splitext(p, sep, altsep, extsep): """Split the extension from a pathname. Extension is everything from the last dot to the end, ignoring leading dots. Returns "(root, ext)"; ext may be empty.""" # NOTE: This code must work for text and bytes strings. sepIndex = p.rfind(sep) if altsep: altsepIndex = p.rfind(altsep) sepIndex = max(sepIndex, altsepIndex) dotIndex = p.rfind(extsep) if dotIndex > sepIndex: # skip all leading dots filenameIndex = sepIndex + 1 while filenameIndex < dotIndex: if p[filenameIndex:filenameIndex+1] != extsep: return p[:dotIndex], p[dotIndex:] filenameIndex += 1 return p, p[:0]
28.824074
78
0.663347
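genericpath's helpers are normally reached through os.path; a quick illustration of the commonprefix and splitext behaviour implemented above:

import os.path

paths = ["/usr/local/lib", "/usr/local/bin", "/usr/lib"]
print(os.path.commonprefix(paths))         # '/usr/l' - character-wise, not path-wise

print(os.path.splitext("archive.tar.gz"))  # ('archive.tar', '.gz')
print(os.path.splitext(".bashrc"))         # ('.bashrc', '') - leading dots are ignored
print(os.path.isdir(os.getcwd()))          # True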
795764413edaca7b802e3c4e5dcbd3e9533fc27f
38
py
Python
tests/__init__.py
infosec-garage/maltools
007c7856f343b267b28b2bca8065ec2832678fc2
[ "MIT" ]
null
null
null
tests/__init__.py
infosec-garage/maltools
007c7856f343b267b28b2bca8065ec2832678fc2
[ "MIT" ]
null
null
null
tests/__init__.py
infosec-garage/maltools
007c7856f343b267b28b2bca8065ec2832678fc2
[ "MIT" ]
null
null
null
"""Unit test package for maltools."""
19
37
0.684211
795764541068f70cc081497df412184cbba59995
3,213
py
Python
GAN/model.py
IvoryCandy/generative-adversarial-networks
4010a20b22ecb016da164b37d6f915788e8f09f5
[ "Apache-2.0" ]
1
2018-06-05T02:26:48.000Z
2018-06-05T02:26:48.000Z
GAN/model.py
IvoryCandy/generative-adversarial-networks
4010a20b22ecb016da164b37d6f915788e8f09f5
[ "Apache-2.0" ]
null
null
null
GAN/model.py
IvoryCandy/generative-adversarial-networks
4010a20b22ecb016da164b37d6f915788e8f09f5
[ "Apache-2.0" ]
null
null
null
import torch.nn as nn import torch.nn.functional as func def de_conv(in_channels, out_channels, kernel_size, stride=2, padding=1, bn=True): """Custom de_convolutional layer for simplicity.""" layers = [nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)] if bn: layers.append(nn.BatchNorm2d(out_channels)) return nn.Sequential(*layers) def conv(in_channels, out_channels, kernel_size, stride=2, padding=1, bn=True): """Custom convolutional layer for simplicity.""" layers = [nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)] if bn: layers.append(nn.BatchNorm2d(out_channels)) return nn.Sequential(*layers) class Generator(nn.Module): """Generator containing 7 de_convolutional layers.""" def __init__(self, z_dim=256, image_size=128, conv_dim=64): super(Generator, self).__init__() self.fc = de_conv(in_channels=z_dim, out_channels=conv_dim * 8, kernel_size=int(image_size / 16), stride=1, padding=0, bn=False) self.de_conv_1 = de_conv(in_channels=conv_dim * 8, out_channels=conv_dim * 4, kernel_size=4) self.de_conv_2 = de_conv(in_channels=conv_dim * 4, out_channels=conv_dim * 2, kernel_size=4) self.de_conv_3 = de_conv(in_channels=conv_dim * 2, out_channels=conv_dim, kernel_size=4) self.de_conv_4 = de_conv(in_channels=conv_dim, out_channels=3, kernel_size=4, bn=False) def forward(self, x): x = x.view(x.size(0), x.size(1), 1, 1) # If image_size is 64, output shape is as below. x = self.fc(x) # (?, 512, 4, 4) x = func.leaky_relu(self.de_conv_1(x), 0.05) # (?, 256, 8, 8) x = func.leaky_relu(self.de_conv_2(x), 0.05) # (?, 128, 16, 16) x = func.leaky_relu(self.de_conv_3(x), 0.05) # (?, 64, 32, 32) x = func.tanh(self.de_conv_4(x)) # (?, 3, 64, 64) return x class Discriminator(nn.Module): """Discriminator containing 4 convolutional layers.""" def __init__(self, image_size=128, conv_dim=64): super(Discriminator, self).__init__() self.conv_1 = conv(in_channels=3, out_channels=conv_dim, kernel_size=4, bn=False) self.conv_2 = conv(in_channels=conv_dim, out_channels=conv_dim * 2, kernel_size=4) self.conv_3 = conv(in_channels=conv_dim * 2, out_channels=conv_dim * 4, kernel_size=4) self.conv_4 = conv(in_channels=conv_dim * 4, out_channels=conv_dim * 8, kernel_size=4) self.fc = conv(in_channels=conv_dim * 8, out_channels=1, kernel_size=int(image_size / 16), stride=1, padding=0, bn=False) def forward(self, x): # If image_size is 64, output shape is as below. x = func.leaky_relu(self.conv_1(x), 0.05) # (?, 64, 32, 32) x = func.leaky_relu(self.conv_2(x), 0.05) # (?, 128, 16, 16) x = func.leaky_relu(self.conv_3(x), 0.05) # (?, 256, 8, 8) x = func.leaky_relu(self.conv_4(x), 0.05) # (?, 512, 4, 4) x = self.fc(x).squeeze() return x
54.457627
136
0.628385
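The DCGAN-style modules above can be smoke-tested with a random latent batch. A minimal sketch, assuming PyTorch is installed and that the record is importable as `model` (the import path is an assumption):

import torch
from model import Generator, Discriminator  # the GAN/model.py record above

G = Generator(z_dim=256, image_size=64, conv_dim=64)
D = Discriminator(image_size=64, conv_dim=64)

z = torch.randn(8, 256)      # batch of 8 latent vectors
fake_images = G(z)           # expected shape: (8, 3, 64, 64)
scores = D(fake_images)      # one score per image after squeeze()
print(fake_images.shape, scores.shape)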
7957649efada28609dfba70d1b41b1cfd1e5de97
3,954
py
Python
examples/connectivity/plot_multi_subject_connectome.py
GaelVaroquaux/nilearn
8e902704bbd186912e753cf08e90eb50f228915c
[ "BSD-2-Clause" ]
1
2020-12-20T00:22:14.000Z
2020-12-20T00:22:14.000Z
examples/connectivity/plot_multi_subject_connectome.py
GaelVaroquaux/nilearn
8e902704bbd186912e753cf08e90eb50f228915c
[ "BSD-2-Clause" ]
3
2016-02-23T09:47:05.000Z
2018-10-12T16:54:38.000Z
examples/connectivity/plot_multi_subject_connectome.py
GaelVaroquaux/nilearn
8e902704bbd186912e753cf08e90eb50f228915c
[ "BSD-2-Clause" ]
3
2017-01-06T09:54:00.000Z
2020-02-17T12:57:35.000Z
""" Group Sparse inverse covariance for multi-subject connectome ============================================================= This example shows how to estimate a connectome on a groupe of subjects using the group sparse inverse covariance estimate. """ import matplotlib.pyplot as plt import numpy as np from nilearn import plotting n_subjects = 4 # subjects to consider for group-sparse covariance (max: 40) def plot_matrices(cov, prec, title): """Plot covariance and precision matrices, for a given processing. """ prec = prec.copy() # avoid side effects # Put zeros on the diagonal, for graph clarity. size = prec.shape[0] prec[list(range(size)), list(range(size))] = 0 span = max(abs(prec.min()), abs(prec.max())) # Display covariance matrix plt.figure() plt.imshow(cov, interpolation="nearest", vmin=-1, vmax=1, cmap=plotting.cm.bwr) plt.colorbar() plt.title("%s / covariance" % title) # Display precision matrix plt.figure() plt.imshow(prec, interpolation="nearest", vmin=-span, vmax=span, cmap=plotting.cm.bwr) plt.colorbar() plt.title("%s / precision" % title) ############################################################################## # Fetching datasets from nilearn import datasets msdl_atlas_dataset = datasets.fetch_msdl_atlas() adhd_dataset = datasets.fetch_adhd(n_subjects=n_subjects) # print basic information on the dataset print('First subject functional nifti image (4D) is at: %s' % adhd_dataset.func[0]) # 4D data ############################################################################## # Extracting region signals from nilearn import image from nilearn import input_data # A "memory" to avoid recomputation from sklearn.externals.joblib import Memory mem = Memory('nilearn_cache') masker = input_data.NiftiMapsMasker( msdl_atlas_dataset.maps, resampling_target="maps", detrend=True, low_pass=None, high_pass=0.01, t_r=2.5, standardize=True, memory='nilearn_cache', memory_level=1, verbose=2) masker.fit() subject_time_series = [] func_filenames = adhd_dataset.func confound_filenames = adhd_dataset.confounds for func_filename, confound_filename in zip(func_filenames, confound_filenames): print("Processing file %s" % func_filename) # Computing some confounds hv_confounds = mem.cache(image.high_variance_confounds)( func_filename) region_ts = masker.transform(func_filename, confounds=[hv_confounds, confound_filename]) subject_time_series.append(region_ts) ############################################################################## # Computing group-sparse precision matrices from nilearn.connectome import GroupSparseCovarianceCV gsc = GroupSparseCovarianceCV(verbose=2) gsc.fit(subject_time_series) from sklearn import covariance gl = covariance.GraphLassoCV(verbose=2) gl.fit(np.concatenate(subject_time_series)) ############################################################################## # Displaying results atlas_imgs = image.iter_img(msdl_atlas_dataset.maps) atlas_region_coords = [plotting.find_xyz_cut_coords(img) for img in atlas_imgs] title = "GraphLasso" plotting.plot_connectome(-gl.precision_, atlas_region_coords, edge_threshold='90%', title="Sparse inverse covariance (GraphLasso)") plotting.plot_connectome(gl.covariance_, atlas_region_coords, edge_threshold='90%', title="Covariance") plot_matrices(gl.covariance_, gl.precision_, title) title = "GroupSparseCovariance" plotting.plot_connectome(-gsc.precisions_[..., 0], atlas_region_coords, edge_threshold='90%', title=title) plot_matrices(gsc.covariances_[..., 0], gsc.precisions_[..., 0], title) plotting.show()
33.226891
79
0.632524
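The connectome example above hinges on scikit-learn's sparse inverse covariance estimation. A self-contained sketch of that estimator on synthetic data, for illustration only (newer scikit-learn releases rename GraphLassoCV to GraphicalLassoCV):

import numpy as np
from sklearn.covariance import GraphicalLassoCV  # GraphLassoCV in older releases

rng = np.random.RandomState(0)
X = rng.randn(200, 10)            # 200 samples, 10 "regions"
X[:, 1] += 0.8 * X[:, 0]          # induce one strong dependency

estimator = GraphicalLassoCV().fit(X)
print(estimator.covariance_.shape)   # (10, 10)
print(estimator.precision_.shape)    # (10, 10), the sparse inverse covariance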
79576561942604452e6b7b7d47916f20bff8ce8d
5,972
py
Python
features/device-types-feature/raspberrypi-plugin-feature/org.wso2.carbon.device.mgt.iot.raspberrypi.backend.feature/src/main/resources/agent/src/mqttConnector.py
hasuniea/carbon-device-mgt-plugins
a948a7d69a2e99d6dc1351ae18daf750145d0d32
[ "Apache-2.0" ]
9
2015-10-29T05:24:20.000Z
2018-01-09T08:14:28.000Z
features/device-types-feature/raspberrypi-plugin-feature/org.wso2.carbon.device.mgt.iot.raspberrypi.backend.feature/src/main/resources/agent/src/mqttConnector.py
hasuniea/carbon-device-mgt-plugins
a948a7d69a2e99d6dc1351ae18daf750145d0d32
[ "Apache-2.0" ]
50
2015-08-04T06:06:51.000Z
2022-01-10T16:17:16.000Z
features/device-types-feature/raspberrypi-plugin-feature/org.wso2.carbon.device.mgt.iot.raspberrypi.backend.feature/src/main/resources/agent/src/mqttConnector.py
hasuniea/carbon-device-mgt-plugins
a948a7d69a2e99d6dc1351ae18daf750145d0d32
[ "Apache-2.0" ]
170
2015-02-12T05:56:29.000Z
2020-02-11T16:39:22.000Z
#!/usr/bin/env python """ /** * Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved. * * WSO2 Inc. licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except * in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. **/ """ import time import iotUtils import paho.mqtt.client as mqtt from token_updater import RefreshToken agent_connected = False # The callback for when the client receives a CONNACK response from the server. def on_connect(client, userdata, flags, rc): if rc == 0: global agent_connected agent_connected = True # Subscribing in on_connect() means that if we lose the connection and # reconnect then subscriptions will be renewed. print ("MQTT_LISTENER: Subscribing with topic " + TOPIC_TO_SUBSCRIBE) client.subscribe(TOPIC_TO_SUBSCRIBE) elif rc == 4: token = RefreshToken() response = token.updateTokens() newAccessToken = response['access_token'] client.username_pw_set(newAccessToken, password="") else: global agent_connected agent_connected = False print("MQTT_LISTENER: Connected with result code " + str(rc)) # The callback for when a PUBLISH message is received from the server. def on_message(client, userdata, msg): print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' print 'MQTT_LISTENER: Message Received by Device' print( "MQTT_LISTENER: " + msg.topic + " --> " + str(msg.payload) ) request = str(msg.payload) resource = request.split(":")[0].upper() state = request.split(":")[1].upper() print "MQTT_LISTENER: Resource- " + resource if resource == "TEMP": pass #request.send_response(200) #request.send_header("Content-type", "text/plain") #request.end_headers() #request.wfile.write(LAST_TEMP) # return elif resource == "BULB": iotUtils.switchBulb(state) def on_publish(client, userdata, mid): print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' print 'Temperature Data Published Succesfully' # print (client) # print (userdata) # print (mid) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # The callback for when a PUBLISH message to the server when door is open or close # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def publish(msg): if agent_connected: print '~~~~~~~~~~~~~~~~~~~~~~~~ Publishing Device-Data ~~~~~~~~~~~~~~~~~~~~~~~~~' print ('PUBLISHED DATA: ' + msg) print ('PUBLISHED TOPIC: ' + TOPIC_TO_PUBLISH) global mqttClient mqttClient.publish(TOPIC_TO_PUBLISH, msg) def on_subscribe(client, userdata, mid, granted_qos): print "Successfully subscribed to " + TOPIC_TO_SUBSCRIBE def on_disconnect(client, userdata, rc): global agent_connected agent_connected = False print ("Agent disconnected from broker") print("Obtaining new access token") token = RefreshToken() response = token.updateTokens() newAccessToken = response['access_token'] client.username_pw_set(newAccessToken, password="") # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # The Main method of the server script # This method is invoked from RaspberryStats.py on a new thread # 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def main(): MQTT_ENDPOINT = iotUtils.MQTT_EP.split(":") MQTT_IP = MQTT_ENDPOINT[1].replace('//','') MQTT_PORT = int(MQTT_ENDPOINT[2]) SERVER_NAME = iotUtils.SERVER_NAME DEV_ID = iotUtils.DEVICE_ID global TOPIC_TO_SUBSCRIBE # TOPIC_TO_SUBSCRIBE = SERVER_NAME + "/raspberrypi/" + DEV_ID TOPIC_TO_SUBSCRIBE = SERVER_NAME + "/raspberrypi/" + DEV_ID global TOPIC_TO_PUBLISH # TOPIC_TO_PUBLISH = SERVER_NAME + "/raspberrypi/" + DEV_ID + "/publisher" TOPIC_TO_PUBLISH = SERVER_NAME + "/raspberrypi/" + DEV_ID + "/temperature" print ("MQTT_LISTENER: MQTT_ENDPOINT is " + str(MQTT_ENDPOINT)) print ("MQTT_LISTENER: MQTT_TOPIC is " + TOPIC_TO_SUBSCRIBE) global mqttClient mqttClient = mqtt.Client(client_id="RaspberryPi Agent") mqttClient.on_connect = on_connect mqttClient.on_message = on_message mqttClient.on_publish = on_publish mqttClient.on_subscribe = on_subscribe mqttClient.on_disconnect = on_disconnect mqttClient.username_pw_set(iotUtils.AUTH_TOKEN, password = "") while True: try: mqttClient.connect(MQTT_IP, MQTT_PORT, 180) print "MQTT_LISTENER: " + time.asctime(), "Connected to MQTT Broker - %s:%s" % (MQTT_IP, MQTT_PORT) # Blocking call that processes network traffic, dispatches callbacks and # handles reconnecting. # Other loop*() functions are available that give a threaded interface and a # manual interface. mqttClient.loop_forever() except (KeyboardInterrupt, Exception) as e: print "MQTT_LISTENER: Exception in MQTTServerThread (either KeyboardInterrupt or Other)" print ("MQTT_LISTENER: " + str(e)) mqttClient.disconnect() print "MQTT_LISTENER: " + time.asctime(), "Connection to Broker closed - %s:%s" % (MQTT_IP, MQTT_PORT) print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' pass if __name__ == '__main__': iotUtils.setUpGPIOPins() main()
36.414634
114
0.616544
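The agent above is built on paho-mqtt's callback API. A minimal publish/subscribe sketch against a local broker, assuming the paho-mqtt 1.x constructor signature used by the record (broker address and topic are placeholders):

import paho.mqtt.client as mqtt

def on_connect(client, userdata, flags, rc):
    print("Connected with result code", rc)
    client.subscribe("demo/raspberrypi/temperature")

def on_message(client, userdata, msg):
    print(msg.topic, msg.payload)

client = mqtt.Client(client_id="demo-agent")
client.on_connect = on_connect
client.on_message = on_message
client.connect("localhost", 1883, 60)
client.publish("demo/raspberrypi/temperature", "23.5")
client.loop_forever()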
7957666f10791f63d75ac89f891017e1824a11dc
799
py
Python
test/test_edit_contact.py
nicholas-y/python_selenium
84c3344cbed9990903028d9a99c03ee6ffb4aa55
[ "Apache-2.0" ]
null
null
null
test/test_edit_contact.py
nicholas-y/python_selenium
84c3344cbed9990903028d9a99c03ee6ffb4aa55
[ "Apache-2.0" ]
null
null
null
test/test_edit_contact.py
nicholas-y/python_selenium
84c3344cbed9990903028d9a99c03ee6ffb4aa55
[ "Apache-2.0" ]
null
null
null
import random from model.contact import Contact def test_edit_group(app, db, check_ui): i = random.randint(1, 25) if len(db.get_contact_list()) == 0: app.contact.create(Contact(firstname="Tester")) old_contacts = db.get_contact_list() contact = random.choice(old_contacts) old_contacts.remove(contact) contact.firstname = "Tester" + str(i) old_contacts.append(contact) app.contact.edit_contact_by_id(contact) assert len(old_contacts) == app.contact.count() new_contact_list = db.get_contact_list() assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contact_list, key=Contact.id_or_max) if check_ui: assert sorted(new_contact_list, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
39.95
127
0.732165
795767974c83252945e4e7806825244ac36e86a3
555
py
Python
tests/integration_tests/boolean_tests/operations_tests/test_is_trivial.py
skrat/martinez
86db48324cb50ecb52be8ab2e4278a6d5cdd562b
[ "MIT" ]
7
2020-05-07T08:13:44.000Z
2021-12-17T07:33:51.000Z
tests/integration_tests/boolean_tests/operations_tests/test_is_trivial.py
skrat/martinez
86db48324cb50ecb52be8ab2e4278a6d5cdd562b
[ "MIT" ]
17
2019-11-29T23:17:26.000Z
2020-12-20T15:47:17.000Z
tests/integration_tests/boolean_tests/operations_tests/test_is_trivial.py
skrat/martinez
86db48324cb50ecb52be8ab2e4278a6d5cdd562b
[ "MIT" ]
1
2020-12-17T22:44:21.000Z
2020-12-17T22:44:21.000Z
from typing import Tuple from hypothesis import given from tests.bind_tests.hints import BoundOperation from tests.integration_tests.utils import are_bound_ported_polygons_equal from tests.port_tests.hints import PortedOperation from . import strategies @given(strategies.operations_pairs) def test_basic(operations_pair: Tuple[BoundOperation, PortedOperation] ) -> None: bound, ported = operations_pair assert bound.is_trivial is ported.is_trivial assert are_bound_ported_polygons_equal(bound.resultant, ported.resultant)
30.833333
77
0.812613
795767af1700ffd3444c8a027c05a12d5b70d759
1,809
py
Python
ui/widgets/group_selection.py
vvstubbs/NordVpnLinuxGUI
78e78c212c1fc50d2ff9a418285b397f2d65d6a2
[ "MIT" ]
17
2021-12-27T20:19:37.000Z
2022-03-25T10:19:08.000Z
ui/widgets/group_selection.py
vvstubbs/NordVpnLinuxGUI
78e78c212c1fc50d2ff9a418285b397f2d65d6a2
[ "MIT" ]
5
2021-12-28T08:17:26.000Z
2022-02-09T03:44:32.000Z
ui/widgets/group_selection.py
vvstubbs/NordVpnLinuxGUI
78e78c212c1fc50d2ff9a418285b397f2d65d6a2
[ "MIT" ]
5
2021-12-28T08:01:19.000Z
2022-02-22T04:14:42.000Z
from kivy.app import App from kivy.clock import Clock from kivy.properties import StringProperty from kivy.uix.boxlayout import BoxLayout from kivy.lang.builder import Builder from kivy.uix.gridlayout import GridLayout ICON_TABLE = { "Africa_The_Middle_East_And_India": "earth", "P2P": "share", "Asia_Pacific": "earth", "Standard_VPN_Servers": "vpn", "Europe": "earth", "The_Americas": "earth", "Onion_Over_VPN": "layers", } Builder.load_string(""" <GroupSelection> id: country_selection height: dp(60) padding: dp(5), dp(7) size_hint_y: None cols: 1 BoxLayout: id: country_box orientation: "horizontal" Widget: size_hint_x: 0.05 MDIcon: icon: root.group_icon size_hint_x: 0.2 BoxLayout: orientation: "horizontal" Widget: MDFlatButton: text: root.group_label theme_text_color: "Custom" text_color: (1,1,1,1) pos_hint: {'center_x': 0, 'center_y': .5} on_release: root.connect_to_group() Widget: Widget: id: padding height: dp(5) MDSeparator: id: separator height: dp(1) padding: dp(10),0,0,0 """) class GroupSelection(GridLayout): group_label = StringProperty("") group_icon = StringProperty("chevron-down") def __init__(self, group, connect, **kwargs): super().__init__(**kwargs) self.group = group self.connect = connect self.group_icon = ICON_TABLE.get(group, "broken-image") self.group_label = group.replace("_", " ") self.nord_client = App.get_running_app().nord_client def connect_to_group(self): self.connect(self.group)
27
63
0.601437
795767df4fad236ff7dc189753ebda7d8d32eea1
1,381
py
Python
exo/studio/figures/views.py
moas/sketchbadges
7c5be91a74f9edd759eaacd9a7b97aef873f7fcb
[ "MIT" ]
null
null
null
exo/studio/figures/views.py
moas/sketchbadges
7c5be91a74f9edd759eaacd9a7b97aef873f7fcb
[ "MIT" ]
null
null
null
exo/studio/figures/views.py
moas/sketchbadges
7c5be91a74f9edd759eaacd9a7b97aef873f7fcb
[ "MIT" ]
null
null
null
from django.core.urlresolvers import reverse from django.shortcuts import get_object_or_404 from django.utils.functional import cached_property from django.views import generic from .forms import EvaluationModel3DForm from .models import EvaluationModel3D, Model3D # Create your views here. class ListModel3DView(generic.ListView): model = Model3D template_name = 'home.html' context_object_name = 'items' def get_queryset(self): qs = super().get_queryset() return qs.select_related('designer').filter(is_active=True) class DetailModel3DView(generic.DetailView): template_name = 'model_detail.html' context_object_name = 'item' model = Model3D def get_queryset(self): qs = super().get_queryset() return qs.select_related('designer').filter(is_active=True) class AddModel3DEvaluationView(generic.CreateView): template_name = 'add_comment.html' model = EvaluationModel3D form_class = EvaluationModel3DForm @cached_property def model_3d_object(self): model_3d = get_object_or_404(Model3D, pk=self.kwargs['pk']) return model_3d def get_success_url(self): return reverse('models:detail', args=[self.model_3d_object.pk]) def get_initial(self): initial = super().get_initial() initial['model_3d'] = self.model_3d_object return initial
28.183673
71
0.726285
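The class-based views above assume URL patterns in a 'models' namespace (see the reverse('models:detail', ...) call in the record). A hypothetical urls.py wiring, written against the pre-2.0 Django API the record imports, might look like:

# urls.py - hypothetical wiring; pattern names follow the reverse() call in the record
from django.conf.urls import url

from . import views

app_name = 'models'
urlpatterns = [
    url(r'^$', views.ListModel3DView.as_view(), name='list'),
    url(r'^(?P<pk>\d+)/$', views.DetailModel3DView.as_view(), name='detail'),
    url(r'^(?P<pk>\d+)/comment/$', views.AddModel3DEvaluationView.as_view(), name='comment'),
]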
795768cd62a83457515a1a3a92a8c54392e7bf85
18,769
py
Python
shell/impala_client.py
kexianda/impala
2e60347868d7e719d80401f9abcbe971e659502b
[ "Apache-2.0" ]
1
2019-12-14T03:09:50.000Z
2019-12-14T03:09:50.000Z
shell/impala_client.py
kexianda/impala
2e60347868d7e719d80401f9abcbe971e659502b
[ "Apache-2.0" ]
null
null
null
shell/impala_client.py
kexianda/impala
2e60347868d7e719d80401f9abcbe971e659502b
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sasl import time from beeswaxd import BeeswaxService from beeswaxd.BeeswaxService import QueryState from ExecStats.ttypes import TExecStats from ImpalaService import ImpalaService from ErrorCodes.ttypes import TErrorCode from Status.ttypes import TStatus from thrift.protocol import TBinaryProtocol from thrift_sasl import TSaslClientTransport from thrift.transport.TSocket import TSocket from thrift.transport.TTransport import TBufferedTransport, TTransportException from thrift.Thrift import TApplicationException class RpcStatus: """Convenience enum to describe Rpc return statuses""" OK = 0 ERROR = 1 class RPCException(Exception): def __init__(self, value=""): self.value = value def __str__(self): return self.value class QueryStateException(Exception): def __init__(self, value=""): self.value = value def __str__(self): return self.value class DisconnectedException(Exception): def __init__(self, value=""): self.value = value def __str__(self): return self.value class QueryCancelledByShellException(Exception): pass class ImpalaClient(object): def __init__(self, impalad, use_kerberos=False, kerberos_service_name="impala", use_ssl=False, ca_cert=None, user=None, ldap_password=None, use_ldap=False): self.connected = False self.impalad = impalad self.imp_service = None self.transport = None self.use_kerberos = use_kerberos self.kerberos_service_name = kerberos_service_name self.use_ssl = use_ssl self.ca_cert = ca_cert self.user, self.ldap_password = user, ldap_password self.use_ldap = use_ldap self.default_query_options = {} self.query_option_levels = {} self.query_state = QueryState._NAMES_TO_VALUES self.fetch_batch_size = 1024 # This is set from ImpalaShell's signal handler when a query is cancelled # from command line via CTRL+C. It is used to suppress error messages of # query cancellation. self.is_query_cancelled = False def _options_to_string_list(self, set_query_options): return ["%s=%s" % (k, v) for (k, v) in set_query_options.iteritems()] def build_default_query_options_dict(self): """The default query options are retrieved from a rpc call, and are dependent on the impalad to which a connection has been established. They need to be refreshed each time a connection is made. This is particularly helpful when there is a version mismatch between the shell and the impalad. 
""" try: get_default_query_options = self.imp_service.get_default_configuration(False) except: return rpc_result = self._do_rpc(lambda: get_default_query_options) options, status = rpc_result if status != RpcStatus.OK: raise RPCException("Unable to retrieve default query options") for option in options: self.default_query_options[option.key.upper()] = option.value # If connected to an Impala that predates IMPALA-2181 then the received options # wouldn't contain a level attribute. In this case the query_option_levels # map is left empty. if option.level is not None: self.query_option_levels[option.key.upper()] = option.level def build_summary_table(self, summary, idx, is_fragment_root, indent_level, new_indent_level, output): """Direct translation of Coordinator::PrintExecSummary() to recursively build a list of rows of summary statistics, one per exec node summary: the TExecSummary object that contains all the summary data idx: the index of the node to print is_fragment_root: true if the node to print is the root of a fragment (and therefore feeds into an exchange) indent_level: the number of spaces to print before writing the node's label, to give the appearance of a tree. The 0th child of a node has the same indent_level as its parent. All other children have an indent_level of one greater than their parent. output: the list of rows into which to append the rows produced for this node and its children. Returns the index of the next exec node in summary.exec_nodes that should be processed, used internally to this method only. NOTE: This is duplicated in impala_beeswax.py, and changes made here should also be made there. TODO: refactor into a shared library. (IMPALA-5792) """ attrs = ["latency_ns", "cpu_time_ns", "cardinality", "memory_used"] # Initialise aggregate and maximum stats agg_stats, max_stats = TExecStats(), TExecStats() for attr in attrs: setattr(agg_stats, attr, 0) setattr(max_stats, attr, 0) node = summary.nodes[idx] if node.exec_stats is not None: for stats in node.exec_stats: for attr in attrs: val = getattr(stats, attr) if val is not None: setattr(agg_stats, attr, getattr(agg_stats, attr) + val) setattr(max_stats, attr, max(getattr(max_stats, attr), val)) if node.exec_stats is not None and node.exec_stats: avg_time = agg_stats.latency_ns / len(node.exec_stats) else: avg_time = 0 # If the node is a broadcast-receiving exchange node, the cardinality of rows produced # is the max over all instances (which should all have received the same number of # rows). Otherwise, the cardinality is the sum over all instances which process # disjoint partitions. 
if node.is_broadcast: cardinality = max_stats.cardinality else: cardinality = agg_stats.cardinality est_stats = node.estimated_stats label_prefix = "" if indent_level > 0: label_prefix = "|" label_prefix += " |" * (indent_level - 1) if new_indent_level: label_prefix += "--" else: label_prefix += " " def prettyprint(val, units, divisor): for unit in units: if val < divisor: if unit == units[0]: return "%d%s" % (val, unit) else: return "%3.2f%s" % (val, unit) val /= divisor def prettyprint_bytes(byte_val): return prettyprint(byte_val, [' B', ' KB', ' MB', ' GB', ' TB'], 1024.0) def prettyprint_units(unit_val): return prettyprint(unit_val, ["", "K", "M", "B"], 1000.0) def prettyprint_time(time_val): return prettyprint(time_val, ["ns", "us", "ms", "s"], 1000.0) hosts = 0 if node.exec_stats is not None: hosts = len(node.exec_stats) row = [ label_prefix + node.label, hosts, prettyprint_time(avg_time), prettyprint_time(max_stats.latency_ns), prettyprint_units(cardinality), prettyprint_units(est_stats.cardinality), prettyprint_bytes(max_stats.memory_used), prettyprint_bytes(est_stats.memory_used), node.label_detail ] output.append(row) try: sender_idx = summary.exch_to_sender_map[idx] # This is an exchange node, so the sender is a fragment root, and should be printed # next. self.build_summary_table(summary, sender_idx, True, indent_level, False, output) except (KeyError, TypeError): # Fall through if idx not in map, or if exch_to_sender_map itself is not set pass idx += 1 if node.num_children > 0: first_child_output = [] idx = \ self.build_summary_table( summary, idx, False, indent_level, False, first_child_output) for child_idx in xrange(1, node.num_children): # All other children are indented (we only have 0, 1 or 2 children for every exec # node at the moment) idx = self.build_summary_table( summary, idx, False, indent_level + 1, True, output) output += first_child_output return idx def test_connection(self): """Checks to see if the current Impala connection is still alive. If not, an exception will be raised.""" if self.connected: self.imp_service.PingImpalaService() def connect(self): """Creates a connection to an Impalad instance The instance of the impala service is then pinged to test the connection and get back the server version """ if self.transport is not None: self.transport.close() self.transport = None self.connected = False self.transport = self._get_transport() self.transport.open() protocol = TBinaryProtocol.TBinaryProtocol(self.transport) self.imp_service = ImpalaService.Client(protocol) result = self.ping_impala_service() self.connected = True return result def ping_impala_service(self): return self.imp_service.PingImpalaService() def close_connection(self): """Close the transport if it's still open""" if self.transport: self.transport.close() def _get_transport(self): """Create a Transport. A non-kerberized impalad just needs a simple buffered transport. For the kerberized version, a sasl transport is created. If SSL is enabled, a TSSLSocket underlies the transport stack; otherwise a TSocket is used. """ if self.use_ssl: # TSSLSocket needs the ssl module, which may not be standard on all Operating # Systems. Only attempt to import TSSLSocket if the user wants an SSL connection. from TSSLSocketWithWildcardSAN import TSSLSocketWithWildcardSAN # sasl does not accept unicode strings, explicitly encode the string into ascii. 
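    # (Editor's note, illustrative values) self.impalad is expected to be a (host, port)
    # pair such as ('myhost', '21000'); the port arrives as a string and is cast to int below.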
host, port = self.impalad[0].encode('ascii', 'ignore'), int(self.impalad[1]) if self.use_ssl: if self.ca_cert is None: # No CA cert means don't try to verify the certificate sock = TSSLSocketWithWildcardSAN(host, port, validate=False) else: sock = TSSLSocketWithWildcardSAN(host, port, validate=True, ca_certs=self.ca_cert) else: sock = TSocket(host, port) if not (self.use_ldap or self.use_kerberos): return TBufferedTransport(sock) # Initializes a sasl client def sasl_factory(): sasl_client = sasl.Client() sasl_client.setAttr("host", host) if self.use_ldap: sasl_client.setAttr("username", self.user) sasl_client.setAttr("password", self.ldap_password) else: sasl_client.setAttr("service", self.kerberos_service_name) sasl_client.init() return sasl_client # GSSASPI is the underlying mechanism used by kerberos to authenticate. if self.use_kerberos: return TSaslClientTransport(sasl_factory, "GSSAPI", sock) else: return TSaslClientTransport(sasl_factory, "PLAIN", sock) def create_beeswax_query(self, query_str, set_query_options): """Create a beeswax query object from a query string""" query = BeeswaxService.Query() query.hadoop_user = self.user query.query = query_str query.configuration = self._options_to_string_list(set_query_options) return query def execute_query(self, query): self.is_query_cancelled = False rpc_result = self._do_rpc(lambda: self.imp_service.query(query)) last_query_handle, status = rpc_result if status != RpcStatus.OK: raise RPCException("Error executing the query") return last_query_handle def wait_to_finish(self, last_query_handle, periodic_callback=None): loop_start = time.time() while True: query_state = self.get_query_state(last_query_handle) if query_state == self.query_state["FINISHED"]: break elif query_state == self.query_state["EXCEPTION"]: if self.connected: raise QueryStateException(self.get_warning_log(last_query_handle)) else: raise DisconnectedException("Not connected to impalad.") if periodic_callback is not None: periodic_callback() time.sleep(self._get_sleep_interval(loop_start)) def fetch(self, query_handle): """Fetch all the results. This function returns a generator to create an iterable of the result rows. """ result_rows = [] while True: rpc_result = self._do_rpc( lambda: self.imp_service.fetch(query_handle, False, self.fetch_batch_size)) result, status = rpc_result if status != RpcStatus.OK: raise RPCException() result_rows.extend(result.data) if len(result_rows) >= self.fetch_batch_size or not result.has_more: rows = [row.split('\t') for row in result_rows] result_rows = [] yield rows if not result.has_more: break def close_dml(self, last_query_handle): """Fetches the results of a DML query. Returns a tuple containing the number of rows modified and the number of row errors, in that order. 
If the DML operation doesn't return 'num_row_errors', then the second element in the tuple is None.""" rpc_result = self._do_rpc( lambda: self.imp_service.CloseInsert(last_query_handle)) insert_result, status = rpc_result if status != RpcStatus.OK: raise RPCException() num_rows = sum([int(k) for k in insert_result.rows_modified.values()]) return (num_rows, insert_result.num_row_errors) def close_query(self, last_query_handle, query_handle_closed=False): """Close the query handle""" # Make closing a query handle idempotent if query_handle_closed: return True rpc_result = self._do_rpc(lambda: self.imp_service.close(last_query_handle)) _, status = rpc_result return status == RpcStatus.OK def cancel_query(self, last_query_handle, query_handle_closed=False): """Cancel a query on a keyboard interrupt from the shell.""" # Cancel sets query_state to EXCEPTION before calling cancel() in the # co-ordinator, so we don't need to wait. if query_handle_closed: return True rpc_result = self._do_rpc(lambda: self.imp_service.Cancel(last_query_handle), False) _, status = rpc_result return status == RpcStatus.OK def get_query_state(self, last_query_handle): rpc_result = self._do_rpc( lambda: self.imp_service.get_state(last_query_handle)) state, status = rpc_result if status != RpcStatus.OK: return self.query_state["EXCEPTION"] return state def get_runtime_profile(self, last_query_handle): rpc_result = self._do_rpc( lambda: self.imp_service.GetRuntimeProfile(last_query_handle)) profile, status = rpc_result if status == RpcStatus.OK and profile: return profile def get_summary(self, last_query_handle): """Calls GetExecSummary() for the last query handle""" rpc_result = self._do_rpc( lambda: self.imp_service.GetExecSummary(last_query_handle)) summary, status = rpc_result if status == RpcStatus.OK and summary: return summary return None def _do_rpc(self, rpc, suppress_error_on_cancel=True): """Executes the provided callable.""" if not self.connected: raise DisconnectedException("Not connected (use CONNECT to establish a connection)") return None, RpcStatus.ERROR try: ret = rpc() status = RpcStatus.OK # TODO: In the future more advanced error detection/handling can be done based on # the TStatus return value. For now, just print any error(s) that were encountered # and validate the result of the operation was a success. if ret is not None and isinstance(ret, TStatus): if ret.status_code != TErrorCode.OK: if ret.error_msgs: raise RPCException ('RPC Error: %s' % '\n'.join(ret.error_msgs)) status = RpcStatus.ERROR return ret, status except BeeswaxService.QueryNotFoundException: if suppress_error_on_cancel and self.is_query_cancelled: raise QueryCancelledByShellException() raise QueryStateException('Error: Stale query handle') # beeswaxException prints out the entire object, printing # just the message is far more readable/helpful. 
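    # Note: the handlers below use Python 2 "except ExcType, name" syntax, which binds
    # the caught exception instance to the trailing name (b, e, t).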
except BeeswaxService.BeeswaxException, b: # Suppress the errors from cancelling a query that is in fetch state if suppress_error_on_cancel and self.is_query_cancelled: raise QueryCancelledByShellException() raise RPCException("ERROR: %s" % b.message) except TTransportException, e: # issue with the connection with the impalad raise DisconnectedException("Error communicating with impalad: %s" % e) except TApplicationException, t: # Suppress the errors from cancelling a query that is in waiting_to_finish # state if suppress_error_on_cancel and self.is_query_cancelled: raise QueryCancelledByShellException() raise RPCException("Application Exception : %s" % t) return None, RpcStatus.ERROR def _get_sleep_interval(self, start_time): """Returns a step function of time to sleep in seconds before polling again. Maximum sleep is 1s, minimum is 0.1s""" elapsed = time.time() - start_time if elapsed < 10.0: return 0.1 elif elapsed < 60.0: return 0.5 return 1.0 def get_column_names(self, last_query_handle): rpc_result = self._do_rpc( lambda: self.imp_service.get_results_metadata(last_query_handle)) metadata, _ = rpc_result if not metadata is None: return [fs.name for fs in metadata.schema.fieldSchemas] def expect_result_metadata(self, query_str): """ Given a query string, return True if impalad expects result metadata""" excluded_query_types = ['use', 'drop'] if True in set(map(query_str.startswith, excluded_query_types)): return False return True def get_warning_log(self, last_query_handle): if last_query_handle is None: return "Query could not be executed" rpc_result = self._do_rpc( lambda: self.imp_service.get_log(last_query_handle.log_context)) log, status = rpc_result if status != RpcStatus.OK: return "Failed to get error log: %s" % status if log and log.strip(): return "WARNINGS: %s" % log return ""
37.917172
90
0.699931
7957692c7f4f576f32e0b4d7957f34de13b051fb
2,625
py
Python
tests/mypy/test_mypy.py
ducminhgd/typical
24157060fe463b0a5b110885e69544995c576ccc
[ "MIT" ]
null
null
null
tests/mypy/test_mypy.py
ducminhgd/typical
24157060fe463b0a5b110885e69544995c576ccc
[ "MIT" ]
null
null
null
tests/mypy/test_mypy.py
ducminhgd/typical
24157060fe463b0a5b110885e69544995c576ccc
[ "MIT" ]
null
null
null
import importlib import os import re from pathlib import Path import pytest from mypy import api as mypy_api # This ensures mypy can find the test files, no matter where tests are run from: os.chdir(Path(__file__).parent.parent.parent) cases = ( ("mypy.ini", "success.py", "success.txt"), ("mypy.ini", "fail.py", "fail.txt"), ) executable_modules = ("success",) @pytest.mark.skipif( "sys.version_info > (3, 8)", reason="Mypy doesn't yet support Python 3.9." ) @pytest.mark.parametrize("config_filename,python_filename,output_filename", cases) def test_mypy_results(config_filename, python_filename, output_filename): full_config_filename = f"tests/mypy/config/{config_filename}" full_filename = f"tests/mypy/module/{python_filename}" output_path = ( None if output_filename is None else Path(f"tests/mypy/output/{output_filename}") ) # Specifying a different cache dir for each configuration dramatically speeds up # subsequent execution. # It also prevents cache-invalidation-related bugs in the tests cache_dir = f".mypy_cache/test-{config_filename[:-4]}" command = [ full_filename, "--config-file", full_config_filename, "--cache-dir", cache_dir, "--show-error-codes", ] print( f"\nExecuting: mypy {' '.join(command)}" ) # makes it easier to debug as necessary actual_result = mypy_api.run(command) actual_out, actual_err, actual_returncode = actual_result # Need to strip filenames due to differences in formatting by OS actual_out = "\n".join( [".py:".join(line.split(".py:")[1:]) for line in actual_out.split("\n") if line] ).strip() actual_out = re.sub(r"\n\s*\n", r"\n", actual_out) if actual_out: print( "{0}\n{1:^100}\n{0}\n{2}\n{0}".format("=" * 100, "mypy output", actual_out) ) assert actual_err == "" expected_returncode = 0 if "success" in output_filename else 1 assert actual_returncode == expected_returncode if output_path and not output_path.exists(): output_path.write_text(actual_out) raise RuntimeError( f"wrote actual output to {output_path} since file did not exist" ) expected_out = Path(output_path).read_text() if output_path else "" assert actual_out.rstrip() == expected_out.rstrip(), actual_out @pytest.mark.parametrize("module", executable_modules) def test_success_cases_run(module): """ Ensure the "success" files can actually be executed """ importlib.import_module(f"tests.mypy.module.{module}")
32.407407
88
0.671619
795769480c4a6dafa7f0d066138ed179735bb05c
2,724
py
Python
applications/MetisApplication/test_exemples/square_domain/square_domain_contact.py
lkusch/Kratos
e8072d8e24ab6f312765185b19d439f01ab7b27b
[ "BSD-4-Clause" ]
778
2017-01-27T16:29:17.000Z
2022-03-30T03:01:51.000Z
applications/MetisApplication/test_exemples/square_domain/square_domain_contact.py
lkusch/Kratos
e8072d8e24ab6f312765185b19d439f01ab7b27b
[ "BSD-4-Clause" ]
6,634
2017-01-15T22:56:13.000Z
2022-03-31T15:03:36.000Z
applications/MetisApplication/test_exemples/square_domain/square_domain_contact.py
lkusch/Kratos
e8072d8e24ab6f312765185b19d439f01ab7b27b
[ "BSD-4-Clause" ]
224
2017-02-07T14:12:49.000Z
2022-03-06T23:09:34.000Z
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7 # # # setting the domain size for the problem to be solved domain_size = 2 import mpi # # # ATTENTION: here the order is important # including kratos path kratos_libs_path = '../../../../libs' # kratos_root/libs # kratos_libs_path = 'C:/kratosR1/libs' ##kratos_root/libs kratos_applications_path = '../../../../applications' # kratos_root/applications import sys sys.path.append(kratos_libs_path) sys.path.append(kratos_applications_path) # importing Kratos main library from Kratos import * kernel = Kernel() # defining kernel print("importing KratosMetisApplication ...") sys.path.append(kratos_applications_path + '/metis_application/python_scripts') from KratosMetisApplication import * metis_application = KratosMetisApplication() kernel.AddApplication(metis_application) print("KratosMetisApplication sucessfully imported") # importing applications import applications_interface applications_interface.Import_IncompressibleFluidApplication = True applications_interface.ImportApplications(kernel, kratos_applications_path) kernel.InitializeApplication(metis_application) # from now on the order is not anymore crucial # # # defining a model part model_part = ModelPart("FluidPart") model_part.AddNodalSolutionStepVariable(VELOCITY) model_part.AddNodalSolutionStepVariable(PARTITION_INDEX) # reading a model gid_mode = GiDPostMode.GiD_PostBinary use_multi_file = MultiFileFlag.MultipleFiles deformed_mesh_flag = WriteDeformedMeshFlag.WriteDeformed write_conditions = WriteConditionsFlag.WriteElementsOnly gid_io = GidIO("test", gid_mode, use_multi_file, deformed_mesh_flag, write_conditions) # print kernel print("line 58") number_of_partitions = mpi.size # we set it equal to the number of processors # metis_partitioning_process = MetisPartitioningProcess(model_part, gid_io, number_of_partitions) contact_indices = IndicesVector() contact_indices[:] = [1, 2, 3, 6, 7, 11, 12] metis_partitioning_process = MetisContactPartitioningProcess(model_part, gid_io, number_of_partitions, contact_indices) print("line 62") print(GetRank(), ": i am : ... ") import time time.sleep(2.0) metis_partitioning_process.Execute() print("line 65") print(GetRank(), ": metis_partitioning_process finised") print(model_part) gid_io.InitializeMesh(GetRank()) gid_io.WriteMesh(model_part.GetMesh()) gid_io.FinalizeMesh() gid_io.InitializeResults(GetRank(), model_part.GetMesh()) gid_io.WriteNodalResults(PARTITION_INDEX, model_part.Nodes, 0, 0) gid_io.FinalizeResults() # # command line: # mpirun -np 2 /usr/bin/mpipython square_domain.py # where 2 is the mumber of threads #
28.978723
134
0.805433
79576b349fac5a6d78fa102c6565bafdc32e59e3
31,244
py
Python
python/ct/crypto/cert_test.py
kucske/certificate-transparency
fb5b4503f7e3ddf6721fed60cc8b019303742cd0
[ "Apache-2.0" ]
807
2015-01-03T09:12:09.000Z
2022-03-31T14:59:13.000Z
python/ct/crypto/cert_test.py
kucske/certificate-transparency
fb5b4503f7e3ddf6721fed60cc8b019303742cd0
[ "Apache-2.0" ]
845
2015-01-02T02:03:18.000Z
2022-03-16T09:12:55.000Z
python/ct/crypto/cert_test.py
kucske/certificate-transparency
fb5b4503f7e3ddf6721fed60cc8b019303742cd0
[ "Apache-2.0" ]
313
2015-01-08T07:50:30.000Z
2022-03-03T14:25:46.000Z
#!/usr/bin/env python # coding=utf-8 import unittest import time from ct.crypto import cert from ct.crypto import error from ct.crypto.asn1 import oid from ct.crypto.asn1 import x509_common from ct.crypto.asn1 import x509_extension as x509_ext from ct.crypto.asn1 import x509_name from ct.crypto.asn1 import x509 from ct.test import test_config class CertificateTest(unittest.TestCase): _PEM_FILE = "google_cert.pem" # Contains 3 certificates # C=US/ST=California/L=Mountain View/O=Google Inc/CN=www.google.com # C=US/O=Google Inc/CN=Google Internet Authority # C=US/O=Equifax/OU=Equifax Secure Certificate Authority _PEM_CHAIN_FILE = "google_chain.pem" _DER_FILE = "google_cert.der" # An X509v1 certificate _V1_PEM_FILE = "v1_cert.pem" # A old but common (0.5% of all certs as of 2013-10-01) SSL # cert that uses a different or older DER format for Boolean # values. _PEM_MATRIXSSL = "matrixssl_sample.pem" # Self-signed cert by marchnetworks.com for embedded systems # and uses start date in form of "0001010000Z" (no seconds) _PEM_MARCHNETWORKS = "marchnetworks_com.pem" # Self-signed cert by subrigo.net for embedded systems # and uses a start date in the form of 121214093107+0000 _PEM_SUBRIGONET = "subrigo_net.pem" # Self-signed cert by promise.com (as of 2013-10-16) that # is in use by embedded systems. # # * has a start date in the format of 120703092726-1200 # * uses a 512-key RSA key _PEM_PROMISECOM = "promise_com.pem" # This self-signed cert was used to test proper (or # improper) handling of UTF-8 characters in CN # See CVE 2009-2408 for more details # # Mozilla bug480509 # https://bugzilla.mozilla.org/show_bug.cgi?id=480509 # Mozilla bug484111 # https://bugzilla.mozilla.org/show_bug.cgi?id=484111 # RedHat bug510251 # https://bugzilla.redhat.com/show_bug.cgi?id=510251 _PEM_CN_UTF8 = "cn_utf8.pem" # A self-signed cert with null characters in various names # Misparsing was involved in CVE 2009-2408 (above) and # CVE-2013-4248 _PEM_NULL_CHARS = "null_chars.pem" # A certificate with a negative serial number, and, for more fun, # an extra leading ff-octet therein. _PEM_NEGATIVE_SERIAL = "negative_serial.pem" # A certificate with an ECDSA key and signature. _PEM_ECDSA = "ecdsa_cert.pem" # A certificate with multiple EKU extensions. _PEM_MULTIPLE_EKU = "multiple_eku.pem" # A certificate with multiple "interesting" SANs. _PEM_MULTIPLE_AN = "multiple_an.pem" # A certificate with multiple CN attributes. _PEM_MULTIPLE_CN = "multiple_cn.pem" # A certificate with authority cert issuer and authority cert serial. _PEM_AKID = "authority_keyid.pem" # A certificate chain with an EV policy. _PEM_EV_CHAIN = "ev_chain.pem" # EV OID for VeriSign Class 3 Public Primary Certification Authority _EV_POLICY_OID = oid.ObjectIdentifier(value="2.16.840.1.113733.1.7.23.6") _PEM_MULTIPLE_POLICIES = "multiple_policies.pem" # A certificate with a UserNotice containing a VisibleString. _PEM_USER_NOTICE = "user_notice.pem" # A certificate with an invalid (8-byte) IP address in a SAN. _PEM_INVALID_IP = "invalid_ip.pem" # A certificate with both kinds of AIA information. _PEM_AIA = "aia.pem" # A certificate with ASN1 indefinite length encoding. _PEM_INDEFINITE_LENGTH = "asn1_indefinite_length_encoding.pem" # A certificate with 99991231235959Z expiration date _PEM_NOT_WELL_DEFINED_EXPIRATION = "expiration_not_well_defined.pem" # A certificate with street address, postal code etc. 
provided _PEM_WITH_ADDRESS = "cert_with_address.pem" @property def pem_file(self): return test_config.get_test_file_path(self._PEM_FILE) def get_file(self, filename): return test_config.get_test_file_path(filename) def cert_from_pem_file(self, filename, strict=True): return cert.Certificate.from_pem_file( self.get_file(filename), strict_der=strict) def test_from_pem_file(self): c = self.cert_from_pem_file(self._PEM_FILE) self.assertTrue(isinstance(c, cert.Certificate)) def test_certs_from_pem_file(self): certs = list(cert.certs_from_pem_file(self.get_file( self._PEM_CHAIN_FILE))) self.assertEqual(3, len(certs)) self.assertTrue(all(map(lambda x: isinstance(x, cert.Certificate), certs))) self.assertTrue("google.com" in certs[0].print_subject_name()) self.assertTrue("Google Inc" in certs[1].print_subject_name()) self.assertTrue("Equifax" in certs[2].print_subject_name()) def test_from_pem(self): with open(self.get_file(self._PEM_FILE)) as f: c = cert.Certificate.from_pem(f.read()) self.assertTrue(isinstance(c, cert.Certificate)) def test_to_pem(self): with open(self.get_file(self._PEM_FILE)) as f: c = cert.Certificate.from_pem(f.read()) # PEM files can and do contain arbitrary additional information, # so we can't assert equality with the original contents. # Instead, simply check that we can read the newly constructed PEM. new_pem = c.to_pem() c2 = cert.Certificate.from_pem(new_pem) self.assertTrue(c2.is_identical_to(c)) def test_all_from_pem(self): with open(self.get_file(self._PEM_CHAIN_FILE)) as f: certs = list(cert.certs_from_pem(f.read())) self.assertEqual(3, len(certs)) self.assertTrue(all(map(lambda x: isinstance(x, cert.Certificate), certs))) self.assertTrue("google.com" in certs[0].print_subject_name()) self.assertTrue("Google Inc" in certs[1].print_subject_name()) self.assertTrue("Equifax" in certs[2].print_subject_name()) def test_from_der_file(self): c = cert.Certificate.from_der_file(self.get_file(self._DER_FILE)) self.assertTrue(isinstance(c, cert.Certificate)) def test_from_der(self): with open(self.get_file(self._DER_FILE), "rb") as f: cert_der = f.read() c = cert.Certificate.from_der(cert_der) self.assertTrue(isinstance(c, cert.Certificate)) self.assertEqual(c.to_der(), cert_der) def test_invalid_encoding_raises(self): self.assertRaises(error.EncodingError, cert.Certificate.from_der, "bogus_der_string") self.assertRaises(error.EncodingError, cert.Certificate.from_pem, "bogus_pem_string") def test_to_der(self): with open(self.get_file(self._DER_FILE), "rb") as f: der_string = f.read() c = cert.Certificate(der_string) self.assertEqual(der_string, c.to_der()) def test_identical_to_self(self): c = self.cert_from_pem_file(self._PEM_FILE) self.assertTrue(c.is_identical_to(c)) self.assertEqual(c, c) def test_identical(self): c = self.cert_from_pem_file(self._PEM_FILE) c2 = self.cert_from_pem_file(self._PEM_FILE) self.assertTrue(c.is_identical_to(c2)) self.assertTrue(c2.is_identical_to(c)) self.assertEqual(c2, c) def test_not_identical(self): c = self.cert_from_pem_file(self._PEM_FILE) c2 = self.cert_from_pem_file(self._V1_PEM_FILE) self.assertFalse(c2.is_identical_to(c)) self.assertNotEqual(c2, c) self.assertNotEqual(c2, "foo") def test_hash(self): c = self.cert_from_pem_file(self._PEM_FILE) c2 = self.cert_from_pem_file(self._PEM_FILE) self.assertEqual(hash(c), hash(c)) self.assertEqual(hash(c), hash(c2)) def test_parse_matrixssl(self): """Test parsing of old MatrixSSL.org sample certificate As of 2013-10-01, about 0.5% of all SSL sites use an old sample certificate from 
MatrixSSL.org. It appears it's used mostly for various home routers. Unfortunately it uses a non-DER encoding for boolean value: the DER encoding of True is 0xFF but this cert uses a BER encoding of 0x01. This causes pure DER parsers to break. This test makes sure we can parse this cert without exceptions or errors. """ self.assertRaises(error.ASN1Error, self.cert_from_pem_file, self._PEM_MATRIXSSL) c = self.cert_from_pem_file(self._PEM_MATRIXSSL, strict=False) issuer = c.print_issuer_name() self.assertTrue("MatrixSSL Sample Server" in issuer) def test_parse_marchnetworks(self): """Test parsing certificates issued by marchnetworks.com.""" c = self.cert_from_pem_file(self._PEM_MARCHNETWORKS) issuer = c.print_issuer_name() self.assertTrue("March Networks" in issuer) # 0001010000Z expected = [2000, 1, 1, 0, 0, 0, 5, 1, 0] self.assertEqual(list(c.not_before()), expected) # 3001010000Z expected = [2030, 1, 1, 0, 0, 0, 1, 1, 0] self.assertEqual(list(c.not_after()), expected) def test_parse_subrigonet(self): """Test parsing certificates issued by subrigo.net The certificates issued by subrigo.net (non-root) use an start date with time zone. Not Before: Dec 14 09:31:07 2012 Not After : Dec 13 09:31:07 2022 GMT """ c = self.cert_from_pem_file(self._PEM_SUBRIGONET) issuer = c.print_issuer_name() self.assertTrue("subrigo.net" in issuer) # timezone format -- 121214093107+0000 expected = [2012, 12, 14, 9, 31, 7, 4, 349, 0] self.assertEqual(list(c.not_before()), expected) # standard format -- 221213093107Z expected = [2022, 12, 13, 9, 31, 7, 1, 347, 0] self.assertEqual(list(c.not_after()), expected) def test_utf8_names(self): c = self.cert_from_pem_file(self._PEM_CN_UTF8) nameutf8 = "ñeco ñýáěšžěšžřěčíě+ščýáíéřáíÚ" unicodename = u"ñeco ñýáěšžěšžřěčíě+ščýáíéřáíÚ" # Compare UTF-8 strings directly. self.assertEqual(c.print_subject_name(), "CN=" + nameutf8) self.assertEqual(c.print_issuer_name(), "CN=" + nameutf8) cns = c.subject_common_names() self.assertEqual(1, len(cns)) self.assertEqual(cns[0], nameutf8) # Name comparison is unicode-based so decode and compare unicode names. # TODO(ekasper): implement proper stringprep-based name comparison # and use these test cases there. self.assertEqual(cns[0].value.decode("utf8"), unicodename) def test_null_chars_in_names(self): """Test handling null chars in subject and subject alternative names.""" c = self.cert_from_pem_file(self._PEM_NULL_CHARS) cns = c.subject_common_names() self.assertEqual(1, len(cns)) self.assertEqual("null.python.org\000example.org", cns[0]) alt_names = c.subject_alternative_names() self.assertEqual(len(alt_names), 5) self.assertEqual(alt_names[0].component_key(), x509_name.DNS_NAME) self.assertEqual(alt_names[0].component_value(), "altnull.python.org\000example.com") self.assertEqual(alt_names[1].component_key(), x509_name.RFC822_NAME) self.assertEqual(alt_names[1].component_value(), "null@python.org\000user@example.org") self.assertEqual(alt_names[2].component_key(),x509_name.URI_NAME) self.assertEqual(alt_names[2].component_value(), "http://null.python.org\000http://example.org") # the following does not contain nulls. 
self.assertEqual(alt_names[3].component_key(), x509_name.IP_ADDRESS_NAME) self.assertEqual(alt_names[3].component_value().as_octets(), (192, 0, 2, 1)) self.assertEqual(alt_names[4].component_key(), x509_name.IP_ADDRESS_NAME) self.assertEqual(alt_names[4].component_value().as_octets(), (32, 1, 13, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)) def test_parse_promisecom(self): """Test parsing certificates issued by promise.com The certificates issued by promise.com (non-root) use an start date with time zone (and are 512-bit) Not Before: Jun 29 15:32:48 2011 Not After : Jun 26 15:32:48 2021 GMT """ c = self.cert_from_pem_file(self._PEM_PROMISECOM) issuer = c.print_issuer_name() self.assertTrue("Promise Technology Inc." in issuer) # 110629153248-1200 expected = [2011,6,29,15,32,48,2,180,0] self.assertEqual(list(c.not_before()), expected) # 210626153248Z expected = [2021,6,26,15,32,48,5,177,0] self.assertEqual(list(c.not_after()), expected) def test_parse_ecdsa_cert(self): c = self.cert_from_pem_file(self._PEM_ECDSA) self.assertTrue("kmonos.jp" in c.print_subject_name()) self.assertEquals(oid.ECDSA_WITH_SHA256, c.signature()["algorithm"]) self.assertEquals(oid.ECDSA_WITH_SHA256, c.signature_algorithm()["algorithm"]) def test_print_subject_name(self): c = self.cert_from_pem_file(self._PEM_FILE) subject = c.print_subject_name() # C=US, ST=California, L=Mountain View, O=Google Inc, CN=*.google.com self.assertTrue("US" in subject) self.assertTrue("California" in subject) self.assertTrue("Mountain View" in subject) self.assertTrue("Google Inc" in subject) self.assertTrue("*.google.com" in subject) def test_print_issuer_name(self): c = self.cert_from_pem_file(self._PEM_FILE) issuer = c.print_issuer_name() # Issuer: C=US, O=Google Inc, CN=Google Internet Authority self.assertTrue("US" in issuer) self.assertTrue("Google Inc" in issuer) self.assertTrue("Google Internet Authority" in issuer) def test_subject_common_names(self): c = self.cert_from_pem_file(self._PEM_FILE) cns = c.subject_common_names() self.assertEqual(1, len(cns)) self.assertEqual("*.google.com", cns[0]) def test_multiple_subject_common_names(self): c = self.cert_from_pem_file(self._PEM_MULTIPLE_CN) cns = c.subject_common_names() self.assertItemsEqual(cns, ["www.rd.io", "rdio.com", "rd.io", "api.rdio.com", "api.rd.io", "www.rdio.com"]) def test_subject_dns_names(self): c = self.cert_from_pem_file(self._PEM_FILE) dns_names = c.subject_dns_names() self.assertEqual(44, len(dns_names)) self.assertTrue("*.youtube.com" in dns_names) def test_subject_ip_addresses(self): c = self.cert_from_pem_file(self._PEM_MULTIPLE_AN) ips = c.subject_ip_addresses() self.assertEqual(1, len(ips)) self.assertEqual((129, 48, 105, 104), ips[0].as_octets()) def test_invalid_ip_addresses(self): with self.assertRaises(error.ASN1Error) as fail: self.cert_from_pem_file(self._PEM_INVALID_IP) self.assertIn("00000000ffffff00", str(fail.exception)) c = self.cert_from_pem_file(self._PEM_INVALID_IP, strict=False) ips = c.subject_ip_addresses() self.assertEqual(1, len(ips)) self.assertEqual((0, 0, 0, 0, 255, 255, 255, 0), ips[0].as_octets()) def test_subject_alternative_names(self): cert = self.cert_from_pem_file(self._PEM_MULTIPLE_AN) sans = cert.subject_alternative_names() self.assertEqual(4, len(sans)) self.assertEqual(x509_name.DNS_NAME, sans[0].component_key()) self.assertEqual("spires.wpafb.af.mil", sans[0].component_value()) self.assertEqual(x509_name.DIRECTORY_NAME, sans[1].component_key()) self.assertTrue(isinstance(sans[1].component_value(), x509_name.Name), 
sans[1].component_value()) self.assertEqual(x509_name.IP_ADDRESS_NAME, sans[2].component_key()) self.assertEqual((129, 48, 105, 104), sans[2].component_value().as_octets()) self.assertEqual(x509_name.URI_NAME, sans[3].component_key()) self.assertEqual("spires.wpafb.af.mil", sans[3].component_value()) def test_no_alternative_names(self): c = cert.Certificate.from_pem_file(self.get_file(self._V1_PEM_FILE)) self.assertEqual(0, len(c.subject_alternative_names())) self.assertEqual(0, len(c.subject_dns_names())) self.assertEqual(0, len(c.subject_ip_addresses())) def test_validity(self): certs = list(cert.certs_from_pem_file( self.get_file(self._PEM_CHAIN_FILE))) self.assertEqual(3, len(certs)) # notBefore: Sat Aug 22 16:41:51 1998 GMT # notAfter: Wed Aug 22 16:41:51 2018 GMT c = certs[2] # Aug 22 16:41:51 2018 self.assertTrue(c.is_temporally_valid_at(time.gmtime(1534956111))) # Aug 22 16:41:52 2018 self.assertFalse(c.is_temporally_valid_at(time.gmtime(1534956112))) # Aug 22 16:41:50 1998 self.assertFalse(c.is_temporally_valid_at(time.gmtime(903804110))) # Aug 22 16:41:51 1998 self.assertTrue(c.is_temporally_valid_at(time.gmtime(903804111))) def test_basic_constraints(self): certs = list(cert.certs_from_pem_file( self.get_file(self._PEM_CHAIN_FILE))) self.assertFalse(certs[0].basic_constraint_ca()) self.assertTrue(certs[1].basic_constraint_ca()) self.assertIsNone(certs[0].basic_constraint_path_length()) self.assertEqual(0, certs[1].basic_constraint_path_length()) def test_version(self): c = self.cert_from_pem_file(self._PEM_FILE) self.assertEqual(3, c.version()) def test_issuer_common_name(self): c = self.cert_from_pem_file(self._PEM_FILE) icn = c.issuer_common_name() self.assertIn("Google Internet Authority", icn[0].value) self.assertEqual(len(icn), 1) def test_issuer_country_name(self): c = self.cert_from_pem_file(self._PEM_FILE) icn = c.issuer_country_name() self.assertIn("US", icn) self.assertEqual(len(icn), 1) def test_subject_organization_name(self): c = self.cert_from_pem_file(self._PEM_FILE) icn = c.subject_organization_name() self.assertIn("Google Inc", icn) self.assertEqual(len(icn), 1) def test_subject_street_address(self): c = self.cert_from_pem_file(self._PEM_WITH_ADDRESS) address = c.subject_street_address() self.assertIn("CQ Mail Centre", address) self.assertIn("Building 19", address) def test_subject_locality_name(self): c = self.cert_from_pem_file(self._PEM_WITH_ADDRESS) locality_name = c.subject_locality_name() self.assertIn("Rockhampton", locality_name) def test_subject_state_or_province(self): c = self.cert_from_pem_file(self._PEM_WITH_ADDRESS) state_or_province = c.subject_state_or_province_name() self.assertIn("Queensland", state_or_province) def test_subject_postal_code(self): c = self.cert_from_pem_file(self._PEM_WITH_ADDRESS) postal_code = c.subject_postal_code() self.assertIn("4702", postal_code) def test_serial_number(self): c = self.cert_from_pem_file(self._PEM_FILE) self.assertEqual(454887626504608315115709, c.serial_number()) def test_negative_serial_number(self): # Fails because of the leading ff-octet. 
self.assertRaises(error.ASN1Error, self.cert_from_pem_file, self._PEM_NEGATIVE_SERIAL) c = self.cert_from_pem_file(self._PEM_NEGATIVE_SERIAL, strict=False) self.assertEqual(-218943125988803304701934765446014018, c.serial_number()) def test_v1_cert(self): c = self.cert_from_pem_file(self._V1_PEM_FILE) self.assertEqual(1, c.version()) self.assertIsNone(c.basic_constraint_ca()) def test_fingerprint(self): c = cert.Certificate.from_der_file(self.get_file(self._DER_FILE)) self.assertEqual(c.fingerprint().encode("hex"), "570fe2e3bfee986ed4a158aed8770f2e21614659") self.assertEqual(c.fingerprint("sha1").encode("hex"), "570fe2e3bfee986ed4a158aed8770f2e21614659") self.assertEqual(c.fingerprint("sha256").encode("hex"), "6d4106b4544e9e5e7a0924ee86a577ffefaadae8b8dad73413a7" "d874747a81d1") def test_key_usage(self): c = cert.Certificate.from_pem_file(self.get_file(self._PEM_FILE)) self.assertTrue(c.key_usage(x509_ext.KeyUsage.DIGITAL_SIGNATURE)) certs = [c for c in cert.certs_from_pem_file(self.get_file( self._PEM_CHAIN_FILE))] # This leaf cert does not have a KeyUsage extension. self.assertEqual([], certs[0].key_usages()) self.assertIsNone(certs[0].key_usage( x509_ext.KeyUsage.DIGITAL_SIGNATURE)) # The second cert has keyCertSign and cRLSign. self.assertIsNotNone(certs[1].key_usage( x509_ext.KeyUsage.DIGITAL_SIGNATURE)) self.assertFalse(certs[1].key_usage( x509_ext.KeyUsage.DIGITAL_SIGNATURE)) self.assertTrue(certs[1].key_usage(x509_ext.KeyUsage.KEY_CERT_SIGN)) self.assertTrue(certs[1].key_usage(x509_ext.KeyUsage.CRL_SIGN)) self.assertItemsEqual([x509_ext.KeyUsage.KEY_CERT_SIGN, x509_ext.KeyUsage.CRL_SIGN], certs[1].key_usages()) def test_extended_key_usage(self): certs = [c for c in cert.certs_from_pem_file(self.get_file( self._PEM_CHAIN_FILE))] self.assertTrue(certs[0].extended_key_usage(oid.ID_KP_SERVER_AUTH)) self.assertIsNotNone( certs[0].extended_key_usage(oid.ID_KP_CODE_SIGNING)) self.assertFalse(certs[0].extended_key_usage(oid.ID_KP_CODE_SIGNING)) self.assertItemsEqual([oid.ID_KP_SERVER_AUTH, oid.ID_KP_CLIENT_AUTH], certs[0].extended_key_usages()) # EKU is normally only found in leaf certs. self.assertIsNone(certs[1].extended_key_usage(oid.ID_KP_SERVER_AUTH)) self.assertEqual([], certs[1].extended_key_usages()) def test_multiple_extensions(self): self.assertRaises(error.ASN1Error, cert.Certificate.from_pem_file, self.get_file(self._PEM_MULTIPLE_EKU)) c = cert.Certificate.from_pem_file(self.get_file(self._PEM_MULTIPLE_EKU), strict_der=False) self.assertTrue("www.m-budget-mobile-abo.ch" in c.subject_common_names()) self.assertRaises(cert.CertificateError, c.extended_key_usages) def test_key_identifiers(self): certs = [c for c in cert.certs_from_pem_file(self.get_file( self._PEM_CHAIN_FILE))] self.assertEqual("\x12\x4a\x06\x24\x28\xc4\x18\xa5\x63\x0b\x41\x6e\x95" "\xbf\x72\xb5\x3e\x1b\x8e\x8f", certs[0].subject_key_identifier()) self.assertEqual("\xbf\xc0\x30\xeb\xf5\x43\x11\x3e\x67\xba\x9e\x91\xfb" "\xfc\x6a\xda\xe3\x6b\x12\x24", certs[0].authority_key_identifier()) self.assertIsNone(certs[0].authority_key_identifier( identifier_type=x509_ext.AUTHORITY_CERT_ISSUER)) self.assertIsNone(certs[0].authority_key_identifier( identifier_type=x509_ext.AUTHORITY_CERT_SERIAL_NUMBER)) self.assertEqual(certs[0].authority_key_identifier(), certs[1].subject_key_identifier()) c = self.cert_from_pem_file(self._PEM_AKID) cert_issuers = c.authority_key_identifier( identifier_type=x509_ext.AUTHORITY_CERT_ISSUER) self.assertEqual(1, len(cert_issuers)) # A DirectoryName. 
cert_issuer = cert_issuers[0] self.assertEqual(x509_name.DIRECTORY_NAME, cert_issuer.component_key()) self.assertEqual(["KISA RootCA 1"], cert_issuer.component_value().attributes( oid.ID_AT_COMMON_NAME)) self.assertEqual(10119, c.authority_key_identifier( identifier_type=x509_ext.AUTHORITY_CERT_SERIAL_NUMBER)) def test_policies(self): certs = [c for c in cert.certs_from_pem_file(self.get_file( self._PEM_EV_CHAIN))] ev_cert = certs[0] policies = ev_cert.policies() self.assertEqual(1, len(policies)) self.assertTrue(ev_cert.has_policy(self._EV_POLICY_OID)) self.assertFalse(ev_cert.has_policy(oid.ANY_POLICY)) policy = ev_cert.policy(self._EV_POLICY_OID) qualifiers = policy[x509_ext.POLICY_QUALIFIERS] self.assertEqual(1, len(qualifiers)) qualifier = qualifiers[0] self.assertEqual(oid.ID_QT_CPS, qualifier[x509_ext.POLICY_QUALIFIER_ID]) # CPS location is an Any(IA5String). self.assertEqual("https://www.verisign.com/cps", qualifier[x509_ext.QUALIFIER].decoded_value) any_cert = certs[1] policies = any_cert.policies() self.assertEqual(1, len(policies)) self.assertFalse(any_cert.has_policy(self._EV_POLICY_OID)) self.assertTrue(any_cert.has_policy(oid.ANY_POLICY)) policy = ev_cert.policy(self._EV_POLICY_OID) qualifiers = policy[x509_ext.POLICY_QUALIFIERS] self.assertEqual(1, len(qualifiers)) qualifier = qualifiers[0] self.assertEqual(oid.ID_QT_CPS, qualifier[x509_ext.POLICY_QUALIFIER_ID]) # CPS location is an IA5String. self.assertEqual("https://www.verisign.com/cps", qualifier[x509_ext.QUALIFIER].decoded_value) no_policy_cert = certs[2] self.assertEqual(0, len(no_policy_cert.policies())) self.assertFalse(no_policy_cert.has_policy(self._EV_POLICY_OID)) self.assertFalse(no_policy_cert.has_policy(oid.ANY_POLICY)) def test_multiple_policies(self): c = self.cert_from_pem_file(self._PEM_MULTIPLE_POLICIES) policies = c.policies() self.assertEqual(2, len(policies)) self.assertTrue(c.has_policy(oid.ObjectIdentifier( value="1.3.6.1.4.1.6449.1.2.2.7"))) self.assertTrue(c.has_policy(oid.ObjectIdentifier( value="2.23.140.1.2.1"))) self.assertFalse(c.has_policy(oid.ANY_POLICY)) def test_user_notice(self): c = self.cert_from_pem_file(self._PEM_USER_NOTICE) policies = c.policies() self.assertEqual(1, len(policies)) qualifiers = policies[0][x509_ext.POLICY_QUALIFIERS] self.assertEqual(2, len(qualifiers)) qualifier = qualifiers[0] self.assertEqual(oid.ID_QT_UNOTICE, qualifier[x509_ext.POLICY_QUALIFIER_ID]) qualifier = qualifier[x509_ext.QUALIFIER].decoded_value self.assertIsNone(qualifier[x509_ext.NOTICE_REF]) expected_text = ("For more details, please visit our website " "https://www.cybertrust.ne.jp .") explicit_text = qualifier[x509_ext.EXPLICIT_TEXT].component_value() self.assertEqual(expected_text, explicit_text) def test_crl_distribution_points(self): c = self.cert_from_pem_file(self._PEM_FILE) crls = c.crl_distribution_points() self.assertEqual(1, len(crls)) crl = crls[0] # Optional components, not present. self.assertIsNone(crl[x509_ext.REASONS]) self.assertIsNone(crl[x509_ext.CRL_ISSUER]) # This is the prevalent form of CRL distribution points. dist_points = crl[x509_ext.DISTRIBUTION_POINT] self.assertEqual(x509_ext.FULL_NAME, dist_points.component_key()) self.assertEqual(1, len(dist_points.component_value())) # A GeneralName URI. 
dist_point = dist_points.component_value()[0] self.assertEqual("http://www.gstatic.com/GoogleInternetAuthority/" "GoogleInternetAuthority.crl", dist_point[x509_name.URI_NAME]) def test_aia(self): c = self.cert_from_pem_file(self._PEM_AIA) ca_issuers = c.ca_issuers() self.assertEqual(1, len(ca_issuers)) # A GeneralName URI. self.assertEqual("http://pki.google.com/GIAG2.crt", ca_issuers[0][x509_name.URI_NAME]) ocsp = c.ocsp_responders() self.assertEqual(1, len(ocsp)) self.assertEqual("http://clients1.google.com/ocsp", ocsp[0][x509_name.URI_NAME]) # Cert has CA issuers but no OCSP responders. c = self.cert_from_pem_file(self._PEM_FILE) self.assertItemsEqual([], c.ocsp_responders()) def test_is_self_signed_root(self): c = self.cert_from_pem_file(self._PEM_SUBRIGONET) self.assertTrue(c.is_self_signed()) def test_is_self_signed_leaf(self): c = self.cert_from_pem_file(self._PEM_AIA) self.assertFalse(c.is_self_signed()) def test_get_extensions(self): c = self.cert_from_pem_file(self._PEM_AIA) extensions = c.get_extensions() extensions_oids = [extension['extnID'] for extension in extensions] self.assertItemsEqual((oid.ID_CE_EXT_KEY_USAGE, oid.ID_CE_SUBJECT_ALT_NAME, oid.ID_PE_AUTHORITY_INFO_ACCESS, oid.ID_CE_SUBJECT_KEY_IDENTIFIER, oid.ID_CE_BASIC_CONSTRAINTS, oid.ID_CE_AUTHORITY_KEY_IDENTIFIER, oid.ID_CE_CERTIFICATE_POLICIES, oid.ID_CE_CRL_DISTRIBUTION_POINTS), extensions_oids) def test_tbscertificate(self): c = self.cert_from_pem_file(self._PEM_FILE) tbs = c.tbscertificate() self.assertTrue(isinstance(tbs, x509.TBSCertificate)) self.assertEqual( x509_common.CertificateSerialNumber(454887626504608315115709L), tbs["serialNumber"]) def test_indefinite_encoding(self): self.assertRaises(error.ASN1Error, self.cert_from_pem_file, self._PEM_INDEFINITE_LENGTH) c = self.cert_from_pem_file(self._PEM_INDEFINITE_LENGTH, strict=False) issuer = c.print_issuer_name() self.assertTrue("VeriSign Class 1 CA" in issuer) def test_expiration_not_well_defined(self): c = self.cert_from_pem_file(self._PEM_NOT_WELL_DEFINED_EXPIRATION) self.assertFalse(c.is_not_after_well_defined()) # Make sure that certificate with regular expiration date return true c = self.cert_from_pem_file(self._PEM_AIA) self.assertTrue(c.is_not_after_well_defined()) if __name__ == "__main__": unittest.main()
41.714286
81
0.659359
79576b40e0bbf99151a5592bece19f00141f8552
21,643
py
Python
skbio/stats/tests/test_power.py
ebolyen/scikit-bio
04dff688aa67de871e7c4b1c47f459d0f701b4d2
[ "BSD-3-Clause" ]
null
null
null
skbio/stats/tests/test_power.py
ebolyen/scikit-bio
04dff688aa67de871e7c4b1c47f459d0f701b4d2
[ "BSD-3-Clause" ]
null
null
null
skbio/stats/tests/test_power.py
ebolyen/scikit-bio
04dff688aa67de871e7c4b1c47f459d0f701b4d2
[ "BSD-3-Clause" ]
null
null
null
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function from unittest import TestCase, main import numpy as np import numpy.testing as npt import pandas as pd from scipy.stats import kruskal from skbio.stats.power import (subsample_power, subsample_paired_power, _check_nans, confidence_bound, _calculate_power, _compare_distributions, _calculate_power_curve, _check_subsample_power_inputs, _identify_sample_groups, _draw_paired_samples, _get_min_size, paired_subsamples ) class PowerAnalysisTest(TestCase): def setUp(self): # Defines a testing functions def test_meta(ids, meta, cat, div): """Checks thhe div metric with a kruskal wallis""" out = [meta.loc[id_, div] for id_ in ids] return kruskal(*out)[1] def meta_f(x): """Applies `test_meta` to a result""" return test_meta(x, self.meta, 'INT', 'DIV') def f(x): """returns the p value of a kruskal wallis test""" return kruskal(*x)[1] self.test_meta = test_meta self.f = f self.meta_f = meta_f self.num_p = 1 # Sets the random seed np.random.seed(5) # Sets up the distributions of data for use self.s1 = np.arange(0, 10, 1) # Sets up two distributions which will never be equal by a rank-sum # test. self.samps = [np.ones((10))/10., np.ones((10))] self.pop = [np.arange(0, 10, 0.1), np.arange(0, 20, 0.2)] # Sets up a vector of alpha values self.alpha = np.power(10, np.array([-1, -1.301, -2, -3])).round(3) # Sets up a vector of samples self.num_samps = np.arange(10, 100, 10) # Sets up a mapping file meta = {'GW': {'INT': 'N', 'ABX': np.nan, 'DIV': 19.5, 'AGE': '30s', 'SEX': 'M'}, 'CB': {'INT': 'Y', 'ABX': np.nan, 'DIV': 42.7, 'AGE': '30s', 'SEX': 'M'}, 'WM': {'INT': 'N', 'ABX': 'N', 'DIV': 27.5, 'AGE': '20s', 'SEX': 'F'}, 'MH': {'INT': 'Y', 'ABX': 'N', 'DIV': 62.3, 'AGE': '30s', 'SEX': 'F'}, 'CD': {'INT': 'Y', 'ABX': 'Y', 'DIV': 36.4, 'AGE': '40s', 'SEX': 'F'}, 'LF': {'INT': 'Y', 'ABX': 'N', 'DIV': 50.2, 'AGE': '20s', 'SEX': 'M'}, 'PP': {'INT': 'N', 'ABX': 'Y', 'DIV': 10.8, 'AGE': '30s', 'SEX': 'F'}, 'MM': {'INT': 'N', 'ABX': 'N', 'DIV': 55.6, 'AGE': '40s', 'SEX': 'F'}, 'SR': {'INT': 'N', 'ABX': 'Y', 'DIV': 2.2, 'AGE': '20s', 'SEX': 'M'}, 'TS': {'INT': 'N', 'ABX': 'Y', 'DIV': 16.1, 'AGE': '40s', 'SEX': 'M'}, 'PC': {'INT': 'Y', 'ABX': 'N', 'DIV': 82.6, 'AGE': '40s', 'SEX': 'M'}, 'NR': {'INT': 'Y', 'ABX': 'Y', 'DIV': 15.7, 'AGE': '20s', 'SEX': 'F'}} self.meta = pd.DataFrame.from_dict(meta, orient='index') self.meta_pairs = {0: [['GW', 'SR', 'TS'], ['CB', 'LF', 'PC']], 1: [['MM', 'PP', 'WM'], ['CD', 'MH', 'NR']]} self.pair_index = np.array([0, 0, 0, 1, 1, 1]) self.counts = np.array([5, 15, 25, 35, 45]) self.powers = [np.array([[0.105, 0.137, 0.174, 0.208, 0.280], [0.115, 0.135, 0.196, 0.204, 0.281], [0.096, 0.170, 0.165, 0.232, 0.256], [0.122, 0.157, 0.202, 0.250, 0.279], [0.132, 0.135, 0.173, 0.203, 0.279]]), np.array([[0.157, 0.345, 0.522, 0.639, 0.739], [0.159, 0.374, 0.519, 0.646, 0.757], [0.161, 0.339, 0.532, 0.634, 0.745], [0.169, 0.372, 0.541, 0.646, 0.762], [0.163, 0.371, 0.522, 0.648, 0.746]]), np.array([[0.276, 0.626, 0.865, 0.927, 0.992], [0.267, 0.667, 0.848, 0.937, 0.978], [0.236, 0.642, 0.850, 0.935, 0.977], [0.249, 0.633, 0.828, 0.955, 0.986], [0.249, 0.663, 0.869, 0.951, 0.985]])] 
self.power_alpha = 0.1 self.effects = np.array([0.15245, 0.34877, 0.55830]) self.bounds = np.array([0.01049, 0.00299, 0.007492]) self.labels = np.array(['Age', 'Intervenption', 'Antibiotics']) self.cats = np.array(['AGE', 'INT', 'ABX']) self.cat = "AGE" self.control_cats = ['INT', 'ABX'] def test_subsample_power_defaults(self): test_p, test_c = subsample_power(self.f, self.pop, num_iter=10, num_runs=5) self.assertEqual(test_p.shape, (5, 4)) npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c) def test_subsample_power_counts(self): test_p, test_c = subsample_power(self.f, samples=self.pop, num_iter=10, num_runs=2, min_counts=5) self.assertEqual(test_p.shape, (2, 5)) npt.assert_array_equal(np.arange(5, 50, 10), test_c) def test_subsample_power_matches(self): test_p, test_c = subsample_power(self.f, samples=self.pop, num_iter=10, num_runs=5, draw_mode="matched") self.assertEqual(test_p.shape, (5, 4)) npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c) def test_subsample_power_multi_p(self): test_p, test_c = subsample_power(lambda x: np.array([0.5, 0.5]), samples=self.pop, num_iter=10, num_runs=5) self.assertEqual(test_p.shape, (5, 4, 2)) npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c) def test_subsample_paired_power(self): known_c = np.array([1, 2, 3, 4]) # Sets up the handling values cat = 'INT' control_cats = ['SEX'] # Tests for the control cats test_p, test_c = subsample_paired_power(self.meta_f, meta=self.meta, cat=cat, control_cats=control_cats, counts_interval=1, num_iter=10, num_runs=2) # Test the output shapes are sane self.assertEqual(test_p.shape, (2, 4)) npt.assert_array_equal(known_c, test_c) def test_subsample_paired_power_multi_p(self): def f(x): return np.array([0.5, 0.5, 0.005]) cat = 'INT' control_cats = ['SEX'] # Tests for the control cats test_p, test_c = subsample_paired_power(f, meta=self.meta, cat=cat, control_cats=control_cats, counts_interval=1, num_iter=10, num_runs=2) self.assertEqual(test_p.shape, (2, 4, 3)) def test_check_nans_str(self): self.assertTrue(_check_nans('string')) def test_check_nans_num(self): self.assertTrue(_check_nans(4.2)) def test__check_nans_nan(self): self.assertFalse(_check_nans(np.nan)) def test__check_nans_clean_list(self): self.assertTrue(_check_nans(['foo', 'bar'], switch=True)) def test__check_nans_list_nan(self): self.assertFalse(_check_nans(['foo', np.nan], switch=True)) def test__check_str_error(self): with self.assertRaises(TypeError): _check_nans(self.f) def test__get_min_size_strict(self): known = 5 test = _get_min_size(self.meta, 'INT', ['ABX', 'SEX'], ['Y', 'N'], True) self.assertEqual(test, known) def test__get_min_size_relaxed(self): known = 5 test = _get_min_size(self.meta, 'INT', ['ABX', 'SEX'], ['Y', 'N'], False) self.assertEqual(known, test) def test_confidence_bound_default(self): # Sets the know confidence bound known = 2.2830070 test = confidence_bound(self.s1) npt.assert_almost_equal(test, known, 3) def test_confidence_bound_df(self): known = 2.15109 test = confidence_bound(self.s1, df=15) npt.assert_almost_equal(known, test, 3) def test_confidence_bound_alpha(self): known = 3.2797886 test = confidence_bound(self.s1, alpha=0.01) npt.assert_almost_equal(known, test, 3) def test_confidence_bound_nan(self): # Sets the value to test samples = np.array([[4, 3.2, 3.05], [2, 2.8, 2.95], [5, 2.9, 3.07], [1, 3.1, 2.93], [3, np.nan, 3.00]]) # Sets the know value known = np.array([2.2284, 0.2573, 0.08573]) # Tests the function test = confidence_bound(samples, axis=0) npt.assert_almost_equal(known, test, 3) 
def test_confidence_bound_axis_none(self): # Sets the value to test samples = np.array([[4, 3.2, 3.05], [2, 2.8, 2.95], [5, 2.9, 3.07], [1, 3.1, 2.93], [3, np.nan, 3.00]]) # Sest the known value known = 0.52852 # Tests the output test = confidence_bound(samples, axis=None) npt.assert_almost_equal(known, test, 3) def test__calculate_power(self): # Sets up the values to test crit = 0.025 # Sets the known value known = 0.5 # Calculates the test value test = _calculate_power(self.alpha, crit) # Checks the test value npt.assert_almost_equal(known, test) def test__calculate_power_n(self): crit = 0.025 known = np.array([0.5, 0.5]) alpha = np.vstack((self.alpha, self.alpha)) test = _calculate_power(alpha, crit) npt.assert_almost_equal(known, test) def test__compare_distributions_sample_counts_error(self): with self.assertRaises(ValueError): _compare_distributions(self.f, [self.pop[0][:5], self.pop[1]], 1, counts=25) def test__compare_distributions_all_mode(self): known = np.ones((100))*0.0026998 test = _compare_distributions(self.f, self.samps, 1, num_iter=100) npt.assert_allclose(known, test, 5) def test__compare_distributions_matched_mode(self): # Sets the known value known_mean = 0.162195 known_std = 0.121887 known_shape = (100,) # Tests the sample value test = _compare_distributions(self.f, self.pop, self.num_p, mode='matched', num_iter=100) npt.assert_allclose(known_mean, test.mean(), rtol=0.1, atol=0.02) npt.assert_allclose(known_std, test.std(), rtol=0.1, atol=0.02) self.assertEqual(known_shape, test.shape) def test__compare_distributions_draw_mode(self): draw_mode = 'Ultron' with self.assertRaises(ValueError): _check_subsample_power_inputs(self.f, self.pop, draw_mode, self.num_p) def test__compare_distributions_multiple_returns(self): known = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]) def f(x): return np.array([1, 2, 3]) test = _compare_distributions(f, self.pop, 3, mode='matched', num_iter=3) npt.assert_array_equal(known, test) def test_check_subsample_power_inputs_matched_mode(self): with self.assertRaises(ValueError): _check_subsample_power_inputs(self.f, samples=[np.ones((2)), np.ones((5))], draw_mode="matched") def test_check_subsample_power_inputs_counts(self): with self.assertRaises(ValueError): _check_subsample_power_inputs(self.f, samples=[np.ones((3)), np.ones((5))], min_counts=5, counts_interval=1000, max_counts=7) def test_check_subsample_power_inputs_ratio(self): with self.assertRaises(ValueError): _check_subsample_power_inputs(self.f, self.samps, ratio=np.array([1, 2, 3])) def test_check_subsample_power_inputs_test(self): # Defines a test function def test(x): return 'Hello World!' 
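        # A test function that returns a non-numeric value (here a string) is expected
        # to be rejected by _check_subsample_power_inputs with a TypeError.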
with self.assertRaises(TypeError): _check_subsample_power_inputs(test, self.samps) def test_check_sample_power_inputs(self): # Defines the know returns known_num_p = 1 known_ratio = np.ones((2)) known_counts = np.arange(2, 10, 2) # Runs the code for the returns test_ratio, test_num_p, test_counts = \ _check_subsample_power_inputs(self.f, self.samps, counts_interval=2, max_counts=10) # Checks the returns are sane self.assertEqual(known_num_p, test_num_p) npt.assert_array_equal(known_ratio, test_ratio) npt.assert_array_equal(known_counts, test_counts) def test__calculate_power_curve_ratio_error(self): with self.assertRaises(ValueError): _calculate_power_curve(self.f, self.pop, self.num_samps, ratio=np.array([0.1, 0.2, 0.3]), num_iter=100) def test__calculate_power_curve_default(self): # Sets the known output known = np.array([0.509, 0.822, 0.962, 0.997, 1.000, 1.000, 1.000, 1.000, 1.000]) # Generates the test values test = _calculate_power_curve(self.f, self.pop, self.num_samps, num_iter=100) # Checks the samples returned sanely npt.assert_allclose(test, known, rtol=0.1, atol=0.01) def test__calculate_power_curve_alpha(self): # Sets the know output known = np.array([0.31, 0.568, 0.842, 0.954, 0.995, 1.000, 1.000, 1.000, 1.000]) # Generates the test values test = _calculate_power_curve(self.f, self.pop, self.num_samps, alpha=0.01, num_iter=100) # Checks the samples returned sanely npt.assert_allclose(test, known, rtol=0.1, atol=0.1) def test__calculate_power_curve_ratio(self): # Sets the know output known = np.array([0.096, 0.333, 0.493, 0.743, 0.824, 0.937, 0.969, 0.996, 0.998]) # Generates the test values test = _calculate_power_curve(self.f, self.pop, self.num_samps, ratio=np.array([0.25, 0.75]), num_iter=100) # Checks the samples returned sanely npt.assert_allclose(test, known, rtol=0.1, atol=0.1) def test_paired_subsamples_default(self): # Sets the known np.array set known_array = [{'MM', 'SR', 'TS', 'GW', 'PP', 'WM'}, {'CD', 'LF', 'PC', 'CB', 'MH', 'NR'}] # Gets the test value cat = 'INT' control_cats = ['SEX', 'AGE'] test_array = paired_subsamples(self.meta, cat, control_cats) self.assertEqual(known_array[0], set(test_array[0])) self.assertEqual(known_array[1], set(test_array[1])) def test_paired_subsamples_break(self): # Sets known np.array set known_array = [np.array([]), np.array([])] # Gets the test value cat = 'ABX' control_cats = ['SEX', 'AGE', 'INT'] test_array = paired_subsamples(self.meta, cat, control_cats) npt.assert_array_equal(known_array, test_array) def test_paired_subsample_undefined(self): known_array = np.zeros((2, 0)) cat = 'INT' order = ['Y', 'N'] control_cats = ['AGE', 'ABX', 'SEX'] test_array = paired_subsamples(self.meta, cat, control_cats, order=order) npt.assert_array_equal(test_array, known_array) def test_paired_subsample_fewer(self): # Set known value known_array = {'PP', 'MH', 'CD', 'PC', 'TS', 'MM'} # Sets up test values cat = 'AGE' order = ['30s', '40s'] control_cats = ['ABX'] test_array = paired_subsamples(self.meta, cat, control_cats, order=order) for v in test_array[0]: self.assertTrue(v in known_array) for v in test_array[1]: self.assertTrue(v in known_array) def test_paired_subsamples_not_strict(self): known_array = [{'WM', 'MM', 'GW', 'SR', 'TS'}, {'LF', 'PC', 'CB', 'NR', 'CD'}] # Gets the test values cat = 'INT' control_cats = ['ABX', 'AGE'] test_array = paired_subsamples(self.meta, cat, control_cats, strict_match=False) self.assertEqual(set(test_array[0]), known_array[0]) self.assertEqual(set(test_array[1]), known_array[1]) def 
test__identify_sample_groups(self): # Defines the know values known_pairs = {0: [['MM'], ['CD']], 1: [['SR'], ['LF']], 2: [['TS'], ['PC']], 3: [['GW'], ['CB']], 4: [['PP'], ['MH']], 5: [['WM'], ['NR']]} known_index = np.array([0, 1, 2, 3, 4, 5]) test_pairs, test_index = _identify_sample_groups(self.meta, 'INT', ['SEX', 'AGE'], order=['N', 'Y'], strict_match=True) self.assertEqual(known_pairs.keys(), test_pairs.keys()) self.assertEqual(sorted(known_pairs.values()), sorted(test_pairs.values())) npt.assert_array_equal(known_index, test_index) def test__identify_sample_groups_not_strict(self): # Defines the know values known_pairs = {0: [['PP'], ['CD', 'NR']], 1: [['MM', 'WM'], ['MH']], 2: [['GW'], ['CB']]} known_index = np.array([0, 1, 2]) test_pairs, test_index = _identify_sample_groups(self.meta, 'INT', ['SEX', 'ABX'], order=['N', 'Y'], strict_match=False) self.assertEqual(known_pairs.keys(), test_pairs.keys()) self.assertEqual(sorted(known_pairs.values()), sorted(test_pairs.values())) npt.assert_array_equal(known_index, test_index) def test__draw_paired_samples(self): num_samps = 3 known_sets = [{'GW', 'SR', 'TS', 'MM', 'PP', 'WM'}, {'CB', 'LF', 'PC', 'CD', 'MH', 'NR'}] test_samps = _draw_paired_samples(self.meta_pairs, self.pair_index, num_samps) for i, t in enumerate(test_samps): self.assertTrue(set(t).issubset(known_sets[i])) if __name__ == '__main__': main()
42.604331
79
0.473548
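The test file in the row above exercises a simulation-based power analysis module (helpers such as _calculate_power, _compare_distributions and _calculate_power_curve). As an illustrative aside, and not part of the recorded row or the tested API, here is a minimal sketch of the same idea in plain numpy/scipy: power is estimated as the fraction of repeated random subsamples whose two-sample t-test rejects the null at the chosen alpha. All names below are invented for the example.

import numpy as np
from scipy import stats

def estimated_power(pop_a, pop_b, counts=10, alpha=0.05, num_iter=500, seed=None):
    # Fraction of subsampled two-sample t-tests that reject at the given alpha.
    rng = np.random.default_rng(seed)
    rejections = 0
    for _ in range(num_iter):
        a = rng.choice(pop_a, counts, replace=False)
        b = rng.choice(pop_b, counts, replace=False)
        if stats.ttest_ind(a, b).pvalue < alpha:
            rejections += 1
    return rejections / num_iter

# Two normal populations separated by one standard deviation.
rng = np.random.default_rng(0)
print(estimated_power(rng.normal(0.0, 1.0, 1000), rng.normal(1.0, 1.0, 1000)))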
79576c5146e9ea71349c0f1ce0ee1e71392f202e
7,321
py
Python
cvqa-r-nouns/dataset/automatic-dataset/sameQuestionDataset.py
andeeptoor/qpr-qe-datasets
4359af17e7df335abe38a18d046f94f9cef57277
[ "Apache-2.0" ]
null
null
null
cvqa-r-nouns/dataset/automatic-dataset/sameQuestionDataset.py
andeeptoor/qpr-qe-datasets
4359af17e7df335abe38a18d046f94f9cef57277
[ "Apache-2.0" ]
null
null
null
cvqa-r-nouns/dataset/automatic-dataset/sameQuestionDataset.py
andeeptoor/qpr-qe-datasets
4359af17e7df335abe38a18d046f94f9cef57277
[ "Apache-2.0" ]
2
2018-06-09T01:03:05.000Z
2021-02-05T11:45:06.000Z
import json import os import random import pandas as pd import time import sys import re import operator reload(sys) sys.setdefaultencoding('utf8') def readJson(filename): print "Reading [%s]..." % (filename) with open(filename) as inputFile: jsonData = json.load(inputFile) print "Finished reading [%s]." % (filename) return jsonData def stat(output): sys.stdout.write("\r" + output) sys.stdout.flush() def filter(questionRows): questionStats = {} counter = {} for i,questionRowIndex in enumerate(questionRows.keys()): questionRow = questionRows[questionRowIndex] question = questionRow['question'] if question not in questionStats: questionStats[question] = {'question':question,'relevant':0,'irrelevant':0} counter['question'] = 0 if questionRow['label'] == 1: questionStats[question]['irrelevant'] += 1 elif questionRow['label'] == 2: questionStats[question]['relevant'] += 1 df = pd.DataFrame(questionStats.values()) df['min'] = pd.concat([df['irrelevant'], df['relevant']], axis=1).min(axis=1) print 'Irrelevant: [%d]' % (len(df[df['irrelevant'] > 0])) print 'Relevant: [%d]' % (len(df[df['relevant'] > 0])) # print len(df[df['min'] > 40]) subset = df[df['min'] > 40] questionDf = pd.DataFrame(questionRows.values()) questionDf['selected'] = 0 for i,s in subset.iterrows(): minVal = s['min'] all = questionDf[questionDf['question'] == s['question']] questionDf.ix[all[all['label'] == 1].sample(minVal).index, 'selected'] = 1 questionDf.ix[all[all['label'] == 2].sample(minVal).index, 'selected'] = 1 print 'Not Selected: [%d]' % (len(questionDf[questionDf['selected'] == 0])) print 'Selected [%d]' % (len(questionDf[questionDf['selected'] == 1])) output = {} outputItems = questionDf[questionDf['selected'] == 1].T.to_dict().values() for i,o in enumerate(outputItems): output[i] = o return output baseFolder = '/sb-personal/cvqa/data/visual-genome/7-11-2016' outputFolder = '/sb-personal/cvqa/data/cvqa/' # list1 = ['dog','cat','horse','man','woman'] # list1 = ['boy','girl','car','bus','van','motorcycle','truck','flower','tree'] list1 = ['dog','cat','horse','man','woman','boy','girl','car','bus','van','motorcycle','truck','flower','tree'] # list2 = {'canine':['dog'], 'feline':['cat'], 'person':['man','woman','boy','girl'], 'automobile':['car','bus','taxi','jeep','van','minivan','ambulance','motorcycle','truck'], 'fauna':['shrub','flower','tree']} # list1 = ['dog','cat','horse','man','woman','boy','girl'] # numberOfObjects = len(objects) # outputFilePath = os.path.join(baseFolder,'generated-data/cvqa-sameQuestionDataset-subset-list3-test.csv') outputJsonFilePath = os.path.join(outputFolder,'cvqa-sameQuestionDataset-subset-list3-train.json') imageData = readJson(baseFolder + '/source-data/image_data.json') objects = readJson(baseFolder + '/source-data/objects.json') question_answers = readJson(baseFolder + '/source-data/question_answers.json') image_captions = readJson('/sb-personal/questionCaptionMatchModels/base-cvqa/data/imagecaptions.json') print 'Image data: [%d]' % (len(imageData)) print 'Objects [%d]' % (len(objects)) print 'Question Answers [%d]' % (len(question_answers)) tic = time.time() questionTemplates = {} for l in list1: questionTemplates[l] = {} questionToQuestionIndexes = {} print 'Gathering questions...' 
total = len(question_answers) for i,item in enumerate(question_answers): stat('[%d/%d]' % (i+1,total)) for q in item['qas']: words = re.split('[ ]+',q['question'].lower()[:-1]) question = ' '.join(words) if (question not in questionToQuestionIndexes): questionToQuestionIndexes[question] = [] questionToQuestionIndexes[question].append(i) for l in list1: if l in words: if question not in questionTemplates[l].keys(): questionTemplates[l][question] = 1 else: questionTemplates[l][question] += 1 print '' print 'Selecting questions...' selectedTemplates = {} for l in questionTemplates: selectedTemplates[l] = [] print 'Type: [%s]\tItems: [%d]' % (l,len(questionTemplates[l])) sortedTemplates = sorted(questionTemplates[l].items(), key=operator.itemgetter(1), reverse=True) for (k,v) in sortedTemplates[:10]: selectedTemplates[l].append(k) # print 'Reducing number of instances of selected questions...' # for q in questionToQuestionIndexes: # print q # indices = questionToQuestionIndexes[q] # print 'Original size: [%d]' % (len(indices)) # K = int(len(indices) * 0.10) # indices = [ indices[i] for i in sorted(random.sample(xrange(len(indices)), K))] # print 'New size: [%d]' % (len(indices)) # questionToQuestionIndexes[q] = indices questionCount = 0 imageToObjects = {} imageToQuestionObjects = {} selectedQuestions = [] print 'Selecting relevant questions...' for l in questionTemplates: print 'Type: [%s]\tItems: [%d]' % (l,len(questionTemplates[l])) sortedTemplates = sorted(questionTemplates[l].items(), key=operator.itemgetter(1), reverse=True) for (k,v) in sortedTemplates[:10]: print '\t[%d] Question:[%s]' % (v,k) questionCount += len(questionToQuestionIndexes[k]) for qId in questionToQuestionIndexes[k]: currentImageData = imageData[qId] url = os.path.basename(currentImageData['url']) if (url not in image_captions): # print '\t\tSkipping missing image caption [%s]' % (url) continue if url not in imageToObjects: imageToObjects[url] = {item:False for item in list1} imageToQuestionObjects[url] = {item:False for item in list1} for item in objects[qId]['objects']: for n in item['names']: if n in list1: imageToObjects[url][n] = True if not imageToQuestionObjects[url][l]: selectedQuestion = {} selectedQuestion['question'] = k selectedQuestion['image'] = url selectedQuestion['label'] = 2 selectedQuestion['wordLabels'] = ' '.join(['1' for w in k.split()]) selectedQuestion['word'] = l selectedQuestions.append(selectedQuestion) imageToQuestionObjects[url][l] = True print 'All Questions: [%d]' % (questionCount) print 'Selected Questions: [%d]' % (len(selectedQuestions)) print 'Images: [%d]' % (len(imageToObjects)) subsetCount = 2 print 'Selecting irrelevant questions...' 
for url in imageToQuestionObjects: flags = imageToQuestionObjects[url] possibleObjects = [o for o in flags if (not(flags[o]) and not(flags[o]))] subset = [possibleObjects[i] for i in sorted(random.sample(xrange(len(possibleObjects)), subsetCount)) ] for o in subset: if len(selectedTemplates[o]) > 0: question = random.choice(selectedTemplates[o]) selectedQuestion = {} selectedQuestion['question'] = question selectedQuestion['image'] = url selectedQuestion['label'] = 1 selectedQuestion['wordLabels'] = ' '.join(['0' if w==o else '1' for w in question.split()]) selectedQuestion['word'] = o selectedQuestions.append(selectedQuestion) print 'Selected Questions: [%d]' % (len(selectedQuestions)) # print 'Saving selected questions to [%s]' % (outputFilePath) # pd.DataFrame(selectedQuestions).to_csv(outputFilePath) sortedQuestionsJson = {} for i,s in enumerate(selectedQuestions): sortedQuestionsJson[str(i)] = s sortedQuestionsJson = filter(sortedQuestionsJson) with open(outputJsonFilePath, 'w') as output: output.write(json.dumps(sortedQuestionsJson)) print 'Done (t=%0.2fs)'%(time.time()- tic)
34.533019
211
0.692119
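The sameQuestionDataset.py script in the row above balances relevant (label 2) and irrelevant (label 1) examples per question in its filter() step. A hedged, self-contained sketch of that balancing idea with pandas follows; the column names mirror the script, but the function itself is an illustration rather than the recorded code.

import pandas as pd

def balance_by_question(df, min_per_class=40, seed=0):
    # Keep questions with more than `min_per_class` examples of each label,
    # then downsample both labels to the smaller class size per question.
    kept = []
    for _, group in df.groupby("question"):
        irrelevant = group[group["label"] == 1]
        relevant = group[group["label"] == 2]
        n = min(len(irrelevant), len(relevant))
        if n > min_per_class:
            kept.append(irrelevant.sample(n, random_state=seed))
            kept.append(relevant.sample(n, random_state=seed))
    return pd.concat(kept, ignore_index=True) if kept else df.iloc[0:0]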
79576e459aaf51e909915d31cf38c2d088d98e18
865
py
Python
lib/exabgp/bgp/message/__init__.py
cloudscale-ch/exabgp
55ee496dfbc3fce75c5107fae7a7d38567154d46
[ "BSD-3-Clause" ]
1,560
2015-01-01T08:53:05.000Z
2022-03-29T20:22:43.000Z
lib/exabgp/bgp/message/__init__.py
nembery/exabgp
53cfff843ddde33bf1c437a1c4ce99de20c6bade
[ "BSD-3-Clause" ]
818
2015-01-01T17:38:40.000Z
2022-03-30T07:29:24.000Z
lib/exabgp/bgp/message/__init__.py
nembery/exabgp
53cfff843ddde33bf1c437a1c4ce99de20c6bade
[ "BSD-3-Clause" ]
439
2015-01-06T21:20:41.000Z
2022-03-19T23:24:25.000Z
# encoding: utf-8
"""
update/__init__.py

Created by Thomas Mangin on 2010-01-15.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""

# Every Message should be imported from this file
# as it makes sure that all the registering decorator are run

from exabgp.bgp.message.direction import OUT
from exabgp.bgp.message.direction import IN

from exabgp.bgp.message.message import Message

from exabgp.bgp.message.nop import NOP
from exabgp.bgp.message.nop import _NOP
from exabgp.bgp.message.open import Open
from exabgp.bgp.message.update import Update
from exabgp.bgp.message.update import EOR
from exabgp.bgp.message.keepalive import KeepAlive
from exabgp.bgp.message.notification import Notification
from exabgp.bgp.message.notification import Notify
from exabgp.bgp.message.operational import Operational
33.269231
61
0.812717
79576e678ec73951366eaeb3be9e10883f1144e2
361
py
Python
mapclientplugins/zincmodelsourcestep/__init__.py
tsalemink/zincmodelsourcestep
0c5b98fb394e94452ed536beb4de99ace626c95d
[ "Apache-2.0" ]
null
null
null
mapclientplugins/zincmodelsourcestep/__init__.py
tsalemink/zincmodelsourcestep
0c5b98fb394e94452ed536beb4de99ace626c95d
[ "Apache-2.0" ]
null
null
null
mapclientplugins/zincmodelsourcestep/__init__.py
tsalemink/zincmodelsourcestep
0c5b98fb394e94452ed536beb4de99ace626c95d
[ "Apache-2.0" ]
1
2021-06-09T21:50:14.000Z
2021-06-09T21:50:14.000Z
'''
MAP Client Plugin
'''

__version__ = '0.1.0'
__author__ = 'Hugh Sorby'
__stepname__ = 'Zinc Model Source'
__location__ = 'https://github.com/mapclient-plugins/zincdatasourcestep'

# import class that derives itself from the step mountpoint.
from mapclientplugins.zincmodelsourcestep import resources_rc
from mapclientplugins.zincmodelsourcestep import step
27.769231
72
0.803324
79576e9d62aa7813fb64cfc3c38a716ad66e6531
1,497
py
Python
ports/esp8266/modules/inisetup.py
zeilenschubser/micropython
61dfc61230ca1fd0b964fb86911f19ccdc30acb9
[ "MIT" ]
79
2019-02-07T09:04:50.000Z
2022-02-20T06:54:44.000Z
ports/esp8266/modules/inisetup.py
zeilenschubser/micropython
61dfc61230ca1fd0b964fb86911f19ccdc30acb9
[ "MIT" ]
176
2020-10-18T14:31:03.000Z
2022-03-30T23:22:39.000Z
ports/esp8266/modules/inisetup.py
zeilenschubser/micropython
61dfc61230ca1fd0b964fb86911f19ccdc30acb9
[ "MIT" ]
25
2019-03-20T08:16:57.000Z
2022-03-11T17:59:36.000Z
import uos
import network
from flashbdev import bdev


def wifi():
    import ubinascii

    ap_if = network.WLAN(network.AP_IF)
    essid = b"MicroPython-%s" % ubinascii.hexlify(ap_if.config("mac")[-3:])
    ap_if.config(essid=essid, authmode=network.AUTH_WPA_WPA2_PSK, password=b"micropythoN")


def check_bootsec():
    buf = bytearray(bdev.SEC_SIZE)
    bdev.readblocks(0, buf)
    empty = True
    for b in buf:
        if b != 0xFF:
            empty = False
            break
    if empty:
        return True
    fs_corrupted()


def fs_corrupted():
    import time

    while 1:
        print(
            """\
The FAT filesystem starting at sector %d with size %d sectors appears to
be corrupted. If you had important data there, you may want to make a flash
snapshot to try to recover it. Otherwise, perform factory reprogramming
of MicroPython firmware (completely erase flash, followed by firmware
programming).
"""
            % (bdev.START_SEC, bdev.blocks)
        )
        time.sleep(3)


def setup():
    check_bootsec()
    print("Performing initial setup")
    wifi()
    uos.VfsLfs2.mkfs(bdev)
    vfs = uos.VfsLfs2(bdev)
    uos.mount(vfs, "/")
    with open("boot.py", "w") as f:
        f.write(
            """\
# This file is executed on every boot (including wake-boot from deepsleep)
#import esp
#esp.osdebug(None)
import uos, machine
#uos.dupterm(None, 1) # disable REPL on UART(0)
import gc
#import webrepl
#webrepl.start()
gc.collect()
"""
        )
    return vfs
22.681818
90
0.643955
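check_bootsec() in the inisetup.py row above decides whether the filesystem area is freshly erased by testing every byte of the first block against 0xFF, since NOR flash erases to all ones. A stand-alone illustration of that check in plain Python (the 4096-byte sector size is an assumption for the example, not taken from flashbdev):

def sector_is_erased(buf):
    # An erased NOR flash sector reads back as 0xFF in every byte;
    # anything else means the sector holds data (or corruption).
    return all(b == 0xFF for b in buf)

print(sector_is_erased(bytearray([0xFF] * 4096)))   # True: looks erased
print(sector_is_erased(bytearray([0xFF, 0x00])))    # False: holds data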
79576ec2279f59e4d4e8dc9fc0e5c3d462def945
28,219
py
Python
blechpy/plotting/data_plot.py
thomasrgray/blechpy
46a95991e1d41556a263e48c9c3b61b1d337aae0
[ "MIT" ]
8
2020-10-05T19:00:45.000Z
2021-09-14T16:43:08.000Z
blechpy/plotting/data_plot.py
thomasrgray/blechpy
46a95991e1d41556a263e48c9c3b61b1d337aae0
[ "MIT" ]
25
2019-11-01T14:42:22.000Z
2022-03-02T21:43:58.000Z
blechpy/plotting/data_plot.py
thomasrgray/blechpy
46a95991e1d41556a263e48c9c3b61b1d337aae0
[ "MIT" ]
3
2019-11-01T14:38:42.000Z
2021-10-21T16:15:09.000Z
import pandas as pd import numpy as np import tables import os import umap import pywt import itertools as it from blechpy import dio from blechpy.analysis import spike_analysis as sas from scipy.stats import sem from scipy.ndimage.filters import gaussian_filter1d from statsmodels.stats.diagnostic import lilliefors from sklearn.decomposition import PCA from blechpy.plotting import blech_waveforms_datashader import matplotlib matplotlib.use('TkAgg') import pylab as plt plot_params = {'xtick.labelsize': 14, 'ytick.labelsize': 14, 'axes.titlesize': 26, 'figure.titlesize': 28, 'axes.labelsize': 24} matplotlib.rcParams.update(plot_params) def make_unit_plots(file_dir, unit_name, save_dir=None): '''Makes waveform plots for sorted unit in unit_waveforms_plots Parameters ---------- file_dir : str, full path to recording directory fs : float, smapling rate in Hz ''' if isinstance(unit_name, int): unit_num = unit_name unit_name = 'unit%03i' % unit_num else: unit_num = dio.h5io.parse_unit_number(unit_name) waveforms, descriptor, fs = dio.h5io.get_unit_waveforms(file_dir, unit_name) fs_str = '%g samples per ms' % (fs/10/1000.0) # since both theses plots # downsample by 10 and then to convert to samples/ms fig, ax = blech_waveforms_datashader.waveforms_datashader(waveforms) ax.set_xlabel('Samples (%s)' % fs_str) ax.set_ylabel('Voltage (microvolts)') unit_title = (('Unit %i, total waveforms = %i\nElectrode: %i, ' 'Single Unit: %i, RSU: %i, FSU: %i') % (unit_num, waveforms.shape[0], descriptor['electrode_number'], descriptor['single_unit'], descriptor['regular_spiking'], descriptor['fast_spiking'])) ax.set_title(unit_title) fig.savefig(os.path.join(save_dir, 'Unit%i.png' % unit_num)) plt.close('all') # Plot mean and SEM of waveforms # Downsample by 10 to remove upsampling from de-jittering fig, ax = plt.subplots(figsize=(12,8)) mean_wave = np.mean(waveforms[:, ::10], axis=0) std_wave = np.std(waveforms[:, ::10], axis=0) mean_x = np.arange(mean_wave.shape[0]) + 1 ax.plot(mean_x, mean_wave, linewidth=4.0) ax.fill_between(mean_x, mean_wave - std_wave, mean_wave + std_wave, alpha=0.4) ax.set_xlabel('Samples (%s)' % fs_str) ax.set_ylabel('Voltage (microvolts)') ax.set_title(unit_title) fig.savefig(os.path.join(save_dir, 'Unit%i_mean_sd.png' % unit_num)) plt.close('all') def plot_traces_and_outliers(h5_file, window=60, save_file=None): '''plot first 30 sec of raw data traces as well as a subplot with a metric to help identify dead channels (max(abs(trace)) * std(trace)) Parameters ---------- h5_file : str, full path to h5_file with raw data ''' if not os.path.isfile(h5_file): raise FileNotFoundError('%s not found.' 
% h5_file) with tables.open_file(h5_file, 'r') as hf5: if '/raw' not in hf5: raise ValueError('No raw data in %s' % h5_file) electrodes = hf5.list_nodes('/raw') t_idx = np.where(lambda x: x.v_name == 'amplifier_time')[0] time = electrodes.pop(t_idx[0])[:] n_electrodes = len(electrodes) max_amp = np.zeros(n_electrodes) max_amp_idx = np.zeros(n_electrodes) std_amp = np.zeros(n_electrodes) range_amp = np.zeros(n_electrodes) for node in electrodes: i = int(node._v_name.replace('electrode','')) trace = node[:] * dio.rawIO.voltage_scaling max_amp[i] = np.max(np.abs(trace)) max_amp_idx[i] = int(np.argmax(np.abs(trace))) std_amp[i] = np.std(trace) range_amp[i] = np.max(trace) - np.min(trace) max_v = np.max(max_amp) max_idx = int(max_amp_idx[np.argmax(max_amp)]) metric = max_amp * std_amp idx = np.where((time >= time[max_idx] - window/2) & (time <= time[max_idx] + window/2))[0] fig, ax = plt.subplots(nrows=2, figsize=(30,30)) for node in electrodes: i = int(node._v_name.replace('electrode','')) trace = node[:] * dio.rawIO.voltage_scaling / max_v ax[0].plot(time[idx], trace[idx] + i, linewidth=0.5) ax[1].plot([i, i], [0, metric[i]], color='black', linewidth=0.5) ax[1].scatter(np.arange(n_electrodes), metric) med = np.median(metric) sd = np.std(metric) ax[1].plot([0, n_electrodes-1], [med, med], color='blue', linewidth=0.5, alpha=0.5) ax[1].plot([0, n_electrodes-1], [med + 1.5*sd, med + 1.5*sd], color='red', linewidth=0.5, alpha=0.5) ax[0].set_ylabel('Electrode') ax[0].set_xlabel('Time (s)') ax[0].set_title('Raw Traces') ax[1].set_ylabel('max * st. dev.') ax[1].set_xlabel('Electrode') over = np.where(metric > med+1.5*sd)[0] ax[1].set_title('Electrodes over line: %s' % over) if save_file is not None: fig.savefig(save_file) plt.close('all') return fig, ax def plot_overlay_psth(rec_dir, unit, din_map, plot_window=[-1500, 2500], bin_size=250, bin_step=25, dig_ins=None, smoothing_width=3, save_file=None): ''' Plots overlayed PSTHs for all tastants or a specified subset Parameters ---------- rec_dir: str unit: int plot_window: list of int, time window for plotting in ms bin_size: int, window size for binning spikes in ms bin_step: int, step size for binning spikes in ms dig_ins: list of int (optional) which digital inputs to plot PSTHs for, None (default) plots all save_file: str (optional), full path to save file, if None, saves in Overlay_PSTHs subfolder ''' if isinstance(unit, str): unit = dio.h5io.parse_unit_number(unit) if dig_ins is None: dig_ins = din_map.query('spike_array==True').channel.values if save_file is None: save_dir = os.path.join(rec_dir, 'Overlay_PSTHs') save_file = os.path.join(save_dir, 'Overlay_PSTH_unit%03d' % unit) if not os.path.isdir(save_dir): os.mkdir(save_dir) fig, ax = plt.subplots(figsize=(20,15)) for din in dig_ins: name = din_map.query('channel==@din').name.values[0] time, spike_train = dio.h5io.get_spike_data(rec_dir, unit, din) psth_time, fr = sas.get_binned_firing_rate(time, spike_train, bin_size, bin_step) mean_fr = np.mean(fr, axis=0) sem_fr = sem(fr, axis=0) t_idx = np.where((psth_time >= plot_window[0]) & (psth_time <= plot_window[1]))[0] psth_time = psth_time[t_idx] mean_fr = mean_fr[t_idx] sem_fr = sem_fr[t_idx] mean_fr = gaussian_filter1d(mean_fr, smoothing_width) ax.fill_between(psth_time, mean_fr - sem_fr, mean_fr + sem_fr, alpha=0.3) ax.plot(psth_time, mean_fr, linewidth=3, label=name) ax.set_title('Peri-stimulus Firing Rate Plot\nUnit %i' % unit, fontsize=34) ax.set_xlabel('Time (ms)', fontsize=28) ax.set_ylabel('Firing Rate (Hz)', fontsize=28) 
plt.xticks(fontsize=18) plt.yticks(fontsize=18) ax.autoscale(enable=True, axis='x', tight=True) ax.legend(loc='best') ax.axvline(0, color='red', linestyle='--') fig.savefig(save_file) plt.close('all') def plot_J3s(intra_J3, inter_J3, save_dir, percent_criterion): print('\n----------\nPlotting J3 distribution\n----------\n') fig = plt.figure(figsize=(10,5)) plt.hist([inter_J3, intra_J3], bins=20, alpha=0.7, label=['Across-session J3', 'Within-session J3']) plt.legend(prop={'size':12}, loc='upper right') plt.axvline(np.percentile(intra_J3, percent_criterion), linewidth=2, color='black', linestyle='dashed') plt.xlabel('J3', fontsize=18) plt.ylabel('Number of single unit pairs', fontsize=18) plt.tick_params(axis='both', which='major', labelsize=12) fig.savefig(os.path.join(save_dir, 'J3_distribution.png'), bbox_inches='tight') plt.close('all') def plot_held_units(rec_dirs, held_df, save_dir, rec_names=None): '''Plot waveforms of held units side-by-side Parameters ---------- rec_dirs : list of str full paths to recording directories held_df : pandas.DataFrame dataframe listing held units with columns matching the names of the recording directories or the given rec_names. Also colulmns: - unit : str, unit name - single_unit : bool - unit_type : str, unit_type - electrode : int - J3 : list of float, J3 values for the held unit save_dir : str, directory to save plots in rec_names : list of str (optional) abbreviated rec_names if any were used for held_df creation if not given, rec_names are assumed to be the basenames of rec_dirs ''' if rec_names is None: rec_names = [os.path.basename(x) for x in rec_dirs] rec_labels = {x: y for x, y in zip(rec_names, rec_dirs)} print('\n----------\nPlotting held units\n----------\n') for idx, row in held_df.iterrows(): n_subplots = 0 units = {} for rn in rec_names: if not pd.isna(row.get(rn)): n_subplots += 1 units[rn] = row.get(rn) if n_subplots == 0: continue single_unit = row['single_unit'] if single_unit: single_str = 'single-unit' else: single_str = 'multi-unit' unit_type = row['unit_type'] unit_name = row['unit'] electrode = row['electrode'] area = row['area'] J3_vals = row['J3'] J3_str = np.array2string(np.array(J3_vals), precision=3) print('Plotting Unit %s...' 
% unit_name) title_str = 'Unit %s\nElectrode %i: %s %s\nJ3: %s' % (unit_name, electrode, unit_type, single_str, J3_str) fig, fig_ax = plt.subplots(ncols=n_subplots, figsize=(20, 10)) ylim = [0, 0] row_ax = [] for ax, unit_info in zip(fig_ax, units.items()): rl = unit_info[0] u = unit_info[1] rd = rec_labels.get(rl) params = dio.params.load_params('clustering_params', rd) if params is None: raise FileNotFoundError('No dataset pickle file for %s' % rd) #waves, descriptor, fs = get_unit_waveforms(rd, x[1]) waves, descriptor, fs = dio.h5io.get_raw_unit_waveforms(rd, u) waves = waves[:, ::10] fs = fs/10 time = np.arange(0, waves.shape[1], 1) / (fs/1000) snapshot = params['spike_snapshot'] t_shift = snapshot['Time before spike (ms)'] time = time - t_shift mean_wave = np.mean(waves, axis=0) std_wave = np.std(waves, axis=0) ax.plot(time, mean_wave, linewidth=5.0, color='black') ax.plot(time, mean_wave - std_wave, linewidth=2.0, color='black', alpha=0.5) ax.plot(time, mean_wave + std_wave, linewidth=2.0, color='black', alpha=0.5) ax.set_xlabel('Time (ms)', fontsize=35) ax.set_title('%s %s\ntotal waveforms = %i' % (rl, u, waves.shape[0]), fontsize = 20) ax.autoscale(axis='x', tight=True) plt.tick_params(axis='both', which='major', labelsize=32) if np.min(mean_wave - std_wave) - 20 < ylim[0]: ylim[0] = np.min(mean_wave - std_wave) - 20 if np.max(mean_wave + std_wave) + 20 > ylim[1]: ylim[1] = np.max(mean_wave + std_wave) + 20 for ax in row_ax: ax.set_ylim(ylim) fig_ax[0].set_ylabel('Voltage (microvolts)', fontsize=35) plt.subplots_adjust(top=.75) plt.suptitle(title_str) fig.savefig(os.path.join(save_dir, 'Unit%s_waveforms.png' % unit_name), bbox_inches='tight') plt.close('all') def plot_cluster_pca(clusters): '''Plot PCA view of clusters from spike_sorting Parameters ---------- clusters : ilist of dict list of dictionaries containing spike cluster information from blechpy.analysis.spike_sorting Returns ------- matplotlib.pyplot.figure, matplotlib.pyplot.Axes ''' fig, axs = plt.subplots(2, 2, sharex=False, sharey=False, figsize=(20,15)) pca = PCA(n_components=3) pca.fit(np.concatenate(tuple(x['spike_waveforms'] for x in clusters), axis=0)) colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] for i, c in enumerate(clusters): pcs = pca.transform(c['spike_waveforms']) axs[0, 0].scatter(pcs[:, 0], pcs[:, 1], alpha=0.4, s=5, color=colors[i], label=str(i)) axs[0, 1].scatter(pcs[:, 0], pcs[:, 2], alpha=0.4, s=5, color=colors[i], label=str(i)) axs[1, 0].scatter(pcs[:, 1], pcs[:, 2], alpha=0.4, s=5, color=colors[i], label=str(i)) handles, labels = axs[0, 0].get_legend_handles_labels() axs[1, 1].set_axis_off() axs[1, 1].legend(handles, labels, loc='center') axs[0, 0].set_xlabel('PC1') axs[0, 0].set_ylabel('PC2') axs[0, 1].set_xlabel('PC1') axs[0, 1].set_ylabel('PC3') axs[1, 0].set_xlabel('PC2') axs[1, 0].set_ylabel('PC3') return fig, axs def plot_cluster_raster(clusters): '''Plot raster view of a cluster from blechpy.analysis.spike_sorting Parameters ---------- clusters : ilist of dict list of dictionaries containing spike cluster information from blechpy.analysis.spike_sorting Returns ------- matplotlib.pyplot.figure ''' fig = plt.figure(figsize=(15,10)) pca = PCA(n_components=1) pca.fit(np.concatenate(tuple(x['spike_waveforms'] for x in clusters), axis=0)) colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] for i, c in enumerate(clusters): pcs = pca.transform(c['spike_waveforms']) st = c['spike_times'] plt.scatter(st, pcs[:, 0], s=5, color=colors[i], label=str(i)) plt.legend(loc='best') return 
fig def plot_waveforms(waveforms, title=None, save_file=None, threshold=None): '''Plots a cluster with isi and violation info for viewing Parameters ---------- cluster : dict with cluster info ''' fig, ax = blech_waveforms_datashader.waveforms_datashader(waveforms, threshold=threshold) ax.set_xlabel('Samples', fontsize=12) ax.set_ylabel('Voltage (microvolts)', fontsize=12) ax.set_title(title, fontsize=12) plt.xticks(fontsize=10) plt.yticks(fontsize=10) if save_file is not None: fig.savefig(save_file) plt.close(fig) return None, None else: return fig, ax def plot_ISIs(ISIs, total_spikes=None, save_file=None): '''Plots a cluster with isi and violation info for viewing Parameters ---------- ISIs : np.array, list of ISIs in ms save_file : str (optional) path to save figure to. Closes figure after save. Returns ------- pyplot.Figure, pyplot.Axes if save_file is provided figured is saved and close and None, None is returned ''' if total_spikes is None: total_spikes = len(ISIs)+1 viol_1ms = np.sum(ISIs < 1.0) viol_2ms = np.sum(ISIs < 2.0) fig, ax = plt.subplots(figsize=(15,10)) max_bin = max(np.max(ISIs), 11.0) bins = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, max_bin] histogram, _ = np.histogram(ISIs, bins) histogram = histogram[:-1] ax.hist(ISIs, bins = bins) ax.set_xlim((0.0, 10.0)) title_str = ('2ms violations = %0.1f %% (%i/%i)\n' '1ms violations = %0.1f %% (%i/%i)' % (100*viol_2ms/total_spikes, viol_2ms, total_spikes, 100*viol_1ms/total_spikes, viol_1ms, total_spikes)) ax.set_ylim((0.0, np.max(histogram)+5)) ax.set_title(title_str) ax.set_xlabel('ISIs (ms)') if save_file is not None: fig.savefig(save_file) plt.close(fig) return None, None else: return fig, ax def plot_correlogram(hist_counts, bin_centers, bin_edges, title=None, save_file=None): fig, ax = plt.subplots(figsize=(10,6)) ax.hist(bin_centers, bins=bin_edges, weights=hist_counts, color='black') ax.autoscale(axis='both', tight=True) if title: ax.set_title(title) else: ax.set_title('Correlogram') ax.set_ylabel('spikes/s') ax.set_xlabel('Lag') if save_file: fig.savefig(save_file) fig.close() return None, None else: return fig, ax def plot_spike_raster(spike_times, waveforms, cluster_ids=None, save_file=None): '''Plot raster view of a cluster from blechpy.analysis.spike_sorting Parameters ---------- spike_times : list of np.array spike_times for each cluster to be plotted spike_waveforms: list of np.array spike_waveforms for each cluster to be plotted cluster_ids : list names or numbers with which to label each cluster plotted save_file : str (optional) path to save figure to, if provided, figure is saved and closed and this returns None Returns ------- matplotlib.pyplot.figure ''' if cluster_ids is None: cluster_ids = list(range(len(spike_times))) fig, ax = plt.subplots(figsize=(15,10)) all_waves = np.vstack(waveforms) pca = PCA(n_components=1) pca.fit(all_waves) colors = [plt.cm.jet(x) for x in np.linspace(0,1,len(waveforms))] for i, c in enumerate(zip(cluster_ids, spike_times, waveforms)): pcs = pca.transform(c[2]) ax.scatter(c[1], pcs[:, 0], s=5, color=colors[i], label=str(c[0])) ax.legend(loc='best') ax.set_title('Spike Raster') ax.set_ylabel('PC1') ax.set_xlabel('Time') if save_file: fig.savefig(save_file) plt.close(fig) return None else: return fig, ax def plot_waveforms_pca(waveforms, cluster_ids=None, save_file=None): '''Plot PCA view of clusters from spike_sorting Parameters ---------- waveforms : list of np.array list of np.arrays containing waveforms for each cluster cluster_ids : list names or numbers with 
which to label each cluster plotted save_file : str (optional) path to save figure to, if provided, figure is saved and closed and this returns None Returns ------- matplotlib.pyplot.figure, matplotlib.pyplot.Axes ''' if cluster_ids is None: cluster_ids = list(range(len(waveforms))) fig, axs = plt.subplots(2, 2, sharex=False, sharey=False, figsize=(20,15)) pca = PCA(n_components=3) all_waves = np.vstack(waveforms) pca.fit(all_waves) colors = [plt.cm.jet(x) for x in np.linspace(0,1,len(waveforms))] for i, c in enumerate(zip(cluster_ids, waveforms)): pcs = pca.transform(c[1]) axs[0, 0].scatter(pcs[:, 0], pcs[:, 1], alpha=0.4, s=5, color=colors[i], label=str(c[0])) axs[0, 1].scatter(pcs[:, 0], pcs[:, 2], alpha=0.4, s=5, color=colors[i], label=str(c[0])) axs[1, 0].scatter(pcs[:, 1], pcs[:, 2], alpha=0.4, s=5, color=colors[i], label=str(c[0])) handles, labels = axs[0, 0].get_legend_handles_labels() axs[1, 1].set_axis_off() axs[1, 1].legend(handles, labels, loc='center') axs[0, 0].set_xlabel('PC1') axs[0, 0].set_ylabel('PC2') axs[0, 1].set_xlabel('PC1') axs[0, 1].set_ylabel('PC3') axs[1, 0].set_xlabel('PC2') axs[1, 0].set_ylabel('PC3') if save_file: fig.savefig(save_file) plt.close(fig) return None else: return fig def plot_waveforms_umap(waveforms, cluster_ids=None, save_file=None, n_neighbors=30, min_dist=0.0, embedding=None): '''Plot UMAP view of clusters from spike_sorting Parameters ---------- waveforms : list of np.array list of np.arrays containing waveforms for each cluster cluster_ids : list names or numbers with which to label each cluster plotted save_file : str (optional) path to save figure to, if provided, figure is saved and closed and this returns None n_neighbors : int (optional) parameters for UMAP, default = 20, lower preferences local structure and higher preferences global structure min_dist : float [0,1] (optional) minimum distance between points in 2D represenation. 
(default = 0.1) Returns ------- matplotlib.pyplot.figure, matplotlib.pyplot.Axes ''' if cluster_ids is None: cluster_ids = list(range(len(waveforms))) if embedding is None: reducer = umap.UMAP(n_neighbors=n_neighbors, min_dist=min_dist, n_components=2) embedding = reducer.fit(np.vstack(waveforms)) colors = [plt.cm.rainbow(x) for x in np.linspace(0, 1, len(waveforms))] fig, ax = plt.subplots(figsize=(15,10)) for x, y, z in zip(waveforms, cluster_ids, colors): u = embedding.transform(x) ax.scatter(u[:, 0], u[:, 1], s=3, color=z, marker='o', label=y) ax.legend() ax.set_title('Waveforms UMAP\nmin_dist=%f, n_neighbors=%i' % (min_dist, n_neighbors)) if save_file: fig.savefig(save_file) fig.close() return None else: return fig def plot_waveforms_wavelet_tranform(waveforms, cluster_ids=None, save_file=None, n_pc=4): all_waves = np.vstack(waveforms) coeffs = pywt.wavedec(all_waves, 'haar', axis=1) all_coeffs = np.column_stack(coeffs) k_stats = np.zeros((all_coeffs.shape[1],)) p_vals = np.ones((all_coeffs.shape[1],)) for i, coef in enumerate(all_coeffs.T): if len(np.unique(coef)) == 1: # to avoid nans continue try: k_stats[i], p_vals[i] = lilliefors(coef, dist='norm') except ValueError: continue # pick best coefficients as ones that are least normally distributed # that is lowest p-values from Lilliefors K-S test idx = np.argsort(p_vals) best_coeffs = all_coeffs[:, idx[:n_pc]] data = [] for i, w in enumerate(waveforms): tmp = best_coeffs[:w.shape[0]] best_coeffs = best_coeffs[w.shape[0]:] data.append(tmp) if cluster_ids is None: cluster_ids = list(range(len(waveforms))) colors = [plt.cm.jet(x) for x in np.linspace(0,1,len(waveforms))] pairs = list(it.combinations(range(n_pc), 2)) n_cols = 1 while np.power(n_cols, 2) < len(pairs): n_cols += 1 n_rows = int(np.ceil(len(pairs)/n_cols)) fig, ax = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(5*(n_cols+1), 5*n_rows)) ax = ax.reshape(ax.size) for i, p in enumerate(pairs): for x, y, z in zip(data, cluster_ids, colors): ax[i].scatter(x[:, p[0]], x[:, p[1]], s=3, alpha=0.5, color=z, label=y, marker='o') ax[i].set_xlabel('Coefficient %i' % p[0]) ax[i].set_ylabel('Coefficient %i' % p[1]) handles, labels = ax[0].get_legend_handles_labels() if n_rows * n_cols > len(pairs): ax[-1].set_axis_off() ax[-1].legend(handles, labels, loc='center', shadow=True) else: idx = int(((n_cols * (n_rows-1)) -1) + np.ceil(n_cols/2)) ax[idx].legend(handles, labels, ncol=len(pairs), loc='upper center', bbox_to_anchor=(0.5, -0.05), shadow=True) fig.suptitle('Wavelet transform coefficients') if save_file: fig.savefig(save_file) return None, None else: return fig, ax.reshape((n_rows, n_cols)) def plot_recording_cutoff(filt_el, fs, cutoff, out_file=None): fig, ax = plt.subplots(figsize=(15,10)) test_el = np.reshape(filt_el[:int(fs)*int(len(filt_el)/fs)], (-1, int(fs))) ax.plot(np.arange(test_el.shape[0]), np.mean(test_el, axis = 1)) ax.axvline(cutoff, color='black', linewidth=4.0) ax.set_xlabel('Recording time (secs)', fontsize=18) ax.set_ylabel('Average voltage recorded\nper sec (microvolts)', fontsize=18) ax.set_title('Recording cutoff time\n(indicated by the black horizontal line)', fontsize=18) if out_file is not None: fig.savefig(out_file, bbox_inches='tight') plt.close(fig) return None, None return fig, ax def plot_explained_pca_variance(explained_variance_ratio, out_file=None): fig, ax = plt.subplots(figsize=(15,10)) x = np.arange(len(explained_variance_ratio)) ax.plot(x, explained_variance_ratio) ax.set_title('Variance ratios explained by PCs',fontsize=26) ax.set_xlabel('PC 
#',fontsize=24) ax.set_ylabel('Explained variance ratio',fontsize=24) if out_file is not None: fig.savefig(out_file, bbox_inches='tight') plt.close(fig) return None, None return fig, ax def plot_cluster_features(data, clusters, x_label='X', y_label='Y', save_file=None): '''Plot scatter of feature1 vs feature2 for each cluster Parameters ---------- data : np.array 2-column data array of where columns are features and rows are points clusters : np.array 1-d array corresponding to each row of data, labels each data point as part of a cluster x_label : str (optional), x-label of plot, default is X y_label : str (optional), y-label of plot, default is Y save_file : str (optional) if given, figure will be saved and closed otherwise, figure and axis handles will be returned Returns ------- pyplot.figure, pyplot.axes if no save_file is given, otherwise returns None, None ''' unique_clusters = np.unique(clusters) unique_clusters = unique_clusters[unique_clusters >= 0] colors = matplotlib.cm.rainbow(np.linspace(0,1,len(unique_clusters))) fig, ax = plt.subplots(figsize=(15,10)) for i, clust in enumerate(unique_clusters): idx = np.where(clusters == clust)[0] tmp = ax.scatter(data[idx, 0], data[idx, 1], color=colors[i], s=0.8) tmp.set_label('Cluster %i' % clust) ax.set_xlabel(x_label) ax.set_ylabel(y_label) ax.legend(scatterpoints = 1, loc = 'best', ncol = 3, fontsize = 8, shadow=True) ax.set_title("Feature plot for %i cluster solution" % len(unique_clusters)) if save_file is not None: fig.savefig(save_file) plt.close(fig) return None, None else: return fig, ax def plot_mahalanobis_to_cluster(distances, title=None, save_file=None): unique_clusters = sorted(list(distances.keys())) colors = matplotlib.cm.rainbow(np.linspace(0,1,len(unique_clusters))) fig, ax = plt.subplots(figsize=(15,10)) for clust, dists in distances.items(): y, binEdges = np.histogram(dists) bincenters = 0.5*(binEdges[1:] + binEdges[:-1]) ax.plot(bincenters, y, label = 'Dist from cluster %i' % clust) ax.set_xlabel('Mahalanobis distance') ax.set_ylabel('Frequency') ax.legend(loc = 'upper right', fontsize = 8) if title: ax.set_title(title) if save_file is not None: fig.savefig(save_file) plt.close(fig) return None, None else: return fig, ax
35.317897
103
0.602396
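Several helpers in the data_plot.py row above (plot_cluster_pca, plot_waveforms_pca, plot_spike_raster) fit a single PCA on all waveforms so that every cluster is projected into the same space before plotting. A self-contained sketch of that pattern with scikit-learn and synthetic data follows; it is illustrative only, not the blechpy API.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

# Synthetic "waveforms": three clusters of 200 traces, 40 samples each.
rng = np.random.default_rng(0)
clusters = [rng.normal(loc=i, scale=0.5, size=(200, 40)) for i in range(3)]

# One PCA fit on the pooled data keeps the projection consistent across clusters.
pca = PCA(n_components=2)
pca.fit(np.vstack(clusters))

fig, ax = plt.subplots()
for i, waves in enumerate(clusters):
    pcs = pca.transform(waves)
    ax.scatter(pcs[:, 0], pcs[:, 1], s=5, alpha=0.4, label="cluster %i" % i)
ax.set_xlabel("PC1")
ax.set_ylabel("PC2")
ax.legend()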
79576f9d75413d5d3f6926eea59dc6633578f206
16,943
py
Python
test/mobile/model_test/math_ops.py
tenpercent/pytorch
7f996b855c5070ab4a6bea0f451c8a22c0ce2394
[ "Intel" ]
1
2022-03-29T00:44:31.000Z
2022-03-29T00:44:31.000Z
test/mobile/model_test/math_ops.py
tenpercent/pytorch
7f996b855c5070ab4a6bea0f451c8a22c0ce2394
[ "Intel" ]
null
null
null
test/mobile/model_test/math_ops.py
tenpercent/pytorch
7f996b855c5070ab4a6bea0f451c8a22c0ce2394
[ "Intel" ]
1
2022-03-28T21:49:41.000Z
2022-03-28T21:49:41.000Z
# https://pytorch.org/docs/stable/torch.html#math-operations import math import torch class PointwiseOpsModule(torch.nn.Module): def __init__(self): super(PointwiseOpsModule, self).__init__() def forward(self): return self.pointwise_ops() def pointwise_ops(self): a = torch.randn(4) b = torch.randn(4) t = torch.tensor([-1, -2, 3], dtype=torch.int8) r = torch.tensor([0, 1, 10, 0], dtype=torch.int8) t = torch.tensor([-1, -2, 3], dtype=torch.int8) s = torch.tensor([4, 0, 1, 0], dtype=torch.int8) f = torch.zeros(3) g = torch.tensor([-1, 0, 1]) w = torch.tensor([0.3810, 1.2774, -0.2972, -0.3719, 0.4637]) return ( torch.abs(torch.tensor([-1, -2, 3])), torch.absolute(torch.tensor([-1, -2, 3])), torch.acos(a), torch.arccos(a), torch.acosh(a.uniform_(1.0, 2.0)), torch.add(a, 20), torch.add(a, b, out=a), b.add(a), b.add(a, out=b), b.add_(a), b.add(1), torch.add(a, torch.randn(4, 1), alpha=10), torch.addcdiv( torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), value=0.1 ), torch.addcmul( torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), value=0.1 ), torch.angle(a), torch.asin(a), torch.arcsin(a), torch.asinh(a), torch.arcsinh(a), torch.atan(a), torch.arctan(a), torch.atanh(a.uniform_(-1.0, 1.0)), torch.arctanh(a.uniform_(-1.0, 1.0)), torch.atan2(a, a), torch.bitwise_not(t), torch.bitwise_and(t, torch.tensor([1, 0, 3], dtype=torch.int8)), torch.bitwise_or(t, torch.tensor([1, 0, 3], dtype=torch.int8)), torch.bitwise_xor(t, torch.tensor([1, 0, 3], dtype=torch.int8)), torch.ceil(a), torch.ceil(float(torch.tensor(0.5))), torch.ceil(torch.tensor(0.5).item()), torch.clamp(a, min=-0.5, max=0.5), torch.clamp(a, min=0.5), torch.clamp(a, max=0.5), torch.clip(a, min=-0.5, max=0.5), torch.conj(a), torch.copysign(a, 1), torch.copysign(a, b), torch.cos(a), torch.cosh(a), torch.deg2rad( torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]]) ), torch.div(a, b), a.div(b), a.div(1), a.div_(b), torch.divide(a, b, rounding_mode="trunc"), torch.divide(a, b, rounding_mode="floor"), torch.digamma(torch.tensor([1.0, 0.5])), torch.erf(torch.tensor([0.0, -1.0, 10.0])), torch.erfc(torch.tensor([0.0, -1.0, 10.0])), torch.erfinv(torch.tensor([0.0, 0.5, -1.0])), torch.exp(torch.tensor([0.0, math.log(2.0)])), torch.exp(float(torch.tensor(1))), torch.exp2(torch.tensor([0.0, math.log(2.0), 3.0, 4.0])), torch.expm1(torch.tensor([0.0, math.log(2.0)])), torch.fake_quantize_per_channel_affine( torch.randn(2, 2, 2), (torch.randn(2) + 1) * 0.05, torch.zeros(2), 1, 0, 255, ), torch.fake_quantize_per_tensor_affine(a, 0.1, 0, 0, 255), torch.float_power(torch.randint(10, (4,)), 2), torch.float_power(torch.arange(1, 5), torch.tensor([2, -3, 4, -5])), torch.floor(a), torch.floor(float(torch.tensor(1))), torch.floor_divide(torch.tensor([4.0, 3.0]), torch.tensor([2.0, 2.0])), torch.floor_divide(torch.tensor([4.0, 3.0]), 1.4), torch.fmod(torch.tensor([-3, -2, -1, 1, 2, 3]), 2), torch.fmod(torch.tensor([1, 2, 3, 4, 5]), 1.5), torch.frac(torch.tensor([1.0, 2.5, -3.2])), torch.randn(4, dtype=torch.cfloat).imag, torch.ldexp(torch.tensor([1.0]), torch.tensor([1])), torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4])), torch.lerp(torch.arange(1.0, 5.0), torch.empty(4).fill_(10), 0.5), torch.lerp( torch.arange(1.0, 5.0), torch.empty(4).fill_(10), torch.full_like(torch.arange(1.0, 5.0), 0.5), ), torch.lgamma(torch.arange(0.5, 2, 0.5)), torch.log(torch.arange(5) + 10), torch.log10(torch.rand(5)), torch.log1p(torch.randn(5)), torch.log2(torch.rand(5)), torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])), 
torch.logaddexp( torch.tensor([-100.0, -200.0, -300.0]), torch.tensor([-1, -2, -3]) ), torch.logaddexp( torch.tensor([1.0, 2000.0, 30000.0]), torch.tensor([-1, -2, -3]) ), torch.logaddexp2(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])), torch.logaddexp2( torch.tensor([-100.0, -200.0, -300.0]), torch.tensor([-1, -2, -3]) ), torch.logaddexp2( torch.tensor([1.0, 2000.0, 30000.0]), torch.tensor([-1, -2, -3]) ), torch.logical_and(r, s), torch.logical_and(r.double(), s.double()), torch.logical_and(r.double(), s), torch.logical_and(r, s, out=torch.empty(4, dtype=torch.bool)), torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8)), torch.logical_not(torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)), torch.logical_not( torch.tensor([0.0, 1.0, -10.0], dtype=torch.double), out=torch.empty(3, dtype=torch.int16), ), torch.logical_or(r, s), torch.logical_or(r.double(), s.double()), torch.logical_or(r.double(), s), torch.logical_or(r, s, out=torch.empty(4, dtype=torch.bool)), torch.logical_xor(r, s), torch.logical_xor(r.double(), s.double()), torch.logical_xor(r.double(), s), torch.logical_xor(r, s, out=torch.empty(4, dtype=torch.bool)), torch.logit(torch.rand(5), eps=1e-6), torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0])), torch.i0(torch.arange(5, dtype=torch.float32)), torch.igamma(a, b), torch.igammac(a, b), torch.mul(torch.randn(3), 100), b.mul(a), b.mul(5), b.mul(a, out=b), b.mul_(a), b.mul_(5), torch.multiply(torch.randn(4, 1), torch.randn(1, 4)), torch.mvlgamma(torch.empty(2, 3).uniform_(1.0, 2.0), 2), torch.tensor([float("nan"), float("inf"), -float("inf"), 3.14]), torch.nan_to_num(w), torch.nan_to_num_(w), torch.nan_to_num(w, nan=2.0), torch.nan_to_num(w, nan=2.0, posinf=1.0), torch.neg(torch.randn(5)), # torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor([eps + 1, 2 - eps]), torch.polygamma(1, torch.tensor([1.0, 0.5])), torch.polygamma(2, torch.tensor([1.0, 0.5])), torch.polygamma(3, torch.tensor([1.0, 0.5])), torch.polygamma(4, torch.tensor([1.0, 0.5])), torch.pow(a, 2), torch.pow(2, float(torch.tensor(0.5))), torch.pow(torch.arange(1.0, 5.0), torch.arange(1.0, 5.0)), torch.rad2deg( torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]]) ), torch.randn(4, dtype=torch.cfloat).real, torch.reciprocal(a), torch.remainder(torch.tensor([-3.0, -2.0]), 2), torch.remainder(torch.tensor([1, 2, 3, 4, 5]), 1.5), torch.round(a), torch.round(torch.tensor(0.5).item()), torch.rsqrt(a), torch.sigmoid(a), torch.sign(torch.tensor([0.7, -1.2, 0.0, 2.3])), torch.sgn(a), torch.signbit(torch.tensor([0.7, -1.2, 0.0, 2.3])), torch.sin(a), torch.sinc(a), torch.sinh(a), torch.sqrt(a), torch.square(a), torch.sub(torch.tensor((1, 2)), torch.tensor((0, 1)), alpha=2), b.sub(a), b.sub_(a), b.sub(5), torch.sum(5), torch.tan(a), torch.tanh(a), torch.true_divide(a, a), torch.trunc(a), torch.trunc_(a), torch.xlogy(f, g), torch.xlogy(f, g), torch.xlogy(f, 4), torch.xlogy(2, g), ) class ReductionOpsModule(torch.nn.Module): def __init__(self): super(ReductionOpsModule, self).__init__() def forward(self): return self.reduction_ops() def reduction_ops(self): a = torch.randn(4) b = torch.randn(4) c = torch.tensor(0.5) return ( torch.argmax(a), torch.argmin(a), torch.amax(a), torch.amin(a), torch.aminmax(a), torch.all(a), torch.any(a), torch.max(a), a.max(a), torch.max(a, 0), torch.min(a), a.min(a), torch.min(a, 0), torch.dist(a, b), torch.logsumexp(a, 0), torch.mean(a), torch.mean(a, 0), torch.nanmean(a), torch.median(a), torch.nanmedian(a), torch.mode(a), torch.norm(a), 
a.norm(2), torch.norm(a, dim=0), torch.norm(c, torch.tensor(2)), torch.nansum(a), torch.prod(a), torch.quantile(a, torch.tensor([0.25, 0.5, 0.75])), torch.quantile(a, 0.5), torch.nanquantile(a, torch.tensor([0.25, 0.5, 0.75])), torch.std(a), torch.std_mean(a), torch.sum(a), torch.unique(a), torch.unique_consecutive(a), torch.var(a), torch.var_mean(a), torch.count_nonzero(a), ) class ComparisonOpsModule(torch.nn.Module): def __init__(self): super(ComparisonOpsModule, self).__init__() def forward(self): a = torch.tensor(0) b = torch.tensor(1) return ( torch.allclose(a, b), torch.argsort(a), torch.eq(a, b), torch.eq(a, 1), torch.equal(a, b), torch.ge(a, b), torch.ge(a, 1), torch.greater_equal(a, b), torch.greater_equal(a, 1), torch.gt(a, b), torch.gt(a, 1), torch.greater(a, b), torch.isclose(a, b), torch.isfinite(a), torch.isin(a, b), torch.isinf(a), torch.isposinf(a), torch.isneginf(a), torch.isnan(a), torch.isreal(a), torch.kthvalue(a, 1), torch.le(a, b), torch.le(a, 1), torch.less_equal(a, b), torch.lt(a, b), torch.lt(a, 1), torch.less(a, b), torch.maximum(a, b), torch.minimum(a, b), torch.fmax(a, b), torch.fmin(a, b), torch.ne(a, b), torch.ne(a, 1), torch.not_equal(a, b), torch.sort(a), torch.topk(a, 1), torch.msort(a), ) class OtherMathOpsModule(torch.nn.Module): def __init__(self): super(OtherMathOpsModule, self).__init__() def forward(self): return self.other_ops() def other_ops(self): a = torch.randn(4) b = torch.randn(4) c = torch.randint(0, 8, (5,), dtype=torch.int64) e = torch.randn(4, 3) f = torch.randn(4, 4, 4) size = [0, 1] dims = [0, 1] return ( torch.atleast_1d(a), torch.atleast_2d(a), torch.atleast_3d(a), torch.bincount(c), torch.block_diag(a), torch.broadcast_tensors(a), torch.broadcast_to(a, (4)), # torch.broadcast_shapes(a), torch.bucketize(a, b), torch.cartesian_prod(a), torch.cdist(e, e), torch.clone(a), torch.combinations(a), torch.corrcoef(a), # torch.cov(a), torch.cross(e, e), torch.cummax(a, 0), torch.cummin(a, 0), torch.cumprod(a, 0), torch.cumsum(a, 0), torch.diag(a), torch.diag_embed(a), torch.diagflat(a), torch.diagonal(e), torch.diff(a), torch.einsum("iii", f), torch.flatten(a), torch.flip(e, dims), torch.fliplr(e), torch.flipud(e), torch.kron(a, b), torch.rot90(e), torch.gcd(c, c), torch.histc(a), torch.histogram(a), torch.meshgrid(a), torch.meshgrid(a, indexing="xy"), torch.lcm(c, c), torch.logcumsumexp(a, 0), torch.ravel(a), torch.renorm(e, 1, 0, 5), torch.repeat_interleave(c), torch.roll(a, 1, 0), torch.searchsorted(a, b), torch.tensordot(e, e), torch.trace(e), torch.tril(e), torch.tril_indices(3, 3), torch.triu(e), torch.triu_indices(3, 3), torch.vander(a), torch.view_as_real(torch.randn(4, dtype=torch.cfloat)), torch.view_as_complex(torch.randn(4, 2)), torch.resolve_conj(a), torch.resolve_neg(a), ) class SpectralOpsModule(torch.nn.Module): def __init__(self): super(SpectralOpsModule, self).__init__() def forward(self): return self.spectral_ops() def spectral_ops(self): a = torch.randn(10) b = torch.randn(10, 8, 4, 2) return ( torch.stft(a, 8), torch.stft(a, torch.tensor(8)), torch.istft(b, 8), torch.bartlett_window(2, dtype=torch.float), torch.blackman_window(2, dtype=torch.float), torch.hamming_window(4, dtype=torch.float), torch.hann_window(4, dtype=torch.float), torch.kaiser_window(4, dtype=torch.float), ) class BlasLapackOpsModule(torch.nn.Module): def __init__(self): super(BlasLapackOpsModule, self).__init__() def forward(self): return self.blas_lapack_ops() def blas_lapack_ops(self): m = torch.randn(3, 3) a = torch.randn(10, 3, 4) b = torch.randn(10, 
4, 3) v = torch.randn(3) return ( torch.addbmm(m, a, b), torch.addmm(torch.randn(2, 3), torch.randn(2, 3), torch.randn(3, 3)), torch.addmv(torch.randn(2), torch.randn(2, 3), torch.randn(3)), torch.addr(torch.zeros(3, 3), v, v), torch.baddbmm(m, a, b), torch.bmm(a, b), torch.chain_matmul(torch.randn(3, 3), torch.randn(3, 3), torch.randn(3, 3)), # torch.cholesky(a), # deprecated # torch.cholesky_inverse(torch.randn(3, 3)), # had some error # torch.cholesky_solve(torch.randn(3, 3), torch.randn(3, 3)), torch.dot(v, v), # torch.linalg.eig(m), # not build with lapack # torch.geqrf(a), torch.ger(v, v), torch.inner(m, m), # torch.inverse(m), # torch.det(m), # torch.logdet(m), # torch.slogdet(m), # torch.lstsq(m, m), # torch.lu(m), # torch.lu_solve(m, *torch.lu(m)), # torch.lu_unpack(*torch.lu(m)), torch.matmul(m, m), torch.matrix_power(m, 2), # torch.matrix_rank(m), torch.matrix_exp(m), torch.mm(m, m), torch.mv(m, v), # torch.orgqr(a, m), # torch.ormqr(a, m, v), torch.outer(v, v), # torch.pinverse(m), # torch.qr(a), # torch.solve(m, m), # torch.svd(a), # torch.svd_lowrank(a), # torch.pca_lowrank(a), # torch.symeig(a), # deprecated # torch.lobpcg(a, b), # not supported torch.trapz(m, m), torch.trapezoid(m, m), torch.cumulative_trapezoid(m, m), # torch.triangular_solve(m, m), torch.vdot(v, v), )
36.048936
110
0.474532
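The classes in the math_ops.py row above are ordinary torch.nn.Module subclasses whose forward() sweeps through groups of operators; in a mobile model test such modules are typically compiled to TorchScript and saved. The sketch below is an assumption-laden illustration (the trimmed module and the output file name are invented), not the repository's actual export harness.

import torch

class TrimmedReductionOps(torch.nn.Module):
    # Cut-down stand-in for the ReductionOpsModule shown above.
    def forward(self):
        a = torch.randn(4)
        return torch.sum(a), torch.mean(a), torch.max(a)

scripted = torch.jit.script(TrimmedReductionOps())  # compile forward() to TorchScript
torch.jit.save(scripted, "reduction_ops.pt")        # serialize the scripted module
print(scripted())                                   # runs the scripted forward()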
7957700f505fe822d925ca2a068cdc14546fecc5
3,049
py
Python
Gamle scripts/systematic/drop_0_10.py
MadsAW/machine-learning-on-materials
6101c7e3d12be54b12391c78442294198a39cc9b
[ "MIT" ]
2
2018-10-10T09:32:34.000Z
2019-03-28T08:42:31.000Z
Gamle scripts/systematic/drop_0_10.py
MadsAW/machine-learning-on-materials
6101c7e3d12be54b12391c78442294198a39cc9b
[ "MIT" ]
null
null
null
Gamle scripts/systematic/drop_0_10.py
MadsAW/machine-learning-on-materials
6101c7e3d12be54b12391c78442294198a39cc9b
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 13 12:31:46 2018

@author: Simon
"""
import os
from createLargerFeatureMatrix import simpleLargeMatrix
import pickle
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import regularizers
import numpy as np

path = "Saved matrices/11-10-2018 11.36/sorted_Cutoff25_noSingleElementKrystals/"

#Load training data
featureMatrixFile = "train_featureMatrix.npy"
atomicSymbolsListFile = "train_pickledAtomicSymbolsList.txt"
energiesFile = "train_pickledEnergies.txt"

largeFeatureMatrix, mappedAtomicNumber = simpleLargeMatrix(path,featureMatrixFile, atomicSymbolsListFile)

with open(path+energiesFile, "rb") as pickleFile:
    energies = pickle.load(pickleFile)

largeFeatureMatrix.shape = (largeFeatureMatrix.shape[0], -1)

X = largeFeatureMatrix
Y = np.array(energies)

#Load validation set
featureMatrixFileValidate = "validate_featureMatrix.npy"
atomicSymbolsListFileValidate = "validate_pickledAtomicSymbolsList.txt"
energiesFileValidate = "validate_pickledEnergies.txt"

largeFeatureMatrixValidate, mappedAtomicNumberValidate = simpleLargeMatrix(path,featureMatrixFileValidate, atomicSymbolsListFileValidate)

with open(path+energiesFileValidate, "rb") as pickleFile:
    energiesValidate = pickle.load(pickleFile)

largeFeatureMatrixValidate.shape = (largeFeatureMatrixValidate.shape[0], -1)

X_v = largeFeatureMatrixValidate
Y_v = np.array(energiesValidate)

#Model
drop=0.10

model = Sequential()
inputShape = np.shape(X)[1:]
model.add(Dropout(drop, input_shape=inputShape))
model.add(Dense(1000, activation='relu'))
model.add(Dropout(drop))
model.add(Dense(500, activation='relu'))
model.add(Dropout(drop))
model.add(Dense(1))

#Compile model
model.compile(loss='mse', optimizer='adam', metrics=["mse"])
print(model.summary())

#Fit the model. This is where the hard computing happens.
#Number of epochs is number of iterations through dataset
#Batch size is number of iterations before weights are changed.
model.fit(X, Y, epochs=40, batch_size=50)

#Evaluate model efficiency
scores = model.evaluate(X, Y)
print("\n%s: %.2f eV" % (model.metrics_names[1], scores[1]))

#Make predictions
predictions = model.predict(X)
predictionsValidate = model.predict(X_v)

#Make predictions on training set
a=0
for i in range(len(predictions)):
    a+=(energies[i]-predictions[i])**2
rmse=np.sqrt(a/len(energies))
print("RMSE on training data "+str(rmse))

#Make predictions on validation set
predictionsValidate = model.predict(X_v)
a=0
for i in range(len(predictionsValidate)):
    a+=(energiesValidate[i]-predictionsValidate[i])**2
rmseValidate=np.sqrt(a/len(energiesValidate))
print("RMSE on validation data "+str(rmseValidate))

outs = ["Drop = " + str(drop),"RMSE on training data "+str(rmse),"RMSE on validation data "+str(rmseValidate)]

outfile="rmse_dropout.txt"
with open(outfile, "a+") as file:
    for line in outs:
        file.write(line)
        file.write("\n")
    file.write("\n")

print("DONE")
23.274809
137
0.762545
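drop_0_10.py in the row above accumulates squared errors in an explicit Python loop before taking the square root. For illustration, an equivalent vectorized RMSE in numpy; ravel() flattens the (n, 1) array that Keras model.predict returns so it broadcasts cleanly against the target vector.

import numpy as np

def rmse(y_true, y_pred):
    # Vectorized equivalent of the accumulation loop used in the script.
    y_true = np.asarray(y_true, dtype=float).ravel()
    y_pred = np.asarray(y_pred, dtype=float).ravel()
    return np.sqrt(np.mean((y_true - y_pred) ** 2))

print(rmse([1.0, 2.0, 3.0], [1.5, 2.0, 2.5]))  # ~0.408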
7957715d2faae3e1728cc38e91f3eb3228d4ccc8
311
py
Python
sleep/middleware.py
sleepers-anonymous/zscore
2d7eb2e2c06c307af7fae4058173a25ba9c40025
[ "MIT" ]
3
2015-05-20T02:38:27.000Z
2017-02-13T20:46:40.000Z
sleep/middleware.py
sleepers-anonymous/zscore
2d7eb2e2c06c307af7fae4058173a25ba9c40025
[ "MIT" ]
23
2015-01-06T17:42:22.000Z
2020-10-23T12:32:34.000Z
sleep/middleware.py
sleepers-anonymous/zscore
2d7eb2e2c06c307af7fae4058173a25ba9c40025
[ "MIT" ]
1
2018-01-29T21:54:54.000Z
2018-01-29T21:54:54.000Z
from django.utils import timezone
from models import SleeperProfile, Sleeper
import pytz

class TimezoneMiddleware(object):
    def process_request(self,request):
        if request.user.is_authenticated():
            tz = pytz.timezone(request.user.sleeperprofile.timezone)
            timezone.activate(tz)
28.272727
68
0.733119
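The middleware.py row above uses the old-style Django middleware hook (process_request) and calls is_authenticated() as a method, both of which predate Django 1.10. For contrast, a hedged sketch of the same idea in the new-style middleware form; it is an illustration, not the recorded file.

import pytz
from django.utils import timezone

class TimezoneMiddleware:
    # New-style middleware: a callable that wraps get_response.
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        if request.user.is_authenticated:  # property, not a method, in modern Django
            timezone.activate(pytz.timezone(request.user.sleeperprofile.timezone))
        else:
            timezone.deactivate()
        return self.get_response(request)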
795771994d5854b046a58287be4338170136341e
10,188
py
Python
tests/test_default_values.py
berland/configsuite
9c1eaeed3610ffaa9e549a35dc2709da44633c75
[ "MIT" ]
8
2019-08-12T08:16:12.000Z
2022-03-15T12:42:03.000Z
tests/test_default_values.py
berland/configsuite
9c1eaeed3610ffaa9e549a35dc2709da44633c75
[ "MIT" ]
95
2019-01-29T08:05:35.000Z
2022-01-06T07:42:59.000Z
tests/test_default_values.py
berland/configsuite
9c1eaeed3610ffaa9e549a35dc2709da44633c75
[ "MIT" ]
14
2019-02-06T08:15:10.000Z
2020-11-05T12:59:37.000Z
"""Copyright 2019 Equinor ASA and The Netherlands Organisation for Applied Scientific Research TNO. Licensed under the MIT license. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the conditions stated in the LICENSE file in the project root for details. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. """ import unittest import datetime import configsuite from configsuite import MetaKeys as MK from configsuite import types from .data import car class TestDefaultValues(unittest.TestCase): def test_untouched_basic_types(self): raw_config = car.build_all_default_config() car_schema = car.build_schema() car_schema[MK.Content]["tire"][MK.Content]["rim"].pop(MK.Default) config_suite = configsuite.ConfigSuite(raw_config, car_schema) self.assertTrue(config_suite.valid) self.assertEqual(17, config_suite.snapshot.tire.dimension) self.assertEqual(None, config_suite.snapshot.tire.rim) def test_empty_named_dict_default_values(self): raw_config = car.build_all_default_config() car_schema = car.build_schema() config_suite = configsuite.ConfigSuite(raw_config, car_schema) self.assertTrue(config_suite.valid) self.assertEqual(17, config_suite.snapshot.tire.dimension) self.assertEqual("green", config_suite.snapshot.tire.rim) def test_default_values(self): raw_config = car.build_config() car_schema = car.build_schema() config_suite = configsuite.ConfigSuite(raw_config, car_schema) self.assertTrue(config_suite.valid) # Top level basic type within named dict self.assertEqual("Norway", config_suite.snapshot.country) # Named dict within named dict self.assertEqual(15, config_suite.snapshot.tire.dimension) self.assertEqual("green", config_suite.snapshot.tire.rim) # Named dict within dict owner_data = (("Johan", "Svalbard"), ("Svein", "Earth")) self.assertEqual(len(owner_data), len(config_suite.snapshot.owner)) for (expected_name, expected_location), config_owner in zip( owner_data, sorted(config_suite.snapshot.owner) ): self.assertEqual(expected_name, config_owner.value.name) self.assertEqual(expected_location, config_owner.value.location) # Named dict within list incidents_data = (("whereabouts", 0), ("somewhere else", 1)) self.assertEqual(len(incidents_data), len(config_suite.snapshot.incidents)) for (expected_location, expected_casualties), config_owner in zip( incidents_data, sorted(config_suite.snapshot.incidents) ): self.assertEqual(expected_location, config_owner.location) self.assertEqual(expected_casualties, config_owner.casualties) def test_element_transformations_applied(self): raw_config = car.build_all_default_config() car_schema = car.build_schema() tire_schema = car_schema[MK.Content]["tire"] tire_schema[MK.Content]["dimension"][MK.Transformation] = car.inch_to_cm config_suite = configsuite.ConfigSuite(raw_config, car_schema) self.assertTrue(config_suite.valid) self.assertAlmostEqual(17 * 2.54, config_suite.snapshot.tire.dimension) def test_context_transformation_applied(self): raw_config = car.build_all_default_config() car_schema = car.build_schema() tire_schema = car_schema[MK.Content]["tire"] tire_schema[MK.Content]["dimension"][ MK.ContextTransformation ] = 
car.inch_to_cm_context_based config_suite = configsuite.ConfigSuite( raw_config, car_schema, extract_validation_context=lambda snapshot: None ) self.assertTrue(config_suite.valid) self.assertAlmostEqual(17 * 2.54, config_suite.snapshot.tire.dimension) def test_element_validators_applied(self): raw_config = car.build_all_default_config() car_schema = car.build_schema() tire_content = car_schema[MK.Content]["tire"][MK.Content] tire_content["dimension"][MK.ElementValidators] = [car.is_valid_dimension] config_suite = configsuite.ConfigSuite(raw_config, car_schema) self.assertFalse(config_suite.valid) for err in config_suite.errors: self.assertTrue( isinstance(err, configsuite.validation_errors.InvalidValueError) ) self.assertTrue(err.msg.startswith("Is x a valid dimension")) def test_context_validators_applied(self): raw_config = car.build_config() car_schema = car.build_schema() date_content = { MK.Type: types.DateTime, MK.Default: datetime.datetime(1999, 1, 1), MK.ContextValidators: [car.is_valid_date], MK.Required: False, MK.AllowNone: True, } owner_schema = car_schema[MK.Content]["owner"] owner_schema[MK.Content][MK.Value][MK.Content]["date"] = date_content config_suite = configsuite.ConfigSuite( raw_config, car_schema, extract_validation_context=car.extract_production_date, ) self.assertFalse(config_suite.valid) for err in config_suite.errors: self.assertTrue( isinstance(err, configsuite.validation_errors.InvalidValueError) ) self.assertTrue(err.msg.startswith("Is x a valid date")) def test_not_allow_default_for_list(self): raw_config = car.build_all_default_config() car_schema = car.build_schema() schema_value_content = { MK.Default: ["default value"], MK.Type: types.List, MK.Content: {MK.Item: {MK.Type: types.String}}, } car_schema[MK.Content]["incidents"] = schema_value_content with self.assertRaises(ValueError) as error_context: configsuite.ConfigSuite(raw_config, car_schema) self.assertTrue( str(error_context.exception).startswith("Default can only be used") ) def test_not_allow_defaults_for_list_items(self): raw_config = car.build_all_default_config() car_schema = car.build_schema() schema_value_content = { MK.Type: types.List, MK.Content: {MK.Item: {MK.Type: types.String, MK.Default: "default value"}}, } car_schema[MK.Content]["incidents"] = schema_value_content with self.assertRaises(ValueError) as error_context: configsuite.ConfigSuite(raw_config, car_schema) self.assertTrue(str(error_context.exception).startswith("Default value is")) def test_not_allow_default_for_dict(self): raw_config = car.build_all_default_config() car_schema = car.build_schema() schema_value_content = { MK.Default: {"key": 10}, MK.Type: types.Dict, MK.Content: { MK.Key: {MK.Type: types.String}, MK.Value: {MK.Type: types.Number, MK.Default: 10}, }, } car_schema[MK.Content]["owner"] = schema_value_content with self.assertRaises(ValueError) as error_context: configsuite.ConfigSuite(raw_config, car_schema) self.assertTrue( str(error_context.exception).startswith("Default can only be used") ) def test_not_allow_defaults_for_dict_keys(self): raw_config = car.build_all_default_config() car_schema = car.build_schema() schema_value_content = { MK.Type: types.Dict, MK.Content: { MK.Key: { MK.Type: types.String, MK.Default: "default value", MK.Required: False, MK.AllowNone: True, }, MK.Value: {MK.Type: types.Number}, }, } car_schema[MK.Content]["owner"] = schema_value_content with self.assertRaises(ValueError) as error_context: configsuite.ConfigSuite(raw_config, car_schema) 
self.assertTrue(str(error_context.exception).startswith("Default value is")) def test_not_allow_defaults_for_dict_values(self): raw_config = car.build_all_default_config() car_schema = car.build_schema() schema_value_content = { MK.Type: types.Dict, MK.Content: { MK.Key: {MK.Type: types.String}, MK.Value: { MK.Type: types.Number, MK.Default: 10, MK.Required: False, MK.AllowNone: True, }, }, } car_schema[MK.Content]["owner"] = schema_value_content with self.assertRaises(ValueError) as error_context: configsuite.ConfigSuite(raw_config, car_schema) self.assertTrue(str(error_context.exception).startswith("Default value is")) def test_not_allow_default_for_named_dict(self): raw_config = car.build_all_default_config() schema_value_content = { MK.Type: types.NamedDict, MK.Default: {"profile": 200, "width": 20}, MK.Content: { "profile": {MK.Type: types.Number}, "width": {MK.Type: types.Number}, }, } car_schema = car.build_schema() car_schema[MK.Content]["tire"][MK.Content]["rim"] = schema_value_content with self.assertRaises(ValueError) as error_context: configsuite.ConfigSuite(raw_config, car_schema) self.assertTrue( str(error_context.exception).startswith("Default can only be used") )
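The tests above depend on a `car` schema helper defined in a sibling `data` module that is not part of this record. The following is a minimal, self-contained sketch of the same default-value mechanism, written only against the configsuite API already exercised above (ConfigSuite, MetaKeys, types). The schema, key names, and values are hypothetical, and it assumes that a key carrying MK.Default together with MK.Required: False and MK.AllowNone: True is filled from its default when absent from the raw config, as the all-default tests suggest.

# Hypothetical sketch of configsuite default values; not part of the dataset row above.
import configsuite
from configsuite import MetaKeys as MK
from configsuite import types

schema = {
    MK.Type: types.NamedDict,
    MK.Content: {
        # Provided in the raw config below, so the default is not used.
        "country": {
            MK.Type: types.String,
            MK.Default: "Norway",
            MK.Required: False,
            MK.AllowNone: True,
        },
        # Absent from the raw config, so the default is expected to apply.
        "dimension": {
            MK.Type: types.Number,
            MK.Default: 17,
            MK.Required: False,
            MK.AllowNone: True,
        },
    },
}

suite = configsuite.ConfigSuite({"country": "Sweden"}, schema)
assert suite.valid
assert suite.snapshot.country == "Sweden"
assert suite.snapshot.dimension == 17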
38.737643
88
0.652925
795773ed1443726aa14594a1c107c71b94e1a034
3,018
py
Python
salt/tests/unit/formulas/fixtures/kubernetes.py
elenamarcova/metalk8s
d23df7144e710777150fdfe5779d733743715771
[ "Apache-2.0" ]
1
2021-09-04T18:08:07.000Z
2021-09-04T18:08:07.000Z
salt/tests/unit/formulas/fixtures/kubernetes.py
elenamarcova/metalk8s
d23df7144e710777150fdfe5779d733743715771
[ "Apache-2.0" ]
null
null
null
salt/tests/unit/formulas/fixtures/kubernetes.py
elenamarcova/metalk8s
d23df7144e710777150fdfe5779d733743715771
[ "Apache-2.0" ]
null
null
null
"""Expose a really crude mock of K8s API for use in rendering tests.""" import collections import re from typing import Any, Dict, Iterator, List, Optional import pytest APIVersion = str Kind = str ItemList = List[Any] K8sData = Dict[APIVersion, Dict[Kind, ItemList]] class KubernetesMock: """Simple object for mocking basic API calls on an in-memory K8s dataset.""" Matcher = collections.namedtuple("Matcher", ("key", "op", "value")) def __init__(self, data: K8sData): self.data = data @staticmethod def _apply_matchers(objects: ItemList, matchers: List[Matcher]) -> Iterator[Any]: def _filter(item: Any) -> bool: matches = True for matcher in matchers: val = item for key in matcher.key: val = val[key] if matcher.op == "=": matches &= val == matcher.value elif matcher.op == "!=": matches &= val != matcher.value return matches return filter(_filter, objects) def _get_item_list(self, api_version: APIVersion, kind: Kind) -> ItemList: try: item = self.data[api_version][kind] except KeyError: pytest.fail(f"No data in Kubernetes mock for '{api_version}/{kind}'") return item def get( self, api_version: APIVersion, kind: Kind, name: str, namespace: Optional[str] = None, ) -> Optional[Any]: """Retrieve an object from the data store.""" items = self._get_item_list(api_version, kind) matchers = [self.Matcher(["metadata", "name"], "=", name)] if namespace is not None: matchers.append(self.Matcher(["metadata", "namespace"], "=", namespace)) return next(self._apply_matchers(items, matchers), None) def list( self, api_version: APIVersion, kind: Kind, namespace: Optional[str] = None, label_selector: Optional[str] = None, ) -> List[Any]: """Retrieve a list of objects from the data store.""" items = self._get_item_list(api_version, kind) matchers = [] if namespace is not None: matchers.append(self.Matcher(["metadata", "namespace"], "=", namespace)) if label_selector is not None: for match_expr in label_selector.split(","): match = re.match( r"^(?P<key>.*[^!])(?P<op>!=|=)(?P<value>.+)$", match_expr ) assert ( match is not None ), f"Invalid label selector expression: {match_expr}" matchers.append( self.Matcher( key=["metadata", "labels", match.group("key")], op=match.group("op"), value=match.group("value"), ) ) return list(self._apply_matchers(items, matchers))
31.768421
85
0.543075
795774679084758b267625e44e4a8176f1d02ef2
192,329
py
Python
gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py
shahin/gpdb
3909ad6b2d9bc06ed4659d2c9223fc12b9409a33
[ "PostgreSQL", "Apache-2.0" ]
null
null
null
gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py
shahin/gpdb
3909ad6b2d9bc06ed4659d2c9223fc12b9409a33
[ "PostgreSQL", "Apache-2.0" ]
null
null
null
gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py
shahin/gpdb
3909ad6b2d9bc06ed4659d2c9223fc12b9409a33
[ "PostgreSQL", "Apache-2.0" ]
1
2018-12-04T09:13:57.000Z
2018-12-04T09:13:57.000Z
import commands import fnmatch import getpass import glob import gzip import json import os import platform import shutil import socket import tarfile import thread import yaml import json from collections import defaultdict from datetime import datetime from gppylib.commands.gp import SegmentStart, GpStandbyStart from gppylib.commands.unix import findCmdInPath from gppylib.operations.backup_utils import Context from gppylib.operations.dump import get_partition_state from gppylib.operations.startSegments import MIRROR_MODE_MIRRORLESS from gppylib.operations.unix import ListRemoteFilesByPattern, CheckRemoteFile from test.behave_utils.gpfdist_utils.gpfdist_mgmt import Gpfdist from test.behave_utils.utils import * from test.behave_utils.PgHba import PgHba, Entry timestamp_json = '/tmp/old_to_new_timestamps.json' master_data_dir = os.environ.get('MASTER_DATA_DIRECTORY') timestamp_json = '/tmp/old_to_new_timestamps.json' global_timestamps = {} if master_data_dir is None: raise Exception('Please set MASTER_DATA_DIRECTORY in environment') def _write_timestamp_to_json(context): scenario_name = context._stack[0]['scenario'].name timestamp = get_timestamp_from_output(context) if not global_timestamps.has_key(scenario_name): global_timestamps[scenario_name] = list() global_timestamps[scenario_name].append(timestamp) with open(timestamp_json, 'w') as outfile: json.dump(global_timestamps, outfile) def _read_timestamp_from_json(context): scenario_name = context._stack[0]['scenario'].name with open(timestamp_json, 'r') as infile: return json.load(infile)[scenario_name] @given('the database is running') def impl(context): start_database_if_not_started(context) if has_exception(context): raise context.exception @given('the database is not running') @when('the database is not running') def impl(context): stop_database_if_started(context) if has_exception(context): raise context.exception @given('the database is "{version}" with dburl "{dbconn}"') def impl(context, dbconn, version): command = '%s -t -q -c \'select version();\''%(dbconn) (rc, out, err) = run_cmd(command) if not ('Greenplum Database '+version) in out: print 'version %s does not match current gpdb version %s'%(version, out) @given('database "{dbname}" exists') @then('database "{dbname}" exists') def impl(context, dbname): create_database_if_not_exists(context, dbname) @given('database "{dbname}" is created if not exists on host "{HOST}" with port "{PORT}" with user "{USER}"') @then('database "{dbname}" is created if not exists on host "{HOST}" with port "{PORT}" with user "{USER}"') def impl(context, dbname, HOST, PORT, USER): host = os.environ.get(HOST) port = 0 if os.environ.get(PORT) == None else int(os.environ.get(PORT)) user = os.environ.get(USER) create_database_if_not_exists(context, dbname, host, port, user) @when('the database "{dbname}" does not exist') @given('the database "{dbname}" does not exist') @then('the database "{dbname}" does not exist') def impl(context, dbname): drop_database_if_exists(context, dbname) @when('the database "{dbname}" does not exist on host "{HOST}" with port "{PORT}" with user "{USER}"') @given('the database "{dbname}" does not exist on host "{HOST}" with port "{PORT}" with user "{USER}"') @then('the database "{dbname}" does not exist on host "{HOST}" with port "{PORT}" with user "{USER}"') def impl(context, dbname, HOST, PORT, USER): host = os.environ.get(HOST) port = int(os.environ.get(PORT)) user = os.environ.get(USER) drop_database_if_exists(context, dbname, host, port, user) @given('the database 
"{dbname}" does not exist with connection "{dbconn}"') @when('the database "{dbname}" does not exist with connection "{dbconn}"') @then('the database "{dbname}" does not exist with connection "{dbconn}"') def impl(context, dbname, dbconn): command = '%s -c \'drop database if exists %s;\''%(dbconn, dbname) run_command(context, command) @given('the database "{dbname}" exists with connection "{dbconn}"') @when('the database "{dbname}" exists with connection "{dbconn}"') @then('the database "{dbname}" exists with connection "{dbconn}"') def impl(context, dbname, dbconn): command = '%s -c \'create database %s;\''%(dbconn, dbname) run_command(context, command) def get_segment_hostlist(): gparray = GpArray.initFromCatalog(dbconn.DbURL()) segment_hostlist = sorted(gparray.get_hostlist(includeMaster=False)) if not segment_hostlist: raise Exception('segment_hostlist was empty') return segment_hostlist @given('we have determined the first segment hostname') def impl(context): segment_hostlist = get_segment_hostlist() context.first_segment_hostname = segment_hostlist[0] @given('{nic} on the first segment host is {nic_status}') @then('{nic} on the first segment host is {nic_status}') def impl(context, nic, nic_status): if nic_status.strip() == 'down': bring_nic_down(context.first_segment_hostname, nic) elif nic_status.strip() == 'up': bring_nic_up(context.first_segment_hostname, nic) else: raise Exception('Invalid nic status in feature file %s' % nic_status) @when('an insert on "{table}" in "{dbname}" is rolled back') def impl(context, table, dbname): with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn: insert_sql = """INSERT INTO %s values (1)""" % table dbconn.execSQL(conn, insert_sql) conn.rollback() @when('a truncate on "{table}" in "{dbname}" is rolled back') def impl(context, table, dbname): with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn: insert_sql = """TRUNCATE table %s""" % table dbconn.execSQL(conn, insert_sql) conn.rollback() @when('an alter on "{table}" in "{dbname}" is rolled back') def impl(context, table, dbname): with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn: insert_sql = """ALTER TABLE %s add column cnew int default 0""" % table dbconn.execSQL(conn, insert_sql) conn.rollback() @given('the user truncates "{table_list}" tables in "{dbname}"') @when('the user truncates "{table_list}" tables in "{dbname}"') @then('the user truncates "{table_list}" tables in "{dbname}"') def impl(context, table_list, dbname): if not table_list: raise Exception('Table list is empty') tables = table_list.split(',') for t in tables: truncate_table(dbname, t.strip()) def populate_regular_table_data(context, tabletype, table_name, compression_type, dbname, rowcount=1094, with_data=False, host=None, port=0, user=None): create_database_if_not_exists(context, dbname, host=host, port=port, user=user) drop_table_if_exists(context, table_name=table_name, dbname=dbname, host=host, port=port, user=user) if compression_type == "None": create_partition(context, table_name, tabletype, dbname, compression_type=None, partition=False, rowcount=rowcount, with_data=with_data, host=host, port=port, user=user) else: create_partition(context, table_name, tabletype, dbname, compression_type, partition=False, rowcount=rowcount, with_data=with_data, host=host, port=port, user=user) @given('there is a "{tabletype}" table "{table_name}" with compression "{compression_type}" in "{dbname}" with data') @when('there is a "{tabletype}" table "{table_name}" with compression "{compression_type}" in "{dbname}" with 
data') @then('there is a "{tabletype}" table "{table_name}" with compression "{compression_type}" in "{dbname}" with data') def impl(context, tabletype, table_name, compression_type, dbname): populate_regular_table_data(context, tabletype, table_name, compression_type, dbname, with_data=True) @given('there is a "{tabletype}" table "{table_name}" with compression "{compression_type}" in "{dbname}" with data "{with_data}" on host "{HOST}" with port "{PORT}" with user "{USER}"') @when('there is a "{tabletype}" table "{table_name}" with compression "{compression_type}" in "{dbname}" with data "{with_data}" on host "{HOST}" with port "{PORT}" with user "{USER}"') @then('there is a "{tabletype}" table "{table_name}" with compression "{compression_type}" in "{dbname}" with data "{with_data}" on host "{HOST}" with port "{PORT}" with user "{USER}"') def impl(context, tabletype, table_name, compression_type, dbname, with_data, HOST, PORT, USER): host = os.environ.get(HOST) port = int(os.environ.get(PORT)) user = os.environ.get(USER) with_data = bool(with_data) populate_regular_table_data(context, tabletype, table_name, compression_type, dbname, 10, with_data, host, port, user) @when('the partition table "{table_name}" in "{dbname}" is populated with similar data') def impl(context, table_name, dbname): populate_partition_diff_data_same_eof(table_name, dbname) @given('the partition table "{table_name}" in "{dbname}" is populated with same data') def impl(context, table_name, dbname): populate_partition_same_data(table_name, dbname) @given('there is a "{tabletype}" table "{table_name}" with index "{indexname}" compression "{compression_type}" in "{dbname}" with data') def impl(context, tabletype, table_name, compression_type, indexname, dbname): create_database_if_not_exists(context, dbname) drop_table_if_exists(context, table_name=table_name, dbname=dbname) if compression_type == "None": create_partition(context, table_name, tabletype, dbname, compression_type=None, partition=False) else: create_partition(context, table_name, tabletype, dbname, compression_type, partition=False) create_indexes(context, table_name, indexname, dbname) @given('there is a "{tabletype}" partition table "{table_name}" with compression "{compression_type}" in "{dbname}" with data') @then('there is a "{tabletype}" partition table "{table_name}" with compression "{compression_type}" in "{dbname}" with data') def impl(context, tabletype, table_name, compression_type, dbname): create_database_if_not_exists(context, dbname) drop_table_if_exists(context, table_name=table_name, dbname=dbname) if compression_type == "None": create_partition(context, tablename=table_name, storage_type=tabletype, dbname=dbname, with_data=True) else: create_partition(context, tablename=table_name, storage_type=tabletype, dbname=dbname, with_data=True, compression_type=compression_type) @given('there is a mixed storage partition table "{tablename}" in "{dbname}" with data') def impl(context, tablename, dbname): create_database_if_not_exists(context, dbname) drop_table_if_exists(context, table_name=tablename, dbname=dbname) create_mixed_storage_partition(context, tablename, dbname) @given('there is a partition table "{tablename}" has external partitions of gpfdist with file "{filename}" on port "{port}" in "{dbname}" with data') def impl(context, tablename, dbname, filename, port): create_database_if_not_exists(context, dbname) drop_table_if_exists(context, table_name=tablename, dbname=dbname) create_external_partition(context, tablename, dbname, port, 
filename) @given('"{dbname}" does not exist') def impl(context, dbname): drop_database(context, dbname) @given('{env_var} environment variable is not set') def impl(context, env_var): if not hasattr(context, 'orig_env'): context.orig_env = dict() context.orig_env[env_var] = os.environ.get(env_var) if env_var in os.environ: del os.environ[env_var] @given('there are no "{tmp_file_prefix}" tempfiles') def impl(context, tmp_file_prefix): if tmp_file_prefix is not None and tmp_file_prefix: run_command(context, 'rm -f /tmp/%s*' % tmp_file_prefix) else: raise Exception('Invalid call to temp file removal %s' % tmp_file_prefix) @then('{env_var} environment variable should be restored') def impl(context, env_var): if not hasattr(context, 'orig_env'): raise Exception('%s can not be reset' % env_var) if env_var not in context.orig_env: raise Exception('%s can not be reset.' % env_var) os.environ[env_var] = context.orig_env[env_var] del context.orig_env[env_var] @when('the table names in "{dbname}" is stored') @then('the table names in "{dbname}" is stored') def impl(context, dbname): context.table_names = get_table_names(dbname) @given('the user runs "{command}"') @when('the user runs "{command}"') @then('the user runs "{command}"') def impl(context, command): run_gpcommand(context, command) @given('the user runs command "{command}"') @when('the user runs command "{command}"') @then('the user runs command "{command}"') def impl(context, command): run_command(context, command) @given('the user puts cluster on "{HOST}" "{PORT}" "{USER}" in "{transition}"') @when('the user puts cluster on "{HOST}" "{PORT}" "{USER}" in "{transition}"') @then('the user puts cluster on "{HOST}" "{PORT}" "{USER}" in "{transition}"') def impl(context, HOST, PORT, USER, transition): host = os.environ.get(HOST) user = os.environ.get(USER) port = os.environ.get(PORT) source_file = os.path.join(os.environ.get('GPHOME'),'greenplum_path.sh') master_dd = os.environ.get('MASTER_DATA_DIRECTORY') export_mdd = 'export MASTER_DATA_DIRECTORY=%s;export PGPORT=%s'%(master_dd, port) # reset all fault inject entry if exists command = 'gpfaultinjector -f all -m async -y reset -r primary -H ALL' run_command_remote(context, command, host, source_file, export_mdd) command = 'gpfaultinjector -f all -m async -y resume -r primary -H ALL' run_command_remote(context, command, host, source_file, export_mdd) trigger_transition = "psql -d template1 -h %s -U %s -p %s -c 'drop table if exists trigger;'"%(host,user,port) if transition == 'ct': command = 'gpfaultinjector -f filerep_consumer -m async -y fault -r primary -H ALL' run_command_remote(context, command, host, source_file, export_mdd) run_command(context, trigger_transition) wait_till_change_tracking_transition(host,port,user) if transition == 'resync': command = 'gpfaultinjector -f filerep_consumer -m async -y fault -r primary -H ALL' run_command_remote(context,command, host, source_file, export_mdd) run_command(context, trigger_transition) wait_till_change_tracking_transition(host,port,user) command = 'gpfaultinjector -f filerep_resync -m async -y suspend -r primary -H ALL' run_command_remote(context, command, host, source_file, export_mdd) run_command_remote(context, 'gprecoverseg -a', host, source_file, export_mdd) wait_till_resync_transition(host,port,user) if transition == 'sync': run_command_remote(context, 'gpstop -air', host, source_file, export_mdd) run_command_remote(context, 'gprecoverseg -a', host, source_file, export_mdd) wait_till_insync_transition(host,port,user) 
run_command_remote(context, 'gprecoverseg -ar', host, source_file, export_mdd) @given('the user runs workload under "{dir}" with connection "{dbconn}"') @when('the user runs workload under "{dir}" with connection "{dbconn}"') def impl(context, dir, dbconn): for file in os.listdir(dir): if file.endswith('.sql'): command = '%s -f %s'%(dbconn, os.path.join(dir,file)) run_command(context, command) @given('the user "{USER}" creates filespace_config file for "{fs_name}" on host "{HOST}" with gpdb port "{PORT}" and config "{config_file}" in "{dir}" directory') @then('the user "{USER}" creates filespace_config file for "{fs_name}" on host "{HOST}" with gpdb port "{PORT}" and config "{config_file}" in "{dir}" directory') def impl(context, USER, HOST, PORT,fs_name,config_file,dir): user = os.environ.get(USER) host = os.environ.get(HOST) port = os.environ.get(PORT) if not dir.startswith("/"): dir = os.environ.get(dir) config_file_path = dir + "/" + config_file create_gpfilespace_config(host,port, user, fs_name, config_file_path, dir) @given('the user "{USER}" creates filespace on host "{HOST}" with gpdb port "{PORT}" and config "{config_file}" in "{dir}" directory') @when('the user "{USER}" creates filespace on host "{HOST}" with gpdb port "{PORT}" and config "{config_file}" in "{dir}" directory') def impl(context, USER, HOST, PORT, config_file, dir): user = os.environ.get(USER) host = os.environ.get(HOST) port = os.environ.get(PORT) if not dir.startswith("/"): dir = os.environ.get(dir) config_file_path = dir + "/" + config_file cmdStr = 'gpfilespace -h %s -p %s -U %s -c "%s"'%(host, port, user, config_file_path) run_command(context, cmdStr) @given('the user modifies the external_table.sql file "{filepath}" with host "{HOST}" and port "{port}"') @when('the user modifies the external_table.sql file "{filepath}" with host "{HOST}" and port "{port}"') def impl(context, filepath, HOST, port): host=os.environ.get(HOST) substr = host+':'+port modify_sql_file(filepath, substr) @given('the user starts the gpfdist on host "{HOST}" and port "{port}" in work directory "{dir}" from remote "{ctxt}"') @then('the user starts the gpfdist on host "{HOST}" and port "{port}" in work directory "{dir}" from remote "{ctxt}"') def impl(context, HOST, port, dir, ctxt): host = os.environ.get(HOST) remote_gphome = os.environ.get('GPHOME') if not dir.startswith("/"): dir = os.environ.get(dir) gp_source_file = os.path.join(remote_gphome, 'greenplum_path.sh') gpfdist = Gpfdist('gpfdist on host %s'%host, dir, port, os.path.join(dir,'gpfdist.pid'), int(ctxt), host, gp_source_file) gpfdist.startGpfdist() @given('the user stops the gpfdist on host "{HOST}" and port "{port}" in work directory "{dir}" from remote "{ctxt}"') @then('the user stops the gpfdist on host "{HOST}" and port "{port}" in work directory "{dir}" from remote "{ctxt}"') def impl(context, HOST, port, dir, ctxt): host = os.environ.get(HOST) remote_gphome = os.environ.get('GPHOME') if not dir.startswith("/"): dir = os.environ.get(dir) gp_source_file = os.path.join(remote_gphome, 'greenplum_path.sh') gpfdist = Gpfdist('gpfdist on host %s'%host, dir, port, os.path.join(dir,'gpfdist.pid'), int(ctxt), host, gp_source_file) gpfdist.cleanupGpfdist() def parse_netbackup_params(): current_path = os.path.realpath(__file__) current_dir = os.path.dirname(current_path) netbackup_yaml_file_path = os.path.join(current_dir, 'data/netbackup_behave_config.yaml') try: nbufile = open(netbackup_yaml_file_path, 'r') except IOError,e: raise Exception("Unable to open file %s: %s" % 
(netbackup_yaml_file_path, e)) try: nbudata = yaml.load(nbufile.read()) except yaml.YAMLError, exc: raise Exception("Error reading file %s: %s" % (netbackup_yaml_file_path, exc)) finally: nbufile.close() if len(nbudata) == 0: raise Exception("The load of the config file %s failed.\ No configuration information to continue testing operation." % netbackup_yaml_file_path) else: return nbudata def run_valgrind_command(context, command, suppressions_file): current_path = os.path.realpath(__file__) current_dir = os.path.dirname(current_path) cmd_text = "valgrind --suppressions=%s/%s %s" %(current_dir, suppressions_file, command) run_command(context, cmd_text) for line in context.error_message.splitlines(): if 'ERROR SUMMARY' in line: if '0 errors from 0 contexts' not in line: raise Exception('Output: %s' % context.error_message) else: return raise Exception('Could not find "ERROR SUMMARY" in %s' % context.error_message) @then('the user runs valgrind with "{command}" and options "{options}"') @when('the user runs valgrind with "{command}" and options "{options}"') def impl(context, command, options): port = os.environ.get('PGPORT') user = getpass.getuser() if hasattr(context, 'backup_timestamp'): ts = context.backup_timestamp bnr_tool = command.split()[0].strip() if bnr_tool == 'gp_dump': command_str = command elif bnr_tool == 'gp_dump_agent': command_str = command + ' -p %s' % port elif bnr_tool == 'gp_restore': command_str = "%s %s --gp-k %s --gp-d db_dumps/%s --gp-r db_dumps/%s" % (command, options, context.backup_timestamp, context.backup_timestamp[0:8], context.backup_timestamp[0:8]) elif bnr_tool == 'gp_restore_agent': command_str = "%s %s --gp-k %s --gp-d db_dumps/%s -p %s -U %s --target-host localhost --target-port %s db_dumps/%s/gp_dump_-1_1_%s_post_data.gz" % (command, options, ts, ts[0:8], port, user, port, ts[0:8], ts) run_valgrind_command(context, command_str, "valgrind_suppression.txt") @then('the user runs valgrind with "{command}" and options "{options}" and suppressions file "{suppressions_file}" using netbackup') @when('the user runs valgrind with "{command}" and options "{options}" and suppressions file "{suppressions_file}" using netbackup') def impl(context, command, options, suppressions_file): port = os.environ.get('PGPORT') user = getpass.getuser() if hasattr(context, 'backup_timestamp'): ts = context.backup_timestamp if hasattr(context, 'netbackup_service_host'): netbackup_service_host = context.netbackup_service_host if hasattr(context, 'netbackup_policy'): netbackup_policy = context.netbackup_policy if hasattr(context, 'netbackup_schedule'): netbackup_schedule = context.netbackup_schedule bnr_tool = command.split()[0].strip() if bnr_tool == 'gp_dump': command_str = command + " --netbackup-service-host " + netbackup_service_host + " --netbackup-policy " + netbackup_policy + " --netbackup-schedule " + netbackup_schedule elif bnr_tool == 'gp_dump_agent': command_str = command + ' -p %s' % port + " --netbackup-service-host " + netbackup_service_host + " --netbackup-policy " + netbackup_policy + " --netbackup-schedule " + netbackup_schedule elif bnr_tool == 'gp_restore': command_str = "%s %s --gp-k %s --gp-d db_dumps/%s --gp-r db_dumps/%s --netbackup-service-host %s" % (command, options, context.backup_timestamp, context.backup_timestamp[0:8], context.backup_timestamp[0:8], netbackup_service_host) elif bnr_tool == 'gp_restore_agent': command_str = "%s %s --gp-k %s --gp-d db_dumps/%s -p %s -U %s --target-host localhost --target-port %s db_dumps/%s/gp_dump_-1_1_%s_post_data.gz 
--netbackup-service-host %s" % (command, options, ts, ts[0:8], port, user, port, ts[0:8], ts, netbackup_service_host) else: command_str = "%s %s" % (command, options) run_valgrind_command(context, command_str, "netbackup_suppressions.txt") @when('the timestamp key is stored') def impl(context): stdout = context.stdout_message for line in stdout.splitlines(): if '--gp-k' in line: pat = re.compile('.* --gp-k=([0-9]{14}).*') m = pat.search(line) if not m: raise Exception('Timestamp key not found') context.timestamp_key = m.group(1) return @then('{command} should print {err_msg} error message') def impl(context, command, err_msg): check_err_msg(context, err_msg) @then('{command} should print {out_msg} to stdout') def impl(context, command, out_msg): check_stdout_msg(context, out_msg) @given('{command} should not print {out_msg} to stdout') @when('{command} should not print {out_msg} to stdout') @then('{command} should not print {out_msg} to stdout') def impl(context, command, out_msg): check_string_not_present_stdout(context, out_msg) @given('{command} should return a return code of {ret_code}') @when('{command} should return a return code of {ret_code}') @then('{command} should return a return code of {ret_code}') def impl(context, command, ret_code): check_return_code(context, ret_code) @given('{command} should not return a return code of {ret_code}') @when('{command} should not return a return code of {ret_code}') @then('{command} should not return a return code of {ret_code}') def impl(context, command, ret_code): check_not_return_code(context, ret_code) @then('an "{ex_type}" should be raised') def impl(context, ex_type): if not context.exception: raise Exception('An exception was expected but was not thrown') typ = context.exception.__class__.__name__ if typ != ex_type: raise Exception('got exception of type "%s" but expected type "%s"' % (typ, ex_type)) @given('database "{dbname}" health check should pass on table "{tablename}"') @when('database "{dbname}" health check should pass on table "{tablename}"') @then('database "{dbname}" health check should pass on table "{tablename}"') def impl(context, dbname, tablename): drop_database_if_exists(context, dbname) create_database(context, dbname) create_int_table(context, tablename, dbname=dbname) drop_database(context, dbname) @given('the segments are synchronized') @when('the segments are synchronized') @then('the segments are synchronized') def impl(context): times = 30 sleeptime = 10 for i in range(times): if are_segments_synchronized(): return time.sleep(sleeptime) raise Exception('segments are not in sync after %d seconds' % (times * sleeptime)) @when('at least one segment is resynchronized') @then('at least one segment is resynchronized') @given('at least one segment is resynchronized') def impl(context): times = 30 sleeptime = 10 for i in range(times): if is_any_segment_resynchronized(): return time.sleep(sleeptime) raise Exception('segments are not in resync after %d seconds' % (times * sleeptime)) @when('table "{table_list}" is assumed to be in dirty state in "{dbname}"') @then('table "{table_list}" is assumed to be in dirty state in "{dbname}"') @given('table "{table_list}" is assumed to be in dirty state in "{dbname}"') def impl(context, table_list, dbname): tables = table_list.split(',') for t in tables: modify_data(context, t.strip(), dbname) backup_data(context, t.strip(), dbname) get_distribution_policy(dbname) @given('all the data from "{dbname}" is saved for verification') @when('all the data from "{dbname}" is saved for 
verification') @then('all the data from "{dbname}" is saved for verification') def impl(context, dbname): backup_db_data(context, dbname) @then('partition "{partition}" of partition table "{table_list}" is assumed to be in dirty state in "{dbname}" in schema "{schema}"') @when('partition "{partition}" of partition table "{table_list}" is assumed to be in dirty state in "{dbname}" in schema "{schema}"') @given('partition "{partition}" of partition table "{table_list}" is assumed to be in dirty state in "{dbname}" in schema "{schema}"') @then('partition "{partition}" in partition level "{partitionlevel}" of partition table "{table_list}" is assumed to be in dirty state in "{dbname}" in schema "{schema}"') @when('partition "{partition}" in partition level "{partitionlevel}" of partition table "{table_list}" is assumed to be in dirty state in "{dbname}" in schema "{schema}"') @given('partition "{partition}" in partition level "{partitionlevel}" of partition table "{table_list}" is assumed to be in dirty state in "{dbname}" in schema "{schema}"') def impl(context, partition, table_list, dbname, schema, partitionlevel=1): tables = table_list.split(',') for t in tables: part_t = get_partition_names(schema, t.strip(), dbname, partitionlevel, partition) if len(part_t) < 1 or len(part_t[0]) < 1: print part_t dirty_table_name = part_t[0][0].strip() modify_partition_data(context, dirty_table_name, dbname, int(partition)) backup_data(context, dirty_table_name, dbname) def validate_timestamp(ts): try: int_ts = int(ts) except Exception as e: raise Exception('Timestamp is not valid %s' % ts) if len(ts) != 14: raise Exception('Timestamp is invalid %s' % ts) @when('the subdir from gpcrondump is stored') @then('the subdir from gpcrondump is stored') def impl(context): stdout = context.stdout_message for line in stdout.splitlines(): if 'Dump subdirectory' in line: log_msg, delim, subdir = line.partition('=') context.backup_subdir = subdir.strip() return raise Exception('Dump subdirectory not found %s' % stdout) def get_timestamp_from_output(context): ts = None stdout = context.stdout_message for line in stdout.splitlines(): if 'Timestamp key = ' in line: log_msg, delim, timestamp = line.partition('=') ts = timestamp.strip() validate_timestamp(ts) return ts raise Exception('Timestamp not found %s' % stdout) @given('the full backup timestamp from gpcrondump is stored') @when('the full backup timestamp from gpcrondump is stored') @then('the full backup timestamp from gpcrondump is stored') def impl(context): context.full_backup_timestamp = get_timestamp_from_output(context) _write_timestamp_to_json(context) @when('the timestamp from gpcrondump is stored') @then('the timestamp from gpcrondump is stored') def impl(context): context.backup_timestamp = get_timestamp_from_output(context) _write_timestamp_to_json(context) @when('the timestamp is labeled "{lbl}"') def impl(context, lbl): if not 'timestamp_labels' in context._root: context._root['timestamp_labels'] = {} context._root['timestamp_labels'][lbl] = get_timestamp_from_output(context) @when('the timestamp for scenario "{scenario_number}" is labeled "{lbl}"') def impl(context, scenario_number, lbl): labels_key = 'timestamp_labels' + scenario_number if not labels_key in context._root: context._root[labels_key] = {} context._root[labels_key][lbl] = get_timestamp_from_output(context) @given('there is a list to store the incremental backup timestamps') def impl(context): context.inc_backup_timestamps = [] @given('there is a list to store the backup timestamps') def 
impl(context): context.backup_timestamp_list = [] @then('the timestamp from gpcrondump is stored in a list') @when('the timestamp from gpcrondump is stored in a list') def impl(context): context.backup_timestamp = get_timestamp_from_output(context) context.inc_backup_timestamps.append(context.backup_timestamp) _write_timestamp_to_json(context) @when('the timestamp for database dumps "{db_list}" are stored') def impl(context, db_list): context.db_timestamps = get_timestamp_from_output_for_db(context) scenario_name = context._stack[0]['scenario'].name if not global_timestamps.has_key(scenario_name): global_timestamps[scenario_name] = list() global_timestamps[scenario_name].append(context.db_timestamps.values()) with open(timestamp_json, 'w') as outfile: json.dump(global_timestamps, outfile) def get_timestamp_from_output_for_db(context): db_timestamps = {} ts = None database = None stdout = context.stdout_message for line in stdout.splitlines(): if 'Target database' in line: log_msg, delim, database = line.partition('=') db = database.strip() elif 'Dump key ' in line: log_msg, delim, timestamp = line.partition('=') ts = timestamp.strip() validate_timestamp(ts) if database is None: raise Exception('Database not found for timestamp "%s"' % ts) db_timestamps[db] = ts database = None if not db_timestamps: raise Exception('No Timestamps found') return db_timestamps @then('verify data integrity of database "{dbname}" between source and destination system, work-dir "{dir}"') def impl(context, dbname, dir): dbconn_src = 'psql -p $GPTRANSFER_SOURCE_PORT -h $GPTRANSFER_SOURCE_HOST -U $GPTRANSFER_SOURCE_USER -d %s' % dbname dbconn_dest = 'psql -p $GPTRANSFER_DEST_PORT -h $GPTRANSFER_DEST_HOST -U $GPTRANSFER_DEST_USER -d %s' % dbname for file in os.listdir(dir): if file.endswith('.sql'): filename_prefix = os.path.splitext(file)[0] ans_file_path = os.path.join(dir,filename_prefix + '.ans') out_file_path = os.path.join(dir,filename_prefix + '.out') diff_file_path = os.path.join(dir,filename_prefix + '.diff') # run the command to get the exact data from the source system command = '%s -f %s > %s' % (dbconn_src, os.path.join(dir, file), ans_file_path) run_command(context, command) # run the command to get the data from the destination system, locally command = '%s -f %s > %s' % (dbconn_dest, os.path.join(dir, file), out_file_path) run_command(context, command) gpdiff_cmd = 'gpdiff.pl -w -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gpd_init=test/behave/mgmt_utils/steps/data/global_init_file %s %s > %s' % (ans_file_path, out_file_path, diff_file_path) run_command(context, gpdiff_cmd) if context.ret_code != 0: raise Exception ("Found difference between source and destination system, see %s" % file) @then('run post verifying workload under "{dir}"') def impl(context, dir): for file in os.listdir(dir): if file.endswith('.sql'): filename_prefix = os.path.splitext(file)[0] ans_file_path = os.path.join(dir,filename_prefix+'.ans') out_file_path = os.path.join(dir,filename_prefix+'.out') diff_file_path = os.path.join(dir,filename_prefix+'.diff') # run the command to get the data from the destination system, locally dbconn = 'psql -d template1 -p $GPTRANSFER_DEST_PORT -U $GPTRANSFER_DEST_USER -h $GPTRANSFER_DEST_HOST' command = '%s -f %s > %s'%(dbconn, os.path.join(dir,file), out_file_path) run_command(context, command) gpdiff_cmd = 'gpdiff.pl -w -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gpd_init=test/behave/mgmt_utils/steps/data/global_init_file %s %s > %s'%(ans_file_path, out_file_path, diff_file_path) 
run_command(context, gpdiff_cmd) for file in os.listdir(dir): if file.endswith('.diff') and os.path.getsize(os.path.join(dir,file)) > 0: # if there is some difference generated into the diff file, raise expception raise Exception ("Found difference between source and destination system, see %s"%file) @then('verify that the incremental file has the stored timestamp') def impl(context): inc_file_name = 'gp_dump_%s_increments' % context.full_backup_timestamp subdirectory = context.full_backup_timestamp[0:8] full_path = os.path.join(master_data_dir, 'db_dumps', subdirectory, inc_file_name) if not os.path.isfile(full_path): raise Exception ("Can not find increments file: %s" % full_path) contents = "" with open(full_path) as fd: contents = fd.read().strip() if context.backup_timestamp != contents: raise Exception("The increments file '%s' does not contain the timestamp %s" % (full_path, context.backup_timestamp)) def check_increments_file_for_list(context, location): inc_file_name = 'gp_dump_%s_increments' % context.full_backup_timestamp subdirectory = context.full_backup_timestamp[0:8] full_path = os.path.join(location, 'db_dumps', subdirectory, inc_file_name) if not os.path.isfile(full_path): raise Exception ("Can not find increments file: %s" % full_path) found_timestamps = [] contents = "" with open(full_path) as fd: contents = fd.read() for line in contents.splitlines(): line = line.strip() if not line: continue found_timestamps.append(line) found_timestamps = sorted(found_timestamps) context.inc_backup_timestamps = sorted(context.inc_backup_timestamps) if found_timestamps != context.inc_backup_timestamps: print "Found timestamps: " print found_timestamps print "Expected timestamps: " print context.inc_backup_timestamps raise Exception("Expected timestamps not found") @then('verify that the incremental file in "{location}" has all the stored timestamps') def impl(context, location): check_increments_file_for_list(context, location) @then('verify that the incremental file has all the stored timestamps') def impl(context): check_increments_file_for_list(context, master_data_dir) @then('verify that the plan file is created for the latest timestamp') def impl(context): context.inc_backup_timestamps = sorted(context.inc_backup_timestamps) latest_ts = context.inc_backup_timestamps[-1] plan_file_dir = os.path.join(master_data_dir, 'db_dumps', latest_ts[0:8]) plan_file_count = len(glob.glob('/%s/*%s*_plan' % (plan_file_dir, latest_ts))) if plan_file_count != 1: raise Exception('Expected only one plan file, found %s' % plan_file_count) filename = '%s/gp_restore_%s_plan' % (plan_file_dir, latest_ts) if not os.path.exists(filename): raise Exception('Plan file %s not created for the latest timestamp' % filename) @then('the timestamp from gp_dump is stored and subdir is "{subdir}"') def impl(context, subdir): stdout = context.stdout_message context.backup_subdir = subdir for line in stdout.splitlines(): if 'Timestamp Key: ' in line: context.backup_timestamp = line.split()[-1] validate_timestamp(context.backup_timestamp) return raise Exception('Timestamp not found %s' % stdout) @when('the state files are generated under "{dir}" for stored "{backup_type}" timestamp') @then('the state files are generated under "{dir}" for stored "{backup_type}" timestamp') def impl(context, dir, backup_type): dump_dir = dir if len(dir.strip()) != 0 else master_data_dir if backup_type == 'full': timestamp = context.full_backup_timestamp else: timestamp = context.backup_timestamp ao_state_filename = 
"%s/db_dumps/%s/gp_dump_%s_ao_state_file" % (dump_dir, timestamp[0:8], timestamp) co_state_filename = "%s/db_dumps/%s/gp_dump_%s_co_state_file" % (dump_dir, timestamp[0:8], timestamp) if not os.path.exists(ao_state_filename): raise Exception('AO state file %s not generated' % ao_state_filename) if not os.path.exists(co_state_filename): raise Exception('CO state file %s not generated' % co_state_filename) verify_integer_tuple_counts(context, ao_state_filename) verify_integer_tuple_counts(context, co_state_filename) @then('the "{file_type}" files are generated under "{dirname}" for stored "{backup_type}" timestamp') def impl(context, file_type, dirname, backup_type): dump_dir = dirname if len(dirname.strip()) != 0 else master_data_dir if backup_type == 'full': timestamp = context.full_backup_timestamp else: timestamp = context.backup_timestamp last_operation_filename = "%s/db_dumps/%s/gp_dump_%s_last_operation" % (dump_dir, timestamp[0:8], timestamp) if not os.path.exists(last_operation_filename): raise Exception('Last operation file %s not generated' % last_operation_filename) @given('the user runs gp_restore with the the stored timestamp subdir and stored filename in "{dbname}"') @when('the user runs gp_restore with the the stored timestamp subdir and stored filename in "{dbname}"') def impl(context, dbname): command = 'gp_restore -i --gp-k %s --gp-d db_dumps/%s --gp-i --gp-r db_dumps/%s --gp-l=p -d %s --gp-c --gp-f %s' % (context.backup_timestamp, context.backup_subdir, context.backup_subdir, dbname, context.filename) run_gpcommand(context, command) @then('the user runs gp_restore with the the stored timestamp and subdir in "{dbname}"') def impl(context, dbname): command = 'gp_restore -i --gp-k %s --gp-d db_dumps/%s --gp-i --gp-r db_dumps/%s --gp-l=p -d %s --gp-c' % (context.backup_timestamp, context.backup_subdir, context.backup_subdir, dbname) run_gpcommand(context, command) @then('the user runs gp_restore with the the stored timestamp and subdir in "{dbname}" and bypasses ao stats') def impl(context, dbname): command = 'gp_restore -i --gp-k %s --gp-d db_dumps/%s --gp-i --gp-r db_dumps/%s --gp-l=p -d %s --gp-c --gp-nostats' % (context.backup_timestamp, context.backup_subdir, context.backup_subdir, dbname) run_gpcommand(context, command) @then('the user runs gp_restore with the stored timestamp and subdir in "{dbname}" and backup_dir "{backup_dir}"') def impl(context, dbname, backup_dir): command = 'gp_restore -i --gp-k %s --gp-d %s/db_dumps/%s --gp-i --gp-r %s/db_dumps/%s --gp-l=p -d %s --gp-c' % (context.backup_timestamp, backup_dir, context.backup_subdir, backup_dir, context.backup_subdir, dbname) run_gpcommand(context, command) @when('the user runs gp_restore with the the stored timestamp and subdir for metadata only in "{dbname}"') @then('the user runs gp_restore with the the stored timestamp and subdir for metadata only in "{dbname}"') def impl(context, dbname): command = 'gp_restore -i --gp-k %s --gp-d db_dumps/%s --gp-i --gp-r db_dumps/%s --gp-l=p -d %s --gp-c -s db_dumps/%s/gp_dump_-1_1_%s.gz' % \ (context.backup_timestamp, context.backup_subdir, context.backup_subdir, dbname, context.backup_subdir, context.backup_timestamp) run_gpcommand(context, command) @when('the user runs gpdbrestore with the stored timestamp') @then('the user runs gpdbrestore with the stored timestamp') def impl(context): command = 'gpdbrestore -e -t %s -a' % context.backup_timestamp run_gpcommand(context, command) @when('the user runs gpdbrestore with the stored json timestamp') @then('the user runs 
gpdbrestore with the stored json timestamp') def impl(context): timestamp = _read_timestamp_from_json(context)[0] command = 'gpdbrestore -e -t %s -a' % timestamp run_gpcommand(context, command) @when('the user runs gpdbrestore with the stored timestamp to print the backup set with options "{options}"') def impl(context, options): command = 'gpdbrestore -t %s %s --list-backup' % (context.backup_timestamp, options) run_gpcommand(context, command) @then('the user runs gpdbrestore with the stored timestamp and options "{options}"') @when('the user runs gpdbrestore with the stored timestamp and options "{options}"') def impl(context, options): if options == '-b': command = 'gpdbrestore -e -b %s -a' % (context.backup_timestamp[0:8]) else: command = 'gpdbrestore -e -t %s %s -a' % (context.backup_timestamp, options) run_gpcommand(context, command) @when('the user runs gpdbrestore with the stored timestamp and options "{options}" without -e option') def impl(context, options): if options == '-b': command = 'gpdbrestore -b %s -a' % (context.backup_timestamp[0:8]) else: command = 'gpdbrestore -t %s %s -a' % (context.backup_timestamp, options) run_gpcommand(context, command) @when('the user runs "{cmd}" with the stored timestamp') @then('the user runs "{cmd}" with the stored timestamp') def impl(context, cmd): command = '%s -t %s' % (cmd, context.backup_timestamp) run_gpcommand(context, command) @then('verify that there is no table "{tablename}" in "{dbname}"') def impl(context, tablename, dbname): if check_table_exists(context, dbname=dbname, table_name=tablename): raise Exception("Table '%s' still exists when it should not" % tablename) @then('verify that there is no view "{viewname}" in "{dbname}"') def impl(context, viewname, dbname): if check_table_exists(context, dbname=dbname, table_name=viewname, table_type='view'): raise Exception("View '%s' still exists when it should not" % viewname) @then('verify that there is no procedural language "{planguage}" in "{dbname}"') def impl(context, planguage, dbname): if check_pl_exists(context, dbname=dbname, lan_name=planguage): raise Exception("Procedural Language '%s' still exists when it should not" % planguage) @then('verify that there is a constraint "{conname}" in "{dbname}"') def impl(context, conname, dbname): if not check_constraint_exists(context, dbname=dbname, conname=conname): raise Exception("Constraint '%s' does not exist when it should" % conname) @then('verify that there is a rule "{rulename}" in "{dbname}"') def impl(context, rulename, dbname): if not check_rule_exists(context, dbname=dbname, rulename=rulename): raise Exception("Rule '%s' does not exist when it should" % rulename) @then('verify that there is a trigger "{triggername}" in "{dbname}"') def impl(context, triggername, dbname): if not check_trigger_exists(context, dbname=dbname, triggername=triggername): raise Exception("Trigger '%s' does not exist when it should" % triggername) @then('verify that there is an index "{indexname}" in "{dbname}"') def impl(context, indexname, dbname): if not check_index_exists(context, dbname=dbname, indexname=indexname): raise Exception("Index '%s' does not exist when it should" % indexname) @then('verify that there is a "{table_type}" table "{tablename}" in "{dbname}"') def impl(context, table_type, tablename, dbname): if not check_table_exists(context, dbname=dbname, table_name=tablename,table_type=table_type): raise Exception("Table '%s' of type '%s' does not exist when expected" % (tablename, table_type)) @then('verify that there is partition 
"{partition}" of "{table_type}" partition table "{tablename}" in "{dbname}" in "{schemaname}"') def impl(context, partition, table_type, tablename, dbname, schemaname): if not check_partition_table_exists(context, dbname=dbname, schemaname=schemaname, table_name=tablename, table_type=table_type, part_level=1, part_number=partition): raise Exception("Partition %s for table '%s' of type '%s' does not exist when expected" % (partition, tablename, table_type)) @then('verify that there is partition "{partition}" of mixed partition table "{tablename}" with storage_type "{storage_type}" in "{dbname}" in "{schemaname}"') @then('verify that there is partition "{partition}" in partition level "{partitionlevel}" of mixed partition table "{tablename}" with storage_type "{storage_type}" in "{dbname}" in "{schemaname}"') def impl(context, partition, tablename, storage_type, dbname, schemaname, partitionlevel=1): part_t = get_partition_names(schemaname, tablename, dbname, partitionlevel, partition) partname = part_t[0][0].strip() validate_storage_type(context, partname, storage_type, dbname) @given('there is a function "{functionname}" in "{dbname}"') def impl(context, functionname, dbname): SQL = """CREATE FUNCTION %s(a integer, b integer) RETURNS integer AS $$ if a > b: return a return b $$ LANGUAGE plpythonu;""" % functionname execute_sql(dbname, SQL) @then('verify that storage_types of the partition table "{tablename}" are as expected in "{dbname}"') def impl(context, tablename, dbname): validate_mixed_partition_storage_types(context, tablename, dbname) @then('data for partition table "{table_name}" with partition level "{part_level}" is distributed across all segments on "{dbname}"') def impl(context, table_name, part_level, dbname): validate_part_table_data_on_segments(context, table_name, part_level, dbname) @then('data for table "{table_name}" is distributed across all segments on "{dbname}"') def impl(context, table_name, dbname): validate_table_data_on_segments(context, table_name, dbname) @then('verify that the data of the {file} under "{backup_dir}" in "{dbname}" is validated after restore') def impl(context, file, dbname, backup_dir): dump_dir = backup_dir if len(backup_dir.strip()) != 0 else master_data_dir if file == 'dirty tables': dirty_list_filename = '%s/db_dumps/%s/gp_dump_%s_dirty_list' % (dump_dir, context.backup_timestamp[0:8], context.backup_timestamp) elif file == 'table_filter_file': dirty_list_filename = os.path.join(os.getcwd(), file) if not os.path.exists(dirty_list_filename): raise Exception('Dirty list file %s does not exist' % dirty_list_filename) with open(dirty_list_filename) as fd: tables = fd.readlines() for table in tables: validate_restore_data(context, table.strip(), dbname) @then('verify that the distribution policy of all the tables in "{dbname}" are validated after restore') def impl(context, dbname): validate_distribution_policy(context, dbname) @then('verify that tables "{table_list}" in "{dbname}" has no rows') def impl(context, table_list, dbname): tables = [t.strip() for t in table_list.split(',')] for t in tables: check_empty_table(t, dbname) @then('verify that table "{tname}" in "{dbname}" has "{nrows}" rows') def impl(context, tname, dbname, nrows): check_row_count(tname, dbname, int(nrows)) @then('verify that table "{src_tname}" in database "{src_dbname}" of source system has same data with table "{dest_tname}" in database "{dest_dbname}" of destination system with options "{options}"') def impl(context, src_tname, src_dbname, dest_tname, dest_dbname, 
options): match_table_select(context, src_tname, src_dbname, dest_tname, dest_dbname, options) @then('verify that table "{src_tname}" in database "{src_dbname}" of source system has same data with table "{dest_tname}" in database "{dest_dbname}" of destination system with order by "{orderby}"') def impl(context, src_tname, src_dbname, dest_tname, dest_dbname, orderby): match_table_select(context, src_tname, src_dbname, dest_tname, dest_dbname, orderby) @then('verify that partitioned tables "{table_list}" in "{dbname}" have {num_parts} partitions') @then('verify that partitioned tables "{table_list}" in "{dbname}" have {num_parts} partitions in partition level "{partitionlevel}"') def impl(context, table_list, dbname, num_parts, partitionlevel=1): num_parts = int(num_parts.strip()) tables = [t.strip() for t in table_list.split(',')] for t in tables: names = get_partition_tablenames(t, dbname, partitionlevel) if len(names) != num_parts: raise Exception("%s.%s should have %d partitions but has %d" % (dbname, t, num_parts, len(names))) # raise exception if tname does not have X empty partitions def check_x_empty_parts(dbname, tname, x): num_empty = 0 parts = get_partition_tablenames(tname, dbname) for part in parts: p = part[0] try: check_empty_table(p, dbname) num_empty += 1 except Exception as e: print e if num_empty != x: raise Exception("%s.%s has %d empty partitions and should have %d" % (dbname, tname, num_empty, x)) @then('the user runs gpdbrestore with "{opt}" option in path "{path}"') def impl(context, opt, path): command = 'gpdbrestore -e -a %s localhost:%s/db_dumps/%s --verbose' % (opt, path, context.backup_subdir) run_gpcommand(context, command) @then('all files for full backup have been removed in path "{path}"') def impl(context, path): path = path if len(path.strip()) != 0 else master_data_dir file_pattern = "*_%s*" % context.full_backup_timestamp dir = "%s/db_dumps/%s" %(path, context.backup_subdir) cleanup_cmd = "rm -f %s/%s" % (dir, file_pattern) run_command(context, cleanup_cmd) if context.exception: raise context.exception @when('there are no backup files') @given('there are no backup files') def impl(context): cleanup_backup_files(context, 'template1') @given('the backup files in "{location}" are deleted') @when('the backup files in "{location}" are deleted') @then('the backup files in "{location}" are deleted') def impl(context, location): cleanup_backup_files(context, 'template1', location) @then('there are no report files in the master data directory') def impl(context): cleanup_report_files(context, master_data_dir) @when('verify that partitioned tables "{table_list}" in "{dbname}" has {num_parts} empty partitions') @then('verify that partitioned tables "{table_list}" in "{dbname}" has {num_parts} empty partitions') def impl(context, table_list, dbname, num_parts): expected_num_parts = int(num_parts.strip()) tables = [t.strip() for t in table_list.split(',')] for t in tables: check_x_empty_parts(dbname, t, expected_num_parts) @given('there is a backupfile of tables "{table_list}" in "{dbname}" exists for validation') @when('there is a backupfile of tables "{table_list}" in "{dbname}" exists for validation') @then('there is a backupfile of tables "{table_list}" in "{dbname}" exists for validation') def impl(context, table_list, dbname): tables = [t.strip() for t in table_list.split(',')] for t in tables: backup_data(context, t.strip(), dbname) @when('verify that there is a table "{tablename}" of "{tabletype}" type in "{dbname}" with same data as table 
"{backedup_table}"') @then('verify that there is a table "{tablename}" of "{tabletype}" type in "{dbname}" with same data as table "{backedup_table}"') def impl(context, tablename, tabletype, dbname, backedup_table): if not check_table_exists(context, dbname=dbname, table_name=tablename, table_type=tabletype): raise Exception("Table '%s' does not exist when it should" % tablename) validate_restore_data(context, tablename, dbname, backedup_table) @when('check that there is a "{table_type}" table "{tablename}" in "{dbname}" with same data from "{backedup_dbname}"') @then('check that there is a "{table_type}" table "{tablename}" in "{dbname}" with same data from "{backedup_dbname}"') def impl(context, table_type, tablename, dbname, backedup_dbname): if not check_table_exists(context, dbname=dbname, table_name=tablename, table_type=table_type): raise Exception("Table '%s' does not exist when it should" % tablename) validate_restore_data(context, tablename, dbname, None, backedup_dbname) @when('verify that there is a "{table_type}" table "{tablename}" in "{dbname}" with data') @then('verify that there is a "{table_type}" table "{tablename}" in "{dbname}" with data') def impl(context, table_type, tablename, dbname): if not check_table_exists(context, dbname=dbname, table_name=tablename, table_type=table_type): raise Exception("Table '%s' does not exist when it should" % tablename) validate_restore_data(context, tablename, dbname) @when('verify that the data in the "{table_type}" table "{tablename}" is the same in "{dbname}" as in "{old_dbname}"') @then('verify that the data in the "{table_type}" table "{tablename}" is the same in "{dbname}" as in "{old_dbname}"') def impl(context, table_type, tablename, dbname, old_dbname): if not check_table_exists(context, dbname=dbname, table_name=tablename, table_type=table_type): raise Exception("Table '%s' does not exist in database %s when it should" % (tablename, dbname)) if not check_table_exists(context, dbname=old_dbname, table_name=tablename, table_type=table_type): raise Exception("Table '%s' does not exist in database %s when it should" % (tablename, dbname)) validate_restore_data(context, tablename, dbname, None, old_dbname) @given('there is schema "{schema_list}" exists in "{dbname}"') @then('there is schema "{schema_list}" exists in "{dbname}"') def impl(context, schema_list, dbname): schemas = [s.strip() for s in schema_list.split(',')] for s in schemas: drop_schema_if_exists(context, s.strip(), dbname) create_schema(context, s.strip(), dbname) @then('the temporary file "{filename}" is removed') def impl(context, filename): if os.path.exists(filename): os.remove(filename) @then('the temporary table file "{filename}" is removed') def impl(context, filename): table_file = 'test/behave/mgmt_utils/steps/data/gptransfer/%s' % filename if os.path.exists(table_file): os.remove(table_file) def create_table_file_locally(context, filename, table_list, location=os.getcwd()): tables = table_list.split('|') file_path = os.path.join(location, filename) with open(file_path, 'w') as fp: for t in tables: fp.write(t + '\n') context.filename = file_path @given('there is a file "{filename}" with tables "{table_list}"') @then('there is a file "{filename}" with tables "{table_list}"') def impl(context, filename, table_list): create_table_file_locally(context, filename, table_list) @given('there is a fake pg_aoseg table named "{table}" in "{dbname}"') def impl(context, table, dbname): create_fake_pg_aoseg_table(context, table, dbname) def 
verify_file_contents(context, file_type, file_dir, text_find, should_contain=True): if len(file_dir.strip()) == 0: file_dir = master_data_dir if not hasattr(context, "dump_prefix"): context.dump_prefix = '' if file_type == 'pg_dump_log': fn = 'pg_dump_log' context.backup_timestamp = '0' elif file_type == 'report': fn = '%sgp_dump_%s.rpt' % (context.dump_prefix, context.backup_timestamp) elif file_type == 'status': fn = '%sgp_dump_status_-1_1_%s' % (context.dump_prefix, context.backup_timestamp) elif file_type == 'filter': fn = '%sgp_dump_%s_filter' % (context.dump_prefix, context.backup_timestamp) elif file_type == "statistics": fn = '%sgp_statistics_-1_1_%s' % (context.dump_prefix, context.backup_timestamp) elif file_type == 'schema': fn = '%sgp_dump_%s_schema' % (context.dump_prefix, context.backup_timestamp) elif file_type == 'cdatabase': fn = '%sgp_cdatabase_-1_1_%s' % (context.dump_prefix, context.backup_timestamp) elif file_type == 'dump': fn = '%sgp_dump_-1_1_%s.gz' % (context.dump_prefix, context.backup_timestamp) subdirectory = context.backup_timestamp[0:8] if file_type == 'pg_dump_log': full_path = os.path.join(file_dir, fn) else: full_path = os.path.join(file_dir, 'db_dumps', subdirectory, fn) if not os.path.isfile(full_path): raise Exception ("Can not find %s file: %s" % (file_type, full_path)) contents = "" if file_type == 'dump': fd = gzip.open(full_path) else: fd = open(full_path) contents = fd.read() fd.close() if should_contain and not text_find in contents: raise Exception("Did not find '%s' in file %s" % (text_find, full_path)) elif not should_contain and text_find in contents: raise Exception("Found '%s' in file '%s'" % (text_find, full_path)) @then('verify that the "{file_type}" file in "{file_dir}" dir contains "{text_find}"') def impl(context, file_type, file_dir, text_find): verify_file_contents(context, file_type, file_dir, text_find) @then('verify that the "{file_type}" file in "{file_dir}" dir does not contain "{text_find}"') def impl(context, file_type, file_dir, text_find): verify_file_contents(context, file_type, file_dir, text_find, should_contain=False) @then('the timestamp in the report file should be same as timestamp key') def impl(context): if not hasattr(context, 'timestamp_key'): raise Exception('Unable to find timestamp key in context') if hasattr(context, 'backup_dir'): report_file = os.path.join(context.backup_dir, 'db_dumps', '%s' % (context.timestamp_key[0:8]),'gp_dump_%s.rpt' % context.timestamp_key) else: report_file = os.path.join(master_data_dir, 'db_dumps', '%s' % (context.timestamp_key[0:8]), 'gp_dump_%s.rpt' % context.timestamp_key) with open(report_file) as rpt: for line in rpt: if line.startswith('Timestamp Key'): timestamp_key = line.split(':')[-1].strip() if timestamp_key != context.timestamp_key: raise Exception('Expected timestamp key to be %s, but found %s in report file %s' % (context.timestamp_key, timestamp_key, report_file)) @then('there should be dump files with filename having timestamp key in "{dbname}"') def impl(context, dbname): if not hasattr(context, 'timestamp_key'): raise Exception('Unable to find timestamp key in context') master_hostname = get_master_hostname(dbname) results = get_hosts_and_datadirs(dbname) for (host, datadir) in results: if host == master_hostname: if hasattr(context, 'backup_dir'): dump_dir = os.path.join(context.backup_dir, 'db_dumps', '%s' % (context.timestamp_key[0:8])) else: dump_dir = os.path.join(master_data_dir, 'db_dumps', '%s' % (context.timestamp_key[0:8])) master_dump_files = 
['%s/gp_dump_-1_1_%s' % (dump_dir, context.timestamp_key), '%s/gp_dump_status_-1_1_%s' % (dump_dir, context.timestamp_key), '%s/gp_cdatabase_-1_1_%s' % (dump_dir, context.timestamp_key), '%s/gp_dump_-1_1_%s_post_data' % (dump_dir, context.timestamp_key)] for dump_file in master_dump_files: cmd = Command('check for dump files', 'ls -1 %s | wc -l' % (dump_file)) cmd.run(validateAfter=True) results = cmd.get_results() if int(results.stdout.strip()) != 1: raise Exception('Dump file %s not found after gp_dump on host %s' % (dump_file, host)) else: if hasattr(context, 'backup_dir'): dump_dir = os.path.join(context.backup_dir, 'db_dumps', '%s' % (context.timestamp_key[0:8])) else: dump_dir = os.path.join(datadir, 'db_dumps', '%s' % (context.timestamp_key[0:8])) segment_dump_files = ['%s/gp_dump_*_*_%s' % (dump_dir, context.timestamp_key), '%s/gp_dump_status_*_*_%s' % (dump_dir, context.timestamp_key)] for dump_file in segment_dump_files: cmd = Command('check for dump files', 'ls -1 %s | wc -l' % (dump_file), ctxt=REMOTE, remoteHost=host) cmd.run(validateAfter=True) results = cmd.get_results() if int(results.stdout.strip()) != 1: raise Exception('Dump file %s not found after gp_dump on host %s' % (dump_file, host)) @then('"{filetype}" file should not be created under "{dir}"') def impl(context, filetype, dir): if not hasattr(context, 'backup_timestamp'): raise Exception('Unable to find out the %s because backup timestamp has not been stored' % filetype) filename = '' if filetype == "dirty_list": filename = 'gp_dump_%s_dirty_list' % context.backup_timestamp elif filetype == "plan": filename = 'gp_restore_%s_plan' % context.backup_timestamp elif filetype == 'pipes': filename = 'gp_dump_%s_pipes' % context.backup_timestamp elif filetype == 'regular_files': filename = 'gp_dump_%s_regular_files' % context.backup_timestamp else: raise Exception("Unknown filetype '%s' specified" % filetype) dump_dir = dir if len(dir.strip()) != 0 else master_data_dir file_path = os.path.join(dump_dir, 'db_dumps', context.backup_timestamp[0:8], filename) if os.path.exists(file_path): raise Exception("File path %s should not exist for filetype '%s'" % (file_path, filetype)) def get_plan_filename(context): filename = 'gp_restore_%s_plan' % context.backup_timestamp return os.path.join(master_data_dir, 'db_dumps', context.backup_timestamp[0:8], filename) def get_dirty_list_filename(context, backup_dir=None): if not backup_dir: backup_dir = master_data_dir if not hasattr(context, "dump_prefix"): context.dump_prefix = '' filename = '%sgp_dump_%s_dirty_list' % (context.dump_prefix, context.backup_timestamp) return os.path.join(backup_dir, 'db_dumps', context.backup_timestamp[0:8], filename) @then('plan file should match "{filename}"') def impl(context, filename): current_path = os.path.realpath(__file__) current_dir = os.path.dirname(current_path) golden_filename = "%s/%s" % (current_dir, filename) generated_filename = get_plan_filename(context) if not filecmp.cmp(generated_filename, golden_filename): raise Exception("File contents do not match '%s' and '%s'" % (generated_filename, golden_filename)) def parse_plan_file(filename): plan = {} with open(filename) as fd: for line in fd: parts = line.partition(":") ts = parts[0].strip() if ts not in plan: plan[ts] = set() tables = parts[2].split(",") for t in tables: if t not in plan[ts]: plan[ts].add(t.strip()) return plan def modify_plan_with_labels(context, expected_plan, scenario_number=""): labels_key = 'timestamp_labels' + scenario_number newplan = {} for k in expected_plan: if
k not in context._root[labels_key]: raise Exception("Label '%s' not specified in behave test" % k) ts = context._root[labels_key][k] newplan[ts] = expected_plan[k] return newplan def compare_plans(expected, actual): expected_keys = expected.keys() actual_keys = actual.keys() if len(expected_keys) != len(actual_keys): raise Exception("Expected plan has %s timestamps actual plan has %s timestamps" % (len(expected_keys), len(actual_keys))) for k in expected: if k not in actual: raise Exception("Expected timestamp in plan and did not find it: %s " % k) expected_tables = sorted(expected[k]) actual_tables = sorted(actual[k]) if expected_tables != actual_tables: print "Expected plan: %s" % expected print "Actual plan: %s" % actual raise Exception("Tables in plan for timestamp '%s' do not match expected tables" % k) @then('the plan file is validated against "{expected_plan}"') def impl(context, expected_plan): context.restore_plan = parse_plan_file(get_plan_filename(context)) current_path = os.path.realpath(__file__) current_dir = os.path.dirname(current_path) expected_file = '%s/%s' % (current_dir, expected_plan) expected_plan = parse_plan_file(expected_file) expected_plan = modify_plan_with_labels(context, expected_plan) compare_plans(expected_plan, context.restore_plan) @then('the plan file for scenario "{scenario_number}" is validated against "{expected_plan}"') def impl(context, scenario_number, expected_plan): context.restore_plan = parse_plan_file(get_plan_filename(context)) current_path = os.path.realpath(__file__) current_dir = os.path.dirname(current_path) expected_file = '%s/%s' % (current_dir, expected_plan) expected_plan = parse_plan_file(expected_file) expected_plan = modify_plan_with_labels(context, expected_plan, scenario_number) compare_plans(expected_plan, context.restore_plan) @then('there should be "{numtimestamps}" timestamps in the plan file') def impl(context, numtimestamps): num = int(numtimestamps) if len(context.restore_plan) != num: raise Exception("Expected %d timestamps and found %d in restore plan" % (num, len(context.restore_plan))) @then('restore plan for timestamp "{ts}" should contain "{numtables}" tables') def impl(context, ts, numtables): num = int(numtables) if ts not in context.restore_plan: raise Exception("Timestamp label '%s' not found in restore plan" % ts) @then('"{filetype}" file is removed under "{dir}"') def impl(context, filetype, dir): if not hasattr(context, 'backup_timestamp'): raise Exception('Backup timestamp has not been stored') if filetype == "dirty_list": filename = 'gp_dump_%s_dirty_list' % context.backup_timestamp elif filetype == "plan": filename = 'gp_restore_%s_plan' % context.backup_timestamp elif filetype == "global": filename = 'gp_global_-1_1_%s' % context.backup_timestamp elif filetype == "report": filename = 'gp_dump_%s.rpt' % context.backup_timestamp elif filetype == "dump": filename = 'gp_dump_1_1_%s.gz' % context.backup_timestamp else: raise Exception("Unknown filetype '%s' specified" % filetype) dump_dir = dir if len(dir.strip()) != 0 else master_data_dir file_path = os.path.join(dump_dir, 'db_dumps', context.backup_timestamp[0:8], filename) if os.path.exists(file_path): os.remove(file_path) @when('"{filetype}" file should be created under "{dir}"') @then('"{filetype}" file should be created under "{dir}"') def impl(context, filetype, dir): if not hasattr(context, "dump_prefix"): context.dump_prefix = '' if not hasattr(context, 'backup_timestamp'): raise Exception('Backup timestamp has not been stored') if filetype == 
"dirty_list": filename = 'gp_dump_%s_dirty_list' % context.backup_timestamp elif filetype == "plan": filename = 'gp_restore_%s_plan' % context.backup_timestamp elif filetype == "global": filename = 'gp_global_-1_1_%s' % context.backup_timestamp elif filetype == "statistics": filename = 'gp_statistics_-1_1_%s' % context.backup_timestamp elif filetype == 'pipes': filename = 'gp_dump_%s_pipes' % context.backup_timestamp elif filetype == 'regular_files': filename = 'gp_dump_%s_regular_files' % context.backup_timestamp elif filetype == '_filter': filename = 'gp_dump_%s_filter' % context.backup_timestamp elif filetype == '_schema': filename = 'gp_dump_%s_schema' % context.backup_timestamp else: raise Exception("Unknown filetype '%s' specified" % filetype) dump_dir = dir.strip() if len(dir.strip()) != 0 else master_data_dir file_path = os.path.join(dump_dir, 'db_dumps', context.backup_timestamp[0:8], '%s%s' % (context.dump_prefix, filename)) if not os.path.exists(file_path): raise Exception("File path %s does not exist for filetype '%s'" % (file_path, filetype)) @then('verify there are no "{tmp_file_prefix}" tempfiles') def impl(context, tmp_file_prefix): if tmp_file_prefix is not None and tmp_file_prefix: if glob.glob('/tmp/%s*' % tmp_file_prefix): raise Exception('Found temp %s files where they should not be present' % tmp_file_prefix) else: raise Exception('Invalid call to temp file removal %s' % tmp_file_prefix) @then('tables names should be identical to stored table names in "{dbname}" except "{fq_table_names_list}"') def impl(context, dbname, fq_table_names_list): table_names = sorted(get_table_names(dbname)) stored_table_names = sorted(context.table_names) if fq_table_names_list: fq_table_names = fq_table_names_list.split(',') for fq_table_name in fq_table_names: if fq_table_name != "" : stored_table_names.remove(fq_table_name.strip().split('.')) if table_names != stored_table_names: print "Table names after backup:" print stored_table_names print "Table names after restore:" print table_names raise Exception('Schema not restored correctly. List of tables are not equal before and after restore in database %s' % dbname) @then('tables names should be identical to stored table names in "{dbname}"') def impl(context, dbname): table_names = sorted(get_table_names(dbname)) stored_table_names = sorted(context.table_names) if table_names != stored_table_names: print "Table names after backup:" print stored_table_names print "Table names after restore:" print table_names raise Exception('Schema not restored correctly. 
List of tables are not equal before and after restore in database %s' % dbname) @then('tables in "{dbname}" should not contain any data') def impl(context, dbname): for table in context.table_names: table_name = "%s.%s" % (table[0], table[1]) check_empty_table(table_name, dbname) @then('verify that the data of "{expected_count}" tables in "{dbname}" is validated after restore') def impl(context, dbname, expected_count): validate_db_data(context, dbname, int(expected_count)) @then('verify that the data of "{expected_count}" tables in "{dbname}" is validated after restore from "{backedup_dbname}"') def impl(context, dbname, expected_count, backedup_dbname): validate_db_data(context, dbname, int(expected_count), backedup_dbname) @then('all the data from the remote segments in "{dbname}" are stored in path "{dir}" for "{backup_type}"') def impl(context, dbname, dir, backup_type): segs = get_segment_hostnames(context, dbname) if backup_type == 'inc': timestamp = context.backup_timestamp[0:8] elif backup_type == 'full': timestamp = context.full_backup_timestamp[0:8] from_path = '%s/db_dumps/%s' %(dir, timestamp) to_path = '%s/db_dumps' %(dir) for seg in segs: print type(seg[0].strip()) cmdStr = "%s -o 'StrictHostKeyChecking no' -r %s:%s %s" % (findCmdInPath('scp'), seg[0].strip(),from_path, to_path) run_command(context, cmdStr) @then('pg_stat_last_operation registers the truncate for tables "{table_list}" in "{dbname}" in schema "{schema}"') def impl(context, table_list, dbname, schema): if not table_list: raise Exception('Empty table list') tables = table_list.split(',') for t in tables: table_oid = get_table_oid(context, dbname, schema, t.strip()) verify_truncate_in_pg_stat_last_operation(context, dbname, table_oid) @then('pg_stat_last_operation does not register the truncate for tables "{table_list}" in "{dbname}" in schema "{schema}"') def impl(context, table_list, dbname, schema): if not table_list: raise Exception('Empty table list') tables = table_list.split(',') for t in tables: table_oid = get_table_oid(context, dbname, schema, t.strip()) verify_truncate_not_in_pg_stat_last_operation(context, dbname, table_oid) @given('the numbers "{lownum}" to "{highnum}" are inserted into "{tablename}" tables in "{dbname}"') @when('the numbers "{lownum}" to "{highnum}" are inserted into "{tablename}" tables in "{dbname}"') def impl(context, lownum, highnum, tablename, dbname): insert_numbers(dbname, tablename, lownum, highnum) @when('the user adds column "{cname}" with type "{ctype}" and default "{defval}" to "{tname}" table in "{dbname}"') def impl(context, cname, ctype, defval, tname, dbname): sql = "ALTER table %s ADD COLUMN %s %s DEFAULT %s" % (tname, cname, ctype, defval) execute_sql(dbname, sql) @given('there is a fake timestamp for "{ts}"') def impl(context, ts): dname = os.path.join(master_data_dir, 'db_dumps', ts[0:8]) os.makedirs(dname) contents = """ Timestamp Key: %s Backup Type: Full gp_dump utility finished successfully. 
""" % ts fname = os.path.join(dname, 'gp_dump_%s.rpt' % ts) with open(fname, 'w') as fd: fd.write(contents) @then('a timestamp in increments file in "{directory}" is modified to be newer') def impl(context, directory): if not hasattr(context, 'full_backup_timestamp'): raise Exception('Full backup timestamp needs to be specified in the test') if not directory.strip(): directory = master_data_dir dump_dir = os.path.join(directory, 'db_dumps', context.full_backup_timestamp[0:8]) increments_filename = os.path.join(dump_dir, 'gp_dump_%s_increments' % context.full_backup_timestamp) if not os.path.exists(increments_filename): raise Exception('Increments file %s does not exist !' % increments_filename) with open(increments_filename) as fd: lines = fd.readlines() lines[0] = str(int(lines[0].strip()) + 10000) with open(increments_filename, 'w') as fd: for line in lines: fd.write(line + '\n') @then('the "{table_type}" state file under "{backup_dir}" is saved for the "{backup_type}" timestamp') def impl(context, table_type, backup_dir, backup_type): timestamp_key = None if backup_type == 'full': timestamp_key = context.full_backup_timestamp elif backup_type == 'inc': timestamp_key = context.backup_timestamp backup_dir = backup_dir if len(backup_dir.strip()) != 0 else master_data_dir context.state_file = os.path.join(backup_dir, 'db_dumps', timestamp_key[0:8], 'gp_dump_%s_%s_state_file' % (timestamp_key, table_type)) @then('the saved state file is deleted') def impl(context): run_command(context, 'rm -f %s' % context.state_file) if context.exception: raise context.exception @then('the saved state file is corrupted') def impl(context): write_lines = list() with open(context.state_file, "r") as fr: lines = fr.readlines() for line in lines: line = line.replace(",", "") write_lines.append(line) with open(context.state_file, "w") as fw: for line in write_lines: fw.write("%s\n" % line.rstrip()) @then('"{table_name}" is marked as dirty in dirty_list file') def impl(context, table_name): dirty_list = get_dirty_list_filename(context) with open(dirty_list) as fp: for line in fp: if table_name.strip() in line.strip(): return raise Exception('Expected table %s to be marked as dirty in %s' % (table_name, dirty_list)) @when('the "{table_name}" is recreated with same data in "{dbname}"') def impl(context, table_name, dbname): select_sql = 'select * into public.temp from %s' % table_name execute_sql(dbname, select_sql) drop_sql = 'drop table %s' % table_name execute_sql(dbname, drop_sql) recreate_sql = 'select * into %s from public.temp' % table_name execute_sql(dbname, recreate_sql) @then('verify that plan file has latest timestamp for "{table_name}"') def impl(context, table_name): plan_file = get_plan_filename(context) with open(plan_file) as fp: for line in fp: if table_name in line.strip(): timestamp = line.split(':')[0].strip() if timestamp != context.backup_timestamp: raise Exception('Expected table %s with timestamp %s in plan file %s does not match timestamp %s' \ % (table_name, context.backup_timestamp, plan_file, timestamp)) @given('the row "{row_values}" is inserted into "{table}" in "{dbname}"') def impl(context, row_values, table, dbname): insert_row(context, row_values, table, dbname) @when('the method get_partition_state is executed on table "{table}" in "{dbname}" for ao table "{ao_table}"') def impl(context, table, dbname, ao_table): (sch, tbl) = table.split('.') ao_sch, ao_tbl = ao_table.split('.') part_info = [(1, ao_sch, ao_tbl, tbl)] try: backup_utils = Context() backup_utils.master_port = 
os.environ.get('PGPORT') backup_utils.target_db = dbname context.exception = None context.partition_list_res = None context.partition_list_res = get_partition_state(backup_utils, sch, part_info) except Exception as e: context.exception = e @then('an exception should be raised with "{txt}"') def impl(context, txt): if not context.exception: raise Exception("An exception was not raised") output = context.exception.__str__() if not txt in output: raise Exception("Exception output is not matching: '%s'" % output) @then('the get_partition_state result should contain "{elem}"') def impl(context, elem): if not context.partition_list_res: raise Exception('get_partition_state did not return any results') if len(context.partition_list_res) != 1: raise Exception('get_partition_state returned more results than expected "%s"' % context.partition_list_res) if elem not in context.partition_list_res: raise Exception('Expected text "%s" not found in partition list returned by get_partition_state "%s"' % (elem, context.partition_list_res)) @given('older backup directories "{dirlist}" exists') @when('older backup directories "{dirlist}" exists') @then('older backup directories "{dirlist}" exists') def impl(context, dirlist): dirs = [d.strip() for d in dirlist.split(',')] for d in dirs: if len(d) != 8 or not d.isdigit(): raise Exception('Invalid directory name provided %s' % d) for d in dirs: dump_dir = os.path.join(master_data_dir, 'db_dumps', d) if os.path.exists(dump_dir): continue os.makedirs(dump_dir) for i in range(10): with open(os.path.join(dump_dir, '%s_%s' % (d, i)), 'w'): pass @then('the dump directories "{dirlist}" should not exist') def impl(context, dirlist): dirs = [d.strip() for d in dirlist.split(',')] for d in dirs: if len(d) != 8 or not d.isdigit(): raise Exception('Invalid directory name provided %s' % d) for d in dirs: dump_dir = os.path.join(master_data_dir, 'db_dumps', d) if os.path.exists(dump_dir): raise Exception('Unexpected directory exists %s' % dump_dir) @then('the dump directory for the stored timestamp should exist') def impl(context): if not hasattr(context, 'full_backup_timestamp'): raise Exception('Full backup timestamp needs to be stored') dump_dir = os.path.join(master_data_dir, 'db_dumps', context.full_backup_timestamp[0:8]) if not os.path.exists(dump_dir): raise Exception('Expected directory does not exist %s' % dump_dir) def validate_master_config_backup_files(context, dir=master_data_dir): if not hasattr(context, "dump_prefix"): context.dump_prefix = '' master_dump_dir = os.path.join(dir, 'db_dumps', context.backup_timestamp[0:8]) dump_files = os.listdir(master_dump_dir) for df in dump_files: if df.startswith('%sgp_master_config_files' % context.dump_prefix) and df.endswith('.tar'): return raise Exception('Config files not backed up on master "%s"' % master_config_file) def validate_segment_config_backup_files(context, dir=None): if not hasattr(context, "dump_prefix"): context.dump_prefix = '' gparray = GpArray.initFromCatalog(dbconn.DbURL()) primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()] for ps in primary_segs: seg_data_dir = dir if dir is not None else ps.getSegmentDataDirectory() dump_dir = os.path.join(seg_data_dir, 'db_dumps', context.backup_timestamp[0:8]) dump_files = ListRemoteFilesByPattern(dump_dir, '%sgp_segment_config_files_*_%d_*.tar' % (context.dump_prefix, ps.getSegmentDbId()), ps.getSegmentHostName()).run() if len(dump_files) != 1: raise Exception('Error in finding config files "%s" for segment %s' % (dump_files, 
seg_data_dir)) @then('config files should be backed up on all segments') def impl(context): if not hasattr(context, 'backup_timestamp'): raise Exception('Backup timestamp needs to be stored') validate_master_config_backup_files(context) validate_segment_config_backup_files(context) @then('config files should be backed up on all segments in directory "{dir}"') def impl(context, dir): if not hasattr(context, 'backup_timestamp'): raise Exception('Backup timestamp needs to be stored') validate_master_config_backup_files(context, dir=dir) validate_segment_config_backup_files(context, dir=dir) @then('verify that the table "{table_name}" in "{dbname}" has dump info for the stored timestamp') def impl(context, table_name, dbname): dump_history = {} dump_history_sql = 'select dump_key,options from public.%s' % table_name dump_history = getRows(dbname, dump_history_sql) for (dump_key,options) in dump_history: if context.backup_timestamp == dump_key.strip() and dbname in options: return raise Exception('Could not find dump info for timestamp %s in %s table' % (context.backup_timestamp, table_name)) @then('verify that database "{dbname}" does not exist') def impl(context, dbname): with dbconn.connect(dbconn.DbURL(dbname='template1')) as conn: sql = """select datname from pg_database""" dbs = dbconn.execSQL(conn, sql) if dbname in dbs: raise Exception('Database exists when it shouldn\'t "%s"' % dbname) @then('there are no saved data files') def impl(context): clear_all_saved_data_verify_files(context) @then('the dump timestamp for "{db_list}" are different') def impl(context, db_list): if db_list is None: raise Exception('Expected at least 1 database in the list, found none.') if not hasattr(context, 'db_timestamps'): raise Exception('The database timestamps need to be stored') db_names = db_list.strip().split(',') for db in db_names: if db.strip() not in context.db_timestamps: raise Exception('Could not find timestamp for database: %s' % context.db_timestamps) timestamp_set = set([v for v in context.db_timestamps.values()]) if len(timestamp_set) != len(context.db_timestamps): raise Exception('Some databases have same timestamp: "%s"' % context.db_timestamps) @given('there is a "{table_type}" table "{table_name}" in "{db_name}" having large number of partitions') def impl(context, table_type, table_name, db_name): create_large_num_partitions(table_type, table_name, db_name) @given('there is a "{table_type}" table "{table_name}" in "{db_name}" having "{num_partitions}" partitions') def impl(context, table_type, table_name, db_name, num_partitions): if not num_partitions.strip().isdigit(): raise Exception('Invalid number of partitions specified "%s"' % num_partitions) num_partitions = int(num_partitions.strip()) + 1 create_large_num_partitions(table_type, table_name, db_name, num_partitions) @given('the length of partition names of table "{table_name}" in "{db_name}" exceeds the command line maximum limit') def impl(context, table_name, db_name): partitions = get_partition_tablenames(table_name, db_name) partition_list_string = '' for part in partitions: partition_list_string += (part[0] + ',') if partition_list_string[-1] == ',': partition_list_string = partition_list_string[:-1] MAX_COMMAND_LINE_LEN = 100000 if len(partition_list_string) < MAX_COMMAND_LINE_LEN: raise Exception('Expected the length of the string to be greater than %s, but got %s instead' % (MAX_COMMAND_LINE_LEN, len(partition_list_string))) @given('there is a table-file "{filename}" with tables "{table_list}"') @then('there is a table-file
"{filename}" with tables "{table_list}"') def impl(context, filename, table_list): tables = table_list.split(',') with open(filename, 'w') as fd: for table in tables: fd.write(table.strip() + '\n') if not os.path.exists(filename): raise Exception('Unable to create file "%s"' % filename) def create_ext_table_file(file_location): with open(file_location, 'w') as fd: for i in range(100): fd.write('abc, 10, 10\n') def get_host_and_port(): if 'PGPORT' not in os.environ: raise Exception('PGPORT needs to be set in the environment') port = os.environ['PGPORT'] gparray = GpArray.initFromCatalog(dbconn.DbURL()) master_host = None for seg in gparray.getDbList(): if seg.isSegmentMaster(): master_host = seg.getSegmentAddress() if master_host is None: raise Exception('Unable to determine the master hostname') return (master_host, port) @given('there is an external table "{tablename}" in "{dbname}" with data for file "{file_location}"') def impl(context, tablename, dbname, file_location): create_ext_table_file(file_location) host, port = get_host_and_port() ext_table_sql = """CREATE EXTERNAL WEB TABLE %s(name text, column1 int, column2 int) EXECUTE 'cat %s 2> /dev/null || true' ON MASTER FORMAT 'CSV' (DELIMITER ',')""" % (tablename, file_location) execute_sql(dbname, ext_table_sql) verify_ext_table_creation_sql = """SELECT count(*) FROM pg_class WHERE relname = '%s'""" % tablename row_count = getRows(dbname, verify_ext_table_creation_sql)[0][0] if row_count != 1: raise Exception('Creation of external table failed for "%s:%s, row count = %s"' % (file_location, tablename, row_count)) @then('verify that there is no "{tablename}" in the "{file_type}" file in "{backup_dir}"') def impl(context, tablename, file_type, backup_dir): dump_dir = backup_dir if len(backup_dir.strip()) != 0 else master_data_dir if not hasattr(context, "dump_prefix"): context.dump_prefix = '' filename = '%s/db_dumps/%s/%sgp_dump_%s_%s' % (dump_dir, context.backup_timestamp[0:8], context.dump_prefix, context.backup_timestamp, file_type) with open(filename) as fd: for line in fd: if tablename.strip() == line.strip(): raise Exception('Found an unwanted table in the file: "%s" in line: "%s"' %(filename, line)) @then('verify that exactly "{num_tables}" tables in "{dbname}" have been restored') def impl(context, num_tables, dbname): validate_num_restored_tables(context, num_tables, dbname) @then('verify that exactly "{num_tables}" tables in "{dbname}" have been restored from "{backedup_dbname}"') def impl(context, num_tables, dbname, backedup_dbname): validate_num_restored_tables(context, num_tables, dbname, backedup_dbname=backedup_dbname) @then('the user runs gpdbrestore on dump date directory with options "{options}"') def impl(context, options): command = 'gpdbrestore -e -b %s %s -a' % (context.backup_timestamp[0:8], options) run_gpcommand(context, command) @then('the timestamps should be printed in sorted order') def impl(context): stdout_lines = context.stdout_message.split('\n') process_ts = False timestamps = [] for line in stdout_lines: if '--------------------------' in line: process_ts = True elif process_ts: if 'Enter timestamp number to restore' not in line: timestamps.append(line.strip().split('......')[-1].strip()) else: process_ts = False break timestamps = [ts.split()[0]+ts.split()[1] for ts in timestamps] sorted_timestamps = sorted(timestamps, key=lambda x: int(x)) if sorted_timestamps != timestamps: raise Exception('Timestamps are not in sorted order "%s"' % timestamps) @given('there are "{table_count}" "{tabletype}" tables 
"{table_name}" with data in "{dbname}"') def impl(context, table_count, tabletype, table_name, dbname): table_count = int(table_count) for i in range(1, table_count+1): tablename = "%s_%s" % (table_name, i) create_database_if_not_exists(context, dbname) drop_table_if_exists(context, table_name=tablename, dbname=dbname) create_partition(context, tablename, tabletype, dbname, compression_type=None, partition=False) @given('the tables "{table_list}" are in dirty hack file "{dirty_hack_file}"') def impl(context, table_list, dirty_hack_file): tables = [t.strip() for t in table_list.split(',')] with open(dirty_hack_file, 'w') as fd: for t in tables: fd.write(t + '\n') if not os.path.exists(dirty_hack_file): raise Exception('Failed to create dirty hack file "%s"' % dirty_hack_file) @given('partition "{part_num}" of partition tables "{table_list}" in "{dbname}" in schema "{schema}" are in dirty hack file "{dirty_hack_file}"') def impl(context, part_num, table_list, dbname, schema, dirty_hack_file): tables = table_list.split(',') with open(dirty_hack_file, 'w') as fd: part_num = int(part_num.strip()) for table in tables: part_t = get_partition_names(schema, table.strip(), dbname, 1, part_num) if len(part_t) < 1 or len(part_t[0]) < 1: print part_t partition_name = part_t[0][0].strip() fd.write(partition_name + '\n') if not os.path.exists(dirty_hack_file): raise Exception('Failed to write to dirty hack file "%s"' % dirty_hack_file) @then('verify that the config files are backed up with the stored timestamp') def impl(context): if not hasattr(context, 'backup_timestamp'): raise Exception('Timestamp needs to be stored') config_file = os.path.join(master_data_dir, 'db_dumps', context.backup_timestamp[0:8], 'gp_master_config_files_%s.tar' % context.backup_timestamp) if not os.path.exists(config_file): raise Exception('Failed to locate config file on master "%s"' % config_file) gparray = GpArray.initFromCatalog(dbconn.DbURL()) primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True)] for seg in primary_segs: segment_config_file = os.path.join(seg.getSegmentDataDirectory(), 'db_dumps', context.backup_timestamp[0:8], 'gp_segment_config_files_0_%s_%s.tar' % (seg.getSegmentDbId(), context.backup_timestamp)) if not CheckRemoteFile(segment_config_file, seg.getSegmentAddress()): raise Exception('Failed to locate "%s" on "%s"' % segment_config_file, seg.getSegmentDataDirectory()) @then('verify that the list of stored timestamps is printed to stdout') def impl(context): found_ts = 0 stdout = context.stdout_message for ts in context.inc_backup_timestamps: for line in stdout.splitlines(): ts_str = 'Backup Timestamp: %s' % ts if ts_str in line: found_ts += 1 print 'context.inc_backup_timestamps = ', context.inc_backup_timestamps if found_ts != len(context.inc_backup_timestamps): raise Exception('Expected "%d" timestamps but found "%d" timestamps' % (len(context.inc_backup_timestamps), found_ts)) @given('there is a "{table_type}" table "{tablename}" with funny characters in "{dbname}"') def impl(context, table_type, tablename, dbname): funny_chars_table_name_sql = """create table "%s new line in it" (a int)""" % tablename.strip() table_type = table_type.strip() if table_type == 'ao': funny_chars_table_name_sql += ' with(appendonly=true)' elif table_type == 'co': funny_chars_table_name_sql += ' with(appendonly=true, orientation=column)' elif table_type == 'heap': pass else: raise Exception('Unknown table type specified "%s"' % table_type) execute_sql(dbname.strip(), 
funny_chars_table_name_sql) @then('verify that the tuple count of all appendonly tables are consistent in "{dbname}"') def impl(context, dbname): ao_partition_list = get_partition_list('ao', dbname) verify_stats(dbname, ao_partition_list) co_partition_list = get_partition_list('co', dbname) verify_stats(dbname, co_partition_list) @then('verify that there are no aoco stats in "{dbname}" for table "{tables}"') def impl(context, dbname, tables): tables = tables.split(',') for t in tables: validate_no_aoco_stats(context, dbname, t.strip()) @then('verify that there are "{tupcount}" tuples in "{dbname}" for table "{tables}"') def impl(context, tupcount, dbname, tables): tables = tables.split(',') for t in tables: validate_aoco_stats(context, dbname, t.strip(), tupcount) @when('the performance timer is started') def impl(context): context.performance_timer = time.time() @then('the performance timer should be less then "{num_seconds}" seconds') def impl(context, num_seconds): max_seconds = float(num_seconds) current_time = time.time() elapsed = current_time - context.performance_timer if elapsed > max_seconds: raise Exception("Performance timer ran for %.1f seconds but had a max limit of %.1f seconds" % (elapsed, max_seconds)) print "Elapsed time was %.1f seconds" % elapsed @given('the file "{filename}" is removed from the system') @when('the file "{filename}" is removed from the system') @then('the file "{filename}" is removed from the system') def impl(context, filename): os.remove(filename) @given('the client program "{program_name}" is present under {parent_dir} in "{sub_dir}"') def impl(context, program_name, parent_dir, sub_dir): if parent_dir == 'CWD': parent_dir = os.getcwd() program_path = '%s/%s/%s' % (parent_dir, sub_dir, program_name) print program_path if not os.path.isfile(program_path): raise Exception('gpfdist client program does not exist: %s' % (program_path)) @when('the user runs client program "{program_name}" from "{subdir}" under {parent_dir}') def impl(context, program_name, subdir, parent_dir): if parent_dir == 'CWD': parent_dir = os.getcwd() command_line = "python %s/%s/%s" % (parent_dir, subdir, program_name) run_command(context, command_line) @then('the gpfdist should print {msg} to "{filename}" under {parent_dir}') def impl(context, msg, filename, parent_dir): if parent_dir == 'CWD': parent_dir = os.getcwd() filepath = '%s/%s' % (parent_dir, filename) with open(filepath, 'r') as fp: for line in fp: if msg in line: return raise Exception('Log file %s did not contain the message %s' % (filepath, msg)) @given('the "{process_name}" process is killed') @then('the "{process_name}" process is killed') @when('the "{process_name}" process is killed') def impl(context, process_name): run_command(context, 'pkill %s' % process_name) @then('the client program should print {msg} to stdout with value in range {min_val} to {max_val}') def impl(context, msg, min_val, max_val): stdout = context.stdout_message for line in stdout.splitlines(): if msg in line: val = re.findall(r'\d+', line) if not val: raise Exception('Expected a numeric digit after message: %s' % msg) if len(val) > 1: raise Exception('Expected one numeric digit after message: %s' % msg) if int(val[0]) < int(min_val) or int(val[0]) > int(max_val): raise Exception('Value not in acceptable range %s' % val[0]) @given('the directory "{dirname}" exists') def impl(context, dirname): if not os.path.isdir(dirname): os.mkdir(dirname) if not os.path.isdir(dirname): raise Exception("directory '%s' not created" % dirname) @given('the directory "{dirname}" exists in
current working directory') def impl(context, dirname): dirname = os.path.join(os.getcwd(), dirname) if os.path.isdir(dirname): shutil.rmtree(dirname, ignore_errors=True) if os.path.isdir(dirname): raise Exception("directory '%s' not removed" % dirname) os.mkdir(dirname) if not os.path.isdir(dirname): raise Exception("directory '%s' not created" % dirname) @given('the file "{filename}" exists under "{directory}" in current working directory') def impl(context, filename, directory): directory = os.path.join(os.getcwd(), directory) if not os.path.isdir(directory): raise Exception("directory '%s' not exists" % directory) filepath = os.path.join(directory, filename) open(filepath, 'a').close() if not os.path.exists(filepath): raise Exception("file '%s' not created" % filepath) @given('the directory "{dirname}" does not exist in current working directory') def impl(context, dirname): dirname = os.path.join(os.getcwd(), dirname) if os.path.isdir(dirname): shutil.rmtree(dirname, ignore_errors=True) if os.path.isdir(dirname): raise Exception("directory '%s' not removed" % dirname) @when('the data line "{dataline}" is appened to "{fname}" in cwd') @then('the data line "{dataline}" is appened to "{fname}" in cwd') def impl(context, dataline, fname): fname = os.path.join(os.getcwd(), fname) with open(fname, 'a') as fd: fd.write("%s\n" % dataline) @when('a "{readwrite}" external table "{tname}" is created on file "{fname}" in "{dbname}"') def impl(context, readwrite, tname, fname, dbname): hostname = socket.gethostname() sql = """CREATE %s EXTERNAL TABLE %s (name varchar(255), id int) LOCATION ('gpfdist://%s:8088/%s') FORMAT 'text' (DELIMITER '|'); """ % (readwrite, tname, hostname, fname) with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn: dbconn.execSQL(conn, sql) conn.commit() @given('the external table "{tname}" does not exist in "{dbname}"') def impl(context, tname, dbname): drop_external_table_if_exists(context, table_name=tname, dbname=dbname) @when('all rows from table "{tname}" db "{dbname}" are stored in the context') def impl(context, tname, dbname): context.stored_rows = [] with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn: sql = "SELECT * FROM %s" % tname curs = dbconn.execSQL(conn, sql) context.stored_rows = curs.fetchall() @then('validate that "{dataline}" "{formatter}" seperated by "{delim}" is in the stored rows') def impl(context, dataline, formatter, delim): lookfor = dataline.split(delim) formatter = formatter.split(delim) for i in range(len(formatter)): if formatter[i] == 'int': lookfor[i] = int(lookfor[i]) if lookfor not in context.stored_rows: print context.stored_rows print lookfor raise Exception("'%s' not found in stored rows" % dataline) @then('validate that following rows are in the stored rows') def impl(context): for row in context.table: found_match = False for stored_row in context.stored_rows: match_this_row = True for i in range(len(stored_row)): value = row[i] if isinstance(stored_row[i], bool): value = str(True if row[i] == 't' else False) if value != str(stored_row[i]): match_this_row = False break if match_this_row: found_match = True break if not found_match: print context.stored_rows raise Exception("'%s' not found in stored rows" % row) @then('validate that stored rows has "{numlines}" lines of output') def impl(context, numlines): num_found = len(context.stored_rows) numlines = int(numlines) if num_found != numlines: raise Exception("Found %d of stored query result but expected %d records" % (num_found, numlines)) def get_standby_host(): gparray = 
GpArray.initFromCatalog(dbconn.DbURL()) segments = gparray.getDbList() standby_master = [seg.getSegmentHostName() for seg in segments if seg.isSegmentStandby()] if len(standby_master) > 0: return standby_master[0] else: return [] @given('user does not have ssh permissions') def impl(context): user_home = os.environ.get('HOME') authorized_keys_file = '%s/.ssh/authorized_keys' % user_home if os.path.exists(os.path.abspath(authorized_keys_file)): shutil.move(authorized_keys_file, '%s.bk' % authorized_keys_file) @then('user has ssh permissions') def impl(context): user_home = os.environ.get('HOME') authorized_keys_backup_file = '%s/.ssh/authorized_keys.bk' % user_home if os.path.exists(authorized_keys_backup_file): shutil.move(authorized_keys_backup_file, authorized_keys_backup_file[:-3]) def delete_data_dir(host): cmd = Command(name='remove data directories', cmdStr='rm -rf %s' % master_data_dir, ctxt=REMOTE, remoteHost=host) cmd.run(validateAfter=True) @when('the user initializes standby master on "{hostname}"') def impl(context, hostname): create_standby(context, hostname) def create_standby(context, hostname): port = os.environ.get('PGPORT') if hostname.strip() == 'mdw': if not hasattr(context, 'master') or not hasattr(context, 'standby_host'): raise Exception('Expected master to be saved but was not found') delete_data_dir(context.master) cmd = Command(name='gpinitstandby', cmdStr='PGPORT=%s MASTER_DATA_DIRECTORY=%s gpinitstandby -a -s %s' % (port, master_data_dir, context.master), ctxt=REMOTE, remoteHost=context.standby_host) cmd.run(validateAfter=True) return elif hostname.strip() == 'smdw': context.master = get_master_hostname('template1')[0][0] context.ret_code = 0 context.stdout_message = '' context.error_message = '' segment_hostlist = get_segment_hostlist() segment_hosts = [seg for seg in segment_hostlist if seg != context.master] context.standby_host = segment_hosts[0] gparray = GpArray.initFromCatalog(dbconn.DbURL()) segdbs = gparray.getDbList() for seg in segdbs: if seg.getSegmentHostName() == context.standby_host: context.standby_host_data_dir = seg.getSegmentDataDirectory() context.standby_was_initialized = False standby = get_standby_host() if standby: context.standby_was_initialized = True context.standby_host = standby return delete_data_dir(context.standby_host) cmd = "gpinitstandby -a -s %s" % context.standby_host run_gpcommand(context, cmd) else: raise Exception('Invalid host type specified "%s"' % hostname) @when('the user runs the query "{query}" on "{dbname}"') def impl(context, query, dbname): if query.lower().startswith('create') or query.lower().startswith('insert'): thread.start_new_thread(execute_sql, (dbname, query)) else: thread.start_new_thread(getRows, (dbname, query)) time.sleep(30) @given('we have exchanged keys with the cluster') def impl(context): hostlist = get_all_hostnames_as_list(context, 'template1') host_str = ' -h '.join(hostlist) cmd_str = 'gpssh-exkeys %s' % host_str run_gpcommand(context, cmd_str) @then('the temp files "{filename_prefix}" are not created in the system') def impl(context, filename_prefix): hostlist = get_all_hostnames_as_list(context, 'template1') print hostlist file_pattern = 'ls /tmp/%s* | wc -l' % filename_prefix print file_pattern for host in hostlist: cmd = Command(name='check for temp files', cmdStr=file_pattern, ctxt=REMOTE, remoteHost=host) cmd.run(validateAfter=True) results = cmd.get_results() if int(results.stdout.strip()) > 0: raise Exception('Temp files with prefix %s are not cleaned up on host %s after gpcrondump' % 
(filename_prefix, host)) @when('the user activates standby on the standbyhost') @then('the user activates standby on the standbyhost') def impl(context): port = os.environ.get('PGPORT') cmd = 'PGPORT=%s MASTER_DATA_DIRECTORY=%s gpactivatestandby -d %s -fa' % (port, master_data_dir, master_data_dir) cmd = Command('run remote command', cmd, ctxt=REMOTE, remoteHost=context.standby_host) cmd.run(validateAfter=True) @then('the user runs the command "{cmd}" from standby') @when('the user runs the command "{cmd}" from standby') def impl(context, cmd): port = os.environ.get('PGPORT') cmd = 'PGPORT=%s MASTER_DATA_DIRECTORY=%s %s' % (port, master_data_dir, cmd) cmd = Command('run remote command', cmd, ctxt=REMOTE, remoteHost=context.standby_host) cmd.run(validateAfter=True) @given('user kills a primary postmaster process') @when('user kills a primary postmaster process') @then('user kills a primary postmaster process') def impl(context): if hasattr(context, 'pseg'): seg_data_dir = context.pseg_data_dir seg_host = context.pseg_hostname seg_port = context.pseg.getSegmentPort() else: gparray=GpArray.initFromCatalog(dbconn.DbURL()) for seg in gparray.getDbList(): if seg.isSegmentPrimary(): seg_data_dir = seg.getSegmentDataDirectory() seg_host = seg.getSegmentHostName() seg_port = seg.getSegmentPort() break pid = get_pid_for_segment(seg_data_dir, seg_host) if pid is None: raise Exception('Unable to locate segment "%s" on host "%s"' % (seg_data_dir, seg_host)) kill_process(int(pid), seg_host) time.sleep(10) pid = get_pid_for_segment(seg_data_dir, seg_host) if pid is not None: raise Exception('Unable to kill postmaster with pid "%d" datadir "%s"' % (pid, seg_data_dir)) context.killed_seg_host = seg_host context.killed_seg_port = seg_port @when('the temp files "{filename_prefix}" are removed from the system') @given('the temp files "{filename_prefix}" are removed from the system') def impl(context, filename_prefix): hostlist = get_all_hostnames_as_list(context, 'template1') print hostlist for host in hostlist: cmd = Command(name='remove data directories', cmdStr='rm -rf /tmp/%s*' % filename_prefix, ctxt=REMOTE, remoteHost=host) cmd.run(validateAfter=True) @then('the standby is initialized if required') def impl(context): if context.standby_was_initialized or hasattr(context, 'cluster_had_standby'): if get_standby_host(): return delete_data_dir(context.standby_host) cmd = Command('create the standby', cmdStr='gpinitstandby -s %s -a' % context.standby_host) cmd.run(validateAfter=True) else: standby = get_standby_host() if standby: run_gpcommand(context, 'gpinitstandby -ra') @given('user can start transactions') @when('user can start transactions') @then('user can start transactions') def impl(context): num_retries = 150 attempt = 0 while attempt < num_retries: try: with dbconn.connect(dbconn.DbURL()) as conn: break except Exception as e: attempt +=1 pass time.sleep(1) if attempt == num_retries: raise Exception('Unable to establish a connection to database !!!') @when('user runs "{cmd}" with sudo access') def impl(context, cmd): gphome = os.environ.get('GPHOME') python_path = os.environ.get('PYTHONPATH') python_home = os.environ.get('PYTHONHOME') ld_library_path = os.environ.get('LD_LIBRARY_PATH') path = os.environ.get('PATH') cmd_str = """sudo GPHOME=%s PATH=%s PYTHONHOME=%s PYTHONPATH=%s LD_LIBRARY_PATH=%s %s/bin/%s""" % (gphome, path, python_home, python_path, ld_library_path, gphome, cmd) run_command(context, cmd_str) def verify_num_files(results, expected_num_files, timestamp): num_files = 
results.stdout.strip() if num_files != expected_num_files: raise Exception('Expected "%s" files with timestamp key "%s" but found "%s"' % (expected_num_files, timestamp,num_files)) def verify_timestamps_on_master(timestamp, dump_type): list_cmd = 'ls -l %s/db_dumps/%s/*%s* | wc -l' % (master_data_dir, timestamp[:8], timestamp) cmd = Command('verify timestamps on master', list_cmd) cmd.run(validateAfter=True) expected_num_files = '10' if dump_type == 'incremental' else '8' verify_num_files(cmd.get_results(), expected_num_files, timestamp) def verify_timestamps_on_segments(timestamp): gparray = GpArray.initFromCatalog(dbconn.DbURL()) primary_segs = [segdb for segdb in gparray.getDbList() if segdb.isSegmentPrimary()] for seg in primary_segs: db_dumps_dir = os.path.join(seg.getSegmentDataDirectory(), 'db_dumps', timestamp[:8]) list_cmd = 'ls -l %s/*%s* | wc -l' % (db_dumps_dir, timestamp) cmd = Command('get list of dump files', list_cmd, ctxt=REMOTE, remoteHost=seg.getSegmentHostName()) cmd.run(validateAfter=True) verify_num_files(cmd.get_results(), '2', timestamp) @then('verify that "{dump_type}" dump files have stored timestamp in their filename') def impl(context, dump_type): if dump_type.strip().lower() != 'full' and dump_type.strip().lower() != 'incremental': raise Exception('Invalid dump type "%s"' % dump_type) verify_timestamps_on_master(context.backup_timestamp, dump_type.strip().lower()) verify_timestamps_on_segments(context.backup_timestamp) def validate_files(file_list, pattern_list, expected_file_count): file_count = 0 for pattern in pattern_list: pat = re.compile(pattern) pat_found = False for f in file_list: m = pat.search(f.strip()) if m is not None: pat_found = True file_count += 1 if not pat_found: raise Exception('Expected file not found for pattern: "%s" in file list "%s"' % (pattern, file_list)) if file_count != expected_file_count: raise Exception('Expected count of %d does not match actual count %d in file list "%s"' % (expected_file_count, file_count, file_list)) @then('the "{file_type}" file under "{directory}" with options "{options}" is validated after dump operation') def impl(context, file_type, directory, options): backup_dir = directory.strip() if directory.strip() != '' else master_data_dir if len(options.split(',')) > 3: raise Exception('Invalid options specified "%s"' % options) option_list = options.split(',') pipe_file_count = 1 + get_num_segments(primary=True, mirror=False, master=True, standby=False) reg_file_count = 6 pipes_pattern_list = ['gp_dump_.*_%s.*(?:\.gz)?' 
% context.backup_timestamp] regular_pattern_list = ['gp_cdatabase_-1_1_%s' % context.backup_timestamp, 'gp_dump_%s.*' % context.backup_timestamp, 'gp_dump_status_-1_1_%s' % context.backup_timestamp] if '-G' in option_list: pipe_file_count += 1 pipes_pattern_list += ['gp_global_-1_1_%s' % context.backup_timestamp] if '-g' in option_list: pipe_file_count += get_num_segments(primary=True, mirror=False, master=True, standby=False) pipes_pattern_list += ['gp_master_config_files_%s.*' % context.backup_timestamp, 'gp_segment_config_files_.*_.*_%s.*' % context.backup_timestamp] if '--incremental' in option_list: regular_pattern_list += ['gp_dump_%s.*' % context.full_backup_timestamp] reg_file_count += 1 if hasattr(context, "dump_prefix"): if '-t' in option_list or '-T' in option_list: reg_file_count += 1 for id, p in enumerate(pipes_pattern_list): pipes_pattern_list[id] = '%s%s' % (context.dump_prefix, p) for id, p in enumerate(regular_pattern_list): regular_pattern_list[id] = '%s%s' % (context.dump_prefix, p) filename = '%s/db_dumps/%s/%sgp_dump_%s_%s' % (backup_dir, context.backup_timestamp[0:8], context.dump_prefix, context.backup_timestamp, file_type.strip()) with open(filename) as fp: file_list = fp.readlines() if file_type == 'pipes': validate_files(file_list, pipes_pattern_list, pipe_file_count) elif file_type == 'regular_files': validate_files(file_list, regular_pattern_list, reg_file_count) @then('the timestamp key "{timestamp_key}" for gpcrondump is stored') def impl(context, timestamp_key): context.backup_timestamp = timestamp_key @given('the prefix "{prefix}" is stored') def impl(context, prefix): context.dump_prefix = prefix + '_' def get_segment_dump_files(context, dir): results = [] gparray = GpArray.initFromCatalog(dbconn.DbURL()) primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()] for seg in primary_segs: segment_dump_dir = dir.strip() if len(dir.strip()) != 0 else seg.getSegmentDataDirectory() cmd = Command('check dump files', 'ls %s/db_dumps/%s/*%s*' % (segment_dump_dir, context.backup_timestamp[0:8], context.backup_timestamp), ctxt=REMOTE, remoteHost=seg.getSegmentHostName()) cmd.run(validateAfter=False) #because we expect ls to fail results.append((seg, [os.path.basename(r) for r in cmd.get_results().stdout.strip().split()])) return results @then('the named pipes are created for the timestamp "{timestamp_key}" under "{dir}"') def impl(context, timestamp_key, dir): dump_dir = dir if len(dir.strip()) != 0 else master_data_dir pipes_filename = '%s/db_dumps/%s/gp_dump_%s_pipes' % (dump_dir, timestamp_key[0:8], timestamp_key) with open(pipes_filename, 'r') as fp: for f in fp: (host, filename) = [t.strip() for t in f.split(':')] cmd_str = 'mkdir -p %s' % os.path.dirname(filename) cmd = Command('create named pipes directory', cmd_str, ctxt=REMOTE, remoteHost=host) cmd.run(validateAfter=True) results = cmd.get_results() if int(results.rc) != 0: raise Exception('Non zero return code during makedirs command') cmd = Command('create named pipes', 'mkfifo %s' % filename, ctxt=REMOTE, remoteHost=host) cmd.run(validateAfter=True) results = cmd.get_results() if int(results.rc) != 0: raise Exception('Non zero return code during mkfifo command') @then('the named pipes are validated against the timestamp "{timestamp_key}" under "{dir}"') def impl(context, timestamp_key, dir): dump_dir = dir if len(dir.strip()) != 0 else master_data_dir pipes_filename = '%s/db_dumps/%s/gp_dump_%s_pipes' % (dump_dir, timestamp_key[0:8], timestamp_key) with open(pipes_filename, 'r') as fp: 
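# each line of the gp_dump_<timestamp>_pipes file is formatted as '<host>:<pipe path>'; the loop below creates the pipe's parent directory and then the named pipe (mkfifo) on that host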
for f in fp: (host, filename) = [t.strip() for t in f.split(':')] cmd = Command('create named pipes', 'file %s' % filename, ctxt=REMOTE, remoteHost=host) cmd.run(validateAfter=True) results = cmd.get_results() if int(results.rc) != 0: raise Exception('Non zero return code during mkfifo command') if not 'named pipe' in results.stdout: raise Exception('Expected %s to be a named pipe' % filename) @when('the named pipe script for the "{operation}" is run for the files under "{dump_directory}"') @then('the named pipe script for the "{operation}" is run for the files under "{dump_directory}"') def impl(context, operation, dump_directory): dump_dir = dump_directory if len(dump_directory.strip()) != 0 else master_data_dir if operation == 'restore' and hasattr(context, 'inc_backup_timestamps'): if not hasattr(context, 'backup_timestamp'): raise Exception('Backup timestamp not stored') for ts in context.inc_backup_timestamps: open_named_pipes(context, operation, ts, dump_dir) else: open_named_pipes(context, operation, context.backup_timestamp, dump_dir) @then('close all opened pipes') def impl(context): hosts = set(get_all_hostnames_as_list(context, 'template1')) for h in hosts: find_cmd = Command('get list of pipe processes', "ps -eaf | grep _pipe.py | grep -v grep | grep -v ssh", ctxt=REMOTE, remoteHost=h) find_cmd.run() results = find_cmd.get_results().stdout.strip().split('\n') for process in results: if not process.strip(): continue pid = process.split()[1].strip() print 'pid = %s on host %s' % (pid, h) cmd = Command('Kill pipe process', "kill %s" % pid, ctxt=REMOTE, remoteHost=h) cmd.run(validateAfter=True) find_cmd.run() # We expecte failure here results = find_cmd.get_results().stdout.strip() if results: raise Exception('Unexpected pipe processes found "%s"' % results) def open_named_pipes(context, operation, timestamp, dump_dir): sleeptime = 5 pipes_filename = '%s/db_dumps/%s/gp_dump_%s_pipes' % (dump_dir, timestamp[0:8], timestamp) filename = os.path.join(os.getcwd(), './test/data/%s_pipe.py' % operation) segs = get_all_hostnames_as_list(context, 'template1') for seg in segs: cmdStr = "%s -o 'StrictHostKeyChecking no' %s %s:%s" % (findCmdInPath('scp'), filename, seg, '/tmp') run_command(context, cmdStr) with open(pipes_filename, 'r') as fp: for f in fp: (host, filename) = [t.strip() for t in f.split(':')] cmd = Command('run pipe script', 'sh -c "python /tmp/%s_pipe.py %s" &>/dev/null &' % (operation, filename), ctxt=REMOTE, remoteHost=host) cmd.run(validateAfter=True) time.sleep(sleeptime) results = cmd.get_results() @given('the core dump directory is stored') def impl(context): with open('/etc/sysctl.conf', 'r') as fp: for line in fp: if 'kernel.core_pattern' in line: context.core_dir = os.path.dirname(line.strip().split('=')[1]) if not hasattr(context, 'core_dir') or not context.core_dir: context.core_dir = os.getcwd() @given('the number of core files "{stage}" running "{utility}" is stored') @then('the number of core files "{stage}" running "{utility}" is stored') def impl(context, stage, utility): core_files_count = 0 files_list = os.listdir(context.core_dir) for f in files_list: if f.startswith('core'): core_files_count += 1 if stage.strip() == 'before': context.before_core_count = core_files_count elif stage.strip() == 'after': context.after_core_count = core_files_count else: raise Exception('Invalid stage entered: %s' % stage) @then('the number of core files is the same') def impl(context): if not hasattr(context, 'before_core_count'): raise Exception('Core file count not stored 
before operation') if not hasattr(context, 'after_core_count'): raise Exception('Core file count not stored after operation') if context.before_core_count != context.after_core_count: raise Exception('Core files count before %s does not match after %s' % (context.before_core_count, context.after_core_count)) @given('the gpAdminLogs directory has been backed up') def impl(context): src = os.path.join(os.path.expanduser('~'), 'gpAdminLogs') dest = os.path.join(os.path.expanduser('~'), 'gpAdminLogs.bk') shutil.move(src, dest) @given('the user does not have "{access}" permission on the home directory') def impl(context, access): home_dir = os.path.expanduser('~') context.orig_write_permission = check_user_permissions(home_dir, 'write') if access == 'write': cmd = "sudo chmod u-w %s" % home_dir run_command(context, cmd) if check_user_permissions(home_dir, access): raise Exception('Unable to change "%s" permissions for the directory "%s"' % (access, home_dir)) @then('the "{filetype}" path "{file_path}" should "{cond}" exist') def impl(context, filetype, file_path, cond): cond = cond.strip() if file_path[0] == '~': file_path = os.path.join(os.path.expanduser('~'), file_path[2:]) if filetype == 'file': existence_check_fn = os.path.isfile elif filetype == 'directory': existence_check_fn = os.path.isdir else: raise Exception('File type should be either file or directory') if cond == '' and not existence_check_fn(file_path): raise Exception('The %s "%s" does not exist' % (filetype, file_path)) elif cond == 'not' and existence_check_fn(file_path): raise Exception('The %s "%s" exist' % (filetype, file_path)) @then('the directory "{file_path}" is removed') def impl(context, file_path): if file_path[0] == '~': file_path = os.path.join(os.path.expanduser('~'), file_path[2:]) backup_file_path = file_path + '.bk' if not os.path.exists(backup_file_path): raise Exception('Backup file "%s" must exist in order to delete the file "%s"' % (backup_file_path, file_path)) if '*' in file_path: raise Exception('WildCard found in file path !!!!. 
Cannot delete') run_command(context, 'rm -rf %s' % file_path) @then('there should be dump files under "{directory}" with prefix "{prefix}"') def impl(context, directory, prefix): if not hasattr(context, "dump_prefix"): context.dump_prefix = '' dump_prefix = '%s_gp' % prefix.strip() master_dump_dir = directory if len(directory.strip()) != 0 else master_data_dir segment_dump_files = get_segment_dump_files(context, directory) for seg, dump_files in segment_dump_files: segment_dump_dir = directory if len(directory.strip()) != 0 else seg.getSegmentDataDirectory() if len(dump_files) == 0: raise Exception('Failed to find dump files on the segment %s under %s/db_dumps/%s' % (seg.getSegmentDataDirectory(), segment_dump_dir, context.backup_timestamp[0:8])) for dump_file in dump_files: if not dump_file.startswith(dump_prefix): raise Exception('Dump file %s on the segment %s under %s/db_dumps/%s does not start with required prefix %s' % (dump_file, seg.getSegmentDataDirectory(), segment_dump_dir, context.backup_timestamp[0:8], prefix)) cmd = Command('check dump files', 'ls %s/db_dumps/%s/*%s*' % (master_dump_dir, context.backup_timestamp[0:8], context.backup_timestamp)) cmd.run(validateAfter=True) results = cmd.get_results().stdout.strip().split('\n') if len(results) == 0: raise Exception('Failed to find dump files %s on the master under %s' % (results, master_dump_dir)) for filename in results: if not os.path.basename(filename).startswith(prefix.strip()): raise Exception('Dump file %s on master under %s does not have required prefix %s' %(filename, master_dump_dir, prefix)) @given('the environment variable "{var}" is not set') def impl(context, var): context.env_var = os.environ.get(var) os.environ[var] = '' @then('the environment variable "{var}" is reset') def impl(context, var): if hasattr(context, 'env_var'): os.environ[var] = context.env_var else: raise Exception('Environment variable %s cannot be reset because its value was not saved.' 
% var) @given('the environment variable "{var}" is set to "{val}"') def impl(context, var, val): context.env_var = os.environ.get(var) os.environ[var] = val @given('the path "{path}" exists') def impl(context, path): if not os.path.exists(path): os.makedirs(path) @then('the path "{path}" does not exist') def impl(context, path): if os.path.exists(path): shutil.rmtree(path) @when('the user runs the following query on "{dbname}" without fetching results') def impl(context, dbname): query = context.text.strip() thread.start_new_thread(execute_sql, (dbname, query)) time.sleep(30) @when('the user runs query from the file "{filename}" on "{dbname}" without fetching results') def impl(context, filename, dbname): with open(filename) as fr: for line in fr: query = line.strip() thread.start_new_thread(execute_sql, (dbname, query)) time.sleep(30) @then('the following text should be printed to stdout') def impl(context): check_stdout_msg(context, context.text.strip()) @then('the text in the file "{filename}" should be printed to stdout') def impl(context, filename): contents = '' with open(filename) as fr: for line in fr: contents = line.strip() print "contents: ", contents check_stdout_msg(context, contents) @when('the user runs command "{cmd}" on the "{seg_type}" segment') def impl(context, cmd, seg_type): if seg_type == 'Change Tracking': port, host = get_change_tracking_segment_info() elif seg_type == 'Original': port, host = context.killed_seg_port, context.killed_seg_host else: raise Exception('Invalid segment type "%s" specified' % seg_type) cmd += ' -p %s -h %s' % (port, host) run_command(context, cmd) @given('below sql is executed in "{dbname}" db') @when('below sql is executed in "{dbname}" db') def impl(context, dbname): sql = context.text execute_sql(dbname, sql) @when('sql "{sql}" is executed in "{dbname}" db') def impl(context, sql, dbname): execute_sql(dbname, sql) @when('execute following sql in db "{dbname}" and store result in the context') def impl(context, dbname): context.stored_rows = [] with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn: curs = dbconn.execSQL(conn, context.text) context.stored_rows = curs.fetchall() @when('execute sql "{sql}" in db "{dbname}" and store result in the context') def impl(context, sql, dbname): context.stored_rows = [] with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn: curs = dbconn.execSQL(conn, sql) context.stored_rows = curs.fetchall() @then('validate that "{message}" is in the stored rows') def impl(context, message): for row in context.stored_rows: for column in row: if message in column: return print context.stored_rows print message raise Exception("'%s' not found in stored rows" % message) @then('verify that file "{filename}" exists under "{path}"') def impl(context, filename, path): fullpath = "%s/%s" % (path, filename) fullpath = os.path.expandvars(fullpath) if not os.path.exists(fullpath): raise Exception('file "%s" is not exist' % fullpath) @given('waiting "{second}" seconds') @when('waiting "{second}" seconds') @then('waiting "{second}" seconds') def impl(context, second): time.sleep(float(second)) def get_opened_files(filename, pidfile): cmd = "if [ `uname -s` = 'SunOS' ]; then CMD=pfiles; else CMD='lsof -p'; fi && PATH=$PATH:/usr/bin:/usr/sbin $CMD `cat %s` | grep %s | wc -l" % (pidfile, filename) return commands.getstatusoutput(cmd) @then('the file "{filename}" by process "{pidfile}" is not closed') def impl(context, filename, pidfile): (ret, output) = get_opened_files(filename, pidfile) if int(output) == 0: raise 
Exception('file %s has been closed' % (filename)) @then('the file "{filename}" by process "{pidfile}" is closed') def impl(context, filename, pidfile): (ret, output) = get_opened_files(filename, pidfile) if int(output) != 0: raise Exception('file %s has not been closed' % (filename)) @then('the file "{filename}" by process "{pidfile}" opened number is "{num}"') def impl(context, filename, pidfile, num): (ret, output) = get_opened_files(filename, pidfile) if int(output) != int(num): raise Exception('file %s opened number %d is not %d' % (filename, int(output), int(num))) @given('the directory {path} exists') @then('the directory {path} exists') def impl(context, path): if not os.path.isdir(path): raise Exception('Directory "%s" does not exist' %path) @then('{file} should be found in tarball with prefix "{prefix}" within directory {path}') def impl(context, file, prefix, path): ## look for subdirectory created during collection collection_dirlist = os.listdir(path) if len(collection_dirlist) > 1: raise Exception('more then one data collection directory found.') if len(collection_dirlist) == 0: raise Exception('Collection directory was not found') ## get a listing of files in the subdirectory and make sure there is only one tarball found tarpath = os.path.join(path, collection_dirlist[0]) collection_filelist = os.listdir(tarpath) if len(collection_filelist) > 1: raise Exception('Too many files found in "%s"' %tarpath) if len(collection_filelist) == 0: raise Exception('No files found in "%s"' %tarpath) ## Expand tarball with prefix "GP_LOG_COLLECTION_" and search for given file within collection if prefix in collection_filelist[0]: ## extract the root tarball tar = tarfile.open(os.path.join(tarpath, collection_filelist[0]), "r:") tar.extractall(tarpath) tar.close() FOUND = False for tfile in os.listdir(tarpath): if prefix in tfile: continue ## Find any tar file that contain given file segtar = tarfile.open(os.path.join(tarpath, tfile), "r:gz") for tarinfo in segtar: if tarinfo.isreg() and file in tarinfo.name: FOUND = True segtar.close() break ## break segtar loop if FOUND: return ## we found the file so pass the test case else: ## the file should have been found in the first segment tarball raise Exception('Unable to find "%s" in "%s" tar file' % (file, collection_filelist[0]) ) else: raise Exception('tarball with prefix "%s" was not found' %prefix) @given('the directory {path} is removed or does not exist') @when('the directory {path} is removed or does not exist') @then('the directory {path} is removed or does not exist') def impl(context, path): if '*' in path: raise Exception('Wildcard not supported') if path[0] == '~': path = os.path.join(os.path.expanduser('~'), path[2:]) run_command(context, 'rm -rf %s' % path) @given('the user runs sbin command "{cmd}"') @when('the user runs sbin command "{cmd}"') @then('the user runs sbin command "{cmd}"') def impl(context, cmd): gpsbin = os.path.join(os.environ.get('GPHOME'), 'sbin') if not os.path.isdir(gpsbin): raise Exception('ERROR: GPHOME not set in environment') cmd = gpsbin + "/" + cmd ## don't us os.path join because command might have arguments run_command(context, cmd) @given('the OS type is not "{os_type}"') def impl(context, os_type): assert platform.system() != os_type @then('{file1} and {file2} should exist and have a new mtime') def impl(context, file1, file2): gphome = os.environ.get('GPHOME') if not os.path.isfile(os.path.join(gphome, file1)) or not os.path.isfile(os.path.join(gphome, file2)): raise Exception('installation of ' + 
context.utility + ' failed because one or more files do not exist in ' + os.path.join(gphome, file1)) file1_mtime = os.path.getmtime(os.path.join(gphome, file1)) file2_mtime = os.path.getmtime(os.path.join(gphome, file2)) if file1_mtime < context.currenttime or file2_mtime < context.currenttime: raise Exception('one or both file mtime was not updated') os.chdir(context.cwd) run_command(context, 'rm -rf %s' % context.path) @given('you are about to run a database query') @when('you are about to run a database query') @then('you are about to run a database query') def impl(context): context.sessionID = [] @given('the user ran this query "{query}" against database "{dbname}" for table "{table}" and it hangs') @when('the user ran this query "{query}" against database "{dbname}" for table "{table}" and it hangs') def impl(context, query, dbname, table): get_sessionID_query = "select sess_id from pg_stat_activity where current_query ~ '" + table + "' and current_query !~ 'pg_stat_activity'" if not check_db_exists(dbname): raise Exception('The database ' + dbname + 'Does not exist') thread.start_new_thread(getRows, (dbname, query)) time.sleep(15) context.sessionIDRow = getRows(dbname, get_sessionID_query) if len(context.sessionIDRow) == 0: raise Exception('Was not able to determine the session ID') context.sessionID.append( context.sessionIDRow[0][0] ) @then('user runs "{command}" against the queries session ID') def impl(context, command): ## build the session_list session_list = None for session in context.sessionID: if not session_list: session_list = str(session) else: session_list = session_list + ',' + str(session) command += " " + session_list run_gpcommand(context, command) @then('{file} file with queries sessionIDs should be found within directory {path}') def impl(context, file, path): ###################################################################################### ## This function needs to be modified.. changes are pending hung_analyzer revisions ## ###################################################################################### ## look for subdirectory created during collection collection_dirlist = os.listdir(path) if len(collection_dirlist) > 1: raise Exception('more then one data collection directory found. Possibly left over from a previous run of hung analyzer') if len(collection_dirlist) == 0: raise Exception('Collection directory was not found') ## Make sure we have a core file for each session sessions_found = 0 for rootdir, dirs, files in os.walk(os.path.join(path, collection_dirlist[0])): for session in context.sessionID: for f in files: core_prefix = file + str(session) + '.' if core_prefix in f: sessions_found += 1 break if sessions_found == 0: raise Exception('No core files were found in collection') if sessions_found != len(context.sessionID): raise Exception('Only ' + str(sessions_found) + ' core files were found out of ' + str(len(context.sessionID))) @then('{file} file should be found within directory {path}') def impl(context, file, path): ## look for subdirectory created during collection collection_dirlist = os.listdir(path) if len(collection_dirlist) > 1: raise Exception('more then one data collection directory found. 
Possibly left over from a previous run of hung analyzer') if len(collection_dirlist) == 0: raise Exception('Collection directory was not found') ## get a listing of files and dirs and prune until file is found for rootdir, dirs, files in os.walk(os.path.join(path, collection_dirlist[0])): for f in files: if file in f: return raise Exception('File was not found in :' + path) @then('database is restarted to kill the hung query') def impl(context): try: stop_database_if_started(context) except Exception as e: context.exception = None pass ## capture the thread dieing from our hung query if check_database_is_running(context): raise Exception('Failed to stop the database') start_database_if_not_started(context) if not check_database_is_running(): raise Exception('Failed to start the database') @then('partition "{partitionnum}" is added to partition table "{tablename}" in "{dbname}"') def impl(context, partitionnum, tablename, dbname): add_partition(context, partitionnum, tablename, dbname) @then('partition "{partitionnum}" is dropped from partition table "{tablename}" in "{dbname}"') def impl(context, partitionnum, tablename, dbname): drop_partition(context, partitionnum, tablename, dbname) @when('table "{tablename}" is dropped in "{dbname}"') @then('table "{tablename}" is dropped in "{dbname}"') def impl(context, tablename, dbname): drop_table_if_exists(context, table_name=tablename, dbname=dbname) def create_trigger_function(dbname, trigger_func_name, tablename): trigger_func_sql = """ CREATE OR REPLACE FUNCTION %s() RETURNS TRIGGER AS $$ BEGIN INSERT INTO %s VALUES(2001, 'backup', '2100-08-23'); END; $$ LANGUAGE plpgsql """ % (trigger_func_name, tablename) execute_sql(dbname, trigger_func_sql) def create_trigger(dbname, trigger_func_name, trigger_name, tablename): SQL = """ CREATE TRIGGER %s AFTER INSERT OR UPDATE OR DELETE ON %s FOR EACH STATEMENT EXECUTE PROCEDURE %s(); """ % (trigger_name, tablename, trigger_func_name) execute_sql(dbname, SQL) @given('there is a trigger "{trigger_name}" on table "{tablename}" in "{dbname}" based on function "{trigger_func_name}"') def impl(context, trigger_name, tablename, dbname, trigger_func_name): create_trigger_function(dbname, trigger_func_name, tablename) create_trigger(dbname, trigger_func_name, trigger_name, tablename) @then('there is a trigger function "{trigger_func_name}" on table "{tablename}" in "{dbname}"') def impl(context, trigger_func_name, tablename, dbname): create_trigger_function(dbname, trigger_func_name, tablename) @when('the index "{index_name}" in "{dbname}" is dropped') def impl(context, index_name, dbname): drop_index_sql = """DROP INDEX %s""" % index_name execute_sql(dbname, drop_index_sql) @when('the trigger "{trigger_name}" on table "{tablename}" in "{dbname}" is dropped') def impl(context, trigger_name, tablename, dbname): drop_trigger_sql = """DROP TRIGGER %s ON %s""" % (trigger_name, tablename) execute_sql(dbname, drop_trigger_sql) @given('all the segments are running') @when('all the segments are running') @then('all the segments are running') def impl(context): if not are_segments_running(): raise Exception("all segments are not currently running") return @given('the "{seg}" segment information is saved') @when('the "{seg}" segment information is saved') @then('the "{seg}" segment information is saved') def impl(context, seg): gparray = GpArray.initFromCatalog(dbconn.DbURL()) if seg == "primary": primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()] context.pseg = primary_segs[0] 
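    # Note (added comment): the segment details saved onto the context here (hostname, port, dbid, data directory) are reused by the later postmaster.pid and failover steps.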
context.pseg_data_dir = context.pseg.getSegmentDataDirectory() context.pseg_hostname = context.pseg.getSegmentHostName() context.pseg_dbid = context.pseg.getSegmentDbId() elif seg == "mirror": mirror_segs = [seg for seg in gparray.getDbList() if seg.isSegmentMirror()] context.mseg = mirror_segs[0] context.mseg_hostname = context.mseg.getSegmentHostName() context.mseg_dbid = context.mseg.getSegmentDbId() context.mseg_data_dir = context.mseg.getSegmentDataDirectory() @when('we run a sample background script to generate a pid on "{seg}" segment') def impl(context, seg): if seg == "primary": if not hasattr(context, 'pseg_hostname'): raise Exception("primary seg host is not saved in the context") hostname = context.pseg_hostname elif seg == "smdw": if not hasattr(context, 'standby_host'): raise Exception("Standby host is not saved in the context") hostname = context.standby_host filename = os.path.join(os.getcwd(), './test/behave/mgmt_utils/steps/data/pid_background_script.py') cmd = Command(name="Remove background script on remote host", cmdStr='rm -f /tmp/pid_background_script.py', remoteHost=hostname, ctxt=REMOTE) cmd.run(validateAfter=True) cmd = Command(name="Copy background script to remote host", cmdStr='scp %s %s:/tmp' % (filename, hostname)) cmd.run(validateAfter=True) cmd = Command(name="Run Bg process to save pid", cmdStr='sh -c "python /tmp/pid_background_script.py" &>/dev/null &', remoteHost=hostname, ctxt=REMOTE) cmd.run(validateAfter=True) cmd = Command(name="get bg pid", cmdStr="ps ux | grep pid_background_script.py | grep -v grep | awk '{print \$2}'", remoteHost=hostname, ctxt=REMOTE) cmd.run(validateAfter=True) context.bg_pid = cmd.get_results().stdout.strip() if not context.bg_pid: raise Exception("Unable to obtain the pid of the background script. 
Seg Host: %s, get_results: %s" % (hostname, cmd.get_results().stdout.strip()))

@when('the background pid is killed on "{seg}" segment')
@then('the background pid is killed on "{seg}" segment')
def impl(context, seg):
    if seg == "primary":
        if not hasattr(context, 'pseg_hostname'):
            raise Exception("primary seg host is not saved in the context")
        hostname = context.pseg_hostname
    elif seg == "smdw":
        if not hasattr(context, 'standby_host'):
            raise Exception("Standby host is not saved in the context")
        hostname = context.standby_host

    cmd = Command(name="get bg pid", cmdStr="ps ux | grep pid_background_script.py | grep -v grep | awk '{print \$2}'", remoteHost=hostname, ctxt=REMOTE)
    cmd.run(validateAfter=True)
    pids = cmd.get_results().stdout.strip().splitlines()
    for pid in pids:
        cmd = Command(name="killbg pid", cmdStr='kill -9 %s' % pid, remoteHost=hostname, ctxt=REMOTE)
        cmd.run(validateAfter=True)

@when('we generate the postmaster.pid file with the background pid on "{seg}" segment')
def impl(context, seg):
    if seg == "primary":
        if not hasattr(context, 'pseg_hostname'):
            raise Exception("primary seg host is not saved in the context")
        hostname = context.pseg_hostname
        data_dir = context.pseg_data_dir
    elif seg == "smdw":
        if not hasattr(context, 'standby_host'):
            raise Exception("Standby host is not saved in the context")
        hostname = context.standby_host
        data_dir = context.standby_host_data_dir

    pid_file = os.path.join(data_dir, 'postmaster.pid')
    pid_file_orig = pid_file + '.orig'

    cmd = Command(name="Copy pid file", cmdStr='cp %s %s' % (pid_file_orig, pid_file), remoteHost=hostname, ctxt=REMOTE)
    cmd.run(validateAfter=True)

    cpCmd = Command(name='copy pid file to master for editing', cmdStr='scp %s:%s /tmp' % (hostname, pid_file))
    cpCmd.run(validateAfter=True)

    with open('/tmp/postmaster.pid', 'r') as fr:
        lines = fr.readlines()
    lines[0] = "%s\n" % context.bg_pid
    with open('/tmp/postmaster.pid', 'w') as fw:
        fw.writelines(lines)

    cpCmd = Command(name='copy pid file to segment after editing', cmdStr='scp /tmp/postmaster.pid %s:%s' % (hostname, pid_file))
    cpCmd.run(validateAfter=True)

@when('we generate the postmaster.pid file with a non running pid on the same "{seg}" segment')
def impl(context, seg):
    if seg == "primary":
        data_dir = context.pseg_data_dir
        hostname = context.pseg_hostname
    elif seg == "mirror":
        data_dir = context.mseg_data_dir
        hostname = context.mseg_hostname
    elif seg == "smdw":
        if not hasattr(context, 'standby_host'):
            raise Exception("Standby host is not saved in the context")
        hostname = context.standby_host
        data_dir = context.standby_host_data_dir

    pid_file = os.path.join(data_dir, 'postmaster.pid')
    pid_file_orig = pid_file + '.orig'

    cmd = Command(name="Copy pid file", cmdStr='cp %s %s' % (pid_file_orig, pid_file), remoteHost=hostname, ctxt=REMOTE)
    cmd.run(validateAfter=True)

    cpCmd = Command(name='copy pid file to master for editing', cmdStr='scp %s:%s /tmp' % (hostname, pid_file))
    cpCmd.run(validateAfter=True)

    with open('/tmp/postmaster.pid', 'r') as fr:
        pid = fr.readline().strip()

    while True:
        cmd = Command(name="get non-existing pid", cmdStr="ps ux | grep %s | grep -v grep | awk '{print \$2}'" % pid, remoteHost=hostname, ctxt=REMOTE)
        cmd.run(validateAfter=True)
        if cmd.get_results().stdout.strip():
            # The pid read from the file is a string; convert before incrementing
            # so we keep probing until we find a pid with no running process.
            pid = str(int(pid) + 1)
        else:
            break

    with open('/tmp/postmaster.pid', 'r') as fr:
        lines = fr.readlines()
    lines[0] = "%s\n" % pid
    with open('/tmp/postmaster.pid', 'w') as fw:
        fw.writelines(lines)

    cpCmd = Command(name='copy pid file to segment after editing', cmdStr='scp /tmp/postmaster.pid %s:%s' % (hostname,
pid_file)) cpCmd.run(validateAfter=True) @when('the user starts one "{seg}" segment') def impl(context, seg): if seg == "primary": dbid = context.pseg_dbid hostname = context.pseg_hostname segment = context.pseg elif seg == "mirror": dbid = context.mseg_dbid hostname = context.mseg_hostname segment = context.mseg segStartCmd = SegmentStart( name = "Starting new segment dbid %s on host %s." % (str(dbid), hostname) , gpdb = segment , numContentsInCluster = 0 # Starting seg on it's own. , era = None , mirrormode = MIRROR_MODE_MIRRORLESS , utilityMode = False , ctxt = REMOTE , remoteHost = hostname , noWait = False , timeout = 300) segStartCmd.run(validateAfter=True) @when('the postmaster.pid file on "{seg}" segment is saved') def impl(context, seg): if seg == "primary": data_dir = context.pseg_data_dir hostname = context.pseg_hostname elif seg == "mirror": data_dir = context.mseg_data_dir hostname = context.mseg_hostname elif seg == "smdw": if not hasattr(context, 'standby_host'): raise Exception("Standby host is not saved in the context") hostname = context.standby_host data_dir = context.standby_host_data_dir pid_file = os.path.join(data_dir, 'postmaster.pid') pid_file_orig = pid_file + '.orig' cmd = Command(name="Copy pid file", cmdStr='cp %s %s' % (pid_file, pid_file_orig), remoteHost=hostname, ctxt=REMOTE) cmd.run(validateAfter=True) @then('the backup pid file is deleted on "{seg}" segment') def impl(context, seg): if seg == "primary": data_dir = context.pseg_data_dir hostname = context.pseg_hostname elif seg == "mirror": data_dir = context.mseg_data_dir hostname = context.mseg_hostname elif seg == "smdw": data_dir = context.standby_host_data_dir hostname = context.standby_host cmd = Command(name="Remove pid file", cmdStr='rm -f %s' % (os.path.join(data_dir, 'postmaster.pid.orig')), remoteHost=hostname, ctxt=REMOTE) cmd.run(validateAfter=True) @given('the user creates an init config file "{to_file}" without mirrors') @when('the user creates an init config file "{to_file}" without mirrors') @then('the user creates an init config file "{to_file}" without mirrors') def impl(context, to_file): write_lines = [] BLDWRAP_TOP = os.environ.get('BLDWRAP_TOP') from_file = BLDWRAP_TOP + '/sys_mgmt_test/test/general/cluster_conf.out' with open(from_file) as fr: lines = fr.readlines() for line in lines: if not line.startswith('REPLICATION_PORT_BASE') and not line.startswith('MIRROR_REPLICATION_PORT_BASE') and not line.startswith('MIRROR_PORT_BASE') and not line.startswith('declare -a MIRROR_DATA_DIRECTORY'): write_lines.append(line) with open(to_file, 'w+') as fw: fw.writelines(write_lines) @given('the user creates mirrors config file "{to_file}"') @when('the user creates mirrors config file "{to_file}"') @then('the user creates mirrors config file "{to_file}"') def impl(context, to_file): data_dirs = [] BLDWRAP_TOP = os.environ.get('BLDWRAP_TOP') from_file = BLDWRAP_TOP + '/sys_mgmt_test/test/general/cluster_conf.out' with open(from_file) as fr: lines = fr.readlines() for line in lines: if line.startswith('declare -a MIRROR_DATA_DIRECTORY'): data_dirs = line.split('(')[-1].strip().strip(')').split() break if not data_dirs: raise Exception("Could not find MIRROR_DATA_DIRECTORY in config file %s" % from_file) with open(to_file, 'w+') as fw: for dir in data_dirs: fw.write(dir.strip(')') + '\n') @given('the standby hostname is saved') @when('the standby hostname is saved') @then('the standby hostname is saved') def impl(context): gparray = GpArray.initFromCatalog(dbconn.DbURL()) primary_segs = [seg for 
seg in gparray.getDbList() if (seg.isSegmentPrimary() and not seg.isSegmentMaster())]
    context.standby = primary_segs[0].getSegmentHostName()

@given('user runs the init command "{cmd}" with the saved standby host')
@when('user runs the init command "{cmd}" with the saved standby host')
@then('user runs the init command "{cmd}" with the saved standby host')
def impl(context, cmd):
    # Append the saved standby host as the -s argument and run the command;
    # the original built a plain string and then called .run() on it.
    run_gpcommand(context, cmd + ' -s %s' % context.standby)

@given('there is a sequence "{seq_name}" in "{dbname}"')
def impl(context, seq_name, dbname):
    sequence_sql = 'CREATE SEQUENCE %s' % seq_name
    execute_sql(dbname, sequence_sql)

@when('the user removes the "{cmd}" command on standby')
@then('the user removes the "{cmd}" command on standby')
def impl(context, cmd):
    cmdStr = 'chmod u+rw ~/.bashrc && cp ~/.bashrc ~/.bashrc.backup'
    run_cmd = Command('run remote command', cmdStr, ctxt=REMOTE, remoteHost=context.standby_host)
    run_cmd.run(validateAfter=True)

    cmdStr = """echo >>~/.bashrc && echo "shopt -s expand_aliases" >>~/.bashrc && echo "alias %s='no_command'" >>~/.bashrc""" % cmd
    run_cmd = Command('run remote command', cmdStr, ctxt=REMOTE, remoteHost=context.standby_host)
    run_cmd.run(validateAfter=True)

@when('the user restores the "{cmd}" command on the standby')
@then('the user restores the "{cmd}" command on the standby')
def impl(context, cmd):
    cmdStr = 'cp ~/.bashrc.backup ~/.bashrc'
    run_cmd = Command('run remote command', cmdStr, ctxt=REMOTE, remoteHost=context.standby_host)
    run_cmd.run(validateAfter=True)

@when('the user stops the syncmaster')
@then('the user stops the syncmaster')
def impl(context):
    host = context.gparray.standbyMaster.hostname
    # cat is added because pgrep returns all the processes of the tree, while
    # child processes are killed when the parent is killed, which produces an error
    cmdStr = 'pgrep syncmaster | xargs -i kill {} | cat'
    run_cmd = Command('kill syncmaster', cmdStr, ctxt=REMOTE, remoteHost=host)
    run_cmd.run(validateAfter=True)
    datadir = context.gparray.standbyMaster.datadir

@when('the user starts the syncmaster')
@then('the user starts the syncmaster')
def impl(context):
    host = context.gparray.standbyMaster.hostname
    datadir = context.gparray.standbyMaster.datadir
    port = context.gparray.standbyMaster.port
    dbid = context.gparray.standbyMaster.dbid
    ncontents = context.gparray.getNumSegmentContents()
    GpStandbyStart.remote('test start syncmaster', host, datadir, port, ncontents, dbid)

@when('save the cluster configuration')
@then('save the cluster configuration')
def impl(context):
    context.gparray = GpArray.initFromCatalog(dbconn.DbURL())

@given('partition "{partition}" of partition table "{schema_parent}.{table_name}" is assumed to be in schema "{schema_child}" in database "{dbname}"')
@when('partition "{partition}" of partition table "{schema_parent}.{table_name}" is assumed to be in schema "{schema_child}" in database "{dbname}"')
@then('partition "{partition}" of partition table "{schema_parent}.{table_name}" is assumed to be in schema "{schema_child}" in database "{dbname}"')
def impl(context, partition, schema_parent, table_name, schema_child, dbname):
    part_t = get_partition_names(schema_parent.strip(), table_name.strip(), dbname.strip(), 1, partition)
    if len(part_t) < 1 or len(part_t[0]) < 1:
        print part_t
    a_partition_name = part_t[0][0].strip()
    alter_sql = """ALTER TABLE %s SET SCHEMA %s""" % (a_partition_name, schema_child)
    execute_sql(dbname, alter_sql)

@given('this test sleeps for "{secs}" seconds')
@when('this test sleeps for "{secs}" seconds')
@then('this test sleeps for
"{secs}" seconds') def impl(context, secs): secs = float(secs) time.sleep(secs) @then('verify that there are no duplicates in column "{columnname}" of table "{tablename}" in "{dbname}"') def impl(context, columnname, tablename, dbname): duplicate_sql = 'SELECT %s, COUNT(*) FROM %s GROUP BY %s HAVING COUNT(*) > 1' % (columnname, tablename, columnname) rows = getRows(dbname, duplicate_sql) if rows: raise Exception('Found duplicate rows in the column "%s" for table "%s" in database "%s"' % (columnname, tablename, dbname)) def execute_sql_for_sec(dbname, query, sec): with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn: dbconn.execSQL(conn, query) conn.commit() time.sleep(sec) @given('the user runs the query "{query}" on "{dbname}" for "{sec}" seconds') @when('the user runs the query "{query}" on "{dbname}" for "{sec}" seconds') @then('the user runs the query "{query}" on "{dbname}" for "{sec}" seconds') def impl(context, query, dbname, sec): if query.lower().startswith('create') or query.lower().startswith('insert'): thread.start_new_thread(execute_sql_for_sec, (dbname, query, float(sec))) else: thread.start_new_thread(getRows, (dbname, query)) time.sleep(30) @given('verify that the contents of the files "{filepath1}" and "{filepath2}" are identical') @when('verify that the contents of the files "{filepath1}" and "{filepath2}" are identical') @then('verify that the contents of the files "{filepath1}" and "{filepath2}" are identical') def impl(context, filepath1, filepath2): contents1 = [] contents2 = [] with open(filepath1) as fr1: contents1 = fr1.readlines() with open(filepath2) as fr2: contents2 = fr2.readlines() if (contents1 != contents2): raise Exception("Contents of the files: %s and %s do not match" % (filepath1, filepath2)) def get_gp_toolkit_info(context, dbname, fname): cmdStr = """psql -c '\d gp_toolkit.*' -d %s > %s""" % (dbname, fname) cmd = Command(name='get gp_toolkit info to file', cmdStr=cmdStr) cmd.run(validateAfter=True) @given('the gp_toolkit schema for "{dbname}" is saved for verification') def impl(context, dbname): get_gp_toolkit_info(context, dbname, 'gp_toolkit_backup') @then('the gp_toolkit schema for "{dbname}" is verified after restore') def impl(context, dbname): get_gp_toolkit_info(context, dbname, 'gp_toolkit_restore') diff_backup_restore_data(context, 'gp_toolkit_backup', 'gp_toolkit_restore') @given('the standby is not initialized') @then('the standby is not initialized') def impl(context): standby = get_standby_host() if standby: context.cluster_had_standby = True context.standby_host = standby run_gpcommand(context, 'gpinitstandby -ra') @given('"{path}" has "{perm}" permissions') @then('"{path}" has "{perm}" permissions') def impl(context, path, perm): path = os.path.expandvars(path) if not os.path.exists(path): raise Exception('Path does not exist! 
"%s"' % path) os.chmod(path, int(perm, 8)) @when('user can "{can_ssh}" ssh locally on standby') @then('user can "{can_ssh}" ssh locally on standby') def impl(context, can_ssh): if not hasattr(context, 'standby_host'): raise Exception('Standby host not stored in context !') if can_ssh.strip() == 'not': cmdStr = 'mv ~/.ssh/authorized_keys ~/.ssh/authorized_keys.bk' else: cmdStr = 'mv ~/.ssh/authorized_keys.bk ~/.ssh/authorized_keys' cmd = Command(name='disable ssh locally', cmdStr=cmdStr, ctxt=REMOTE, remoteHost=context.standby_host) cmd.run(validateAfter=True) @given('all the compression data from "{dbname}" is saved for verification') def impl(context, dbname): partitions = get_partition_list('ao', dbname) + get_partition_list('co', dbname) with open('test/data/compression_{db}_backup'.format(db=dbname), 'w') as fp: with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn: for p in partitions: query = """SELECT get_ao_compression_ratio('{schema}.{table}')""".format(schema=p[1], table=p[2]) compression_rate = dbconn.execSQLForSingleton(conn, query) fp.write('{schema}.{table}:{comp}\n'.format(schema=p[1], table=p[2], comp=compression_rate)) @then('verify that the compression ratio of "{table}" in "{dbname}" is good') def impl(context, table, dbname): with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn: query = """SELECT get_ao_compression_ratio('{table}')""".format(table=table) compression_rate = dbconn.execSQLForSingleton(conn, query) found = False with open('test/data/compression_{db}_backup'.format(db=dbname)) as fp: for line in fp: t, c = line.split(':') if t == table: if float(c) != compression_rate and float(c) - 0.1 * float(c) > compression_rate: #10% more than original compression rate raise Exception('Expected compression ratio to be greater than or equal to %s but got %s' % (c, compression_rate)) found = True if not found: raise Exception('Compression ratio for table %s was not stored' % table) @then('verify that the data of tables in "{dbname}" is validated after reload') def impl(context, dbname): tbls = get_table_names(dbname) backed_up_data = [] reloaded_data = [] for t in tbls: with open('test/data/%s.%s_backup' % (t[0], t[1])) as fp: for line in fp: toks = line.split() backed_up_data.append(' '.join(toks[1:])) #Ignore the gp_segment_id value since it changes after reload with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn: res = dbconn.execSQL(conn, 'select * from %s.%s' % (t[0], t[1])) for r in res: reloaded_data.append(' '.join([str(x) for x in r])) if sorted(reloaded_data) != sorted(backed_up_data): raise Exception('Data does not match for table %s.%s' % (t[0], t[1])) @given('the schemas "{schema_list}" do not exist in "{dbname}"') @then('the schemas "{schema_list}" do not exist in "{dbname}"') def impl(context, schema_list, dbname): schemas = [s.strip() for s in schema_list.split(',')] for s in schemas: drop_schema_if_exists(context, s.strip(), dbname) @then('verify that the schema "{schema_name}" exists in "{dbname}"') def impl(context, schema_name, dbname): schema_exists = check_schema_exists(context, schema_name, dbname) if not schema_exists: raise Exception("Schema '%s' does not exist in the database '%s'" % (schema_name,dbname)) def get_log_name(utilname, logdir): today = datetime.now() logname = "%s/%s_%s.log" % (logdir, utilname, today.strftime('%Y%m%d')) return logname @then('verify that a log was created by {utilname} in the user\'s "{dirname}" directory') def impl(context, utilname, dirname): absdirname = "%s/%s" % (os.path.expanduser("~"), dirname) if 
not os.path.exists(absdirname): raise Exception('No such directory: %s' % absdirname) logname = get_log_name(utilname, absdirname) if not os.path.exists(logname): raise Exception('Log "%s" was not created' % logname) @then('verify that a log was created by {utilname} in the "{dirname}" directory') def impl(context, utilname, dirname): if not os.path.exists(dirname): raise Exception('No such directory: %s' % dirname) logname = get_log_name(utilname, dirname) if not os.path.exists(logname): raise Exception('Log "%s" was not created' % logname) @given('a table is created containing rows of length "{length}" with connection "{dbconn}"') def impl(context, length, dbconn): length = int(length) wide_row_file = 'test/behave/mgmt_utils/steps/data/gptransfer/wide_row_%s.sql' % length tablename = 'public.wide_row_%s' % length entry = "x" * length with open (wide_row_file, 'w') as sql_file: sql_file.write("CREATE TABLE %s (a integer, b text);\n" % tablename) for i in range(10): sql_file.write("INSERT INTO %s VALUES (%d, \'%s\');\n" % (tablename, i, entry)) command = '%s -f %s'%(dbconn, wide_row_file) run_gpcommand(context, command) @then('drop the table "{tablename}" with connection "{dbconn}"') def impl(context, tablename, dbconn): command = "%s -c \'drop table if exists %s\'"%(dbconn, tablename) run_gpcommand(context, command) # gptransfer must be run in verbose mode (-v) with default log location when using this step @then('verify that gptransfer has a sub batch size of "{num}"') def impl(context, num): num = int(num) logdir = "%s/gpAdminLogs" % os.path.expanduser("~") if not os.path.exists(logdir): raise Exception('No such directory: %s' % absdirname) logname = get_log_name('gptransfer', logdir) full_path = os.path.join(logdir, logname) if not os.path.isfile(full_path): raise Exception ("Can not find %s file: %s" % (file_type, full_path)) contents = "" with open(full_path) as fd: contents = fd.read() for i in range(num): worker = "\[DEBUG\]:-\[worker%d\]" % i try: check_stdout_msg(context, worker) except: raise Exception("gptransfer sub batch size should be %d, is %d" % (num, i)) worker = "\[DEBUG\]:-\[worker%d\]" % num try: check_string_not_present_stdout(context, worker) except: raise Exception("gptransfer sub batch size should be %d, is at least %d" % (num, num+1)) # Read in a full map file, remove the first host, print it to a new file @given('an incomplete map file is created') def impl(context): map_file = os.environ['GPTRANSFER_MAP_FILE'] contents = [] with open(map_file, 'r') as fd: contents = fd.readlines() with open('/tmp/incomplete_map_file', 'w') as fd: for line in contents[1:]: fd.write(line) @given('there is a table "{table_name}" dependent on function "{func_name}" in database "{dbname}" on the source system') def impl(context, table_name, func_name, dbname): dbconn = 'psql -d %s -p $GPTRANSFER_SOURCE_PORT -U $GPTRANSFER_SOURCE_USER -h $GPTRANSFER_SOURCE_HOST' % dbname SQL = """CREATE TABLE %s (num integer); CREATE FUNCTION %s (integer) RETURNS integer AS 'select abs(\$1);' LANGUAGE SQL IMMUTABLE; CREATE INDEX test_index ON %s (%s(num))""" % (table_name, func_name, table_name, func_name) command = '%s -c "%s"'%(dbconn, SQL) run_command(context, command) @then('the function-dependent table "{table_name}" and the function "{func_name}" in database "{dbname}" are dropped on the source system') def impl(context, table_name, func_name, dbname): dbconn = 'psql -d %s -p $GPTRANSFER_SOURCE_PORT -U $GPTRANSFER_SOURCE_USER -h $GPTRANSFER_SOURCE_HOST' % dbname SQL = """DROP TABLE %s; DROP 
FUNCTION %s(integer);""" % (table_name, func_name)
    command = '%s -c "%s"' % (dbconn, SQL)
    run_command(context, command)

@then('verify that function "{func_name}" exists in database "{dbname}"')
def impl(context, func_name, dbname):
    SQL = """SELECT proname FROM pg_proc WHERE proname = '%s';""" % func_name
    proname = getRows(dbname, SQL)[0][0]
    if proname != func_name:
        raise Exception('Function %s does not exist in %s' % (func_name, dbname))

@when('the user runs the query "{query}" in database "{dbname}" and sends the output to "{filename}"')
def impl(context, query, dbname, filename):
    cmd = "psql -d %s -p $GPTRANSFER_DEST_PORT -U $GPTRANSFER_DEST_USER -c '\copy (%s) to %s'" % (dbname, query, filename)
    thread.start_new_thread(run_gpcommand, (context, cmd))
    time.sleep(10)

@given('the user runs the command "{cmd}" in the background')
@when('the user runs the command "{cmd}" in the background')
def impl(context, cmd):
    thread.start_new_thread(run_command, (context, cmd))
    time.sleep(10)

@given('the user runs the command "{cmd}" in the background without sleep')
@when('the user runs the command "{cmd}" in the background without sleep')
def impl(context, cmd):
    thread.start_new_thread(run_command, (context, cmd))

@then('verify that the file "{filename}" contains the string "{output}"')
def impl(context, filename, output):
    contents = ''
    with open(filename) as fr:
        for line in fr:
            contents = line.strip()
    print contents
    check_stdout_msg(context, output)

@then('verify that the last line of the master postgres configuration file contains the string "{output}"')
def impl(context, output):
    contents = ''
    filename = master_data_dir + "/postgresql.conf"
    with open(filename) as fr:
        for line in fr:
            contents = line.strip()
    pat = re.compile(output)
    if not pat.search(contents):
        err_str = "Expected stdout string '%s' and found: '%s'" % (output, contents)
        raise Exception(err_str)

@then('the user waits for "{process_name}" to finish running')
def impl(context, process_name):
    run_command(context, "ps ux | grep `which %s` | grep -v grep | awk '{print $2}' | xargs" % process_name)
    pids = context.stdout_message.split()
    while len(pids) > 0:
        for pid in pids:
            try:
                os.kill(int(pid), 0)
            except OSError, error:
                pids.remove(pid)
        time.sleep(10)

@given('the gpfdists occupying port {port} on host "{hostfile}"')
def impl(context, port, hostfile):
    remote_gphome = os.environ.get('GPHOME')
    gp_source_file = os.path.join(remote_gphome, 'greenplum_path.sh')
    source_map_file = os.environ.get(hostfile)
    dir = '/tmp'
    ctxt = 2
    with open(source_map_file, 'r') as f:
        for line in f:
            host = line.strip().split(',')[0]
            if host in ('localhost', '127.0.0.1', socket.gethostname()):
                ctxt = 1
            gpfdist = Gpfdist('gpfdist on host %s' % host, dir, port, os.path.join('/tmp', 'gpfdist.pid'), ctxt, host, gp_source_file)
            gpfdist.startGpfdist()

@then('the gpfdists running on port {port} get cleaned up from host "{hostfile}"')
def impl(context, port, hostfile):
    remote_gphome = os.environ.get('GPHOME')
    gp_source_file = os.path.join(remote_gphome, 'greenplum_path.sh')
    source_map_file = os.environ.get(hostfile)
    dir = '/tmp'
    ctxt = 2
    with open(source_map_file, 'r') as f:
        for line in f:
            host = line.strip().split(',')[0]
            if host in ('localhost', '127.0.0.1', socket.gethostname()):
                ctxt = 1
            gpfdist = Gpfdist('gpfdist on host %s' % host, dir, port, os.path.join('/tmp', 'gpfdist.pid'), ctxt, host, gp_source_file)
            gpfdist.cleanupGpfdist()

@when('verify that db_dumps directory does not exist in master or
segments') def impl(context): check_dump_dir_exists(context, 'template1') @when('verify that the restored table "{table_name}" in database "{dbname}" is analyzed') @then('verify that the restored table "{table_name}" in database "{dbname}" is analyzed') def impl(context, table_name, dbname): if verify_restored_table_is_analyzed(context, table_name, dbname) is not True: raise Exception("The restored table \'%s\' of database \'%s\' is not analyzed" % (table_name, dbname)) @when('verify that the table "{table_name}" in database "{dbname}" is not analyzed') @then('verify that the table "{table_name}" in database "{dbname}" is not analyzed') def impl(context, table_name, dbname): if (verify_restored_table_is_analyzed(context, table_name, dbname)): raise Exception("The restored table \'%s\' of database \'%s\' is analyzed" % (table_name, dbname)) @given('the database "{dbname}" is analyzed') def impl(context, dbname): analyze_database(context, dbname) @when('the user deletes rows from the table "{table_name}" of database "{dbname}" where "{column_name}" is "{info}"') @then('the user deletes rows from the table "{table_name}" of database "{dbname}" where "{column_name}" is "{info}"') def impl(context, dbname, table_name, column_name, info): delete_rows_from_table(context, dbname, table_name, column_name, info) @then('verify that the query "{query}" in database "{dbname}" returns "{nrows}"') def impl(context, dbname, query, nrows): check_count_for_specific_query(dbname, query, int(nrows)) @then('verify that the file "{filepath}" contains "{line}"') def impl(context, filepath, line): if line not in open(filepath).read(): raise Exception("The file '%s' does not contain '%s'" % (filepath, line)) @then('verify that the file "{filepath}" does not contain "{line}"') def impl(context, filepath, line): if line in open(filepath).read(): raise Exception("The file '%s' does contain '%s'" % (filepath, line)) @then('verify that gptransfer is in order of "{filepath}" when partition transfer is "{is_partition_transfer}"') def impl(context, filepath, is_partition_transfer): table = [] with open(filepath) as f: table = f.read().splitlines() if is_partition_transfer != "None": table = [x.split(',')[0] for x in table] split_message = re.findall("Starting transfer of.*\n", context.stdout_message) if len(split_message) == 0 and len(table) != 0: raise Exception("There were no tables transfered") counter_table = 0 counter_split = 0 found = 0 while counter_table < len(table) and counter_split < len(split_message): for i in range(counter_split, len(split_message)): pat = table[counter_table] + " to" prog = re.compile(pat) res = prog.search(split_message[i]) if not res: counter_table += 1 break else: found += 1 counter_split += 1 if found != len(split_message): raise Exception("expected to find %s tables in order and only found %s in order" % (len(split_message), found)) @given('database "{dbname}" is dropped and recreated') @when('database "{dbname}" is dropped and recreated') @then('database "{dbname}" is dropped and recreated') def impl(context, dbname): drop_database_if_exists(context, dbname) create_database(context, dbname) @given('the user runs the query "{query}" on "{dbname}" in the background until stopped') @when('the user runs the query "{query}" on "{dbname}" in the background until stopped') @then('the user runs the query "{query}" on "{dbname}" in the background until stopped') def impl(context, query, dbname): thread.start_new_thread(execute_sql_until_stopped, (context, dbname, query)) def 
execute_sql_until_stopped(context, dbname, query): with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn: dbconn.execSQL(conn, query) conn.commit() while True: if hasattr(context, 'background_query_lock'): break time.sleep(1) @when('the user stops all background queries') @then('the user stops all background queries') def impl(context): context.background_query_lock = True @given('the test is initialized') def impl(context): context.execute_steps(u''' Given the database is running And database "bkdb" is dropped and recreated And there are no backup files And the backup files in "/tmp" are deleted ''') @given('the test is initialized with database "{dbname}"') def impl(context, dbname): context.execute_steps(u''' Given the database is running And database "%s" is dropped and recreated ''' % dbname) @given('the test is initialized for special characters') def impl(context): context.execute_steps(u''' Given the database is running And the user runs "psql -f test/behave/mgmt_utils/steps/data/special_chars/create_special_database.sql template1" And the user runs "psql -f test/behave/mgmt_utils/steps/data/special_chars/create_special_schema.sql template1" And the user runs "psql -f test/behave/mgmt_utils/steps/data/special_chars/create_special_table.sql template1" And the user runs "psql -f test/behave/mgmt_utils/steps/data/special_chars/insert_into_special_table.sql template1" ''') @then('validate and run gpcheckcat repair') def impl(context): context.execute_steps(u''' Then gpcheckcat should print repair script\(s\) generated in dir gpcheckcat.repair.* to stdout Then the path "gpcheckcat.repair.*" is found in cwd "1" times Then run all the repair scripts in the dir "gpcheckcat.repair.*" And the path "gpcheckcat.repair.*" is removed from current working directory ''') @given('there is a "{tabletype}" table "{tablename}" in "{dbname}" with data') @then('there is a "{tabletype}" table "{tablename}" in "{dbname}" with data') @when('there is a "{tabletype}" table "{tablename}" in "{dbname}" with data') def impl(context, tabletype, tablename, dbname): populate_regular_table_data(context, tabletype, tablename, 'None', dbname, with_data=True) @given('there is a "{tabletype}" partition table "{table_name}" in "{dbname}" with data') @then('there is a "{tabletype}" partition table "{table_name}" in "{dbname}" with data') @when('there is a "{tabletype}" partition table "{table_name}" in "{dbname}" with data') def impl(context, tabletype, table_name, dbname): create_partition(context, tablename=table_name, storage_type=tabletype, dbname=dbname, with_data=True) @then('read pid from file "{filename}" and kill the process') @when('read pid from file "{filename}" and kill the process') @given('read pid from file "{filename}" and kill the process') def impl(context, filename): with open(filename) as fr: pid = fr.readline().strip() if not pid: raise Exception("process id '%s' not found in the file '%s'" % (pid,filename)) cmd = Command(name="killing pid", cmdStr='kill -9 %s' % pid) cmd.run(validateAfter=True) @then('an attribute of table "{table}" in database "{dbname}" is deleted on segment with content id "{segid}"') def impl(context, table, dbname, segid): local_cmd = 'psql %s -t -c "SELECT port,hostname FROM gp_segment_configuration WHERE content=%s and role=\'p\';"' % (dbname, segid) run_command(context, local_cmd) port, host = context.stdout_message.split("|") port = port.strip() host = host.strip() user = os.environ.get('USER') source_file = os.path.join(os.environ.get('GPHOME'),'greenplum_path.sh') # 
Yes, the below line is ugly. It looks much uglier when done with separate strings, given the multiple levels of escaping required. remote_cmd = """ ssh %s "source %s; export PGUSER=%s; export PGPORT=%s; export PGOPTIONS=\\\"-c gp_session_role=utility\\\"; psql -d %s -c \\\"SET allow_system_table_mods=\'dml\'; DELETE FROM pg_attribute where attrelid=\'%s\'::regclass::oid;\\\"" """ % (host, source_file, user, port, dbname, table) run_command(context, remote_cmd.strip()) @then('The user runs sql "{query}" in "{dbname}" on first primary segment') @when('The user runs sql "{query}" in "{dbname}" on first primary segment') @given('The user runs sql "{query}" in "{dbname}" on first primary segment') def impl(context, query, dbname): host, port = get_primary_segment_host_port() psql_cmd = "PGDATABASE=\'%s\' PGOPTIONS=\'-c gp_session_role=utility\' psql -h %s -p %s -c \"%s\"; " % (dbname, host, port, query) Command(name='Running Remote command: %s' % psql_cmd, cmdStr = psql_cmd).run(validateAfter=True) @then('The user runs sql file "{file}" in "{dbname}" on all the segments') @when('The user runs sql file "{file}" in "{dbname}" on all the segments') @given('The user runs sql file "{file}" in "{dbname}" on all the segments') def impl(context, file, dbname): with open(file) as fd: query = fd.read().strip() gparray = GpArray.initFromCatalog(dbconn.DbURL()) segments = gparray.getDbList() for seg in segments: host = seg.getSegmentHostName() if seg.isSegmentPrimary() or seg.isSegmentMaster(): port = seg.getSegmentPort() psql_cmd = "PGDATABASE=\'%s\' PGOPTIONS=\'-c gp_session_role=utility\' psql -h %s -p %s -c \"%s\"; " % (dbname, host, port, query) Command(name='Running Remote command: %s' % psql_cmd, cmdStr = psql_cmd).run(validateAfter=True) @then( 'The path "{path}" is removed from current working directory') @when( 'The path "{path}" is removed from current working directory') @given('The path "{path}" is removed from current working directory') def impl(context, path): remove_local_path(path) @given('the path "{path}" is found in cwd "{num}" times') @then('the path "{path}" is found in cwd "{num}" times') @when('the path "{path}" is found in cwd "{num}" times') def impl(context, path, num): result = validate_local_path(path) if result != int(num): raise Exception("expected %s items but found %s items in path %s" % (num, result, path) ) @then('run all the repair scripts in the dir "{dir}"') def impl(context, dir): command = "cd {0} ; for i in *.sh ; do bash $i; done".format(dir) run_command(context, command) @when('the entry for the table "{user_table}" is removed from "{catalog_table}" with key "{primary_key}" in the database "{db_name}"') def impl(context, user_table, catalog_table, primary_key, db_name): delete_qry = "delete from %s where %s='%s'::regclass::oid;" % (catalog_table, primary_key, user_table) with dbconn.connect(dbconn.DbURL(dbname=db_name)) as conn: for qry in ["set allow_system_table_mods='dml';", "set allow_segment_dml=true;", delete_qry]: dbconn.execSQL(conn, qry) conn.commit() @when('the entry for the table "{user_table}" is removed from "{catalog_table}" with key "{primary_key}" in the database "{db_name}" on the first primary segment') def impl(context, user_table, catalog_table, primary_key, db_name): host, port = get_primary_segment_host_port() delete_qry = "delete from %s where %s='%s'::regclass::oid;" % (catalog_table, primary_key, user_table) with dbconn.connect(dbconn.DbURL(dbname=db_name, port=port, hostname=host), utility=True, allowSystemTableMods='dml') as conn: for 
qry in [delete_qry]: dbconn.execSQL(conn, qry) conn.commit() @given('the timestamps in the repair dir are consistent') @when('the timestamps in the repair dir are consistent') @then('the timestamps in the repair dir are consistent') def impl(_): repair_regex = "gpcheckcat.repair.*" timestamp = "" repair_dir = "" for file in os.listdir('.'): if fnmatch.fnmatch(file, repair_regex): repair_dir = file timestamp = repair_dir.split('.')[2] if not timestamp: raise Exception("Timestamp was not found") for file in os.listdir(repair_dir): if not timestamp in file: raise Exception("file found containing inconsistent timestamp") @when('user kills a mirror process with the saved information') def impl(context): cmdStr = "ps ux | grep '[m]irror process' | grep %s | awk '{print $2}'" % context.mirror_port cmd=Command(name='get mirror pid: %s' % cmdStr, cmdStr=cmdStr) cmd.run() pid = cmd.get_stdout_lines()[0] kill_process(int(pid), context.mirror_segdbname, sig=signal.SIGABRT) @when('user temporarily moves the data directory of the killed mirror') @then('user temporarily moves the data directory of the killed mirror') def impl(context): rmStr = "mv %s{,.bk}" % context.mirror_datadir cmd=Command(name='Move mirror data directory', cmdStr=rmStr) cmd.run(validateAfter=True) @when('user returns the data directory to the default location of the killed mirror') @then('user returns the data directory to the default location of the killed mirror') def impl(context): rmStr = "mv %s{.bk,}" % context.mirror_datadir cmd=Command(name='Move mirror data directory', cmdStr=rmStr) cmd.run(validateAfter=True) @when('wait until the mirror is down') @then('wait until the mirror is down') @given('wait until the mirror is down') def impl(context): qry = "select status from gp_segment_configuration where dbid='%s' and status='d' " % context.mirror_segdbId start_time = current_time = datetime.now() while (current_time - start_time).seconds < 120: row_count = len(getRows('template1', qry)) if row_count == 1: break time.sleep(5) current_time = datetime.now() @when('run gppersistent_rebuild with the saved content id') @then('run gppersistent_rebuild with the saved content id') def impl(context): cmdStr = "echo -e 'y\ny\n' | $GPHOME/sbin/gppersistentrebuild -c %s" % context.saved_segcid cmd=Command(name='Run gppersistentrebuild',cmdStr=cmdStr) cmd.run(validateAfter=True) context.ret_code = cmd.get_results().rc @given('the information of a "{seg}" segment on any host is saved') @when('the information of a "{seg}" segment on any host is saved') @then('the information of a "{seg}" segment on any host is saved') def impl(context, seg): gparray = GpArray.initFromCatalog(dbconn.DbURL()) if seg == "mirror": to_save_segs = [seg for seg in gparray.getDbList() if seg.isSegmentMirror()] context.mirror_segdbId = to_save_segs[0].getSegmentDbId() context.mirror_segcid = to_save_segs[0].getSegmentContentId() context.mirror_segdbname = to_save_segs[0].getSegmentHostName() context.mirror_datadir = to_save_segs[0].getSegmentDataDirectory() context.mirror_port = to_save_segs[0].getSegmentPort() elif seg == "primary": to_save_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()] elif seg == "master": to_save_segs = [seg for seg in gparray.getDbList() if seg.isSegmentMaster()] context.saved_segcid = to_save_segs[0].getSegmentContentId() @given('the user creates an index for table "{table_name}" in database "{db_name}"') @when('the user creates an index for table "{table_name}" in database "{db_name}"') @then('the user creates an index for 
table "{table_name}" in database "{db_name}"') def impl(context, table_name, db_name): index_qry = "create table {0}(i int primary key, j varchar); create index test_index on index_table using bitmap(j)".format(table_name) with dbconn.connect(dbconn.DbURL(dbname=db_name)) as conn: dbconn.execSQL(conn, index_qry) conn.commit() @given('verify that mirror_existence_state of segment "{segc_id}" is "{mirror_existence_state}"') @when('verify that mirror_existence_state of segment "{segc_id}" is "{mirror_existence_state}"') @then('verify that mirror_existence_state of segment "{segc_id}" is "{mirror_existence_state}"') def impl(context, segc_id, mirror_existence_state): with dbconn.connect(dbconn.DbURL(dbname='template1')) as conn: sql = """SELECT mirror_existence_state from gp_dist_random('gp_persistent_relation_node') where gp_segment_id=%s group by 1;""" % segc_id cluster_state = dbconn.execSQL(conn, sql).fetchone() if cluster_state[0] != int(mirror_existence_state): raise Exception("mirror_existence_state of segment %s is %s. Expected %s." % (segc_id, cluster_state[0], mirror_existence_state)) @given('a role "{role_name}" is created') @when('a role "{role_name}" is created') @then('a role "{role_name}" is created') def impl(context, role_name): with dbconn.connect(dbconn.DbURL(dbname='template1')) as conn: pghba = PgHba() new_entry = Entry(entry_type='local', database='all', user=role_name, authmethod="password") pghba.add_entry(new_entry) pghba.write() dbconn.execSQL(conn, "Drop role if exists dsp_role") dbconn.execSQL(conn, "Create role %s with login password 'dsprolepwd'" % role_name) dbconn.execSQL(conn, "select pg_reload_conf()") conn.commit() @given('the backup files for the stored timestamp are in the old format in dir "{directory}"') @when('the backup files for the stored timestamp are in the old format in dir "{directory}"') @then('the backup files for the stored timestamp are in the old format in dir "{directory}"') def impl(context, directory): store_timestamp_in_old_format(context, directory=directory) @given('the backup files for the stored timestamp are in the old format') @when('the backup files for the stored timestamp are in the old format') @then('the backup files for the stored timestamp are in the old format') def impl(context): store_timestamp_in_old_format(context) @given('the backup files for the stored timestamp are in the old format with prefix "{prefix}"') @when('the backup files for the stored timestamp are in the old format with prefix "{prefix}"') @then('the backup files for the stored timestamp are in the old format with prefix "{prefix}"') def impl(context, prefix): store_timestamp_in_old_format(context, prefix=prefix) def store_timestamp_in_old_format(context, directory = None, prefix = ""): gparray = GpArray.initFromCatalog(dbconn.DbURL()) primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary() or seg.isSegmentMaster()] try: context.backup_timestamp except: context.backup_timestamp = None if context.backup_timestamp is not None: timestamp = context.backup_timestamp else: timestamp = context.full_backup_timestamp if directory is None: master_dump_dir = gparray.master.getSegmentDataDirectory() else : master_dump_dir = directory if prefix is not "": prefix = prefix + "_" for ps in primary_segs: if directory is None: seg_dir = ps.getSegmentDataDirectory() else: seg_dir = directory dump_dir = os.path.join(seg_dir, 'db_dumps', timestamp[0:8]) segdbId = ps.getSegmentDbId() segcid = ps.getSegmentContentId() segdbname = ps.getSegmentHostName() 
new_format = "%s_%s" % (segcid, segdbId) old_format = "%s_%s" % (1 if ps.isSegmentMaster() else 0, segdbId) rename_files_to_older_format = """ ssh {segdbname} 'if [ -d "{dump_dir}" ]; then for i in `ls {dump_dir}/*{new_format}_{timestamp}* | xargs`; do old_format=${{i/{new_format}/{old_format}}} if [ ! -f $old_format ]; then mv $i $old_format; fi ; done; fi;' """.format(segdbname=segdbname, dump_dir=dump_dir, new_format=new_format, old_format=old_format, timestamp=timestamp) run_command(context, rename_files_to_older_format) if context.exception: raise context.exception #replace new format with old format on master directory report file master_report_file = os.path.join(master_dump_dir, 'db_dumps', timestamp[0:8], '%sgp_dump_%s.rpt' % (prefix,timestamp)) change_report_file_content = "sed -i 's|%s|%s|' %s" % (new_format, old_format, master_report_file) run_command(context, change_report_file_content) @then('the timestamp will be stored in json format') @given('the timestamp will be stored in json format') @when('the timestamp will be stored in json format') def impl(context): context.is_timestamp_stored_as_json = True
45.564795
269
0.677371
795774e926b1173dd2d81bfdd0657009cee65339
1,668
py
Python
setup.py
graingert/requests
5d87e1aeba0f98d13b7827c8472dab26eb3bd13e
[ "Apache-2.0" ]
4
2019-05-20T13:15:28.000Z
2019-05-23T04:49:47.000Z
setup.py
graingert/requests
5d87e1aeba0f98d13b7827c8472dab26eb3bd13e
[ "Apache-2.0" ]
1
2021-12-13T20:55:07.000Z
2021-12-13T20:55:07.000Z
vendor/packages/requests/setup.py
joshua-s/fjord
8cdaddf5b05e51fef44675492e115279efe9fd5c
[ "BSD-3-Clause" ]
4
2015-07-08T09:13:44.000Z
2021-12-14T08:33:38.000Z
#!/usr/bin/env python

import os
import sys

import requests

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

packages = [
    'requests',
    'requests.packages',
    'requests.packages.charade',
    'requests.packages.urllib3',
    'requests.packages.urllib3.packages',
    'requests.packages.urllib3.packages.ssl_match_hostname'
]

requires = []

setup(
    name='requests',
    version=requests.__version__,
    description='Python HTTP for Humans.',
    long_description=open('README.rst').read() + '\n\n' +
                     open('HISTORY.rst').read(),
    author='Kenneth Reitz',
    author_email='me@kennethreitz.com',
    url='http://python-requests.org',
    packages=packages,
    package_data={'': ['LICENSE', 'NOTICE'], 'requests': ['*.pem']},
    package_dir={'requests': 'requests'},
    include_package_data=True,
    install_requires=requires,
    license=open('LICENSE').read(),
    zip_safe=False,
    classifiers=(
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        # 'Programming Language :: Python :: 3.0',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
    ),
)
28.271186
68
0.626499
7957756fa27efd8fb8c8130dee421fc71e8ef10c
1,677
py
Python
Hint.py
kennethwpak/hangman
ecc911494249752a71d4d3c08328fafe81066402
[ "MIT" ]
null
null
null
Hint.py
kennethwpak/hangman
ecc911494249752a71d4d3c08328fafe81066402
[ "MIT" ]
null
null
null
Hint.py
kennethwpak/hangman
ecc911494249752a71d4d3c08328fafe81066402
[ "MIT" ]
null
null
null
import hangman1 as hm
import random

secret_word = hm.secret_word
letters_guessed = hm.letters_guessed
get_available_letters = hm.get_available_letters
get_guessed_word = hm.get_guessed_word

class hint:
    def __init__(self):
        print('Oops, you now meet trouble')
        print('You can use any hint to help you out')
        print('But you are encouraged to finish the problem yourself')
        call = input('''Here is the hint you can call for:
'dele' for deleting a wrong letter from available letters
'show' for showing one letter in the word
'cheat' for showing the answer''')
        if call == 'dele':
            # dele() needs the current secret word and guesses; calling it without them raises a TypeError
            hint.dele(secret_word, letters_guessed)
        if call == 'show':
            hint.show(secret_word, letters_guessed)
        if call == 'cheat':
            hint.cheat()

    def dele(secret_word, letters_guessed):
        # build the list of still-available letters and drop one wrong letter at random
        my_list = []
        for i in range(len(get_available_letters(letters_guessed + list(secret_word)))):
            my_list.append(get_available_letters(letters_guessed + list(secret_word))[i])
        my_list.pop(random.randint(0, len(my_list) - 1))
        return my_list

    def show(secret_word, letters_guessed):
        # reveal one letter of the secret word that has not been guessed yet
        secret_word_list = []
        words_ready_to_show = []
        for i in range(len(secret_word)):
            secret_word_list.append(secret_word[i])
        for i in range(len(secret_word_list)):
            if secret_word_list[i] not in letters_guessed:
                words_ready_to_show.append(secret_word_list[i])
        letters_guessed = words_ready_to_show[random.randint(0, len(words_ready_to_show) - 1)]
        print(get_guessed_word(secret_word, letters_guessed))

    def cheat():
        print(secret_word)
39
89
0.664878
795776048eceb9321524c67c72e768fea4b7d6d8
188
py
Python
opencivicdata/core/apps.py
palewire/python-opencivicdata
7862be45bef6846d3e284995d208fabcc8635362
[ "BSD-3-Clause" ]
20
2017-06-23T17:31:48.000Z
2021-11-23T19:20:58.000Z
opencivicdata/core/apps.py
palewire/python-opencivicdata
7862be45bef6846d3e284995d208fabcc8635362
[ "BSD-3-Clause" ]
70
2015-01-06T18:40:22.000Z
2017-05-24T18:06:52.000Z
opencivicdata/core/apps.py
california-civic-data-coalition/python-opencivicdata-django
375cd09d48908a7be58186de64f470b233f616d6
[ "BSD-3-Clause" ]
17
2017-05-25T17:05:57.000Z
2021-06-05T14:45:39.000Z
from django.apps import AppConfig

import os


class BaseConfig(AppConfig):
    name = "opencivicdata.core"
    verbose_name = "Open Civic Data - Core"
    path = os.path.dirname(__file__)
20.888889
43
0.723404
79577618e3ce78e887bd524785db1a6ac1099479
623
py
Python
finance/urls.py
Hamifthi/gym-app
514d7efa4f7777ab9d2e0481311c1c15542756c1
[ "MIT" ]
null
null
null
finance/urls.py
Hamifthi/gym-app
514d7efa4f7777ab9d2e0481311c1c15542756c1
[ "MIT" ]
4
2021-03-30T12:49:27.000Z
2021-06-10T18:27:47.000Z
finance/urls.py
Hamifthi/gym-app
514d7efa4f7777ab9d2e0481311c1c15542756c1
[ "MIT" ]
null
null
null
from django.urls import path, re_path, reverse_lazy

from . import views

urlpatterns = [
    path('submit/income/', views.SubmitIncome.as_view(), name='submit_income'),
    path('submit/expense/', views.SubmitExpense.as_view(), name='submit_expense'),
    path('report/form/', views.TransactionReportForm.as_view(), name='report_form'),
    path('report/income/', views.IncomeTransactionReport.as_view(), name='report_income'),
    path('report/expense/', views.ExpenseTransactionReport.as_view(), name='report_expense'),
    path('report/total/', views.TotalTransactionReport.as_view(), name='total_transaction_report'),
]
56.636364
99
0.744783
795776bdbafc87f29a4c87412abe4fc850ee03a4
1,654
py
Python
app/auth/views.py
Waithera-m/You-Got-This
efc83bd712ffbb5d57acf3c9925389905adf8673
[ "Unlicense" ]
null
null
null
app/auth/views.py
Waithera-m/You-Got-This
efc83bd712ffbb5d57acf3c9925389905adf8673
[ "Unlicense" ]
null
null
null
app/auth/views.py
Waithera-m/You-Got-This
efc83bd712ffbb5d57acf3c9925389905adf8673
[ "Unlicense" ]
null
null
null
from flask import render_template,redirect,url_for,flash,request
from . import auth
from ..models import User
from .forms import UserRegistrationForm,LoginForm
from .. import db
from flask_login import login_user,logout_user,login_required
from ..email import email_message

@auth.route('/login', methods=["GET","POST"])
def login():
    '''
    view function renders login.html
    '''
    login_form = LoginForm()
    if login_form.validate_on_submit():
        user = User.query.filter_by(email=login_form.email.data).first()
        if user is not None and user.verify_password(login_form.password.data):
            login_user(user,login_form.remember.data)
            return redirect(url_for('main.index'))

        flash('Invalid email or password')

    title = "You Got This Login"
    return render_template('auth/login.html',login_form=login_form,title=title)

@auth.route('/logout')
@login_required
def logout():
    '''
    view function logs out an authenticated user
    '''
    logout_user()
    return redirect(url_for("main.index"))

@auth.route('/register',methods=["GET","POST"])
def register():
    '''
    view function renders registration form
    '''
    form = UserRegistrationForm()
    if form.validate_on_submit():
        user = User(email=form.email.data,username=form.username.data,password=form.password.data)
        db.session.add(user)
        db.session.commit()
        email_message("Welcome to You Got This","email/welcome_user",user.email,user=user)
        return redirect(url_for('.login'))
    title="New Account"
    return render_template('auth/register.html',registration_form=form)
30.072727
98
0.691052
7957778bd2fed4a7a1709adc1984924ddd018e7b
31,254
py
Python
merge_statistics.py
loganlebanoff/correct_summarization
cec0d5401ddb5f7c33aca14f31da68b2f8092c53
[ "BSD-3-Clause" ]
2
2019-07-20T14:57:39.000Z
2020-06-01T11:14:40.000Z
merge_statistics.py
loganlebanoff/correct_summarization
cec0d5401ddb5f7c33aca14f31da68b2f8092c53
[ "BSD-3-Clause" ]
null
null
null
merge_statistics.py
loganlebanoff/correct_summarization
cec0d5401ddb5f7c33aca14f31da68b2f8092c53
[ "BSD-3-Clause" ]
null
null
null
import cPickle from pathos.multiprocessing import ProcessingPool as Pool import itertools import os from tqdm import tqdm import numpy as np from absl import flags from absl import app import pickle import util import sys import glob import data from scipy.stats.stats import pearsonr import matplotlib if not "DISPLAY" in os.environ: matplotlib.use("Agg") import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages from matplotlib.ticker import MaxNLocator FLAGS = flags.FLAGS if 'dataset_name' not in flags.FLAGS: flags.DEFINE_string('dataset_name', 'all', 'Which dataset to use. Can be {duc_2004, tac_2011, etc}') if 'dataset_split' not in flags.FLAGS: flags.DEFINE_string('dataset_split', 'val', 'Which dataset split to use. Must be one of {train, val (or dev), test}') if 'sentence_limit' not in flags.FLAGS: flags.DEFINE_integer('sentence_limit', 2, 'Max number of sentences to include for merging.') if 'num_instances' not in flags.FLAGS: flags.DEFINE_integer('num_instances', -1, 'Number of instances to run for before stopping. Use -1 to run on all instances.') FLAGS(sys.argv) import convert_data import lambdamart_scores_to_summaries import preprocess_for_lambdamart_no_flags data_dir = os.path.expanduser('~') + '/data/tf_data/with_coref_and_ssi' ssi_dir = 'data/ssi' names_to_types = [('raw_article_sents', 'string_list'), ('similar_source_indices', 'delimited_list_of_tuples'), ('summary_text', 'string'), ('corefs', 'json'), ('doc_indices', 'delimited_list')] min_matched_tokens = 1 tfidf_vec_path = 'data/tfidf/' + 'all' + '_tfidf_vec_5.pkl' bin_values = [x / 100. for x in list(range(100))] pretty_dataset_names = {'cnn_dm': 'CNN/DM', 'xsum': 'XSum', 'duc_2004': 'DUC-04'} plt.rcParams['font.family'] = 'serif' plt.rcParams['font.serif'] = 'Ubuntu' plt.rcParams['font.monospace'] = 'Ubuntu Mono' # plt.rcParams['font.weight'] = 'bold' plt.rcParams['axes.labelsize'] = 20 plt.rcParams['axes.labelweight'] = 'bold' plt.rcParams['axes.titlesize'] = 20 util.create_dirs('stuff/plots') plot_data_file = os.path.join('stuff/plots', FLAGS.dataset_name + '_' + FLAGS.dataset_split + '.pkl') plot_file = os.path.join('stuff/plots', FLAGS.dataset_name + '_' + FLAGS.dataset_split + '.pdf') def plot_histograms(all_list_of_hist_pairs): nrows = len(all_list_of_hist_pairs) ncols = len(all_list_of_hist_pairs[0]) fig, axes = plt.subplots(nrows=nrows, ncols=ncols) if axes.ndim == 1: axes = axes.reshape(1, -1) fig.set_size_inches(10, 5) fig.subplots_adjust(wspace=0.075, hspace=0.05) for row_idx in range(axes.shape[0]): for col_idx in range(axes.shape[1]): ax = axes[row_idx, col_idx] plot_histogram(ax, row_idx, col_idx, **all_list_of_hist_pairs[row_idx][col_idx]) pp = PdfPages(plot_file) plt.savefig(pp, format='pdf',bbox_inches='tight') plt.show() pp.close() font = { 'size': 20, } def plot_histogram(ax, row_idx, col_idx, lst=None, num_bins=None, start_at_0=False, pdf=False, cutoff_std=4, log=False, max_val=None, y_label=None, x_label=None, x_lim=None, y_lim=None, legend_labels=None): def plot(my_lst, translucent=False, legend_label=None): alpha = 0.5 if translucent else 1 histtype = 'stepfilled' if max_val is None: my_max_val = np.mean(my_lst) + cutoff_std*np.std(my_lst) else: my_max_val = max_val # bins = 100 if normalized else list(range(min(my_lst), max(my_lst) + 2)) if not start_at_0 else list(range(max(my_lst) + 2)) bins = num_bins if num_bins is not None else list(range(int(min(my_lst)), int(my_max_val))) if not start_at_0 else list(range(int(my_max_val))) ax.hist(my_lst, bins=bins, density=pdf, 
alpha=alpha, histtype=histtype, edgecolor='black', log=log, label=legend_label) if row_idx == 0: ax.legend() if y_label is not None: ax.set_ylabel(pretty_dataset_names[y_label]) if row_idx == 2: ax.set_xlabel(x_label) else: ax.set_xticklabels([]) ax.xaxis.set_ticks_position('none') ax.yaxis.set_ticks_position('none') ax.set_yticklabels([]) if y_lim is not None: ax.set_ylim(top=y_lim) if x_lim is not None: ax.set_xlim(right=x_lim) # nbins = len(ax.get_xticklabels()) if row_idx > 0: ax.yaxis.set_major_locator(MaxNLocator(nbins=4, prune='upper')) for lst_idx, my_lst in enumerate(lst): if legend_labels is not None: legend_label = legend_labels[lst_idx] plot(my_lst, translucent=True, legend_label=legend_label) # fig, ax1 = plt.subplots(nrows=1) # fig.set_size_inches(6, 2) # varname = util.varname(lst)[0] # if type(lst[0]) == list: # for my_lst in lst: # plot(my_lst, translucent=True) # # pp = PdfPages(os.path.join('stuff/plots', FLAGS.dataset_name + '_' + varname + '.pdf')) # plt.savefig(pp, format='pdf',bbox_inches='tight') # plt.show() # pp.close() # def plot_histogram(lst, num_bins=None, start_at_0=False, pdf=False, cutoff_std=4, log=False, max_val=None): # def plot(my_lst, translucent=False): # alpha = 0.5 if translucent else 1 # histtype = 'stepfilled' # if max_val is None: # my_max_val = np.mean(my_lst) + cutoff_std*np.std(my_lst) # else: # my_max_val = max_val # # bins = 100 if normalized else list(range(min(my_lst), max(my_lst) + 2)) if not start_at_0 else list(range(max(my_lst) + 2)) # bins = num_bins if num_bins is not None else list(range(int(min(my_lst)), int(my_max_val))) if not start_at_0 else list(range(int(my_max_val))) # plt.hist(my_lst, bins=bins, density=pdf, alpha=alpha, histtype=histtype, edgecolor='black', log=log) # # fig, ax1 = plt.subplots(nrows=1) # fig.set_size_inches(6, 2) # varname = util.varname(lst)[0] # if type(lst[0]) == list: # for my_lst in lst: # plot(my_lst, translucent=True) # else: # plot(lst) # # pp = PdfPages(os.path.join('stuff/plots', FLAGS.dataset_name + '_' + varname + '.pdf')) # plt.savefig(pp, format='pdf',bbox_inches='tight') # plt.show() # pp.close() def plot_positions(primary_pos, secondary_pos, all_pos): print('Sentence positions (primary (mean, median), secondary (mean, median), all (mean, median)) : ', np.mean(primary_pos), np.median(primary_pos), np.mean(secondary_pos), np.median(secondary_pos), np.mean(all_pos), np.median(all_pos)) hist_pos_primary = np.histogram(primary_pos, bins=max(primary_pos)+1) hist_pos_secondary = np.histogram(secondary_pos, bins=max(secondary_pos)+1) hist_pos_all = np.histogram(all_pos, bins=max(all_pos)+1) print('Histogram of positions primary:', util.hist_as_pdf_str(hist_pos_primary)) print('Histogram of positions secondary:', util.hist_as_pdf_str(hist_pos_secondary)) print('Histogram of positions all:', util.hist_as_pdf_str(hist_pos_all)) plot_histogram(primary_pos) plot_histogram(secondary_pos) plot_histogram(all_pos) def get_integral_values_for_histogram(orig_val, rel_sent_indices, doc_sent_indices, doc_sent_lens, raw_article_sents): if FLAGS.dataset_name == 'duc_2004': val = rel_sent_indices[orig_val] num_sents_total = doc_sent_lens[doc_sent_indices[orig_val]] else: val = orig_val num_sents_total = len(raw_article_sents) norm = val*1./num_sents_total next_norm = (val+1)*1./num_sents_total vals_to_add = [bin_val for bin_val in bin_values if bin_val >= norm and bin_val < next_norm] return vals_to_add def main(unused_argv): print('Running statistics on %s' % FLAGS.dataset_name) if len(unused_argv) != 1: # prints 
a message if you've entered flags incorrectly raise Exception("Problem with flags: %s" % unused_argv) if FLAGS.dataset_name == 'all': dataset_names = ['cnn_dm', 'xsum', 'duc_2004'] else: dataset_names = [FLAGS.dataset_name] if not os.path.exists(plot_data_file): all_lists_of_histogram_pairs = [] for dataset_name in dataset_names: FLAGS.dataset_name = dataset_name if dataset_name == 'duc_2004': dataset_splits = ['test'] elif FLAGS.dataset_split == 'all': dataset_splits = ['test', 'val', 'train'] else: dataset_splits = [FLAGS.dataset_split] ssi_list = [] for dataset_split in dataset_splits: ssi_path = os.path.join(ssi_dir, FLAGS.dataset_name, dataset_split + '_ssi.pkl') with open(ssi_path) as f: ssi_list.extend(pickle.load(f)) if FLAGS.dataset_name == 'duc_2004': for abstract_idx in [1,2,3]: ssi_path = os.path.join(ssi_dir, FLAGS.dataset_name, dataset_split + '_ssi_' + str(abstract_idx) + '.pkl') with open(ssi_path) as f: temp_ssi_list = pickle.load(f) ssi_list.extend(temp_ssi_list) ssi_2d = util.flatten_list_of_lists(ssi_list) num_extracted = [len(ssi) for ssi in util.flatten_list_of_lists(ssi_list)] hist_num_extracted = np.histogram(num_extracted, bins=6, range=(0,5)) print(hist_num_extracted) print('Histogram of number of sentences merged: ' + util.hist_as_pdf_str(hist_num_extracted)) distances = [abs(ssi[0]-ssi[1]) for ssi in ssi_2d if len(ssi) >= 2] print('Distance between sentences (mean, median): ', np.mean(distances), np.median(distances)) hist_dist = np.histogram(distances, bins=max(distances)) print('Histogram of distances: ' + util.hist_as_pdf_str(hist_dist)) summ_sent_idx_to_number_of_source_sents = [[], [], [], [], [], [], [], [], [], []] for ssi in ssi_list: for summ_sent_idx, source_indices in enumerate(ssi): if len(source_indices) == 0 or summ_sent_idx >= len(summ_sent_idx_to_number_of_source_sents): continue num_sents = len(source_indices) if num_sents > 2: num_sents = 2 summ_sent_idx_to_number_of_source_sents[summ_sent_idx].append(num_sents) print ("Number of source sents for summary sentence indices (Is the first summary sent more likely to match with a singleton or a pair?):") for summ_sent_idx, list_of_numbers_of_source_sents in enumerate(summ_sent_idx_to_number_of_source_sents): if len(list_of_numbers_of_source_sents) == 0: percent_singleton = 0. else: percent_singleton = list_of_numbers_of_source_sents.count(1) * 1. / len(list_of_numbers_of_source_sents) percent_pair = list_of_numbers_of_source_sents.count(2) * 1. / len(list_of_numbers_of_source_sents) print str(percent_singleton) + '\t', print '' for summ_sent_idx, list_of_numbers_of_source_sents in enumerate(summ_sent_idx_to_number_of_source_sents): if len(list_of_numbers_of_source_sents) == 0: percent_pair = 0. else: percent_singleton = list_of_numbers_of_source_sents.count(1) * 1. / len(list_of_numbers_of_source_sents) percent_pair = list_of_numbers_of_source_sents.count(2) * 1. 
/ len(list_of_numbers_of_source_sents) print str(percent_pair) + '\t', print '' primary_pos = [ssi[0] for ssi in ssi_2d if len(ssi) >= 1] secondary_pos = [ssi[1] for ssi in ssi_2d if len(ssi) >= 2] all_pos = [max(ssi) for ssi in ssi_2d if len(ssi) >= 1] # if FLAGS.dataset_name != 'duc_2004': # plot_positions(primary_pos, secondary_pos, all_pos) if FLAGS.dataset_split == 'all': glob_string = '*.bin' else: glob_string = dataset_splits[0] print('Loading TFIDF vectorizer') with open(tfidf_vec_path, 'rb') as f: tfidf_vectorizer = pickle.load(f) source_dir = os.path.join(data_dir, FLAGS.dataset_name) source_files = sorted(glob.glob(source_dir + '/' + glob_string + '*')) total = len(source_files) * 1000 if ('cnn' in FLAGS.dataset_name or 'newsroom' in FLAGS.dataset_name or 'xsum' in FLAGS.dataset_name) else len(source_files) example_generator = data.example_generator(source_dir + '/' + glob_string + '*', True, False, should_check_valid=False) all_possible_singles = 0 all_possible_pairs = [0] all_filtered_pairs = 0 all_all_combinations = 0 all_ssi_pairs = [0] ssi_pairs_with_shared_coref = [0] ssi_pairs_with_shared_word = [0] ssi_pairs_with_either_coref_or_word = [0] all_pairs_with_shared_coref = [0] all_pairs_with_shared_word = [0] all_pairs_with_either_coref_or_word = [0] actual_total = [0] rel_positions_primary = [] rel_positions_secondary = [] rel_positions_all = [] sent_lens = [] all_sent_lens = [] all_pos = [] y = [] normalized_positions_primary = [] normalized_positions_secondary = [] all_normalized_positions_primary = [] all_normalized_positions_secondary = [] normalized_positions_singles = [] normalized_positions_pairs_first = [] normalized_positions_pairs_second = [] primary_pos_duc = [] secondary_pos_duc = [] all_pos_duc = [] all_distances = [] distances_duc = [] tfidf_similarities = [] all_tfidf_similarities = [] average_mmrs = [] all_average_mmrs = [] for example_idx, example in enumerate(tqdm(example_generator, total=total)): # def process(example_idx_example): # # print '0' # example = example_idx_example if FLAGS.num_instances != -1 and example_idx >= FLAGS.num_instances: break raw_article_sents, groundtruth_similar_source_indices_list, groundtruth_summary_text, corefs, doc_indices = util.unpack_tf_example( example, names_to_types) article_sent_tokens = [util.process_sent(sent) for sent in raw_article_sents] article_text = ' '.join(raw_article_sents) groundtruth_summ_sents = [[sent.strip() for sent in groundtruth_summary_text.strip().split('\n')]] if doc_indices is None: doc_indices = [0] * len(util.flatten_list_of_lists(article_sent_tokens)) doc_indices = [int(doc_idx) for doc_idx in doc_indices] rel_sent_indices, doc_sent_indices, doc_sent_lens = preprocess_for_lambdamart_no_flags.get_rel_sent_indices(doc_indices, article_sent_tokens) groundtruth_similar_source_indices_list = util.enforce_sentence_limit(groundtruth_similar_source_indices_list, FLAGS.sentence_limit) sent_term_matrix = util.get_doc_substituted_tfidf_matrix(tfidf_vectorizer, raw_article_sents, article_text) sents_similarities = util.cosine_similarity(sent_term_matrix, sent_term_matrix) importances = util.special_squash(util.get_tfidf_importances(tfidf_vectorizer, raw_article_sents)) if FLAGS.dataset_name == 'duc_2004': first_k_indices = lambdamart_scores_to_summaries.get_indices_of_first_k_sents_of_each_article(rel_sent_indices, FLAGS.first_k) else: first_k_indices = [idx for idx in range(len(raw_article_sents))] article_indices = list(range(len(raw_article_sents))) possible_pairs = [x for x in 
list(itertools.combinations(article_indices, 2))] # all pairs # # # filtered_possible_pairs = preprocess_for_lambdamart_no_flags.filter_pairs_by_criteria(raw_article_sents, possible_pairs, corefs) # if FLAGS.dataset_name == 'duc_2004': # filtered_possible_pairs = [x for x in list(itertools.combinations(first_k_indices, 2))] # all pairs # else: # filtered_possible_pairs = preprocess_for_lambdamart_no_flags.filter_pairs_by_sent_position(possible_pairs) # # removed_pairs = list(set(possible_pairs) - set(filtered_possible_pairs)) # possible_singles = [(i,) for i in range(len(raw_article_sents))] # all_combinations = filtered_possible_pairs + possible_singles # # all_possible_singles += len(possible_singles) # all_possible_pairs[0] += len(possible_pairs) # all_filtered_pairs += len(filtered_possible_pairs) # all_all_combinations += len(all_combinations) # for ssi in groundtruth_similar_source_indices_list: # if len(ssi) > 0: # idx = rel_sent_indices[ssi[0]] # rel_positions_primary.append(idx) # rel_positions_all.append(idx) # if len(ssi) > 1: # idx = rel_sent_indices[ssi[1]] # rel_positions_secondary.append(idx) # rel_positions_all.append(idx) # # # # coref_pairs = preprocess_for_lambdamart_no_flags.get_coref_pairs(corefs) # # DO OVER LAP PAIRS BETTER # overlap_pairs = preprocess_for_lambdamart_no_flags.filter_by_overlap(article_sent_tokens, possible_pairs) # either_coref_or_word = list(set(list(coref_pairs) + overlap_pairs)) # # for ssi in groundtruth_similar_source_indices_list: # if len(ssi) == 2: # all_ssi_pairs[0] += 1 # do_share_coref = ssi in coref_pairs # do_share_words = ssi in overlap_pairs # if do_share_coref: # ssi_pairs_with_shared_coref[0] += 1 # if do_share_words: # ssi_pairs_with_shared_word[0] += 1 # if do_share_coref or do_share_words: # ssi_pairs_with_either_coref_or_word[0] += 1 # all_pairs_with_shared_coref[0] += len(coref_pairs) # all_pairs_with_shared_word[0] += len(overlap_pairs) # all_pairs_with_either_coref_or_word[0] += len(either_coref_or_word) if FLAGS.dataset_name == 'duc_2004': primary_pos_duc.extend([rel_sent_indices[ssi[0]] for ssi in groundtruth_similar_source_indices_list if len(ssi) >= 1]) secondary_pos_duc.extend([rel_sent_indices[ssi[1]] for ssi in groundtruth_similar_source_indices_list if len(ssi) >= 2]) all_pos_duc.extend([max([rel_sent_indices[sent_idx] for sent_idx in ssi]) for ssi in groundtruth_similar_source_indices_list if len(ssi) >= 1]) for ssi in groundtruth_similar_source_indices_list: for sent_idx in ssi: sent_lens.append(len(article_sent_tokens[sent_idx])) if len(ssi) >= 1: orig_val = ssi[0] vals_to_add = get_integral_values_for_histogram(orig_val, rel_sent_indices, doc_sent_indices, doc_sent_lens, raw_article_sents) normalized_positions_primary.extend(vals_to_add) if len(ssi) >= 2: orig_val = ssi[1] vals_to_add = get_integral_values_for_histogram(orig_val, rel_sent_indices, doc_sent_indices, doc_sent_lens, raw_article_sents) normalized_positions_secondary.extend(vals_to_add) if FLAGS.dataset_name == 'duc_2004': distances_duc.append(abs(rel_sent_indices[ssi[1]] - rel_sent_indices[ssi[0]])) tfidf_similarities.append(sents_similarities[ssi[0], ssi[1]]) average_mmrs.append((importances[ssi[0]] + importances[ssi[1]])/2) for ssi in groundtruth_similar_source_indices_list: if len(ssi) == 1: orig_val = ssi[0] vals_to_add = get_integral_values_for_histogram(orig_val, rel_sent_indices, doc_sent_indices, doc_sent_lens, raw_article_sents) normalized_positions_singles.extend(vals_to_add) if len(ssi) >= 2: if doc_sent_indices[ssi[0]] != 
doc_sent_indices[ssi[1]]: continue orig_val_first = min(ssi[0], ssi[1]) vals_to_add = get_integral_values_for_histogram(orig_val_first, rel_sent_indices, doc_sent_indices, doc_sent_lens, raw_article_sents) normalized_positions_pairs_first.extend(vals_to_add) orig_val_second = max(ssi[0], ssi[1]) vals_to_add = get_integral_values_for_histogram(orig_val_second, rel_sent_indices, doc_sent_indices, doc_sent_lens, raw_article_sents) normalized_positions_pairs_second.extend(vals_to_add) # all_normalized_positions_primary.extend(util.flatten_list_of_lists([get_integral_values_for_histogram(single[0], rel_sent_indices, doc_sent_indices, doc_sent_lens, raw_article_sents) for single in possible_singles])) # all_normalized_positions_secondary.extend(util.flatten_list_of_lists([get_integral_values_for_histogram(pair[1], rel_sent_indices, doc_sent_indices, doc_sent_lens, raw_article_sents) for pair in possible_pairs])) all_sent_lens.extend([len(sent) for sent in article_sent_tokens]) all_distances.extend([abs(rel_sent_indices[pair[1]] - rel_sent_indices[pair[0]]) for pair in possible_pairs]) all_tfidf_similarities.extend([sents_similarities[pair[0], pair[1]] for pair in possible_pairs]) all_average_mmrs.extend([(importances[pair[0]] + importances[pair[1]])/2 for pair in possible_pairs]) # if FLAGS.dataset_name == 'duc_2004': # rel_pos_single = [rel_sent_indices[single[0]] for single in possible_singles] # rel_pos_pair = [[rel_sent_indices[pair[0]], rel_sent_indices[pair[1]]] for pair in possible_pairs] # all_pos.extend(rel_pos_single) # all_pos.extend([max(pair) for pair in rel_pos_pair]) # else: # all_pos.extend(util.flatten_list_of_lists(possible_singles)) # all_pos.extend([max(pair) for pair in possible_pairs]) # y.extend([1 if single in groundtruth_similar_source_indices_list else 0 for single in possible_singles]) # y.extend([1 if pair in groundtruth_similar_source_indices_list else 0 for pair in possible_pairs]) # actual_total[0] += 1 # # p = Pool(144) # # list(tqdm(p.imap(process, example_generator), total=total)) # # # print 'Possible_singles\tPossible_pairs\tFiltered_pairs\tAll_combinations: \n%.2f\t%.2f\t%.2f\t%.2f' % (all_possible_singles*1./actual_total, \ # # all_possible_pairs*1./actual_total, all_filtered_pairs*1./actual_total, all_all_combinations*1./actual_total) # # # # # print 'Relative positions of groundtruth source sentences in document:\nPrimary\tSecondary\tBoth\n%.2f\t%.2f\t%.2f' % (np.mean(rel_positions_primary), np.mean(rel_positions_secondary), np.mean(rel_positions_all)) # # # # print 'SSI Pair statistics:\nShare_coref\tShare_word\tShare_either\n%.2f\t%.2f\t%.2f' \ # # % (ssi_pairs_with_shared_coref[0]*100./all_ssi_pairs[0], ssi_pairs_with_shared_word[0]*100./all_ssi_pairs[0], ssi_pairs_with_either_coref_or_word[0]*100./all_ssi_pairs[0]) # # print 'All Pair statistics:\nShare_coref\tShare_word\tShare_either\n%.2f\t%.2f\t%.2f' \ # # % (all_pairs_with_shared_coref[0]*100./all_possible_pairs[0], all_pairs_with_shared_word[0]*100./all_possible_pairs[0], all_pairs_with_either_coref_or_word[0]*100./all_possible_pairs[0]) # # # hist_all_pos = np.histogram(all_pos, bins=max(all_pos)+1) # # print 'Histogram of all sent positions: ', util.hist_as_pdf_str(hist_all_pos) # # min_sent_len = min(sent_lens) # # hist_sent_lens = np.histogram(sent_lens, bins=max(sent_lens)-min_sent_len+1) # # print 'min, max sent lens:', min_sent_len, max(sent_lens) # # print 'Histogram of sent lens: ', util.hist_as_pdf_str(hist_sent_lens) # # min_all_sent_len = min(all_sent_lens) # # hist_all_sent_lens = 
np.histogram(all_sent_lens, bins=max(all_sent_lens)-min_all_sent_len+1) # # print 'min, max all sent lens:', min_all_sent_len, max(all_sent_lens) # # print 'Histogram of all sent lens: ', util.hist_as_pdf_str(hist_all_sent_lens) # # # print 'Pearsons r, p value', pearsonr(all_pos, y) # # fig, ax1 = plt.subplots(nrows=1) # # plt.scatter(all_pos, y) # # pp = PdfPages(os.path.join('stuff/plots', FLAGS.dataset_name + '_position_scatter.pdf')) # # plt.savefig(pp, format='pdf',bbox_inches='tight') # # plt.show() # # pp.close() # # # if FLAGS.dataset_name == 'duc_2004': # # plot_positions(primary_pos_duc, secondary_pos_duc, all_pos_duc) # # normalized_positions_all = normalized_positions_primary + normalized_positions_secondary # # plot_histogram(normalized_positions_primary, num_bins=100) # # plot_histogram(normalized_positions_secondary, num_bins=100) # # plot_histogram(normalized_positions_all, num_bins=100) # # sent_lens_together = [sent_lens, all_sent_lens] # # plot_histogram(sent_lens_together, pdf=True, start_at_0=True, max_val=70) # # if FLAGS.dataset_name == 'duc_2004': # distances = distances_duc # sent_distances_together = [distances, all_distances] # # plot_histogram(sent_distances_together, pdf=True, start_at_0=True, max_val=100) # # tfidf_similarities_together = [tfidf_similarities, all_tfidf_similarities] # # plot_histogram(tfidf_similarities_together, pdf=True, num_bins=100) # # average_mmrs_together = [average_mmrs, all_average_mmrs] # # plot_histogram(average_mmrs_together, pdf=True, num_bins=100) # # normalized_positions_primary_together = [normalized_positions_primary, bin_values] # normalized_positions_secondary_together = [normalized_positions_secondary, bin_values] # # plot_histogram(normalized_positions_primary_together, pdf=True, num_bins=100) # # plot_histogram(normalized_positions_secondary_together, pdf=True, num_bins=100) # # # list_of_hist_pairs = [ # { # 'lst': normalized_positions_primary_together, # 'pdf': True, # 'num_bins': 100, # 'y_lim': 3.9, # 'y_label': FLAGS.dataset_name, # 'x_label': 'Sent position (primary)' # }, # { # 'lst': normalized_positions_secondary_together, # 'pdf': True, # 'num_bins': 100, # 'y_lim': 3.9, # 'x_label': 'Sent position (secondary)' # }, # { # 'lst': sent_distances_together, # 'pdf': True, # 'start_at_0': True, # 'max_val': 100, # 'x_label': 'Sent distance' # }, # { # 'lst': sent_lens_together, # 'pdf': True, # 'start_at_0': True, # 'max_val': 70, # 'x_label': 'Sent length' # }, # { # 'lst': average_mmrs_together, # 'pdf': True, # 'num_bins': 100, # 'x_label': 'Average TF-IDF importance' # } # ] normalized_positions_pairs_together = [normalized_positions_pairs_first, normalized_positions_pairs_second] list_of_hist_pairs = [ { 'lst': [normalized_positions_singles], 'pdf': True, 'num_bins': 100, # 'y_lim': 3.9, 'x_lim': 1.0, 'y_label': FLAGS.dataset_name, 'x_label': 'Sent Position (Singles)', 'legend_labels': ['Primary'] }, { 'lst': normalized_positions_pairs_together, 'pdf': True, 'num_bins': 100, # 'y_lim': 3.9, 'x_lim': 1.0, 'x_label': 'Sent Position (Pairs)', 'legend_labels': ['Primary', 'Secondary'] } ] all_lists_of_histogram_pairs.append(list_of_hist_pairs) with open(plot_data_file, 'w') as f: cPickle.dump(all_lists_of_histogram_pairs, f) else: with open(plot_data_file) as f: all_lists_of_histogram_pairs = cPickle.load(f) plot_histograms(all_lists_of_histogram_pairs) if __name__ == '__main__': app.run(main)
52.972881
240
0.598195
7957778f13badbc805416eb7a8599d292f2cae67
1,939
py
Python
aliquotmaf/annotators/hotspot.py
NCI-GDC/aliquot-maf-tools
6aec9490ab7194ec605bf02c4c8e7c1cfca53973
[ "Apache-2.0" ]
1
2020-09-18T17:52:37.000Z
2020-09-18T17:52:37.000Z
aliquotmaf/annotators/hotspot.py
NCI-GDC/aliquot-maf-tools
6aec9490ab7194ec605bf02c4c8e7c1cfca53973
[ "Apache-2.0" ]
null
null
null
aliquotmaf/annotators/hotspot.py
NCI-GDC/aliquot-maf-tools
6aec9490ab7194ec605bf02c4c8e7c1cfca53973
[ "Apache-2.0" ]
1
2020-08-14T08:49:39.000Z
2020-08-14T08:49:39.000Z
""" Implements the hotspots annotation. """ from __future__ import absolute_import from aliquotmaf.converters.builder import get_builder from .annotator import Annotator class Hotspot(Annotator): def __init__(self, source, scheme, data): super().__init__(name="Hotspot", source=source, scheme=scheme) self.data = data @classmethod def setup(cls, scheme, source): # load the hotspots hsdic = {} head = [] count = 0 with open(source, "rt") as fh: for line in fh: if not head: head = line.rstrip("\r\n").lower().split("\t") assert all( [i in head for i in ["hugo_symbol", "change", "type"]] ), cls.logger.error("Unexpected header {0} found!".format(head)) else: dat = dict(zip(head, line.rstrip("\r\n").split("\t"))) if dat["hugo_symbol"] not in hsdic: hsdic[dat["hugo_symbol"]] = {} hsdic[dat["hugo_symbol"]][dat["change"]] = dat["type"] count += 1 curr = cls(source, scheme, hsdic) curr.logger.info("Loaded {0} hotspots".format(count)) return curr def annotate(self, maf_record): gene = maf_record["Hugo_Symbol"].value mval = "N" if gene in self.data: hgvsp = ( None if not maf_record["HGVSp_Short"].value else maf_record["HGVSp_Short"].value.lstrip("p.") ) if hgvsp and "fs*" in hgvsp: idx = hgvsp.index("fs") hgvsp = hgvsp[: idx - 1] + "fs" if hgvsp and hgvsp in self.data[gene]: mval = "Y" maf_record["hotspot"] = get_builder("hotspot", self.scheme, value=mval) return maf_record def shutdown(self): pass
32.864407
84
0.513667
7957787f8415e186326ba6f325359195d25e97ac
3,688
py
Python
source/setup.py
ph4s3r/webhook-shims
077ab606800612d1cfd048731264ad4cf91bfba6
[ "Apache-2.0" ]
null
null
null
source/setup.py
ph4s3r/webhook-shims
077ab606800612d1cfd048731264ad4cf91bfba6
[ "Apache-2.0" ]
null
null
null
source/setup.py
ph4s3r/webhook-shims
077ab606800612d1cfd048731264ad4cf91bfba6
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python import os, sys from setuptools import setup, find_packages from setuptools.command.test import test as TestCommand #from pip.req import parse_requirements from pip._internal.req import parse_requirements def parse_requirements(filename): """ load requirements from a pip requirements file """ lineiter = (line.strip() for line in open(filename)) return [line for line in lineiter if line and not line.startswith("#")] try: from loginsightwebhookdemo import __version__ as loginsightwebhookdemoversion # TODO Replace with a static variant? except ImportError: loginsightwebhookdemoversion = "0.dev0" # Hack from https://stackoverflow.com/questions/14399534/how-can-i-reference-requirements-txt-for-the-install-requires-kwarg-in-setuptool # parse_requirements() returns generator of pip.req.InstallRequirement objects try: if os.environ['PYTHONPATH']: HDIR = os.environ['PYTHONPATH'] except: try: if os.environ['TRAVIS_BUILD_DIR']: HDIR = os.environ['TRAVIS_BUILD_DIR'] except: HDIR = '.' #install_reqs = parse_requirements(HDIR + '/requirements.txt', session='hack') #test_reqs = parse_requirements(HDIR + '/test-requirements.txt', session='hack') # reqs is a list of requirement # e.g. ['django==1.5.1', 'mezzanine==1.4.6'] reqs = parse_requirements(HDIR + '/requirements.txt') treqs = parse_requirements(HDIR + '/test-requirements.txt') class PyTest(TestCommand): user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")] description = "Run tests in the current environment" def initialize_options(self): TestCommand.initialize_options(self) self.args = [] def run(self): import shlex # import here, cause outside the eggs aren't loaded import pytest try: args = shlex.split(self.args) except AttributeError: args = [] errno = pytest.main(args) sys.exit(errno) class ToxTest(TestCommand): user_options = [('tox-args=', "t", "Arguments to pass to pytest")] description = "Run tests in all configured tox environments" def initialize_options(self): TestCommand.initialize_options(self) self.args = [] def run(self): import shlex # import here, cause outside the eggs aren't loaded from tox.__main__ import main try: args = shlex.split(self.args) except AttributeError: args = [] errno = main(args) sys.exit(errno) setup( name='loginsightwebhookdemo', version=loginsightwebhookdemoversion, url='http://github.com/vmw-loginsight/loginsightwebhookdemo/', license='Apache Software License 2.0', author='Steve Flanders', install_requires=reqs, tests_require=treqs, description='VMware vRealize Log Insight Webhook Shim', author_email='stevefl@vmware.com', long_description=open('README.rst').read(), packages=find_packages(), platforms='any', classifiers=[ 'Programming Language :: Python :: 2.7', 'Development Status :: 1 - Planning', 'Natural Language :: English', 'Intended Audience :: Information Technology', 'Intended Audience :: System Administrators', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Topic :: Software Development :: Libraries :: Python Modules', ], entry_points={ 'console_scripts': [ 'li = loginsightwebhookdemo.__init__:main' ] }, cmdclass={'test': PyTest, 'tox': ToxTest} )
32.928571
137
0.667299
795778aa6255d5a769444ef20c68fd4c031e7192
5,749
py
Python
sc2players/playerManagement.py
ttinies/sc2players
fd9b37c268bf1005d9ef73a25e65ed97c8b7895f
[ "Apache-2.0" ]
3
2018-06-16T02:47:59.000Z
2019-12-24T04:51:11.000Z
sc2players/playerManagement.py
ttinies/sc2players
fd9b37c268bf1005d9ef73a25e65ed97c8b7895f
[ "Apache-2.0" ]
2
2018-07-09T05:44:22.000Z
2018-07-13T05:31:17.000Z
sc2players/playerManagement.py
ttinies/sc2players
fd9b37c268bf1005d9ef73a25e65ed97c8b7895f
[ "Apache-2.0" ]
null
null
null
""" PURPOSE: manage records of all known players, both local and remote """ from __future__ import absolute_import from __future__ import division # python 2/3 compatibility from __future__ import print_function # python 2/3 compatibility from six import iteritems, itervalues # python 2/3 compatibility import glob import os import re import time from sc2players import constants as c from sc2players.playerRecord import PlayerRecord from sc2players.playerPreGame import PlayerPreGame ################################################################################ playerCache = {} # mapping of player names to PlayerRecord objects ################################################################################ def addPlayer(settings): """define a new PlayerRecord setting and save to disk file""" _validate(settings) player = PlayerRecord(settings) player.save() getKnownPlayers()[player.name] = player return player ################################################################################ def updatePlayer(name, settings): """update an existing PlayerRecord setting and save to disk file""" player = delPlayer(name) # remove the existing record _validate(settings) player.update(settings) player.save() getKnownPlayers()[player.name] = player return player ################################################################################ def getPlayer(name): """obtain a specific PlayerRecord settings file""" if isinstance(name, PlayerRecord): return name try: return getKnownPlayers()[name.lower()] except KeyError: raise ValueError("given player name '%s' is not a known player definition"%(name)) ################################################################################ def delPlayer(name): """forget about a previously defined PlayerRecord setting by deleting its disk file""" player = getPlayer(name) try: os.remove(player.filename) # delete from disk except IOError: pass # shouldn't happen, but don't crash if the disk data doesn't exist try: del getKnownPlayers()[player.name] # forget object from cache except: pass return player # leave it to the caller to process further or allow deallocation ################################################################################ def buildPlayer(name, ptype, cmd='', options={}, difficulty=None, rating=None, race=None, obs=False, pid=0, raceDefault=c.RANDOM): newRating = rating or c.DEFAULT_RATING if not isinstance(difficulty, c.ComputerDifficulties): newDiff = c.ComputerDifficulties(difficulty) else: newDiff = difficulty ret = PlayerRecord(name=name, type=ptype, initCmd=cmd, initOptions=options, difficulty=newDiff, rating=newRating, raceDefault=raceDefault) if bool(race or obs or pid): return PlayerPreGame(ret, selectedRace=race, observe=obs, playerID=pid) else: return ret ################################################################################ def getKnownPlayers(reset=False): """identify all of the currently defined players""" global playerCache if not playerCache or reset: jsonFiles = os.path.join(c.PLAYERS_FOLDER, "*.json") for playerFilepath in glob.glob(jsonFiles): filename = os.path.basename(playerFilepath) name = re.sub("^player_", "", filename) name = re.sub("\.json$", "", name) player = PlayerRecord(name) playerCache[player.name] = player return playerCache ################################################################################ def getBlizzBotPlayers(): """identify all of Blizzard's built-in bots""" ret = {} for pName,p in iteritems(getKnownPlayers()): if p.isComputer: ret[pName] = p return ret 
################################################################################ def getStaleRecords(limit=c.DEFAULT_TIME_LIMIT): ret = [] now = time.time() seconds = float(limit) * 24 * 60 * 60 # convert days to seconds maxNoAct= min(seconds, c.NO_ACTIVITY_LIMIT * 24 * 60 * 60) # convert days to seconds for player in itervalues(getKnownPlayers()): if player.matches: # can only determine time since last match if matches exist sinceLastMatch, match = sorted( # player's last match is the shortest time since now [(now - m.endTime, m) for m in player.matches])[0] if sinceLastMatch > seconds: ret.append(player) else: # if no matches, verify player's time since creation for sufficient time to play a match sinceCreation = now - player.created if sinceCreation > maxNoAct: # players created > 10 days ago without any recorded matches are identifed as stale ret.append(player) return ret ################################################################################ def removeStaleRecords(**kwargs): """identify all currently stale records and remove them""" return [delPlayer(record) for record in getStaleRecords(**kwargs)] ################################################################################ def _validate(settings): if "created" in settings: raise ValueError("parameter 'created' is expected to be automatmically generated.") if "_matches" in settings: raise ValueError("matches are declared after playing matches, not during init.") ################################################################################ __all__ = ["addPlayer", "getPlayer", "delPlayer", "getKnownPlayers", "getBlizzBotPlayers", "updatePlayer", "getStaleRecords", "removeStaleRecords"]
41.064286
124
0.573317
795778ad5b579edaddca6b50a3bf0dd8d8e97254
21,010
py
Python
zentral/contrib/monolith/forms.py
janheise/zentral
cd809483573301e7d1aa5d3fc2da2c74a62405ab
[ "Apache-2.0" ]
null
null
null
zentral/contrib/monolith/forms.py
janheise/zentral
cd809483573301e7d1aa5d3fc2da2c74a62405ab
[ "Apache-2.0" ]
null
null
null
zentral/contrib/monolith/forms.py
janheise/zentral
cd809483573301e7d1aa5d3fc2da2c74a62405ab
[ "Apache-2.0" ]
null
null
null
from django import forms from django.db import IntegrityError, transaction from django.db.models import F, Max, Q from zentral.contrib.inventory.models import MetaBusinessUnit, Tag from .attachments import MobileconfigFile, PackageFile from .exceptions import AttachmentError from .models import (Catalog, Enrollment, Manifest, ManifestCatalog, ManifestSubManifest, Printer, PrinterPPD, PkgInfoName, SubManifest, SubManifestPkgInfo, SubManifestAttachment) from .ppd import get_ppd_information class PkgInfoSearchForm(forms.Form): name = forms.CharField(label="Name", required=False, widget=forms.TextInput(attrs={"placeholder": "name"})) catalog = forms.ModelChoiceField(queryset=Catalog.objects.filter(archived_at__isnull=True), required=False) def is_initial(self): return not {k: v for k, v in self.cleaned_data.items() if v} class ManifestForm(forms.ModelForm): class Meta: model = Manifest fields = ('meta_business_unit', 'name') def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.instance.pk: self.fields["meta_business_unit"].widget = forms.HiddenInput() self.fields['meta_business_unit'].queryset = MetaBusinessUnit.objects.available_for_api_enrollment() class ManifestSearchForm(forms.Form): name = forms.CharField(label="Name", required=False, widget=forms.TextInput(attrs={"autofocus": "true", "size": 32, "placeholder": "Name or business unit name"})) def get_queryset(self): qs = Manifest.objects.select_related("meta_business_unit").all() name = self.cleaned_data.get("name") if name: qs = qs.filter(Q(name__icontains=name) | Q(meta_business_unit__name__icontains=name)) return qs class SubManifestSearchForm(forms.Form): keywords = forms.CharField(label="Keywords", required=False, widget=forms.TextInput(attrs={"placeholder": "Keywords…"})) def get_queryset(self): qs = SubManifest.objects.select_related("meta_business_unit").all() keywords = self.cleaned_data.get("keywords") if keywords: qs = qs.distinct().filter(Q(name__icontains=keywords) | Q(description__icontains=keywords) | Q(meta_business_unit__name__icontains=keywords) | Q(submanifestpkginfo__pkg_info_name__name__icontains=keywords) | Q(submanifestattachment__name__icontains=keywords)) return qs class SubManifestForm(forms.ModelForm): class Meta: model = SubManifest fields = ('meta_business_unit', 'name', 'description') def clean_meta_business_unit(self): mbu = self.cleaned_data.get("meta_business_unit") if mbu and self.instance.pk: linked_mbu = {manifest.meta_business_unit for _, manifest in self.instance.manifests_with_tags()} if linked_mbu - {mbu}: raise forms.ValidationError( "Cannot restrict this sub manifest to this business unit. " "It is already included in some other business units." 
) return mbu def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['meta_business_unit'].queryset = MetaBusinessUnit.objects.available_for_api_enrollment() class SubManifestPkgInfoForm(forms.ModelForm): excluded_tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), required=False, widget=forms.SelectMultiple(attrs={"class": "hide-if-not-install"})) default_shard = forms.IntegerField(min_value=0, max_value=1000, required=False, initial=100, widget=forms.TextInput(attrs={"class": "hide-if-not-install"})) shard_modulo = forms.IntegerField(min_value=1, max_value=1000, required=False, initial=100, widget=forms.TextInput(attrs={"class": "hide-if-not-install"})) def __init__(self, *args, **kwargs): self.sub_manifest = kwargs.pop('sub_manifest', None) super().__init__(*args, **kwargs) if self.instance.pk: self.sub_manifest = self.instance.sub_manifest # pin qs pin_qs = PkgInfoName.objects.distinct().filter(pkginfo__id__isnull=False, pkginfo__archived_at__isnull=True, pkginfo__update_for=None) if not self.instance.pk: pin_qs = pin_qs.exclude(submanifestpkginfo__sub_manifest=self.sub_manifest) self.fields['pkg_info_name'].queryset = pin_qs if self.instance.pk: self.fields["excluded_tags"].initial = [tag.pk for tag in self.instance.excluded_tags] self.fields["default_shard"].initial = self.instance.default_shard self.fields["shard_modulo"].initial = self.instance.shard_modulo self.fields["pkg_info_name"].widget = forms.HiddenInput() # tag qs tag_qs = Tag.objects.select_related("meta_business_unit", "taxonomy").all() if self.sub_manifest.meta_business_unit: tag_qs = tag_qs.filter( Q(meta_business_unit__isnull=True) | Q(meta_business_unit=self.sub_manifest.meta_business_unit) ) self.fields['excluded_tags'].queryset = tag_qs # tags shards self.tag_shards = [] existing_tag_shard_dict = {} if self.instance.pk: existing_tag_shard_dict = dict(self.instance.tag_shards) for tag in tag_qs: self.tag_shards.append( (tag, tag in existing_tag_shard_dict, existing_tag_shard_dict.get(tag, self.instance.shard_modulo)) ) self.tag_shards.sort(key=lambda t: t[0].name.lower()) def clean(self): super().clean() default_shard = self.cleaned_data.get("default_shard") shard_modulo = self.cleaned_data.get("shard_modulo") if default_shard and shard_modulo and shard_modulo < default_shard: self.add_error("default_shard", "Must be less than or equal to the shard modulo") # options options = {} if self.cleaned_data.get("key") in ("managed_installs", "optional_installs"): excluded_tags = self.cleaned_data.get("excluded_tags") if excluded_tags: options["excluded_tags"] = [tag.name for tag in excluded_tags] if default_shard is not None: options.setdefault("shards", {})["default"] = default_shard if shard_modulo is not None: options.setdefault("shards", {})["modulo"] = shard_modulo tag_shards = {} for tag, _, _ in self.tag_shards: try: shard = int(self.data[f"tag-shard-{tag.pk}"]) except Exception: continue if isinstance(shard_modulo, int): shard = min(shard, shard_modulo) tag_shards[tag.name] = shard if tag_shards: options.setdefault("shards", {})["tags"] = tag_shards self.instance.options = options class Meta: model = SubManifestPkgInfo fields = ('pkg_info_name', 'key', 'condition', 'featured_item') class SubManifestAttachmentForm(forms.ModelForm): def __init__(self, *args, **kwargs): self.sub_manifest = kwargs.pop('sub_manifest') super().__init__(*args, **kwargs) class Meta: model = SubManifestAttachment fields = ('key', 'condition', 'featured_item', 'file',) def clean_file(self): f = 
self.cleaned_data["file"] if not f: raise forms.ValidationError("You need to select a file.") error_messages = [] for file_class in (MobileconfigFile, PackageFile): try: af = file_class(f) except AttachmentError as e: error_messages.append(e.message) else: break else: raise forms.ValidationError(", ".join(error_messages)) self.attachment_file = af return f def save(self, *args, **kwargs): sma = super().save(commit=False) sma.sub_manifest = self.sub_manifest sma.type = self.attachment_file.type sma.name = self.attachment_file.name sma.identifier = self.attachment_file.identifier for i in range(10): # 10 trials max max_version = SubManifestAttachment.objects.filter( sub_manifest=self.sub_manifest, name=sma.name ).aggregate(Max("version"))["version__max"] sma.version = (max_version or 0) + 1 sma.pkg_info = self.attachment_file.make_package_info(sma) try: with transaction.atomic(): sma.save() except IntegrityError: raise else: break else: raise Exception("Could not find valid version #") # trash other versions for sma_with_different_version in (SubManifestAttachment.objects.filter( sub_manifest=self.sub_manifest, name=sma.name ).exclude(version=sma.version)): sma_with_different_version.mark_as_trashed() return sma class SubManifestScriptForm(forms.Form): DEFAULT_INSTALL_CHECK_SCRIPT = ( "#!/bin/bash\n\n" "# WARNING: executed at every Munki run!\n\n" "exit 0" ) name = forms.CharField(max_length=256, required=True) key = forms.ChoiceField(choices=(("managed_installs", "Managed Installs"), ("managed_uninstalls", "Managed Uninstalls")), required=True) description = forms.CharField(required=True, widget=forms.Textarea()) installcheck_script = forms.CharField( label="install check script", help_text="This script is executed to determine if an item needs to be installed. " "A return code of 0 means install is needed.", required=True, initial=DEFAULT_INSTALL_CHECK_SCRIPT, widget=forms.Textarea(), ) postinstall_script = forms.CharField( label="post install script", help_text="The main script.", required=True, widget=forms.Textarea(), ) uninstall_script = forms.CharField( label="uninstall script", help_text="Script that performs an uninstall.", required=False, widget=forms.Textarea(), ) def __init__(self, *args, **kwargs): self.sub_manifest = kwargs.pop('sub_manifest') self.script = kwargs.pop('script', None) super().__init__(*args, **kwargs) def clean(self): super().clean() key = self.cleaned_data["key"] if key == "managed_uninstalls" and not self.cleaned_data["uninstall_script"]: self.add_error("uninstall_script", "Can't be empty if managed uninstalls") return self.cleaned_data def save(self, *args, **kwargs): name = self.cleaned_data["name"] key = self.cleaned_data["key"] pkg_info = { 'display_name': name, 'description': self.cleaned_data["description"], 'autoremove': False, 'unattended_install': True, 'installer_type': 'nopkg', 'uninstallable': True, 'unattended_uninstall': True, 'minimum_munki_version': '2.2', 'minimum_os_version': '10.6.0', # TODO: HARDCODED !!! 
'installcheck_script': self.cleaned_data["installcheck_script"], 'postinstall_script': self.cleaned_data["postinstall_script"], } uninstall_script = self.cleaned_data["uninstall_script"] if uninstall_script: pkg_info["uninstall_method"] = "uninstall_script" pkg_info["uninstall_script"] = uninstall_script if not self.script: self.script = SubManifestAttachment( sub_manifest=self.sub_manifest, type="script", key=key, name=name, pkg_info=pkg_info, version=1, ) self.script.save() else: self.script.name = name self.script.key = key self.script.version = F("version") + 1 self.script.pkg_info = pkg_info self.script.save() self.script.refresh_from_db() self.script.pkg_info["version"] = "{}.0".format(self.script.version) self.script.save() return self.script class AddManifestCatalogForm(forms.Form): catalog = forms.ModelChoiceField(queryset=Catalog.objects.filter(archived_at__isnull=True)) tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.none(), required=False) def __init__(self, *args, **kwargs): self.manifest = kwargs.pop('manifest') super().__init__(*args, **kwargs) field = self.fields['catalog'] field.queryset = field.queryset.exclude(id__in=[mc.catalog_id for mc in self.manifest.manifestcatalog_set.all()]) field = self.fields['tags'] field.queryset = Tag.objects.available_for_meta_business_unit(self.manifest.meta_business_unit) def save(self): mc = ManifestCatalog(manifest=self.manifest, catalog=self.cleaned_data['catalog']) mc.save() mc.tags.set(self.cleaned_data['tags']) return mc class EditManifestCatalogForm(forms.Form): tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.none(), required=False) def __init__(self, *args, **kwargs): self.manifest = kwargs.pop('manifest') self.mc = ManifestCatalog.objects.get(manifest=self.manifest, catalog=kwargs.pop("catalog")) super().__init__(*args, **kwargs) field = self.fields['tags'] field.queryset = Tag.objects.available_for_meta_business_unit(self.manifest.meta_business_unit) field.initial = self.mc.tags.all() def save(self): self.mc.tags.set(self.cleaned_data['tags']) return self.mc class DeleteManifestCatalogForm(forms.Form): catalog = forms.ModelChoiceField(queryset=Catalog.objects.all(), widget=forms.HiddenInput) def __init__(self, *args, **kwargs): self.manifest = kwargs.pop('manifest') super().__init__(*args, **kwargs) field = self.fields['catalog'] field.queryset = field.queryset.filter(id__in=[mc.catalog_id for mc in self.manifest.manifestcatalog_set.all()]) def save(self): ManifestCatalog.objects.filter(manifest=self.manifest, catalog=self.cleaned_data['catalog']).delete() class AddManifestEnrollmentPackageForm(forms.Form): tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.none(), required=False) def __init__(self, *args, **kwargs): self.manifest = kwargs.pop('manifest') super().__init__(*args, **kwargs) field = self.fields['tags'] field.queryset = Tag.objects.available_for_meta_business_unit(self.manifest.meta_business_unit) class ManifestPrinterForm(forms.ModelForm): def __init__(self, *args, **kwargs): self.manifest = kwargs.pop('manifest') super().__init__(*args, **kwargs) field = self.fields['tags'] field.queryset = Tag.objects.available_for_meta_business_unit(self.manifest.meta_business_unit) class Meta: model = Printer fields = ["tags", "name", "location", "scheme", "address", "shared", "error_policy", "ppd", "required_package"] class AddManifestSubManifestForm(forms.Form): sub_manifest = forms.ModelChoiceField(queryset=SubManifest.objects.all()) tags = 
forms.ModelMultipleChoiceField(queryset=Tag.objects.none(), required=False) def __init__(self, *args, **kwargs): self.manifest = kwargs.pop('manifest') super().__init__(*args, **kwargs) field = self.fields['sub_manifest'] field.queryset = (field.queryset.filter(Q(meta_business_unit__isnull=True) | Q(meta_business_unit=self.manifest.meta_business_unit)) .exclude(id__in=[sm.id for sm in self.manifest.sub_manifests()])) field = self.fields['tags'] field.queryset = Tag.objects.available_for_meta_business_unit(self.manifest.meta_business_unit) def save(self): msn = ManifestSubManifest(manifest=self.manifest, sub_manifest=self.cleaned_data['sub_manifest']) msn.save() msn.tags.set(self.cleaned_data['tags']) self.manifest.save() # updated_at return msn class EditManifestSubManifestForm(forms.Form): tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.none(), required=False) def __init__(self, *args, **kwargs): self.manifest = kwargs.pop('manifest') self.msm = ManifestSubManifest.objects.get(manifest=self.manifest, sub_manifest=kwargs.pop("sub_manifest")) super().__init__(*args, **kwargs) field = self.fields['tags'] field.queryset = Tag.objects.available_for_meta_business_unit(self.manifest.meta_business_unit) field.initial = self.msm.tags.all() def save(self): self.msm.tags.set(self.cleaned_data['tags']) return self.msm class DeleteManifestSubManifestForm(forms.Form): sub_manifest = forms.ModelChoiceField(queryset=SubManifest.objects.all(), widget=forms.HiddenInput) def __init__(self, *args, **kwargs): self.manifest = kwargs.pop('manifest') super().__init__(*args, **kwargs) field = self.fields['sub_manifest'] field.queryset = field.queryset.filter(id__in=[msm.sub_manifest_id for msm in self.manifest.manifestsubmanifest_set.all()]) def save(self): number_deleted, _ = ManifestSubManifest.objects.filter(manifest=self.manifest, sub_manifest=self.cleaned_data['sub_manifest']).delete() if number_deleted: self.manifest.save() # updated_at class UploadPPDForm(forms.ModelForm): class Meta: model = PrinterPPD fields = ['file'] def clean_file(self): f = self.cleaned_data["file"] try: self.cleaned_data["ppd_info"] = get_ppd_information(f) except Exception: raise forms.ValidationError("Could not parse PPD file %s." % f.name) return f def save(self, *args, **kwargs): ppd = PrinterPPD.objects.create(**self.cleaned_data["ppd_info"]) uploaded_file = self.cleaned_data["file"] ppd.file.save(uploaded_file.name, uploaded_file) return ppd class EnrollmentForm(forms.ModelForm): class Meta: model = Enrollment fields = "__all__" def __init__(self, *args, **kwargs): self.meta_business_unit = kwargs.pop("meta_business_unit", None) self.manifest = kwargs.pop("manifest", None) assert(self.manifest is None or self.meta_business_unit is None) self.standalone = kwargs.pop("standalone", False) super().__init__(*args, **kwargs) # hide manifest dropdown if manifest is fixed # the value will be set in the clean_manifest method # TODO: kind of a hack if self.manifest: self.fields["manifest"].widget = forms.HiddenInput() self.fields["manifest"].required = False def clean_manifest(self): if self.manifest: return self.manifest else: return self.cleaned_data.get("manifest") def clean(self): cleaned_data = super().clean() if self.meta_business_unit: manifest = cleaned_data.get("manifest") if manifest and manifest.meta_business_unit != self.meta_business_unit: raise forms.ValidationError("Manifest business unit != meta business unit") return cleaned_data
42.02
119
0.606188
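For context on the record above: the `clean()` method of `SubManifestPkgInfoForm` assembles the shard options stored on the instance for `managed_installs` / `optional_installs` items. A rough illustration of the resulting structure follows; the tag names and numeric values are made up, only the keys come from the code.

# Illustrative only: shape of the options dict built by
# SubManifestPkgInfoForm.clean(). Tag names and shard values are invented.
options = {
    "excluded_tags": ["lab", "loaner"],
    "shards": {
        "default": 50,      # default_shard, must be <= the modulo
        "modulo": 100,      # shard_modulo
        "tags": {"canary": 100, "standard": 10},  # per-tag shards, capped at the modulo
    },
}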
795778ca60c3d326796c3934baeacea2e6b807e7
8,713
py
Python
sdk/python/pulumi_azure_nextgen/documentdb/v20191212/sql_resource_sql_stored_procedure.py
pulumi/pulumi-azure-nextgen
452736b0a1cf584c2d4c04666e017af6e9b2c15c
[ "Apache-2.0" ]
31
2020-09-21T09:41:01.000Z
2021-02-26T13:21:59.000Z
sdk/python/pulumi_azure_nextgen/documentdb/v20191212/sql_resource_sql_stored_procedure.py
pulumi/pulumi-azure-nextgen
452736b0a1cf584c2d4c04666e017af6e9b2c15c
[ "Apache-2.0" ]
231
2020-09-21T09:38:45.000Z
2021-03-01T11:16:03.000Z
sdk/python/pulumi_azure_nextgen/documentdb/v20191212/sql_resource_sql_stored_procedure.py
pulumi/pulumi-azure-nextgen
452736b0a1cf584c2d4c04666e017af6e9b2c15c
[ "Apache-2.0" ]
4
2020-09-29T14:14:59.000Z
2021-02-10T20:38:16.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs from ._inputs import * __all__ = ['SqlResourceSqlStoredProcedure'] class SqlResourceSqlStoredProcedure(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, account_name: Optional[pulumi.Input[str]] = None, container_name: Optional[pulumi.Input[str]] = None, database_name: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, options: Optional[pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']]] = None, resource: Optional[pulumi.Input[pulumi.InputType['SqlStoredProcedureResourceArgs']]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, stored_procedure_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None, __name__=None, __opts__=None): """ An Azure Cosmos DB storedProcedure. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] account_name: Cosmos DB database account name. :param pulumi.Input[str] container_name: Cosmos DB container name. :param pulumi.Input[str] database_name: Cosmos DB database name. :param pulumi.Input[str] location: The location of the resource group to which the resource belongs. :param pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request. :param pulumi.Input[pulumi.InputType['SqlStoredProcedureResourceArgs']] resource: The standard JSON format of a storedProcedure :param pulumi.Input[str] resource_group_name: Name of an Azure resource group. :param pulumi.Input[str] stored_procedure_name: Cosmos DB storedProcedure name. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB". 
""" if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if account_name is None and not opts.urn: raise TypeError("Missing required property 'account_name'") __props__['account_name'] = account_name if container_name is None and not opts.urn: raise TypeError("Missing required property 'container_name'") __props__['container_name'] = container_name if database_name is None and not opts.urn: raise TypeError("Missing required property 'database_name'") __props__['database_name'] = database_name __props__['location'] = location if options is None and not opts.urn: raise TypeError("Missing required property 'options'") __props__['options'] = options if resource is None and not opts.urn: raise TypeError("Missing required property 'resource'") __props__['resource'] = resource if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['stored_procedure_name'] = stored_procedure_name __props__['tags'] = tags __props__['name'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb/latest:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200301:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200401:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200601preview:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200901:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:SqlResourceSqlStoredProcedure")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(SqlResourceSqlStoredProcedure, __self__).__init__( 'azure-nextgen:documentdb/v20191212:SqlResourceSqlStoredProcedure', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlResourceSqlStoredProcedure': """ Get an existing SqlResourceSqlStoredProcedure resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return SqlResourceSqlStoredProcedure(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: """ The location of the resource group to which the resource belongs. 
""" return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the ARM resource. """ return pulumi.get(self, "name") @property @pulumi.getter def resource(self) -> pulumi.Output[Optional['outputs.SqlStoredProcedureGetPropertiesResponseResource']]: return pulumi.get(self, "resource") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB". """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ The type of Azure resource. """ return pulumi.get(self, "type") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
55.852564
751
0.682888
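A minimal usage sketch for the generated resource in the record above. The resource name and all argument values are placeholders; the `CreateUpdateOptionsArgs` and `SqlStoredProcedureResourceArgs` types are taken from the class docstring, but the `id` and `body` fields passed to the latter are assumptions based on the Cosmos DB stored-procedure API rather than something shown in this file.

import pulumi_azure_nextgen.documentdb.v20191212 as documentdb

# All names and values below are placeholders.
sproc = documentdb.SqlResourceSqlStoredProcedure(
    "example-sproc",
    account_name="my-cosmos-account",
    database_name="my-sql-database",
    container_name="my-container",
    resource_group_name="my-resource-group",
    stored_procedure_name="sample_sproc",
    options=documentdb.CreateUpdateOptionsArgs(),
    resource=documentdb.SqlStoredProcedureResourceArgs(
        id="sample_sproc",                        # assumed field
        body="function () { /* sproc body */ }",  # assumed field
    ),
)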
79577a601a3ff20754dd317ff6442be7f7b5f719
7,492
py
Python
docs/advanced/abf-file-format/old/2017-10-07 header struct formatting/v03.py
ChristianKeine/pyABF
38e0a68e3f021299a285e44a66a9edac57c9bedd
[ "MIT" ]
null
null
null
docs/advanced/abf-file-format/old/2017-10-07 header struct formatting/v03.py
ChristianKeine/pyABF
38e0a68e3f021299a285e44a66a9edac57c9bedd
[ "MIT" ]
null
null
null
docs/advanced/abf-file-format/old/2017-10-07 header struct formatting/v03.py
ChristianKeine/pyABF
38e0a68e3f021299a285e44a66a9edac57c9bedd
[ "MIT" ]
null
null
null
"""Minimal-case demonstration how to read an ABF2 header and data. https://github.com/swharden/pyABF/""" import os import struct STRUCTS_HEADER="""fFileSignature_4s,fFileVersionNumber_4b,uFileInfoSize_I,lActualEpisodes_I,uFileStartDate_I, uFileStartTimeMS_I,uStopwatchTime_I,nFileType_H,nDataFormat_H,nSimultaneousScan_H,nCRCEnable_H,uFileCRC_I, FileGUID_I,unknown1_I,unknown2_I,unknown3_I,uCreatorVersion_I,uCreatorNameIndex_I,uModifierVersion_I, uModifierNameIndex_I,uProtocolPathIndex_I""" STRUCTS_SECTIONS="""ProtocolSection_IIl,ADCSection_IIl,DACSection_IIl,EpochSection_IIl,ADCPerDACSection_IIl, EpochPerDACSection_IIl,UserListSection_IIl,StatsRegionSection_IIl,MathSection_IIl,StringsSection_IIl, DataSection_IIl,TagSection_IIl,ScopeSection_IIl,DeltaSection_IIl,VoiceTagSection_IIl,SynchArraySection_IIl, AnnotationSection_IIl,StatsSection_IIl""" STRUCTS_SEC_PROTO="""nOperationMode_h,fADCSequenceInterval_f,bEnableFileCompression_b,sUnused_3s, uFileCompressionRatio_I,fSynchTimeUnit_f,fSecondsPerRun_f,lNumSamplesPerEpisode_i,lPreTriggerSamples_i, lEpisodesPerRun_i,lRunsPerTrial_i,lNumberOfTrials_i,nAveragingMode_h,nUndoRunCount_h,nFirstEpisodeInRun_h, fTriggerThreshold_f,nTriggerSource_h,nTriggerAction_h,nTriggerPolarity_h,fScopeOutputInterval_f, fEpisodeStartToStart_f,fRunStartToStart_f,lAverageCount_i,fTrialStartToStart_f,nAutoTriggerStrategy_h, fFirstRunDelayS_f,nChannelStatsStrategy_h,lSamplesPerTrace_i,lStartDisplayNum_i,lFinishDisplayNum_i, nShowPNRawData_h,fStatisticsPeriod_f,lStatisticsMeasurements_i,nStatisticsSaveStrategy_h,fADCRange_f, fDACRange_f,lADCResolution_i,lDACResolution_i,nExperimentType_h,nManualInfoStrategy_h,nCommentsEnable_h, lFileCommentIndex_i,nAutoAnalyseEnable_h,nSignalType_h,nDigitalEnable_h,nActiveDACChannel_h, nDigitalHolding_h,nDigitalInterEpisode_h,nDigitalDACChannel_h,nDigitalTrainActiveLogic_h,nStatsEnable_h, nStatisticsClearStrategy_h,nLevelHysteresis_h,lTimeHysteresis_i,nAllowExternalTags_h,nAverageAlgorithm_h, fAverageWeighting_f,nUndoPromptStrategy_h,nTrialTriggerSource_h,nStatisticsDisplayStrategy_h, nExternalTagType_h,nScopeTriggerOut_h,nLTPType_h,nAlternateDACOutputState_h,nAlternateDigitalOutputState_h, fCellID_3f,nDigitizerADCs_h,nDigitizerDACs_h,nDigitizerTotalDigitalOuts_h,nDigitizerSynchDigitalOuts_h, nDigitizerType_h""" STRUCTS_SEC_ADC="""nADCNum_h,nTelegraphEnable_h,nTelegraphInstrument_h,fTelegraphAdditGain_f, fTelegraphFilter_f,fTelegraphMembraneCap_f,nTelegraphMode_h,fTelegraphAccessResistance_f,nADCPtoLChannelMap_h, nADCSamplingSeq_h,fADCProgrammableGain_f,fADCDisplayAmplification_f,fADCDisplayOffset_f, fInstrumentScaleFactor_f,fInstrumentOffset_f,fSignalGain_f,fSignalOffset_f,fSignalLowpassFilter_f, fSignalHighpassFilter_f,nLowpassFilterType_b,nHighpassFilterType_b,fPostProcessLowpassFilter_f, nPostProcessLowpassFilterType_c,bEnabledDuringPN_b,nStatsChannelPolarity_h,lADCChannelNameIndex_i, lADCUnitsIndex_i""" STRUCTS_SEC_DAC="""nDACNum_h,nTelegraphDACScaleFactorEnable_h,fInstrumentHoldingLevel_f,fDACScaleFactor_f, fDACHoldingLevel_f,fDACCalibrationFactor_f,fDACCalibrationOffset_f,lDACChannelNameIndex_i, lDACChannelUnitsIndex_i,lDACFilePtr_i,lDACFileNumEpisodes_i,nWaveformEnable_h,nWaveformSource_h, nInterEpisodeLevel_h,fDACFileScale_f,fDACFileOffset_f,lDACFileEpisodeNum_i,nDACFileADCNum_h,nConditEnable_h, lConditNumPulses_i,fBaselineDuration_f,fBaselineLevel_f,fStepDuration_f,fStepLevel_f,fPostTrainPeriod_f, fPostTrainLevel_f,nMembTestEnable_h,nLeakSubtractType_h,nPNPolarity_h,fPNHoldingLevel_f,nPNNumADCChannels_h, 
nPNPosition_h,nPNNumPulses_h,fPNSettlingTime_f,fPNInterpulse_f,nLTPUsageOfDAC_h,nLTPPresynapticPulses_h, lDACFilePathIndex_i,fMembTestPreSettlingTimeMS_f,fMembTestPostSettlingTimeMS_f,nLeakSubtractADCIndex_h""" STRUCTS_SEC_EPOCH_PER_DAC="""nEpochNum_h,nDACNum_h,nEpochType_h,fEpochInitLevel_f,fEpochLevelInc_f, lEpochInitDuration_i,lEpochDurationInc_i,lEpochPulsePeriod_i,lEpochPulseWidth_i""" STRUCTS_SEC_EPOCH_DIG="""nEpochNum_h,nEpochDigitalOutput_h""" STRUCTS_SEC_TAGS="""lTagTime_i,sComment_56s,nTagType_h,nVoiceTagNumberorAnnotationIndex_h""" STRUCTS_UNKNOWN="""unknown_c""" class ABFheader: def __init__(self,abfFileName): """Given an ABF2 file, provide simple access to its header and data.""" self.abfFileName=abfFileName self.fb = open(abfFileName,'rb') self.secHeader=self.fileReadStructMap(STRUCTS_HEADER) self.secMap=self.fileReadStructMap(STRUCTS_SECTIONS,76,16) self.secProtocol=self.fileReadSection('ProtocolSection',STRUCTS_SEC_PROTO) self.secADC=self.fileReadSection('ADCSection',STRUCTS_SEC_ADC) self.secDAC=self.fileReadSection('DACSection',STRUCTS_SEC_DAC) self.secEpochPerDac=self.fileReadSection('EpochPerDACSection',STRUCTS_SEC_EPOCH_PER_DAC) self.secEpochDig=self.fileReadSection('EpochSection',STRUCTS_SEC_EPOCH_DIG) self.secTags=self.fileReadSection('TagSection',STRUCTS_SEC_TAGS) self.fb.close() def fileReadStructMap(self,structMap,startByte=0,fixedOffset=None): """Given a string of varName_varFormat structs, read the ABF file and return the objects.""" values={} self.fb.seek(startByte) for structCode in structMap.replace("\n","").split(","): varName,varFormat=structCode.strip().split("_") varVal=struct.unpack(varFormat,self.fb.read(struct.calcsize(varFormat))) values[varName]=varVal if len(varVal)>1 else varVal[0] if fixedOffset: self.fb.read(fixedOffset-struct.calcsize(varFormat)) return values def fileReadSection(self,sectionName,structMap): entries=[] entryStartBlock,entryBytes,entryCount=self.secMap[sectionName] for entryNumber in range(entryCount): entries.append(self.fileReadStructMap(structMap,entryStartBlock*512+entryNumber*entryBytes)) if len(entries)==1: entries=entries[0] return entries def headerMarkdown(self,saveAs=False): """Python ninjery to dump all header information into markdown-formatted text.""" out="# %s Header\n\n"%(os.path.basename(self.abfFileName)) flat={} for sectionName in [x for x in sorted(dir(self)) if x.startswith('sec')]: thing=getattr(self,sectionName) if type(thing) is list: out+="\n## %s (%d entries)\n"%(sectionName.replace("sec","Section: "),len(thing)) originalList,thing=thing,{} for i,d in enumerate(originalList): for key in sorted(d.keys()): if not key in thing: thing[key]=[] thing[key]=thing[key]+[d[key]] else: out+="\n## %s\n"%(sectionName.replace("sec","Section: ")) for key in sorted(thing.keys()): flat[key]=thing[key] out+="* %s: `%s`\n"%(key,thing[key]) if saveAs: with open(saveAs,'w') as f: f.write(out) with open(saveAs+"_flat.md",'w') as f: out="# %s Header (flattened)\n\n"%(os.path.basename(self.abfFileName)) for key in sorted(flat.keys()): out+="* %s: `%s`\n"%(key,flat[key]) f.write(out) return out if __name__=="__main__": abf=ABFheader(R"../../../../data/17o05028_ic_steps.abf") abf.headerMarkdown("sampleOutput.md")
64.586207
110
0.776428
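The header parser in the record above is driven entirely by the `varName_varFormat` struct strings. A minimal standalone sketch of that convention, using two field codes taken from STRUCTS_HEADER and an in-memory buffer instead of an ABF file, is:

import struct

# Same "name_format" parsing idea as fileReadStructMap(), on a fake buffer.
STRUCT_MAP = "fFileSignature_4s,uFileInfoSize_I"
buf = b"ABF2" + struct.pack("I", 512)
offset = 0
values = {}
for code in STRUCT_MAP.split(","):
    name, fmt = code.split("_")
    size = struct.calcsize(fmt)
    val = struct.unpack(fmt, buf[offset:offset + size])
    values[name] = val if len(val) > 1 else val[0]
    offset += size
print(values)  # {'fFileSignature': b'ABF2', 'uFileInfoSize': 512}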
79577aa0d69cf9042b1f81fd5c88d069bb33ca8b
4,168
py
Python
parse_pbs.py
NCAR/pbs_accounting_parser
a81b6da43c28d54a248c948bacca37c2ff03fde8
[ "MIT" ]
null
null
null
parse_pbs.py
NCAR/pbs_accounting_parser
a81b6da43c28d54a248c948bacca37c2ff03fde8
[ "MIT" ]
3
2018-07-12T21:44:07.000Z
2018-07-12T22:37:12.000Z
parse_pbs.py
NCAR/pbs_accounting_parser
a81b6da43c28d54a248c948bacca37c2ff03fde8
[ "MIT" ]
2
2018-10-10T01:28:57.000Z
2018-11-29T17:21:51.000Z
#MIT License # #Copyright (c) 2018 National Center for Atmospheric Research # #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: # #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. # #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #SOFTWARE. from sys import argv,exit from string import split,join import csv import os import time import calendar def parse_acct_record(m): squote = 0 dquote = 0 paren = 0 key = "" value = "" in_key = 1 rval = {} for i in range(0, len(m)): #safety checks if in_key < 0: raise Exception("Unexpected Happened") if in_key < 1 and key == "": raise Exception("Null Key") #parens seem to be super-quotes if m[i] == '(': paren = paren + 1 if m[i] == ')': paren = paren - 1 #single quotes are the next strongest escape character if m[i] == '\'': if squote > 0: squote = squote - 1 else: if dquote > 1: raise Exception("Don't think this can happen") squote = squote + 1 #then double quotes if m[i] == '"': if dquote > 0 and squote == 0: dquote = dquote - 1 else: dquote = dquote + 1 #last, equal signs if m[i] == '=' and squote == 0 and dquote == 0 and paren == 0: if value is "": in_key = 0 continue else: if not (m[i] == '=' and in_key == 0): #pretty sure you can't have an equal in a key print m raise Exception("Unhandled Input", m[i]) if m[i] == ' ' and (squote > 0 or dquote > 0 or paren > 0): if in_key == 1: key += m[i] continue else: value += m[i] continue if m[i] == ' ' and in_key==0: #print "Key: " + key #print "Value: " + value if not key in rval: rval[key] = value else: raise Exception("Duplicate Key") in_key = 1 key = "" value = "" continue if m[i] == ' ': continue raise Exception("Unexpected whitespace") if in_key == 1: key += m[i] if in_key == 0: value += m[i] if in_key == 1 and len(key) > 1: #raise Exception("Partial Record Detected", argv[1]) print "Warning: Gibberish: " + key rval[key.rstrip('\n')] = value.rstrip('\n') return rval def main(): do_output = 0 if len(argv) < 2: print "accounting_file [key_table] [job_table]" exit(1) if len(argv) > 2: do_output = 1 key_table_fd = open(argv[2], 'w') key_table = csv.writer(key_table_fd) record_table_fd = open(argv[3], 'w') record_table = csv.writer(record_table_fd) accounting_file = open(argv[1], 'r') accounting_file_name = os.path.basename(argv[1]) record_id = -1 for entry in accounting_file: record_id = record_id + 1 fields = split(entry, ';') rtime = time.strptime(fields[0], "%m/%d/%Y %H:%M:%S") #localtime() -- local time rtime = calendar.timegm(rtime) etype = fields[1] #[LQSED] entity = fields[2] #"license" or job number message = join(fields[3:]) if etype == 'L': continue #PBS license stats? 
not associated with a job rec = parse_acct_record(message) keystr = [accounting_file_name, record_id, rtime, entity] if do_output == 1: record_table.writerow(keystr + [etype]) for k,v in rec.iteritems(): #print k,v key_table.writerow(keystr + [k, v] ) if do_output == 0: print rec if do_output == 1: key_table_fd.close() record_table_fd.close() accounting_file.close() if __name__ == "__main__": main()
27.602649
82
0.659549
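The accounting records consumed by `main()` in the file above have the form `timestamp;type;entity;key=value key=value ...`. A much-simplified sketch of that layout follows; the sample line and its keys are invented, and the quote/parenthesis escaping handled by `parse_acct_record()` is ignored here.

import calendar
import time

# Hypothetical PBS accounting line; real lines may contain quoted values.
entry = "07/12/2018 10:15:00;E;1234.pbsserver;user=alice queue=regular Exit_status=0"
datestr, etype, entity, message = entry.split(";", 3)
rtime = calendar.timegm(time.strptime(datestr, "%m/%d/%Y %H:%M:%S"))
record = dict(kv.split("=", 1) for kv in message.split())
print(rtime, etype, entity, record)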
79577b021b7614f443da02698bcfa0027a4ad293
5,993
py
Python
cottonformation/res/codeguruprofiler.py
MacHu-GWU/cottonformation-project
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
[ "BSD-2-Clause" ]
5
2021-07-22T03:45:59.000Z
2021-12-17T21:07:14.000Z
cottonformation/res/codeguruprofiler.py
MacHu-GWU/cottonformation-project
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
[ "BSD-2-Clause" ]
1
2021-06-25T18:01:31.000Z
2021-06-25T18:01:31.000Z
cottonformation/res/codeguruprofiler.py
MacHu-GWU/cottonformation-project
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
[ "BSD-2-Clause" ]
2
2021-06-27T03:08:21.000Z
2021-06-28T22:15:51.000Z
# -*- coding: utf-8 -*- """ This module """ import attr import typing from ..core.model import ( Property, Resource, Tag, GetAtt, TypeHint, TypeCheck, ) from ..core.constant import AttrMeta #--- Property declaration --- @attr.s class PropProfilingGroupChannel(Property): """ AWS Object Type = "AWS::CodeGuruProfiler::ProfilingGroup.Channel" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codeguruprofiler-profilinggroup-channel.html Property Document: - ``rp_channelUri``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codeguruprofiler-profilinggroup-channel.html#cfn-codeguruprofiler-profilinggroup-channel-channeluri - ``p_channelId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codeguruprofiler-profilinggroup-channel.html#cfn-codeguruprofiler-profilinggroup-channel-channelid """ AWS_OBJECT_TYPE = "AWS::CodeGuruProfiler::ProfilingGroup.Channel" rp_channelUri: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: "channelUri"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codeguruprofiler-profilinggroup-channel.html#cfn-codeguruprofiler-profilinggroup-channel-channeluri""" p_channelId: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "channelId"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codeguruprofiler-profilinggroup-channel.html#cfn-codeguruprofiler-profilinggroup-channel-channelid""" #--- Resource declaration --- @attr.s class ProfilingGroup(Resource): """ AWS Object Type = "AWS::CodeGuruProfiler::ProfilingGroup" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html Property Document: - ``rp_ProfilingGroupName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-profilinggroupname - ``p_AgentPermissions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-agentpermissions - ``p_AnomalyDetectionNotificationConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-anomalydetectionnotificationconfiguration - ``p_ComputePlatform``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-computeplatform - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-tags """ AWS_OBJECT_TYPE = "AWS::CodeGuruProfiler::ProfilingGroup" rp_ProfilingGroupName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: "ProfilingGroupName"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-profilinggroupname""" p_AgentPermissions: dict = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(dict)), 
metadata={AttrMeta.PROPERTY_NAME: "AgentPermissions"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-agentpermissions""" p_AnomalyDetectionNotificationConfiguration: typing.List[typing.Union['PropProfilingGroupChannel', dict]] = attr.ib( default=None, converter=PropProfilingGroupChannel.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropProfilingGroupChannel), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: "AnomalyDetectionNotificationConfiguration"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-anomalydetectionnotificationconfiguration""" p_ComputePlatform: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "ComputePlatform"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-computeplatform""" p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib( default=None, converter=Tag.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: "Tags"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-tags""" @property def rv_Arn(self) -> GetAtt: """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#aws-resource-codeguruprofiler-profilinggroup-return-values""" return GetAtt(resource=self, attr_name="Arn")
57.625
244
0.776406
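A brief usage sketch for the generated module above. The values are hypothetical, and the leading logical-id argument is assumed to come from the `Resource` base class, which is defined outside this file; only fields declared on `ProfilingGroup` in this record are set.

from cottonformation.res import codeguruprofiler

# Hypothetical logical id and property values.
pg = codeguruprofiler.ProfilingGroup(
    "MyProfilingGroup",
    rp_ProfilingGroupName="my-app-profiling-group",
    p_ComputePlatform="Default",
)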
79577b73eb73b8e43e4f481f7d414bbd29be4203
1,108
py
Python
python/simpleMotorTest.py
BillElliot/k9-chess-angular
ed46e5de4865ff9398102e643c802aae61967a1c
[ "Unlicense" ]
10
2016-11-05T22:49:42.000Z
2020-02-07T15:30:05.000Z
python/simpleMotorTest.py
BillElliot/k9-chess-angular
ed46e5de4865ff9398102e643c802aae61967a1c
[ "Unlicense" ]
37
2017-05-30T21:13:09.000Z
2019-03-15T12:49:30.000Z
python/simpleMotorTest.py
BillElliot/k9-chess-angular
ed46e5de4865ff9398102e643c802aae61967a1c
[ "Unlicense" ]
11
2017-05-23T16:38:38.000Z
2021-03-06T22:34:56.000Z
import math import time from roboclaw import Roboclaw address = 0x80 rc = Roboclaw("/dev/roboclaw",115200) rc.Open() version = rc.ReadVersion(address) if version[0]==False: print "GETVERSION Failed" else: print repr(version[1]) rc.SetM1VelocityPID(address,3000,300,0,708) rc.SetM2VelocityPID(address,3000,300,0,720) clicks = 5000 click_vel = 300 ACCELERATION = 30 while(1): rc.ForwardM1(address,32) #1/4 power forward rc.BackwardM2(address,32) #1/4 power backward time.sleep(2) rc.BackwardM1(address,32) #1/4 power backward rc.ForwardM2(address,32) #1/4 power forward time.sleep(2) rc.BackwardM1(address,0) #Stopped rc.ForwardM2(address,0) #Stopped time.sleep(2) m1duty = 16 m2duty = -16 rc.ForwardBackwardM1(address,64+m1duty) #1/4 power forward rc.ForwardBackwardM2(address,64+m2duty) #1/4 power backward time.sleep(2) m1duty = -16 m2duty = 16 rc.ForwardBackwardM1(address,64+m1duty) #1/4 power backward rc.ForwardBackwardM2(address,64+m2duty) #1/4 power forward time.sleep(2) rc.ForwardBackwardM1(address,64) #Stopped rc.ForwardBackwardM2(address,64) #Stopped time.sleep(2)
24.086957
60
0.755415
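The test loop above drives the controller with two conventions: the `Forward*`/`Backward*` calls take a magnitude (32 ≈ quarter power), while the `ForwardBackward*` calls take a value centred on 64 (64 = stopped, 64±16 ≈ quarter power). A small helper capturing the second convention is sketched below; it reuses `rc` and `address` from the script, and clamping the top of the range at 127 is an assumption not shown in the test itself.

def forward_backward_value(fraction):
    """Map a signed fraction (-1.0 .. 1.0) onto the 64-centred ForwardBackward scale."""
    fraction = max(-1.0, min(1.0, fraction))
    return min(127, max(0, int(round(64 + 64 * fraction))))

rc.ForwardBackwardM1(address, forward_backward_value(0.25))   # ~1/4 power forward
rc.ForwardBackwardM2(address, forward_backward_value(-0.25))  # ~1/4 power backward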
79577b84900bcb0b53f813eabdee16858db2d2c4
70,110
py
Python
netpyne/cell/compartCell.py
jchen6727/netpyne
52edba6b337768100861d83987ae6117d3e6ca62
[ "MIT" ]
1
2020-05-11T14:54:54.000Z
2020-05-11T14:54:54.000Z
netpyne/cell/compartCell.py
jchen6727/netpyne
52edba6b337768100861d83987ae6117d3e6ca62
[ "MIT" ]
null
null
null
netpyne/cell/compartCell.py
jchen6727/netpyne
52edba6b337768100861d83987ae6117d3e6ca62
[ "MIT" ]
null
null
null
""" cell/compartCell.py Contains compartCell class Contributors: salvadordura@gmail.com """ from __future__ import division from __future__ import print_function from __future__ import unicode_literals from __future__ import absolute_import from builtins import super from builtins import next from builtins import zip from builtins import range from builtins import round from builtins import str try: basestring except NameError: basestring = str from future import standard_library standard_library.install_aliases() from numbers import Number from copy import deepcopy from neuron import h # Import NEURON import numpy as np from math import sin, cos from .cell import Cell from ..specs import Dict ############################################################################### # # COMPARTMENTAL CELL CLASS # ############################################################################### # --- Temporarily copied from HNN code; improve so doesn't use h globals --- # global variables for dipole calculation, should be node-independent class CompartCell (Cell): ''' Class for section-based neuron models ''' def __init__ (self, gid, tags, create=True, associateGid=True): super(CompartCell, self).__init__(gid, tags) self.secs = Dict() # dict of sections self.secLists = Dict() # dict of sectionLists if create: self.create() # create cell if associateGid: self.associateGid() # register cell for this node def __str__ (self): try: gid, cty, cmo = self.gid, self.tags['cellType'], self.tags['cellModel'] # only use if these exist return 'compartCell_%s_%s_%d'%(cty, cmo, gid) except: return 'compartCell%d'%self.gid def __repr__ (self): return self.__str__() def create (self): from .. import sim if sim.cfg.recordDipoles: h("dp_total_L2 = 0."); h("dp_total_L5 = 0.") # put here since these variables used in cells # generate random rotation angle for each cell if sim.net.params.rotateCellsRandomly: if isinstance(sim.net.params.rotateCellsRandomly, list): [rotMin, rotMax] = sim.net.params.rotateCellsRandomly else: [rotMin, rotMax] = 0, 6.2832 rand = h.Random() rand.Random123(self.gid) self.randRotationAngle = rand.uniform(0, 6.2832) # 0 to 2pi for propLabel, prop in sim.net.params.cellParams.items(): # for each set of cell properties conditionsMet = 1 for (condKey,condVal) in prop['conds'].items(): # check if all conditions are met if isinstance(condVal, list): if isinstance(condVal[0], Number): if self.tags.get(condKey) < condVal[0] or self.tags.get(condKey) > condVal[1]: conditionsMet = 0 break elif isinstance(condVal[0], basestring): if self.tags.get(condKey) not in condVal: conditionsMet = 0 break elif self.tags.get(condKey) != condVal: conditionsMet = 0 break if conditionsMet: # if all conditions are met, set values for this cell if sim.cfg.includeParamsLabel: if 'label' not in self.tags: self.tags['label'] = [propLabel] # create list of property sets else: self.tags['label'].append(propLabel) # add label of cell property set to list of property sets for this cell if sim.cfg.createPyStruct: self.createPyStruct(prop) if sim.cfg.createNEURONObj: self.createNEURONObj(prop) # add sections, mechanisms, synaptic mechanisms, geometry and topolgy specified by this property set def modify (self, prop): from .. 
import sim conditionsMet = 1 for (condKey,condVal) in prop['conds'].items(): # check if all conditions are met if condKey=='label': if condVal not in self.tags['label']: conditionsMet = 0 break elif isinstance(condVal, list): if isinstance(condVal[0], Number): if self.tags.get(condKey) < condVal[0] or self.tags.get(condKey) > condVal[1]: conditionsMet = 0 break elif isinstance(condVal[0], basestring): if self.tags.get(condKey) not in condVal: conditionsMet = 0 break elif self.tags.get(condKey) != condVal: conditionsMet = 0 break if conditionsMet: # if all conditions are met, set values for this cell if sim.cfg.createPyStruct: self.createPyStruct(prop) if sim.cfg.createNEURONObj: self.createNEURONObj(prop) # add sections, mechanisms, synaptic mechanisms, geometry and topolgy specified by this property set def createPyStruct (self, prop): from .. import sim # set params for all sections for sectName,sectParams in prop['secs'].items(): # create section if sectName not in self.secs: self.secs[sectName] = Dict() # create section dict sec = self.secs[sectName] # pointer to section # add distributed mechanisms if 'mechs' in sectParams: for mechName,mechParams in sectParams['mechs'].items(): if 'mechs' not in sec: sec['mechs'] = Dict() if mechName not in sec['mechs']: sec['mechs'][mechName] = Dict() for mechParamName,mechParamValue in mechParams.items(): # add params of the mechanism sec['mechs'][mechName][mechParamName] = mechParamValue # add ion info if 'ions' in sectParams: for ionName,ionParams in sectParams['ions'].items(): if 'ions' not in sec: sec['ions'] = Dict() if ionName not in sec['ions']: sec['ions'][ionName] = Dict() for ionParamName,ionParamValue in ionParams.items(): # add params of the ion sec['ions'][ionName][ionParamName] = ionParamValue # add synMechs if 'synMechs' in sectParams: for synMech in sectParams['synMechs']: if 'label' in synMech and 'loc' in synMech: self.addSynMech(synLabel=synMech['label'], secLabel=sectName, loc=synMech['loc']) # add point processes if 'pointps' in sectParams: for pointpName,pointpParams in sectParams['pointps'].items(): #if self.tags['cellModel'] == pointpName: # only required if want to allow setting various cell models in same rule if 'pointps' not in sec: sec['pointps'] = Dict() if pointpName not in sec['pointps']: sec['pointps'][pointpName] = Dict() for pointpParamName,pointpParamValue in pointpParams.items(): # add params of the mechanism if pointpParamValue == 'gid': pointpParamValue = self.gid sec['pointps'][pointpName][pointpParamName] = pointpParamValue # add geometry params if 'geom' in sectParams: for geomParamName,geomParamValue in sectParams['geom'].items(): if 'geom' not in sec: sec['geom'] = Dict() if not type(geomParamValue) in [list, dict]: # skip any list or dic params sec['geom'][geomParamName] = geomParamValue # add 3d geometry if 'pt3d' in sectParams['geom']: if 'pt3d' not in sec['geom']: sec['geom']['pt3d'] = [] for ipt, pt3d in enumerate(sectParams['geom']['pt3d']): if sim.net.params.rotateCellsRandomly == True: """Rotate the cell about the Z axis.""" x = pt3d[0] z = pt3d[2] c = cos(self.randRotationAngle) s = sin(self.randRotationAngle) pt3d = (x * c - z * s, pt3d[1], x * s + z * c, pt3d[3]) sectParams['geom']['pt3d'][ipt] = pt3d sec['geom']['pt3d'].append(pt3d) # add topolopgy params if 'topol' in sectParams: if 'topol' not in sec: sec['topol'] = Dict() for topolParamName,topolParamValue in sectParams['topol'].items(): sec['topol'][topolParamName] = topolParamValue # add other params if 'spikeGenLoc' in sectParams: 
sec['spikeGenLoc'] = sectParams['spikeGenLoc'] if 'vinit' in sectParams: sec['vinit'] = sectParams['vinit'] if 'weightNorm' in sectParams: sec['weightNorm'] = sectParams['weightNorm'] if 'threshold' in sectParams: sec['threshold'] = sectParams['threshold'] # add sectionLists if 'secLists' in prop: self.secLists.update(prop['secLists']) # diction of section lists def initV (self): for sec in list(self.secs.values()): if 'vinit' in sec: sec['hObj'].v = sec['vinit'] # Create dictionary of section names with entries to scale section lengths to length along z-axis def __dipoleGetSecLength (self, secName): L = 1 # basal_2 and basal_3 at 45 degree angle to z-axis. if 'basal_2' in secName: L = np.sqrt(2) / 2. elif 'basal_3' in secName: L = np.sqrt(2) / 2. # apical_oblique at 90 perpendicular to z-axis elif 'apical_oblique' in secName: L = 0. # All basalar dendrites extend along negative z-axis if 'basal' in secName: L = -L return L # insert dipole in section def __dipoleInsert(self, secName, sec): # insert dipole mech (dipole.mod) try: sec['hObj'].insert('dipole') except: print('Error inserting dipole mechanism') return -1 # insert Dipole point process (dipole_pp.mod) try: sec['hDipole_pp'] = h.Dipole(1.0, sec = sec['hObj']) except: print('Error inserting Dipole point process') return -1 dpp = sec['hDipole_pp'] # assign internal resistance values to dipole point process (dpp) dpp.ri = h.ri(1, sec=sec['hObj']) # sets pointers in dipole mod file to the correct locations -- h.setpointer(ref, ptr, obj) h.setpointer(sec['hObj'](0.99)._ref_v, 'pv', dpp) if self.tags['cellType'].startswith('L2'): h.setpointer(h._ref_dp_total_L2, 'Qtotal', dpp) elif self.tags['cellType'].startswith('L5'): h.setpointer(h._ref_dp_total_L5, 'Qtotal', dpp) # gives INTERNAL segments of the section, non-endpoints # creating this because need multiple values simultaneously loc = np.array([seg.x for seg in sec['hObj']]) # these are the positions, including 0 but not L pos = np.array([seg.x for seg in sec['hObj'].allseg()]) # diff in yvals, scaled against the pos np.array. y_long as in longitudinal y_scale = (self.__dipoleGetSecLength(secName) * sec['hObj'].L) * pos # y_long = (h.y3d(1, sec=sect) - h.y3d(0, sec=sect)) * pos # diff values calculate length between successive section points y_diff = np.diff(y_scale) for i in range(len(loc)): # assign the ri value to the dipole sec['hObj'](loc[i]).dipole.ri = h.ri(loc[i], sec=sec['hObj']) # range variable 'dipole' # set pointers to previous segment's voltage, with boundary condition if i > 0: h.setpointer(sec['hObj'](loc[i-1])._ref_v, 'pv', sec['hObj'](loc[i]).dipole) else: h.setpointer(sec['hObj'](0)._ref_v, 'pv', sec['hObj'](loc[i]).dipole) # set aggregate pointers h.setpointer(dpp._ref_Qsum, 'Qsum', sec['hObj'](loc[i]).dipole) if self.tags['cellType'].startswith('L2'): h.setpointer(h._ref_dp_total_L2, 'Qtotal', sec['hObj'](loc[i]).dipole) elif self.tags['cellType'].startswith('L5'): h.setpointer(h._ref_dp_total_L5, 'Qtotal', sec['hObj'](loc[i]).dipole) # add ztan values sec['hObj'](loc[i]).dipole.ztan = y_diff[i] # set the pp dipole's ztan value to the last value from y_diff dpp.ztan = y_diff[-1] def createNEURONObj (self, prop): from .. 
import sim excludeMechs = ['dipole'] # dipole is special case mechInsertError = False # flag to print error inserting mechanisms # set params for all sections for sectName,sectParams in prop['secs'].items(): # create section if sectName not in self.secs: self.secs[sectName] = Dict() # create sect dict if doesn't exist if 'hObj' not in self.secs[sectName] or self.secs[sectName]['hObj'] in [None, {}, []]: self.secs[sectName]['hObj'] = h.Section(name=sectName, cell=self) # create h Section object sec = self.secs[sectName] # pointer to section # set geometry params if 'geom' in sectParams: for geomParamName,geomParamValue in sectParams['geom'].items(): if not type(geomParamValue) in [list, dict]: # skip any list or dic params setattr(sec['hObj'], geomParamName, geomParamValue) # set 3d geometry if 'pt3d' in sectParams['geom']: h.pt3dclear(sec=sec['hObj']) if sim.cfg.pt3dRelativeToCellLocation: x = self.tags['x'] y = -self.tags['y'] if sim.cfg.invertedYCoord else self.tags['y'] # Neuron y-axis positive = upwards, so assume pia=0 and cortical depth = neg z = self.tags['z'] else: x = y = z = 0 for pt3d in sectParams['geom']['pt3d']: h.pt3dadd(x+pt3d[0], y+pt3d[1], z+pt3d[2], pt3d[3], sec=sec['hObj']) # add distributed mechanisms if 'mechs' in sectParams: for mechName,mechParams in sectParams['mechs'].items(): if mechName not in sec['mechs']: sec['mechs'][mechName] = Dict() try: sec['hObj'].insert(mechName) except: mechInsertError = True if sim.cfg.verbose: print('# Error inserting %s mechanims in %s section! (check mod files are compiled)'%(mechName, sectName)) continue for mechParamName,mechParamValue in mechParams.items(): # add params of the mechanism mechParamValueFinal = mechParamValue for iseg,seg in enumerate(sec['hObj']): # set mech params for each segment if type(mechParamValue) in [list]: if len(mechParamValue) == 1: mechParamValueFinal = mechParamValue[0] else: mechParamValueFinal = mechParamValue[iseg] if mechParamValueFinal is not None: # avoid setting None values setattr(getattr(seg, mechName), mechParamName,mechParamValueFinal) # add ions if 'ions' in sectParams: for ionName,ionParams in sectParams['ions'].items(): if ionName not in sec['ions']: sec['ions'][ionName] = Dict() try: sec['hObj'].insert(ionName+'_ion') # insert mechanism except: mechInsertError = True if sim.cfg.verbose: print('# Error inserting %s ion in %s section!'%(ionName, sectName)) continue for ionParamName,ionParamValue in ionParams.items(): # add params of the mechanism ionParamValueFinal = ionParamValue for iseg,seg in enumerate(sec['hObj']): # set ion params for each segment if type(ionParamValue) in [list]: ionParamValueFinal = ionParamValue[iseg] if ionParamName == 'e': setattr(seg, ionParamName+ionName, ionParamValueFinal) elif ionParamName == 'o': setattr(seg, '%so'%ionName, ionParamValueFinal) h('%so0_%s_ion = %s'%(ionName,ionName,ionParamValueFinal)) # e.g. cao0_ca_ion, the default initial value elif ionParamName == 'i': setattr(seg, '%si'%ionName, ionParamValueFinal) h('%si0_%s_ion = %s'%(ionName,ionName,ionParamValueFinal)) # e.g. 
cai0_ca_ion, the default initial value #if sim.cfg.verbose: print("Updated ion: %s in %s, e: %s, o: %s, i: %s" % \ # (ionName, sectName, seg.__getattribute__('e'+ionName), seg.__getattribute__(ionName+'o'), seg.__getattribute__(ionName+'i'))) # add synMechs (only used when loading) if 'synMechs' in sectParams: for synMech in sectParams['synMechs']: if 'label' in synMech and 'loc' in synMech: self.addSynMech(synLabel=synMech['label'], secLabel=sectName, loc=synMech['loc']) # add point processes if 'pointps' in sectParams: for pointpName,pointpParams in sectParams['pointps'].items(): #if self.tags['cellModel'] == pointpParams: # only required if want to allow setting various cell models in same rule if pointpName not in sec['pointps']: sec['pointps'][pointpName] = Dict() pointpObj = getattr(h, pointpParams['mod']) loc = pointpParams['loc'] if 'loc' in pointpParams else 0.5 # set location sec['pointps'][pointpName]['hObj'] = pointpObj(loc, sec = sec['hObj']) # create h Pointp object (eg. h.Izhi2007b) for pointpParamName,pointpParamValue in pointpParams.items(): # add params of the point process if pointpParamValue == 'gid': pointpParamValue = self.gid if pointpParamName not in ['mod', 'loc', 'vref', 'synList'] and not pointpParamName.startswith('_'): setattr(sec['pointps'][pointpName]['hObj'], pointpParamName, pointpParamValue) if 'params' in self.tags.keys(): # modify cell specific params for pointpParamName,pointpParamValue in self.tags['params'].items(): setattr(sec['pointps'][pointpName]['hObj'], pointpParamName, pointpParamValue) # set topology for sectName,sectParams in prop['secs'].items(): # iterate sects again for topology (ensures all exist) sec = self.secs[sectName] # pointer to section # pointer to child sec if 'topol' in sectParams: if sectParams['topol']: sec['hObj'].connect(self.secs[sectParams['topol']['parentSec']]['hObj'], sectParams['topol']['parentX'], sectParams['topol']['childX']) # make topol connection # add dipoles if sim.cfg.recordDipoles: for sectName,sectParams in prop['secs'].items(): sec = self.secs[sectName] if 'mechs' in sectParams and 'dipole' in sectParams['mechs']: self.__dipoleInsert(sectName, sec) # add dipole mechanisms to each section # Print message about error inserting mechanisms if mechInsertError: print("ERROR: Some mechanisms and/or ions were not inserted (for details run with cfg.verbose=True). 
Make sure the required mod files are compiled.") def addSynMechsNEURONObj(self): # set params for all sections for sectName,sectParams in self.secs.items(): # add synMechs (only used when loading) if 'synMechs' in sectParams: for synMech in sectParams['synMechs']: if 'label' in synMech and 'loc' in synMech: self.addSynMech(synLabel=synMech['label'], secLabel=sectName, loc=synMech['loc']) # Create NEURON objs for conns and syns if included in prop (used when loading) def addStimsNEURONObj(self): # assumes python structure exists for stimParams in self.stims: if stimParams['type'] == 'NetStim': self.addNetStim(stimParams, stimContainer=stimParams) elif stimParams['type'] in ['IClamp', 'VClamp', 'SEClamp', 'AlphaSynapse']: stim = getattr(h, stimParams['type'])(self.secs[stimParams['sec']]['hObj'](stimParams['loc'])) stimProps = {k:v for k,v in stimParams.items() if k not in ['label', 'type', 'source', 'loc', 'sec', 'hObj']} for stimPropName, stimPropValue in stimProps.items(): # set mechanism internal stimParams if isinstance(stimPropValue, list): if stimPropName == 'amp': for i,val in enumerate(stimPropValue): stim.amp[i] = val elif stimPropName == 'dur': for i,val in enumerate(stimPropValue): stim.dur[i] = val #setattr(stim, stimParamName._ref_[0], stimParamValue[0]) else: setattr(stim, stimPropName, stimPropValue) stimParams['hObj'] = stim # add stim object to dict in stims list # Create NEURON objs for conns and syns if included in prop (used when loading) def addConnsNEURONObj(self): # Note: loading connections to point process (eg. Izhi2007a) not yet supported # Note: assumes weight is in index 0 (netcon.weight[0]) from .. import sim # assumes python structure exists for conn in self.conns: # set postsyn target synMech = next((synMech for synMech in self.secs[conn['sec']]['synMechs'] if synMech['label']==conn['synMech'] and synMech['loc']==conn['loc']), None) if not synMech: synMech = self.addSynMech(conn['synMech'], conn['sec'], conn['loc']) #continue # go to next conn try: postTarget = synMech['hObj'] except: print('\nError: no synMech available for conn: ', conn) print(' cell tags: ',self.tags) print(' cell synMechs: ',self.secs[conn['sec']]['synMechs']) import sys sys.exit() # create NetCon if conn['preGid'] == 'NetStim': netstim = next((stim['hObj'] for stim in self.stims if stim['source']==conn['preLabel']), None) if netstim: netcon = h.NetCon(netstim, postTarget) else: continue else: #cell = next((c for c in sim.net.cells if c.gid == conn['preGid']), None) netcon = sim.pc.gid_connect(conn['preGid'], postTarget) netcon.weight[0] = conn['weight'] netcon.delay = conn['delay'] #netcon.threshold = conn.get('threshold', sim.net.params.defaultThreshold) conn['hObj'] = netcon # Add plasticity if conn.get('plast'): self._addConnPlasticity(conn['plast'], self.secs[conn['sec']], netcon, 0) def associateGid (self, threshold = None): from .. 
import sim if self.secs: if sim.cfg.createNEURONObj: sim.pc.set_gid2node(self.gid, sim.rank) # this is the key call that assigns cell gid to a particular node sec = next((secParams for secName,secParams in self.secs.items() if 'spikeGenLoc' in secParams), None) # check if any section has been specified as spike generator if sec: loc = sec['spikeGenLoc'] # get location of spike generator within section else: #sec = self.secs['soma'] if 'soma' in self.secs else self.secs[self.secs.keys()[0]] # use soma if exists, otherwise 1st section sec = next((sec for secName, sec in self.secs.items() if len(sec['topol']) == 0), self.secs[list(self.secs.keys())[0]]) # root sec (no parents) loc = 0.5 nc = None if 'pointps' in sec: # if no syns, check if point processes with 'vref' (artificial cell) for pointpName, pointpParams in sec['pointps'].items(): if 'vref' in pointpParams: nc = h.NetCon(getattr(sec['pointps'][pointpName]['hObj'], '_ref_'+pointpParams['vref']), None, sec=sec['hObj']) break if not nc: # if still haven't created netcon nc = h.NetCon(sec['hObj'](loc)._ref_v, None, sec=sec['hObj']) if 'threshold' in sec: threshold = sec['threshold'] threshold = threshold if threshold is not None else sim.net.params.defaultThreshold nc.threshold = threshold sim.pc.cell(self.gid, nc, 1) # associate a particular output stream of events del nc # discard netcon sim.net.gid2lid[self.gid] = len(sim.net.gid2lid) def addSynMech (self, synLabel, secLabel, loc, oneSynPerNetcon = False): from .. import sim synMechParams = sim.net.params.synMechParams.get(synLabel) # get params for this synMech sec = self.secs.get(secLabel, None) # add synaptic mechanism to python struct if 'synMechs' not in sec or not isinstance(sec['synMechs'], list): sec['synMechs'] = [] if synMechParams and sec: # if both the synMech and the section exist if sim.cfg.createPyStruct and sim.cfg.addSynMechs: synMech = next((synMech for synMech in sec['synMechs'] if synMech['label']==synLabel and synMech['loc']==loc), None) if not synMech or oneSynPerNetcon: # if synMech not in section, or need multiple synMech per section, then create synMech = Dict({'label': synLabel, 'loc': loc}) for paramName, paramValue in synMechParams.items(): synMech[paramName] = paramValue sec['synMechs'].append(synMech) else: synMech = None if sim.cfg.createNEURONObj and sim.cfg.addSynMechs: # add synaptic mechanism NEURON objectes if not synMech: # if pointer not created in createPyStruct, then check synMech = next((synMech for synMech in sec['synMechs'] if synMech['label']==synLabel and synMech['loc']==loc), None) if oneSynPerNetcon: synMech = None if not synMech: # if still doesnt exist, then create synMech = Dict() sec['synMechs'].append(synMech) if not synMech.get('hObj'): # if synMech doesn't have NEURON obj, then create synObj = getattr(h, synMechParams['mod']) synMech['hObj'] = synObj(loc, sec=sec['hObj']) # create h Syn object (eg. h.Exp2Syn) for synParamName,synParamValue in synMechParams.items(): # add params of the synaptic mechanism if synParamName not in ['label', 'mod', 'selfNetCon', 'loc']: setattr(synMech['hObj'], synParamName, synParamValue) elif synParamName == 'selfNetcon': # create self netcon required for some synapses (eg. 
homeostatic) secLabelNetCon = synParamValue.get('sec', 'soma') locNetCon = synParamValue.get('loc', 0.5) secNetCon = self.secs.get(secLabelNetCon, None) synMech['hObj'] = h.NetCon(secNetCon['hObj'](locNetCon)._ref_v, synMech[''], sec=secNetCon['hObj']) for paramName,paramValue in synParamValue.items(): if paramName == 'weight': synMech['hObj'].weight[0] = paramValue elif paramName not in ['sec', 'loc']: setattr(synMech['hObj'], paramName, paramValue) else: synMech = None return synMech def modifySynMechs (self, params): from .. import sim conditionsMet = 1 if 'cellConds' in params: if conditionsMet: for (condKey,condVal) in params['cellConds'].items(): # check if all conditions are met # check if conditions met if isinstance(condVal, list): if self.tags.get(condKey) < condVal[0] or self.tags.get(condKey) > condVal[1]: conditionsMet = 0 break elif self.tags.get(condKey) != condVal: conditionsMet = 0 break if conditionsMet: for secLabel,sec in self.secs.items(): for synMech in sec['synMechs']: conditionsMet = 1 if 'conds' in params: for (condKey,condVal) in params['conds'].items(): # check if all conditions are met # check if conditions met if condKey == 'sec': if condVal != secLabel: conditionsMet = 0 break elif isinstance(condVal, list) and isinstance(condVal[0], Number): if synMech.get(condKey) < condVal[0] or synMech.get(condKey) > condVal[1]: conditionsMet = 0 break elif isinstance(condVal, list) and isinstance(condVal[0], basestring): if synMech.get(condKey) not in condVal: conditionsMet = 0 break elif synMech.get(condKey) != condVal: conditionsMet = 0 break if conditionsMet: # if all conditions are met, set values for this cell exclude = ['conds', 'cellConds', 'label', 'mod', 'selfNetCon', 'loc'] for synParamName,synParamValue in {k: v for k,v in params.items() if k not in exclude}.items(): if sim.cfg.createPyStruct: synMech[synParamName] = synParamValue if sim.cfg.createNEURONObj: try: setattr(synMech['hObj'], synParamName, synParamValue) except: print('Error setting %s=%s on synMech' % (synParamName, str(synParamValue))) def addConn (self, params, netStimParams = None): from .. import sim # threshold = params.get('threshold', sim.net.params.defaultThreshold) # depreacated -- use threshold in preSyn cell sec if params.get('weight') is None: params['weight'] = sim.net.params.defaultWeight # if no weight, set default if params.get('delay') is None: params['delay'] = sim.net.params.defaultDelay # if no delay, set default if params.get('loc') is None: params['loc'] = 0.5 # if no loc, set default if params.get('synsPerConn') is None: params['synsPerConn'] = 1 # if no synsPerConn, set default # Get list of section labels secLabels = self._setConnSections(params) if secLabels == -1: return # if no section available exit func # Warning or error if self connections if params['preGid'] == self.gid: # Only allow self connections if option selected by user # !!!! AD HOC RULE FOR HNN!!! 
- or 'soma' in secLabels and not self.tags['cellType'] == 'L5Basket' (removed) if sim.cfg.allowSelfConns: if sim.cfg.verbose: print(' Warning: creating self-connection on cell gid=%d, section=%s '%(self.gid, params.get('sec'))) else: if sim.cfg.verbose: print(' Error: attempted to create self-connection on cell gid=%d, section=%s '%(self.gid, params.get('sec'))) return # if self-connection return # Weight weights = self._setConnWeights(params, netStimParams, secLabels) weightIndex = 0 # set default weight matrix index # Delays if isinstance(params['delay'],list): delays = params['delay'] else: delays = [params['delay']] * params['synsPerConn'] # Check if target is point process (artificial cell) with V not in section pointp, weightIndex = self._setConnPointP(params, secLabels, weightIndex) if pointp == -1: return # Add synaptic mechanisms if not pointp: # check not a point process synMechs, synMechSecs, synMechLocs = self._setConnSynMechs(params, secLabels) if synMechs == -1: return # Adapt weight based on section weightNorm (normalization based on section location) for i,(sec,loc) in enumerate(zip(synMechSecs, synMechLocs)): if 'weightNorm' in self.secs[sec] and isinstance(self.secs[sec]['weightNorm'], list): nseg = self.secs[sec]['geom']['nseg'] weights[i] = weights[i] * self.secs[sec]['weightNorm'][int(round(loc*nseg))-1] # Create connections for i in range(params['synsPerConn']): if not sim.cfg.allowConnsWithWeight0 and weights[i] == 0.0: continue if netStimParams: netstim = self.addNetStim(netStimParams) if params.get('gapJunction', False) == True: # only run for post gap junc (not pre) preGapId = 1e9*sim.rank + sim.net.lastGapId # global index for presyn gap junc postGapId = preGapId + 1 # global index for postsyn gap junc sim.net.lastGapId += 2 # keep track of num of gap juncs in this node if not getattr(sim.net, 'preGapJunctions', False): sim.net.preGapJunctions = [] # if doesn't exist, create list to store presynaptic cell gap junctions preGapParams = {'gid': params['preGid'], 'preGid': self.gid, 'sec': params.get('preSec', 'soma'), 'loc': params.get('preLoc', 0.5), 'weight': params['weight'], 'gapId': preGapId, 'preGapId': postGapId, 'synMech': params['synMech'], 'gapJunction': 'pre'} sim.net.preGapJunctions.append(preGapParams) # add conn params to add pre gap junction later # Python Structure if sim.cfg.createPyStruct: connParams = {k:v for k,v in params.items() if k not in ['synsPerConn']} connParams['weight'] = weights[i] connParams['delay'] = delays[i] if not pointp: connParams['sec'] = synMechSecs[i] connParams['loc'] = synMechLocs[i] if netStimParams: connParams['preGid'] = 'NetStim' connParams['preLabel'] = netStimParams['source'] if params.get('gapJunction', 'False') == True: # only run for post gap junc (not pre) connParams['gapId'] = postGapId connParams['preGapId'] = preGapId connParams['gapJunction'] = 'post' self.conns.append(Dict(connParams)) else: # do not fill in python structure (just empty dict for NEURON obj) self.conns.append(Dict()) # NEURON objects if sim.cfg.createNEURONObj: # gap junctions if params.get('gapJunction', 'False') in [True, 'pre', 'post']: # create NEURON obj for pre and post synMechs[i]['hObj'].weight = weights[i] sourceVar = self.secs[synMechSecs[i]]['hObj'](synMechLocs[i])._ref_v targetVar = synMechs[i]['hObj']._ref_vpeer # assumes variable is vpeer -- make a parameter sec = self.secs[synMechSecs[i]] sim.pc.target_var(targetVar, connParams['gapId']) self.secs[synMechSecs[i]]['hObj'].push() sim.pc.source_var(sourceVar, 
connParams['preGapId']) h.pop_section() netcon = None # connections using NetCons else: if pointp: sec = self.secs[secLabels[0]] postTarget = sec['pointps'][pointp]['hObj'] # local point neuron else: sec = self.secs[synMechSecs[i]] postTarget = synMechs[i]['hObj'] # local synaptic mechanism if netStimParams: netcon = h.NetCon(netstim, postTarget) # create Netcon between netstim and target else: netcon = sim.pc.gid_connect(params['preGid'], postTarget) # create Netcon between global gid and target netcon.weight[weightIndex] = weights[i] # set Netcon weight netcon.delay = delays[i] # set Netcon delay #netcon.threshold = threshold # set Netcon threshold self.conns[-1]['hObj'] = netcon # add netcon object to dict in conns list # Add time-dependent weight shaping if 'shape' in params and params['shape']: temptimevecs = [] tempweightvecs = [] # Default shape pulsetype = params['shape']['pulseType'] if 'pulseType' in params['shape'] else 'square' pulsewidth = params['shape']['pulseWidth'] if 'pulseWidth' in params['shape'] else 100.0 pulseperiod = params['shape']['pulsePeriod'] if 'pulsePeriod' in params['shape'] else 100.0 # Determine on-off switching time pairs for stimulus, where default is always on if 'switchOnOff' not in params['shape']: switchtimes = [0, sim.cfg.duration] else: if not params['shape']['switchOnOff'] == sorted(params['shape']['switchOnOff']): raise Exception('On-off switching times for a particular stimulus are not monotonic') switchtimes = deepcopy(params['shape']['switchOnOff']) switchtimes.append(sim.cfg.duration) switchiter = iter(switchtimes) switchpairs = list(zip(switchiter,switchiter)) for pair in switchpairs: # Note: Cliff's makestim code is in seconds, so conversions from ms to s occurs in the args. stimvecs = self._shapeStim(width=float(pulsewidth)/1000.0, isi=float(pulseperiod)/1000.0, weight=params['weight'], start=float(pair[0])/1000.0, finish=float(pair[1])/1000.0, stimshape=pulsetype) temptimevecs.extend(stimvecs[0]) tempweightvecs.extend(stimvecs[1]) self.conns[-1]['shapeTimeVec'] = h.Vector().from_python(temptimevecs) self.conns[-1]['shapeWeightVec'] = h.Vector().from_python(tempweightvecs) self.conns[-1]['shapeWeightVec'].play(netcon._ref_weight[weightIndex], self.conns[-1]['shapeTimeVec']) # Add plasticity self._addConnPlasticity(params, sec, netcon, weightIndex) if sim.cfg.verbose: sec = params['sec'] if pointp else synMechSecs[i] loc = params['loc'] if pointp else synMechLocs[i] preGid = netStimParams['source']+' NetStim' if netStimParams else params['preGid'] try: print((' Created connection preGid=%s, postGid=%s, sec=%s, loc=%.4g, synMech=%s, weight=%.4g, delay=%.2f' % (preGid, self.gid, sec, loc, params['synMech'], weights[i], delays[i]))) except: print((' Created connection preGid=%s' % (preGid))) def modifyConns (self, params): from .. 
import sim for conn in self.conns: conditionsMet = 1 if 'conds' in params: for (condKey,condVal) in params['conds'].items(): # check if all conditions are met # choose what to comapare to if condKey in ['postGid']: compareTo = self.gid else: compareTo = conn.get(condKey) # check if conditions met if isinstance(condVal, list) and isinstance(condVal[0], Number): if compareTo < condVal[0] or compareTo > condVal[1]: conditionsMet = 0 break elif isinstance(condVal, list) and isinstance(condVal[0], basestring): if compareTo not in condVal: conditionsMet = 0 break elif compareTo != condVal: conditionsMet = 0 break if conditionsMet and 'postConds' in params: for (condKey,condVal) in params['postConds'].items(): # check if all conditions are met # check if conditions met if isinstance(condVal, list) and isinstance(condVal[0], Number): if self.tags.get(condKey) < condVal[0] or self.tags.get(condKey) > condVal[1]: conditionsMet = 0 break elif isinstance(condVal, list) and isinstance(condVal[0], basestring): if self.tags.get(condKey) not in condVal: conditionsMet = 0 break elif self.tags.get(condKey) != condVal: conditionsMet = 0 break if conditionsMet and 'preConds' in params: print('Warning: modifyConns() does not yet support conditions of presynaptic cells') if conditionsMet: # if all conditions are met, set values for this cell if sim.cfg.createPyStruct: for paramName, paramValue in {k: v for k,v in params.items() if k not in ['conds','preConds','postConds']}.items(): conn[paramName] = paramValue if sim.cfg.createNEURONObj: for paramName, paramValue in {k: v for k,v in params.items() if k not in ['conds','preConds','postConds']}.items(): try: if paramName == 'weight': conn['hObj'].weight[0] = paramValue else: setattr(conn['hObj'], paramName, paramValue) except: print('Error setting %s=%s on Netcon' % (paramName, str(paramValue))) def modifyStims (self, params): from .. 
import sim conditionsMet = 1 if 'cellConds' in params: if conditionsMet: for (condKey,condVal) in params['cellConds'].items(): # check if all conditions are met # check if conditions met if isinstance(condVal, list): if self.tags.get(condKey) < condVal[0] or self.tags.get(condKey) > condVal[1]: conditionsMet = 0 break elif self.tags.get(condKey) != condVal: conditionsMet = 0 break if conditionsMet == 1: for stim in self.stims: conditionsMet = 1 if 'conds' in params: for (condKey,condVal) in params['conds'].items(): # check if all conditions are met # check if conditions met if isinstance(condVal, list) and isinstance(condVal[0], Number): if stim.get(condKey) < condVal[0] or stim.get(condKey) > condVal[1]: conditionsMet = 0 break elif isinstance(condVal, list) and isinstance(condVal[0], basestring): if stim.get(condKey) not in condVal: conditionsMet = 0 break elif stim.get(condKey) != condVal: conditionsMet = 0 break if conditionsMet: # if all conditions are met, set values for this cell if stim['type'] == 'NetStim': # for netstims, find associated netcon conn = next((conn for conn in self.conns if conn['source'] == stim['source']), None) if sim.cfg.createPyStruct: for paramName, paramValue in {k: v for k,v in params.items() if k not in ['conds','cellConds']}.items(): if stim['type'] == 'NetStim' and paramName in ['weight', 'delay']: conn[paramName] = paramValue else: stim[paramName] = paramValue if sim.cfg.createNEURONObj: for paramName, paramValue in {k: v for k,v in params.items() if k not in ['conds','cellConds']}.items(): try: if stim['type'] == 'NetStim': if paramName == 'weight': conn['hObj'].weight[0] = paramValue elif paramName in ['delay']: setattr(conn['hObj'], paramName, paramValue) elif paramName in ['rate']: stim['interval'] = 1.0/paramValue setattr(stim['hObj'], 'interval', stim['interval']) elif paramName in ['interval']: stim['rate'] = 1.0/paramValue setattr(stim['hObj'], 'interval', stim['interval']) else: setattr(stim['hObj'], paramName, paramValue) else: setattr(stim['hObj'], paramName, paramValue) except: print('Error setting %s=%s on stim' % (paramName, str(paramValue))) def addStim (self, params): from .. import sim if not params['sec'] or (isinstance(params['sec'], basestring) and not params['sec'] in list(self.secs.keys())+list(self.secLists.keys())): if sim.cfg.verbose: print(' Warning: no valid sec specified for stim on cell gid=%d so using soma or 1st available. 
Existing secs: %s; params: %s'%(self.gid, list(self.secs.keys()),params)) if 'soma' in self.secs: params['sec'] = 'soma' # use 'soma' if exists elif self.secs: params['sec'] = list(self.secs.keys())[0] # if no 'soma', use first sectiona available else: if sim.cfg.verbose: print(' Error: no Section available on cell gid=%d to add stim'%(self.gid)) return if not 'loc' in params: params['loc'] = 0.5 # default stim location if params['type'] == 'NetStim': if not 'start' in params: params['start'] = 0 # add default start time if not 'number' in params: params['number'] = 1e9 # add default number connParams = {'preGid': params['type'], 'sec': params.get('sec'), 'loc': params.get('loc'), 'synMech': params.get('synMech'), 'weight': params.get('weight'), 'delay': params.get('delay'), 'synsPerConn': params.get('synsPerConn')} # if 'threshold' in params: connParams['threshold'] = params.get('threshold') # depreacted, set threshold in preSyn cell if 'shape' in params: connParams['shape'] = params.get('shape') if 'plast' in params: connParams['plast'] = params.get('plast') netStimParams = {'source': params['source'], 'type': params['type'], 'rate': params['rate'] if 'rate' in params else 1000.0/params['interval'], 'noise': params['noise'] if 'noise' in params else 0.0, 'number': params['number'], 'start': params['start'], 'seed': params['seed'] if 'seed' in params else sim.cfg.seeds['stim']} self.addConn(connParams, netStimParams) elif params['type'] in ['IClamp', 'VClamp', 'SEClamp', 'AlphaSynapse']: sec = self.secs[params['sec']] stim = getattr(h, params['type'])(sec['hObj'](params['loc'])) stimParams = {k:v for k,v in params.items() if k not in ['type', 'source', 'loc', 'sec', 'label']} stringParams = '' for stimParamName, stimParamValue in stimParams.items(): # set mechanism internal params if isinstance(stimParamValue, list): if stimParamName == 'amp': for i,val in enumerate(stimParamValue): stim.amp[i] = val elif stimParamName == 'dur': for i,val in enumerate(stimParamValue): stim.dur[i] = val #setattr(stim, stimParamName._ref_[0], stimParamValue[0]) else: setattr(stim, stimParamName, stimParamValue) stringParams = stringParams + ', ' + stimParamName +'='+ str(stimParamValue) self.stims.append(Dict(params)) # add to python structure self.stims[-1]['hObj'] = stim # add stim object to dict in stims list if sim.cfg.verbose: print((' Added %s %s to cell gid=%d, sec=%s, loc=%.4g%s'% (params['source'], params['type'], self.gid, params['sec'], params['loc'], stringParams))) else: if sim.cfg.verbose: print(('Adding exotic stim (NeuroML 2 based?): %s'% params)) sec = self.secs[params['sec']] stim = getattr(h, params['type'])(sec['hObj'](params['loc'])) stimParams = {k:v for k,v in params.items() if k not in ['type', 'source', 'loc', 'sec', 'label']} stringParams = '' for stimParamName, stimParamValue in stimParams.items(): # set mechanism internal params if isinstance(stimParamValue, list): print("Can't set point process paramaters of type vector eg. VClamp.amp[3]") pass #setattr(stim, stimParamName._ref_[0], stimParamValue[0]) elif 'originalFormat' in params and stimParamName=='originalFormat' and params['originalFormat']=='NeuroML2_stochastic_input': if sim.cfg.verbose: print((' originalFormat: %s'%(params['originalFormat']))) rand = h.Random() stim_ref = params['label'][:params['label'].rfind(self.tags['pop'])] # e.g. 
Stim3_2_popPyrS_2_soma_0_5 -> 2 index_in_stim = int(stim_ref.split('_')[-2]) stim_id = stim_ref.split('_')[0] sim._init_stim_randomizer(rand, stim_id, index_in_stim, sim.cfg.seeds['stim']) rand.negexp(1) stim.noiseFromRandom(rand) params['h%s'%params['originalFormat']] = rand else: if stimParamName in ['weight']: setattr(stim, stimParamName, stimParamValue) stringParams = stringParams + ', ' + stimParamName +'='+ str(stimParamValue) self.stims.append(params) # add to python structure self.stims[-1]['hObj'] = stim # add stim object to dict in stims list if sim.cfg.verbose: print((' Added %s %s to cell gid=%d, sec=%s, loc=%.4g%s'% (params['source'], params['type'], self.gid, params['sec'], params['loc'], stringParams))) def _setConnSections (self, params): from .. import sim # if no section specified or single section specified does not exist if not params.get('sec') or (isinstance(params.get('sec'), basestring) and not params.get('sec') in list(self.secs.keys())+list(self.secLists.keys())): if sim.cfg.verbose: print(' Warning: no valid sec specified for connection to cell gid=%d so using soma or 1st available'%(self.gid)) if 'soma' in self.secs: params['sec'] = 'soma' # use 'soma' if exists elif self.secs: params['sec'] = list(self.secs.keys())[0] # if no 'soma', use first sectiona available else: if sim.cfg.verbose: print(' Error: no Section available on cell gid=%d to add connection'%(self.gid)) sec = -1 # if no Sections available print error and exit return sec secLabels = [params['sec']] # if sectionList or list of sections elif isinstance(params.get('sec'), list) or params.get('sec') in self.secLists: secList = list(params['sec']) if isinstance(params['sec'], list) else list(self.secLists[params['sec']]) secLabels = [] for i,section in enumerate(secList): if section not in self.secs: # remove sections that dont exist; and corresponding weight and delay if sim.cfg.verbose: print(' Error: Section %s not available so removing from list of sections for connection to cell gid=%d'%(section, self.gid)) secList.remove(section) if isinstance(params['weight'], list): params['weight'].remove(params['weight'][i]) if isinstance(params['delay'], list): params['delay'].remove(params['delay'][i]) else: secLabels.append(section) # if section is string else: secLabels = [params['sec']] return secLabels def _setConnWeights (self, params, netStimParams, secLabels): from .. import sim if netStimParams: scaleFactor = sim.net.params.scaleConnWeightNetStims elif isinstance(sim.net.params.scaleConnWeightModels, dict) and sim.net.params.scaleConnWeightModels.get(self.tags['cellModel'], None) is not None: scaleFactor = sim.net.params.scaleConnWeightModels[self.tags['cellModel']] # use scale factor specific for this cell model else: scaleFactor = sim.net.params.scaleConnWeight # use global scale factor if isinstance(params['weight'],list): weights = [scaleFactor * w for w in params['weight']] if len(weights) == 1: weights = [weights[0]] * params['synsPerConn'] else: weights = [scaleFactor * params['weight']] * params['synsPerConn'] return weights def _setConnPointP(self, params, secLabels, weightIndex): from .. import sim # Find if any point process with V not calculated in section (artifical cell, eg. 
Izhi2007a) pointp = None if len(secLabels)==1 and 'pointps' in self.secs[secLabels[0]]: # check if point processes with 'vref' (artificial cell) for pointpName, pointpParams in self.secs[secLabels[0]]['pointps'].items(): if 'vref' in pointpParams: # if includes vref param means doesn't use Section v or synaptic mechanisms pointp = pointpName if 'synList' in pointpParams: if params.get('synMech') in pointpParams['synList']: if isinstance(params.get('synMech'), list): weightIndex = [pointpParams['synList'].index(synMech) for synMech in params.get('synMech')] else: weightIndex = pointpParams['synList'].index(params.get('synMech')) # udpate weight index based pointp synList if pointp and params['synsPerConn'] > 1: # only single synapse per connection rule allowed if sim.cfg.verbose: print(' Error: Multiple synapses per connection rule not allowed for cells where V is not in section (cell gid=%d) '%(self.gid)) return -1, weightIndex return pointp, weightIndex def _setConnSynMechs (self, params, secLabels): from .. import sim synsPerConn = params['synsPerConn'] if not params.get('synMech'): if sim.net.params.synMechParams: # if no synMech specified, but some synMech params defined synLabel = list(sim.net.params.synMechParams.keys())[0] # select first synMech from net params and add syn params['synMech'] = synLabel if sim.cfg.verbose: print(' Warning: no synaptic mechanisms specified for connection to cell gid=%d so using %s '%(self.gid, synLabel)) else: # if no synaptic mechanism specified and no synMech params available if sim.cfg.verbose: print(' Error: no synaptic mechanisms available to add conn on cell gid=%d '%(self.gid)) return -1 # if no Synapse available print error and exit # if desired synaptic mechanism specified in conn params if synsPerConn > 1: # if more than 1 synapse if len(secLabels) == 1: # if single section, create all syns there synMechSecs = [secLabels[0]] * synsPerConn # same section for all if isinstance(params['loc'], list): if len(params['loc']) == synsPerConn: synMechLocs = params['loc'] else: print("Error: The length of the list of locations does not match synsPerConn (distributing uniformly)") synMechSecs, synMechLocs = self._distributeSynsUniformly(secList=secLabels, numSyns=synsPerConn) else: synMechLocs = [i*(1.0/synsPerConn)+1.0/synsPerConn/2 for i in range(synsPerConn)] else: # if multiple sections, distribute syns if sim.cfg.distributeSynsUniformly: synMechSecs, synMechLocs = self._distributeSynsUniformly(secList=secLabels, numSyns=synsPerConn) else: if synsPerConn == len(secLabels): # have list of secs that matches num syns synMechSecs = secLabels if isinstance(params['loc'], list): if len(params['loc']) == synsPerConn: # list of locs matches num syns synMechLocs = params['loc'] else: # list of locs does not match num syns print("Error: The length of the list of locations does not match synsPerConn (with cfg.distributeSynsUniformly = False") return else: # single loc synMechLocs = [params['loc']] * synsPerConn else: print("Error: The length of the list of sections does not match synsPerConn (with cfg.distributeSynsUniformly = False") return else: # if 1 synapse # by default place on 1st section of list and location available synMechSecs = secLabels synMechLocs = params['loc'] if isinstance(params['loc'], list) else [params['loc']] # randomize the section to connect to and move it to beginning of list if sim.cfg.connRandomSecFromList and len(synMechSecs)>1: rand = h.Random() preGid = params['preGid'] if isinstance(params['preGid'], int) else 0 
rand.Random123(sim.hashStr('connSynMechsSecs'), self.gid, preGid) # initialize randomizer pos = int(rand.discunif(0, len(synMechSecs)-1)) synMechSecs[pos], synMechSecs[0] = synMechSecs[0], synMechSecs[pos] if len(synMechLocs)>1: synMechLocs[pos], synMechLocs[0] = synMechLocs[0], synMechLocs[pos] # check flag for nonlinearity -- which requires separate point processes oneSynPerNetcon = sim.net.params.connParams[params['label']]['oneSynPerNetcon'] if ('label' in params and 'oneSynPerNetcon' in sim.net.params.connParams[params['label']]) else False # add synaptic mechanism to section based on synMechSecs and synMechLocs (if already exists won't be added unless nonLinear set to True) synMechs = [self.addSynMech(synLabel=params['synMech'], secLabel=synMechSecs[i], loc=synMechLocs[i], oneSynPerNetcon=oneSynPerNetcon) for i in range(synsPerConn)] return synMechs, synMechSecs, synMechLocs def _distributeSynsUniformly (self, secList, numSyns): from .. import sim from numpy import cumsum if 'L' in self.secs[secList[0]]['geom']: secLengths = [self.secs[s]['geom']['L'] for s in secList] elif getattr(self.secs[secList[0]]['hObj'], 'L', None): secLengths = [self.secs[s]['hObj'].L for s in secList] else: secLengths = [1.0 for s in secList] if sim.cfg.verbose: print((' Section lengths not available to distribute synapses in cell %d'%self.gid)) try: totLength = sum(secLengths) cumLengths = list(cumsum(secLengths)) absLocs = [i*(totLength/numSyns)+totLength/numSyns/2 for i in range(numSyns)] inds = [cumLengths.index(next(x for x in cumLengths if x >= absLoc)) for absLoc in absLocs] secs = [secList[ind] for ind in inds] locs = [(cumLengths[ind] - absLoc) / secLengths[ind] for absLoc,ind in zip(absLocs,inds)] except: secs, locs = [],[] return secs, locs def _addConnPlasticity (self, params, sec, netcon, weightIndex): from .. import sim plasticity = params.get('plast') if plasticity and sim.cfg.createNEURONObj: try: plastMech = getattr(h, plasticity['mech'], None)(0, sec=sec['hObj']) # create plasticity mechanism (eg. 
h.STDP) for plastParamName,plastParamValue in plasticity['params'].items(): # add params of the plasticity mechanism setattr(plastMech, plastParamName, plastParamValue) if plasticity['mech'] == 'STDP': # specific implementation steps required for the STDP mech precon = sim.pc.gid_connect(params['preGid'], plastMech); precon.weight[0] = 1 # Send presynaptic spikes to the STDP adjuster pstcon = sim.pc.gid_connect(self.gid, plastMech); pstcon.weight[0] = -1 # Send postsynaptic spikes to the STDP adjuster h.setpointer(netcon._ref_weight[weightIndex], 'synweight', plastMech) # Associate the STDP adjuster with this weight #self.conns[-1]['hPlastSection'] = plastSection self.conns[-1]['hSTDP'] = plastMech self.conns[-1]['hSTDPprecon'] = precon self.conns[-1]['hSTDPpstcon'] = pstcon self.conns[-1]['STDPdata'] = {'preGid':params['preGid'], 'postGid': self.gid, 'receptor': weightIndex} # Not used; FYI only; store here just so it's all in one place if sim.cfg.verbose: print(' Added STDP plasticity to synaptic mechanism') except: print('Error: exception when adding plasticity using %s mechanism' % (plasticity['mech'])) def getSomaPos(self): ''' Get soma position; Used to calculate seg coords for LFP calc (one per population cell; assumes same morphology)''' n3dsoma = 0 r3dsoma = np.zeros(3) for sec in [sec for secName, sec in self.secs.items() if 'soma' in secName]: sec['hObj'].push() n3d = int(h.n3d()) # get number of n3d points in each section r3d = np.zeros((3, n3d)) # to hold locations of 3D morphology for the current section n3dsoma += n3d for i in range(n3d): r3dsoma[0] += h.x3d(i) r3dsoma[1] += h.y3d(i) r3dsoma[2] += h.z3d(i) h.pop_section() r3dsoma /= n3dsoma return r3dsoma def calcAbsSegCoords(self): ''' Calculate absolute seg coords by translating the relative seg coords -- used for LFP calc''' from .. import sim p3dsoma = self.getSomaPos() pop = self.tags['pop'] morphSegCoords = sim.net.pops[pop]._morphSegCoords # rotated coordinates around z axis first then shift relative to the soma self._segCoords = {} p3dsoma = p3dsoma[np.newaxis].T # trasnpose 1d array to enable matrix calculation self._segCoords['p0'] = p3dsoma + morphSegCoords['p0'] self._segCoords['p1'] = p3dsoma + morphSegCoords['p1'] def setImembPtr(self): """Set PtrVector to point to the i_membrane_""" jseg = 0 for sec in list(self.secs.values()): hSec = sec['hObj'] for iseg, seg in enumerate(hSec): self.imembPtr.pset(jseg, seg._ref_i_membrane_) # notice the underscore at the end (in nA) jseg += 1 def getImemb(self): """Gather membrane currents from PtrVector into imVec (does not need a loop!)""" self.imembPtr.gather(self.imembVec) return self.imembVec.as_numpy() # (nA) def updateShape(self): """Call after h.define_shape() to update cell coords""" x = self.tags['x'] y = -self.tags['y'] # Neuron y-axis positive = upwards, so assume pia=0 and cortical depth = neg z = self.tags['z'] for sec in list(self.secs.values()): if 'geom' in sec and 'pt3d' not in sec['geom']: # only cells that didn't have pt3d before sec['geom']['pt3d'] = [] sec['hObj'].push() n3d = int(h.n3d()) # get number of n3d points in each section for i in range(n3d): # by default L is added in x-axis; shift to y-axis; z increases 100um for each cell so set to 0 pt3d = [h.y3d(i), h.x3d(i), 0, h.diam3d(i)] sec['geom']['pt3d'].append(pt3d) h.pt3dchange(i, x+pt3d[0], y+pt3d[1], z+pt3d[2], pt3d[3], sec=sec['hObj']) h.pop_section()
53.113636
218
0.532178
79577bbbb25b7bf48d1884c2542aa95916e32032
169
py
Python
adaptivedistillation/models/classifiers/__init__.py
wyze-AI/AdaptiveDistillation
5f4575794101dbb1ed2f7e90a2be03856f76041c
[ "MIT" ]
1
2022-01-12T22:24:07.000Z
2022-01-12T22:24:07.000Z
adaptivedistillation/models/classifiers/__init__.py
wyze-AI/AdaptiveDistillation
5f4575794101dbb1ed2f7e90a2be03856f76041c
[ "MIT" ]
null
null
null
adaptivedistillation/models/classifiers/__init__.py
wyze-AI/AdaptiveDistillation
5f4575794101dbb1ed2f7e90a2be03856f76041c
[ "MIT" ]
2
2022-01-12T22:24:17.000Z
2022-02-15T05:46:30.000Z
from .kd_image import KnowledgeDistillationImageClassifier from .image import ImageClassifierAD __all__ = ['KnowledgeDistillationImageClassifier', 'ImageClassifierAD']
33.8
71
0.863905
79577c2d0a6d806410d23bae3e2090f23e9ea3f5
5,884
py
Python
django/core/handlers/exception.py
jpmallarino/django
659d2421c7adbbcd205604002d521d82d6b0b465
[ "BSD-3-Clause", "0BSD" ]
16
2019-08-10T12:24:06.000Z
2020-05-21T09:11:14.000Z
django/core/handlers/exception.py
jpmallarino/django
659d2421c7adbbcd205604002d521d82d6b0b465
[ "BSD-3-Clause", "0BSD" ]
12
2019-08-10T11:55:29.000Z
2020-05-21T04:46:30.000Z
django/core/handlers/exception.py
jpmallarino/django
659d2421c7adbbcd205604002d521d82d6b0b465
[ "BSD-3-Clause", "0BSD" ]
3
2019-08-20T13:29:34.000Z
2020-01-30T22:05:10.000Z
import asyncio import logging import sys from functools import wraps from asgiref.sync import sync_to_async from django.conf import settings from django.core import signals from django.core.exceptions import ( BadRequest, PermissionDenied, RequestDataTooBig, SuspiciousOperation, TooManyFieldsSent, ) from django.http import Http404 from django.http.multipartparser import MultiPartParserError from django.urls import get_resolver, get_urlconf from django.utils.log import log_response from django.views import debug def convert_exception_to_response(get_response): """ Wrap the given get_response callable in exception-to-response conversion. All exceptions will be converted. All known 4xx exceptions (Http404, PermissionDenied, MultiPartParserError, SuspiciousOperation) will be converted to the appropriate response, and all other exceptions will be converted to 500 responses. This decorator is automatically applied to all middleware to ensure that no middleware leaks an exception and that the next middleware in the stack can rely on getting a response instead of an exception. """ if asyncio.iscoroutinefunction(get_response): @wraps(get_response) async def inner(request): try: response = await get_response(request) except Exception as exc: response = await sync_to_async( response_for_exception, thread_sensitive=False )(request, exc) return response return inner else: @wraps(get_response) def inner(request): try: response = get_response(request) except Exception as exc: response = response_for_exception(request, exc) return response return inner def response_for_exception(request, exc): if isinstance(exc, Http404): if settings.DEBUG: response = debug.technical_404_response(request, exc) else: response = get_exception_response( request, get_resolver(get_urlconf()), 404, exc ) elif isinstance(exc, PermissionDenied): response = get_exception_response( request, get_resolver(get_urlconf()), 403, exc ) log_response( "Forbidden (Permission denied): %s", request.path, response=response, request=request, exception=exc, ) elif isinstance(exc, MultiPartParserError): response = get_exception_response( request, get_resolver(get_urlconf()), 400, exc ) log_response( "Bad request (Unable to parse request body): %s", request.path, response=response, request=request, exception=exc, ) elif isinstance(exc, BadRequest): if settings.DEBUG: response = debug.technical_500_response( request, *sys.exc_info(), status_code=400 ) else: response = get_exception_response( request, get_resolver(get_urlconf()), 400, exc ) log_response( "%s: %s", str(exc), request.path, response=response, request=request, exception=exc, ) elif isinstance(exc, SuspiciousOperation): if isinstance(exc, (RequestDataTooBig, TooManyFieldsSent)): # POST data can't be accessed again, otherwise the original # exception would be raised. 
request._mark_post_parse_error() # The request logger receives events for any problematic request # The security logger receives events for all SuspiciousOperations security_logger = logging.getLogger( "django.security.%s" % exc.__class__.__name__ ) security_logger.error( str(exc), exc_info=exc, extra={"status_code": 400, "request": request}, ) if settings.DEBUG: response = debug.technical_500_response( request, *sys.exc_info(), status_code=400 ) else: response = get_exception_response( request, get_resolver(get_urlconf()), 400, exc ) else: signals.got_request_exception.send(sender=None, request=request) response = handle_uncaught_exception( request, get_resolver(get_urlconf()), sys.exc_info() ) log_response( "%s: %s", response.reason_phrase, request.path, response=response, request=request, exception=exc, ) # Force a TemplateResponse to be rendered. if not getattr(response, "is_rendered", True) and callable( getattr(response, "render", None) ): response = response.render() return response def get_exception_response(request, resolver, status_code, exception): try: callback = resolver.resolve_error_handler(status_code) response = callback(request, exception=exception) except Exception: signals.got_request_exception.send(sender=None, request=request) response = handle_uncaught_exception(request, resolver, sys.exc_info()) return response def handle_uncaught_exception(request, resolver, exc_info): """ Processing for any otherwise uncaught exceptions (those that will generate HTTP 500 responses). """ if settings.DEBUG_PROPAGATE_EXCEPTIONS: raise if settings.DEBUG: return debug.technical_500_response(request, *exc_info) # Return an HttpResponse that displays a friendly error message. callback = resolver.resolve_error_handler(500) return callback(request)
31.634409
79
0.633753
79577cea5ed2e6630bfb15daf87dac46d104a959
2,570
py
Python
tests/functional/test_delete_service_with_active_export.py
AKhodus/adcm
98dbf22af3f1c6afa94505e9acaff0ac4088a602
[ "Apache-2.0" ]
null
null
null
tests/functional/test_delete_service_with_active_export.py
AKhodus/adcm
98dbf22af3f1c6afa94505e9acaff0ac4088a602
[ "Apache-2.0" ]
null
null
null
tests/functional/test_delete_service_with_active_export.py
AKhodus/adcm
98dbf22af3f1c6afa94505e9acaff0ac4088a602
[ "Apache-2.0" ]
null
null
null
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint:disable=redefined-outer-name import allure import coreapi import pytest from adcm_client.objects import ADCMClient from adcm_pytest_plugin.utils import get_data_dir from tests.library import errorcodes as err @pytest.fixture() def cluster(sdk_client_fs: ADCMClient): bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'cluster_export')) bundle_import = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'cluster_import')) cluster = bundle.cluster_create("test") cluster_import = bundle_import.cluster_create("cluster_import") service = cluster.service_add(name="hadoop") cluster_import.bind(service) return service @pytest.fixture() def service_import(sdk_client_fs: ADCMClient): bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'cluster_export')) bundle_import = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'cluster_service_import')) cluster = bundle.cluster_create("test") cluster_import = bundle_import.cluster_create("cluster_import") service = cluster.service_add(name="hadoop") import_service = cluster_import.service_add(name='hadoop') import_service.bind(service) return service def test_delete_service_with_with_active_export(cluster): """If host has NO component, than we can simple remove it from cluster.""" with allure.step('Create cluster'): service = cluster with allure.step('Delete service'): with pytest.raises(coreapi.exceptions.ErrorMessage) as e: service.delete() with allure.step('Check service conflict'): err.SERVICE_CONFLICT.equal(e) def test_delete_service_with_active_export_for_service(service_import): """Add test for bind service :param service_import: :return: """ with allure.step('Delete imported to cluster service'): with pytest.raises(coreapi.exceptions.ErrorMessage) as e: service_import.delete() with allure.step('Check service conflict'): err.SERVICE_CONFLICT.equal(e)
37.246377
98
0.754475
79577df0850f728f1531a12a5fce70fb7861df7c
2,464
py
Python
routes/web_directory.py
TalaoDAO/ecole42
2236f24527966195c953f222f9715ee967348b0f
[ "Apache-2.0" ]
1
2021-09-22T16:30:57.000Z
2021-09-22T16:30:57.000Z
routes/web_directory.py
TalaoDAO/credential-repository
d36c694d9e90ead8a35bd8cc5be47c6d951474ba
[ "Apache-2.0" ]
null
null
null
routes/web_directory.py
TalaoDAO/credential-repository
d36c694d9e90ead8a35bd8cc5be47c6d951474ba
[ "Apache-2.0" ]
null
null
null
from flask import request, render_template, session import json import logging import os logging.basicConfig(level=logging.INFO) # dependances LANGUAGES = ['en', 'fr'] IMAGE_PATH = 'static/directory/' def init_app(app, mode) : app.add_url_rule('/directory', view_func=ssi_directory, methods = ['GET', 'POST'], defaults={'mode': mode}) global PATH PATH = mode.sys_path + '/Talao/issuers_directory/' return def ssi_directory(mode) : card = str() if not session.get('language') : session['language'] = request.accept_languages.best_match(LANGUAGES) provider_list = [filename.lower() for filename in os.listdir(PATH)] featured_provider = request.args.get('search', "").lower() + (".json") if featured_provider in provider_list : provider_list.remove(featured_provider) provider_list.insert(0, featured_provider) for filename in provider_list : provider = json.load(open(PATH + filename, 'r')) description_text = str() for description in provider.get('description') : if description['@language'] == session['language'] : description_text = description.get('@value', "") break requirement_text = str() for requirement in provider.get('requirements') : if requirement['@language'] == session['language'] : requirement_text = requirement.get('@value', "") break link = str() for service in provider.get('services') : link_text = str() for description in service.get('description') : if description['@language'] == session['language'] : link_text = description.get('@value', "") break link += """<li class="list-group-item"><a href = '"""+ service.get('link', "") + """' class="card-link">""" + link_text + """</a></li>""" card +=""" <div class="card m-2 border shadow" > <div class="card-body"> <div> <a href='""" + provider.get('website',"") + """' > <div class="text-center"> <img src='""" + IMAGE_PATH + provider.get('image', "") + """' class="img-thumbnail " alt="No image"> </div> </a> </div> <div class="card-title"><strong>""" + provider.get('name', "") + """</strong></div> <p class="card-text">""" + description_text + """<br> <br>""" + requirement_text + """</p> </div> <ul class="list-group list-group-flush">""" + link + """</ul> </div>""" return render_template('directory/directory.html', card=card, server=mode.server)
39.111111
140
0.621347
79577e50b309e806cf6061b3de87039bce094efd
1,069
py
Python
setup.py
VictorSanh/promptsource
b7a187217c569f7ad6470af2c1f30e527f6afc5f
[ "Apache-2.0" ]
387
2021-05-19T16:57:56.000Z
2022-03-30T22:55:16.000Z
setup.py
VictorSanh/promptsource
b7a187217c569f7ad6470af2c1f30e527f6afc5f
[ "Apache-2.0" ]
523
2021-05-19T20:32:42.000Z
2022-03-31T13:38:18.000Z
setup.py
VictorSanh/promptsource
b7a187217c569f7ad6470af2c1f30e527f6afc5f
[ "Apache-2.0" ]
103
2021-05-19T20:27:20.000Z
2022-03-28T13:36:56.000Z
from setuptools import setup, find_packages with open('README.md') as readme_file: readme = readme_file.read() with open('requirements.txt') as requirements_file: requirements = requirements_file.read().split('\n') setup( name='promptsource', version='0.1.0', url='https://github.com/bigscience-workshop/promptsource.git', author='Multiple Authors', author_email='xxx', python_requires='>=3.7, <3.8', install_requires=requirements, classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Natural Language :: English', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.7', ], description='Toolkit for collecting and applying templates of prompting instances.', packages=find_packages(), license="Apache Software License 2.0", long_description=readme, package_data={"": [ "templates/*/*.yaml", "templates/*/*/*.yaml", ]} )
31.441176
88
0.647334
79577e646b9425c08edb4a3af05b48bc7145f8ba
1,650
py
Python
veides/sdk/api/properties.py
Veides/veides-sdk-python
0a146f2ac0e9432359524189f6b992d315d2621b
[ "MIT" ]
null
null
null
veides/sdk/api/properties.py
Veides/veides-sdk-python
0a146f2ac0e9432359524189f6b992d315d2621b
[ "MIT" ]
null
null
null
veides/sdk/api/properties.py
Veides/veides-sdk-python
0a146f2ac0e9432359524189f6b992d315d2621b
[ "MIT" ]
null
null
null
import os from veides.sdk.api.exceptions import ConfigurationException class AuthProperties: def __init__(self, token): """ :param token: User's token :type token: str """ self._token = token @property def token(self): return self._token @staticmethod def from_env(): """ Returns AuthProperties instance built from env variables. Required variables are: 2. VEIDES_AUTH_USER_TOKEN: user's token :raises ConfigurationException: If required variables are not provided :return AuthProperties """ token = os.getenv('VEIDES_AUTH_USER_TOKEN', None) if token is None: raise ConfigurationException("Missing 'VEIDES_AUTH_USER_TOKEN' variable in env") return AuthProperties(token) class ConfigurationProperties: def __init__(self, base_url): """ :param base_url: Veides API url :type base_url: str """ self._base_url = base_url @property def base_url(self): return self._base_url @staticmethod def from_env(): """ Returns ConnectionProperties instance built from env variables. Required variables are: 1. VEIDES_API_BASE_URL: Veides Api url :raises ConfigurationException: If required variables are not provided :return ConnectionProperties """ base_url = os.getenv('VEIDES_API_BASE_URL', None) if base_url is None: raise ConfigurationException("Missing 'VEIDES_API_BASE_URL' variable in env") return ConfigurationProperties(base_url)
27.04918
95
0.646667
79577f53fcb6442cbcaedca20ea7dcb77343277d
9,427
py
Python
pysrc/faceplace/train_gppvae.py
ahmerb/GPPVAE
6f806426627942a92d96b007ee1c1ece02445e48
[ "Apache-2.0" ]
65
2018-10-31T14:51:52.000Z
2022-03-15T04:57:24.000Z
pysrc/faceplace/train_gppvae.py
ahmerb/GPPVAE
6f806426627942a92d96b007ee1c1ece02445e48
[ "Apache-2.0" ]
4
2019-05-15T09:35:06.000Z
2021-01-06T03:00:02.000Z
pysrc/faceplace/train_gppvae.py
ahmerb/GPPVAE
6f806426627942a92d96b007ee1c1ece02445e48
[ "Apache-2.0" ]
9
2018-12-27T12:02:21.000Z
2021-04-21T04:56:14.000Z
import matplotlib import sys matplotlib.use("Agg") import torch from torch import nn, optim import torch.nn.functional as F from torch.autograd import Variable from torch.utils.data import DataLoader from vae import FaceVAE from vmod import Vmodel from gp import GP import h5py import scipy as sp import os import pdb import logging import pylab as pl from utils import smartSum, smartAppendDict, smartAppend, export_scripts from callbacks import callback_gppvae from data_parser import read_face_data, FaceDataset from optparse import OptionParser import logging import pickle import time parser = OptionParser() parser.add_option( "--data", dest="data", type=str, default="./../data/faceplace/data_faces.h5", help="dataset path", ) parser.add_option( "--outdir", dest="outdir", type=str, default="./../out/gppvae", help="output dir" ) parser.add_option("--vae_cfg", dest="vae_cfg", type=str, default=None) parser.add_option("--vae_weights", dest="vae_weights", type=str, default=None) parser.add_option("--seed", dest="seed", type=int, default=0, help="seed") parser.add_option( "--vae_lr", dest="vae_lr", type=float, default=2e-4, help="learning rate of vae params", ) parser.add_option( "--gp_lr", dest="gp_lr", type=float, default=1e-3, help="learning rate of gp params" ) parser.add_option( "--xdim", dest="xdim", type=int, default=64, help="rank of object linear covariance" ) parser.add_option("--bs", dest="bs", type=int, default=64, help="batch size") parser.add_option( "--epoch_cb", dest="epoch_cb", type=int, default=100, help="number of epoch by which a callback (plot + dump weights) is executed", ) parser.add_option( "--epochs", dest="epochs", type=int, default=10000, help="total number of epochs" ) parser.add_option("--debug", action="store_true", dest="debug", default=False) (opt, args) = parser.parse_args() opt_dict = vars(opt) if opt.vae_cfg is None: opt.vae_cfg = "./../out/vae/vae.cfg.p" vae_cfg = pickle.load(open(opt.vae_cfg, "rb")) if opt.vae_weights is None: opt.vae_weights = "./../out/vae/weights/weights.00000.pt" if not os.path.exists(opt.outdir): os.makedirs(opt.outdir) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # output dir wdir = os.path.join(opt.outdir, "weights") fdir = os.path.join(opt.outdir, "plots") if not os.path.exists(wdir): os.makedirs(wdir) if not os.path.exists(fdir): os.makedirs(fdir) # copy code to output folder export_scripts(os.path.join(opt.outdir, "scripts")) # create logfile log_format = "%(asctime)s %(message)s" logging.basicConfig( stream=sys.stdout, level=logging.INFO, format=log_format, datefmt="%m/%d %I:%M:%S %p", ) fh = logging.FileHandler(os.path.join(opt.outdir, "log.txt")) fh.setFormatter(logging.Formatter(log_format)) logging.getLogger().addHandler(fh) logging.info("opt = %s", opt) def main(): torch.manual_seed(opt.seed) if opt.debug: pdb.set_trace() # load data img, obj, view = read_face_data(opt.data) # image, object, and view train_data = FaceDataset(img["train"], obj["train"], view["train"]) val_data = FaceDataset(img["val"], obj["val"], view["val"]) train_queue = DataLoader(train_data, batch_size=opt.bs, shuffle=True) val_queue = DataLoader(val_data, batch_size=opt.bs, shuffle=False) # longint view and object repr Dt = Variable(obj["train"][:, 0].long(), requires_grad=False).cuda() Wt = Variable(view["train"][:, 0].long(), requires_grad=False).cuda() Dv = Variable(obj["val"][:, 0].long(), requires_grad=False).cuda() Wv = Variable(view["val"][:, 0].long(), requires_grad=False).cuda() # define VAE and optimizer vae = 
FaceVAE(**vae_cfg).to(device) RV = torch.load(opt.vae_weights) vae.load_state_dict(RV) vae.to(device) # define gp P = sp.unique(obj["train"]).shape[0] Q = sp.unique(view["train"]).shape[0] vm = Vmodel(P, Q, opt.xdim, Q).cuda() gp = GP(n_rand_effs=1).to(device) gp_params = nn.ParameterList() gp_params.extend(vm.parameters()) gp_params.extend(gp.parameters()) # define optimizers vae_optimizer = optim.Adam(vae.parameters(), lr=opt.vae_lr) gp_optimizer = optim.Adam(gp_params, lr=opt.gp_lr) if opt.debug: pdb.set_trace() history = {} for epoch in range(opt.epochs): # 1. encode Y in mini-batches Zm, Zs = encode_Y(vae, train_queue) # 2. sample Z Eps = Variable(torch.randn(*Zs.shape), requires_grad=False).cuda() Z = Zm + Eps * Zs # 3. evaluation step (not needed for training) Vt = vm(Dt, Wt).detach() Vv = vm(Dv, Wv).detach() rv_eval, imgs, covs = eval_step(vae, gp, vm, val_queue, Zm, Vt, Vv) # 4. compute first-order Taylor expansion coefficient Zb, Vbs, vbs, gp_nll = gp.taylor_coeff(Z, [Vt]) rv_eval["gp_nll"] = float(gp_nll.data.mean().cpu()) / vae.K # 5. accumulate gradients over mini-batches and update params rv_back = backprop_and_update( vae, gp, vm, train_queue, Dt, Wt, Eps, Zb, Vbs, vbs, vae_optimizer, gp_optimizer, ) rv_back["loss"] = ( rv_back["recon_term"] + rv_eval["gp_nll"] + rv_back["pen_term"] ) smartAppendDict(history, rv_eval) smartAppendDict(history, rv_back) smartAppend(history, "vs", gp.get_vs().data.cpu().numpy()) logging.info( "epoch %d - tra_mse_val: %f - train_mse_out: %f" % (epoch, rv_eval["mse_val"], rv_eval["mse_out"]) ) # callback? if epoch % opt.epoch_cb == 0: logging.info("epoch %d - executing callback" % epoch) ffile = os.path.join(opt.outdir, "plot.%.5d.png" % epoch) callback_gppvae(epoch, history, covs, imgs, ffile) def encode_Y(vae, train_queue): vae.eval() with torch.no_grad(): n = train_queue.dataset.Y.shape[0] Zm = Variable(torch.zeros(n, vae_cfg["zdim"]), requires_grad=False).cuda() Zs = Variable(torch.zeros(n, vae_cfg["zdim"]), requires_grad=False).cuda() for batch_i, data in enumerate(train_queue): y = data[0].cuda() idxs = data[-1].cuda() zm, zs = vae.encode(y) Zm[idxs], Zs[idxs] = zm.detach(), zs.detach() return Zm, Zs def eval_step(vae, gp, vm, val_queue, Zm, Vt, Vv): rv = {} with torch.no_grad(): _X = vm.x().data.cpu().numpy() _W = vm.v().data.cpu().numpy() covs = {"XX": sp.dot(_X, _X.T), "WW": sp.dot(_W, _W.T)} rv["vars"] = gp.get_vs().data.cpu().numpy() # out of sample vs = gp.get_vs() U, UBi, _ = gp.U_UBi_Shb([Vt], vs) Kiz = gp.solve(Zm, U, UBi, vs) Zo = vs[0] * Vv.mm(Vt.transpose(0, 1).mm(Kiz)) mse_out = Variable(torch.zeros(Vv.shape[0], 1), requires_grad=False).cuda() mse_val = Variable(torch.zeros(Vv.shape[0], 1), requires_grad=False).cuda() for batch_i, data in enumerate(val_queue): idxs = data[-1].cuda() Yv = data[0].cuda() Zv = vae.encode(Yv)[0].detach() Yr = vae.decode(Zv) Yo = vae.decode(Zo[idxs]) mse_out[idxs] = ( ((Yv - Yo) ** 2).view(Yv.shape[0], -1).mean(1)[:, None].detach() ) mse_val[idxs] = ( ((Yv - Yr) ** 2).view(Yv.shape[0], -1).mean(1)[:, None].detach() ) # store a few examples if batch_i == 0: imgs = {} imgs["Yv"] = Yv[:24].data.cpu().numpy().transpose(0, 2, 3, 1) imgs["Yr"] = Yr[:24].data.cpu().numpy().transpose(0, 2, 3, 1) imgs["Yo"] = Yo[:24].data.cpu().numpy().transpose(0, 2, 3, 1) rv["mse_out"] = float(mse_out.data.mean().cpu()) rv["mse_val"] = float(mse_val.data.mean().cpu()) return rv, imgs, covs def backprop_and_update( vae, gp, vm, train_queue, Dt, Wt, Eps, Zb, Vbs, vbs, vae_optimizer, gp_optimizer ): rv = {} 
vae_optimizer.zero_grad() gp_optimizer.zero_grad() vae.train() gp.train() vm.train() for batch_i, data in enumerate(train_queue): # subset data y = data[0].cuda() eps = Eps[data[-1]] _d = Dt[data[-1]] _w = Wt[data[-1]] _Zb = Zb[data[-1]] _Vbs = [Vbs[0][data[-1]]] # forward vae zm, zs = vae.encode(y) z = zm + zs * eps yr = vae.decode(z) recon_term, mse = vae.nll(y, yr) # forward gp _Vs = [vm(_d, _w)] gp_nll_fo = gp.taylor_expansion(z, _Vs, _Zb, _Vbs, vbs) / vae.K # penalization pen_term = -0.5 * zs.sum(1)[:, None] / vae.K # loss and backward loss = (recon_term + gp_nll_fo + pen_term).sum() loss.backward() # store stuff _n = train_queue.dataset.Y.shape[0] smartSum(rv, "mse", float(mse.data.sum().cpu()) / _n) smartSum(rv, "recon_term", float(recon_term.data.sum().cpu()) / _n) smartSum(rv, "pen_term", float(pen_term.data.sum().cpu()) / _n) vae_optimizer.step() gp_optimizer.step() return rv if __name__ == "__main__": main()
29.832278
88
0.594569
79577f924c03c45f9bb6812341a6777296981f08
3,000
py
Python
ironic/tests/unit/objects/utils.py
hpproliant/ironic
4f62cd97196b2a0068700ffb17456912147778d0
[ "Apache-2.0" ]
null
null
null
ironic/tests/unit/objects/utils.py
hpproliant/ironic
4f62cd97196b2a0068700ffb17456912147778d0
[ "Apache-2.0" ]
null
null
null
ironic/tests/unit/objects/utils.py
hpproliant/ironic
4f62cd97196b2a0068700ffb17456912147778d0
[ "Apache-2.0" ]
null
null
null
# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Ironic object test utilities.""" from ironic import objects from ironic.tests.unit.db import utils as db_utils def get_test_node(ctxt, **kw): """Return a Node object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ db_node = db_utils.get_test_node(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_node['id'] node = objects.Node(ctxt) for key in db_node: setattr(node, key, db_node[key]) return node def create_test_node(ctxt, **kw): """Create and return a test node object. Create a node in the DB and return a Node object with appropriate attributes. """ node = get_test_node(ctxt, **kw) node.create() return node def get_test_port(ctxt, **kw): """Return a Port object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ db_port = db_utils.get_test_port(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_port['id'] port = objects.Port(ctxt) for key in db_port: setattr(port, key, db_port[key]) return port def create_test_port(ctxt, **kw): """Create and return a test port object. Create a port in the DB and return a Port object with appropriate attributes. """ port = get_test_port(ctxt, **kw) port.create() return port def get_test_chassis(ctxt, **kw): """Return a Chassis object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ db_chassis = db_utils.get_test_chassis(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_chassis['id'] chassis = objects.Chassis(ctxt) for key in db_chassis: setattr(chassis, key, db_chassis[key]) return chassis def create_test_chassis(ctxt, **kw): """Create and return a test chassis object. Create a chassis in the DB and return a Chassis object with appropriate attributes. """ chassis = get_test_chassis(ctxt, **kw) chassis.create() return chassis
30
78
0.681
795780921963d90fe5541fa8f5cf6d675652e245
9,260
py
Python
stylebox.py
LightSoar/pystylebox
2a5cc6f9b5a67c0cb2100ccad139a927117f9e3e
[ "MIT" ]
null
null
null
stylebox.py
LightSoar/pystylebox
2a5cc6f9b5a67c0cb2100ccad139a927117f9e3e
[ "MIT" ]
null
null
null
stylebox.py
LightSoar/pystylebox
2a5cc6f9b5a67c0cb2100ccad139a927117f9e3e
[ "MIT" ]
null
null
null
#!env/bin/python
import abc  # abstract base class
import numpy as np
from overrides import overrides


def rescale(x, from_lo, from_hi, to_lo, to_hi):
    """Linearly map x from [from_lo, from_hi] onto [to_lo, to_hi]."""
    # y = yi + (yf-yi)/(xf-xi)*(x-xi)
    y = to_lo + (to_hi-to_lo)/(from_hi-from_lo)*(x-from_lo)
    return y


class Scatter:
    """Collection of (x, y) points restricted to a rectangular domain."""

    def __init__(self, x_domain=[-np.inf, np.inf], y_domain=[-np.inf, np.inf]):
        if not(len(x_domain) == len(y_domain) == 2):
            raise ValueError
        self.x_domain = x_domain
        self.y_domain = y_domain
        x_lo, x_hi = x_domain
        y_lo, y_hi = y_domain
        self.x_lo, self.x_hi = x_domain
        self.y_lo, self.y_hi = y_domain
        self.x_range = x_hi-x_lo
        self.y_range = y_hi-y_lo
        self.x = []
        self.y = []

    def add_point(self, x: float, y: float) -> bool:
        in_range = (self.x_lo <= x <= self.x_hi) and (self.y_lo <= y <= self.y_hi)
        if in_range:
            self.x.append(x)
            self.y.append(y)
        return in_range


class StyleBox(Scatter):
    """A Scatter that also carries evenly spaced gridline coordinates."""

    def set_gridlines(self, nx: int, ny: int = None):
        ny = nx if ny is None else ny
        # make sure `nx`, `ny` are non-negative
        nx, ny = max(0, nx), max(0, ny)
        # TODO handle case when domain is (semi) infinite
        dx, dy = self.x_range/(nx+1), self.y_range/(ny+1)
        # generate a list of coordinates for the gridlines
        xi = np.arange(self.x_lo + dx, self.x_hi, dx)  # x-coords, vertical gridlines
        yi = np.arange(self.y_lo + dy, self.y_hi, dy)  # y-coords, horizontal gridlines
        self.grid_xi, self.grid_yi = list(xi), list(yi)


class StyleBoxBuilder(metaclass=abc.ABCMeta):
    """Abstract fluent builder (grid/point/build) that renders a StyleBox."""

    def __init__(self, x_domain, y_domain, size, color='None'):
        self.size = size
        self.color = color
        self.stylebox = StyleBox(x_domain, y_domain)

    def grid(self, nx: int, ny: int):
        # delegate gridline placement to the underlying StyleBox
        self.stylebox.set_gridlines(nx, ny)
        return self

    def point(self, x: float, y: float):
        # points outside the domain are silently dropped (add_point returns False)
        self.stylebox.add_point(x, y)
        return self

    @abc.abstractmethod
    def build(self):
        pass


class SVGStyleBoxBuilder(StyleBoxBuilder):
    """Renders the StyleBox as an SVG document drawn on a 300x300 canvas."""

    VERT_GRIDLINE_TEMPLATE = '<path id="vgridline-{vid}" d="m{x:.3f} 0.0v300"/>\n'  # SVG box size is always 300
    HORZ_GRIDLINE_TEMPLATE = '<path id="hgridline-{hid}" d="m0.0 {y:.3f}h300"/>\n'  # SVG box size is always 300
    CIRC_TEMPLATE = '<circle id="{pid}" cx="{x:.3f}" cy="{y:.3f}" r="15" fill="{color}"/>\n'  # Circle radius is always 15

    def x_to_box_coords(self, x):
        return rescale(x, self.stylebox.x_lo, self.stylebox.x_hi, 0.0, 300.0)

    def y_to_box_coords(self, y):
        return rescale(y, self.stylebox.y_lo, self.stylebox.y_hi, 0.0, 300.0)

    def build(self):
        # Concatenate gridline <path /> statements
        vert = '\n'.join([self.VERT_GRIDLINE_TEMPLATE.format(vid=vid, x=self.x_to_box_coords(x))
                          for vid, x in enumerate(self.stylebox.grid_xi)])
        horz = '\n'.join([self.HORZ_GRIDLINE_TEMPLATE.format(hid=hid, y=self.y_to_box_coords(y))
                          for hid, y in enumerate(self.stylebox.grid_yi)])
        # Group all gridlines (note: this attribute shadows the inherited grid() method after build())
        self.grid = '<g id="gridlines">\n' + vert + horz + '</g>\n'
        self.axes = '<rect id="axes" stroke-opacity="1.0" height="297.50" width="297.50" y="2.5" x="2.5"/>\n'
        points = '\n'.join([self.CIRC_TEMPLATE.format(pid=pid, x=self.x_to_box_coords(x), y=self.y_to_box_coords(y), color=self.color)
                            for pid, x, y in zip(range(len(self.stylebox.x)), self.stylebox.x, self.stylebox.y)])
        self.points = '<g id="points">\n' + points + '</g>\n'
        self.box = '<g id="box" stroke="{color}" stroke-width="5" fill="none">\n'.format(color=self.color) + \
                   self.axes + self.grid + self.points + '</g>\n'
        self.layer = '<g id="layer1">\n' + self.box + '</g>\n'
        self.svg = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg id="svg2" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns="http://www.w3.org/2000/svg" height="{size}" width="{size}" version="1.1" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" viewBox="0 0 302.5 302.5">\n""".format(size=self.size) + self.layer + '</svg>\n'
        return self.svg


class HTMLStyleBoxBuilder(StyleBoxBuilder):
    """Renders the StyleBox as an inline-styled HTML snippet with a fixed 3x3 grid."""

    HTML_TEMPLATE = """
    <div class="stylebox" style="position: relative; width: 50px; height: 50px;">
        <table class="grid" style="position: absolute; border-collapse: collapse; border-color:#000000; overflow:hidden; width:50px; height:50px; border: 1px;">
            <tr>
                <td class="cell" style="border-style:solid; padding: 0px; margin: 0px;"></td>
                <td class="cell" style="border-style:solid; padding: 0px; margin: 0px;"></td>
                <td class="cell" style="border-style:solid; padding: 0px; margin: 0px;"></td>
            </tr>
            <tr>
                <td class="cell" style="border-style:solid; padding: 0px; margin: 0px;"></td>
                <td class="cell" style="border-style:solid; padding: 0px; margin: 0px;"></td>
                <td class="cell" style="border-style:solid; padding: 0px; margin: 0px;"></td>
            </tr>
            <tr>
                <td class="cell" style="border-style:solid; padding: 0px; margin: 0px;"></td>
                <td class="cell" style="border-style:solid; padding: 0px; margin: 0px;"></td>
                <td class="cell" style="border-style:solid; padding: 0px; margin: 0px;"></td>
            </tr>
        </table>
        <div class="circle" style="position: absolute; width: 10px; height: 10px; background-color: black; border-radius: 50%; left: {x}%; top: {y}%; transform: translate(-50%, -50%);"/>
    </div>
    """

    def x_to_box_coords(self, x):
        return rescale(x, self.stylebox.x_lo, self.stylebox.x_hi, 0.0, 100.0)

    def y_to_box_coords(self, y):
        return rescale(y, self.stylebox.y_lo, self.stylebox.y_hi, 0.0, 100.0)

    def build(self):
        # TODO make use of all points
        return self.HTML_TEMPLATE.format(x=self.x_to_box_coords(self.stylebox.x[-1]),
                                         y=self.y_to_box_coords(self.stylebox.y[-1]))


class PNGStyleBoxBuilder(StyleBoxBuilder):
    pass


class ASCIIStyleBoxBuilder(StyleBoxBuilder):
    """Renders the StyleBox as a size-by-size grid of box-drawing characters."""

    def x_to_box_coords(self, x):
        return round(rescale(x, self.stylebox.x_lo, self.stylebox.x_hi, 0.0, self.size-1))

    def y_to_box_coords(self, y):
        return round(rescale(y, self.stylebox.y_lo, self.stylebox.y_hi, 0.0, self.size-1))

    @overrides
    def grid(self, nx: int, ny: int = None):
        # N gridlines are possible only if size-1 is divisible by N+1.
        # e.g. for 1 gridline, (size-1) must be divisible by 2
        #      for 2 gridlines, (size-1) must be divisible by 3
        nx = nx if ((self.size-1) % (nx+1)) == 0 else 0
        if ny is None:
            ny = nx
        else:
            ny = ny if ((self.size-1) % (ny+1)) == 0 else 0
        # make sure `nx`, `ny` are non-negative
        nx, ny = max(0, nx), max(0, ny)
        xi = np.arange(0, self.size, self.size//(nx+1))[1:-1]
        yi = np.arange(0, self.size, self.size//(ny+1))[1:-1]
        self.stylebox.grid_xi, self.stylebox.grid_yi = list(xi), list(yi)
        return self

    def build(self):
        # Generate the "wireframe"
        top = ['┌'] + ['─']*(self.size-2) + ['┐']
        sides = ['│'] + [' ']*(self.size-2) + ['│']
        bottom = ['└'] + ['─']*(self.size-2) + ['┘']
        box = [sides.copy() for i in range(self.size-2)]
        box.insert(0, top)
        box.append(bottom)
        # Add gridlines
        for ix in self.stylebox.grid_xi:
            box[0][ix] = '┬'   # top
            box[-1][ix] = '┴'  # bottom
            for iy in range(1, self.size-1):
                box[iy][ix] = '│'
        for iy in self.stylebox.grid_yi:
            box[iy][0] = '├'
            box[iy][-1] = '┤'
            for ix in range(1, self.size-1):
                box[iy][ix] = '─'  # if ix not in self.stylebox.grid_xi else '┼'
        for ix in self.stylebox.grid_xi:
            for iy in self.stylebox.grid_yi:
                box[iy][ix] = '┼'
        # Add point
        if len(self.stylebox.x):
            ix = self.x_to_box_coords(self.stylebox.x[-1])
            iy = self.y_to_box_coords(self.stylebox.y[-1])
            box[iy][ix] = '*'
        return '\n'.join([''.join(line) for line in box])


class StyleBoxBuilderDirector:
    pass


#with open("/tmp/direct.svg", "w") as svg_file:
#    svg_file.write(SVGStyleBoxBuilder([0, 1], [0, 1], size=50, color="#000000").grid(2,2).point(0.5,0.5).point(0.5,0.4).point(0.4,0.5).build())
#print(ASCIIStyleBoxBuilder([0,1],[0,1], size=7).grid(1).point(0.25,0.75).build())
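# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal example of the fluent builder chain, assuming a unit domain on both
# axes; it mirrors the commented-out calls above. The pattern is always:
# construct with a domain and output size, then .grid(...).point(...).build().
if __name__ == '__main__':
    # 7-character text box with one centred gridline per axis and a single point
    print(ASCIIStyleBoxBuilder([0, 1], [0, 1], size=7).grid(1).point(0.25, 0.75).build())
    # 50x50 px SVG with two gridlines per axis and one centred point
    print(SVGStyleBoxBuilder([0, 1], [0, 1], size=50, color="#000000").grid(2, 2).point(0.5, 0.5).build())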
37.795918
317
0.554752